sched: Validate CFS quota hierarchies
kernel/sched_fair.c
1/*
2 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
3 *
4 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
5 *
6 * Interactivity improvements by Mike Galbraith
7 * (C) 2007 Mike Galbraith <efault@gmx.de>
8 *
9 * Various enhancements by Dmitry Adamushko.
10 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
11 *
12 * Group scheduling enhancements by Srivatsa Vaddagiri
13 * Copyright IBM Corporation, 2007
14 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
15 *
16 * Scaled math optimizations by Thomas Gleixner
17 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
18 *
19 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
20 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
21 */
22
9745512c 23#include <linux/latencytop.h>
1983a922 24#include <linux/sched.h>
3436ae12 25#include <linux/cpumask.h>
9745512c 26
bf0f6f24 27/*
21805085 28 * Targeted preemption latency for CPU-bound tasks:
864616ee 29 * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
bf0f6f24 30 *
21805085 31 * NOTE: this latency value is not the same as the concept of
32 * 'timeslice length' - timeslices in CFS are of variable length
33 * and have no persistent notion like in traditional, time-slice
34 * based scheduling concepts.
bf0f6f24 35 *
36 * (to see the precise effective timeslice length of your workload,
37 * run vmstat and monitor the context-switches (cs) field)
bf0f6f24 38 */
39unsigned int sysctl_sched_latency = 6000000ULL;
40unsigned int normalized_sysctl_sched_latency = 6000000ULL;
2bd8e6d4 41
42/*
43 * The initial- and re-scaling of tunables is configurable
44 * (default: SCHED_TUNABLESCALING_LOG = *(1+ilog(ncpus)))
45 *
46 * Options are:
47 * SCHED_TUNABLESCALING_NONE - unscaled, always *1
48 * SCHED_TUNABLESCALING_LOG - scaled logarithmically, *(1+ilog(ncpus))
49 * SCHED_TUNABLESCALING_LINEAR - scaled linearly, *ncpus
50 */
51enum sched_tunable_scaling sysctl_sched_tunable_scaling
52 = SCHED_TUNABLESCALING_LOG;
53
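/*
 * Example: with SCHED_TUNABLESCALING_LOG on an 8-CPU system, ilog(8) = 3,
 * so the effective factor is 1 + 3 = 4 and the unscaled 6ms latency above
 * becomes roughly 24ms; the minimum and wakeup granularities are scaled by
 * the same factor.
 */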
2bd8e6d4 54/*
b2be5e96 55 * Minimal preemption granularity for CPU-bound tasks:
864616ee 56 * (default: 0.75 msec * (1 + ilog(ncpus)), units: nanoseconds)
2bd8e6d4 57 */
58unsigned int sysctl_sched_min_granularity = 750000ULL;
59unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
60
61/*
62 * sched_nr_latency is kept at sysctl_sched_latency / sysctl_sched_min_granularity
63 */
0bf377bb 64static unsigned int sched_nr_latency = 8;
65
66/*
2bba22c5 67 * After fork, child runs first. If set to 0 (default) then
b2be5e96 68 * parent will (try to) run first.
21805085 69 */
2bba22c5 70unsigned int sysctl_sched_child_runs_first __read_mostly;
bf0f6f24 71
72/*
73 * SCHED_OTHER wake-up granularity.
172e082a 74 * (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
75 *
76 * This option delays the preemption effects of decoupled workloads
77 * and reduces their over-scheduling. Synchronous workloads will still
78 * have immediate wakeup/sleep latencies.
79 */
172e082a 80unsigned int sysctl_sched_wakeup_granularity = 1000000UL;
0bcdcf28 81unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;
bf0f6f24 82
83const_debug unsigned int sysctl_sched_migration_cost = 500000UL;
84
85/*
86 * The exponential sliding window over which load is averaged for shares
87 * distribution.
88 * (default: 10msec)
89 */
90unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
91
92static const struct sched_class fair_sched_class;
93
94/**************************************************************
95 * CFS operations on generic schedulable entities:
96 */
97
62160e3f 98#ifdef CONFIG_FAIR_GROUP_SCHED
bf0f6f24 99
62160e3f 100/* cpu runqueue to which this cfs_rq is attached */
bf0f6f24
IM
101static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
102{
62160e3f 103 return cfs_rq->rq;
bf0f6f24
IM
104}
105
62160e3f
IM
106/* An entity is a task if it doesn't "own" a runqueue */
107#define entity_is_task(se) (!se->my_q)
bf0f6f24 108
8f48894f
PZ
109static inline struct task_struct *task_of(struct sched_entity *se)
110{
111#ifdef CONFIG_SCHED_DEBUG
112 WARN_ON_ONCE(!entity_is_task(se));
113#endif
114 return container_of(se, struct task_struct, se);
115}
116
b758149c
PZ
117/* Walk up scheduling entities hierarchy */
118#define for_each_sched_entity(se) \
119 for (; se; se = se->parent)
120
121static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
122{
123 return p->se.cfs_rq;
124}
125
126/* runqueue on which this entity is (to be) queued */
127static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
128{
129 return se->cfs_rq;
130}
131
132/* runqueue "owned" by this group */
133static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
134{
135 return grp->my_q;
136}
137
3d4b47b4
PZ
138static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
139{
140 if (!cfs_rq->on_list) {
67e86250
PT
141 /*
142 * Ensure we either appear before our parent (if already
143 * enqueued) or force our parent to appear after us when it is
144 * enqueued. The fact that we always enqueue bottom-up
145 * reduces this to two cases.
146 */
147 if (cfs_rq->tg->parent &&
148 cfs_rq->tg->parent->cfs_rq[cpu_of(rq_of(cfs_rq))]->on_list) {
149 list_add_rcu(&cfs_rq->leaf_cfs_rq_list,
150 &rq_of(cfs_rq)->leaf_cfs_rq_list);
151 } else {
152 list_add_tail_rcu(&cfs_rq->leaf_cfs_rq_list,
3d4b47b4 153 &rq_of(cfs_rq)->leaf_cfs_rq_list);
67e86250 154 }
3d4b47b4
PZ
155
156 cfs_rq->on_list = 1;
157 }
158}
159
160static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
161{
162 if (cfs_rq->on_list) {
163 list_del_rcu(&cfs_rq->leaf_cfs_rq_list);
164 cfs_rq->on_list = 0;
165 }
166}
167
b758149c
PZ
168/* Iterate through all leaf cfs_rq's on a runqueue */
169#define for_each_leaf_cfs_rq(rq, cfs_rq) \
170 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
171
172/* Do the two (enqueued) entities belong to the same group ? */
173static inline int
174is_same_group(struct sched_entity *se, struct sched_entity *pse)
175{
176 if (se->cfs_rq == pse->cfs_rq)
177 return 1;
178
179 return 0;
180}
181
182static inline struct sched_entity *parent_entity(struct sched_entity *se)
183{
184 return se->parent;
185}
186
464b7527
PZ
187/* return depth at which a sched entity is present in the hierarchy */
188static inline int depth_se(struct sched_entity *se)
189{
190 int depth = 0;
191
192 for_each_sched_entity(se)
193 depth++;
194
195 return depth;
196}
197
198static void
199find_matching_se(struct sched_entity **se, struct sched_entity **pse)
200{
201 int se_depth, pse_depth;
202
203 /*
204 * The preemption test can only be made between sibling entities that are
205 * in the same cfs_rq, i.e. that have a common parent. Walk up the
206 * hierarchy of both tasks until we find their ancestors that are
207 * siblings of a common parent.
208 */
209
210 /* First walk up until both entities are at same depth */
211 se_depth = depth_se(*se);
212 pse_depth = depth_se(*pse);
213
214 while (se_depth > pse_depth) {
215 se_depth--;
216 *se = parent_entity(*se);
217 }
218
219 while (pse_depth > se_depth) {
220 pse_depth--;
221 *pse = parent_entity(*pse);
222 }
223
224 while (!is_same_group(*se, *pse)) {
225 *se = parent_entity(*se);
226 *pse = parent_entity(*pse);
227 }
228}
229
8f48894f
PZ
230#else /* !CONFIG_FAIR_GROUP_SCHED */
231
232static inline struct task_struct *task_of(struct sched_entity *se)
233{
234 return container_of(se, struct task_struct, se);
235}
bf0f6f24 236
62160e3f
IM
237static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
238{
239 return container_of(cfs_rq, struct rq, cfs);
bf0f6f24
IM
240}
241
242#define entity_is_task(se) 1
243
b758149c
PZ
244#define for_each_sched_entity(se) \
245 for (; se; se = NULL)
bf0f6f24 246
b758149c 247static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
bf0f6f24 248{
b758149c 249 return &task_rq(p)->cfs;
bf0f6f24
IM
250}
251
b758149c
PZ
252static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
253{
254 struct task_struct *p = task_of(se);
255 struct rq *rq = task_rq(p);
256
257 return &rq->cfs;
258}
259
260/* runqueue "owned" by this group */
261static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
262{
263 return NULL;
264}
265
3d4b47b4
PZ
266static inline void list_add_leaf_cfs_rq(struct cfs_rq *cfs_rq)
267{
268}
269
270static inline void list_del_leaf_cfs_rq(struct cfs_rq *cfs_rq)
271{
272}
273
b758149c
PZ
274#define for_each_leaf_cfs_rq(rq, cfs_rq) \
275 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
276
277static inline int
278is_same_group(struct sched_entity *se, struct sched_entity *pse)
279{
280 return 1;
281}
282
283static inline struct sched_entity *parent_entity(struct sched_entity *se)
284{
285 return NULL;
286}
287
464b7527
PZ
288static inline void
289find_matching_se(struct sched_entity **se, struct sched_entity **pse)
290{
291}
292
b758149c
PZ
293#endif /* CONFIG_FAIR_GROUP_SCHED */
294
bf0f6f24
IM
295
296/**************************************************************
297 * Scheduling class tree data structure manipulation methods:
298 */
299
0702e3eb 300static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
02e0431a 301{
368059a9
PZ
302 s64 delta = (s64)(vruntime - min_vruntime);
303 if (delta > 0)
02e0431a
PZ
304 min_vruntime = vruntime;
305
306 return min_vruntime;
307}
308
0702e3eb 309static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
b0ffd246
PZ
310{
311 s64 delta = (s64)(vruntime - min_vruntime);
312 if (delta < 0)
313 min_vruntime = vruntime;
314
315 return min_vruntime;
316}
317
54fdc581
FC
318static inline int entity_before(struct sched_entity *a,
319 struct sched_entity *b)
320{
321 return (s64)(a->vruntime - b->vruntime) < 0;
322}
323
1af5f730
PZ
324static void update_min_vruntime(struct cfs_rq *cfs_rq)
325{
326 u64 vruntime = cfs_rq->min_vruntime;
327
328 if (cfs_rq->curr)
329 vruntime = cfs_rq->curr->vruntime;
330
331 if (cfs_rq->rb_leftmost) {
332 struct sched_entity *se = rb_entry(cfs_rq->rb_leftmost,
333 struct sched_entity,
334 run_node);
335
e17036da 336 if (!cfs_rq->curr)
1af5f730
PZ
337 vruntime = se->vruntime;
338 else
339 vruntime = min_vruntime(vruntime, se->vruntime);
340 }
341
342 cfs_rq->min_vruntime = max_vruntime(cfs_rq->min_vruntime, vruntime);
3fe1698b
PZ
343#ifndef CONFIG_64BIT
344 smp_wmb();
345 cfs_rq->min_vruntime_copy = cfs_rq->min_vruntime;
346#endif
1af5f730
PZ
347}
348
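/*
 * On 32-bit, min_vruntime_copy and the smp_wmb() above pair with the
 * smp_rmb() retry loop in task_waking_fair(), which re-reads until the
 * copy and the value agree, so a waking task can observe a consistent
 * min_vruntime without holding the rq lock.
 */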
bf0f6f24
IM
349/*
350 * Enqueue an entity into the rb-tree:
351 */
0702e3eb 352static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
353{
354 struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
355 struct rb_node *parent = NULL;
356 struct sched_entity *entry;
bf0f6f24
IM
357 int leftmost = 1;
358
359 /*
360 * Find the right place in the rbtree:
361 */
362 while (*link) {
363 parent = *link;
364 entry = rb_entry(parent, struct sched_entity, run_node);
365 /*
366 * We don't care about collisions. Nodes with
367 * the same key stay together.
368 */
2bd2d6f2 369 if (entity_before(se, entry)) {
bf0f6f24
IM
370 link = &parent->rb_left;
371 } else {
372 link = &parent->rb_right;
373 leftmost = 0;
374 }
375 }
376
377 /*
378 * Maintain a cache of leftmost tree entries (it is frequently
379 * used):
380 */
1af5f730 381 if (leftmost)
57cb499d 382 cfs_rq->rb_leftmost = &se->run_node;
bf0f6f24
IM
383
384 rb_link_node(&se->run_node, parent, link);
385 rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
bf0f6f24
IM
386}
387
0702e3eb 388static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 389{
3fe69747
PZ
390 if (cfs_rq->rb_leftmost == &se->run_node) {
391 struct rb_node *next_node;
3fe69747
PZ
392
393 next_node = rb_next(&se->run_node);
394 cfs_rq->rb_leftmost = next_node;
3fe69747 395 }
e9acbff6 396
bf0f6f24 397 rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
bf0f6f24
IM
398}
399
ac53db59 400static struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
bf0f6f24 401{
f4b6755f
PZ
402 struct rb_node *left = cfs_rq->rb_leftmost;
403
404 if (!left)
405 return NULL;
406
407 return rb_entry(left, struct sched_entity, run_node);
bf0f6f24
IM
408}
409
ac53db59
RR
410static struct sched_entity *__pick_next_entity(struct sched_entity *se)
411{
412 struct rb_node *next = rb_next(&se->run_node);
413
414 if (!next)
415 return NULL;
416
417 return rb_entry(next, struct sched_entity, run_node);
418}
419
420#ifdef CONFIG_SCHED_DEBUG
f4b6755f 421static struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
aeb73b04 422{
7eee3e67 423 struct rb_node *last = rb_last(&cfs_rq->tasks_timeline);
aeb73b04 424
70eee74b
BS
425 if (!last)
426 return NULL;
7eee3e67
IM
427
428 return rb_entry(last, struct sched_entity, run_node);
aeb73b04
PZ
429}
430
bf0f6f24
IM
431/**************************************************************
432 * Scheduling class statistics methods:
433 */
434
acb4a848 435int sched_proc_update_handler(struct ctl_table *table, int write,
8d65af78 436 void __user *buffer, size_t *lenp,
b2be5e96
PZ
437 loff_t *ppos)
438{
8d65af78 439 int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
acb4a848 440 int factor = get_update_sysctl_factor();
b2be5e96
PZ
441
442 if (ret || !write)
443 return ret;
444
445 sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
446 sysctl_sched_min_granularity);
447
acb4a848
CE
448#define WRT_SYSCTL(name) \
449 (normalized_sysctl_##name = sysctl_##name / (factor))
450 WRT_SYSCTL(sched_min_granularity);
451 WRT_SYSCTL(sched_latency);
452 WRT_SYSCTL(sched_wakeup_granularity);
acb4a848
CE
453#undef WRT_SYSCTL
454
b2be5e96
PZ
455 return 0;
456}
457#endif
647e7cac 458
a7be37ac 459/*
f9c0b095 460 * delta /= w
a7be37ac
PZ
461 */
462static inline unsigned long
463calc_delta_fair(unsigned long delta, struct sched_entity *se)
464{
f9c0b095
PZ
465 if (unlikely(se->load.weight != NICE_0_LOAD))
466 delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
a7be37ac
PZ
467
468 return delta;
469}
470
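/*
 * Example: a task at NICE_0_LOAD (1024) gets delta back unchanged, while
 * a task with twice that weight advances its vruntime at half the rate,
 * so heavier tasks accumulate vruntime more slowly.
 */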
647e7cac
IM
471/*
472 * The idea is to set a period in which each task runs once.
473 *
474 * When there are too many tasks (more than sched_nr_latency) we have to stretch
475 * this period because otherwise the slices get too small.
476 *
477 * p = (nr <= nl) ? l : l*nr/nl
478 */
4d78e7b6
PZ
479static u64 __sched_period(unsigned long nr_running)
480{
481 u64 period = sysctl_sched_latency;
b2be5e96 482 unsigned long nr_latency = sched_nr_latency;
4d78e7b6
PZ
483
484 if (unlikely(nr_running > nr_latency)) {
4bf0b771 485 period = sysctl_sched_min_granularity;
4d78e7b6 486 period *= nr_running;
4d78e7b6
PZ
487 }
488
489 return period;
490}
491
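/*
 * Example: with the unscaled defaults (6ms latency, 0.75ms minimum
 * granularity, sched_nr_latency == 8), up to 8 runnable tasks share a
 * 6ms period; with 20 runnable tasks the period stretches to
 * 20 * 0.75ms = 15ms.
 */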
647e7cac
IM
492/*
493 * We calculate the wall-time slice from the period by taking a part
494 * proportional to the weight.
495 *
f9c0b095 496 * s = p*P[w/rw]
647e7cac 497 */
6d0f0ebd 498static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
21805085 499{
0a582440 500 u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
f9c0b095 501
0a582440 502 for_each_sched_entity(se) {
6272d68c 503 struct load_weight *load;
3104bf03 504 struct load_weight lw;
6272d68c
LM
505
506 cfs_rq = cfs_rq_of(se);
507 load = &cfs_rq->load;
f9c0b095 508
0a582440 509 if (unlikely(!se->on_rq)) {
3104bf03 510 lw = cfs_rq->load;
0a582440
MG
511
512 update_load_add(&lw, se->load.weight);
513 load = &lw;
514 }
515 slice = calc_delta_mine(slice, se->load.weight, load);
516 }
517 return slice;
bf0f6f24
IM
518}
519
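/*
 * Example: two runnable tasks of weight 1024 and 2048 split a 6ms period
 * into roughly 2ms and 4ms slices; with group scheduling the same
 * proportional split is applied again at every level of the hierarchy
 * walked above.
 */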
647e7cac 520/*
ac884dec 521 * We calculate the vruntime slice of a to-be-inserted task
647e7cac 522 *
f9c0b095 523 * vs = s/w
647e7cac 524 */
f9c0b095 525static u64 sched_vslice(struct cfs_rq *cfs_rq, struct sched_entity *se)
67e9fb2a 526{
f9c0b095 527 return calc_delta_fair(sched_slice(cfs_rq, se), se);
a7be37ac
PZ
528}
529
d6b55918 530static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update);
6d5ab293 531static void update_cfs_shares(struct cfs_rq *cfs_rq);
3b3d190e 532
bf0f6f24
IM
533/*
534 * Update the current task's runtime statistics. Skip current tasks that
535 * are not in our scheduling class.
536 */
537static inline void
8ebc91d9
IM
538__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
539 unsigned long delta_exec)
bf0f6f24 540{
bbdba7c0 541 unsigned long delta_exec_weighted;
bf0f6f24 542
41acab88
LDM
543 schedstat_set(curr->statistics.exec_max,
544 max((u64)delta_exec, curr->statistics.exec_max));
bf0f6f24
IM
545
546 curr->sum_exec_runtime += delta_exec;
7a62eabc 547 schedstat_add(cfs_rq, exec_clock, delta_exec);
a7be37ac 548 delta_exec_weighted = calc_delta_fair(delta_exec, curr);
88ec22d3 549
e9acbff6 550 curr->vruntime += delta_exec_weighted;
1af5f730 551 update_min_vruntime(cfs_rq);
3b3d190e 552
70caf8a6 553#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
3b3d190e 554 cfs_rq->load_unacc_exec_time += delta_exec;
3b3d190e 555#endif
bf0f6f24
IM
556}
557
b7cc0896 558static void update_curr(struct cfs_rq *cfs_rq)
bf0f6f24 559{
429d43bc 560 struct sched_entity *curr = cfs_rq->curr;
305e6835 561 u64 now = rq_of(cfs_rq)->clock_task;
bf0f6f24
IM
562 unsigned long delta_exec;
563
564 if (unlikely(!curr))
565 return;
566
567 /*
568 * Get the amount of time the current task was running
569 * since the last time we changed load (this cannot
570 * overflow on 32 bits):
571 */
8ebc91d9 572 delta_exec = (unsigned long)(now - curr->exec_start);
34f28ecd
PZ
573 if (!delta_exec)
574 return;
bf0f6f24 575
8ebc91d9
IM
576 __update_curr(cfs_rq, curr, delta_exec);
577 curr->exec_start = now;
d842de87
SV
578
579 if (entity_is_task(curr)) {
580 struct task_struct *curtask = task_of(curr);
581
f977bb49 582 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime);
d842de87 583 cpuacct_charge(curtask, delta_exec);
f06febc9 584 account_group_exec_runtime(curtask, delta_exec);
d842de87 585 }
bf0f6f24
IM
586}
587
588static inline void
5870db5b 589update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 590{
41acab88 591 schedstat_set(se->statistics.wait_start, rq_of(cfs_rq)->clock);
bf0f6f24
IM
592}
593
bf0f6f24
IM
594/*
595 * Task is being enqueued - update stats:
596 */
d2417e5a 597static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 598{
bf0f6f24
IM
599 /*
600 * Are we enqueueing a waiting task? (for current tasks
601 * a dequeue/enqueue event is a NOP)
602 */
429d43bc 603 if (se != cfs_rq->curr)
5870db5b 604 update_stats_wait_start(cfs_rq, se);
bf0f6f24
IM
605}
606
bf0f6f24 607static void
9ef0a961 608update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 609{
41acab88
LDM
610 schedstat_set(se->statistics.wait_max, max(se->statistics.wait_max,
611 rq_of(cfs_rq)->clock - se->statistics.wait_start));
612 schedstat_set(se->statistics.wait_count, se->statistics.wait_count + 1);
613 schedstat_set(se->statistics.wait_sum, se->statistics.wait_sum +
614 rq_of(cfs_rq)->clock - se->statistics.wait_start);
768d0c27
PZ
615#ifdef CONFIG_SCHEDSTATS
616 if (entity_is_task(se)) {
617 trace_sched_stat_wait(task_of(se),
41acab88 618 rq_of(cfs_rq)->clock - se->statistics.wait_start);
768d0c27
PZ
619 }
620#endif
41acab88 621 schedstat_set(se->statistics.wait_start, 0);
bf0f6f24
IM
622}
623
624static inline void
19b6a2e3 625update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 626{
bf0f6f24
IM
627 /*
628 * Mark the end of the wait period if dequeueing a
629 * waiting task:
630 */
429d43bc 631 if (se != cfs_rq->curr)
9ef0a961 632 update_stats_wait_end(cfs_rq, se);
bf0f6f24
IM
633}
634
635/*
636 * We are picking a new current task - update its stats:
637 */
638static inline void
79303e9e 639update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
640{
641 /*
642 * We are starting a new run period:
643 */
305e6835 644 se->exec_start = rq_of(cfs_rq)->clock_task;
bf0f6f24
IM
645}
646
bf0f6f24
IM
647/**************************************************
648 * Scheduling class queueing methods:
649 */
650
c09595f6
PZ
651#if defined CONFIG_SMP && defined CONFIG_FAIR_GROUP_SCHED
652static void
653add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
654{
655 cfs_rq->task_weight += weight;
656}
657#else
658static inline void
659add_cfs_task_weight(struct cfs_rq *cfs_rq, unsigned long weight)
660{
661}
662#endif
663
30cfdcfc
DA
664static void
665account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
666{
667 update_load_add(&cfs_rq->load, se->load.weight);
c09595f6
PZ
668 if (!parent_entity(se))
669 inc_cpu_load(rq_of(cfs_rq), se->load.weight);
b87f1724 670 if (entity_is_task(se)) {
c09595f6 671 add_cfs_task_weight(cfs_rq, se->load.weight);
b87f1724
BR
672 list_add(&se->group_node, &cfs_rq->tasks);
673 }
30cfdcfc 674 cfs_rq->nr_running++;
30cfdcfc
DA
675}
676
677static void
678account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
679{
680 update_load_sub(&cfs_rq->load, se->load.weight);
c09595f6
PZ
681 if (!parent_entity(se))
682 dec_cpu_load(rq_of(cfs_rq), se->load.weight);
b87f1724 683 if (entity_is_task(se)) {
c09595f6 684 add_cfs_task_weight(cfs_rq, -se->load.weight);
b87f1724
BR
685 list_del_init(&se->group_node);
686 }
30cfdcfc 687 cfs_rq->nr_running--;
30cfdcfc
DA
688}
689
3ff6dcac
YZ
690#ifdef CONFIG_FAIR_GROUP_SCHED
691# ifdef CONFIG_SMP
d6b55918
PT
692static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
693 int global_update)
694{
695 struct task_group *tg = cfs_rq->tg;
696 long load_avg;
697
698 load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
699 load_avg -= cfs_rq->load_contribution;
700
701 if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
702 atomic_add(load_avg, &tg->load_weight);
703 cfs_rq->load_contribution += load_avg;
704 }
705}
706
707static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
2069dd75 708{
a7a4f8a7 709 u64 period = sysctl_sched_shares_window;
2069dd75 710 u64 now, delta;
e33078ba 711 unsigned long load = cfs_rq->load.weight;
2069dd75 712
b815f196 713 if (cfs_rq->tg == &root_task_group)
2069dd75
PZ
714 return;
715
05ca62c6 716 now = rq_of(cfs_rq)->clock_task;
2069dd75
PZ
717 delta = now - cfs_rq->load_stamp;
718
e33078ba
PT
719 /* truncate load history at 4 idle periods */
720 if (cfs_rq->load_stamp > cfs_rq->load_last &&
721 now - cfs_rq->load_last > 4 * period) {
722 cfs_rq->load_period = 0;
723 cfs_rq->load_avg = 0;
f07333bf 724 delta = period - 1;
e33078ba
PT
725 }
726
2069dd75 727 cfs_rq->load_stamp = now;
3b3d190e 728 cfs_rq->load_unacc_exec_time = 0;
2069dd75 729 cfs_rq->load_period += delta;
e33078ba
PT
730 if (load) {
731 cfs_rq->load_last = now;
732 cfs_rq->load_avg += delta * load;
733 }
2069dd75 734
d6b55918
PT
735 /* consider updating load contribution on each fold or truncate */
736 if (global_update || cfs_rq->load_period > period
737 || !cfs_rq->load_period)
738 update_cfs_rq_load_contribution(cfs_rq, global_update);
739
2069dd75
PZ
740 while (cfs_rq->load_period > period) {
741 /*
742 * Inline assembly required to prevent the compiler
743 * optimising this loop into a divmod call.
744 * See __iter_div_u64_rem() for another example of this.
745 */
746 asm("" : "+rm" (cfs_rq->load_period));
747 cfs_rq->load_period /= 2;
748 cfs_rq->load_avg /= 2;
749 }
3d4b47b4 750
e33078ba
PT
751 if (!cfs_rq->curr && !cfs_rq->nr_running && !cfs_rq->load_avg)
752 list_del_leaf_cfs_rq(cfs_rq);
2069dd75
PZ
753}
754
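/*
 * Rough behaviour of the window above: as each (default 10ms) window
 * fills, the accumulated delta*load is folded into load_avg and then
 * both load_period and load_avg are halved, giving an exponentially
 * decaying average; after four idle windows the history is dropped.
 */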
6d5ab293 755static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac
YZ
756{
757 long load_weight, load, shares;
758
6d5ab293 759 load = cfs_rq->load.weight;
3ff6dcac
YZ
760
761 load_weight = atomic_read(&tg->load_weight);
3ff6dcac 762 load_weight += load;
6d5ab293 763 load_weight -= cfs_rq->load_contribution;
3ff6dcac
YZ
764
765 shares = (tg->shares * load);
766 if (load_weight)
767 shares /= load_weight;
768
769 if (shares < MIN_SHARES)
770 shares = MIN_SHARES;
771 if (shares > tg->shares)
772 shares = tg->shares;
773
774 return shares;
775}
776
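/*
 * Example: if this cpu's cfs_rq carries about half of the group's total
 * load, the group entity here ends up with roughly tg->shares / 2
 * (e.g. 512 for a default 1024 shares), clamped to [MIN_SHARES, tg->shares].
 */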
777static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
778{
779 if (cfs_rq->load_unacc_exec_time > sysctl_sched_shares_window) {
780 update_cfs_load(cfs_rq, 0);
6d5ab293 781 update_cfs_shares(cfs_rq);
3ff6dcac
YZ
782 }
783}
784# else /* CONFIG_SMP */
785static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
786{
787}
788
6d5ab293 789static inline long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
3ff6dcac
YZ
790{
791 return tg->shares;
792}
793
794static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
795{
796}
797# endif /* CONFIG_SMP */
2069dd75
PZ
798static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
799 unsigned long weight)
800{
19e5eebb
PT
801 if (se->on_rq) {
802 /* commit outstanding execution time */
803 if (cfs_rq->curr == se)
804 update_curr(cfs_rq);
2069dd75 805 account_entity_dequeue(cfs_rq, se);
19e5eebb 806 }
2069dd75
PZ
807
808 update_load_set(&se->load, weight);
809
810 if (se->on_rq)
811 account_entity_enqueue(cfs_rq, se);
812}
813
6d5ab293 814static void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
815{
816 struct task_group *tg;
817 struct sched_entity *se;
3ff6dcac 818 long shares;
2069dd75 819
2069dd75
PZ
820 tg = cfs_rq->tg;
821 se = tg->se[cpu_of(rq_of(cfs_rq))];
822 if (!se)
823 return;
3ff6dcac
YZ
824#ifndef CONFIG_SMP
825 if (likely(se->load.weight == tg->shares))
826 return;
827#endif
6d5ab293 828 shares = calc_cfs_shares(cfs_rq, tg);
2069dd75
PZ
829
830 reweight_entity(cfs_rq_of(se), se, shares);
831}
832#else /* CONFIG_FAIR_GROUP_SCHED */
d6b55918 833static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
2069dd75
PZ
834{
835}
836
6d5ab293 837static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
2069dd75
PZ
838{
839}
43365bd7
PT
840
841static inline void update_entity_shares_tick(struct cfs_rq *cfs_rq)
842{
843}
2069dd75
PZ
844#endif /* CONFIG_FAIR_GROUP_SCHED */
845
2396af69 846static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 847{
bf0f6f24 848#ifdef CONFIG_SCHEDSTATS
e414314c
PZ
849 struct task_struct *tsk = NULL;
850
851 if (entity_is_task(se))
852 tsk = task_of(se);
853
41acab88
LDM
854 if (se->statistics.sleep_start) {
855 u64 delta = rq_of(cfs_rq)->clock - se->statistics.sleep_start;
bf0f6f24
IM
856
857 if ((s64)delta < 0)
858 delta = 0;
859
41acab88
LDM
860 if (unlikely(delta > se->statistics.sleep_max))
861 se->statistics.sleep_max = delta;
bf0f6f24 862
41acab88
LDM
863 se->statistics.sleep_start = 0;
864 se->statistics.sum_sleep_runtime += delta;
9745512c 865
768d0c27 866 if (tsk) {
e414314c 867 account_scheduler_latency(tsk, delta >> 10, 1);
768d0c27
PZ
868 trace_sched_stat_sleep(tsk, delta);
869 }
bf0f6f24 870 }
41acab88
LDM
871 if (se->statistics.block_start) {
872 u64 delta = rq_of(cfs_rq)->clock - se->statistics.block_start;
bf0f6f24
IM
873
874 if ((s64)delta < 0)
875 delta = 0;
876
41acab88
LDM
877 if (unlikely(delta > se->statistics.block_max))
878 se->statistics.block_max = delta;
bf0f6f24 879
41acab88
LDM
880 se->statistics.block_start = 0;
881 se->statistics.sum_sleep_runtime += delta;
30084fbd 882
e414314c 883 if (tsk) {
8f0dfc34 884 if (tsk->in_iowait) {
41acab88
LDM
885 se->statistics.iowait_sum += delta;
886 se->statistics.iowait_count++;
768d0c27 887 trace_sched_stat_iowait(tsk, delta);
8f0dfc34
AV
888 }
889
e414314c
PZ
890 /*
891 * Blocking time is in units of nanosecs, so shift by
892 * 20 to get a milliseconds-range estimation of the
893 * amount of time that the task spent sleeping:
894 */
895 if (unlikely(prof_on == SLEEP_PROFILING)) {
896 profile_hits(SLEEP_PROFILING,
897 (void *)get_wchan(tsk),
898 delta >> 20);
899 }
900 account_scheduler_latency(tsk, delta >> 10, 0);
30084fbd 901 }
bf0f6f24
IM
902 }
903#endif
904}
905
ddc97297
PZ
906static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
907{
908#ifdef CONFIG_SCHED_DEBUG
909 s64 d = se->vruntime - cfs_rq->min_vruntime;
910
911 if (d < 0)
912 d = -d;
913
914 if (d > 3*sysctl_sched_latency)
915 schedstat_inc(cfs_rq, nr_spread_over);
916#endif
917}
918
aeb73b04
PZ
919static void
920place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
921{
1af5f730 922 u64 vruntime = cfs_rq->min_vruntime;
94dfb5e7 923
2cb8600e
PZ
924 /*
925 * The 'current' period is already promised to the current tasks;
926 * however, the extra weight of the new task will slow them down a
927 * little. Place the new task so that it fits in the slot that
928 * stays open at the end.
929 */
94dfb5e7 930 if (initial && sched_feat(START_DEBIT))
f9c0b095 931 vruntime += sched_vslice(cfs_rq, se);
aeb73b04 932
a2e7a7eb 933 /* sleeps up to a single latency don't count. */
5ca9880c 934 if (!initial) {
a2e7a7eb 935 unsigned long thresh = sysctl_sched_latency;
a7be37ac 936
a2e7a7eb
MG
937 /*
938 * Halve their sleep time's effect, to allow
939 * for a gentler effect of sleepers:
940 */
941 if (sched_feat(GENTLE_FAIR_SLEEPERS))
942 thresh >>= 1;
51e0304c 943
a2e7a7eb 944 vruntime -= thresh;
aeb73b04
PZ
945 }
946
b5d9d734
MG
947 /* ensure we never gain time by being placed backwards. */
948 vruntime = max_vruntime(se->vruntime, vruntime);
949
67e9fb2a 950 se->vruntime = vruntime;
aeb73b04
PZ
951}
952
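/*
 * Example: with GENTLE_FAIR_SLEEPERS and the unscaled 6ms latency, a
 * waking sleeper is placed at most 3ms behind min_vruntime (a bounded
 * head start), while START_DEBIT pushes a newly forked child one vslice
 * past min_vruntime so it cannot immediately preempt.
 */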
bf0f6f24 953static void
88ec22d3 954enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 955{
88ec22d3
PZ
956 /*
957 * Update the normalized vruntime before updating min_vruntime
958 * through calling update_curr().
959 */
371fd7e7 960 if (!(flags & ENQUEUE_WAKEUP) || (flags & ENQUEUE_WAKING))
88ec22d3
PZ
961 se->vruntime += cfs_rq->min_vruntime;
962
bf0f6f24 963 /*
a2a2d680 964 * Update run-time statistics of the 'current'.
bf0f6f24 965 */
b7cc0896 966 update_curr(cfs_rq);
d6b55918 967 update_cfs_load(cfs_rq, 0);
a992241d 968 account_entity_enqueue(cfs_rq, se);
6d5ab293 969 update_cfs_shares(cfs_rq);
bf0f6f24 970
88ec22d3 971 if (flags & ENQUEUE_WAKEUP) {
aeb73b04 972 place_entity(cfs_rq, se, 0);
2396af69 973 enqueue_sleeper(cfs_rq, se);
e9acbff6 974 }
bf0f6f24 975
d2417e5a 976 update_stats_enqueue(cfs_rq, se);
ddc97297 977 check_spread(cfs_rq, se);
83b699ed
SV
978 if (se != cfs_rq->curr)
979 __enqueue_entity(cfs_rq, se);
2069dd75 980 se->on_rq = 1;
3d4b47b4
PZ
981
982 if (cfs_rq->nr_running == 1)
983 list_add_leaf_cfs_rq(cfs_rq);
bf0f6f24
IM
984}
985
2c13c919 986static void __clear_buddies_last(struct sched_entity *se)
2002c695 987{
2c13c919
RR
988 for_each_sched_entity(se) {
989 struct cfs_rq *cfs_rq = cfs_rq_of(se);
990 if (cfs_rq->last == se)
991 cfs_rq->last = NULL;
992 else
993 break;
994 }
995}
2002c695 996
2c13c919
RR
997static void __clear_buddies_next(struct sched_entity *se)
998{
999 for_each_sched_entity(se) {
1000 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1001 if (cfs_rq->next == se)
1002 cfs_rq->next = NULL;
1003 else
1004 break;
1005 }
2002c695
PZ
1006}
1007
ac53db59
RR
1008static void __clear_buddies_skip(struct sched_entity *se)
1009{
1010 for_each_sched_entity(se) {
1011 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1012 if (cfs_rq->skip == se)
1013 cfs_rq->skip = NULL;
1014 else
1015 break;
1016 }
1017}
1018
a571bbea
PZ
1019static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)
1020{
2c13c919
RR
1021 if (cfs_rq->last == se)
1022 __clear_buddies_last(se);
1023
1024 if (cfs_rq->next == se)
1025 __clear_buddies_next(se);
ac53db59
RR
1026
1027 if (cfs_rq->skip == se)
1028 __clear_buddies_skip(se);
a571bbea
PZ
1029}
1030
bf0f6f24 1031static void
371fd7e7 1032dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
bf0f6f24 1033{
a2a2d680
DA
1034 /*
1035 * Update run-time statistics of the 'current'.
1036 */
1037 update_curr(cfs_rq);
1038
19b6a2e3 1039 update_stats_dequeue(cfs_rq, se);
371fd7e7 1040 if (flags & DEQUEUE_SLEEP) {
67e9fb2a 1041#ifdef CONFIG_SCHEDSTATS
bf0f6f24
IM
1042 if (entity_is_task(se)) {
1043 struct task_struct *tsk = task_of(se);
1044
1045 if (tsk->state & TASK_INTERRUPTIBLE)
41acab88 1046 se->statistics.sleep_start = rq_of(cfs_rq)->clock;
bf0f6f24 1047 if (tsk->state & TASK_UNINTERRUPTIBLE)
41acab88 1048 se->statistics.block_start = rq_of(cfs_rq)->clock;
bf0f6f24 1049 }
db36cc7d 1050#endif
67e9fb2a
PZ
1051 }
1052
2002c695 1053 clear_buddies(cfs_rq, se);
4793241b 1054
83b699ed 1055 if (se != cfs_rq->curr)
30cfdcfc 1056 __dequeue_entity(cfs_rq, se);
2069dd75 1057 se->on_rq = 0;
d6b55918 1058 update_cfs_load(cfs_rq, 0);
30cfdcfc 1059 account_entity_dequeue(cfs_rq, se);
88ec22d3
PZ
1060
1061 /*
1062 * Normalize the entity after updating the min_vruntime because the
1063 * update can refer to the ->curr item and we need to reflect this
1064 * movement in our normalized position.
1065 */
371fd7e7 1066 if (!(flags & DEQUEUE_SLEEP))
88ec22d3 1067 se->vruntime -= cfs_rq->min_vruntime;
1e876231
PZ
1068
1069 update_min_vruntime(cfs_rq);
1070 update_cfs_shares(cfs_rq);
bf0f6f24
IM
1071}
1072
1073/*
1074 * Preempt the current task with a newly woken task if needed:
1075 */
7c92e54f 1076static void
2e09bf55 1077check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
bf0f6f24 1078{
11697830
PZ
1079 unsigned long ideal_runtime, delta_exec;
1080
6d0f0ebd 1081 ideal_runtime = sched_slice(cfs_rq, curr);
11697830 1082 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
a9f3e2b5 1083 if (delta_exec > ideal_runtime) {
bf0f6f24 1084 resched_task(rq_of(cfs_rq)->curr);
a9f3e2b5
MG
1085 /*
1086 * The current task ran long enough, ensure it doesn't get
1087 * re-elected due to buddy favours.
1088 */
1089 clear_buddies(cfs_rq, curr);
f685ceac
MG
1090 return;
1091 }
1092
1093 /*
1094 * Ensure that a task that missed wakeup preemption by a
1095 * narrow margin doesn't have to wait for a full slice.
1096 * This also mitigates buddy induced latencies under load.
1097 */
f685ceac
MG
1098 if (delta_exec < sysctl_sched_min_granularity)
1099 return;
1100
1101 if (cfs_rq->nr_running > 1) {
ac53db59 1102 struct sched_entity *se = __pick_first_entity(cfs_rq);
f685ceac
MG
1103 s64 delta = curr->vruntime - se->vruntime;
1104
d7d82944
MG
1105 if (delta < 0)
1106 return;
1107
f685ceac
MG
1108 if (delta > ideal_runtime)
1109 resched_task(rq_of(cfs_rq)->curr);
a9f3e2b5 1110 }
bf0f6f24
IM
1111}
1112
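/*
 * Example: a current task whose slice works out to 4ms is rescheduled
 * once it has run 4ms since it was last picked, or earlier (but never
 * before sysctl_sched_min_granularity has elapsed) if its vruntime is
 * more than one slice ahead of the leftmost entity.
 */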
83b699ed 1113static void
8494f412 1114set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24 1115{
83b699ed
SV
1116 /* 'current' is not kept within the tree. */
1117 if (se->on_rq) {
1118 /*
1119 * Any task has to be enqueued before it get to execute on
1120 * a CPU. So account for the time it spent waiting on the
1121 * runqueue.
1122 */
1123 update_stats_wait_end(cfs_rq, se);
1124 __dequeue_entity(cfs_rq, se);
1125 }
1126
79303e9e 1127 update_stats_curr_start(cfs_rq, se);
429d43bc 1128 cfs_rq->curr = se;
eba1ed4b
IM
1129#ifdef CONFIG_SCHEDSTATS
1130 /*
1131 * Track our maximum slice length, if the CPU's load is at
1132 * least twice that of our own weight (i.e. dont track it
1133 * when there are only lesser-weight tasks around):
1134 */
495eca49 1135 if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
41acab88 1136 se->statistics.slice_max = max(se->statistics.slice_max,
eba1ed4b
IM
1137 se->sum_exec_runtime - se->prev_sum_exec_runtime);
1138 }
1139#endif
4a55b450 1140 se->prev_sum_exec_runtime = se->sum_exec_runtime;
bf0f6f24
IM
1141}
1142
3f3a4904
PZ
1143static int
1144wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
1145
ac53db59
RR
1146/*
1147 * Pick the next process, keeping these things in mind, in this order:
1148 * 1) keep things fair between processes/task groups
1149 * 2) pick the "next" process, since someone really wants that to run
1150 * 3) pick the "last" process, for cache locality
1151 * 4) do not run the "skip" process, if something else is available
1152 */
f4b6755f 1153static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
aa2ac252 1154{
ac53db59 1155 struct sched_entity *se = __pick_first_entity(cfs_rq);
f685ceac 1156 struct sched_entity *left = se;
f4b6755f 1157
ac53db59
RR
1158 /*
1159 * Avoid running the skip buddy, if running something else can
1160 * be done without getting too unfair.
1161 */
1162 if (cfs_rq->skip == se) {
1163 struct sched_entity *second = __pick_next_entity(se);
1164 if (second && wakeup_preempt_entity(second, left) < 1)
1165 se = second;
1166 }
aa2ac252 1167
f685ceac
MG
1168 /*
1169 * Prefer last buddy, try to return the CPU to a preempted task.
1170 */
1171 if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
1172 se = cfs_rq->last;
1173
ac53db59
RR
1174 /*
1175 * Someone really wants this to run. If it's not unfair, run it.
1176 */
1177 if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
1178 se = cfs_rq->next;
1179
f685ceac 1180 clear_buddies(cfs_rq, se);
4793241b
PZ
1181
1182 return se;
aa2ac252
PZ
1183}
1184
ab6cde26 1185static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
bf0f6f24
IM
1186{
1187 /*
1188 * If still on the runqueue then deactivate_task()
1189 * was not called and update_curr() has to be done:
1190 */
1191 if (prev->on_rq)
b7cc0896 1192 update_curr(cfs_rq);
bf0f6f24 1193
ddc97297 1194 check_spread(cfs_rq, prev);
30cfdcfc 1195 if (prev->on_rq) {
5870db5b 1196 update_stats_wait_start(cfs_rq, prev);
30cfdcfc
DA
1197 /* Put 'current' back into the tree. */
1198 __enqueue_entity(cfs_rq, prev);
1199 }
429d43bc 1200 cfs_rq->curr = NULL;
bf0f6f24
IM
1201}
1202
8f4d37ec
PZ
1203static void
1204entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
bf0f6f24 1205{
bf0f6f24 1206 /*
30cfdcfc 1207 * Update run-time statistics of the 'current'.
bf0f6f24 1208 */
30cfdcfc 1209 update_curr(cfs_rq);
bf0f6f24 1210
43365bd7
PT
1211 /*
1212 * Update share accounting for long-running entities.
1213 */
1214 update_entity_shares_tick(cfs_rq);
1215
8f4d37ec
PZ
1216#ifdef CONFIG_SCHED_HRTICK
1217 /*
1218 * queued ticks are scheduled to match the slice, so don't bother
1219 * validating it and just reschedule.
1220 */
983ed7a6
HH
1221 if (queued) {
1222 resched_task(rq_of(cfs_rq)->curr);
1223 return;
1224 }
8f4d37ec
PZ
1225 /*
1226 * don't let the period tick interfere with the hrtick preemption
1227 */
1228 if (!sched_feat(DOUBLE_TICK) &&
1229 hrtimer_active(&rq_of(cfs_rq)->hrtick_timer))
1230 return;
1231#endif
1232
2c2efaed 1233 if (cfs_rq->nr_running > 1)
2e09bf55 1234 check_preempt_tick(cfs_rq, curr);
bf0f6f24
IM
1235}
1236
ab84d31e
PT
1237
1238/**************************************************
1239 * CFS bandwidth control machinery
1240 */
1241
1242#ifdef CONFIG_CFS_BANDWIDTH
1243/*
1244 * default period for cfs group bandwidth.
1245 * default: 0.1s, units: nanoseconds
1246 */
1247static inline u64 default_cfs_period(void)
1248{
1249 return 100000000ULL;
1250}
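/*
 * The quota (runtime a group may consume per period) is configured per
 * task group and is not shown in this file; e.g. with this 100ms default
 * period, a 50ms quota would limit the group to an average of half a CPU.
 */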
1251#endif
1252
bf0f6f24
IM
1253/**************************************************
1254 * CFS operations on tasks:
1255 */
1256
8f4d37ec
PZ
1257#ifdef CONFIG_SCHED_HRTICK
1258static void hrtick_start_fair(struct rq *rq, struct task_struct *p)
1259{
8f4d37ec
PZ
1260 struct sched_entity *se = &p->se;
1261 struct cfs_rq *cfs_rq = cfs_rq_of(se);
1262
1263 WARN_ON(task_rq(p) != rq);
1264
1265 if (hrtick_enabled(rq) && cfs_rq->nr_running > 1) {
1266 u64 slice = sched_slice(cfs_rq, se);
1267 u64 ran = se->sum_exec_runtime - se->prev_sum_exec_runtime;
1268 s64 delta = slice - ran;
1269
1270 if (delta < 0) {
1271 if (rq->curr == p)
1272 resched_task(p);
1273 return;
1274 }
1275
1276 /*
1277 * Don't schedule slices shorter than 10000ns; that just
1278 * doesn't make sense. Rely on vruntime for fairness.
1279 */
31656519 1280 if (rq->curr != p)
157124c1 1281 delta = max_t(s64, 10000LL, delta);
8f4d37ec 1282
31656519 1283 hrtick_start(rq, delta);
8f4d37ec
PZ
1284 }
1285}
a4c2f00f
PZ
1286
1287/*
1288 * called from enqueue/dequeue and updates the hrtick when the
1289 * current task is from our class and nr_running is low enough
1290 * to matter.
1291 */
1292static void hrtick_update(struct rq *rq)
1293{
1294 struct task_struct *curr = rq->curr;
1295
1296 if (curr->sched_class != &fair_sched_class)
1297 return;
1298
1299 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency)
1300 hrtick_start_fair(rq, curr);
1301}
55e12e5e 1302#else /* !CONFIG_SCHED_HRTICK */
8f4d37ec
PZ
1303static inline void
1304hrtick_start_fair(struct rq *rq, struct task_struct *p)
1305{
1306}
a4c2f00f
PZ
1307
1308static inline void hrtick_update(struct rq *rq)
1309{
1310}
8f4d37ec
PZ
1311#endif
1312
bf0f6f24
IM
1313/*
1314 * The enqueue_task method is called before nr_running is
1315 * increased. Here we update the fair scheduling stats and
1316 * then put the task into the rbtree:
1317 */
ea87bb78 1318static void
371fd7e7 1319enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
1320{
1321 struct cfs_rq *cfs_rq;
62fb1851 1322 struct sched_entity *se = &p->se;
bf0f6f24
IM
1323
1324 for_each_sched_entity(se) {
62fb1851 1325 if (se->on_rq)
bf0f6f24
IM
1326 break;
1327 cfs_rq = cfs_rq_of(se);
88ec22d3 1328 enqueue_entity(cfs_rq, se, flags);
953bfcd1 1329 cfs_rq->h_nr_running++;
88ec22d3 1330 flags = ENQUEUE_WAKEUP;
bf0f6f24 1331 }
8f4d37ec 1332
2069dd75 1333 for_each_sched_entity(se) {
0f317143 1334 cfs_rq = cfs_rq_of(se);
953bfcd1 1335 cfs_rq->h_nr_running++;
2069dd75 1336
d6b55918 1337 update_cfs_load(cfs_rq, 0);
6d5ab293 1338 update_cfs_shares(cfs_rq);
2069dd75
PZ
1339 }
1340
953bfcd1 1341 inc_nr_running(rq);
a4c2f00f 1342 hrtick_update(rq);
bf0f6f24
IM
1343}
1344
2f36825b
VP
1345static void set_next_buddy(struct sched_entity *se);
1346
bf0f6f24
IM
1347/*
1348 * The dequeue_task method is called before nr_running is
1349 * decreased. We remove the task from the rbtree and
1350 * update the fair scheduling stats:
1351 */
371fd7e7 1352static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bf0f6f24
IM
1353{
1354 struct cfs_rq *cfs_rq;
62fb1851 1355 struct sched_entity *se = &p->se;
2f36825b 1356 int task_sleep = flags & DEQUEUE_SLEEP;
bf0f6f24
IM
1357
1358 for_each_sched_entity(se) {
1359 cfs_rq = cfs_rq_of(se);
371fd7e7 1360 dequeue_entity(cfs_rq, se, flags);
953bfcd1 1361 cfs_rq->h_nr_running--;
2069dd75 1362
bf0f6f24 1363 /* Don't dequeue parent if it has other entities besides us */
2f36825b
VP
1364 if (cfs_rq->load.weight) {
1365 /*
1366 * Bias pick_next to pick a task from this cfs_rq, as
1367 * p is sleeping when it is within its sched_slice.
1368 */
1369 if (task_sleep && parent_entity(se))
1370 set_next_buddy(parent_entity(se));
9598c82d
PT
1371
1372 /* avoid re-evaluating load for this entity */
1373 se = parent_entity(se);
bf0f6f24 1374 break;
2f36825b 1375 }
371fd7e7 1376 flags |= DEQUEUE_SLEEP;
bf0f6f24 1377 }
8f4d37ec 1378
2069dd75 1379 for_each_sched_entity(se) {
0f317143 1380 cfs_rq = cfs_rq_of(se);
953bfcd1 1381 cfs_rq->h_nr_running--;
2069dd75 1382
d6b55918 1383 update_cfs_load(cfs_rq, 0);
6d5ab293 1384 update_cfs_shares(cfs_rq);
2069dd75
PZ
1385 }
1386
953bfcd1 1387 dec_nr_running(rq);
a4c2f00f 1388 hrtick_update(rq);
bf0f6f24
IM
1389}
1390
e7693a36 1391#ifdef CONFIG_SMP
098fb9db 1392
74f8e4b2 1393static void task_waking_fair(struct task_struct *p)
88ec22d3
PZ
1394{
1395 struct sched_entity *se = &p->se;
1396 struct cfs_rq *cfs_rq = cfs_rq_of(se);
3fe1698b
PZ
1397 u64 min_vruntime;
1398
1399#ifndef CONFIG_64BIT
1400 u64 min_vruntime_copy;
88ec22d3 1401
3fe1698b
PZ
1402 do {
1403 min_vruntime_copy = cfs_rq->min_vruntime_copy;
1404 smp_rmb();
1405 min_vruntime = cfs_rq->min_vruntime;
1406 } while (min_vruntime != min_vruntime_copy);
1407#else
1408 min_vruntime = cfs_rq->min_vruntime;
1409#endif
88ec22d3 1410
3fe1698b 1411 se->vruntime -= min_vruntime;
88ec22d3
PZ
1412}
1413
bb3469ac 1414#ifdef CONFIG_FAIR_GROUP_SCHED
f5bfb7d9
PZ
1415/*
1416 * effective_load() calculates the load change as seen from the root_task_group
1417 *
1418 * Adding load to a group doesn't make a group heavier, but can cause movement
1419 * of group shares between cpus. Assuming the shares were perfectly aligned one
1420 * can calculate the shift in shares.
f5bfb7d9 1421 */
2069dd75 1422static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
bb3469ac 1423{
4be9daaa 1424 struct sched_entity *se = tg->se[cpu];
f1d239f7
PZ
1425
1426 if (!tg->parent)
1427 return wl;
1428
4be9daaa 1429 for_each_sched_entity(se) {
977dda7c 1430 long lw, w;
4be9daaa 1431
977dda7c
PT
1432 tg = se->my_q->tg;
1433 w = se->my_q->load.weight;
bb3469ac 1434
977dda7c
PT
1435 /* use this cpu's instantaneous contribution */
1436 lw = atomic_read(&tg->load_weight);
1437 lw -= se->my_q->load_contribution;
1438 lw += w + wg;
4be9daaa 1439
977dda7c 1440 wl += w;
940959e9 1441
977dda7c
PT
1442 if (lw > 0 && wl < lw)
1443 wl = (wl * tg->shares) / lw;
1444 else
1445 wl = tg->shares;
940959e9 1446
977dda7c
PT
1447 /* zero point is MIN_SHARES */
1448 if (wl < MIN_SHARES)
1449 wl = MIN_SHARES;
1450 wl -= se->load.weight;
4be9daaa 1451 wg = 0;
4be9daaa 1452 }
bb3469ac 1453
4be9daaa 1454 return wl;
bb3469ac 1455}
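/*
 * Example: at each level the weight change is rescaled by the group's
 * share of that level's load, so a task enqueued in a group that owns
 * only a small fraction of the shares shows up at the root as a
 * correspondingly small load change.
 */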
4be9daaa 1456
bb3469ac 1457#else
4be9daaa 1458
83378269
PZ
1459static inline unsigned long effective_load(struct task_group *tg, int cpu,
1460 unsigned long wl, unsigned long wg)
4be9daaa 1461{
83378269 1462 return wl;
bb3469ac 1463}
4be9daaa 1464
bb3469ac
PZ
1465#endif
1466
c88d5910 1467static int wake_affine(struct sched_domain *sd, struct task_struct *p, int sync)
098fb9db 1468{
e37b6a7b 1469 s64 this_load, load;
c88d5910 1470 int idx, this_cpu, prev_cpu;
098fb9db 1471 unsigned long tl_per_task;
c88d5910 1472 struct task_group *tg;
83378269 1473 unsigned long weight;
b3137bc8 1474 int balanced;
098fb9db 1475
c88d5910
PZ
1476 idx = sd->wake_idx;
1477 this_cpu = smp_processor_id();
1478 prev_cpu = task_cpu(p);
1479 load = source_load(prev_cpu, idx);
1480 this_load = target_load(this_cpu, idx);
098fb9db 1481
b3137bc8
MG
1482 /*
1483 * If sync wakeup then subtract the (maximum possible)
1484 * effect of the currently running task from the load
1485 * of the current CPU:
1486 */
83378269
PZ
1487 if (sync) {
1488 tg = task_group(current);
1489 weight = current->se.load.weight;
1490
c88d5910 1491 this_load += effective_load(tg, this_cpu, -weight, -weight);
83378269
PZ
1492 load += effective_load(tg, prev_cpu, 0, -weight);
1493 }
b3137bc8 1494
83378269
PZ
1495 tg = task_group(p);
1496 weight = p->se.load.weight;
b3137bc8 1497
71a29aa7
PZ
1498 /*
1499 * In low-load situations, where prev_cpu is idle and this_cpu is idle
c88d5910
PZ
1500 * due to the sync cause above having dropped this_load to 0, we'll
1501 * always have an imbalance, but there's really nothing you can do
1502 * about that, so that's good too.
71a29aa7
PZ
1503 *
1504 * Otherwise check if either cpus are near enough in load to allow this
1505 * task to be woken on this_cpu.
1506 */
e37b6a7b
PT
1507 if (this_load > 0) {
1508 s64 this_eff_load, prev_eff_load;
e51fd5e2
PZ
1509
1510 this_eff_load = 100;
1511 this_eff_load *= power_of(prev_cpu);
1512 this_eff_load *= this_load +
1513 effective_load(tg, this_cpu, weight, weight);
1514
1515 prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2;
1516 prev_eff_load *= power_of(this_cpu);
1517 prev_eff_load *= load + effective_load(tg, prev_cpu, 0, weight);
1518
1519 balanced = this_eff_load <= prev_eff_load;
1520 } else
1521 balanced = true;
b3137bc8 1522
098fb9db 1523 /*
4ae7d5ce
IM
1524 * If the currently running task will sleep within
1525 * a reasonable amount of time then attract this newly
1526 * woken task:
098fb9db 1527 */
2fb7635c
PZ
1528 if (sync && balanced)
1529 return 1;
098fb9db 1530
41acab88 1531 schedstat_inc(p, se.statistics.nr_wakeups_affine_attempts);
098fb9db
IM
1532 tl_per_task = cpu_avg_load_per_task(this_cpu);
1533
c88d5910
PZ
1534 if (balanced ||
1535 (this_load <= load &&
1536 this_load + target_load(prev_cpu, idx) <= tl_per_task)) {
098fb9db
IM
1537 /*
1538 * This domain has SD_WAKE_AFFINE and
1539 * p is cache cold in this domain, and
1540 * there is no bad imbalance.
1541 */
c88d5910 1542 schedstat_inc(sd, ttwu_move_affine);
41acab88 1543 schedstat_inc(p, se.statistics.nr_wakeups_affine);
098fb9db
IM
1544
1545 return 1;
1546 }
1547 return 0;
1548}
1549
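/*
 * Roughly: the affine wakeup is allowed when this cpu's projected load
 * (with the waking task added), scaled by prev_cpu's power, does not
 * exceed prev_cpu's projected load scaled by this cpu's power and
 * inflated by half of the sd->imbalance_pct margin.
 */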
aaee1203
PZ
1550/*
1551 * find_idlest_group finds and returns the least busy CPU group within the
1552 * domain.
1553 */
1554static struct sched_group *
78e7ed53 1555find_idlest_group(struct sched_domain *sd, struct task_struct *p,
5158f4e4 1556 int this_cpu, int load_idx)
e7693a36 1557{
b3bd3de6 1558 struct sched_group *idlest = NULL, *group = sd->groups;
aaee1203 1559 unsigned long min_load = ULONG_MAX, this_load = 0;
aaee1203 1560 int imbalance = 100 + (sd->imbalance_pct-100)/2;
e7693a36 1561
aaee1203
PZ
1562 do {
1563 unsigned long load, avg_load;
1564 int local_group;
1565 int i;
e7693a36 1566
aaee1203
PZ
1567 /* Skip over this group if it has no CPUs allowed */
1568 if (!cpumask_intersects(sched_group_cpus(group),
1569 &p->cpus_allowed))
1570 continue;
1571
1572 local_group = cpumask_test_cpu(this_cpu,
1573 sched_group_cpus(group));
1574
1575 /* Tally up the load of all CPUs in the group */
1576 avg_load = 0;
1577
1578 for_each_cpu(i, sched_group_cpus(group)) {
1579 /* Bias balancing toward cpus of our domain */
1580 if (local_group)
1581 load = source_load(i, load_idx);
1582 else
1583 load = target_load(i, load_idx);
1584
1585 avg_load += load;
1586 }
1587
1588 /* Adjust by relative CPU power of the group */
9c3f75cb 1589 avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgp->power;
aaee1203
PZ
1590
1591 if (local_group) {
1592 this_load = avg_load;
aaee1203
PZ
1593 } else if (avg_load < min_load) {
1594 min_load = avg_load;
1595 idlest = group;
1596 }
1597 } while (group = group->next, group != sd->groups);
1598
1599 if (!idlest || 100*this_load < imbalance*min_load)
1600 return NULL;
1601 return idlest;
1602}
1603
1604/*
1605 * find_idlest_cpu - find the idlest cpu among the cpus in group.
1606 */
1607static int
1608find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
1609{
1610 unsigned long load, min_load = ULONG_MAX;
1611 int idlest = -1;
1612 int i;
1613
1614 /* Traverse only the allowed CPUs */
1615 for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
1616 load = weighted_cpuload(i);
1617
1618 if (load < min_load || (load == min_load && i == this_cpu)) {
1619 min_load = load;
1620 idlest = i;
e7693a36
GH
1621 }
1622 }
1623
aaee1203
PZ
1624 return idlest;
1625}
e7693a36 1626
a50bde51
PZ
1627/*
1628 * Try and locate an idle CPU in the sched_domain.
1629 */
99bd5e2f 1630static int select_idle_sibling(struct task_struct *p, int target)
a50bde51
PZ
1631{
1632 int cpu = smp_processor_id();
1633 int prev_cpu = task_cpu(p);
99bd5e2f 1634 struct sched_domain *sd;
a50bde51
PZ
1635 int i;
1636
1637 /*
99bd5e2f
SS
1638 * If the task is going to be woken-up on this cpu and if it is
1639 * already idle, then it is the right target.
a50bde51 1640 */
99bd5e2f
SS
1641 if (target == cpu && idle_cpu(cpu))
1642 return cpu;
1643
1644 /*
1645 * If the task is going to be woken-up on the cpu where it previously
1646 * ran and if it is currently idle, then it is the right target.
1647 */
1648 if (target == prev_cpu && idle_cpu(prev_cpu))
fe3bcfe1 1649 return prev_cpu;
a50bde51
PZ
1650
1651 /*
99bd5e2f 1652 * Otherwise, iterate the domains and find an eligible idle cpu.
a50bde51 1653 */
dce840a0 1654 rcu_read_lock();
99bd5e2f
SS
1655 for_each_domain(target, sd) {
1656 if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
fe3bcfe1 1657 break;
99bd5e2f
SS
1658
1659 for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) {
1660 if (idle_cpu(i)) {
1661 target = i;
1662 break;
1663 }
a50bde51 1664 }
99bd5e2f
SS
1665
1666 /*
1667 * Lets stop looking for an idle sibling when we reached
1668 * the domain that spans the current cpu and prev_cpu.
1669 */
1670 if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
1671 cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
1672 break;
a50bde51 1673 }
dce840a0 1674 rcu_read_unlock();
a50bde51
PZ
1675
1676 return target;
1677}
1678
aaee1203
PZ
1679/*
1680 * select_task_rq_fair: balance the current task (running on cpu) in domains
1681 * that have the relevant SD flag set. In practice, this is SD_BALANCE_WAKE,
1682 * SD_BALANCE_FORK, or SD_BALANCE_EXEC.
1683 *
1684 * Balance, ie. select the least loaded group.
1685 *
1686 * Returns the target CPU number, or the same CPU if no balancing is needed.
1687 *
1688 * preempt must be disabled.
1689 */
0017d735 1690static int
7608dec2 1691select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
aaee1203 1692{
29cd8bae 1693 struct sched_domain *tmp, *affine_sd = NULL, *sd = NULL;
c88d5910
PZ
1694 int cpu = smp_processor_id();
1695 int prev_cpu = task_cpu(p);
1696 int new_cpu = cpu;
99bd5e2f 1697 int want_affine = 0;
29cd8bae 1698 int want_sd = 1;
5158f4e4 1699 int sync = wake_flags & WF_SYNC;
c88d5910 1700
0763a660 1701 if (sd_flag & SD_BALANCE_WAKE) {
beac4c7e 1702 if (cpumask_test_cpu(cpu, &p->cpus_allowed))
c88d5910
PZ
1703 want_affine = 1;
1704 new_cpu = prev_cpu;
1705 }
aaee1203 1706
dce840a0 1707 rcu_read_lock();
aaee1203 1708 for_each_domain(cpu, tmp) {
e4f42888
PZ
1709 if (!(tmp->flags & SD_LOAD_BALANCE))
1710 continue;
1711
aaee1203 1712 /*
ae154be1
PZ
1713 * If power savings logic is enabled for a domain, see if we
1714 * are not overloaded, if so, don't balance wider.
aaee1203 1715 */
59abf026 1716 if (tmp->flags & (SD_POWERSAVINGS_BALANCE|SD_PREFER_LOCAL)) {
ae154be1
PZ
1717 unsigned long power = 0;
1718 unsigned long nr_running = 0;
1719 unsigned long capacity;
1720 int i;
1721
1722 for_each_cpu(i, sched_domain_span(tmp)) {
1723 power += power_of(i);
1724 nr_running += cpu_rq(i)->cfs.nr_running;
1725 }
1726
1399fa78 1727 capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE);
ae154be1 1728
59abf026
PZ
1729 if (tmp->flags & SD_POWERSAVINGS_BALANCE)
1730 nr_running /= 2;
1731
1732 if (nr_running < capacity)
29cd8bae 1733 want_sd = 0;
ae154be1 1734 }
aaee1203 1735
fe3bcfe1 1736 /*
99bd5e2f
SS
1737 * If both cpu and prev_cpu are part of this domain,
1738 * cpu is a valid SD_WAKE_AFFINE target.
fe3bcfe1 1739 */
99bd5e2f
SS
1740 if (want_affine && (tmp->flags & SD_WAKE_AFFINE) &&
1741 cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) {
1742 affine_sd = tmp;
1743 want_affine = 0;
c88d5910
PZ
1744 }
1745
29cd8bae
PZ
1746 if (!want_sd && !want_affine)
1747 break;
1748
0763a660 1749 if (!(tmp->flags & sd_flag))
c88d5910
PZ
1750 continue;
1751
29cd8bae
PZ
1752 if (want_sd)
1753 sd = tmp;
1754 }
1755
8b911acd 1756 if (affine_sd) {
99bd5e2f 1757 if (cpu == prev_cpu || wake_affine(affine_sd, p, sync))
dce840a0
PZ
1758 prev_cpu = cpu;
1759
1760 new_cpu = select_idle_sibling(p, prev_cpu);
1761 goto unlock;
8b911acd 1762 }
e7693a36 1763
aaee1203 1764 while (sd) {
5158f4e4 1765 int load_idx = sd->forkexec_idx;
aaee1203 1766 struct sched_group *group;
c88d5910 1767 int weight;
098fb9db 1768
0763a660 1769 if (!(sd->flags & sd_flag)) {
aaee1203
PZ
1770 sd = sd->child;
1771 continue;
1772 }
098fb9db 1773
5158f4e4
PZ
1774 if (sd_flag & SD_BALANCE_WAKE)
1775 load_idx = sd->wake_idx;
098fb9db 1776
5158f4e4 1777 group = find_idlest_group(sd, p, cpu, load_idx);
aaee1203
PZ
1778 if (!group) {
1779 sd = sd->child;
1780 continue;
1781 }
4ae7d5ce 1782
d7c33c49 1783 new_cpu = find_idlest_cpu(group, p, cpu);
aaee1203
PZ
1784 if (new_cpu == -1 || new_cpu == cpu) {
1785 /* Now try balancing at a lower domain level of cpu */
1786 sd = sd->child;
1787 continue;
e7693a36 1788 }
aaee1203
PZ
1789
1790 /* Now try balancing at a lower domain level of new_cpu */
1791 cpu = new_cpu;
669c55e9 1792 weight = sd->span_weight;
aaee1203
PZ
1793 sd = NULL;
1794 for_each_domain(cpu, tmp) {
669c55e9 1795 if (weight <= tmp->span_weight)
aaee1203 1796 break;
0763a660 1797 if (tmp->flags & sd_flag)
aaee1203
PZ
1798 sd = tmp;
1799 }
1800 /* while loop will break here if sd == NULL */
e7693a36 1801 }
dce840a0
PZ
1802unlock:
1803 rcu_read_unlock();
e7693a36 1804
c88d5910 1805 return new_cpu;
e7693a36
GH
1806}
1807#endif /* CONFIG_SMP */
1808
e52fb7c0
PZ
1809static unsigned long
1810wakeup_gran(struct sched_entity *curr, struct sched_entity *se)
0bbd3336
PZ
1811{
1812 unsigned long gran = sysctl_sched_wakeup_granularity;
1813
1814 /*
1815 * Since it's curr that is running now, convert the gran from real-time
1816 * to virtual-time in its units.
1817 *
1818 * By using 'se' instead of 'curr' we penalize light tasks, so
1819 * they get preempted more easily. That is, if 'se' < 'curr' then
1820 * the resulting gran will be larger, therefore penalizing the
1821 * lighter, if otoh 'se' > 'curr' then the resulting gran will
1822 * be smaller, again penalizing the lighter task.
1823 *
1824 * This is especially important for buddies when the leftmost
1825 * task is higher priority than the buddy.
0bbd3336 1826 */
f4ad9bd2 1827 return calc_delta_fair(gran, se);
0bbd3336
PZ
1828}
1829
464b7527
PZ
1830/*
1831 * Should 'se' preempt 'curr'.
1832 *
1833 * |s1
1834 * |s2
1835 * |s3
1836 * g
1837 * |<--->|c
1838 *
1839 * w(c, s1) = -1
1840 * w(c, s2) = 0
1841 * w(c, s3) = 1
1842 *
1843 */
1844static int
1845wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1846{
1847 s64 gran, vdiff = curr->vruntime - se->vruntime;
1848
1849 if (vdiff <= 0)
1850 return -1;
1851
e52fb7c0 1852 gran = wakeup_gran(curr, se);
464b7527
PZ
1853 if (vdiff > gran)
1854 return 1;
1855
1856 return 0;
1857}
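The s1/s2/s3 diagram above boils down to a three-way decision on the vruntime delta, with the granularity converted into the waking task's virtual-time units. A minimal standalone sketch of that decision (illustrative only, not part of this file; it assumes a nice-0 weight of 1024 and plain integers in place of the kernel's load-weight structures):

/*
 * Illustrative only: an approximation of wakeup_gran() followed by
 * wakeup_preempt_entity(), with calc_delta_fair() reduced to a simple
 * gran * NICE_0_LOAD / se_weight scaling.
 */
#include <stdio.h>

#define NICE_0_LOAD	1024UL

static long long scaled_gran(unsigned long gran, unsigned long se_weight)
{
	/* heavier 'se' => smaller virtual gran => curr is preempted sooner */
	return (long long)(gran * NICE_0_LOAD / se_weight);
}

static int should_preempt(long long curr_vruntime, long long se_vruntime,
			  unsigned long gran, unsigned long se_weight)
{
	long long vdiff = curr_vruntime - se_vruntime;

	if (vdiff <= 0)
		return -1;				/* w(c, s1) = -1 */
	if (vdiff > scaled_gran(gran, se_weight))
		return 1;				/* w(c, s3) =  1 */
	return 0;					/* w(c, s2) =  0 */
}

int main(void)
{
	/* 1ms wakeup granularity, nice-0 waker trailing curr by 1.5ms: preempt */
	printf("%d\n", should_preempt(5000000, 3500000, 1000000, 1024));
	return 0;
}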
1858
02479099
PZ
1859static void set_last_buddy(struct sched_entity *se)
1860{
69c80f3e
VP
1861 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
1862 return;
1863
1864 for_each_sched_entity(se)
1865 cfs_rq_of(se)->last = se;
02479099
PZ
1866}
1867
1868static void set_next_buddy(struct sched_entity *se)
1869{
69c80f3e
VP
1870 if (entity_is_task(se) && unlikely(task_of(se)->policy == SCHED_IDLE))
1871 return;
1872
1873 for_each_sched_entity(se)
1874 cfs_rq_of(se)->next = se;
02479099
PZ
1875}
1876
ac53db59
RR
1877static void set_skip_buddy(struct sched_entity *se)
1878{
69c80f3e
VP
1879 for_each_sched_entity(se)
1880 cfs_rq_of(se)->skip = se;
ac53db59
RR
1881}
1882
bf0f6f24
IM
1883/*
1884 * Preempt the current task with a newly woken task if needed:
1885 */
5a9b86f6 1886static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_flags)
bf0f6f24
IM
1887{
1888 struct task_struct *curr = rq->curr;
8651a86c 1889 struct sched_entity *se = &curr->se, *pse = &p->se;
03e89e45 1890 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
f685ceac 1891 int scale = cfs_rq->nr_running >= sched_nr_latency;
2f36825b 1892 int next_buddy_marked = 0;
bf0f6f24 1893
4ae7d5ce
IM
1894 if (unlikely(se == pse))
1895 return;
1896
2f36825b 1897 if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) {
3cb63d52 1898 set_next_buddy(pse);
2f36825b
VP
1899 next_buddy_marked = 1;
1900 }
57fdc26d 1901
aec0a514
BR
1902 /*
1903 * We can come here with TIF_NEED_RESCHED already set from new task
1904 * wake up path.
1905 */
1906 if (test_tsk_need_resched(curr))
1907 return;
1908
a2f5c9ab
DH
1909 /* Idle tasks are by definition preempted by non-idle tasks. */
1910 if (unlikely(curr->policy == SCHED_IDLE) &&
1911 likely(p->policy != SCHED_IDLE))
1912 goto preempt;
1913
91c234b4 1914 /*
a2f5c9ab
DH
1915 * Batch and idle tasks do not preempt non-idle tasks (their preemption
1916 * is driven by the tick):
91c234b4 1917 */
6bc912b7 1918 if (unlikely(p->policy != SCHED_NORMAL))
91c234b4 1919 return;
bf0f6f24 1920
464b7527 1921 find_matching_se(&se, &pse);
9bbd7374 1922 update_curr(cfs_rq_of(se));
002f128b 1923 BUG_ON(!pse);
2f36825b
VP
1924 if (wakeup_preempt_entity(se, pse) == 1) {
1925 /*
1926 * Bias pick_next to pick the sched entity that is
1927 * triggering this preemption.
1928 */
1929 if (!next_buddy_marked)
1930 set_next_buddy(pse);
3a7e73a2 1931 goto preempt;
2f36825b 1932 }
464b7527 1933
3a7e73a2 1934 return;
a65ac745 1935
3a7e73a2
PZ
1936preempt:
1937 resched_task(curr);
1938 /*
1939 * Only set the backward buddy when the current task is still
1940 * on the rq. This can happen when a wakeup gets interleaved
1941 * with schedule on the ->pre_schedule() or idle_balance()
1942 * point, either of which can drop the rq lock.
1943 *
1944 * Also, during early boot the idle thread is in the fair class,
1945 * for obvious reasons it's a bad idea to schedule back to it.
1946 */
1947 if (unlikely(!se->on_rq || curr == rq->idle))
1948 return;
1949
1950 if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
1951 set_last_buddy(se);
bf0f6f24
IM
1952}
1953
fb8d4724 1954static struct task_struct *pick_next_task_fair(struct rq *rq)
bf0f6f24 1955{
8f4d37ec 1956 struct task_struct *p;
bf0f6f24
IM
1957 struct cfs_rq *cfs_rq = &rq->cfs;
1958 struct sched_entity *se;
1959
36ace27e 1960 if (!cfs_rq->nr_running)
bf0f6f24
IM
1961 return NULL;
1962
1963 do {
9948f4b2 1964 se = pick_next_entity(cfs_rq);
f4b6755f 1965 set_next_entity(cfs_rq, se);
bf0f6f24
IM
1966 cfs_rq = group_cfs_rq(se);
1967 } while (cfs_rq);
1968
8f4d37ec
PZ
1969 p = task_of(se);
1970 hrtick_start_fair(rq, p);
1971
1972 return p;
bf0f6f24
IM
1973}
1974
1975/*
1976 * Account for a descheduled task:
1977 */
31ee529c 1978static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
bf0f6f24
IM
1979{
1980 struct sched_entity *se = &prev->se;
1981 struct cfs_rq *cfs_rq;
1982
1983 for_each_sched_entity(se) {
1984 cfs_rq = cfs_rq_of(se);
ab6cde26 1985 put_prev_entity(cfs_rq, se);
bf0f6f24
IM
1986 }
1987}
1988
ac53db59
RR
1989/*
1990 * sched_yield() is very simple
1991 *
1992 * The magic of dealing with the ->skip buddy is in pick_next_entity.
1993 */
1994static void yield_task_fair(struct rq *rq)
1995{
1996 struct task_struct *curr = rq->curr;
1997 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1998 struct sched_entity *se = &curr->se;
1999
2000 /*
2001 * Are we the only task in the tree?
2002 */
2003 if (unlikely(rq->nr_running == 1))
2004 return;
2005
2006 clear_buddies(cfs_rq, se);
2007
2008 if (curr->policy != SCHED_BATCH) {
2009 update_rq_clock(rq);
2010 /*
2011 * Update run-time statistics of the 'current'.
2012 */
2013 update_curr(cfs_rq);
2014 }
2015
2016 set_skip_buddy(se);
2017}
2018
d95f4122
MG
2019static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt)
2020{
2021 struct sched_entity *se = &p->se;
2022
2023 if (!se->on_rq)
2024 return false;
2025
2026 /* Tell the scheduler that we'd really like 'se' to run next. */
2027 set_next_buddy(se);
2028
d95f4122
MG
2029 yield_task_fair(rq);
2030
2031 return true;
2032}
2033
681f3e68 2034#ifdef CONFIG_SMP
bf0f6f24
IM
2035/**************************************************
2036 * Fair scheduling class load-balancing methods:
2037 */
2038
1e3c88bd
PZ
2039/*
2040 * pull_task - move a task from a remote runqueue to the local runqueue.
2041 * Both runqueues must be locked.
2042 */
2043static void pull_task(struct rq *src_rq, struct task_struct *p,
2044 struct rq *this_rq, int this_cpu)
2045{
2046 deactivate_task(src_rq, p, 0);
2047 set_task_cpu(p, this_cpu);
2048 activate_task(this_rq, p, 0);
2049 check_preempt_curr(this_rq, p, 0);
2050}
2051
2052/*
2053 * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
2054 */
2055static
2056int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
2057 struct sched_domain *sd, enum cpu_idle_type idle,
2058 int *all_pinned)
2059{
2060 int tsk_cache_hot = 0;
2061 /*
2062 * We do not migrate tasks that are:
2063 * 1) running (obviously), or
2064 * 2) cannot be migrated to this CPU due to cpus_allowed, or
2065 * 3) are cache-hot on their current CPU.
2066 */
2067 if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) {
41acab88 2068 schedstat_inc(p, se.statistics.nr_failed_migrations_affine);
1e3c88bd
PZ
2069 return 0;
2070 }
2071 *all_pinned = 0;
2072
2073 if (task_running(rq, p)) {
41acab88 2074 schedstat_inc(p, se.statistics.nr_failed_migrations_running);
1e3c88bd
PZ
2075 return 0;
2076 }
2077
2078 /*
2079 * Aggressive migration if:
2080 * 1) task is cache cold, or
2081 * 2) too many balance attempts have failed.
2082 */
2083
305e6835 2084 tsk_cache_hot = task_hot(p, rq->clock_task, sd);
1e3c88bd
PZ
2085 if (!tsk_cache_hot ||
2086 sd->nr_balance_failed > sd->cache_nice_tries) {
2087#ifdef CONFIG_SCHEDSTATS
2088 if (tsk_cache_hot) {
2089 schedstat_inc(sd, lb_hot_gained[idle]);
41acab88 2090 schedstat_inc(p, se.statistics.nr_forced_migrations);
1e3c88bd
PZ
2091 }
2092#endif
2093 return 1;
2094 }
2095
2096 if (tsk_cache_hot) {
41acab88 2097 schedstat_inc(p, se.statistics.nr_failed_migrations_hot);
1e3c88bd
PZ
2098 return 0;
2099 }
2100 return 1;
2101}
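The three rules in the comment above translate to an early-exit filter, with a cache-hot escape hatch once balancing keeps failing. A hedged standalone sketch of the same ordering (illustrative only; plain booleans stand in for the cpumask, runqueue and task_hot() checks):

/*
 * Illustrative only: the can_migrate_task() decision order with the
 * kernel structures replaced by booleans. Cache-hot tasks still move
 * once nr_balance_failed exceeds cache_nice_tries.
 */
#include <stdbool.h>
#include <stdio.h>

static bool may_migrate(bool allowed_on_dst, bool running, bool cache_hot,
			unsigned int nr_balance_failed,
			unsigned int cache_nice_tries)
{
	if (!allowed_on_dst)	/* 2) cpus_allowed forbids the destination */
		return false;
	if (running)		/* 1) currently running on the source cpu */
		return false;
	/* 3) cache-hot tasks stay put until balancing keeps failing */
	return !cache_hot || nr_balance_failed > cache_nice_tries;
}

int main(void)
{
	/* a cache-hot task moves only after repeated balance failures */
	printf("%d %d\n", may_migrate(true, false, true, 1, 2),
			  may_migrate(true, false, true, 3, 2));
	return 0;
}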
2102
897c395f
PZ
2103/*
2104 * move_one_task tries to move exactly one task from busiest to this_rq, as
2105 * part of active balancing operations within "domain".
2106 * Returns 1 if successful and 0 otherwise.
2107 *
2108 * Called with both runqueues locked.
2109 */
2110static int
2111move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest,
2112 struct sched_domain *sd, enum cpu_idle_type idle)
2113{
2114 struct task_struct *p, *n;
2115 struct cfs_rq *cfs_rq;
2116 int pinned = 0;
2117
2118 for_each_leaf_cfs_rq(busiest, cfs_rq) {
2119 list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) {
2120
2121 if (!can_migrate_task(p, busiest, this_cpu,
2122 sd, idle, &pinned))
2123 continue;
2124
2125 pull_task(busiest, p, this_rq, this_cpu);
2126 /*
2127 * Right now, this is only the second place pull_task()
2128 * is called, so we can safely collect pull_task()
2129 * stats here rather than inside pull_task().
2130 */
2131 schedstat_inc(sd, lb_gained[idle]);
2132 return 1;
2133 }
2134 }
2135
2136 return 0;
2137}
2138
1e3c88bd
PZ
2139static unsigned long
2140balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2141 unsigned long max_load_move, struct sched_domain *sd,
2142 enum cpu_idle_type idle, int *all_pinned,
931aeeda 2143 struct cfs_rq *busiest_cfs_rq)
1e3c88bd 2144{
b30aef17 2145 int loops = 0, pulled = 0;
1e3c88bd 2146 long rem_load_move = max_load_move;
ee00e66f 2147 struct task_struct *p, *n;
1e3c88bd
PZ
2148
2149 if (max_load_move == 0)
2150 goto out;
2151
ee00e66f
PZ
2152 list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
2153 if (loops++ > sysctl_sched_nr_migrate)
2154 break;
1e3c88bd 2155
ee00e66f 2156 if ((p->se.load.weight >> 1) > rem_load_move ||
b30aef17
KC
2157 !can_migrate_task(p, busiest, this_cpu, sd, idle,
2158 all_pinned))
ee00e66f 2159 continue;
1e3c88bd 2160
ee00e66f
PZ
2161 pull_task(busiest, p, this_rq, this_cpu);
2162 pulled++;
2163 rem_load_move -= p->se.load.weight;
1e3c88bd
PZ
2164
2165#ifdef CONFIG_PREEMPT
ee00e66f
PZ
2166 /*
2167 * NEWIDLE balancing is a source of latency, so preemptible
2168 * kernels will stop after the first task is pulled to minimize
2169 * the critical section.
2170 */
2171 if (idle == CPU_NEWLY_IDLE)
2172 break;
1e3c88bd
PZ
2173#endif
2174
ee00e66f
PZ
2175 /*
2176 * We only want to steal up to the prescribed amount of
2177 * weighted load.
2178 */
2179 if (rem_load_move <= 0)
2180 break;
1e3c88bd
PZ
2181 }
2182out:
2183 /*
2184 * Right now, this is one of only two places pull_task() is called,
2185 * so we can safely collect pull_task() stats here rather than
2186 * inside pull_task().
2187 */
2188 schedstat_add(sd, lb_gained[idle], pulled);
2189
1e3c88bd
PZ
2190 return max_load_move - rem_load_move;
2191}
2192
230059de 2193#ifdef CONFIG_FAIR_GROUP_SCHED
9e3081ca
PZ
2194/*
2195 * update tg->load_weight by folding this cpu's load_avg
2196 */
67e86250 2197static int update_shares_cpu(struct task_group *tg, int cpu)
9e3081ca
PZ
2198{
2199 struct cfs_rq *cfs_rq;
2200 unsigned long flags;
2201 struct rq *rq;
9e3081ca
PZ
2202
2203 if (!tg->se[cpu])
2204 return 0;
2205
2206 rq = cpu_rq(cpu);
2207 cfs_rq = tg->cfs_rq[cpu];
2208
2209 raw_spin_lock_irqsave(&rq->lock, flags);
2210
2211 update_rq_clock(rq);
d6b55918 2212 update_cfs_load(cfs_rq, 1);
9e3081ca
PZ
2213
2214 /*
2215 * We need to update shares after updating tg->load_weight in
2216 * order to adjust the weight of groups with long running tasks.
2217 */
6d5ab293 2218 update_cfs_shares(cfs_rq);
9e3081ca
PZ
2219
2220 raw_spin_unlock_irqrestore(&rq->lock, flags);
2221
2222 return 0;
2223}
2224
2225static void update_shares(int cpu)
2226{
2227 struct cfs_rq *cfs_rq;
2228 struct rq *rq = cpu_rq(cpu);
2229
2230 rcu_read_lock();
9763b67f
PZ
2231 /*
2232 * Iterates the task_group tree in a bottom up fashion, see
2233 * list_add_leaf_cfs_rq() for details.
2234 */
67e86250
PT
2235 for_each_leaf_cfs_rq(rq, cfs_rq)
2236 update_shares_cpu(cfs_rq->tg, cpu);
9e3081ca
PZ
2237 rcu_read_unlock();
2238}
2239
9763b67f
PZ
2240/*
2241 * Compute the cpu's hierarchical load factor for each task group.
2242 * This needs to be done in a top-down fashion because the load of a child
2243 * group is a fraction of its parents load.
2244 */
2245static int tg_load_down(struct task_group *tg, void *data)
2246{
2247 unsigned long load;
2248 long cpu = (long)data;
2249
2250 if (!tg->parent) {
2251 load = cpu_rq(cpu)->load.weight;
2252 } else {
2253 load = tg->parent->cfs_rq[cpu]->h_load;
2254 load *= tg->se[cpu]->load.weight;
2255 load /= tg->parent->cfs_rq[cpu]->load.weight + 1;
2256 }
2257
2258 tg->cfs_rq[cpu]->h_load = load;
2259
2260 return 0;
2261}
2262
2263static void update_h_load(long cpu)
2264{
2265 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
2266}
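tg_load_down() walks the task_group tree top-down, so each group's h_load is its parent's h_load scaled by the group entity's share of the parent queue's weight; the "+ 1" merely guards against a zero divisor. A hedged worked example with made-up weights (illustrative only, not part of this file):

/*
 * Illustrative only: the top-down h_load recurrence used by tg_load_down().
 */
#include <stdio.h>

static unsigned long h_load(unsigned long parent_h_load,
			    unsigned long se_weight,
			    unsigned long parent_cfs_rq_weight)
{
	return parent_h_load * se_weight / (parent_cfs_rq_weight + 1);
}

int main(void)
{
	unsigned long root = 3072;			/* cpu_rq(cpu)->load.weight */
	unsigned long child = h_load(root, 1024, 3072);	/* ~1023 */
	unsigned long grandchild = h_load(child, 512, 1024);	/* ~510 */

	printf("%lu %lu\n", child, grandchild);
	return 0;
}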
2267
230059de
PZ
2268static unsigned long
2269load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
2270 unsigned long max_load_move,
2271 struct sched_domain *sd, enum cpu_idle_type idle,
931aeeda 2272 int *all_pinned)
230059de
PZ
2273{
2274 long rem_load_move = max_load_move;
9763b67f 2275 struct cfs_rq *busiest_cfs_rq;
230059de
PZ
2276
2277 rcu_read_lock();
9763b67f 2278 update_h_load(cpu_of(busiest));
230059de 2279
9763b67f 2280 for_each_leaf_cfs_rq(busiest, busiest_cfs_rq) {
230059de
PZ
2281 unsigned long busiest_h_load = busiest_cfs_rq->h_load;
2282 unsigned long busiest_weight = busiest_cfs_rq->load.weight;
2283 u64 rem_load, moved_load;
2284
2285 /*
2286 * empty group
2287 */
2288 if (!busiest_cfs_rq->task_weight)
2289 continue;
2290
2291 rem_load = (u64)rem_load_move * busiest_weight;
2292 rem_load = div_u64(rem_load, busiest_h_load + 1);
2293
2294 moved_load = balance_tasks(this_rq, this_cpu, busiest,
931aeeda 2295 rem_load, sd, idle, all_pinned,
230059de
PZ
2296 busiest_cfs_rq);
2297
2298 if (!moved_load)
2299 continue;
2300
2301 moved_load *= busiest_h_load;
2302 moved_load = div_u64(moved_load, busiest_weight + 1);
2303
2304 rem_load_move -= moved_load;
2305 if (rem_load_move < 0)
2306 break;
2307 }
2308 rcu_read_unlock();
2309
2310 return max_load_move - rem_load_move;
2311}
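The conversion in the loop above runs in both directions: the remaining hierarchical load is scaled down into the group's own weight units (rem_load = rem_load_move * busiest_weight / (busiest_h_load + 1)) before calling balance_tasks(), and whatever was actually pulled is scaled back up (moved_load * busiest_h_load / (busiest_weight + 1)) before being subtracted from rem_load_move. As a hedged worked example with made-up figures: with rem_load_move = 2048, a group queue weight of 1024 and an h_load of 512, roughly 4088 units of group-local weight may be pulled, and moving a single weight-1024 task counts as about 511 units of hierarchical load, leaving rem_load_move at roughly 1537.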
2312#else
9e3081ca
PZ
2313static inline void update_shares(int cpu)
2314{
2315}
2316
230059de
PZ
2317static unsigned long
2318load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
2319 unsigned long max_load_move,
2320 struct sched_domain *sd, enum cpu_idle_type idle,
931aeeda 2321 int *all_pinned)
230059de
PZ
2322{
2323 return balance_tasks(this_rq, this_cpu, busiest,
2324 max_load_move, sd, idle, all_pinned,
931aeeda 2325 &busiest->cfs);
230059de
PZ
2326}
2327#endif
2328
1e3c88bd
PZ
2329/*
2330 * move_tasks tries to move up to max_load_move weighted load from busiest to
2331 * this_rq, as part of a balancing operation within domain "sd".
2332 * Returns 1 if successful and 0 otherwise.
2333 *
2334 * Called with both runqueues locked.
2335 */
2336static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
2337 unsigned long max_load_move,
2338 struct sched_domain *sd, enum cpu_idle_type idle,
2339 int *all_pinned)
2340{
3d45fd80 2341 unsigned long total_load_moved = 0, load_moved;
1e3c88bd
PZ
2342
2343 do {
3d45fd80 2344 load_moved = load_balance_fair(this_rq, this_cpu, busiest,
1e3c88bd 2345 max_load_move - total_load_moved,
931aeeda 2346 sd, idle, all_pinned);
3d45fd80
PZ
2347
2348 total_load_moved += load_moved;
1e3c88bd
PZ
2349
2350#ifdef CONFIG_PREEMPT
2351 /*
2352 * NEWIDLE balancing is a source of latency, so preemptible
2353 * kernels will stop after the first task is pulled to minimize
2354 * the critical section.
2355 */
2356 if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
2357 break;
baa8c110
PZ
2358
2359 if (raw_spin_is_contended(&this_rq->lock) ||
2360 raw_spin_is_contended(&busiest->lock))
2361 break;
1e3c88bd 2362#endif
3d45fd80 2363 } while (load_moved && max_load_move > total_load_moved);
1e3c88bd
PZ
2364
2365 return total_load_moved > 0;
2366}
2367
1e3c88bd
PZ
2368/********** Helpers for find_busiest_group ************************/
2369/*
2370 * sd_lb_stats - Structure to store the statistics of a sched_domain
2371 * during load balancing.
2372 */
2373struct sd_lb_stats {
2374 struct sched_group *busiest; /* Busiest group in this sd */
2375 struct sched_group *this; /* Local group in this sd */
2376 unsigned long total_load; /* Total load of all groups in sd */
2377 unsigned long total_pwr; /* Total power of all groups in sd */
2378 unsigned long avg_load; /* Average load across all groups in sd */
2379
2380 /** Statistics of this group */
2381 unsigned long this_load;
2382 unsigned long this_load_per_task;
2383 unsigned long this_nr_running;
fab47622 2384 unsigned long this_has_capacity;
aae6d3dd 2385 unsigned int this_idle_cpus;
1e3c88bd
PZ
2386
2387 /* Statistics of the busiest group */
aae6d3dd 2388 unsigned int busiest_idle_cpus;
1e3c88bd
PZ
2389 unsigned long max_load;
2390 unsigned long busiest_load_per_task;
2391 unsigned long busiest_nr_running;
dd5feea1 2392 unsigned long busiest_group_capacity;
fab47622 2393 unsigned long busiest_has_capacity;
aae6d3dd 2394 unsigned int busiest_group_weight;
1e3c88bd
PZ
2395
2396 int group_imb; /* Is there imbalance in this sd */
2397#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2398 int power_savings_balance; /* Is powersave balance needed for this sd */
2399 struct sched_group *group_min; /* Least loaded group in sd */
2400 struct sched_group *group_leader; /* Group which relieves group_min */
2401 unsigned long min_load_per_task; /* load_per_task in group_min */
2402 unsigned long leader_nr_running; /* Nr running of group_leader */
2403 unsigned long min_nr_running; /* Nr running of group_min */
2404#endif
2405};
2406
2407/*
2408 * sg_lb_stats - stats of a sched_group required for load_balancing
2409 */
2410struct sg_lb_stats {
2411 unsigned long avg_load; /*Avg load across the CPUs of the group */
2412 unsigned long group_load; /* Total load over the CPUs of the group */
2413 unsigned long sum_nr_running; /* Nr tasks running in the group */
2414 unsigned long sum_weighted_load; /* Weighted load of group's tasks */
2415 unsigned long group_capacity;
aae6d3dd
SS
2416 unsigned long idle_cpus;
2417 unsigned long group_weight;
1e3c88bd 2418 int group_imb; /* Is there an imbalance in the group ? */
fab47622 2419 int group_has_capacity; /* Is there extra capacity in the group? */
1e3c88bd
PZ
2420};
2421
2422/**
2423 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
2424 * @group: The group whose first cpu is to be returned.
2425 */
2426static inline unsigned int group_first_cpu(struct sched_group *group)
2427{
2428 return cpumask_first(sched_group_cpus(group));
2429}
2430
2431/**
2432 * get_sd_load_idx - Obtain the load index for a given sched domain.
2433 * @sd: The sched_domain whose load_idx is to be obtained.
2434 * @idle: The idle status of the CPU whose sd load_idx is obtained.
2435 */
2436static inline int get_sd_load_idx(struct sched_domain *sd,
2437 enum cpu_idle_type idle)
2438{
2439 int load_idx;
2440
2441 switch (idle) {
2442 case CPU_NOT_IDLE:
2443 load_idx = sd->busy_idx;
2444 break;
2445
2446 case CPU_NEWLY_IDLE:
2447 load_idx = sd->newidle_idx;
2448 break;
2449 default:
2450 load_idx = sd->idle_idx;
2451 break;
2452 }
2453
2454 return load_idx;
2455}
2456
2457
2458#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
2459/**
2460 * init_sd_power_savings_stats - Initialize power savings statistics for
2461 * the given sched_domain, during load balancing.
2462 *
2463 * @sd: Sched domain whose power-savings statistics are to be initialized.
2464 * @sds: Variable containing the statistics for sd.
2465 * @idle: Idle status of the CPU at which we're performing load-balancing.
2466 */
2467static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2468 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2469{
2470 /*
2471 * Busy processors will not participate in power savings
2472 * balance.
2473 */
2474 if (idle == CPU_NOT_IDLE || !(sd->flags & SD_POWERSAVINGS_BALANCE))
2475 sds->power_savings_balance = 0;
2476 else {
2477 sds->power_savings_balance = 1;
2478 sds->min_nr_running = ULONG_MAX;
2479 sds->leader_nr_running = 0;
2480 }
2481}
2482
2483/**
2484 * update_sd_power_savings_stats - Update the power saving stats for a
2485 * sched_domain while performing load balancing.
2486 *
2487 * @group: sched_group belonging to the sched_domain under consideration.
2488 * @sds: Variable containing the statistics of the sched_domain
2489 * @local_group: Does group contain the CPU for which we're performing
2490 * load balancing ?
2491 * @sgs: Variable containing the statistics of the group.
2492 */
2493static inline void update_sd_power_savings_stats(struct sched_group *group,
2494 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2495{
2496
2497 if (!sds->power_savings_balance)
2498 return;
2499
2500 /*
2501 * If the local group is idle or completely loaded
2502 * no need to do power savings balance at this domain
2503 */
2504 if (local_group && (sds->this_nr_running >= sgs->group_capacity ||
2505 !sds->this_nr_running))
2506 sds->power_savings_balance = 0;
2507
2508 /*
2509 * If a group is already running at full capacity or idle,
2510 * don't include that group in power savings calculations
2511 */
2512 if (!sds->power_savings_balance ||
2513 sgs->sum_nr_running >= sgs->group_capacity ||
2514 !sgs->sum_nr_running)
2515 return;
2516
2517 /*
2518 * Calculate the group which has the least non-idle load.
2519 * This is the group from where we need to pick up the load
2520 * for saving power
2521 */
2522 if ((sgs->sum_nr_running < sds->min_nr_running) ||
2523 (sgs->sum_nr_running == sds->min_nr_running &&
2524 group_first_cpu(group) > group_first_cpu(sds->group_min))) {
2525 sds->group_min = group;
2526 sds->min_nr_running = sgs->sum_nr_running;
2527 sds->min_load_per_task = sgs->sum_weighted_load /
2528 sgs->sum_nr_running;
2529 }
2530
2531 /*
2532 * Calculate the group which is almost near its
2533 * capacity but still has some space to pick up some load
2534 * from other group and save more power
2535 */
2536 if (sgs->sum_nr_running + 1 > sgs->group_capacity)
2537 return;
2538
2539 if (sgs->sum_nr_running > sds->leader_nr_running ||
2540 (sgs->sum_nr_running == sds->leader_nr_running &&
2541 group_first_cpu(group) < group_first_cpu(sds->group_leader))) {
2542 sds->group_leader = group;
2543 sds->leader_nr_running = sgs->sum_nr_running;
2544 }
2545}
2546
2547/**
2548 * check_power_save_busiest_group - see if there is potential for some power-savings balance
2549 * @sds: Variable containing the statistics of the sched_domain
2550 * under consideration.
2551 * @this_cpu: Cpu at which we're currently performing load-balancing.
2552 * @imbalance: Variable to store the imbalance.
2553 *
2554 * Description:
2555 * Check if we have potential to perform some power-savings balance.
2556 * If yes, set the busiest group to be the least loaded group in the
2557 * sched_domain, so that its CPUs can be put to idle.
2558 *
2559 * Returns 1 if there is potential to perform power-savings balance.
2560 * Else returns 0.
2561 */
2562static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2563 int this_cpu, unsigned long *imbalance)
2564{
2565 if (!sds->power_savings_balance)
2566 return 0;
2567
2568 if (sds->this != sds->group_leader ||
2569 sds->group_leader == sds->group_min)
2570 return 0;
2571
2572 *imbalance = sds->min_load_per_task;
2573 sds->busiest = sds->group_min;
2574
2575 return 1;
2576
2577}
2578#else /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2579static inline void init_sd_power_savings_stats(struct sched_domain *sd,
2580 struct sd_lb_stats *sds, enum cpu_idle_type idle)
2581{
2582 return;
2583}
2584
2585static inline void update_sd_power_savings_stats(struct sched_group *group,
2586 struct sd_lb_stats *sds, int local_group, struct sg_lb_stats *sgs)
2587{
2588 return;
2589}
2590
2591static inline int check_power_save_busiest_group(struct sd_lb_stats *sds,
2592 int this_cpu, unsigned long *imbalance)
2593{
2594 return 0;
2595}
2596#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
2597
2598
2599unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
2600{
1399fa78 2601 return SCHED_POWER_SCALE;
1e3c88bd
PZ
2602}
2603
2604unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
2605{
2606 return default_scale_freq_power(sd, cpu);
2607}
2608
2609unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu)
2610{
669c55e9 2611 unsigned long weight = sd->span_weight;
1e3c88bd
PZ
2612 unsigned long smt_gain = sd->smt_gain;
2613
2614 smt_gain /= weight;
2615
2616 return smt_gain;
2617}
2618
2619unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
2620{
2621 return default_scale_smt_power(sd, cpu);
2622}
2623
2624unsigned long scale_rt_power(int cpu)
2625{
2626 struct rq *rq = cpu_rq(cpu);
2627 u64 total, available;
2628
1e3c88bd 2629 total = sched_avg_period() + (rq->clock - rq->age_stamp);
aa483808
VP
2630
2631 if (unlikely(total < rq->rt_avg)) {
2632 /* Ensures that power won't end up being negative */
2633 available = 0;
2634 } else {
2635 available = total - rq->rt_avg;
2636 }
1e3c88bd 2637
1399fa78
NR
2638 if (unlikely((s64)total < SCHED_POWER_SCALE))
2639 total = SCHED_POWER_SCALE;
1e3c88bd 2640
1399fa78 2641 total >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
2642
2643 return div_u64(available, total);
2644}
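scale_rt_power() returns the fraction of the averaged period left over after RT and IRQ time, expressed out of SCHED_POWER_SCALE. A hedged standalone sketch with made-up numbers (illustrative only; the real code uses rq->rt_avg over sched_avg_period() plus the elapsed clock):

/*
 * Illustrative only: the fraction scale_rt_power() produces.
 * SCHED_POWER_SCALE is 1024, i.e. SCHED_POWER_SHIFT == 10.
 */
#include <stdio.h>
#include <stdint.h>

#define SCHED_POWER_SHIFT	10
#define SCHED_POWER_SCALE	(1UL << SCHED_POWER_SHIFT)

static uint64_t rt_scaled_power(uint64_t total_ns, uint64_t rt_avg_ns)
{
	uint64_t available = total_ns < rt_avg_ns ? 0 : total_ns - rt_avg_ns;

	if ((int64_t)total_ns < (int64_t)SCHED_POWER_SCALE)
		total_ns = SCHED_POWER_SCALE;
	total_ns >>= SCHED_POWER_SHIFT;

	return available / total_ns;
}

int main(void)
{
	/* RT + IRQ ate 25% of a ~1s window: CFS keeps ~768/1024 of the power */
	printf("%llu\n", (unsigned long long)rt_scaled_power(1000000000ULL,
							     250000000ULL));
	return 0;
}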
2645
2646static void update_cpu_power(struct sched_domain *sd, int cpu)
2647{
669c55e9 2648 unsigned long weight = sd->span_weight;
1399fa78 2649 unsigned long power = SCHED_POWER_SCALE;
1e3c88bd
PZ
2650 struct sched_group *sdg = sd->groups;
2651
1e3c88bd
PZ
2652 if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) {
2653 if (sched_feat(ARCH_POWER))
2654 power *= arch_scale_smt_power(sd, cpu);
2655 else
2656 power *= default_scale_smt_power(sd, cpu);
2657
1399fa78 2658 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
2659 }
2660
9c3f75cb 2661 sdg->sgp->power_orig = power;
9d5efe05
SV
2662
2663 if (sched_feat(ARCH_POWER))
2664 power *= arch_scale_freq_power(sd, cpu);
2665 else
2666 power *= default_scale_freq_power(sd, cpu);
2667
1399fa78 2668 power >>= SCHED_POWER_SHIFT;
9d5efe05 2669
1e3c88bd 2670 power *= scale_rt_power(cpu);
1399fa78 2671 power >>= SCHED_POWER_SHIFT;
1e3c88bd
PZ
2672
2673 if (!power)
2674 power = 1;
2675
e51fd5e2 2676 cpu_rq(cpu)->cpu_power = power;
9c3f75cb 2677 sdg->sgp->power = power;
1e3c88bd
PZ
2678}
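As a hedged worked example of the compounded scaling above (made-up figures, assuming a smt_gain of 1178, a common default for sibling domains): a 2-thread SMT sibling starts from SCHED_POWER_SCALE = 1024; the SMT factor of 1178/2 = 589 gives power = 1024 * 589 >> 10 = 589 (recorded as power_orig); a frequency factor of 1024 leaves it at 589; and an RT/IRQ factor of 768 (about 25% stolen) yields 589 * 768 >> 10 = 441 as the final cpu_power.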
2679
2680static void update_group_power(struct sched_domain *sd, int cpu)
2681{
2682 struct sched_domain *child = sd->child;
2683 struct sched_group *group, *sdg = sd->groups;
2684 unsigned long power;
2685
2686 if (!child) {
2687 update_cpu_power(sd, cpu);
2688 return;
2689 }
2690
2691 power = 0;
2692
2693 group = child->groups;
2694 do {
9c3f75cb 2695 power += group->sgp->power;
1e3c88bd
PZ
2696 group = group->next;
2697 } while (group != child->groups);
2698
9c3f75cb 2699 sdg->sgp->power = power;
1e3c88bd
PZ
2700}
2701
9d5efe05
SV
2702/*
2703 * Try and fix up capacity for tiny siblings; this is needed when
2704 * things like SD_ASYM_PACKING need f_b_g to select another sibling
2705 * which on its own isn't powerful enough.
2706 *
2707 * See update_sd_pick_busiest() and check_asym_packing().
2708 */
2709static inline int
2710fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
2711{
2712 /*
1399fa78 2713 * Only siblings can have significantly less than SCHED_POWER_SCALE
9d5efe05 2714 */
a6c75f2f 2715 if (!(sd->flags & SD_SHARE_CPUPOWER))
9d5efe05
SV
2716 return 0;
2717
2718 /*
2719 * If ~90% of the cpu_power is still there, we're good.
2720 */
9c3f75cb 2721 if (group->sgp->power * 32 > group->sgp->power_orig * 29)
9d5efe05
SV
2722 return 1;
2723
2724 return 0;
2725}
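The 32/29 test above means "power is still above 29/32, roughly 90.6%, of power_orig". A hedged worked example with made-up figures: a 4-way SMT sibling with power_orig = 1178/4 ~= 294 always rounds to 0 capacity under DIV_ROUND_CLOSEST, so if its current power is, say, 280 (280 * 32 = 8960 > 294 * 29 = 8526) the group is granted a capacity of 1 and find_busiest_group can still move work onto it; had RT/IRQ pressure dragged power below roughly 266, the capacity would stay 0.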
2726
1e3c88bd
PZ
2727/**
2728 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
2729 * @sd: The sched_domain whose statistics are to be updated.
2730 * @group: sched_group whose statistics are to be updated.
2731 * @this_cpu: Cpu for which load balance is currently performed.
2732 * @idle: Idle status of this_cpu
2733 * @load_idx: Load index of sched_domain of this_cpu for load calc.
1e3c88bd
PZ
2734 * @local_group: Does group contain this_cpu.
2735 * @cpus: Set of cpus considered for load balancing.
2736 * @balance: Should we balance.
2737 * @sgs: variable to hold the statistics for this group.
2738 */
2739static inline void update_sg_lb_stats(struct sched_domain *sd,
2740 struct sched_group *group, int this_cpu,
46e49b38 2741 enum cpu_idle_type idle, int load_idx,
1e3c88bd
PZ
2742 int local_group, const struct cpumask *cpus,
2743 int *balance, struct sg_lb_stats *sgs)
2744{
2582f0eb 2745 unsigned long load, max_cpu_load, min_cpu_load, max_nr_running;
1e3c88bd
PZ
2746 int i;
2747 unsigned int balance_cpu = -1, first_idle_cpu = 0;
dd5feea1 2748 unsigned long avg_load_per_task = 0;
1e3c88bd 2749
871e35bc 2750 if (local_group)
1e3c88bd 2751 balance_cpu = group_first_cpu(group);
1e3c88bd
PZ
2752
2753 /* Tally up the load of all CPUs in the group */
1e3c88bd
PZ
2754 max_cpu_load = 0;
2755 min_cpu_load = ~0UL;
2582f0eb 2756 max_nr_running = 0;
1e3c88bd
PZ
2757
2758 for_each_cpu_and(i, sched_group_cpus(group), cpus) {
2759 struct rq *rq = cpu_rq(i);
2760
1e3c88bd
PZ
2761 /* Bias balancing toward cpus of our domain */
2762 if (local_group) {
2763 if (idle_cpu(i) && !first_idle_cpu) {
2764 first_idle_cpu = 1;
2765 balance_cpu = i;
2766 }
2767
2768 load = target_load(i, load_idx);
2769 } else {
2770 load = source_load(i, load_idx);
2582f0eb 2771 if (load > max_cpu_load) {
1e3c88bd 2772 max_cpu_load = load;
2582f0eb
NR
2773 max_nr_running = rq->nr_running;
2774 }
1e3c88bd
PZ
2775 if (min_cpu_load > load)
2776 min_cpu_load = load;
2777 }
2778
2779 sgs->group_load += load;
2780 sgs->sum_nr_running += rq->nr_running;
2781 sgs->sum_weighted_load += weighted_cpuload(i);
aae6d3dd
SS
2782 if (idle_cpu(i))
2783 sgs->idle_cpus++;
1e3c88bd
PZ
2784 }
2785
2786 /*
2787 * First idle cpu or the first cpu (busiest) in this sched group
2788 * is eligible for doing load balancing at this and above
2789 * domains. In the newly idle case, we will allow all the cpu's
2790 * to do the newly idle load balance.
2791 */
bbc8cb5b
PZ
2792 if (idle != CPU_NEWLY_IDLE && local_group) {
2793 if (balance_cpu != this_cpu) {
2794 *balance = 0;
2795 return;
2796 }
2797 update_group_power(sd, this_cpu);
1e3c88bd
PZ
2798 }
2799
2800 /* Adjust by relative CPU power of the group */
9c3f75cb 2801 sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / group->sgp->power;
1e3c88bd 2802
1e3c88bd
PZ
2803 /*
2804 * Consider the group unbalanced when the imbalance is larger
866ab43e 2805 * than the average weight of a task.
1e3c88bd
PZ
2806 *
2807 * APZ: with cgroup the avg task weight can vary wildly and
2808 * might not be a suitable number - should we keep a
2809 * normalized nr_running number somewhere that negates
2810 * the hierarchy?
2811 */
dd5feea1
SS
2812 if (sgs->sum_nr_running)
2813 avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
1e3c88bd 2814
866ab43e 2815 if ((max_cpu_load - min_cpu_load) >= avg_load_per_task && max_nr_running > 1)
1e3c88bd
PZ
2816 sgs->group_imb = 1;
2817
9c3f75cb 2818 sgs->group_capacity = DIV_ROUND_CLOSEST(group->sgp->power,
1399fa78 2819 SCHED_POWER_SCALE);
9d5efe05
SV
2820 if (!sgs->group_capacity)
2821 sgs->group_capacity = fix_small_capacity(sd, group);
aae6d3dd 2822 sgs->group_weight = group->group_weight;
fab47622
NR
2823
2824 if (sgs->group_capacity > sgs->sum_nr_running)
2825 sgs->group_has_capacity = 1;
1e3c88bd
PZ
2826}
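The two scalings at the end of update_sg_lb_stats() are worth seeing with numbers. A hedged standalone sketch with made-up figures for a two-thread SMT group (illustrative only, not part of this file):

/*
 * Illustrative only: how update_sg_lb_stats() normalizes group load and
 * capacity by the group's cpu power.
 */
#include <stdio.h>

#define SCHED_POWER_SCALE	1024UL
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long group_power = 1178;	/* two siblings sharing one core */
	unsigned long group_load = 2048;	/* two nice-0 tasks */

	unsigned long avg_load = group_load * SCHED_POWER_SCALE / group_power;
	unsigned long capacity = DIV_ROUND_CLOSEST(group_power, SCHED_POWER_SCALE);

	/* ~1780 and 1: the shared core counts as a single unit of capacity,
	 * so with two runnable tasks this group reports no spare capacity. */
	printf("avg_load=%lu capacity=%lu\n", avg_load, capacity);
	return 0;
}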
2827
532cb4c4
MN
2828/**
2829 * update_sd_pick_busiest - return 1 on busiest group
2830 * @sd: sched_domain whose statistics are to be checked
2831 * @sds: sched_domain statistics
2832 * @sg: sched_group candidate to be checked for being the busiest
b6b12294
MN
2833 * @sgs: sched_group statistics
2834 * @this_cpu: the current cpu
532cb4c4
MN
2835 *
2836 * Determine if @sg is a busier group than the previously selected
2837 * busiest group.
2838 */
2839static bool update_sd_pick_busiest(struct sched_domain *sd,
2840 struct sd_lb_stats *sds,
2841 struct sched_group *sg,
2842 struct sg_lb_stats *sgs,
2843 int this_cpu)
2844{
2845 if (sgs->avg_load <= sds->max_load)
2846 return false;
2847
2848 if (sgs->sum_nr_running > sgs->group_capacity)
2849 return true;
2850
2851 if (sgs->group_imb)
2852 return true;
2853
2854 /*
2855 * ASYM_PACKING needs to move all the work to the lowest
2856 * numbered CPUs in the group, therefore mark all groups
2857 * higher than ourself as busy.
2858 */
2859 if ((sd->flags & SD_ASYM_PACKING) && sgs->sum_nr_running &&
2860 this_cpu < group_first_cpu(sg)) {
2861 if (!sds->busiest)
2862 return true;
2863
2864 if (group_first_cpu(sds->busiest) > group_first_cpu(sg))
2865 return true;
2866 }
2867
2868 return false;
2869}
2870
1e3c88bd
PZ
2871/**
2872 * update_sd_lb_stats - Update sched_group's statistics for load balancing.
2873 * @sd: sched_domain whose statistics are to be updated.
2874 * @this_cpu: Cpu for which load balance is currently performed.
2875 * @idle: Idle status of this_cpu
1e3c88bd
PZ
2876 * @cpus: Set of cpus considered for load balancing.
2877 * @balance: Should we balance.
2878 * @sds: variable to hold the statistics for this sched_domain.
2879 */
2880static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
46e49b38
VP
2881 enum cpu_idle_type idle, const struct cpumask *cpus,
2882 int *balance, struct sd_lb_stats *sds)
1e3c88bd
PZ
2883{
2884 struct sched_domain *child = sd->child;
532cb4c4 2885 struct sched_group *sg = sd->groups;
1e3c88bd
PZ
2886 struct sg_lb_stats sgs;
2887 int load_idx, prefer_sibling = 0;
2888
2889 if (child && child->flags & SD_PREFER_SIBLING)
2890 prefer_sibling = 1;
2891
2892 init_sd_power_savings_stats(sd, sds, idle);
2893 load_idx = get_sd_load_idx(sd, idle);
2894
2895 do {
2896 int local_group;
2897
532cb4c4 2898 local_group = cpumask_test_cpu(this_cpu, sched_group_cpus(sg));
1e3c88bd 2899 memset(&sgs, 0, sizeof(sgs));
46e49b38 2900 update_sg_lb_stats(sd, sg, this_cpu, idle, load_idx,
1e3c88bd
PZ
2901 local_group, cpus, balance, &sgs);
2902
8f190fb3 2903 if (local_group && !(*balance))
1e3c88bd
PZ
2904 return;
2905
2906 sds->total_load += sgs.group_load;
9c3f75cb 2907 sds->total_pwr += sg->sgp->power;
1e3c88bd
PZ
2908
2909 /*
2910 * In case the child domain prefers tasks go to siblings
532cb4c4 2911 * first, lower the sg capacity to one so that we'll try
75dd321d
NR
2912 * and move all the excess tasks away. We lower the capacity
2913 * of a group only if the local group has the capacity to fit
2914 * these excess tasks, i.e. nr_running < group_capacity. The
2915 * extra check prevents the case where you always pull from the
2916 * heaviest group when it is already under-utilized (possible
2917 * when a large weight task outweighs the tasks on the system).
1e3c88bd 2918 */
75dd321d 2919 if (prefer_sibling && !local_group && sds->this_has_capacity)
1e3c88bd
PZ
2920 sgs.group_capacity = min(sgs.group_capacity, 1UL);
2921
2922 if (local_group) {
2923 sds->this_load = sgs.avg_load;
532cb4c4 2924 sds->this = sg;
1e3c88bd
PZ
2925 sds->this_nr_running = sgs.sum_nr_running;
2926 sds->this_load_per_task = sgs.sum_weighted_load;
fab47622 2927 sds->this_has_capacity = sgs.group_has_capacity;
aae6d3dd 2928 sds->this_idle_cpus = sgs.idle_cpus;
532cb4c4 2929 } else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
1e3c88bd 2930 sds->max_load = sgs.avg_load;
532cb4c4 2931 sds->busiest = sg;
1e3c88bd 2932 sds->busiest_nr_running = sgs.sum_nr_running;
aae6d3dd 2933 sds->busiest_idle_cpus = sgs.idle_cpus;
dd5feea1 2934 sds->busiest_group_capacity = sgs.group_capacity;
1e3c88bd 2935 sds->busiest_load_per_task = sgs.sum_weighted_load;
fab47622 2936 sds->busiest_has_capacity = sgs.group_has_capacity;
aae6d3dd 2937 sds->busiest_group_weight = sgs.group_weight;
1e3c88bd
PZ
2938 sds->group_imb = sgs.group_imb;
2939 }
2940
532cb4c4
MN
2941 update_sd_power_savings_stats(sg, sds, local_group, &sgs);
2942 sg = sg->next;
2943 } while (sg != sd->groups);
2944}
2945
2ec57d44 2946int __weak arch_sd_sibling_asym_packing(void)
532cb4c4
MN
2947{
2948 return 0*SD_ASYM_PACKING;
2949}
2950
2951/**
2952 * check_asym_packing - Check to see if the group is packed into the
2953 * sched domain.
2954 *
2955 * This is primarily intended to be used at the sibling level. Some
2956 * cores like POWER7 prefer to use lower numbered SMT threads. In the
2957 * case of POWER7, it can move to lower SMT modes only when higher
2958 * threads are idle. When in lower SMT modes, the threads will
2959 * perform better since they share less core resources. Hence when we
2960 * have idle threads, we want them to be the higher ones.
2961 *
2962 * This packing function is run on idle threads. It checks to see if
2963 * the busiest CPU in this domain (core in the P7 case) has a higher
2964 * CPU number than the packing function is being run on. Here we are
2965 * assuming lower CPU number will be equivalent to lower a SMT thread
2966 * number.
2967 *
b6b12294
MN
2968 * Returns 1 when packing is required and a task should be moved to
2969 * this CPU. The amount of the imbalance is returned in *imbalance.
2970 *
532cb4c4
MN
2971 * @sd: The sched_domain whose packing is to be checked.
2972 * @sds: Statistics of the sched_domain which is to be packed
2973 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
2974 * @imbalance: returns amount of imbalance due to packing.
532cb4c4
MN
2975 */
2976static int check_asym_packing(struct sched_domain *sd,
2977 struct sd_lb_stats *sds,
2978 int this_cpu, unsigned long *imbalance)
2979{
2980 int busiest_cpu;
2981
2982 if (!(sd->flags & SD_ASYM_PACKING))
2983 return 0;
2984
2985 if (!sds->busiest)
2986 return 0;
2987
2988 busiest_cpu = group_first_cpu(sds->busiest);
2989 if (this_cpu > busiest_cpu)
2990 return 0;
2991
9c3f75cb 2992 *imbalance = DIV_ROUND_CLOSEST(sds->max_load * sds->busiest->sgp->power,
1399fa78 2993 SCHED_POWER_SCALE);
532cb4c4 2994 return 1;
1e3c88bd
PZ
2995}
2996
2997/**
2998 * fix_small_imbalance - Calculate the minor imbalance that exists
2999 * amongst the groups of a sched_domain, during
3000 * load balancing.
3001 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
3002 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
3003 * @imbalance: Variable to store the imbalance.
3004 */
3005static inline void fix_small_imbalance(struct sd_lb_stats *sds,
3006 int this_cpu, unsigned long *imbalance)
3007{
3008 unsigned long tmp, pwr_now = 0, pwr_move = 0;
3009 unsigned int imbn = 2;
dd5feea1 3010 unsigned long scaled_busy_load_per_task;
1e3c88bd
PZ
3011
3012 if (sds->this_nr_running) {
3013 sds->this_load_per_task /= sds->this_nr_running;
3014 if (sds->busiest_load_per_task >
3015 sds->this_load_per_task)
3016 imbn = 1;
3017 } else
3018 sds->this_load_per_task =
3019 cpu_avg_load_per_task(this_cpu);
3020
dd5feea1 3021 scaled_busy_load_per_task = sds->busiest_load_per_task
1399fa78 3022 * SCHED_POWER_SCALE;
9c3f75cb 3023 scaled_busy_load_per_task /= sds->busiest->sgp->power;
dd5feea1
SS
3024
3025 if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
3026 (scaled_busy_load_per_task * imbn)) {
1e3c88bd
PZ
3027 *imbalance = sds->busiest_load_per_task;
3028 return;
3029 }
3030
3031 /*
3032 * OK, we don't have enough imbalance to justify moving tasks,
3033 * however we may be able to increase total CPU power used by
3034 * moving them.
3035 */
3036
9c3f75cb 3037 pwr_now += sds->busiest->sgp->power *
1e3c88bd 3038 min(sds->busiest_load_per_task, sds->max_load);
9c3f75cb 3039 pwr_now += sds->this->sgp->power *
1e3c88bd 3040 min(sds->this_load_per_task, sds->this_load);
1399fa78 3041 pwr_now /= SCHED_POWER_SCALE;
1e3c88bd
PZ
3042
3043 /* Amount of load we'd subtract */
1399fa78 3044 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
9c3f75cb 3045 sds->busiest->sgp->power;
1e3c88bd 3046 if (sds->max_load > tmp)
9c3f75cb 3047 pwr_move += sds->busiest->sgp->power *
1e3c88bd
PZ
3048 min(sds->busiest_load_per_task, sds->max_load - tmp);
3049
3050 /* Amount of load we'd add */
9c3f75cb 3051 if (sds->max_load * sds->busiest->sgp->power <
1399fa78 3052 sds->busiest_load_per_task * SCHED_POWER_SCALE)
9c3f75cb
PZ
3053 tmp = (sds->max_load * sds->busiest->sgp->power) /
3054 sds->this->sgp->power;
1e3c88bd 3055 else
1399fa78 3056 tmp = (sds->busiest_load_per_task * SCHED_POWER_SCALE) /
9c3f75cb
PZ
3057 sds->this->sgp->power;
3058 pwr_move += sds->this->sgp->power *
1e3c88bd 3059 min(sds->this_load_per_task, sds->this_load + tmp);
1399fa78 3060 pwr_move /= SCHED_POWER_SCALE;
1e3c88bd
PZ
3061
3062 /* Move if we gain throughput */
3063 if (pwr_move > pwr_now)
3064 *imbalance = sds->busiest_load_per_task;
3065}
3066
3067/**
3068 * calculate_imbalance - Calculate the amount of imbalance present within the
3069 * groups of a given sched_domain during load balance.
3070 * @sds: statistics of the sched_domain whose imbalance is to be calculated.
3071 * @this_cpu: Cpu for which currently load balance is being performed.
3072 * @imbalance: The variable to store the imbalance.
3073 */
3074static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
3075 unsigned long *imbalance)
3076{
dd5feea1
SS
3077 unsigned long max_pull, load_above_capacity = ~0UL;
3078
3079 sds->busiest_load_per_task /= sds->busiest_nr_running;
3080 if (sds->group_imb) {
3081 sds->busiest_load_per_task =
3082 min(sds->busiest_load_per_task, sds->avg_load);
3083 }
3084
1e3c88bd
PZ
3085 /*
3086 * In the presence of smp nice balancing, certain scenarios can have
3087 * max load less than avg load (as we skip the groups at or below
3088 * their cpu_power, while calculating max_load..)
3089 */
3090 if (sds->max_load < sds->avg_load) {
3091 *imbalance = 0;
3092 return fix_small_imbalance(sds, this_cpu, imbalance);
3093 }
3094
dd5feea1
SS
3095 if (!sds->group_imb) {
3096 /*
3097 * Don't want to pull so many tasks that a group would go idle.
3098 */
3099 load_above_capacity = (sds->busiest_nr_running -
3100 sds->busiest_group_capacity);
3101
1399fa78 3102 load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE);
dd5feea1 3103
9c3f75cb 3104 load_above_capacity /= sds->busiest->sgp->power;
dd5feea1
SS
3105 }
3106
3107 /*
3108 * We're trying to get all the cpus to the average_load, so we don't
3109 * want to push ourselves above the average load, nor do we wish to
3110 * reduce the max loaded cpu below the average load. At the same time,
3111 * we also don't want to reduce the group load below the group capacity
3112 * (so that we can implement power-savings policies etc). Thus we look
3113 * for the minimum possible imbalance.
3114 * Be careful of negative numbers as they'll appear as very large values
3115 * with unsigned longs.
3116 */
3117 max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
1e3c88bd
PZ
3118
3119 /* How much load to actually move to equalise the imbalance */
9c3f75cb
PZ
3120 *imbalance = min(max_pull * sds->busiest->sgp->power,
3121 (sds->avg_load - sds->this_load) * sds->this->sgp->power)
1399fa78 3122 / SCHED_POWER_SCALE;
1e3c88bd
PZ
3123
3124 /*
3125 * if *imbalance is less than the average load per runnable task
25985edc 3126 * there is no guarantee that any tasks will be moved so we'll have
1e3c88bd
PZ
3127 * a think about bumping its value to force at least one task to be
3128 * moved
3129 */
3130 if (*imbalance < sds->busiest_load_per_task)
3131 return fix_small_imbalance(sds, this_cpu, imbalance);
3132
3133}
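A hedged worked example of the formula above, with made-up figures: suppose the domain avg_load is 1024, the busiest group sits at 1536, the local group at 512, both groups have power 1024, and the busiest group carries two tasks above its capacity (load_above_capacity = 2 * 1024 * 1024 / 1024 = 2048). Then max_pull = min(1536 - 1024, 2048) = 512 and the imbalance is min(512 * 1024, (1024 - 512) * 1024) / 1024 = 512, i.e. about half a nice-0 task's weight; since that is typically below busiest_load_per_task, fix_small_imbalance() would then be consulted to decide whether moving a whole task still pays off.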
fab47622 3134
1e3c88bd
PZ
3135/******* find_busiest_group() helpers end here *********************/
3136
3137/**
3138 * find_busiest_group - Returns the busiest group within the sched_domain
3139 * if there is an imbalance. If there isn't an imbalance, and
3140 * the user has opted for power-savings, it returns a group whose
3141 * CPUs can be put to idle by rebalancing those tasks elsewhere, if
3142 * such a group exists.
3143 *
3144 * Also calculates the amount of weighted load which should be moved
3145 * to restore balance.
3146 *
3147 * @sd: The sched_domain whose busiest group is to be returned.
3148 * @this_cpu: The cpu for which load balancing is currently being performed.
3149 * @imbalance: Variable which stores amount of weighted load which should
3150 * be moved to restore balance/put a group to idle.
3151 * @idle: The idle status of this_cpu.
1e3c88bd
PZ
3152 * @cpus: The set of CPUs under consideration for load-balancing.
3153 * @balance: Pointer to a variable indicating if this_cpu
3154 * is the appropriate cpu to perform load balancing at this_level.
3155 *
3156 * Returns: - the busiest group if imbalance exists.
3157 * - If no imbalance and user has opted for power-savings balance,
3158 * return the least loaded group whose CPUs can be
3159 * put to idle by rebalancing its tasks onto our group.
3160 */
3161static struct sched_group *
3162find_busiest_group(struct sched_domain *sd, int this_cpu,
3163 unsigned long *imbalance, enum cpu_idle_type idle,
46e49b38 3164 const struct cpumask *cpus, int *balance)
1e3c88bd
PZ
3165{
3166 struct sd_lb_stats sds;
3167
3168 memset(&sds, 0, sizeof(sds));
3169
3170 /*
3171 * Compute the various statistics relevant for load balancing at
3172 * this level.
3173 */
46e49b38 3174 update_sd_lb_stats(sd, this_cpu, idle, cpus, balance, &sds);
1e3c88bd 3175
cc57aa8f
PZ
3176 /*
3177 * this_cpu is not the appropriate cpu to perform load balancing at
3178 * this level.
1e3c88bd 3179 */
8f190fb3 3180 if (!(*balance))
1e3c88bd
PZ
3181 goto ret;
3182
532cb4c4
MN
3183 if ((idle == CPU_IDLE || idle == CPU_NEWLY_IDLE) &&
3184 check_asym_packing(sd, &sds, this_cpu, imbalance))
3185 return sds.busiest;
3186
cc57aa8f 3187 /* There is no busy sibling group to pull tasks from */
1e3c88bd
PZ
3188 if (!sds.busiest || sds.busiest_nr_running == 0)
3189 goto out_balanced;
3190
1399fa78 3191 sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_pwr;
b0432d8f 3192
866ab43e
PZ
3193 /*
3194 * If the busiest group is imbalanced the below checks don't
3195 * work because they assume all things are equal, which typically
3196 * isn't true due to cpus_allowed constraints and the like.
3197 */
3198 if (sds.group_imb)
3199 goto force_balance;
3200
cc57aa8f 3201 /* SD_BALANCE_NEWIDLE trumps SMP nice when underutilized */
fab47622
NR
3202 if (idle == CPU_NEWLY_IDLE && sds.this_has_capacity &&
3203 !sds.busiest_has_capacity)
3204 goto force_balance;
3205
cc57aa8f
PZ
3206 /*
3207 * If the local group is more busy than the selected busiest group
3208 * don't try and pull any tasks.
3209 */
1e3c88bd
PZ
3210 if (sds.this_load >= sds.max_load)
3211 goto out_balanced;
3212
cc57aa8f
PZ
3213 /*
3214 * Don't pull any tasks if this group is already above the domain
3215 * average load.
3216 */
1e3c88bd
PZ
3217 if (sds.this_load >= sds.avg_load)
3218 goto out_balanced;
3219
c186fafe 3220 if (idle == CPU_IDLE) {
aae6d3dd
SS
3221 /*
3222 * This cpu is idle. If the busiest group doesn't
3223 * have more tasks than the number of available cpu's and
3224 * there is no imbalance between this and the busiest group
3225 * wrt idle cpu's, it is balanced.
3226 */
c186fafe 3227 if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
aae6d3dd
SS
3228 sds.busiest_nr_running <= sds.busiest_group_weight)
3229 goto out_balanced;
c186fafe
PZ
3230 } else {
3231 /*
3232 * In the CPU_NEWLY_IDLE, CPU_NOT_IDLE cases, use
3233 * imbalance_pct to be conservative.
3234 */
3235 if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
3236 goto out_balanced;
aae6d3dd 3237 }
1e3c88bd 3238
fab47622 3239force_balance:
1e3c88bd
PZ
3240 /* Looks like there is an imbalance. Compute it */
3241 calculate_imbalance(&sds, this_cpu, imbalance);
3242 return sds.busiest;
3243
3244out_balanced:
3245 /*
3246 * There is no obvious imbalance. But check if we can do some balancing
3247 * to save power.
3248 */
3249 if (check_power_save_busiest_group(&sds, this_cpu, imbalance))
3250 return sds.busiest;
3251ret:
3252 *imbalance = 0;
3253 return NULL;
3254}
3255
3256/*
3257 * find_busiest_queue - find the busiest runqueue among the cpus in group.
3258 */
3259static struct rq *
9d5efe05
SV
3260find_busiest_queue(struct sched_domain *sd, struct sched_group *group,
3261 enum cpu_idle_type idle, unsigned long imbalance,
3262 const struct cpumask *cpus)
1e3c88bd
PZ
3263{
3264 struct rq *busiest = NULL, *rq;
3265 unsigned long max_load = 0;
3266 int i;
3267
3268 for_each_cpu(i, sched_group_cpus(group)) {
3269 unsigned long power = power_of(i);
1399fa78
NR
3270 unsigned long capacity = DIV_ROUND_CLOSEST(power,
3271 SCHED_POWER_SCALE);
1e3c88bd
PZ
3272 unsigned long wl;
3273
9d5efe05
SV
3274 if (!capacity)
3275 capacity = fix_small_capacity(sd, group);
3276
1e3c88bd
PZ
3277 if (!cpumask_test_cpu(i, cpus))
3278 continue;
3279
3280 rq = cpu_rq(i);
6e40f5bb 3281 wl = weighted_cpuload(i);
1e3c88bd 3282
6e40f5bb
TG
3283 /*
3284 * When comparing with imbalance, use weighted_cpuload()
3285 * which is not scaled with the cpu power.
3286 */
1e3c88bd
PZ
3287 if (capacity && rq->nr_running == 1 && wl > imbalance)
3288 continue;
3289
6e40f5bb
TG
3290 /*
3291 * For the load comparisons with the other cpu's, consider
3292 * the weighted_cpuload() scaled with the cpu power, so that
3293 * the load can be moved away from the cpu that is potentially
3294 * running at a lower capacity.
3295 */
1399fa78 3296 wl = (wl * SCHED_POWER_SCALE) / power;
6e40f5bb 3297
1e3c88bd
PZ
3298 if (wl > max_load) {
3299 max_load = wl;
3300 busiest = rq;
3301 }
3302 }
3303
3304 return busiest;
3305}
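A hedged worked example of the power scaling above, with made-up figures: cpu A has power 1024 and weighted load 1024, so its scaled wl stays 1024; cpu B has power 512 (heavy RT/IRQ pressure) and weighted load 768, so its scaled wl becomes 768 * 1024 / 512 = 1536. B is picked as the busiest queue even though its raw load is lower, because the same weight represents more pressure on a weaker cpu. Note also the earlier check: a cpu running a single task whose raw weighted load already exceeds the requested imbalance is skipped, since moving that one task would overshoot.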
3306
3307/*
3308 * Max backoff if we encounter pinned tasks. Pretty arbitrary value, but
3309 * so long as it is large enough.
3310 */
3311#define MAX_PINNED_INTERVAL 512
3312
3313/* Working cpumask for load_balance and load_balance_newidle. */
3314static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
3315
46e49b38 3316static int need_active_balance(struct sched_domain *sd, int idle,
532cb4c4 3317 int busiest_cpu, int this_cpu)
1af3ed3d
PZ
3318{
3319 if (idle == CPU_NEWLY_IDLE) {
532cb4c4
MN
3320
3321 /*
3322 * ASYM_PACKING needs to force migrate tasks from busy but
3323 * higher numbered CPUs in order to pack all tasks in the
3324 * lowest numbered CPUs.
3325 */
3326 if ((sd->flags & SD_ASYM_PACKING) && busiest_cpu > this_cpu)
3327 return 1;
3328
1af3ed3d
PZ
3329 /*
3330 * The only task running in a non-idle cpu can be moved to this
3331 * cpu in an attempt to completely free up the other CPU
3332 * package.
3333 *
3334 * The package power saving logic comes from
3335 * find_busiest_group(). If there is no imbalance, then
3336 * f_b_g() will return NULL. However when sched_mc={1,2} then
3337 * f_b_g() will select a group from which a running task may be
3338 * pulled to this cpu in order to make the other package idle.
3339 * If there is no opportunity to make a package idle and if
3340 * there is no imbalance, then f_b_g() will return NULL and no
3341 * action will be taken in load_balance_newidle().
3342 *
3343 * Under normal task pull operation due to imbalance, there
3344 * will be more than one task in the source run queue and
3345 * move_tasks() will succeed. ld_moved will be true and this
3346 * active balance code will not be triggered.
3347 */
1af3ed3d
PZ
3348 if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
3349 return 0;
3350 }
3351
3352 return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
3353}
3354
969c7921
TH
3355static int active_load_balance_cpu_stop(void *data);
3356
1e3c88bd
PZ
3357/*
3358 * Check this_cpu to ensure it is balanced within domain. Attempt to move
3359 * tasks if there is an imbalance.
3360 */
3361static int load_balance(int this_cpu, struct rq *this_rq,
3362 struct sched_domain *sd, enum cpu_idle_type idle,
3363 int *balance)
3364{
46e49b38 3365 int ld_moved, all_pinned = 0, active_balance = 0;
1e3c88bd
PZ
3366 struct sched_group *group;
3367 unsigned long imbalance;
3368 struct rq *busiest;
3369 unsigned long flags;
3370 struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
3371
3372 cpumask_copy(cpus, cpu_active_mask);
3373
1e3c88bd
PZ
3374 schedstat_inc(sd, lb_count[idle]);
3375
3376redo:
46e49b38 3377 group = find_busiest_group(sd, this_cpu, &imbalance, idle,
1e3c88bd
PZ
3378 cpus, balance);
3379
3380 if (*balance == 0)
3381 goto out_balanced;
3382
3383 if (!group) {
3384 schedstat_inc(sd, lb_nobusyg[idle]);
3385 goto out_balanced;
3386 }
3387
9d5efe05 3388 busiest = find_busiest_queue(sd, group, idle, imbalance, cpus);
1e3c88bd
PZ
3389 if (!busiest) {
3390 schedstat_inc(sd, lb_nobusyq[idle]);
3391 goto out_balanced;
3392 }
3393
3394 BUG_ON(busiest == this_rq);
3395
3396 schedstat_add(sd, lb_imbalance[idle], imbalance);
3397
3398 ld_moved = 0;
3399 if (busiest->nr_running > 1) {
3400 /*
3401 * Attempt to move tasks. If find_busiest_group has found
3402 * an imbalance but busiest->nr_running <= 1, the group is
3403 * still unbalanced. ld_moved simply stays zero, so it is
3404 * correctly treated as an imbalance.
3405 */
b30aef17 3406 all_pinned = 1;
1e3c88bd
PZ
3407 local_irq_save(flags);
3408 double_rq_lock(this_rq, busiest);
3409 ld_moved = move_tasks(this_rq, this_cpu, busiest,
3410 imbalance, sd, idle, &all_pinned);
3411 double_rq_unlock(this_rq, busiest);
3412 local_irq_restore(flags);
3413
3414 /*
3415 * some other cpu did the load balance for us.
3416 */
3417 if (ld_moved && this_cpu != smp_processor_id())
3418 resched_cpu(this_cpu);
3419
3420 /* All tasks on this runqueue were pinned by CPU affinity */
3421 if (unlikely(all_pinned)) {
3422 cpumask_clear_cpu(cpu_of(busiest), cpus);
3423 if (!cpumask_empty(cpus))
3424 goto redo;
3425 goto out_balanced;
3426 }
3427 }
3428
3429 if (!ld_moved) {
3430 schedstat_inc(sd, lb_failed[idle]);
58b26c4c
VP
3431 /*
3432 * Increment the failure counter only on periodic balance.
3433 * We do not want newidle balance, which can be very
3434 * frequent, to pollute the failure counter, causing
3435 * excessive cache_hot migrations and active balances.
3436 */
3437 if (idle != CPU_NEWLY_IDLE)
3438 sd->nr_balance_failed++;
1e3c88bd 3439
46e49b38 3440 if (need_active_balance(sd, idle, cpu_of(busiest), this_cpu)) {
1e3c88bd
PZ
3441 raw_spin_lock_irqsave(&busiest->lock, flags);
3442
969c7921
TH
3443 /* don't kick the active_load_balance_cpu_stop,
3444 * if the curr task on busiest cpu can't be
3445 * moved to this_cpu
1e3c88bd
PZ
3446 */
3447 if (!cpumask_test_cpu(this_cpu,
3448 &busiest->curr->cpus_allowed)) {
3449 raw_spin_unlock_irqrestore(&busiest->lock,
3450 flags);
3451 all_pinned = 1;
3452 goto out_one_pinned;
3453 }
3454
969c7921
TH
3455 /*
3456 * ->active_balance synchronizes accesses to
3457 * ->active_balance_work. Once set, it's cleared
3458 * only after active load balance is finished.
3459 */
1e3c88bd
PZ
3460 if (!busiest->active_balance) {
3461 busiest->active_balance = 1;
3462 busiest->push_cpu = this_cpu;
3463 active_balance = 1;
3464 }
3465 raw_spin_unlock_irqrestore(&busiest->lock, flags);
969c7921 3466
1e3c88bd 3467 if (active_balance)
969c7921
TH
3468 stop_one_cpu_nowait(cpu_of(busiest),
3469 active_load_balance_cpu_stop, busiest,
3470 &busiest->active_balance_work);
1e3c88bd
PZ
3471
3472 /*
3473 * We've kicked active balancing, reset the failure
3474 * counter.
3475 */
3476 sd->nr_balance_failed = sd->cache_nice_tries+1;
3477 }
3478 } else
3479 sd->nr_balance_failed = 0;
3480
3481 if (likely(!active_balance)) {
3482 /* We were unbalanced, so reset the balancing interval */
3483 sd->balance_interval = sd->min_interval;
3484 } else {
3485 /*
3486 * If we've begun active balancing, start to back off. This
3487 * case may not be covered by the all_pinned logic if there
3488 * is only 1 task on the busy runqueue (because we don't call
3489 * move_tasks).
3490 */
3491 if (sd->balance_interval < sd->max_interval)
3492 sd->balance_interval *= 2;
3493 }
3494
1e3c88bd
PZ
3495 goto out;
3496
3497out_balanced:
3498 schedstat_inc(sd, lb_balanced[idle]);
3499
3500 sd->nr_balance_failed = 0;
3501
3502out_one_pinned:
3503 /* tune up the balancing interval */
3504 if ((all_pinned && sd->balance_interval < MAX_PINNED_INTERVAL) ||
3505 (sd->balance_interval < sd->max_interval))
3506 sd->balance_interval *= 2;
3507
46e49b38 3508 ld_moved = 0;
1e3c88bd 3509out:
1e3c88bd
PZ
3510 return ld_moved;
3511}
3512
1e3c88bd
PZ
3513/*
3514 * idle_balance is called by schedule() if this_cpu is about to become
3515 * idle. Attempts to pull tasks from other CPUs.
3516 */
3517static void idle_balance(int this_cpu, struct rq *this_rq)
3518{
3519 struct sched_domain *sd;
3520 int pulled_task = 0;
3521 unsigned long next_balance = jiffies + HZ;
3522
3523 this_rq->idle_stamp = this_rq->clock;
3524
3525 if (this_rq->avg_idle < sysctl_sched_migration_cost)
3526 return;
3527
f492e12e
PZ
3528 /*
3529 * Drop the rq->lock, but keep IRQ/preempt disabled.
3530 */
3531 raw_spin_unlock(&this_rq->lock);
3532
c66eaf61 3533 update_shares(this_cpu);
dce840a0 3534 rcu_read_lock();
1e3c88bd
PZ
3535 for_each_domain(this_cpu, sd) {
3536 unsigned long interval;
f492e12e 3537 int balance = 1;
1e3c88bd
PZ
3538
3539 if (!(sd->flags & SD_LOAD_BALANCE))
3540 continue;
3541
3542 if (sd->flags & SD_BALANCE_NEWIDLE) {
3543 /* If we've pulled tasks over, stop searching: */
3544 pulled_task = load_balance(this_cpu, this_rq,
3545 sd, CPU_NEWLY_IDLE, &balance);
3546 }
3547
3548 interval = msecs_to_jiffies(sd->balance_interval);
3549 if (time_after(next_balance, sd->last_balance + interval))
3550 next_balance = sd->last_balance + interval;
3551 if (pulled_task) {
3552 this_rq->idle_stamp = 0;
3553 break;
3554 }
3555 }
3556 rcu_read_unlock();
3557
3558 raw_spin_lock(&this_rq->lock);
3559
3560 if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
3561 /*
3562 * We are going idle. next_balance may be set based on
3563 * a busy processor. So reset next_balance.
3564 */
3565 this_rq->next_balance = next_balance;
3566 }
3567}
3568
3569/*
3570 * active_load_balance_cpu_stop is run by cpu stopper. It pushes
3571 * running tasks off the busiest CPU onto idle CPUs. It requires at
3572 * least 1 task to be running on each physical CPU where possible, and
3573 * avoids physical / logical imbalances.
3574 */
3575static int active_load_balance_cpu_stop(void *data)
3576{
3577 struct rq *busiest_rq = data;
3578 int busiest_cpu = cpu_of(busiest_rq);
3579 int target_cpu = busiest_rq->push_cpu;
3580 struct rq *target_rq = cpu_rq(target_cpu);
3581 struct sched_domain *sd;
3582
3583 raw_spin_lock_irq(&busiest_rq->lock);
3584
3585 /* make sure the requested cpu hasn't gone down in the meantime */
3586 if (unlikely(busiest_cpu != smp_processor_id() ||
3587 !busiest_rq->active_balance))
3588 goto out_unlock;
3589
3590 /* Is there any task to move? */
3591 if (busiest_rq->nr_running <= 1)
3592 goto out_unlock;
3593
3594 /*
3595 * This condition is "impossible", if it occurs
3596 * we need to fix it. Originally reported by
3597 * Bjorn Helgaas on a 128-cpu setup.
3598 */
3599 BUG_ON(busiest_rq == target_rq);
3600
3601 /* move a task from busiest_rq to target_rq */
3602 double_lock_balance(busiest_rq, target_rq);
3603
3604 /* Search for an sd spanning us and the target CPU. */
3605 rcu_read_lock();
3606 for_each_domain(target_cpu, sd) {
3607 if ((sd->flags & SD_LOAD_BALANCE) &&
3608 cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
3609 break;
3610 }
3611
3612 if (likely(sd)) {
3613 schedstat_inc(sd, alb_count);
3614
3615 if (move_one_task(target_rq, target_cpu, busiest_rq,
3616 sd, CPU_IDLE))
3617 schedstat_inc(sd, alb_pushed);
3618 else
3619 schedstat_inc(sd, alb_failed);
3620 }
3621 rcu_read_unlock();
3622 double_unlock_balance(busiest_rq, target_rq);
3623out_unlock:
3624 busiest_rq->active_balance = 0;
3625 raw_spin_unlock_irq(&busiest_rq->lock);
3626 return 0;
3627}
3628
3629#ifdef CONFIG_NO_HZ
3630
3631static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb);
3632
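/*
 * Runs on the remote CPU targeted by nohz_balancer_kick() (sent via
 * __smp_call_function_single) and raises SCHED_SOFTIRQ there, so that
 * run_rebalance_domains() performs the idle load balancing pass.
 */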
3633static void trigger_sched_softirq(void *data)
3634{
3635 raise_softirq_irqoff(SCHED_SOFTIRQ);
3636}
3637
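/* Initialize the per-cpu call_single_data used to kick a remote CPU. */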
3638static inline void init_sched_softirq_csd(struct call_single_data *csd)
3639{
3640 csd->func = trigger_sched_softirq;
3641 csd->info = NULL;
3642 csd->flags = 0;
3643 csd->priv = 0;
3644}
3645
3646/*
3647 * idle load balancing details
3648 * - One of the idle CPUs nominates itself as idle load_balancer, while
3649 * entering idle.
3650 * - This idle load balancer CPU will also go into tickless mode when
3651 * it is idle, just like all other idle CPUs.
3652 * - When one of the busy CPUs notices that idle rebalancing may be
3653 * needed, it will kick the idle load balancer, which then does idle
3654 * load balancing for all the idle CPUs.
3655 */
3656static struct {
3657 atomic_t load_balancer;
3658 atomic_t first_pick_cpu;
3659 atomic_t second_pick_cpu;
3660 cpumask_var_t idle_cpus_mask;
3661 cpumask_var_t grp_idle_mask;
3662 unsigned long next_balance; /* in jiffy units */
3663} nohz ____cacheline_aligned;
3664
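/*
 * Return the cpu currently nominated as the idle load balancer, or a
 * value >= nr_cpu_ids when no cpu is nominated.
 */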
3665int get_nohz_load_balancer(void)
3666{
3667 return atomic_read(&nohz.load_balancer);
3668}
3669
3670#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
3671/**
3672 * lowest_flag_domain - Return lowest sched_domain containing flag.
3673 * @cpu: The cpu whose lowest level of sched domain is to
3674 * be returned.
3675 * @flag: The flag to check for the lowest sched_domain
3676 * for the given cpu.
3677 *
3678 * Returns the lowest sched_domain of a cpu which contains the given flag.
3679 */
3680static inline struct sched_domain *lowest_flag_domain(int cpu, int flag)
3681{
3682 struct sched_domain *sd;
3683
3684 for_each_domain(cpu, sd)
3685 if (sd->flags & flag)
3686 break;
3687
3688 return sd;
3689}
3690
3691/**
3692 * for_each_flag_domain - Iterates over sched_domains containing the flag.
3693 * @cpu: The cpu whose domains we're iterating over.
3694 * @sd: variable holding the value of the power_savings_sd
3695 * for cpu.
3696 * @flag: The flag to filter the sched_domains to be iterated.
3697 *
3698 * Iterates over all the scheduler domains for a given cpu that has the 'flag'
3699 * set, starting from the lowest sched_domain to the highest.
3700 */
3701#define for_each_flag_domain(cpu, sd, flag) \
3702 for (sd = lowest_flag_domain(cpu, flag); \
3703 (sd && (sd->flags & flag)); sd = sd->parent)
3704
3705/**
3706 * is_semi_idle_group - Checks if the given sched_group is semi-idle.
3707 * @ilb_group: group to be checked for semi-idleness
3708 *
3709 * Returns: 1 if the group is semi-idle. 0 otherwise.
3710 *
3711 * We define a sched_group to be semi-idle if it has at least one idle CPU
3712 * and at least one non-idle CPU. This helper function checks if the given
3713 * sched_group is semi-idle or not.
3714 */
3715static inline int is_semi_idle_group(struct sched_group *ilb_group)
3716{
3717 cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
3718 sched_group_cpus(ilb_group));
3719
3720 /*
3721 * A sched_group is semi-idle when it has at least one busy cpu
3722 * and at least one idle cpu.
3723 */
3724 if (cpumask_empty(nohz.grp_idle_mask))
3725 return 0;
3726
3727 if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
3728 return 0;
3729
3730 return 1;
3731}
3732/**
3733 * find_new_ilb - Finds the optimum idle load balancer for nomination.
3734 * @cpu: The cpu which is nominating a new idle_load_balancer.
3735 *
3736 * Returns: the id of the idle load balancer if it exists,
3737 * else a value >= nr_cpu_ids.
3738 *
3739 * This algorithm picks the idle load balancer such that it belongs to a
3740 * semi-idle powersavings sched_domain. The idea is to try and avoid
3741 * completely idle packages/cores just for the purpose of idle load balancing
3742 * when there are other idle cpus which are better suited for that job.
3743 */
3744static int find_new_ilb(int cpu)
3745{
3746 struct sched_domain *sd;
3747 struct sched_group *ilb_group;
3748 int ilb = nr_cpu_ids;
3749
3750 /*
3751 * Have idle load balancer selection from semi-idle packages only
3752 * when power-aware load balancing is enabled
3753 */
3754 if (!(sched_smt_power_savings || sched_mc_power_savings))
3755 goto out_done;
3756
3757 /*
3758 * Optimize for the case when we have no idle CPUs or only one
3759 * idle CPU. Don't walk the sched_domain hierarchy in such cases
3760 */
3761 if (cpumask_weight(nohz.idle_cpus_mask) < 2)
3762 goto out_done;
3763
3764 rcu_read_lock();
3765 for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
3766 ilb_group = sd->groups;
3767
3768 do {
3769 if (is_semi_idle_group(ilb_group)) {
3770 ilb = cpumask_first(nohz.grp_idle_mask);
3771 goto unlock;
3772 }
3773
3774 ilb_group = ilb_group->next;
3775
3776 } while (ilb_group != sd->groups);
3777 }
3778unlock:
3779 rcu_read_unlock();
3780
3781out_done:
3782 return ilb;
3783}
3784#else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
3785static inline int find_new_ilb(int call_cpu)
3786{
3787 return nr_cpu_ids;
3788}
3789#endif
3790
3791/*
3792 * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
3793 * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
3794 * CPU (if there is one).
3795 */
3796static void nohz_balancer_kick(int cpu)
3797{
3798 int ilb_cpu;
3799
3800 nohz.next_balance++;
3801
3802 ilb_cpu = get_nohz_load_balancer();
3803
3804 if (ilb_cpu >= nr_cpu_ids) {
3805 ilb_cpu = cpumask_first(nohz.idle_cpus_mask);
3806 if (ilb_cpu >= nr_cpu_ids)
3807 return;
3808 }
3809
3810 if (!cpu_rq(ilb_cpu)->nohz_balance_kick) {
3811 struct call_single_data *cp;
3812
3813 cpu_rq(ilb_cpu)->nohz_balance_kick = 1;
3814 cp = &per_cpu(remote_sched_softirq_cb, cpu);
3815 __smp_call_function_single(ilb_cpu, cp, 0);
3816 }
3817 return;
3818}
3819
3820/*
3821 * This routine will try to nominate the ilb (idle load balancing)
3822 * owner among the cpus whose ticks are stopped. The ilb owner will do the
3823 * idle load balancing on behalf of all those cpus.
3824 *
3825 * When the ilb owner becomes busy, we will not have a new ilb owner until
3826 * some idle CPU wakes up and goes back to idle or some busy CPU tries to
3827 * kick idle load balancing by kicking one of the idle CPUs.
3828 *
3829 * Ticks are stopped for the ilb owner as well, with a busy CPU kicking this
3830 * ilb owner CPU in the future (when there is a need for idle load balancing
3831 * on behalf of all idle CPUs).
3832 */
3833void select_nohz_load_balancer(int stop_tick)
3834{
3835 int cpu = smp_processor_id();
3836
3837 if (stop_tick) {
3838 if (!cpu_active(cpu)) {
3839 if (atomic_read(&nohz.load_balancer) != cpu)
3840 return;
3841
3842 /*
3843 * If we are going offline and still the leader,
3844 * give up!
3845 */
3846 if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3847 nr_cpu_ids) != cpu)
3848 BUG();
3849
3850 return;
3851 }
3852
3853 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
3854
3855 if (atomic_read(&nohz.first_pick_cpu) == cpu)
3856 atomic_cmpxchg(&nohz.first_pick_cpu, cpu, nr_cpu_ids);
3857 if (atomic_read(&nohz.second_pick_cpu) == cpu)
3858 atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
3859
3860 if (atomic_read(&nohz.load_balancer) >= nr_cpu_ids) {
3861 int new_ilb;
3862
3863 /* make me the ilb owner */
3864 if (atomic_cmpxchg(&nohz.load_balancer, nr_cpu_ids,
3865 cpu) != nr_cpu_ids)
3866 return;
3867
3868 /*
3869 * Check to see if there is a more power-efficient
3870 * ilb.
3871 */
3872 new_ilb = find_new_ilb(cpu);
3873 if (new_ilb < nr_cpu_ids && new_ilb != cpu) {
3874 atomic_set(&nohz.load_balancer, nr_cpu_ids);
3875 resched_cpu(new_ilb);
3876 return;
3877 }
3878 return;
3879 }
3880 } else {
3881 if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
3882 return;
3883
3884 cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
3885
3886 if (atomic_read(&nohz.load_balancer) == cpu)
3887 if (atomic_cmpxchg(&nohz.load_balancer, cpu,
3888 nr_cpu_ids) != cpu)
3889 BUG();
3890 }
3891 return;
3892}
3893#endif
3894
3895static DEFINE_SPINLOCK(balancing);
3896
3897static unsigned long __read_mostly max_load_balance_interval = HZ/10;
3898
3899/*
3900 * Scale the max load_balance interval with the number of CPUs in the system.
3901 * This trades load-balance latency on larger machines for less cross talk.
3902 */
3903static void update_max_interval(void)
3904{
3905 max_load_balance_interval = HZ*num_online_cpus()/10;
3906}
3907
3908/*
3909 * It checks each scheduling domain to see if it is due to be balanced,
3910 * and initiates a balancing operation if so.
3911 *
3912 * Balancing parameters are set up in arch_init_sched_domains.
3913 */
3914static void rebalance_domains(int cpu, enum cpu_idle_type idle)
3915{
3916 int balance = 1;
3917 struct rq *rq = cpu_rq(cpu);
3918 unsigned long interval;
3919 struct sched_domain *sd;
3920 /* Earliest time when we have to do rebalance again */
3921 unsigned long next_balance = jiffies + 60*HZ;
3922 int update_next_balance = 0;
3923 int need_serialize;
3924
3925 update_shares(cpu);
3926
3927 rcu_read_lock();
3928 for_each_domain(cpu, sd) {
3929 if (!(sd->flags & SD_LOAD_BALANCE))
3930 continue;
3931
3932 interval = sd->balance_interval;
3933 if (idle != CPU_IDLE)
3934 interval *= sd->busy_factor;
3935
3936 /* scale ms to jiffies */
3937 interval = msecs_to_jiffies(interval);
3938 interval = clamp(interval, 1UL, max_load_balance_interval);
3939
3940 need_serialize = sd->flags & SD_SERIALIZE;
3941
3942 if (need_serialize) {
3943 if (!spin_trylock(&balancing))
3944 goto out;
3945 }
3946
3947 if (time_after_eq(jiffies, sd->last_balance + interval)) {
3948 if (load_balance(cpu, rq, sd, idle, &balance)) {
3949 /*
3950 * We've pulled tasks over, so we're no
3951 * longer idle.
3952 */
3953 idle = CPU_NOT_IDLE;
3954 }
3955 sd->last_balance = jiffies;
3956 }
3957 if (need_serialize)
3958 spin_unlock(&balancing);
3959out:
3960 if (time_after(next_balance, sd->last_balance + interval)) {
3961 next_balance = sd->last_balance + interval;
3962 update_next_balance = 1;
3963 }
3964
3965 /*
3966 * Stop the load balance at this level. There is another
3967 * CPU in our sched group which is doing load balancing more
3968 * actively.
3969 */
3970 if (!balance)
3971 break;
3972 }
3973 rcu_read_unlock();
3974
3975 /*
3976 * next_balance will be updated only when there is a need.
3977 * When the cpu is attached to null domain for ex, it will not be
3978 * updated.
3979 */
3980 if (likely(update_next_balance))
3981 rq->next_balance = next_balance;
3982}
3983
3984#ifdef CONFIG_NO_HZ
3985/*
3986 * In the CONFIG_NO_HZ case, the CPU that was kicked for idle balance does
3987 * the rebalancing for all the cpus whose scheduler ticks are stopped.
3988 */
3989static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle)
3990{
3991 struct rq *this_rq = cpu_rq(this_cpu);
3992 struct rq *rq;
3993 int balance_cpu;
3994
3995 if (idle != CPU_IDLE || !this_rq->nohz_balance_kick)
3996 return;
3997
3998 for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
3999 if (balance_cpu == this_cpu)
4000 continue;
4001
4002 /*
4003 * If this cpu gets work to do, stop the load balancing
4004 * work being done for other cpus. Next load
4005 * balancing owner will pick it up.
4006 */
4007 if (need_resched()) {
4008 this_rq->nohz_balance_kick = 0;
4009 break;
4010 }
4011
4012 raw_spin_lock_irq(&this_rq->lock);
4013 update_rq_clock(this_rq);
4014 update_cpu_load(this_rq);
4015 raw_spin_unlock_irq(&this_rq->lock);
4016
4017 rebalance_domains(balance_cpu, CPU_IDLE);
4018
4019 rq = cpu_rq(balance_cpu);
4020 if (time_after(this_rq->next_balance, rq->next_balance))
4021 this_rq->next_balance = rq->next_balance;
4022 }
4023 nohz.next_balance = this_rq->next_balance;
4024 this_rq->nohz_balance_kick = 0;
4025}
4026
4027/*
4028 * Current heuristic for kicking the idle load balancer
4029 * - first_pick_cpu is one of the busy CPUs. It will kick
4030 * idle load balancer when it has more than one process active. This
4031 * eliminates the need for idle load balancing altogether when we have
4032 * only one running process in the system (common case).
4033 * - If there is more than one busy CPU, the idle load balancer may have
4034 * to run for active_load_balance to happen (i.e., two busy CPUs are
4035 * SMT or core siblings and can run better if they move to different
4036 * physical CPUs). So, second_pick_cpu is the second of the busy CPUs
4037 * which will kick idle load balancer as soon as it has any load.
4038 */
4039static inline int nohz_kick_needed(struct rq *rq, int cpu)
4040{
4041 unsigned long now = jiffies;
4042 int ret;
4043 int first_pick_cpu, second_pick_cpu;
4044
4045 if (time_before(now, nohz.next_balance))
4046 return 0;
4047
4048 if (rq->idle_at_tick)
4049 return 0;
4050
4051 first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
4052 second_pick_cpu = atomic_read(&nohz.second_pick_cpu);
4053
4054 if (first_pick_cpu < nr_cpu_ids && first_pick_cpu != cpu &&
4055 second_pick_cpu < nr_cpu_ids && second_pick_cpu != cpu)
4056 return 0;
4057
4058 ret = atomic_cmpxchg(&nohz.first_pick_cpu, nr_cpu_ids, cpu);
4059 if (ret == nr_cpu_ids || ret == cpu) {
4060 atomic_cmpxchg(&nohz.second_pick_cpu, cpu, nr_cpu_ids);
4061 if (rq->nr_running > 1)
4062 return 1;
4063 } else {
4064 ret = atomic_cmpxchg(&nohz.second_pick_cpu, nr_cpu_ids, cpu);
4065 if (ret == nr_cpu_ids || ret == cpu) {
4066 if (rq->nr_running)
4067 return 1;
4068 }
4069 }
4070 return 0;
4071}
4072#else
4073static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
4074#endif
4075
4076/*
4077 * run_rebalance_domains is triggered when needed from the scheduler tick.
4078 * Also triggered for nohz idle balancing (with nohz_balance_kick set).
4079 */
4080static void run_rebalance_domains(struct softirq_action *h)
4081{
4082 int this_cpu = smp_processor_id();
4083 struct rq *this_rq = cpu_rq(this_cpu);
4084 enum cpu_idle_type idle = this_rq->idle_at_tick ?
4085 CPU_IDLE : CPU_NOT_IDLE;
4086
4087 rebalance_domains(this_cpu, idle);
4088
4089 /*
4090 * If this cpu has a pending nohz_balance_kick, then do the
4091 * balancing on behalf of the other idle cpus whose ticks are
4092 * stopped.
4093 */
4094 nohz_idle_balance(this_cpu, idle);
4095}
4096
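/*
 * Returns true if the cpu is attached to the NULL scheduler domain and
 * therefore takes no part in load balancing.
 */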
4097static inline int on_null_domain(int cpu)
4098{
4099 return !rcu_dereference_sched(cpu_rq(cpu)->sd);
4100}
4101
4102/*
4103 * Trigger the SCHED_SOFTIRQ if it is time to do periodic load balancing.
4104 */
4105static inline void trigger_load_balance(struct rq *rq, int cpu)
4106{
4107 /* Don't need to rebalance while attached to NULL domain */
4108 if (time_after_eq(jiffies, rq->next_balance) &&
4109 likely(!on_null_domain(cpu)))
4110 raise_softirq(SCHED_SOFTIRQ);
4111#ifdef CONFIG_NO_HZ
4112 else if (nohz_kick_needed(rq, cpu) && likely(!on_null_domain(cpu)))
4113 nohz_balancer_kick(cpu);
4114#endif
4115}
4116
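/*
 * A runqueue went online/offline: re-evaluate the scheduler tunables,
 * which are scaled by the number of online cpus.
 */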
4117static void rq_online_fair(struct rq *rq)
4118{
4119 update_sysctl();
4120}
4121
4122static void rq_offline_fair(struct rq *rq)
4123{
4124 update_sysctl();
4125}
4126
4127#else /* CONFIG_SMP */
4128
4129/*
4130 * on UP we do not need to balance between CPUs:
4131 */
4132static inline void idle_balance(int cpu, struct rq *rq)
4133{
4134}
4135
4136#endif /* CONFIG_SMP */
4137
4138/*
4139 * scheduler tick hitting a task of our scheduling class:
4140 */
4141static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
4142{
4143 struct cfs_rq *cfs_rq;
4144 struct sched_entity *se = &curr->se;
4145
4146 for_each_sched_entity(se) {
4147 cfs_rq = cfs_rq_of(se);
4148 entity_tick(cfs_rq, se, queued);
4149 }
4150}
4151
4152/*
4153 * called on fork with the child task as argument from the parent's context
4154 * - child not yet on the tasklist
4155 * - preemption disabled
4156 */
4157static void task_fork_fair(struct task_struct *p)
4158{
4159 struct cfs_rq *cfs_rq = task_cfs_rq(current);
4160 struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
4161 int this_cpu = smp_processor_id();
4162 struct rq *rq = this_rq();
4163 unsigned long flags;
4164
4165 raw_spin_lock_irqsave(&rq->lock, flags);
4166
4167 update_rq_clock(rq);
4168
4169 if (unlikely(task_cpu(p) != this_cpu)) {
4170 rcu_read_lock();
4171 __set_task_cpu(p, this_cpu);
4172 rcu_read_unlock();
4173 }
4174
4175 update_curr(cfs_rq);
4176
4177 if (curr)
4178 se->vruntime = curr->vruntime;
4179 place_entity(cfs_rq, se, 1);
4180
4181 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) {
4182 /*
4183 * Upon rescheduling, sched_class::put_prev_task() will place
4184 * 'current' within the tree based on its new key value.
4185 */
4186 swap(curr->vruntime, se->vruntime);
4187 resched_task(rq->curr);
4188 }
4189
4190 se->vruntime -= cfs_rq->min_vruntime;
4191
4192 raw_spin_unlock_irqrestore(&rq->lock, flags);
4193}
4194
4195/*
4196 * Priority of the task has changed. Check to see if we preempt
4197 * the current task.
4198 */
4199static void
4200prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
4201{
4202 if (!p->se.on_rq)
4203 return;
4204
4205 /*
4206 * Reschedule if we are currently running on this runqueue and
4207 * our priority decreased, or if we are not currently running on
4208 * this runqueue and our priority is higher than the current's
4209 */
4210 if (rq->curr == p) {
4211 if (p->prio > oldprio)
4212 resched_task(rq->curr);
4213 } else
4214 check_preempt_curr(rq, p, 0);
4215}
4216
4217static void switched_from_fair(struct rq *rq, struct task_struct *p)
4218{
4219 struct sched_entity *se = &p->se;
4220 struct cfs_rq *cfs_rq = cfs_rq_of(se);
4221
4222 /*
4223 * Ensure the task's vruntime is normalized, so that when it's
4224 * switched back to the fair class the enqueue_entity(.flags=0) will
4225 * do the right thing.
4226 *
4227 * If it was on_rq, then the dequeue_entity(.flags=0) will already
4228 * have normalized the vruntime, if it was !on_rq, then only when
4229 * the task is sleeping will it still have non-normalized vruntime.
4230 */
4231 if (!se->on_rq && p->state != TASK_RUNNING) {
4232 /*
4233 * Fix up our vruntime so that the current sleep doesn't
4234 * cause 'unlimited' sleep bonus.
4235 */
4236 place_entity(cfs_rq, se, 0);
4237 se->vruntime -= cfs_rq->min_vruntime;
4238 }
4239}
4240
4241/*
4242 * We switched to the sched_fair class.
4243 */
4244static void switched_to_fair(struct rq *rq, struct task_struct *p)
4245{
4246 if (!p->se.on_rq)
4247 return;
4248
4249 /*
4250 * We were most likely switched from sched_rt, so
4251 * kick off the schedule if running, otherwise just see
4252 * if we can still preempt the current task.
4253 */
4254 if (rq->curr == p)
4255 resched_task(rq->curr);
4256 else
4257 check_preempt_curr(rq, p, 0);
4258}
4259
4260/* Account for a task changing its policy or group.
4261 *
4262 * This routine is mostly called to set cfs_rq->curr field when a task
4263 * migrates between groups/classes.
4264 */
4265static void set_curr_task_fair(struct rq *rq)
4266{
4267 struct sched_entity *se = &rq->curr->se;
4268
4269 for_each_sched_entity(se)
4270 set_next_entity(cfs_rq_of(se), se);
4271}
4272
4273#ifdef CONFIG_FAIR_GROUP_SCHED
4274static void task_move_group_fair(struct task_struct *p, int on_rq)
4275{
4276 /*
4277 * If the task was not on the rq at the time of this cgroup movement
4278 * it must have been asleep, sleeping tasks keep their ->vruntime
4279 * absolute on their old rq until wakeup (needed for the fair sleeper
4280 * bonus in place_entity()).
4281 *
4282 * If it was on the rq, we've just 'preempted' it, which does convert
4283 * ->vruntime to a relative base.
4284 *
4285 * Make sure both cases convert their relative position when migrating
4286 * to another cgroup's rq. This does somewhat interfere with the
4287 * fair sleeper stuff for the first placement, but who cares.
4288 */
4289 if (!on_rq)
4290 p->se.vruntime -= cfs_rq_of(&p->se)->min_vruntime;
4291 set_task_rq(p, task_cpu(p));
4292 if (!on_rq)
4293 p->se.vruntime += cfs_rq_of(&p->se)->min_vruntime;
4294}
4295#endif
4296
4297static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task)
4298{
4299 struct sched_entity *se = &task->se;
4300 unsigned int rr_interval = 0;
4301
4302 /*
4303 * Time slice is 0 for SCHED_OTHER tasks that are on an otherwise
4304 * idle runqueue:
4305 */
4306 if (rq->cfs.load.weight)
4307 rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se));
4308
4309 return rr_interval;
4310}
4311
4312/*
4313 * All the scheduling class methods:
4314 */
4315static const struct sched_class fair_sched_class = {
4316 .next = &idle_sched_class,
4317 .enqueue_task = enqueue_task_fair,
4318 .dequeue_task = dequeue_task_fair,
4319 .yield_task = yield_task_fair,
4320 .yield_to_task = yield_to_task_fair,
4321
4322 .check_preempt_curr = check_preempt_wakeup,
4323
4324 .pick_next_task = pick_next_task_fair,
4325 .put_prev_task = put_prev_task_fair,
4326
4327#ifdef CONFIG_SMP
4328 .select_task_rq = select_task_rq_fair,
4329
4330 .rq_online = rq_online_fair,
4331 .rq_offline = rq_offline_fair,
4332
4333 .task_waking = task_waking_fair,
4334#endif
4335
4336 .set_curr_task = set_curr_task_fair,
4337 .task_tick = task_tick_fair,
4338 .task_fork = task_fork_fair,
4339
4340 .prio_changed = prio_changed_fair,
4341 .switched_from = switched_from_fair,
4342 .switched_to = switched_to_fair,
4343
4344 .get_rr_interval = get_rr_interval_fair,
4345
4346#ifdef CONFIG_FAIR_GROUP_SCHED
4347 .task_move_group = task_move_group_fair,
4348#endif
4349};
4350
4351#ifdef CONFIG_SCHED_DEBUG
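/* Print the statistics of each leaf cfs_rq on @cpu (scheduler debug output). */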
4352static void print_cfs_stats(struct seq_file *m, int cpu)
4353{
4354 struct cfs_rq *cfs_rq;
4355
4356 rcu_read_lock();
4357 for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
4358 print_cfs_rq(m, cpu, cfs_rq);
4359 rcu_read_unlock();
4360}
4361#endif