sched: improve prev_sum_exec_runtime setting
[linux-2.6-block.git] / kernel / sched_fair.c
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms, units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length.
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches field)
 *
 * On SMP systems the value of this is multiplied by 1 + log2 of the
 * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
 * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
 */
unsigned int sysctl_sched_latency __read_mostly = 20000000ULL;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 2 msec, units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 25 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 1 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000UL;

unsigned int sysctl_sched_stat_granularity __read_mostly;

/*
 * Initialized in sched_init_granularity() [to 5 times the base granularity]:
 */
unsigned int sysctl_sched_runtime_limit __read_mostly;

/*
 * Debugging: various feature bits
 */
enum {
        SCHED_FEAT_FAIR_SLEEPERS        = 1,
        SCHED_FEAT_SLEEPER_AVG          = 2,
        SCHED_FEAT_SLEEPER_LOAD_AVG     = 4,
        SCHED_FEAT_PRECISE_CPU_LOAD     = 8,
        SCHED_FEAT_START_DEBIT          = 16,
        SCHED_FEAT_SKIP_INITIAL         = 32,
};

unsigned int sysctl_sched_features __read_mostly =
                SCHED_FEAT_FAIR_SLEEPERS        *1 |
                SCHED_FEAT_SLEEPER_AVG          *0 |
                SCHED_FEAT_SLEEPER_LOAD_AVG     *1 |
                SCHED_FEAT_PRECISE_CPU_LOAD     *1 |
                SCHED_FEAT_START_DEBIT          *1 |
                SCHED_FEAT_SKIP_INITIAL         *0;
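
/*
 * With the default multipliers above the effective feature mask is
 * FAIR_SLEEPERS | SLEEPER_LOAD_AVG | PRECISE_CPU_LOAD | START_DEBIT,
 * i.e. 1 + 4 + 8 + 16 = 29.
 */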

extern struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rq;
}

/* currently running entity (if any) on this cfs_rq */
static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
{
        return cfs_rq->curr;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)      (!se->my_q)

static inline void
set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        cfs_rq->curr = se;
}

#else   /* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

static inline struct sched_entity *cfs_rq_curr(struct cfs_rq *cfs_rq)
{
        struct rq *rq = rq_of(cfs_rq);

        if (unlikely(rq->curr->sched_class != &fair_sched_class))
                return NULL;

        return &rq->curr->se;
}

#define entity_is_task(se)      1

static inline void
set_cfs_rq_curr(struct cfs_rq *cfs_rq, struct sched_entity *se) { }

#endif  /* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
        return container_of(se, struct task_struct, se);
}

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

/*
 * Enqueue an entity into the rb-tree:
 */
static inline void
__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
        struct rb_node *parent = NULL;
        struct sched_entity *entry;
        s64 key = se->fair_key;
        int leftmost = 1;

        /*
         * Find the right place in the rbtree:
         */
        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_entity, run_node);
                /*
                 * We don't care about collisions. Nodes with
                 * the same key stay together.
                 */
                if (key - entry->fair_key < 0) {
                        link = &parent->rb_left;
                } else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        /*
         * Maintain a cache of leftmost tree entries (it is frequently
         * used):
         */
        if (leftmost)
                cfs_rq->rb_leftmost = &se->run_node;

        rb_link_node(&se->run_node, parent, link);
        rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
        update_load_add(&cfs_rq->load, se->load.weight);
        cfs_rq->nr_running++;
        se->on_rq = 1;

        schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}
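
/*
 * Note (an observation, not from the original source): the
 * "key - entry->fair_key < 0" form, rather than a direct "<" comparison
 * of the two keys, keeps the ordering sane if the 64-bit keys ever
 * wrap, in the same spirit as the time_before() macros.
 */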

static inline void
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        if (cfs_rq->rb_leftmost == &se->run_node)
                cfs_rq->rb_leftmost = rb_next(&se->run_node);
        rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
        update_load_sub(&cfs_rq->load, se->load.weight);
        cfs_rq->nr_running--;
        se->on_rq = 0;

        schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
        return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
        return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

/**************************************************************
 * Scheduling class statistics methods:
 */

/*
 * Calculate the preemption granularity needed to schedule every
 * runnable task once per sysctl_sched_latency amount of time.
 * (down to a sensible low limit on granularity)
 *
 * For example, if there are 2 tasks running and latency is 10 msecs,
 * we switch tasks every 5 msecs. If we have 3 tasks running, we have
 * to switch tasks every 3.33 msecs to get a 10 msecs observed latency
 * for each task. We do finer and finer scheduling until we reach
 * the minimum granularity value.
 *
 * To achieve this we use the following dynamic-granularity rule:
 *
 *      gran = lat/nr - lat/nr/nr
 *
 * This comes out of the following equations:
 *
 *      kA1 + gran = kB1
 *      kB2 + gran = kA2
 *      kA2 = kA1
 *      kB2 = kB1 - d + d/nr
 *      lat = d * nr
 *
 * Where 'k' is key, 'A' is task A (waiting), 'B' is task B (running),
 * '1' is start of time, '2' is end of time, 'd' is delay between
 * 1 and 2 (during which task B was running), 'nr' is number of tasks
 * running, 'lat' is the period of each task. ('lat' is the
 * sched_latency that we aim for.)
 */
static long
sched_granularity(struct cfs_rq *cfs_rq)
{
        unsigned int gran = sysctl_sched_latency;
        unsigned int nr = cfs_rq->nr_running;

        if (nr > 1) {
                gran = gran/nr - gran/nr/nr;
                gran = max(gran, sysctl_sched_min_granularity);
        }

        return gran;
}
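
/*
 * Worked example (computed from the defaults above, not part of the
 * original source): with sysctl_sched_latency = 20ms, nr = 2 gives
 * gran = 10ms - 5ms = 5ms and nr = 4 gives 5ms - 1.25ms = 3.75ms;
 * as nr grows further, the max() clamps the result to the 2ms
 * sysctl_sched_min_granularity floor.
 */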

/*
 * We rescale the rescheduling granularity of tasks according to their
 * nice level, but only linearly, not exponentially:
 */
static long
niced_granularity(struct sched_entity *curr, unsigned long granularity)
{
        u64 tmp;

        if (likely(curr->load.weight == NICE_0_LOAD))
                return granularity;
        /*
         * Positive nice levels get the same granularity as nice-0:
         */
        if (likely(curr->load.weight < NICE_0_LOAD)) {
                tmp = curr->load.weight * (u64)granularity;
                return (long) (tmp >> NICE_0_SHIFT);
        }
        /*
         * Negative nice level tasks get linearly finer
         * granularity:
         */
        tmp = curr->load.inv_weight * (u64)granularity;

        /*
         * It will always fit into 'long':
         */
        return (long) (tmp >> (WMULT_SHIFT-NICE_0_SHIFT));
}

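/*
 * Illustration (numbers assumed for the example, not from the original
 * source): with NICE_0_LOAD = 1024 and WMULT_SHIFT = 32, a hypothetical
 * negative-nice weight of 2048 has inv_weight = 2^32/2048 = 2^21, so
 * the last branch yields gran * 2^21 >> 22 = gran/2 - twice the
 * weight, half the granularity.
 */
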
static inline void
limit_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        long limit = sysctl_sched_runtime_limit;

        /*
         * Niced tasks have the same history dynamic range as
         * non-niced tasks:
         */
        if (unlikely(se->wait_runtime > limit)) {
                se->wait_runtime = limit;
                schedstat_inc(se, wait_runtime_overruns);
                schedstat_inc(cfs_rq, wait_runtime_overruns);
        }
        if (unlikely(se->wait_runtime < -limit)) {
                se->wait_runtime = -limit;
                schedstat_inc(se, wait_runtime_underruns);
                schedstat_inc(cfs_rq, wait_runtime_underruns);
        }
}

static inline void
__add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
        se->wait_runtime += delta;
        schedstat_add(se, sum_wait_runtime, delta);
        limit_wait_runtime(cfs_rq, se);
}

static void
add_wait_runtime(struct cfs_rq *cfs_rq, struct sched_entity *se, long delta)
{
        schedstat_add(cfs_rq, wait_runtime, -se->wait_runtime);
        __add_wait_runtime(cfs_rq, se, delta);
        schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}

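/*
 * Note on the pattern above: add_wait_runtime() first subtracts the
 * entity's old wait_runtime from the rq-wide statistic, applies the
 * delta (which limit_wait_runtime() may clip), then adds the new value
 * back, so the aggregate stays consistent even when the delta is
 * limited.
 */
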
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        unsigned long delta, delta_exec, delta_fair, delta_mine;
        struct load_weight *lw = &cfs_rq->load;
        unsigned long load = lw->weight;

        delta_exec = curr->delta_exec;
        schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

        curr->sum_exec_runtime += delta_exec;
        cfs_rq->exec_clock += delta_exec;

        if (unlikely(!load))
                return;

        delta_fair = calc_delta_fair(delta_exec, lw);
        delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);

        if (cfs_rq->sleeper_bonus > sysctl_sched_min_granularity) {
                delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
                delta = min(delta, (unsigned long)(
                        (long)sysctl_sched_runtime_limit - curr->wait_runtime));
                cfs_rq->sleeper_bonus -= delta;
                delta_mine -= delta;
        }

        cfs_rq->fair_clock += delta_fair;
        /*
         * We executed delta_exec amount of time on the CPU,
         * but we were only entitled to delta_mine amount of
         * time during that period (if nr_running == 1 then
         * the two values are equal)
         * [Note: delta_mine - delta_exec is negative]:
         */
        add_wait_runtime(cfs_rq, curr, delta_mine - delta_exec);
}
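
/*
 * Worked example (illustrative, not from the original source): with two
 * runnable nice-0 tasks of equal weight, delta_mine = delta_exec/2, so
 * the running task's wait_runtime drops by half of what it executed,
 * while the fair clock advances by the same delta_exec/2 that the
 * waiting task will be credited with - the signed wait_runtime is what
 * pulls the pair back towards fairness.
 */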

static void update_curr(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr = cfs_rq_curr(cfs_rq);
        unsigned long delta_exec;

        if (unlikely(!curr))
                return;

        /*
         * Get the amount of time the current task was running
         * since the last time we changed load (this cannot
         * overflow on 32 bits):
         */
        delta_exec = (unsigned long)(rq_of(cfs_rq)->clock - curr->exec_start);

        curr->delta_exec += delta_exec;

        if (unlikely(curr->delta_exec > sysctl_sched_stat_granularity)) {
                __update_curr(cfs_rq, curr);
                curr->delta_exec = 0;
        }
        curr->exec_start = rq_of(cfs_rq)->clock;
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        se->wait_start_fair = cfs_rq->fair_clock;
        schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * We calculate fair deltas here, so protect against the random effects
 * of a multiplication overflow by capping it to the runtime limit:
 */
#if BITS_PER_LONG == 32
static inline unsigned long
calc_weighted(unsigned long delta, unsigned long weight, int shift)
{
        u64 tmp = (u64)delta * weight >> shift;

        if (unlikely(tmp > sysctl_sched_runtime_limit*2))
                return sysctl_sched_runtime_limit*2;
        return tmp;
}
#else
static inline unsigned long
calc_weighted(unsigned long delta, unsigned long weight, int shift)
{
        return delta * weight >> shift;
}
#endif
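
/*
 * Why the 32-bit special case (an observation on the code above): the
 * product is computed in 64 bits either way, but the return type is
 * unsigned long, which is 32 bits here; capping the result at
 * 2*sysctl_sched_runtime_limit keeps the implicit truncation from
 * wrapping to a bogus small value.
 */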

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        s64 key;

        /*
         * Are we enqueueing a waiting task? (for current tasks
         * a dequeue/enqueue event is a NOP)
         */
        if (se != cfs_rq_curr(cfs_rq))
                update_stats_wait_start(cfs_rq, se);
        /*
         * Update the key:
         */
        key = cfs_rq->fair_clock;

        /*
         * Optimize the common nice 0 case:
         */
        if (likely(se->load.weight == NICE_0_LOAD)) {
                key -= se->wait_runtime;
        } else {
                u64 tmp;

                if (se->wait_runtime < 0) {
                        tmp = -se->wait_runtime;
                        key += (tmp * se->load.inv_weight) >>
                                        (WMULT_SHIFT - NICE_0_SHIFT);
                } else {
                        tmp = se->wait_runtime;
                        key -= (tmp * se->load.inv_weight) >>
                                        (WMULT_SHIFT - NICE_0_SHIFT);
                }
        }

        se->fair_key = key;
}
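
/*
 * Put differently: entities sort on fair_key = fair_clock -
 * wait_runtime (scaled by inv_weight for non-nice-0 tasks), so a task
 * that is owed CPU time gets a smaller key, lands further left in the
 * rbtree and is picked earlier.
 */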

/*
 * Note: must be called with a freshly updated rq->fair_clock.
 */
static inline void
__update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        unsigned long delta_fair = se->delta_fair_run;

        schedstat_set(se->wait_max, max(se->wait_max,
                        rq_of(cfs_rq)->clock - se->wait_start));

        if (unlikely(se->load.weight != NICE_0_LOAD))
                delta_fair = calc_weighted(delta_fair, se->load.weight,
                                                        NICE_0_SHIFT);

        add_wait_runtime(cfs_rq, se, delta_fair);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        unsigned long delta_fair;

        if (unlikely(!se->wait_start_fair))
                return;

        delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
                        (u64)(cfs_rq->fair_clock - se->wait_start_fair));

        se->delta_fair_run += delta_fair;
        if (unlikely(abs(se->delta_fair_run) >=
                                sysctl_sched_stat_granularity)) {
                __update_stats_wait_end(cfs_rq, se);
                se->delta_fair_run = 0;
        }

        se->wait_start_fair = 0;
        schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        update_curr(cfs_rq);
        /*
         * Mark the end of the wait period if dequeueing a
         * waiting task:
         */
        if (se != cfs_rq_curr(cfs_rq))
                update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * We are starting a new run period:
         */
        se->exec_start = rq_of(cfs_rq)->clock;
}

/*
 * We are descheduling a task - update its stats:
 */
static inline void
update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        se->exec_start = 0;
}

/**************************************************
 * Scheduling class queueing methods:
 */

549
dfdc119e 550static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
bf0f6f24
IM
551{
552 unsigned long load = cfs_rq->load.weight, delta_fair;
553 long prev_runtime;
554
b2133c8b
IM
555 /*
556 * Do not boost sleepers if there's too much bonus 'in flight'
557 * already:
558 */
559 if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
560 return;
561
bf0f6f24
IM
562 if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
563 load = rq_of(cfs_rq)->cpu_load[2];
564
565 delta_fair = se->delta_fair_sleep;
566
567 /*
568 * Fix up delta_fair with the effect of us running
569 * during the whole sleep period:
570 */
571 if (sysctl_sched_features & SCHED_FEAT_SLEEPER_AVG)
572 delta_fair = div64_likely32((u64)delta_fair * load,
573 load + se->load.weight);
574
575 if (unlikely(se->load.weight != NICE_0_LOAD))
576 delta_fair = calc_weighted(delta_fair, se->load.weight,
577 NICE_0_SHIFT);
578
579 prev_runtime = se->wait_runtime;
580 __add_wait_runtime(cfs_rq, se, delta_fair);
581 delta_fair = se->wait_runtime - prev_runtime;
582
583 /*
584 * Track the amount of bonus we've given to sleepers:
585 */
586 cfs_rq->sleeper_bonus += delta_fair;
bf0f6f24
IM
587}
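
/*
 * Sketch of the effect (illustrative): a task that slept gets (a
 * load-scaled, possibly clipped fraction of) its sleep period credited
 * as wait_runtime, and the amount actually granted is also accumulated
 * in cfs_rq->sleeper_bonus, which __update_curr() then works back off
 * the running tasks' entitlement a bit at a time.
 */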

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        struct task_struct *tsk = task_of(se);
        unsigned long delta_fair;

        if ((entity_is_task(se) && tsk->policy == SCHED_BATCH) ||
                        !(sysctl_sched_features & SCHED_FEAT_FAIR_SLEEPERS))
                return;

        delta_fair = (unsigned long)min((u64)(2*sysctl_sched_runtime_limit),
                (u64)(cfs_rq->fair_clock - se->sleep_start_fair));

        se->delta_fair_sleep += delta_fair;
        if (unlikely(abs(se->delta_fair_sleep) >=
                                sysctl_sched_stat_granularity)) {
                __enqueue_sleeper(cfs_rq, se);
                se->delta_fair_sleep = 0;
        }

        se->sleep_start_fair = 0;

#ifdef CONFIG_SCHEDSTATS
        if (se->sleep_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->sleep_max))
                        se->sleep_max = delta;

                se->sleep_start = 0;
                se->sum_sleep_runtime += delta;
        }
        if (se->block_start) {
                u64 delta = rq_of(cfs_rq)->clock - se->block_start;

                if ((s64)delta < 0)
                        delta = 0;

                if (unlikely(delta > se->block_max))
                        se->block_max = delta;

                se->block_start = 0;
                se->sum_sleep_runtime += delta;
        }
#endif
}

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
        /*
         * Update the fair clock.
         */
        update_curr(cfs_rq);

        if (wakeup)
                enqueue_sleeper(cfs_rq, se);

        update_stats_enqueue(cfs_rq, se);
        __enqueue_entity(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
        update_stats_dequeue(cfs_rq, se);
        if (sleep) {
                se->sleep_start_fair = cfs_rq->fair_clock;
#ifdef CONFIG_SCHEDSTATS
                if (entity_is_task(se)) {
                        struct task_struct *tsk = task_of(se);

                        if (tsk->state & TASK_INTERRUPTIBLE)
                                se->sleep_start = rq_of(cfs_rq)->clock;
                        if (tsk->state & TASK_UNINTERRUPTIBLE)
                                se->block_start = rq_of(cfs_rq)->clock;
                }
#endif
        }
        __dequeue_entity(cfs_rq, se);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void
__check_preempt_curr_fair(struct cfs_rq *cfs_rq, struct sched_entity *se,
                          struct sched_entity *curr, unsigned long granularity)
{
        s64 __delta = curr->fair_key - se->fair_key;

        /*
         * Take scheduling granularity into account - do not
         * preempt the current task unless the best task has
         * a larger than sched_granularity fairness advantage:
         */
        if (__delta > niced_granularity(curr, granularity))
                resched_task(rq_of(cfs_rq)->curr);
}
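
/*
 * Example (illustrative): with two nice-0 tasks and a 1ms wakeup
 * granularity, the woken task only preempts once curr's fair_key
 * exceeds its own by more than 1ms worth of fair clock - a small
 * hysteresis that avoids over-scheduling on every wakeup.
 */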

static inline void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
        /*
         * Any task has to be enqueued before it gets to execute on
         * a CPU. So account for the time it spent waiting on the
         * runqueue. (note, here we rely on pick_next_task() having
         * done a put_prev_task_fair() shortly before this, which
         * updated rq->fair_clock - used by update_stats_wait_end())
         */
        update_stats_wait_end(cfs_rq, se);
        update_stats_curr_start(cfs_rq, se);
        set_cfs_rq_curr(cfs_rq, se);
        se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
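
/*
 * The prev_sum_exec_runtime snapshot above marks the moment this entity
 * became current; entity_tick() later measures the length of the run as
 * sum_exec_runtime - prev_sum_exec_runtime. Taking the snapshot here,
 * rather than elsewhere, is the point of this commit.
 */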

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct sched_entity *se = __pick_next_entity(cfs_rq);

        set_next_entity(cfs_rq, se);

        return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
        /*
         * If still on the runqueue then deactivate_task()
         * was not called and update_curr() has to be done:
         */
        if (prev->on_rq)
                update_curr(cfs_rq);

        update_stats_curr_end(cfs_rq, prev);

        if (prev->on_rq)
                update_stats_wait_start(cfs_rq, prev);
        set_cfs_rq_curr(cfs_rq, NULL);
}

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
        unsigned long gran, ideal_runtime, delta_exec;
        struct sched_entity *next;

        /*
         * Dequeue and enqueue the task to update its
         * position within the tree:
         */
        dequeue_entity(cfs_rq, curr, 0);
        enqueue_entity(cfs_rq, curr, 0);

        /*
         * Reschedule if another task tops the current one.
         */
        next = __pick_next_entity(cfs_rq);
        if (next == curr)
                return;

        gran = sched_granularity(cfs_rq);
        ideal_runtime = niced_granularity(curr,
                max(sysctl_sched_latency / cfs_rq->nr_running,
                        (unsigned long)sysctl_sched_min_granularity));
        /*
         * If we executed more than what the latency constraint suggests,
         * reduce the rescheduling granularity. This way the total latency
         * of how much a task is not scheduled converges to
         * sysctl_sched_latency:
         */
        delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        if (delta_exec > ideal_runtime)
                gran = 0;

        __check_preempt_curr_fair(cfs_rq, next, curr, gran);
}
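
/*
 * Worked example (defaults assumed): four runnable nice-0 tasks with
 * sysctl_sched_latency = 20ms give ideal_runtime = max(20ms/4, 2ms) =
 * 5ms. Once curr has run 5ms past its prev_sum_exec_runtime mark, gran
 * is forced to 0 and the leftmost entity preempts as soon as its key is
 * lower at all.
 */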

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
                for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        /* A later patch will take group into account */
        return &cpu_rq(this_cpu)->cfs;
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
        list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) tasks belong to the same group? */
static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
        if (curr->se.cfs_rq == p->se.cfs_rq)
                return 1;

        return 0;
}

#else   /* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
                for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
        return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
        struct task_struct *p = task_of(se);
        struct rq *rq = task_rq(p);

        return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
        return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
        return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
                for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
        return 1;
}

#endif  /* CONFIG_FAIR_GROUP_SCHED */

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                if (se->on_rq)
                        break;
                cfs_rq = cfs_rq_of(se);
                enqueue_entity(cfs_rq, se, wakeup);
        }
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                dequeue_entity(cfs_rq, se, sleep);
                /* Don't dequeue parent if it has other entities besides us */
                if (cfs_rq->load.weight)
                        break;
        }
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue
 */
static void yield_task_fair(struct rq *rq, struct task_struct *p)
{
        struct cfs_rq *cfs_rq = task_cfs_rq(p);

        __update_rq_clock(rq);
        /*
         * Dequeue and enqueue the task to update its
         * position within the tree:
         */
        dequeue_entity(cfs_rq, &p->se, 0);
        enqueue_entity(cfs_rq, &p->se, 0);
}

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_fair(struct rq *rq, struct task_struct *p)
{
        struct task_struct *curr = rq->curr;
        struct cfs_rq *cfs_rq = task_cfs_rq(curr);
        unsigned long gran;

        if (unlikely(rt_prio(p->prio))) {
                update_rq_clock(rq);
                update_curr(cfs_rq);
                resched_task(curr);
                return;
        }

        gran = sysctl_sched_wakeup_granularity;
        /*
         * Batch tasks prefer throughput over latency:
         */
        if (unlikely(p->policy == SCHED_BATCH))
                gran = sysctl_sched_batch_wakeup_granularity;

        if (is_same_group(curr, p))
                __check_preempt_curr_fair(cfs_rq, &p->se, &curr->se, gran);
}

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
        struct cfs_rq *cfs_rq = &rq->cfs;
        struct sched_entity *se;

        if (unlikely(!cfs_rq->nr_running))
                return NULL;

        do {
                se = pick_next_entity(cfs_rq);
                cfs_rq = group_cfs_rq(se);
        } while (cfs_rq);

        return task_of(se);
}
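
/*
 * The do/while loop walks down the group hierarchy: pick the leftmost
 * entity at the current level, and if that entity "owns" a runqueue
 * (i.e. it is a group), descend into it and pick again, until a plain
 * task entity is reached.
 */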

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
        struct sched_entity *se = &prev->se;
        struct cfs_rq *cfs_rq;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                put_prev_entity(cfs_rq, se);
        }
}

/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static inline struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
        struct task_struct *p;

        if (!curr)
                return NULL;

        p = rb_entry(curr, struct task_struct, se.run_node);
        cfs_rq->rb_load_balance_curr = rb_next(curr);

        return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
        struct cfs_rq *cfs_rq = arg;

        return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
        struct cfs_rq *cfs_rq = arg;

        return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
        struct sched_entity *curr;
        struct task_struct *p;

        if (!cfs_rq->nr_running)
                return MAX_PRIO;

        curr = __pick_next_entity(cfs_rq);
        p = task_of(curr);

        return p->prio;
}
#endif

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
                  unsigned long max_nr_move, unsigned long max_load_move,
                  struct sched_domain *sd, enum cpu_idle_type idle,
                  int *all_pinned, int *this_best_prio)
{
        struct cfs_rq *busy_cfs_rq;
        unsigned long load_moved, total_nr_moved = 0, nr_moved;
        long rem_load_move = max_load_move;
        struct rq_iterator cfs_rq_iterator;

        cfs_rq_iterator.start = load_balance_start_fair;
        cfs_rq_iterator.next = load_balance_next_fair;

        for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
                struct cfs_rq *this_cfs_rq;
                long imbalance;
                unsigned long maxload;

                this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

                imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
                /* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
                if (imbalance <= 0)
                        continue;

                /* Don't pull more than imbalance/2 */
                imbalance /= 2;
                maxload = min(rem_load_move, imbalance);

                *this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
                /* pass busy_cfs_rq argument into
                 * load_balance_[start|next]_fair iterators
                 */
                cfs_rq_iterator.arg = busy_cfs_rq;
                nr_moved = balance_tasks(this_rq, this_cpu, busiest,
                                max_nr_move, maxload, sd, idle, all_pinned,
                                &load_moved, this_best_prio, &cfs_rq_iterator);

                total_nr_moved += nr_moved;
                max_nr_move -= nr_moved;
                rem_load_move -= load_moved;

                if (max_nr_move <= 0 || rem_load_move <= 0)
                        break;
        }

        return max_load_move - rem_load_move;
}

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &curr->se;

        for_each_sched_entity(se) {
                cfs_rq = cfs_rq_of(se);
                entity_tick(cfs_rq, se);
        }
}

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
        struct cfs_rq *cfs_rq = task_cfs_rq(p);
        struct sched_entity *se = &p->se, *curr = cfs_rq_curr(cfs_rq);

        sched_info_queued(p);

        update_curr(cfs_rq);
        update_stats_enqueue(cfs_rq, se);
        /*
         * Child runs first: we let it run before the parent
         * until it reschedules once. We set up the key so that
         * it will preempt the parent:
         */
        se->fair_key = curr->fair_key -
                niced_granularity(curr, sched_granularity(cfs_rq)) - 1;
        /*
         * The first wait is dominated by the child-runs-first logic,
         * so do not credit it with that waiting time yet:
         */
        if (sysctl_sched_features & SCHED_FEAT_SKIP_INITIAL)
                se->wait_start_fair = 0;

        /*
         * The statistical average of wait_runtime is about
         * -granularity/2, so initialize the task with that:
         */
        if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
                se->wait_runtime = -(sched_granularity(cfs_rq) / 2);

        __enqueue_entity(cfs_rq, se);
}
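
/*
 * Effect (illustrative): the child's key is placed one granularity unit
 * (plus 1ns) to the left of the parent's, so the child sorts leftmost
 * relative to the parent and runs first, while START_DEBIT charges it
 * half a granularity of wait_runtime so that tight fork loops cannot
 * mint free CPU time.
 */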

#ifdef CONFIG_FAIR_GROUP_SCHED
/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
        struct sched_entity *se = &rq->curr->se;

        for_each_sched_entity(se)
                set_next_entity(cfs_rq_of(se), se);
}
#else
static void set_curr_task_fair(struct rq *rq)
{
}
#endif

/*
 * All the scheduling class methods:
 */
struct sched_class fair_sched_class __read_mostly = {
        .enqueue_task           = enqueue_task_fair,
        .dequeue_task           = dequeue_task_fair,
        .yield_task             = yield_task_fair,

        .check_preempt_curr     = check_preempt_curr_fair,

        .pick_next_task         = pick_next_task_fair,
        .put_prev_task          = put_prev_task_fair,

        .load_balance           = load_balance_fair,

        .set_curr_task          = set_curr_task_fair,
        .task_tick              = task_tick_fair,
        .task_new               = task_new_fair,
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
        struct cfs_rq *cfs_rq;

        for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
                print_cfs_rq(m, cpu, cfs_rq);
}
#endif