sched: wakeup preemption fix
[linux-2.6-block.git] / kernel / sched_fair.c
/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 *  Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 *  Interactivity improvements by Mike Galbraith
 *  (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 *  Various enhancements by Dmitry Adamushko.
 *  (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 *  Group scheduling enhancements by Srivatsa Vaddagiri
 *  Copyright IBM Corporation, 2007
 *  Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 *  Scaled math optimizations by Thomas Gleixner
 *  Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 *  Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms * ilog(ncpus), units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length
 * and have no persistent notion like in traditional, time-slice
 * based scheduling concepts.
 *
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches (cs) field)
 */
unsigned int sysctl_sched_latency = 20000000ULL;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 1 msec * ilog(ncpus), units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity = 1000000ULL;

/*
 * Kept at sysctl_sched_latency / sysctl_sched_min_granularity
 * (see sched_nr_latency_handler() below).
 */
unsigned int sched_nr_latency = 20;

/*
 * After fork, the child runs first (default). If set to 0 then the
 * parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 10 msec * ilog(ncpus), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_batch_wakeup_granularity = 10000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 10 msec * ilog(ncpus), units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
unsigned int sysctl_sched_wakeup_granularity = 10000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

#else	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}

/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64 max_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta > 0)
		min_vruntime = vruntime;

	return min_vruntime;
}

static inline u64 min_vruntime(u64 min_vruntime, u64 vruntime)
{
	s64 delta = (s64)(vruntime - min_vruntime);
	if (delta < 0)
		min_vruntime = vruntime;

	return min_vruntime;
}
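
/*
 * Illustrative sketch (assuming u64 wraparound of vruntime): the (s64)
 * cast makes these helpers wrap-safe. With
 *
 *	min_vruntime = 0xffffffffffffff00	(just before wrap)
 *	vruntime     = 0x0000000000000100	(just after wrap)
 *
 * the difference is 0x200 and (s64)0x200 > 0, so max_vruntime() picks
 * the post-wrap value - a plain unsigned compare would have picked the
 * stale one.
 */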

static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

/*
 * Enqueue an entity into the rb-tree:
 */
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		cfs_rq->rb_leftmost = &se->run_node;

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node)
		cfs_rq->rb_leftmost = rb_next(&se->run_node);

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct sched_entity *se = NULL;
	struct rb_node *parent;

	while (*link) {
		parent = *link;
		se = rb_entry(parent, struct sched_entity, run_node);
		link = &parent->rb_right;
	}

	return se;
}

/**************************************************************
 * Scheduling class statistics methods:
 */

#ifdef CONFIG_SCHED_DEBUG
int sched_nr_latency_handler(struct ctl_table *table, int write,
		struct file *filp, void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	sched_nr_latency = DIV_ROUND_UP(sysctl_sched_latency,
					sysctl_sched_min_granularity);

	return 0;
}
#endif
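
/*
 * Illustrative sketch (default tunables): with sysctl_sched_latency =
 * 20ms and sysctl_sched_min_granularity = 1ms, a write to either sysctl
 * recomputes
 *
 *	sched_nr_latency = DIV_ROUND_UP(20000000, 1000000) = 20
 *
 * i.e. up to 20 runnable tasks fit into one latency period before
 * __sched_period() below has to stretch it.
 */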

/*
 * The idea is to set a period in which each task runs once.
 *
 * When there are too many tasks (more than sched_nr_latency) we have to
 * stretch this period because otherwise the slices get too small.
 *
 * p = (nr <= nl) ? l : l*nr/nl
 */
static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency = sched_nr_latency;

	if (unlikely(nr_running > nr_latency)) {
		period *= nr_running;
		do_div(period, nr_latency);
	}

	return period;
}
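
/*
 * Illustrative sketch (default tunables, l = 20ms, nl = 20): with 10
 * runnable tasks the period stays at 20ms; with 40 tasks it stretches
 * to 20ms * 40 / 20 = 40ms, so each equal-weight slice never drops
 * below sysctl_sched_min_granularity (1ms).
 */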

/*
 * We calculate the wall-time slice from the period by taking a part
 * proportional to the weight.
 *
 * s = p*w/rw
 */
static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 slice = __sched_period(cfs_rq->nr_running);

	slice *= se->load.weight;
	do_div(slice, cfs_rq->load.weight);

	return slice;
}
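
/*
 * Illustrative sketch (assuming NICE_0_LOAD == 1024): a 20ms period
 * shared by a nice-0 task (weight 1024) and a heavier task of weight
 * 2048 splits as
 *
 *	s(nice-0)  = 20ms * 1024 / 3072 ~=  6.67ms
 *	s(heavier) = 20ms * 2048 / 3072 ~= 13.33ms
 *
 * i.e. wall-clock slices are proportional to load weight.
 */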

/*
 * We calculate the vruntime slice.
 *
 * vs = s/w = p/rw
 */
static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
{
	u64 vslice = __sched_period(nr_running);

	vslice *= NICE_0_LOAD;
	do_div(vslice, rq_weight);

	return vslice;
}

static u64 sched_vslice(struct cfs_rq *cfs_rq)
{
	return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
}

static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return __sched_vslice(cfs_rq->load.weight + se->load.weight,
			cfs_rq->nr_running + 1);
}
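
/*
 * Illustrative sketch, continuing the example above: the vruntime slice
 * is weight-normalized, so it is the same for every entity on the
 * queue:
 *
 *	vs = 20ms * 1024 / 3072 ~= 6.67ms
 *
 * both tasks consume the same vruntime per period even though their
 * wall-clock slices differ. sched_vslice_add() evaluates the slice as
 * if 'se' were already enqueued; place_entity() uses that for the
 * START_DEBIT placement of freshly forked tasks.
 */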

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;
	u64 vruntime;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = delta_exec;
	if (unlikely(curr->load.weight != NICE_0_LOAD)) {
		delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
							&curr->load);
	}
	curr->vruntime += delta_exec_weighted;

	/*
	 * maintain cfs_rq->min_vruntime to be a monotonically increasing
	 * value tracking the leftmost vruntime in the tree.
	 */
	if (first_fair(cfs_rq)) {
		vruntime = min_vruntime(curr->vruntime,
				__pick_next_entity(cfs_rq)->vruntime);
	} else
		vruntime = curr->vruntime;

	cfs_rq->min_vruntime =
		max_vruntime(cfs_rq->min_vruntime, vruntime);
}
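
/*
 * Illustrative sketch (assuming calc_delta_fair() scales by
 * NICE_0_LOAD/weight): a task of weight 2048 that ran for 10ms is
 * charged only 10ms * 1024 / 2048 = 5ms of vruntime, while a nice-0
 * task is charged 1:1. Heavier tasks thus accumulate vruntime more
 * slowly, which is what earns them longer wall-clock slices.
 */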

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/**************************************************
 * Scheduling class queueing methods:
 */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		/*
		 * Blocking time is in units of nanosecs, so shift by 20 to
		 * get a milliseconds-range estimation of the amount of
		 * time that the task spent sleeping:
		 */
		if (unlikely(prof_on == SLEEP_PROFILING)) {
			struct task_struct *tsk = task_of(se);

			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
				     delta >> 20);
		}
	}
#endif
}

static void check_spread(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHED_DEBUG
	s64 d = se->vruntime - cfs_rq->min_vruntime;

	if (d < 0)
		d = -d;

	if (d > 3*sysctl_sched_latency)
		schedstat_inc(cfs_rq, nr_spread_over);
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime;

	vruntime = cfs_rq->min_vruntime;

	if (sched_feat(TREE_AVG)) {
		struct sched_entity *last = __pick_last_entity(cfs_rq);
		if (last) {
			vruntime += last->vruntime;
			vruntime >>= 1;
		}
	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
		vruntime += sched_vslice(cfs_rq)/2;

	/*
	 * The 'current' period is already promised to the current tasks,
	 * however the extra weight of the new task will slow them down a
	 * little, place the new task so that it fits in the slot that
	 * stays open at the end.
	 */
	if (initial && sched_feat(START_DEBIT))
		vruntime += sched_vslice_add(cfs_rq, se);

	if (!initial) {
		/* sleeps up to a single latency don't count. */
		if (sched_feat(NEW_FAIR_SLEEPERS) && entity_is_task(se) &&
				task_of(se)->policy != SCHED_BATCH)
			vruntime -= sysctl_sched_latency;

		/* ensure we never gain time by being placed backwards. */
		vruntime = max_vruntime(se->vruntime, vruntime);
	}

	se->vruntime = vruntime;
}
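
/*
 * Illustrative sketch (default 20ms latency): a task waking from a long
 * sleep is placed at min_vruntime - 20ms, so it preempts quickly but
 * can catch up by at most one latency period; the max_vruntime() clamp
 * keeps a task that slept only briefly from banking extra credit. A
 * forked task under START_DEBIT goes the other way and starts one
 * vruntime slice after min_vruntime, so it cannot instantly displace
 * the tasks already running.
 */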

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	if (wakeup) {
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	check_spread(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
	account_entity_enqueue(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
	}

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
}

/*
 * Preempt the current task if it has run beyond its ideal runtime:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}
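
/*
 * Illustrative sketch (default tunables): two nice-0 tasks share a 20ms
 * period, so sched_slice() gives each a 10ms ideal runtime. Once the
 * running task has used more than 10ms since it was last picked
 * (sum_exec_runtime - prev_sum_exec_runtime), the next tick reschedules
 * it in favour of its peer.
 */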

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = NULL;

	if (first_fair(cfs_rq)) {
		se = __pick_next_entity(cfs_rq);
		set_next_entity(cfs_rq, se);
	}

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	check_spread(cfs_rq, prev);
	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT))
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up scheduling entities hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/* Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu')
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) entities belong to the same group? */
static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	if (se->cfs_rq == pse->cfs_rq)
		return 1;

	return 0;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return se->parent;
}

#else	/* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int
is_same_group(struct sched_entity *se, struct sched_entity *pse)
{
	return 1;
}

static inline struct sched_entity *parent_entity(struct sched_entity *se)
{
	return NULL;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
		wakeup = 1;
	}
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
		sleep = 1;
	}
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
	struct sched_entity *rightmost, *se = &rq->curr->se;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	if (likely(!sysctl_sched_compat_yield)) {
		__update_rq_clock(rq);
		/*
		 * Update run-time statistics of the 'current'.
		 */
		update_curr(cfs_rq);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	rightmost = __pick_last_entity(cfs_rq);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(rightmost->vruntime < se->vruntime))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 * Upon rescheduling, sched_class::put_prev_task() will place
	 * 'current' within the tree based on its new key value.
	 */
	se->vruntime = rightmost->vruntime + 1;
}
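
/*
 * Illustrative usage sketch: with the compat mode enabled via
 *
 *	echo 1 > /proc/sys/kernel/sched_compat_yield
 *
 * a yielding task is keyed past the rightmost entity and will not run
 * again until every currently queued task has had a turn - the
 * aggressive pre-CFS yield behaviour some applications depend on.
 */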

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
	struct sched_entity *se = &curr->se, *pse = &p->se;
	s64 delta, gran;

	if (unlikely(rt_prio(p->prio))) {
		update_rq_clock(rq);
		update_curr(cfs_rq);
		resched_task(curr);
		return;
	}
	/*
	 * Batch tasks do not preempt (their preemption is driven by
	 * the tick):
	 */
	if (unlikely(p->policy == SCHED_BATCH))
		return;

	if (sched_feat(WAKEUP_PREEMPT)) {
		while (!is_same_group(se, pse)) {
			se = parent_entity(se);
			pse = parent_entity(pse);
		}

		delta = se->vruntime - pse->vruntime;
		gran = sysctl_sched_wakeup_granularity;
		if (unlikely(se->load.weight != NICE_0_LOAD))
			gran = calc_delta_fair(gran, &se->load);

		if (delta > gran)
			resched_task(curr);
	}
}
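
/*
 * Illustrative sketch (default 10ms wakeup granularity): the wakeup
 * preempts only if the waking task's vruntime trails current's by more
 * than the granularity (scaled by current's weight), e.g. a 15ms lead
 * preempts at once while a 5ms lead waits for the tick - this keeps
 * pairs of tasks that wake each other from thrashing through context
 * switches.
 */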

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	return task_of(se);
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

#ifdef CONFIG_SMP
/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
	struct task_struct *p;

	if (!curr)
		return NULL;

	p = rb_entry(curr, struct task_struct, se.run_node);
	cfs_rq->rb_load_balance_curr = rb_next(curr);

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr;
	struct task_struct *p;

	if (!cfs_rq->nr_running)
		return MAX_PRIO;

	curr = cfs_rq->curr;
	if (!curr)
		curr = __pick_next_entity(cfs_rq);

	p = task_of(curr);

	return p->prio;
}
#endif

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	struct cfs_rq *busy_cfs_rq;
	long rem_load_move = max_load_move;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
		struct cfs_rq *this_cfs_rq;
		long imbalance;
		unsigned long maxload;

		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

		imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
		if (imbalance <= 0)
			continue;

		/* Don't pull more than imbalance/2 */
		imbalance /= 2;
		maxload = min(rem_load_move, imbalance);

		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		rem_load_move -= balance_tasks(this_rq, this_cpu, busiest,
					       maxload, sd, idle, all_pinned,
					       this_best_prio,
					       &cfs_rq_iterator);

		if (rem_load_move <= 0)
			break;
	}

	return max_load_move - rem_load_move;
}

static int
move_one_task_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		   struct sched_domain *sd, enum cpu_idle_type idle)
{
	struct cfs_rq *busy_cfs_rq;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
		/*
		 * pass busy_cfs_rq argument into
		 * load_balance_[start|next]_fair iterators
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		if (iter_move_one_task(this_rq, this_cpu, busiest, sd, idle,
				       &cfs_rq_iterator))
			return 1;
	}

	return 0;
}
#endif

/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se);
	}
}

#define swap(a, b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

/*
 * Share the fairness runtime between parent and child, thus the
 * total amount of pressure for CPU stays equal - new tasks
 * get a chance to run but frequent forkers are not allowed to
 * monopolize the CPU. Note: the parent runqueue is locked,
 * the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;
	int this_cpu = smp_processor_id();

	sched_info_queued(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first && this_cpu == task_cpu(p) &&
			curr->vruntime < se->vruntime) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
	}

	enqueue_task_fair(rq, p, 0);
	resched_task(rq->curr);
}

/* Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

/*
 * All the scheduling class methods:
 */
static const struct sched_class fair_sched_class = {
	.next			= &idle_sched_class,
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

#ifdef CONFIG_SMP
	.load_balance		= load_balance_fair,
	.move_one_task		= move_one_task_fair,
#endif

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
#endif
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
}
#endif