/*
 * Completely Fair Scheduling (CFS) Class (SCHED_NORMAL/SCHED_BATCH)
 *
 * Copyright (C) 2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Interactivity improvements by Mike Galbraith
 * (C) 2007 Mike Galbraith <efault@gmx.de>
 *
 * Various enhancements by Dmitry Adamushko.
 * (C) 2007 Dmitry Adamushko <dmitry.adamushko@gmail.com>
 *
 * Group scheduling enhancements by Srivatsa Vaddagiri
 * Copyright IBM Corporation, 2007
 * Author: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
 *
 * Scaled math optimizations by Thomas Gleixner
 * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
 *
 * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

/*
 * Targeted preemption latency for CPU-bound tasks:
 * (default: 20ms, units: nanoseconds)
 *
 * NOTE: this latency value is not the same as the concept of
 * 'timeslice length' - timeslices in CFS are of variable length.
 * (to see the precise effective timeslice length of your workload,
 *  run vmstat and monitor the context-switches field)
 *
 * On SMP systems the value of this is multiplied by the log2 of the
 * number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
 * systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
 */
const_debug unsigned int sysctl_sched_latency = 20000000ULL;

/*
 * After fork, the child runs first (the default). If set to 0,
 * the parent will (try to) run first.
 */
const_debug unsigned int sysctl_sched_child_runs_first = 1;

/*
 * Minimal preemption granularity for CPU-bound tasks:
 * (default: 2 msec, units: nanoseconds)
 */
unsigned int sysctl_sched_min_granularity __read_mostly = 2000000ULL;

/*
 * sys_sched_yield() compat mode
 *
 * This option switches the aggressive yield implementation of the
 * old scheduler back on.
 */
unsigned int __read_mostly sysctl_sched_compat_yield;

/*
 * SCHED_BATCH wake-up granularity.
 * (default: 25 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_batch_wakeup_granularity = 25000000UL;

/*
 * SCHED_OTHER wake-up granularity.
 * (default: 2 msec, units: nanoseconds)
 *
 * This option delays the preemption effects of decoupled workloads
 * and reduces their over-scheduling. Synchronous workloads will still
 * have immediate wakeup/sleep latencies.
 */
const_debug unsigned int sysctl_sched_wakeup_granularity = 2000000UL;

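/*
 * Worked example with the defaults above: sysctl_sched_latency /
 * sysctl_sched_min_granularity = 20ms / 2ms, i.e. up to 10 runnable
 * tasks share a single 20 ms latency period. Beyond 10 tasks the
 * period is stretched proportionally (see __sched_period() below), so
 * nobody's slice drops under the 2 ms minimum granularity.
 */
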
extern struct sched_class fair_sched_class;

/**************************************************************
 * CFS operations on generic schedulable entities:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* cpu runqueue to which this cfs_rq is attached */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rq;
}

/* An entity is a task if it doesn't "own" a runqueue */
#define entity_is_task(se)	(!se->my_q)

#else	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
	return container_of(cfs_rq, struct rq, cfs);
}

#define entity_is_task(se)	1

#endif	/* CONFIG_FAIR_GROUP_SCHED */

static inline struct task_struct *task_of(struct sched_entity *se)
{
	return container_of(se, struct task_struct, se);
}


/**************************************************************
 * Scheduling class tree data structure manipulation methods:
 */

static inline u64
max_vruntime(u64 min_vruntime, u64 vruntime)
{
	if ((vruntime > min_vruntime) ||
	    (min_vruntime > (1ULL << 61) && vruntime < (1ULL << 50)))
		min_vruntime = vruntime;

	return min_vruntime;
}
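
/*
 * Note on the second condition above: vruntime is an unsigned 64-bit
 * value that only ever grows, so it eventually wraps past zero. When
 * min_vruntime sits near the top of the range (> 2^61) while vruntime
 * has already wrapped back near zero (< 2^50), the wrapped value is in
 * fact the farther-ahead one and must win although it compares smaller.
 */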

static inline void
set_leftmost(struct cfs_rq *cfs_rq, struct rb_node *leftmost)
{
	cfs_rq->rb_leftmost = leftmost;
}

static inline s64
entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	return se->vruntime - cfs_rq->min_vruntime;
}

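/*
 * Keying the tree on (vruntime - min_vruntime) rather than on raw
 * vruntime keeps the comparison values small and signed, so the rb-tree
 * ordering stays correct even as the absolute u64 clocks grow and wrap.
 */
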
/*
 * Enqueue an entity into the rb-tree:
 */
static void
__enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct rb_node *parent = NULL;
	struct sched_entity *entry;
	s64 key = entity_key(cfs_rq, se);
	int leftmost = 1;

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_entity, run_node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same key stay together.
		 */
		if (key < entity_key(cfs_rq, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Maintain a cache of leftmost tree entries (it is frequently
	 * used):
	 */
	if (leftmost)
		set_leftmost(cfs_rq, &se->run_node);

	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}

static void
__dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	if (cfs_rq->rb_leftmost == &se->run_node)
		set_leftmost(cfs_rq, rb_next(&se->run_node));

	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
}

static inline struct rb_node *first_fair(struct cfs_rq *cfs_rq)
{
	return cfs_rq->rb_leftmost;
}

static struct sched_entity *__pick_next_entity(struct cfs_rq *cfs_rq)
{
	return rb_entry(first_fair(cfs_rq), struct sched_entity, run_node);
}

static inline struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
{
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct sched_entity *se = NULL;
	struct rb_node *parent;

	while (*link) {
		parent = *link;
		se = rb_entry(parent, struct sched_entity, run_node);
		link = &parent->rb_right;
	}

	return se;
}

/**************************************************************
 * Scheduling class statistics methods:
 */

static u64 __sched_period(unsigned long nr_running)
{
	u64 period = sysctl_sched_latency;
	unsigned long nr_latency =
		sysctl_sched_latency / sysctl_sched_min_granularity;

	if (unlikely(nr_running > nr_latency)) {
		period *= nr_running;
		do_div(period, nr_latency);
	}

	return period;
}
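
/*
 * Example: with the 20ms/2ms defaults nr_latency is 10, so the period
 * stays 20 ms for up to 10 runnable tasks; with 15 runnable tasks it
 * becomes 20ms * 15 / 10 = 30 ms, preserving the 2 ms average slice.
 */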

static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	u64 period = __sched_period(cfs_rq->nr_running);

	period *= se->load.weight;
	do_div(period, cfs_rq->load.weight);

	return period;
}

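/*
 * sched_slice() gives each entity a wall-clock share of the period in
 * proportion to its load weight. Example: two equal-weight tasks split
 * a 20 ms period 10 ms each; if one weighs twice the other, the split
 * is roughly 13.3 ms / 6.7 ms.
 */
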
static u64 __sched_vslice(unsigned long nr_running)
{
	u64 period = __sched_period(nr_running);

	do_div(period, nr_running);

	return period;
}

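/*
 * __sched_vslice() is the even, per-entity share of the period (the
 * period divided by nr_running); place_entity() below uses it to decide
 * how much vruntime to debit a freshly forked task, or to approximate
 * the tree average for placement.
 */
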
/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static inline void
__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
	      unsigned long delta_exec)
{
	unsigned long delta_exec_weighted;
	u64 next_vruntime, min_vruntime;

	schedstat_set(curr->exec_max, max((u64)delta_exec, curr->exec_max));

	curr->sum_exec_runtime += delta_exec;
	schedstat_add(cfs_rq, exec_clock, delta_exec);
	delta_exec_weighted = delta_exec;
	if (unlikely(curr->load.weight != NICE_0_LOAD)) {
		delta_exec_weighted = calc_delta_fair(delta_exec_weighted,
							&curr->load);
	}
	curr->vruntime += delta_exec_weighted;

	/*
	 * Maintain cfs_rq->min_vruntime as a monotonically increasing
	 * value tracking the leftmost vruntime in the tree.
	 */
	if (first_fair(cfs_rq)) {
		next_vruntime = __pick_next_entity(cfs_rq)->vruntime;

		/* min_vruntime() := !max_vruntime() */
		min_vruntime = max_vruntime(curr->vruntime, next_vruntime);
		if (min_vruntime == next_vruntime)
			min_vruntime = curr->vruntime;
		else
			min_vruntime = next_vruntime;
	} else
		min_vruntime = curr->vruntime;

	cfs_rq->min_vruntime =
		max_vruntime(cfs_rq->min_vruntime, min_vruntime);
}
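
/*
 * Weighting note: calc_delta_fair() scales the raw runtime by roughly
 * NICE_0_LOAD / curr->load.weight, so a nice-0 task accrues vruntime at
 * wall-clock speed while a task of twice that weight accrues it at half
 * speed -- heavier tasks fall behind in vruntime more slowly and
 * therefore get picked to run more often.
 */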

static void update_curr(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr = cfs_rq->curr;
	u64 now = rq_of(cfs_rq)->clock;
	unsigned long delta_exec;

	if (unlikely(!curr))
		return;

	/*
	 * Get the amount of time the current task was running
	 * since the last time we changed load (this cannot
	 * overflow on 32 bits):
	 */
	delta_exec = (unsigned long)(now - curr->exec_start);

	__update_curr(cfs_rq, curr, delta_exec);
	curr->exec_start = now;
}

static inline void
update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_start, rq_of(cfs_rq)->clock);
}

static inline unsigned long
calc_weighted(unsigned long delta, struct sched_entity *se)
{
	unsigned long weight = se->load.weight;

	if (unlikely(weight != NICE_0_LOAD))
		return (u64)delta * se->load.weight >> NICE_0_SHIFT;
	else
		return delta;
}

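/*
 * Scaled-math note: NICE_0_LOAD is 1 << NICE_0_SHIFT (1024 with the
 * standard scale), so the expression above computes delta * weight /
 * 1024 with a shift instead of a division; for nice-0 weight, delta is
 * returned unscaled.
 */
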
/*
 * Task is being enqueued - update stats:
 */
static void update_stats_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * Are we enqueueing a waiting task? (for current tasks
	 * a dequeue/enqueue event is a NOP)
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_start(cfs_rq, se);
}

static void
update_stats_wait_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	schedstat_set(se->wait_max, max(se->wait_max,
			rq_of(cfs_rq)->clock - se->wait_start));
	schedstat_set(se->wait_start, 0);
}

static inline void
update_stats_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_curr(cfs_rq);
	/*
	 * Mark the end of the wait period if dequeueing a
	 * waiting task:
	 */
	if (se != cfs_rq->curr)
		update_stats_wait_end(cfs_rq, se);
}

/*
 * We are picking a new current task - update its stats:
 */
static inline void
update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/*
	 * We are starting a new run period:
	 */
	se->exec_start = rq_of(cfs_rq)->clock;
}

/*
 * We are descheduling a task - update its stats:
 */
static inline void
update_stats_curr_end(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	se->exec_start = 0;
}

/**************************************************
 * Scheduling class queueing methods:
 */

static void
account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_add(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running++;
	se->on_rq = 1;
}

static void
account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	update_load_sub(&cfs_rq->load, se->load.weight);
	cfs_rq->nr_running--;
	se->on_rq = 0;
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
#ifdef CONFIG_SCHEDSTATS
	if (se->sleep_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->sleep_max))
			se->sleep_max = delta;

		se->sleep_start = 0;
		se->sum_sleep_runtime += delta;
	}
	if (se->block_start) {
		u64 delta = rq_of(cfs_rq)->clock - se->block_start;

		if ((s64)delta < 0)
			delta = 0;

		if (unlikely(delta > se->block_max))
			se->block_max = delta;

		se->block_start = 0;
		se->sum_sleep_runtime += delta;

		/*
		 * Blocking time is in units of nanosecs, so shift by 20 to
		 * get a milliseconds-range estimation of the amount of
		 * time that the task spent sleeping:
		 */
		if (unlikely(prof_on == SLEEP_PROFILING)) {
			struct task_struct *tsk = task_of(se);

			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
				     delta >> 20);
		}
	}
#endif
}

static void
place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
	u64 vruntime;

	vruntime = cfs_rq->min_vruntime;

	if (sched_feat(USE_TREE_AVG)) {
		struct sched_entity *last = __pick_last_entity(cfs_rq);
		if (last) {
			vruntime += last->vruntime;
			vruntime >>= 1;
		}
	} else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
		vruntime += __sched_vslice(cfs_rq->nr_running)/2;

	if (initial && sched_feat(START_DEBIT))
		vruntime += __sched_vslice(cfs_rq->nr_running + 1);

	if (!initial && sched_feat(NEW_FAIR_SLEEPERS)) {
		s64 latency = cfs_rq->min_vruntime - se->last_min_vruntime;
		if (latency < 0 || !cfs_rq->nr_running)
			latency = 0;
		else
			latency = min_t(s64, latency, sysctl_sched_latency);
		vruntime -= latency;
	}

	se->vruntime = vruntime;
}
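
/*
 * Placement summary: a freshly forked entity (initial) starts with one
 * extra vslice of vruntime (START_DEBIT), pushing it rightward in the
 * tree so frequent forkers cannot farm fresh timeslices; a waking
 * sleeper (!initial) has up to sysctl_sched_latency of vruntime
 * subtracted as credit for its sleep, letting it preempt promptly.
 */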

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int wakeup)
{
	/*
	 * Update the fair clock.
	 */
	update_curr(cfs_rq);

	if (wakeup) {
		/* se->vruntime += cfs_rq->min_vruntime; */
		place_entity(cfs_rq, se, 0);
		enqueue_sleeper(cfs_rq, se);
	}

	update_stats_enqueue(cfs_rq, se);
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
	account_entity_enqueue(cfs_rq, se);
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int sleep)
{
	update_stats_dequeue(cfs_rq, se);
	if (sleep) {
#ifdef CONFIG_SCHEDSTATS
		if (entity_is_task(se)) {
			struct task_struct *tsk = task_of(se);

			if (tsk->state & TASK_INTERRUPTIBLE)
				se->sleep_start = rq_of(cfs_rq)->clock;
			if (tsk->state & TASK_UNINTERRUPTIBLE)
				se->block_start = rq_of(cfs_rq)->clock;
		}
#endif
		/* se->vruntime = entity_key(cfs_rq, se); */
		se->last_min_vruntime = cfs_rq->min_vruntime;
	}

	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	account_entity_dequeue(cfs_rq, se);
}

/*
 * Preempt the current task if it has run beyond its ideal slice:
 */
static void
check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	unsigned long ideal_runtime, delta_exec;

	ideal_runtime = sched_slice(cfs_rq, curr);
	delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
	if (delta_exec > ideal_runtime)
		resched_task(rq_of(cfs_rq)->curr);
}

static void
set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* 'current' is not kept within the tree. */
	if (se->on_rq) {
		/*
		 * Any task has to be enqueued before it gets to execute on
		 * a CPU. So account for the time it spent waiting on the
		 * runqueue.
		 */
		update_stats_wait_end(cfs_rq, se);
		__dequeue_entity(cfs_rq, se);
	}

	update_stats_curr_start(cfs_rq, se);
	cfs_rq->curr = se;
#ifdef CONFIG_SCHEDSTATS
	/*
	 * Track our maximum slice length, if the CPU's load is at
	 * least twice that of our own weight (i.e. don't track it
	 * when there are only lesser-weight tasks around):
	 */
	if (rq_of(cfs_rq)->load.weight >= 2*se->load.weight) {
		se->slice_max = max(se->slice_max,
			se->sum_exec_runtime - se->prev_sum_exec_runtime);
	}
#endif
	se->prev_sum_exec_runtime = se->sum_exec_runtime;
}

static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
	struct sched_entity *se = __pick_next_entity(cfs_rq);

	set_next_entity(cfs_rq, se);

	return se;
}

static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev)
{
	/*
	 * If still on the runqueue then deactivate_task()
	 * was not called and update_curr() has to be done:
	 */
	if (prev->on_rq)
		update_curr(cfs_rq);

	update_stats_curr_end(cfs_rq, prev);

	if (prev->on_rq) {
		update_stats_wait_start(cfs_rq, prev);
		/* Put 'current' back into the tree. */
		__enqueue_entity(cfs_rq, prev);
	}
	cfs_rq->curr = NULL;
}

static void entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
{
	/*
	 * Update run-time statistics of the 'current'.
	 */
	update_curr(cfs_rq);

	if (cfs_rq->nr_running > 1)
		check_preempt_tick(cfs_rq, curr);
}

/**************************************************
 * CFS operations on tasks:
 */

#ifdef CONFIG_FAIR_GROUP_SCHED

/* Walk up the scheduling entity hierarchy */
#define for_each_sched_entity(se) \
		for (; se; se = se->parent)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return p->se.cfs_rq;
}

/* runqueue on which this entity is (to be) queued */
static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	return se->cfs_rq;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return grp->my_q;
}

/*
 * Given a group's cfs_rq on one cpu, return its corresponding cfs_rq on
 * another cpu ('this_cpu'):
 */
static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return cfs_rq->tg->cfs_rq[this_cpu];
}

/* Iterate through all leaf cfs_rq's on a runqueue */
#define for_each_leaf_cfs_rq(rq, cfs_rq) \
	list_for_each_entry(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)

/* Do the two (enqueued) tasks belong to the same group? */
static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
	if (curr->se.cfs_rq == p->se.cfs_rq)
		return 1;

	return 0;
}

#else	/* CONFIG_FAIR_GROUP_SCHED */

#define for_each_sched_entity(se) \
		for (; se; se = NULL)

static inline struct cfs_rq *task_cfs_rq(struct task_struct *p)
{
	return &task_rq(p)->cfs;
}

static inline struct cfs_rq *cfs_rq_of(struct sched_entity *se)
{
	struct task_struct *p = task_of(se);
	struct rq *rq = task_rq(p);

	return &rq->cfs;
}

/* runqueue "owned" by this group */
static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp)
{
	return NULL;
}

static inline struct cfs_rq *cpu_cfs_rq(struct cfs_rq *cfs_rq, int this_cpu)
{
	return &cpu_rq(this_cpu)->cfs;
}

#define for_each_leaf_cfs_rq(rq, cfs_rq) \
		for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)

static inline int is_same_group(struct task_struct *curr, struct task_struct *p)
{
	return 1;
}

#endif	/* CONFIG_FAIR_GROUP_SCHED */

/*
 * The enqueue_task method is called before nr_running is
 * increased. Here we update the fair scheduling stats and
 * then put the task into the rbtree:
 */
static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		if (se->on_rq)
			break;
		cfs_rq = cfs_rq_of(se);
		enqueue_entity(cfs_rq, se, wakeup);
	}
}

/*
 * The dequeue_task method is called before nr_running is
 * decreased. We remove the task from the rbtree and
 * update the fair scheduling stats:
 */
static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int sleep)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &p->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		dequeue_entity(cfs_rq, se, sleep);
		/* Don't dequeue parent if it has other entities besides us */
		if (cfs_rq->load.weight)
			break;
	}
}

/*
 * sched_yield() support is very simple - we dequeue and enqueue.
 *
 * If compat_yield is turned on then we requeue to the end of the tree.
 */
static void yield_task_fair(struct rq *rq)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(rq->curr);
	struct rb_node **link = &cfs_rq->tasks_timeline.rb_node;
	struct sched_entity *rightmost, *se = &rq->curr->se;
	struct rb_node *parent;

	/*
	 * Are we the only task in the tree?
	 */
	if (unlikely(cfs_rq->nr_running == 1))
		return;

	if (likely(!sysctl_sched_compat_yield)) {
		__update_rq_clock(rq);
		/*
		 * Dequeue and enqueue the task to update its
		 * position within the tree:
		 */
		dequeue_entity(cfs_rq, se, 0);
		enqueue_entity(cfs_rq, se, 0);

		return;
	}
	/*
	 * Find the rightmost entry in the rbtree:
	 */
	do {
		parent = *link;
		link = &parent->rb_right;
	} while (*link);

	rightmost = rb_entry(parent, struct sched_entity, run_node);
	/*
	 * Already in the rightmost position?
	 */
	if (unlikely(rightmost == se))
		return;

	/*
	 * Minimally necessary key value to be last in the tree:
	 */
	se->vruntime = rightmost->vruntime + 1;

	if (cfs_rq->rb_leftmost == &se->run_node)
		cfs_rq->rb_leftmost = rb_next(&se->run_node);
	/*
	 * Relink the task to the rightmost position:
	 */
	rb_erase(&se->run_node, &cfs_rq->tasks_timeline);
	rb_link_node(&se->run_node, parent, link);
	rb_insert_color(&se->run_node, &cfs_rq->tasks_timeline);
}
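
/*
 * Note on compat mode: setting the yielding task's vruntime to
 * rightmost->vruntime + 1 is the smallest key that sorts it last, so
 * the relink above moves it behind every currently queued entity --
 * the "aggressive yield" behaviour of the old scheduler.
 */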

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
{
	struct task_struct *curr = rq->curr;
	struct cfs_rq *cfs_rq = task_cfs_rq(curr);

	if (unlikely(rt_prio(p->prio))) {
		update_rq_clock(rq);
		update_curr(cfs_rq);
		resched_task(curr);
		return;
	}
	if (is_same_group(curr, p)) {
		s64 delta = curr->se.vruntime - p->se.vruntime;

		if (delta > (s64)sysctl_sched_wakeup_granularity)
			resched_task(curr);
	}
}
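
/*
 * The granularity check above means the woken task preempts only when
 * its vruntime trails current's by more than
 * sysctl_sched_wakeup_granularity (2 ms by default), which damps
 * back-and-forth preemption between tasks of nearly equal vruntime.
 */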

static struct task_struct *pick_next_task_fair(struct rq *rq)
{
	struct cfs_rq *cfs_rq = &rq->cfs;
	struct sched_entity *se;

	if (unlikely(!cfs_rq->nr_running))
		return NULL;

	do {
		se = pick_next_entity(cfs_rq);
		cfs_rq = group_cfs_rq(se);
	} while (cfs_rq);

	return task_of(se);
}

/*
 * Account for a descheduled task:
 */
static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
{
	struct sched_entity *se = &prev->se;
	struct cfs_rq *cfs_rq;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		put_prev_entity(cfs_rq, se);
	}
}

/**************************************************
 * Fair scheduling class load-balancing methods:
 */

/*
 * Load-balancing iterator. Note: while the runqueue stays locked
 * during the whole iteration, the current task might be
 * dequeued so the iterator has to be dequeue-safe. Here we
 * achieve that by always pre-iterating before returning
 * the current task:
 */
static inline struct task_struct *
__load_balance_iterator(struct cfs_rq *cfs_rq, struct rb_node *curr)
{
	struct task_struct *p;

	if (!curr)
		return NULL;

	p = rb_entry(curr, struct task_struct, se.run_node);
	cfs_rq->rb_load_balance_curr = rb_next(curr);

	return p;
}

static struct task_struct *load_balance_start_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, first_fair(cfs_rq));
}

static struct task_struct *load_balance_next_fair(void *arg)
{
	struct cfs_rq *cfs_rq = arg;

	return __load_balance_iterator(cfs_rq, cfs_rq->rb_load_balance_curr);
}

#ifdef CONFIG_FAIR_GROUP_SCHED
static int cfs_rq_best_prio(struct cfs_rq *cfs_rq)
{
	struct sched_entity *curr;
	struct task_struct *p;

	if (!cfs_rq->nr_running)
		return MAX_PRIO;

	curr = cfs_rq->curr;
	if (!curr)
		curr = __pick_next_entity(cfs_rq);

	p = task_of(curr);

	return p->prio;
}
#endif

static unsigned long
load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
		  unsigned long max_nr_move, unsigned long max_load_move,
		  struct sched_domain *sd, enum cpu_idle_type idle,
		  int *all_pinned, int *this_best_prio)
{
	struct cfs_rq *busy_cfs_rq;
	unsigned long load_moved, total_nr_moved = 0, nr_moved;
	long rem_load_move = max_load_move;
	struct rq_iterator cfs_rq_iterator;

	cfs_rq_iterator.start = load_balance_start_fair;
	cfs_rq_iterator.next = load_balance_next_fair;

	for_each_leaf_cfs_rq(busiest, busy_cfs_rq) {
#ifdef CONFIG_FAIR_GROUP_SCHED
		struct cfs_rq *this_cfs_rq;
		long imbalance;
		unsigned long maxload;

		this_cfs_rq = cpu_cfs_rq(busy_cfs_rq, this_cpu);

		imbalance = busy_cfs_rq->load.weight - this_cfs_rq->load.weight;
		/* Don't pull if this_cfs_rq has more load than busy_cfs_rq */
		if (imbalance <= 0)
			continue;

		/* Don't pull more than imbalance/2 */
		imbalance /= 2;
		maxload = min(rem_load_move, imbalance);

		*this_best_prio = cfs_rq_best_prio(this_cfs_rq);
#else
# define maxload rem_load_move
#endif
		/*
		 * Pass the busy_cfs_rq argument into the
		 * load_balance_[start|next]_fair iterators:
		 */
		cfs_rq_iterator.arg = busy_cfs_rq;
		nr_moved = balance_tasks(this_rq, this_cpu, busiest,
				max_nr_move, maxload, sd, idle, all_pinned,
				&load_moved, this_best_prio, &cfs_rq_iterator);

		total_nr_moved += nr_moved;
		max_nr_move -= nr_moved;
		rem_load_move -= load_moved;

		if (max_nr_move <= 0 || rem_load_move <= 0)
			break;
	}

	return max_load_move - rem_load_move;
}

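/*
 * Balancing note: pulling at most half the weight imbalance from each
 * busy cfs_rq avoids over-correcting and ping-ponging load between the
 * two runqueues; the return value is the amount of load actually moved.
 */
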
/*
 * scheduler tick hitting a task of our scheduling class:
 */
static void task_tick_fair(struct rq *rq, struct task_struct *curr)
{
	struct cfs_rq *cfs_rq;
	struct sched_entity *se = &curr->se;

	for_each_sched_entity(se) {
		cfs_rq = cfs_rq_of(se);
		entity_tick(cfs_rq, se);
	}
}

#define swap(a,b) do { typeof(a) tmp = (a); (a) = (b); (b) = tmp; } while (0)

/*
 * Share the fairness runtime between parent and child, so the total
 * amount of pressure on the CPU stays equal - new tasks get a chance
 * to run but frequent forkers are not allowed to monopolize the CPU.
 * Note: the parent runqueue is locked, the child is not running yet.
 */
static void task_new_fair(struct rq *rq, struct task_struct *p)
{
	struct cfs_rq *cfs_rq = task_cfs_rq(p);
	struct sched_entity *se = &p->se, *curr = cfs_rq->curr;

	sched_info_queued(p);

	update_curr(cfs_rq);
	place_entity(cfs_rq, se, 1);

	if (sysctl_sched_child_runs_first &&
			curr->vruntime < se->vruntime) {
		/*
		 * Upon rescheduling, sched_class::put_prev_task() will place
		 * 'current' within the tree based on its new key value.
		 */
		swap(curr->vruntime, se->vruntime);
	}

	update_stats_enqueue(cfs_rq, se);
	__enqueue_entity(cfs_rq, se);
	account_entity_enqueue(cfs_rq, se);
	resched_task(rq->curr);
}
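
/*
 * The swap above enforces sysctl_sched_child_runs_first: if the
 * START_DEBIT placement left the child with a larger vruntime than the
 * parent's, exchanging the two values makes the child the leftmost of
 * the pair, so it is picked before the parent on the next schedule.
 */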

/*
 * Account for a task changing its policy or group.
 *
 * This routine is mostly called to set cfs_rq->curr field when a task
 * migrates between groups/classes.
 */
static void set_curr_task_fair(struct rq *rq)
{
	struct sched_entity *se = &rq->curr->se;

	for_each_sched_entity(se)
		set_next_entity(cfs_rq_of(se), se);
}

/*
 * All the scheduling class methods:
 */
struct sched_class fair_sched_class __read_mostly = {
	.enqueue_task		= enqueue_task_fair,
	.dequeue_task		= dequeue_task_fair,
	.yield_task		= yield_task_fair,

	.check_preempt_curr	= check_preempt_wakeup,

	.pick_next_task		= pick_next_task_fair,
	.put_prev_task		= put_prev_task_fair,

	.load_balance		= load_balance_fair,

	.set_curr_task		= set_curr_task_fair,
	.task_tick		= task_tick_fair,
	.task_new		= task_new_fair,
};

#ifdef CONFIG_SCHED_DEBUG
static void print_cfs_stats(struct seq_file *m, int cpu)
{
	struct cfs_rq *cfs_rq;

#ifdef CONFIG_FAIR_GROUP_SCHED
	print_cfs_rq(m, cpu, &cpu_rq(cpu)->cfs);
#endif
	for_each_leaf_cfs_rq(cpu_rq(cpu), cfs_rq)
		print_cfs_rq(m, cpu, cfs_rq);
}
#endif