/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
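
/*
 * Illustrative example (made-up parameters, not from this file): a task
 * with dl_runtime = 10ms every dl_period = 100ms reserves 10% of a CPU.
 * As long as each of its instances needs at most 10ms of CPU time, EDF
 * guarantees it meets every deadline; if an instance tries to run longer,
 * the CBS throttles it instead of letting it disturb other reservations.
 */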
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

extern unsigned long to_ratio(u64 period, u64 runtime);

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}

void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
{
	dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);
	dl_rq = &rq_of_dl_rq(dl_rq)->dl;

	dl_rq->dl_nr_total++;
	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);
	dl_rq = &rq_of_dl_rq(dl_rq)->dl;

	dl_rq->dl_nr_total--;
	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a
 * -deadline entity wants to (try to!) synchronize its behaviour with
 * the scheduler's one, and to (try to!) reconcile itself with its own
 * scheduling parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
				       struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
	dl_se->runtime = pi_se->dl_runtime;
	dl_se->dl_new = 0;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to overcome its
 * runtime, or it just underestimated it during sched_setscheduler_ex().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		static bool lag_once = false;

		if (!lag_once) {
			lag_once = true;
			printk_sched("sched: DL replenish lagged too much\n");
		}
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
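
/*
 * Worked example of the replenishment above (illustrative numbers, not
 * taken from any real task): with dl_runtime = 10ms and dl_period = 100ms,
 * an entity that got throttled at runtime = -15ms needs two loop
 * iterations: runtime goes -15ms -> -5ms -> 5ms, while the deadline is
 * postponed by two full periods (200ms). The overrun is thus paid back
 * before the entity can run again, which is what keeps it inside its
 * declared bandwidth.
 */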

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * a task with deadline equal to its period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microsecond resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
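
/*
 * Worked example of the overflow check (illustrative numbers, not from
 * the source): take dl_runtime = 10ms and dl_period = 100ms, i.e. a
 * reserved bandwidth of 0.1. A task waking with runtime = 4ms left and
 * deadline - t = 30ms would need 4/30 ~= 0.13 of a CPU to meet the old
 * deadline, which exceeds 0.1: left = 100 * 4 > right = 30 * 10, so the
 * function returns true and the parameters are refreshed. Had runtime
 * been 2ms, 2/30 ~= 0.07 fits in the reservation (left = 200 < 300) and
 * the old deadline and budget can be recycled.
 */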

/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
	if (dl_se->dl_new) {
		setup_new_dl_entity(dl_se, pi_se);
		return;
	}

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	ktime_t now, act;
	ktime_t soft, hard;
	unsigned long range;
	s64 delta;

	if (boosted)
		return 0;
	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_se->deadline);
	now = hrtimer_cb_get_time(&dl_se->dl_timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	hrtimer_set_expires(&dl_se->dl_timer, act);

	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
	hard = hrtimer_get_expires(&dl_se->dl_timer);
	range = ktime_to_ns(ktime_sub(hard, soft));
	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
				 range, HRTIMER_MODE_ABS, 0);

	return hrtimer_active(&dl_se->dl_timer);
}
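
/*
 * Illustrative example of the clock-base conversion above (made-up
 * numbers): suppose dl_se->deadline = 105ms on the rq->clock base, and
 * that at this instant rq_clock(rq) = 100ms while the hrtimer base reads
 * 100.2ms. Then delta = 0.2ms and act = 105.2ms, so the timer still fires
 * exactly 5ms from now, as intended, despite the two clocks disagreeing.
 */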

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is still
 * active (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	raw_spin_lock(&rq->lock);

	/*
	 * We need to take care of possible races here. In fact, the
	 * task might have changed its scheduling policy to something
	 * different from SCHED_DEADLINE or changed its reservation
	 * parameters (through sched_setscheduler()).
	 */
	if (!dl_task(p) || dl_se->dl_new)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);
	dl_se->dl_throttled = 0;
	if (p->on_rq) {
		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
		if (task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_task(rq->curr);
#ifdef CONFIG_SMP
		/*
		 * Queueing this task back might have overloaded rq,
		 * check if we need to kick someone away.
		 */
		if (has_pushable_dl_tasks(rq))
			push_dl_task(rq);
#endif
	}
unlock:
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	if (hrtimer_active(timer)) {
		hrtimer_try_to_cancel(timer);
		return;
	}

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
	int rorun = dl_se->runtime <= 0;

	if (!rorun && !dmiss)
		return 0;

	/*
	 * If we are beyond our current deadline and we are still
	 * executing, then we have already used some of the runtime of
	 * the next instance. Thus, if we do not account that, we are
	 * stealing bandwidth from the system at each deadline miss!
	 */
	if (dmiss) {
		dl_se->runtime = rorun ? dl_se->runtime : 0;
		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
	}

	return 1;
}
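
/*
 * Example of the accounting above (illustrative numbers): a task that
 * still had runtime = 1ms left but is caught 2ms past its deadline is
 * not in runtime overrun (rorun == 0), so its residual budget is first
 * dropped to 0 and then charged the 2ms executed beyond the deadline,
 * leaving runtime = -2ms. The subsequent replenishment therefore pays
 * for the bandwidth already stolen from the next instance.
 */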

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec < 0))
		delta_exec = 0;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	dl_se->runtime -= delta_exec;
	if (dl_runtime_exceeded(rq, dl_se)) {
		__dequeue_task_dl(rq, curr, 0);
		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
			dl_se->dl_throttled = 1;
		else
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_task(curr);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_time += delta_exec;
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline -- see above.
		 */
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

#ifdef CONFIG_SMP

static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);

static inline u64 next_deadline(struct rq *rq)
{
	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);

	if (next && dl_prio(next->prio))
		return next->dl.deadline;
	else
		return 0;
}

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		/*
		 * If the dl_rq had no -deadline tasks, or if the new task
		 * has a shorter deadline than the current one on dl_rq, we
		 * know that the previous earliest becomes our next earliest,
		 * as the new task becomes the earliest itself.
		 */
		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
	} else if (dl_rq->earliest_dl.next == 0 ||
		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
		/*
		 * On the other hand, if the new -deadline task has a
		 * later deadline than the earliest one on dl_rq, but
		 * it is earlier than the next (if any), we must
		 * recompute the next-earliest.
		 */
		dl_rq->earliest_dl.next = next_deadline(rq);
	}
}
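
/*
 * Example of the earliest/next bookkeeping above (made-up deadlines):
 * with earliest_dl.curr = 50ms and earliest_dl.next = 70ms, enqueueing
 * a task with deadline 40ms takes the first branch: next becomes 50ms
 * and curr becomes 40ms. Enqueueing one with deadline 60ms instead takes
 * the second branch and only next is recomputed (to 60ms), while curr
 * stays at 50ms.
 */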

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		dl_rq->earliest_dl.next = next_deadline(rq);
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
		replenish_dl_entity(dl_se, pi_se);
	else
		update_dl_entity(dl_se, pi_se);

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if we have one and its (relative) deadline is
	 * smaller than our one... OTW we keep our runtime and
	 * deadline.
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
		pi_se = &pi_task->dl;

	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
	if (p->dl.dl_throttled)
		return;

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);

	inc_nr_running(rq);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);

	dec_nr_running(rq);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_new=1).
	 */
	if (p->dl.runtime > 0) {
		rq->curr->dl.dl_new = 1;
		p->dl.runtime = 0;
	}
	update_curr_dl(rq);
}

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_task(rq->curr);
}

static int pull_dl_task(struct rq *this_rq);

#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_task(rq->curr);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	s64 delta = p->dl.dl_runtime - p->dl.runtime;

	if (delta > 10000)
		hrtick_start(rq, p->dl.runtime);
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}

struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

#ifdef CONFIG_SMP
	if (dl_task(prev))
		pull_dl_task(rq);
#endif

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	if (prev)
		prev->sched_class->put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);
#endif

#ifdef CONFIG_SMP
	rq->post_schedule = has_pushable_dl_tasks(rq);
#endif /* CONFIG_SMP */

	return p;
}

static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
		start_hrtick_dl(rq, p);
#endif
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork()
	 */
}

static void task_dead_dl(struct task_struct *p)
{
	struct hrtimer *timer = &p->dl.dl_timer;
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	/*
	 * Since we are TASK_DEAD we won't slip out of the domain!
	 */
	raw_spin_lock_irq(&dl_b->lock);
	dl_b->total_bw -= p->dl.dl_bw;
	raw_spin_unlock_irq(&dl_b->lock);

	hrtimer_cancel(timer);
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
	    (p->nr_cpus_allowed > 1))
		return 1;

	return 0;
}

/* Returns the second earliest -deadline task, NULL otherwise */
static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.rb_leftmost;
	struct sched_dl_entity *dl_se;
	struct task_struct *p = NULL;

next_node:
	next_node = rb_next(next_node);
	if (next_node) {
		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
		p = dl_task_of(dl_se);

		if (pick_dl_task(rq, p, cpu))
			return p;

		goto next_node;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int best_cpu, cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
			      task, later_mask);
	if (best_cpu == -1)
		return -1;

	/*
	 * If we are here, some target has been found,
	 * the most suitable of which is cached in best_cpu.
	 * This is, among the runqueues where the current tasks
	 * have later deadlines than the task's one, the rq
	 * with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			/*
			 * Last chance: if best_cpu is valid and is
			 * in the mask, that becomes our choice.
			 */
			if (best_cpu < nr_cpu_ids &&
			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}

/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) || !task->on_rq)) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!p->on_rq);
	BUG_ON(!dl_task(p));

	return p;
}

/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_task(rq->curr);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			dequeue_pushable_dl_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);

	resched_task(later_rq->curr);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return 1;
}

static void push_dl_tasks(struct rq *rq)
{
	/* Terminates as it moves a -deadline task */
	while (push_dl_task(rq))
		;
}

static int pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return 0;

	/*
	 * Match the barrier from dl_set_overload; this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_next_earliest_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!p->on_rq);

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void post_schedule_dl(struct rq *rq)
{
	push_dl_tasks(rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_dl_tasks(rq) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     dl_entity_preempt(&rq->curr->dl, &p->dl))) {
		push_dl_tasks(rq);
	}
}

static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	int weight;

	BUG_ON(!dl_task(p));

	/*
	 * Update only if the task is actually running (i.e.,
	 * it is on the rq AND it is not throttled).
	 */
	if (!on_dl_rq(&p->dl))
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the task changes whether or not
	 * it can migrate.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	rq = task_rq(p);

	/*
	 * The task used to be able to migrate OR it can now migrate
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_dl_task(rq, p);
		BUG_ON(!rq->dl.dl_nr_migratory);
		rq->dl.dl_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_dl_task(rq, p);
		rq->dl.dl_nr_migratory++;
	}

	update_dl_migration(&rq->dl);
}

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
}

void init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
		hrtimer_try_to_cancel(&p->dl.dl_timer);

#ifdef CONFIG_SMP
	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!rq->dl.dl_nr_running)
		pull_dl_task(rq);
#endif
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If p is throttled, don't consider the possibility
	 * of preempting rq->curr, the check will be done right
	 * after its runtime gets replenished.
	 */
	if (unlikely(p->dl.dl_throttled))
		return;

	if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
			/* Only reschedule if pushing failed */
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
	}
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (p->on_rq || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't argue if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			pull_dl_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
		    rq->curr == p)
			resched_task(p);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_task(p);
#endif /* CONFIG_SMP */
	} else
		switched_to_dl(rq, p);
}

const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.post_schedule		= post_schedule_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,
	.task_dead		= task_dead_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,
};