/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
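
/*
 * For reference, a task requests a -deadline reservation from userspace
 * through the sched_setattr() syscall. A minimal sketch, with illustrative
 * parameters only (times are in nanoseconds and must satisfy
 * runtime <= deadline <= period):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10000000,	10ms of budget...
 *		.sched_deadline	= 30000000,	...within 30ms of each wakeup...
 *		.sched_period	= 100000000,	...every 100ms.
 *	};
 *	sched_setattr(0, &attr, 0);
 */
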
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
	return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq = task_rq(p);

	return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
	return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
	struct sched_dl_entity *dl_se = &p->dl;

	return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
	raw_spin_lock_init(&dl_b->dl_runtime_lock);
	dl_b->dl_period = period;
	dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
	raw_spin_lock_init(&dl_b->lock);
	raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
	if (global_rt_runtime() == RUNTIME_INF)
		dl_b->bw = -1;
	else
		dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
	raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
	dl_b->total_bw = 0;
}
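
/*
 * Illustrative example of the cap computed above: with the default
 * sysctls, global_rt_period() is 1s and global_rt_runtime() is 0.95s,
 * so dl_b->bw = to_ratio(1s, 0.95s). Assuming to_ratio()'s 20-bit
 * fixed point (runtime << 20 / period), that is roughly
 * 0.95 * 2^20 ~= 996147: admission control will refuse any task set
 * whose total -deadline bandwidth would exceed 95% of a CPU.
 */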

void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
{
	dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
	/* zero means no -deadline tasks */
	dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

	dl_rq->dl_nr_migratory = 0;
	dl_rq->overloaded = 0;
	dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
	init_dl_bw(&dl_rq->dl_bw);
#endif
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
	/*
	 * Must be visible before the overload count is
	 * set (as in sched_rt.c).
	 *
	 * Matched by the barrier in pull_dl_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	atomic_dec(&rq->rd->dlo_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
	if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
		if (!dl_rq->overloaded) {
			dl_set_overload(rq_of_dl_rq(dl_rq));
			dl_rq->overloaded = 1;
		}
	} else if (dl_rq->overloaded) {
		dl_clear_overload(rq_of_dl_rq(dl_rq));
		dl_rq->overloaded = 0;
	}
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory++;

	update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	struct task_struct *p = dl_task_of(dl_se);

	if (p->nr_cpus_allowed > 1)
		dl_rq->dl_nr_migratory--;

	update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, but an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;
	struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
	struct rb_node *parent = NULL;
	struct task_struct *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct task_struct,
				 pushable_dl_tasks);
		if (dl_entity_preempt(&p->dl, &entry->dl))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;

	rb_link_node(&p->pushable_dl_tasks, parent, link);
	rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
	struct dl_rq *dl_rq = &rq->dl;

	if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
		return;

	if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
		struct rb_node *next_node;

		next_node = rb_next(&p->pushable_dl_tasks);
		dl_rq->pushable_dl_tasks_leftmost = next_node;
	}

	rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
	RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
	return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return dl_task(prev);
}

static inline void set_post_schedule(struct rq *rq)
{
	rq->post_schedule = has_pushable_dl_tasks(rq);
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline int pull_dl_task(struct rq *rq)
{
	return 0;
}

static inline void set_post_schedule(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a
 * -deadline entity wants to (try to!) synchronize its behaviour with
 * the scheduler's one, and to (try to!) reconcile itself with its
 * own scheduling parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
				       struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);

	/*
	 * We use the regular wall clock time to set deadlines in the
	 * future; in fact, we must consider execution overheads (time
	 * spent on hardirq context, etc.).
	 */
	dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
	dl_se->runtime = pi_se->dl_runtime;
	dl_se->dl_new = 0;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or having underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
				struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	BUG_ON(pi_se->dl_runtime <= 0);

	/*
	 * This could be the case for a !-dl task that is boosted.
	 * Just go with full inherited parameters.
	 */
	if (dl_se->dl_deadline == 0) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}

	/*
	 * We keep moving the deadline away until we get some
	 * available runtime for the entity. This ensures correct
	 * handling of situations where the runtime overrun is
	 * arbitrarily large.
	 */
	while (dl_se->runtime <= 0) {
		dl_se->deadline += pi_se->dl_period;
		dl_se->runtime += pi_se->dl_runtime;
	}

	/*
	 * At this point, the deadline really should be "in
	 * the future" with respect to rq->clock. If it's
	 * not, we are, for some reason, lagging too much!
	 * Anyway, after having warned userspace about that,
	 * we still try to keep things running by
	 * resetting the deadline and the budget of the
	 * entity.
	 */
	if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
		printk_deferred_once("sched: DL replenish lagged too much\n");
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}
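
/*
 * A worked example of the loop above (illustrative numbers only):
 * with dl_runtime = 10ms and dl_period = 100ms, an entity that is
 * 25ms in the red (runtime = -25ms) needs three replenishments:
 *
 *	-25ms + 10ms = -15ms	(deadline += 100ms)
 *	-15ms + 10ms =  -5ms	(deadline += 100ms)
 *	 -5ms + 10ms =   5ms	(deadline += 100ms)
 *
 * i.e. the deadline moves 300ms away, so the bandwidth consumed while
 * overrunning is paid back before fresh runtime becomes usable.
 */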

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
			       struct sched_dl_entity *pi_se, u64 t)
{
	u64 left, right;

	/*
	 * left and right are the two sides of the equation above,
	 * after a bit of shuffling to use multiplications instead
	 * of divisions.
	 *
	 * Note that none of the time values involved in the two
	 * multiplications are absolute: dl_deadline and dl_runtime
	 * are the relative deadline and the maximum runtime of each
	 * instance, runtime is the runtime left for the last instance
	 * and (deadline - t), since t is rq->clock, is the time left
	 * to the (absolute) deadline. Even if overflowing the u64 type
	 * is very unlikely to occur in both cases, here we scale down
	 * as we want to avoid that risk at all. Scaling down by 10
	 * means that we reduce granularity to 1us. We are fine with it,
	 * since this is only a true/false check and, anyway, thinking
	 * of anything below microseconds resolution is actually fiction
	 * (but still we want to give the user that illusion >;).
	 */
	left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
	right = ((dl_se->deadline - t) >> DL_SCALE) *
		(pi_se->dl_runtime >> DL_SCALE);

	return dl_time_before(right, left);
}
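
/*
 * A worked instance of the check above (illustrative numbers only,
 * DL_SCALE omitted since the units cancel): take dl_runtime = 10ms,
 * dl_period = 100ms (utilization 0.1). A task waking up with 4ms of
 * runtime left and 30ms to its old deadline is asking for
 * 4/30 ~= 0.13 of a CPU, more than its 0.1 share:
 *
 *	left  = dl_period * runtime         = 100 * 4  = 400
 *	right = (deadline - t) * dl_runtime =  30 * 10 = 300
 *
 * right < left, so dl_entity_overflow() returns true and the caller
 * refills the runtime and moves the deadline instead of letting the
 * task burn its residual budget too quickly.
 */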

/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
			     struct sched_dl_entity *pi_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * The arrival of a new instance needs special treatment, i.e.,
	 * the actual scheduling parameters have to be "renewed".
	 */
	if (dl_se->dl_new) {
		setup_new_dl_entity(dl_se, pi_se);
		return;
	}

	if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
	    dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
		dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
		dl_se->runtime = pi_se->dl_runtime;
	}
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rq *rq = rq_of_dl_rq(dl_rq);
	ktime_t now, act;
	ktime_t soft, hard;
	unsigned long range;
	s64 delta;

	if (boosted)
		return 0;
	/*
	 * We want the timer to fire at the deadline, but considering
	 * that it is actually coming from rq->clock and not from
	 * hrtimer's time base reading.
	 */
	act = ns_to_ktime(dl_se->deadline);
	now = hrtimer_cb_get_time(&dl_se->dl_timer);
	delta = ktime_to_ns(now) - rq_clock(rq);
	act = ktime_add_ns(act, delta);

	/*
	 * If the expiry time already passed, e.g., because the value
	 * chosen as the deadline is too small, don't even try to
	 * start the timer in the past!
	 */
	if (ktime_us_delta(act, now) < 0)
		return 0;

	hrtimer_set_expires(&dl_se->dl_timer, act);

	soft = hrtimer_get_softexpires(&dl_se->dl_timer);
	hard = hrtimer_get_expires(&dl_se->dl_timer);
	range = ktime_to_ns(ktime_sub(hard, soft));
	__hrtimer_start_range_ns(&dl_se->dl_timer, soft,
				 range, HRTIMER_MODE_ABS, 0);

	return hrtimer_active(&dl_se->dl_timer);
}
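
/*
 * The delta dance above compensates for the two clock domains involved
 * (illustrative numbers only): if rq_clock() reads 1000ms while the
 * hrtimer base reads 1003ms, a deadline of 1050ms in rq->clock terms
 * must be armed at 1050ms + (1003ms - 1000ms) = 1053ms in hrtimer
 * terms, so that the timer fires when rq->clock -- not the hrtimer
 * base -- reaches the deadline.
 */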

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clear dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
	struct sched_dl_entity *dl_se = container_of(timer,
						     struct sched_dl_entity,
						     dl_timer);
	struct task_struct *p = dl_task_of(dl_se);
	struct rq *rq;
again:
	rq = task_rq(p);
	raw_spin_lock(&rq->lock);

	if (rq != task_rq(p)) {
		/* Task was moved, retrying. */
		raw_spin_unlock(&rq->lock);
		goto again;
	}

	/*
	 * We need to take care of possible races here. In fact, the
	 * task might have changed its scheduling policy to something
	 * different from SCHED_DEADLINE or changed its reservation
	 * parameters (through sched_setattr()).
	 */
	if (!dl_task(p) || dl_se->dl_new)
		goto unlock;

	sched_clock_tick();
	update_rq_clock(rq);
	dl_se->dl_throttled = 0;
	dl_se->dl_yielded = 0;
	if (task_on_rq_queued(p)) {
		enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
		if (task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
		else
			resched_curr(rq);
#ifdef CONFIG_SMP
		/*
		 * Queueing this task back might have overloaded rq,
		 * check if we need to kick someone away.
		 */
		if (has_pushable_dl_tasks(rq))
			push_dl_task(rq);
#endif
	}
unlock:
	raw_spin_unlock(&rq->lock);

	return HRTIMER_NORESTART;
}
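
/*
 * A typical throttling timeline (illustrative numbers only), for a
 * task with dl_runtime = 10ms, dl_period = 100ms:
 *
 *	t = 0ms		instance starts, absolute deadline is set
 *	t = 10ms	budget exhausted, update_curr_dl() throttles the
 *			task and start_dl_timer() arms this timer
 *	deadline	this callback runs, replenishes the runtime and,
 *			if the task is still queued, re-enqueues it and
 *			possibly preempts rq->curr
 */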

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	if (hrtimer_active(timer)) {
		hrtimer_try_to_cancel(timer);
		return;
	}

	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}

static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
	int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
	int rorun = dl_se->runtime <= 0;

	if (!rorun && !dmiss)
		return 0;

	/*
	 * If we are beyond our current deadline and we are still
	 * executing, then we have already used some of the runtime of
	 * the next instance. Thus, if we do not account that, we are
	 * stealing bandwidth from the system at each deadline miss!
	 */
	if (dmiss) {
		dl_se->runtime = rorun ? dl_se->runtime : 0;
		dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
	}

	return 1;
}
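
/*
 * Example of the dmiss accounting above (illustrative numbers only):
 * a task caught 2ms past its deadline with 3ms of budget nominally
 * left (rorun == 0) gets runtime = 0 - 2ms = -2ms, so the 2ms it ran
 * into the next instance is charged back, and replenish_dl_entity()
 * will make it up before handing out fresh runtime.
 */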

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_dl_entity *dl_se = &curr->dl;
	u64 delta_exec;

	if (!dl_task(curr) || !on_dl_rq(dl_se))
		return;

	/*
	 * Consumed budget is computed considering the time as
	 * observed by schedulable tasks (excluding time spent
	 * in hardirq context, etc.). Deadlines are instead
	 * computed using hard walltime. This seems to be the more
	 * natural solution, but the full ramifications of this
	 * approach need further study.
	 */
	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = rq_clock_task(rq);
	cpuacct_charge(curr, delta_exec);

	sched_rt_avg_update(rq, delta_exec);

	dl_se->runtime -= delta_exec;
	if (dl_runtime_exceeded(rq, dl_se)) {
		__dequeue_task_dl(rq, curr, 0);
		if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
			dl_se->dl_throttled = 1;
		else
			enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

		if (!is_leftmost(curr, &rq->dl))
			resched_curr(rq);
	}

	/*
	 * Because -- for now -- we share the rt bandwidth, we need to
	 * account our runtime there too, otherwise actual rt tasks
	 * would be able to exceed the shared quota.
	 *
	 * Account to the root rt group for now.
	 *
	 * The solution we're working towards is having the RT groups scheduled
	 * using deadline servers -- however there's a few nasties to figure
	 * out before that can happen.
	 */
	if (rt_bandwidth_enabled()) {
		struct rt_rq *rt_rq = &rq->rt;

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We'll let actual RT tasks worry about the overflow here, we
		 * have our own CBS to keep us inline; only account when RT
		 * bandwidth is relevant.
		 */
		if (sched_rt_bandwidth_account(rt_rq))
			rt_rq->rt_time += delta_exec;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
	}
}

#ifdef CONFIG_SMP

static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);

static inline u64 next_deadline(struct rq *rq)
{
	struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);

	if (next && dl_prio(next->prio))
		return next->dl.deadline;
	else
		return 0;
}

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	if (dl_rq->earliest_dl.curr == 0 ||
	    dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
		/*
		 * If the dl_rq had no -deadline tasks, or if the new task
		 * has a shorter deadline than the current one on dl_rq, we
		 * know that the previous earliest becomes our next earliest,
		 * as the new task becomes the earliest itself.
		 */
		dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
		dl_rq->earliest_dl.curr = deadline;
		cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
	} else if (dl_rq->earliest_dl.next == 0 ||
		   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
		/*
		 * On the other hand, if the new -deadline task has a
		 * later deadline than the earliest one on dl_rq, but
		 * it is earlier than the next (if any), we must
		 * recompute the next-earliest.
		 */
		dl_rq->earliest_dl.next = next_deadline(rq);
	}
}
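
/*
 * Example of the earliest/next bookkeeping (illustrative numbers only):
 * with queued deadlines {50ms, 80ms} we have curr = 50, next = 80.
 * Enqueueing a 40ms task takes the first branch: curr = 40, next = 50.
 * Enqueueing a 60ms task instead takes the second branch and next is
 * recomputed to 60. cpudl_set() then lets other CPUs see this rq's
 * earliest deadline when they look for a push target.
 */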

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
	struct rq *rq = rq_of_dl_rq(dl_rq);

	/*
	 * Since we may have removed our earliest (and/or next earliest)
	 * task we must recompute them.
	 */
	if (!dl_rq->dl_nr_running) {
		dl_rq->earliest_dl.curr = 0;
		dl_rq->earliest_dl.next = 0;
		cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
	} else {
		struct rb_node *leftmost = dl_rq->rb_leftmost;
		struct sched_dl_entity *entry;

		entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
		dl_rq->earliest_dl.curr = entry->deadline;
		dl_rq->earliest_dl.next = next_deadline(rq);
		cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
	}
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;
	u64 deadline = dl_se->deadline;

	WARN_ON(!dl_prio(prio));
	dl_rq->dl_nr_running++;
	add_nr_running(rq_of_dl_rq(dl_rq), 1);

	inc_dl_deadline(dl_rq, deadline);
	inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
	int prio = dl_task_of(dl_se)->prio;

	WARN_ON(!dl_prio(prio));
	WARN_ON(!dl_rq->dl_nr_running);
	dl_rq->dl_nr_running--;
	sub_nr_running(rq_of_dl_rq(dl_rq), 1);

	dec_dl_deadline(dl_rq, dl_se->deadline);
	dec_dl_migration(dl_se, dl_rq);
}

static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
	struct rb_node **link = &dl_rq->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct sched_dl_entity *entry;
	int leftmost = 1;

	BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct sched_dl_entity, rb_node);
		if (dl_time_before(dl_se->deadline, entry->deadline))
			link = &parent->rb_left;
		else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		dl_rq->rb_leftmost = &dl_se->rb_node;

	rb_link_node(&dl_se->rb_node, parent, link);
	rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

	inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

	if (RB_EMPTY_NODE(&dl_se->rb_node))
		return;

	if (dl_rq->rb_leftmost == &dl_se->rb_node) {
		struct rb_node *next_node;

		next_node = rb_next(&dl_se->rb_node);
		dl_rq->rb_leftmost = next_node;
	}

	rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
	RB_CLEAR_NODE(&dl_se->rb_node);

	dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
		  struct sched_dl_entity *pi_se, int flags)
{
	BUG_ON(on_dl_rq(dl_se));

	/*
	 * If this is a wakeup or a new instance, the scheduling
	 * parameters of the task might need updating. Otherwise,
	 * we want a replenishment of its runtime.
	 */
	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
		replenish_dl_entity(dl_se, pi_se);
	else
		update_dl_entity(dl_se, pi_se);

	__enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
	__dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	struct task_struct *pi_task = rt_mutex_get_top_task(p);
	struct sched_dl_entity *pi_se = &p->dl;

	/*
	 * Use the scheduling parameters of the top pi-waiter
	 * task if we have one and its (relative) deadline is
	 * smaller than ours... otherwise we keep our runtime and
	 * deadline.
	 */
	if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
		pi_se = &pi_task->dl;

	/*
	 * If p is throttled, we do nothing. In fact, if it exhausted
	 * its budget it needs a replenishment and, since it now is on
	 * its rq, the bandwidth timer callback (which clearly has not
	 * run yet) will take care of this.
	 */
	if (p->dl.dl_throttled)
		return;

	enqueue_dl_entity(&p->dl, pi_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	dequeue_dl_entity(&p->dl);
	dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
	update_curr_dl(rq);
	__dequeue_task_dl(rq, p, flags);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	/*
	 * We make the task go to sleep until its current deadline by
	 * forcing its runtime to zero. This way, update_curr_dl() stops
	 * it and the bandwidth timer will wake it up and will give it
	 * new scheduling parameters (thanks to dl_yielded=1).
	 */
	if (p->dl.runtime > 0) {
		rq->curr->dl.dl_yielded = 1;
		p->dl.runtime = 0;
	}
	update_curr_dl(rq);
}
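
/*
 * From userspace this is reached via sched_yield(). A sketch of the
 * intended usage pattern (illustrative only, error handling omitted):
 * a periodic task that completes its instance early yields, sleeps
 * until its current deadline and wakes with a fresh runtime:
 *
 *	for (;;) {
 *		do_work();		finishes before runtime is used up
 *		sched_yield();		throttled until the deadline
 *	}
 */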

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	struct task_struct *curr;
	struct rq *rq;

	if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = ACCESS_ONCE(rq->curr); /* unlocked access */

	/*
	 * If we are dealing with a -deadline task, we must
	 * decide where to wake it up.
	 * If it has a later deadline and the current task
	 * on this rq can't move (provided the waking task
	 * can!) we prefer to send it somewhere else. On the
	 * other hand, if it has a shorter deadline, we
	 * try to make it stay here, it might be important.
	 */
	if (unlikely(dl_task(curr)) &&
	    (curr->nr_cpus_allowed < 2 ||
	     !dl_entity_preempt(&p->dl, &curr->dl)) &&
	    (p->nr_cpus_allowed > 1)) {
		int target = find_later_rq(p);

		if (target != -1)
			cpu = target;
	}
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
		return;

	resched_curr(rq);
}

static int pull_dl_task(struct rq *this_rq);

#endif /* CONFIG_SMP */

/*
 * Only called when both the current and waking task are -deadline
 * tasks.
 */
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
				  int flags)
{
	if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * In the unlikely case current and p have the same deadline
	 * let us try to decide what's the best thing to do...
	 */
	if ((p->dl.deadline == rq->curr->dl.deadline) &&
	    !test_tsk_need_resched(rq->curr))
		check_preempt_equal_dl(rq, p);
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
	hrtick_start(rq, p->dl.runtime);
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
						   struct dl_rq *dl_rq)
{
	struct rb_node *left = dl_rq->rb_leftmost;

	if (!left)
		return NULL;

	return rb_entry(left, struct sched_dl_entity, rb_node);
}

struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
{
	struct sched_dl_entity *dl_se;
	struct task_struct *p;
	struct dl_rq *dl_rq;

	dl_rq = &rq->dl;

	if (need_pull_dl_task(rq, prev)) {
		pull_dl_task(rq);
		/*
		 * pull_dl_task() can drop (and re-acquire) rq->lock; this
		 * means a stop task can slip in, in which case we need to
		 * re-start task selection.
		 */
		if (rq->stop && task_on_rq_queued(rq->stop))
			return RETRY_TASK;
	}

	/*
	 * When prev is DL, we may throttle it in put_prev_task().
	 * So, we update time before we check for dl_nr_running.
	 */
	if (prev->sched_class == &dl_sched_class)
		update_curr_dl(rq);

	if (unlikely(!dl_rq->dl_nr_running))
		return NULL;

	put_prev_task(rq, prev);

	dl_se = pick_next_dl_entity(rq, dl_rq);
	BUG_ON(!dl_se);

	p = dl_task_of(dl_se);
	p->se.exec_start = rq_clock_task(rq);

	/* Running task will never be pushed. */
	dequeue_pushable_dl_task(rq, p);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq))
		start_hrtick_dl(rq, p);
#endif

	set_post_schedule(rq);

	return p;
}

static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
	update_curr_dl(rq);

	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
		enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
	update_curr_dl(rq);

#ifdef CONFIG_SCHED_HRTICK
	if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
		start_hrtick_dl(rq, p);
#endif
}

static void task_fork_dl(struct task_struct *p)
{
	/*
	 * SCHED_DEADLINE tasks cannot fork and this is achieved through
	 * sched_fork().
	 */
}

static void task_dead_dl(struct task_struct *p)
{
	struct hrtimer *timer = &p->dl.dl_timer;
	struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

	/*
	 * Since we are TASK_DEAD we won't slip out of the domain!
	 */
	raw_spin_lock_irq(&dl_b->lock);
	dl_b->total_bw -= p->dl.dl_bw;
	raw_spin_unlock_irq(&dl_b->lock);

	hrtimer_cancel(timer);
}

static void set_curr_task_dl(struct rq *rq)
{
	struct task_struct *p = rq->curr;

	p->se.exec_start = rq_clock_task(rq);

	/* You can't push away the running task */
	dequeue_pushable_dl_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
		return 1;
	return 0;
}

/* Returns the second earliest -deadline task, NULL otherwise */
static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
{
	struct rb_node *next_node = rq->dl.rb_leftmost;
	struct sched_dl_entity *dl_se;
	struct task_struct *p = NULL;

next_node:
	next_node = rb_next(next_node);
	if (next_node) {
		dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
		p = dl_task_of(dl_se);

		if (pick_dl_task(rq, p, cpu))
			return p;

		goto next_node;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
	int this_cpu = smp_processor_id();
	int best_cpu, cpu = task_cpu(task);

	/* Make sure the mask is initialized first */
	if (unlikely(!later_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1;

	best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
			      task, later_mask);
	if (best_cpu == -1)
		return -1;

	/*
	 * If we are here, some target has been found,
	 * the most suitable of which is cached in best_cpu.
	 * This is, among the runqueues where the current tasks
	 * have later deadlines than the task's one, the rq
	 * with the latest possible one.
	 *
	 * Now we check how well this matches with task's
	 * affinity and system topology.
	 *
	 * The last cpu where the task ran is our first
	 * guess, since it is most likely cache-hot there.
	 */
	if (cpumask_test_cpu(cpu, later_mask))
		return cpu;
	/*
	 * Check if this_cpu is to be skipped (i.e., it is
	 * not in the mask) or not.
	 */
	if (!cpumask_test_cpu(this_cpu, later_mask))
		this_cpu = -1;

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {

			/*
			 * If possible, preempting this_cpu is
			 * cheaper than migrating.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			/*
			 * Last chance: if best_cpu is valid and is
			 * in the mask, that becomes our choice.
			 */
			if (best_cpu < nr_cpu_ids &&
			    cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * At this point, all our guesses failed, we just return
	 * 'something', and let the caller sort the things out.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any(later_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}
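
/*
 * In short, find_later_rq() tries, in order (summary of the above):
 *
 *	1. the task's previous CPU, if it is in later_mask (cache-hot);
 *	2. this_cpu, if it shares a SD_WAKE_AFFINE domain with the
 *	   previous CPU (preempting locally beats migrating);
 *	3. best_cpu as reported by cpudl_find(), once a domain
 *	   containing it is reached;
 *	4. any CPU from later_mask, or -1 if the mask was empty.
 */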

/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
	struct rq *later_rq = NULL;
	int tries;
	int cpu;

	for (tries = 0; tries < DL_MAX_TRIES; tries++) {
		cpu = find_later_rq(task);

		if ((cpu == -1) || (cpu == rq->cpu))
			break;

		later_rq = cpu_rq(cpu);

		/* Retry if something changed. */
		if (double_lock_balance(rq, later_rq)) {
			if (unlikely(task_rq(task) != rq ||
				     !cpumask_test_cpu(later_rq->cpu,
						       &task->cpus_allowed) ||
				     task_running(rq, task) ||
				     !task_on_rq_queued(task))) {
				double_unlock_balance(rq, later_rq);
				later_rq = NULL;
				break;
			}
		}

		/*
		 * If the rq we found has no -deadline task, or
		 * its earliest one has a later deadline than our
		 * task, the rq is a good one.
		 */
		if (!later_rq->dl.dl_nr_running ||
		    dl_time_before(task->dl.deadline,
				   later_rq->dl.earliest_dl.curr))
			break;

		/* Otherwise we try again. */
		double_unlock_balance(rq, later_rq);
		later_rq = NULL;
	}

	return later_rq;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
	struct task_struct *p;

	if (!has_pushable_dl_tasks(rq))
		return NULL;

	p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
		     struct task_struct, pushable_dl_tasks);

	BUG_ON(rq->cpu != task_cpu(p));
	BUG_ON(task_current(rq, p));
	BUG_ON(p->nr_cpus_allowed <= 1);

	BUG_ON(!task_on_rq_queued(p));
	BUG_ON(!dl_task(p));

	return p;
}

/*
 * See if the non running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
	struct task_struct *next_task;
	struct rq *later_rq;

	if (!rq->dl.overloaded)
		return 0;

	next_task = pick_next_pushable_dl_task(rq);
	if (!next_task)
		return 0;

retry:
	if (unlikely(next_task == rq->curr)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * If next_task preempts rq->curr, and rq->curr
	 * can move away, it makes sense to just reschedule
	 * without going further in pushing next_task.
	 */
	if (dl_task(rq->curr) &&
	    dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
	    rq->curr->nr_cpus_allowed > 1) {
		resched_curr(rq);
		return 0;
	}

	/* We might release rq lock */
	get_task_struct(next_task);

	/* Will lock the rq it'll find */
	later_rq = find_lock_later_rq(next_task, rq);
	if (!later_rq) {
		struct task_struct *task;

		/*
		 * We must check all this again, since
		 * find_lock_later_rq releases rq->lock and it is
		 * then possible that next_task has migrated.
		 */
		task = pick_next_pushable_dl_task(rq);
		if (task_cpu(next_task) == rq->cpu && task == next_task) {
			/*
			 * The task is still there. We don't try
			 * again, some other cpu will pull it when ready.
			 */
			dequeue_pushable_dl_task(rq, next_task);
			goto out;
		}

		if (!task)
			/* No more tasks */
			goto out;

		put_task_struct(next_task);
		next_task = task;
		goto retry;
	}

	deactivate_task(rq, next_task, 0);
	set_task_cpu(next_task, later_rq->cpu);
	activate_task(later_rq, next_task, 0);

	resched_curr(later_rq);

	double_unlock_balance(rq, later_rq);

out:
	put_task_struct(next_task);

	return 1;
}

static void push_dl_tasks(struct rq *rq)
{
	/* Terminates as it moves a -deadline task */
	while (push_dl_task(rq))
		;
}

static int pull_dl_task(struct rq *this_rq)
{
	int this_cpu = this_rq->cpu, ret = 0, cpu;
	struct task_struct *p;
	struct rq *src_rq;
	u64 dmin = LONG_MAX;

	if (likely(!dl_overloaded(this_rq)))
		return 0;

	/*
	 * Match the barrier from dl_set_overload(); this guarantees that if we
	 * see overloaded we must also see the dlo_mask bit.
	 */
	smp_rmb();

	for_each_cpu(cpu, this_rq->rd->dlo_mask) {
		if (this_cpu == cpu)
			continue;

		src_rq = cpu_rq(cpu);

		/*
		 * It looks racy, and it is! However, as in sched_rt.c,
		 * we are fine with this.
		 */
		if (this_rq->dl.dl_nr_running &&
		    dl_time_before(this_rq->dl.earliest_dl.curr,
				   src_rq->dl.earliest_dl.next))
			continue;

		/* Might drop this_rq->lock */
		double_lock_balance(this_rq, src_rq);

		/*
		 * If there are no more pullable tasks on the
		 * rq, we're done with it.
		 */
		if (src_rq->dl.dl_nr_running <= 1)
			goto skip;

		p = pick_next_earliest_dl_task(src_rq, this_cpu);

		/*
		 * We found a task to be pulled if:
		 *  - it preempts our current (if there's one),
		 *  - it will preempt the last one we pulled (if any).
		 */
		if (p && dl_time_before(p->dl.deadline, dmin) &&
		    (!this_rq->dl.dl_nr_running ||
		     dl_time_before(p->dl.deadline,
				    this_rq->dl.earliest_dl.curr))) {
			WARN_ON(p == src_rq->curr);
			WARN_ON(!task_on_rq_queued(p));

			/*
			 * Then we pull iff p has actually an earlier
			 * deadline than the current task of its runqueue.
			 */
			if (dl_time_before(p->dl.deadline,
					   src_rq->curr->dl.deadline))
				goto skip;

			ret = 1;

			deactivate_task(src_rq, p, 0);
			set_task_cpu(p, this_cpu);
			activate_task(this_rq, p, 0);
			dmin = p->dl.deadline;

			/* Is there any other task even earlier? */
		}
skip:
		double_unlock_balance(this_rq, src_rq);
	}

	return ret;
}

static void post_schedule_dl(struct rq *rq)
{
	push_dl_tasks(rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
	if (!task_running(rq, p) &&
	    !test_tsk_need_resched(rq->curr) &&
	    has_pushable_dl_tasks(rq) &&
	    p->nr_cpus_allowed > 1 &&
	    dl_task(rq->curr) &&
	    (rq->curr->nr_cpus_allowed < 2 ||
	     dl_entity_preempt(&rq->curr->dl, &p->dl))) {
		push_dl_tasks(rq);
	}
}

static void set_cpus_allowed_dl(struct task_struct *p,
				const struct cpumask *new_mask)
{
	struct rq *rq;
	int weight;

	BUG_ON(!dl_task(p));

	/*
	 * Update only if the task is actually running (i.e.,
	 * it is on the rq AND it is not throttled).
	 */
	if (!on_dl_rq(&p->dl))
		return;

	weight = cpumask_weight(new_mask);

	/*
	 * Only update if the migratability of the task changes, i.e. it
	 * goes from being able to migrate to being pinned, or vice versa.
	 */
	if ((p->nr_cpus_allowed > 1) == (weight > 1))
		return;

	rq = task_rq(p);

	/*
	 * The process used to be able to migrate OR it can now migrate
	 */
	if (weight <= 1) {
		if (!task_current(rq, p))
			dequeue_pushable_dl_task(rq, p);
		BUG_ON(!rq->dl.dl_nr_migratory);
		rq->dl.dl_nr_migratory--;
	} else {
		if (!task_current(rq, p))
			enqueue_pushable_dl_task(rq, p);
		rq->dl.dl_nr_migratory++;
	}

	update_dl_migration(&rq->dl);
}

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_set_overload(rq);

	if (rq->dl.dl_nr_running > 0)
		cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
	if (rq->dl.overloaded)
		dl_clear_overload(rq);

	cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
}

void init_sched_dl_class(void)
{
	unsigned int i;

	for_each_possible_cpu(i)
		zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
					GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
	if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
		hrtimer_try_to_cancel(&p->dl.dl_timer);

#ifdef CONFIG_SMP
	/*
	 * Since this might be the only -deadline task on the rq,
	 * this is the right place to try to pull some other one
	 * from an overloaded cpu, if any.
	 */
	if (!rq->dl.dl_nr_running)
		pull_dl_task(rq);
#endif
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
	int check_resched = 1;

	/*
	 * If p is throttled, don't consider the possibility
	 * of preempting rq->curr, the check will be done right
	 * after its runtime gets replenished.
	 */
	if (unlikely(p->dl.dl_throttled))
		return;

	if (task_on_rq_queued(p) && rq->curr != p) {
#ifdef CONFIG_SMP
		if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
			/* Only reschedule if pushing failed */
			check_resched = 0;
#endif /* CONFIG_SMP */
		if (check_resched && task_has_dl_policy(rq->curr))
			check_preempt_curr_dl(rq, p, 0);
	}
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
			    int oldprio)
{
	if (task_on_rq_queued(p) || rq->curr == p) {
#ifdef CONFIG_SMP
		/*
		 * This might be too much, but unfortunately
		 * we don't have the old deadline value, and
		 * we can't tell if the task is increasing
		 * or lowering its prio, so...
		 */
		if (!rq->dl.overloaded)
			pull_dl_task(rq);

		/*
		 * If we now have an earlier deadline task than p,
		 * then reschedule, provided p is still on this
		 * runqueue.
		 */
		if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
		    rq->curr == p)
			resched_curr(rq);
#else
		/*
		 * Again, we don't know if p has an earlier
		 * or later deadline, so let's blindly set a
		 * (maybe not needed) rescheduling point.
		 */
		resched_curr(rq);
#endif /* CONFIG_SMP */
	} else
		switched_to_dl(rq, p);
}

const struct sched_class dl_sched_class = {
	.next			= &rt_sched_class,
	.enqueue_task		= enqueue_task_dl,
	.dequeue_task		= dequeue_task_dl,
	.yield_task		= yield_task_dl,

	.check_preempt_curr	= check_preempt_curr_dl,

	.pick_next_task		= pick_next_task_dl,
	.put_prev_task		= put_prev_task_dl,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_dl,
	.set_cpus_allowed	= set_cpus_allowed_dl,
	.rq_online		= rq_online_dl,
	.rq_offline		= rq_offline_dl,
	.post_schedule		= post_schedule_dl,
	.task_woken		= task_woken_dl,
#endif

	.set_curr_task		= set_curr_task_dl,
	.task_tick		= task_tick_dl,
	.task_fork		= task_fork_dl,
	.task_dead		= task_dead_dl,

	.prio_changed		= prio_changed_dl,
	.switched_from		= switched_from_dl,
	.switched_to		= switched_to_dl,
};