kernel/sched/deadline.c
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
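
/*
 * For context, a minimal sketch of how userspace requests this class
 * (illustrative only, not part of this file; it assumes the
 * sched_setattr() syscall and struct sched_attr that were introduced
 * together with SCHED_DEADLINE, and the values are made up):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	10ms budget
 *		.sched_deadline	= 30 * 1000 * 1000,	30ms relative deadline
 *		.sched_period	= 30 * 1000 * 1000,	30ms period
 *	};
 *
 *	sched_setattr(0, &attr, 0);	0 == current task
 */
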
#include "sched.h"

#include <linux/slab.h>

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
        return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
        return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = task_rq(p);

        return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
        return !RB_EMPTY_NODE(&dl_se->rb_node);
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
        struct sched_dl_entity *dl_se = &p->dl;

        return dl_rq->rb_leftmost == &dl_se->rb_node;
}

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
        raw_spin_lock_init(&dl_b->dl_runtime_lock);
        dl_b->dl_period = period;
        dl_b->dl_runtime = runtime;
}

extern unsigned long to_ratio(u64 period, u64 runtime);

void init_dl_bw(struct dl_bw *dl_b)
{
        raw_spin_lock_init(&dl_b->lock);
        raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
        if (global_rt_runtime() == RUNTIME_INF)
                dl_b->bw = -1;
        else
                dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
        raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
        dl_b->total_bw = 0;
}

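/*
 * Note on representation (a reading of the above, not normative):
 * to_ratio() (kernel/sched/core.c) turns the runtime/period pair into a
 * fixed-point fraction, bw == -1 encodes "no admission control" for the
 * unlimited global RT runtime case, and total_bw accumulates the
 * bandwidth of admitted -deadline tasks so it can be checked against bw
 * at admission time.
 */
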
void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
{
        dl_rq->rb_root = RB_ROOT;

#ifdef CONFIG_SMP
        /* zero means no -deadline tasks */
        dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

        dl_rq->dl_nr_migratory = 0;
        dl_rq->overloaded = 0;
        dl_rq->pushable_dl_tasks_root = RB_ROOT;
#else
        init_dl_bw(&dl_rq->dl_bw);
#endif
}

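/*
 * The helpers below track, in the root domain, which runqueues are
 * "overloaded", i.e. have more than one -deadline task queued with at
 * least one of them able to migrate (dlo_count and dlo_mask), so that
 * pull_dl_task() knows which CPUs may have work to give away.
 */
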
#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
        /*
         * Must be visible before the overload count is
         * set (as in sched_rt.c).
         *
         * Matched by the barrier in pull_dl_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        atomic_dec(&rq->rd->dlo_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
        if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
                if (!dl_rq->overloaded) {
                        dl_set_overload(rq_of_dl_rq(dl_rq));
                        dl_rq->overloaded = 1;
                }
        } else if (dl_rq->overloaded) {
                dl_clear_overload(rq_of_dl_rq(dl_rq));
                dl_rq->overloaded = 0;
        }
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory++;

        update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory--;

        update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;
        struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
        struct rb_node *parent = NULL;
        struct task_struct *entry;
        int leftmost = 1;

        BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct task_struct,
                                 pushable_dl_tasks);
                if (dl_entity_preempt(&p->dl, &entry->dl))
                        link = &parent->rb_left;
                else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;

        rb_link_node(&p->pushable_dl_tasks, parent, link);
        rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;

        if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
                return;

        if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
                struct rb_node *next_node;

                next_node = rb_next(&p->pushable_dl_tasks);
                dl_rq->pushable_dl_tasks_leftmost = next_node;
        }

        rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
        RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
        return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return dl_task(prev);
}

static inline void set_post_schedule(struct rq *rq)
{
        rq->post_schedule = has_pushable_dl_tasks(rq);
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return false;
}

static inline int pull_dl_task(struct rq *rq)
{
        return 0;
}

static inline void set_post_schedule(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
                                  int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a
 * -deadline entity wants to (try to!) synchronize its behaviour with
 * the scheduler's one, and to (try to!) reconcile itself with its own
 * scheduling parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
                                       struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);

        /*
         * We use the regular wall clock time to set deadlines in the
         * future; in fact, we must consider execution overheads (time
         * spent on hardirq context, etc.).
         */
        dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
        dl_se->runtime = pi_se->dl_runtime;
        dl_se->dl_new = 0;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system
 * and can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to overcome its
 * runtime, or it just underestimated it during sched_setscheduler_ex().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                                struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        BUG_ON(pi_se->dl_runtime <= 0);

        /*
         * This could be the case for a !-dl task that is boosted.
         * Just go with full inherited parameters.
         */
        if (dl_se->dl_deadline == 0) {
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }

        /*
         * We keep moving the deadline away until we get some
         * available runtime for the entity. This ensures correct
         * handling of situations where the runtime overrun is
         * arbitrarily large.
         */
        while (dl_se->runtime <= 0) {
                dl_se->deadline += pi_se->dl_period;
                dl_se->runtime += pi_se->dl_runtime;
        }

        /*
         * At this point, the deadline really should be "in
         * the future" with respect to rq->clock. If it's
         * not, we are, for some reason, lagging too much!
         * Anyway, after having warned userspace about that,
         * we still try to keep things running by
         * resetting the deadline and the budget of the
         * entity.
         */
        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
                static bool lag_once = false;

                if (!lag_once) {
                        lag_once = true;
                        printk_sched("sched: DL replenish lagged too much\n");
                }
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }
}

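/*
 * A numerical illustration of the loop above (made-up values): with
 * dl_runtime = 10ms and dl_period = 100ms, an entity that overran down
 * to runtime = -25ms takes three iterations, ending with its deadline
 * postponed by 300ms and runtime = -25 + 3 * 10 = 5ms. The overrun is
 * paid for by pushing the deadline (and hence the EDF priority) far
 * enough into the future, which is how the CBS confines a misbehaving
 * entity within its declared bandwidth.
 */
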
/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.txt for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_period ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the period. For
 * tasks with deadline equal to period this is the same as using
 * dl_deadline instead of dl_period in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
                               struct sched_dl_entity *pi_se, u64 t)
{
        u64 left, right;

        /*
         * left and right are the two sides of the equation above,
         * after a bit of shuffling to use multiplications instead
         * of divisions.
         *
         * Note that none of the time values involved in the two
         * multiplications are absolute: dl_deadline and dl_runtime
         * are the relative deadline and the maximum runtime of each
         * instance, runtime is the runtime left for the last instance
         * and (deadline - t), since t is rq->clock, is the time left
         * to the (absolute) deadline. Even if overflowing the u64 type
         * is very unlikely to occur in both cases, here we scale down
         * as we want to avoid that risk at all. Scaling down by 10
         * means that we reduce granularity to 1us. We are fine with it,
         * since this is only a true/false check and, anyway, thinking
         * of anything below microseconds resolution is actually fiction
         * (but still we want to give the user that illusion >;).
         */
        left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
        right = ((dl_se->deadline - t) >> DL_SCALE) *
                (pi_se->dl_runtime >> DL_SCALE);

        return dl_time_before(right, left);
}

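/*
 * A worked instance of the check above (made-up values): a task with
 * dl_runtime = 10ms over dl_period = 100ms (10% bandwidth) wakes up
 * with runtime = 5ms left and 40ms to its current absolute deadline.
 * Its residual bandwidth would be 5/40 = 12.5% > 10%: indeed
 * left = 100 * 5 = 500 exceeds right = 40 * 10 = 400 (ignoring the
 * DL_SCALE shifts), so the function returns true and new parameters
 * must be set. Waking 60ms before the deadline instead gives
 * 5/60 ~ 8.3% <= 10% (right = 600 >= left = 500), and the current
 * deadline and runtime can be kept.
 */
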
/*
 * When a -deadline entity is queued back on the runqueue, its runtime and
 * deadline might need updating.
 *
 * The policy here is that we update the deadline of the entity only if:
 *  - the current deadline is in the past,
 *  - using the remaining runtime with the current deadline would make
 *    the entity exceed its bandwidth.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
                             struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        /*
         * The arrival of a new instance needs special treatment, i.e.,
         * the actual scheduling parameters have to be "renewed".
         */
        if (dl_se->dl_new) {
                setup_new_dl_entity(dl_se, pi_se);
                return;
        }

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
            dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth enforcement timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
        ktime_t now, act;
        ktime_t soft, hard;
        unsigned long range;
        s64 delta;

        if (boosted)
                return 0;
        /*
         * We want the timer to fire at the deadline, but considering
         * that it is actually coming from rq->clock and not from
         * hrtimer's time base reading.
         */
        act = ns_to_ktime(dl_se->deadline);
        now = hrtimer_cb_get_time(&dl_se->dl_timer);
        delta = ktime_to_ns(now) - rq_clock(rq);
        act = ktime_add_ns(act, delta);

        /*
         * If the expiry time already passed, e.g., because the value
         * chosen as the deadline is too small, don't even try to
         * start the timer in the past!
         */
        if (ktime_us_delta(act, now) < 0)
                return 0;

        hrtimer_set_expires(&dl_se->dl_timer, act);

        soft = hrtimer_get_softexpires(&dl_se->dl_timer);
        hard = hrtimer_get_expires(&dl_se->dl_timer);
        range = ktime_to_ns(ktime_sub(hard, soft));
        __hrtimer_start_range_ns(&dl_se->dl_timer, soft,
                                 range, HRTIMER_MODE_ABS, 0);

        return hrtimer_active(&dl_se->dl_timer);
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clearing dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = task_rq(p);

        raw_spin_lock(&rq->lock);

        /*
         * We need to take care of possible races here. In fact, the
         * task might have changed its scheduling policy to something
         * different from SCHED_DEADLINE or changed its reservation
         * parameters (through sched_setscheduler()).
         */
        if (!dl_task(p) || dl_se->dl_new)
                goto unlock;

        sched_clock_tick();
        update_rq_clock(rq);
        dl_se->dl_throttled = 0;
        if (p->on_rq) {
                enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
                if (task_has_dl_policy(rq->curr))
                        check_preempt_curr_dl(rq, p, 0);
                else
                        resched_task(rq->curr);
#ifdef CONFIG_SMP
                /*
                 * Queueing this task back might have overloaded rq,
                 * check if we need to kick someone away.
                 */
                if (has_pushable_dl_tasks(rq))
                        push_dl_task(rq);
#endif
        }
unlock:
        raw_spin_unlock(&rq->lock);

        return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->dl_timer;

        if (hrtimer_active(timer)) {
                hrtimer_try_to_cancel(timer);
                return;
        }

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        timer->function = dl_task_timer;
}

static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
        int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
        int rorun = dl_se->runtime <= 0;

        if (!rorun && !dmiss)
                return 0;

        /*
         * If we are beyond our current deadline and we are still
         * executing, then we have already used some of the runtime of
         * the next instance. Thus, if we do not account that, we are
         * stealing bandwidth from the system at each deadline miss!
         */
        if (dmiss) {
                dl_se->runtime = rorun ? dl_se->runtime : 0;
                dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
        }

        return 1;
}

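/*
 * Illustration of the dmiss accounting above (hypothetical numbers): a
 * task caught 2ms past its deadline while still running has effectively
 * consumed 2ms of its next instance's budget. Zeroing the runtime and
 * then subtracting the overshoot leaves runtime = -2ms, so the following
 * replenishment in replenish_dl_entity() first pays this debt back
 * before granting fresh budget.
 */
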
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * Update the current task's runtime statistics (provided it is still
 * a -deadline task and has not been removed from the dl_rq).
 */
static void update_curr_dl(struct rq *rq)
{
        struct task_struct *curr = rq->curr;
        struct sched_dl_entity *dl_se = &curr->dl;
        u64 delta_exec;

        if (!dl_task(curr) || !on_dl_rq(dl_se))
                return;

        /*
         * Consumed budget is computed considering the time as
         * observed by schedulable tasks (excluding time spent
         * in hardirq context, etc.). Deadlines are instead
         * computed using hard walltime. This seems to be the more
         * natural solution, but the full ramifications of this
         * approach need further study.
         */
        delta_exec = rq_clock_task(rq) - curr->se.exec_start;
        if (unlikely((s64)delta_exec <= 0))
                return;

        schedstat_set(curr->se.statistics.exec_max,
                      max(curr->se.statistics.exec_max, delta_exec));

        curr->se.sum_exec_runtime += delta_exec;
        account_group_exec_runtime(curr, delta_exec);

        curr->se.exec_start = rq_clock_task(rq);
        cpuacct_charge(curr, delta_exec);

        sched_rt_avg_update(rq, delta_exec);

        dl_se->runtime -= delta_exec;
        if (dl_runtime_exceeded(rq, dl_se)) {
                __dequeue_task_dl(rq, curr, 0);
                if (likely(start_dl_timer(dl_se, curr->dl.dl_boosted)))
                        dl_se->dl_throttled = 1;
                else
                        enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);

                if (!is_leftmost(curr, &rq->dl))
                        resched_task(curr);
        }

        /*
         * Because -- for now -- we share the rt bandwidth, we need to
         * account our runtime there too, otherwise actual rt tasks
         * would be able to exceed the shared quota.
         *
         * Account to the root rt group for now.
         *
         * The solution we're working towards is having the RT groups scheduled
         * using deadline servers -- however there's a few nasties to figure
         * out before that can happen.
         */
        if (rt_bandwidth_enabled()) {
                struct rt_rq *rt_rq = &rq->rt;

                raw_spin_lock(&rt_rq->rt_runtime_lock);
                /*
                 * We'll let actual RT tasks worry about the overflow here, we
                 * have our own CBS to keep us inline; only account when RT
                 * bandwidth is relevant.
                 */
                if (sched_rt_bandwidth_account(rt_rq))
                        rt_rq->rt_time += delta_exec;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
        }
}

#ifdef CONFIG_SMP

static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu);

static inline u64 next_deadline(struct rq *rq)
{
        struct task_struct *next = pick_next_earliest_dl_task(rq, rq->cpu);

        if (next && dl_prio(next->prio))
                return next->dl.deadline;
        else
                return 0;
}

static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_rq->earliest_dl.curr == 0 ||
            dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
                /*
                 * If the dl_rq had no -deadline tasks, or if the new task
                 * has a shorter deadline than the current one on dl_rq, we
                 * know that the previous earliest becomes our next earliest,
                 * as the new task becomes the earliest itself.
                 */
                dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr;
                dl_rq->earliest_dl.curr = deadline;
                cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
        } else if (dl_rq->earliest_dl.next == 0 ||
                   dl_time_before(deadline, dl_rq->earliest_dl.next)) {
                /*
                 * On the other hand, if the new -deadline task has
                 * a later deadline than the earliest one on dl_rq, but
                 * it is earlier than the next (if any), we must
                 * recompute the next-earliest.
                 */
                dl_rq->earliest_dl.next = next_deadline(rq);
        }
}

static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
{
        struct rq *rq = rq_of_dl_rq(dl_rq);

        /*
         * Since we may have removed our earliest (and/or next earliest)
         * task we must recompute them.
         */
        if (!dl_rq->dl_nr_running) {
                dl_rq->earliest_dl.curr = 0;
                dl_rq->earliest_dl.next = 0;
                cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
        } else {
                struct rb_node *leftmost = dl_rq->rb_leftmost;
                struct sched_dl_entity *entry;

                entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
                dl_rq->earliest_dl.curr = entry->deadline;
                dl_rq->earliest_dl.next = next_deadline(rq);
                cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
        }
}

#else

static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}

#endif /* CONFIG_SMP */

static inline
void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;
        u64 deadline = dl_se->deadline;

        WARN_ON(!dl_prio(prio));
        dl_rq->dl_nr_running++;
        inc_nr_running(rq_of_dl_rq(dl_rq));

        inc_dl_deadline(dl_rq, deadline);
        inc_dl_migration(dl_se, dl_rq);
}

static inline
void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        int prio = dl_task_of(dl_se)->prio;

        WARN_ON(!dl_prio(prio));
        WARN_ON(!dl_rq->dl_nr_running);
        dl_rq->dl_nr_running--;
        dec_nr_running(rq_of_dl_rq(dl_rq));

        dec_dl_deadline(dl_rq, dl_se->deadline);
        dec_dl_migration(dl_se, dl_rq);
}

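/*
 * The dl_rq rb-tree below keeps queued entities sorted by absolute
 * deadline: the leftmost node (cached in rb_leftmost) is always the
 * earliest-deadline entity, i.e. the one EDF must run next.
 */
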
static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rb_node **link = &dl_rq->rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct sched_dl_entity *entry;
        int leftmost = 1;

        BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct sched_dl_entity, rb_node);
                if (dl_time_before(dl_se->deadline, entry->deadline))
                        link = &parent->rb_left;
                else {
                        link = &parent->rb_right;
                        leftmost = 0;
                }
        }

        if (leftmost)
                dl_rq->rb_leftmost = &dl_se->rb_node;

        rb_link_node(&dl_se->rb_node, parent, link);
        rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);

        inc_dl_tasks(dl_se, dl_rq);
}

static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        if (RB_EMPTY_NODE(&dl_se->rb_node))
                return;

        if (dl_rq->rb_leftmost == &dl_se->rb_node) {
                struct rb_node *next_node;

                next_node = rb_next(&dl_se->rb_node);
                dl_rq->rb_leftmost = next_node;
        }

        rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
        RB_CLEAR_NODE(&dl_se->rb_node);

        dec_dl_tasks(dl_se, dl_rq);
}

static void
enqueue_dl_entity(struct sched_dl_entity *dl_se,
                  struct sched_dl_entity *pi_se, int flags)
{
        BUG_ON(on_dl_rq(dl_se));

        /*
         * If this is a wakeup or a new instance, the scheduling
         * parameters of the task might need updating. Otherwise,
         * we want a replenishment of its runtime.
         */
        if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
                replenish_dl_entity(dl_se, pi_se);
        else
                update_dl_entity(dl_se, pi_se);

        __enqueue_dl_entity(dl_se);
}

static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
{
        __dequeue_dl_entity(dl_se);
}

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        struct task_struct *pi_task = rt_mutex_get_top_task(p);
        struct sched_dl_entity *pi_se = &p->dl;

        /*
         * Use the scheduling parameters of the top pi-waiter
         * task if we have one and its (relative) deadline is
         * smaller than ours... OTW we keep our runtime and
         * deadline.
         */
        if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio))
                pi_se = &pi_task->dl;

        /*
         * If p is throttled, we do nothing. In fact, if it exhausted
         * its budget it needs a replenishment and, since it now is on
         * its rq, the bandwidth timer callback (which clearly has not
         * run yet) will take care of this.
         */
        if (p->dl.dl_throttled)
                return;

        enqueue_dl_entity(&p->dl, pi_se, flags);

        if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
}

static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        dequeue_dl_entity(&p->dl);
        dequeue_pushable_dl_task(rq, p);
}

static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
        update_curr_dl(rq);
        __dequeue_task_dl(rq, p, flags);
}

/*
 * Yield task semantic for -deadline tasks is:
 *
 *   get off from the CPU until our next instance, with
 *   a new runtime. This is of little use now, since we
 *   don't have a bandwidth reclaiming mechanism. Anyway,
 *   bandwidth reclaiming is planned for the future, and
 *   yield_task_dl will indicate that some spare budget
 *   is available for other task instances to use.
 */
static void yield_task_dl(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        /*
         * We make the task go to sleep until its current deadline by
         * forcing its runtime to zero. This way, update_curr_dl() stops
         * it and the bandwidth timer will wake it up and will give it
         * new scheduling parameters (thanks to dl_new=1).
         */
        if (p->dl.runtime > 0) {
                rq->curr->dl.dl_new = 1;
                p->dl.runtime = 0;
        }
        update_curr_dl(rq);
}

#ifdef CONFIG_SMP

static int find_later_rq(struct task_struct *task);

static int
select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
{
        struct task_struct *curr;
        struct rq *rq;

        if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
                goto out;

        rq = cpu_rq(cpu);

        rcu_read_lock();
        curr = ACCESS_ONCE(rq->curr); /* unlocked access */

        /*
         * If we are dealing with a -deadline task, we must
         * decide where to wake it up.
         * If it has a later deadline and the current task
         * on this rq can't move (provided the waking task
         * can!) we prefer to send it somewhere else. On the
         * other hand, if it has a shorter deadline, we
         * try to make it stay here, it might be important.
         */
        if (unlikely(dl_task(curr)) &&
            (curr->nr_cpus_allowed < 2 ||
             !dl_entity_preempt(&p->dl, &curr->dl)) &&
            (p->nr_cpus_allowed > 1)) {
                int target = find_later_rq(p);

                if (target != -1)
                        cpu = target;
        }
        rcu_read_unlock();

out:
        return cpu;
}

static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
{
        /*
         * Current can't be migrated, useless to reschedule,
         * let's hope p can move out.
         */
        if (rq->curr->nr_cpus_allowed == 1 ||
            cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
                return;

        /*
         * p is migratable, so let's not schedule it and
         * see if it is pushed or pulled somewhere else.
         */
        if (p->nr_cpus_allowed != 1 &&
            cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
                return;

        resched_task(rq->curr);
}

static int pull_dl_task(struct rq *this_rq);

#endif /* CONFIG_SMP */

aab03e05
DF
972/*
973 * Only called when both the current and waking task are -deadline
974 * tasks.
975 */
976static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
977 int flags)
978{
1baca4ce 979 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
aab03e05 980 resched_task(rq->curr);
1baca4ce
JL
981 return;
982 }
983
984#ifdef CONFIG_SMP
985 /*
986 * In the unlikely case current and p have the same deadline
987 * let us try to decide what's the best thing to do...
988 */
332ac17e
DF
989 if ((p->dl.deadline == rq->curr->dl.deadline) &&
990 !test_tsk_need_resched(rq->curr))
1baca4ce
JL
991 check_preempt_equal_dl(rq, p);
992#endif /* CONFIG_SMP */
aab03e05
DF
993}
994
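/*
 * Arm the high-resolution tick to fire roughly when the remaining
 * runtime is due to be depleted, so throttling does not have to wait
 * for the next periodic tick. (A reading of the guard below, not
 * normative: it appears to skip arming the timer before at least 10us
 * of the budget have been consumed.)
 */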
#ifdef CONFIG_SCHED_HRTICK
static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
{
        s64 delta = p->dl.dl_runtime - p->dl.runtime;

        if (delta > 10000)
                hrtick_start(rq, p->dl.runtime);
}
#endif

static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
                                                   struct dl_rq *dl_rq)
{
        struct rb_node *left = dl_rq->rb_leftmost;

        if (!left)
                return NULL;

        return rb_entry(left, struct sched_dl_entity, rb_node);
}

struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
{
        struct sched_dl_entity *dl_se;
        struct task_struct *p;
        struct dl_rq *dl_rq;

        dl_rq = &rq->dl;

        if (need_pull_dl_task(rq, prev)) {
                pull_dl_task(rq);
                /*
                 * pull_dl_task() can drop (and re-acquire) rq->lock; this
                 * means a stop task can slip in, in which case we need to
                 * re-start task selection.
                 */
                if (rq->stop && rq->stop->on_rq)
                        return RETRY_TASK;
        }

        /*
         * When prev is DL, we may throttle it in put_prev_task().
         * So, we update time before we check for dl_nr_running.
         */
        if (prev->sched_class == &dl_sched_class)
                update_curr_dl(rq);

        if (unlikely(!dl_rq->dl_nr_running))
                return NULL;

        put_prev_task(rq, prev);

        dl_se = pick_next_dl_entity(rq, dl_rq);
        BUG_ON(!dl_se);

        p = dl_task_of(dl_se);
        p->se.exec_start = rq_clock_task(rq);

        /* Running task will never be pushed. */
        dequeue_pushable_dl_task(rq, p);

#ifdef CONFIG_SCHED_HRTICK
        if (hrtick_enabled(rq))
                start_hrtick_dl(rq, p);
#endif

        set_post_schedule(rq);

        return p;
}

static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
{
        update_curr_dl(rq);

        if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
                enqueue_pushable_dl_task(rq, p);
}

static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
{
        update_curr_dl(rq);

#ifdef CONFIG_SCHED_HRTICK
        if (hrtick_enabled(rq) && queued && p->dl.runtime > 0)
                start_hrtick_dl(rq, p);
#endif
}

static void task_fork_dl(struct task_struct *p)
{
        /*
         * SCHED_DEADLINE tasks cannot fork and this is achieved through
         * sched_fork().
         */
}

static void task_dead_dl(struct task_struct *p)
{
        struct hrtimer *timer = &p->dl.dl_timer;
        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

        /*
         * Since we are TASK_DEAD we won't slip out of the domain!
         */
        raw_spin_lock_irq(&dl_b->lock);
        dl_b->total_bw -= p->dl.dl_bw;
        raw_spin_unlock_irq(&dl_b->lock);

        hrtimer_cancel(timer);
}

static void set_curr_task_dl(struct rq *rq)
{
        struct task_struct *p = rq->curr;

        p->se.exec_start = rq_clock_task(rq);

        /* You can't push away the running task */
        dequeue_pushable_dl_task(rq, p);
}

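/*
 * What follows is the SMP balancing machinery for -deadline tasks:
 * push_dl_task() tries to move a queued, non-running task out to a CPU
 * whose current task has a later deadline, while pull_dl_task() grabs
 * such a task from another, overloaded runqueue. Both sides use the
 * cpudl structure (kernel/sched/cpudeadline.c), via cpudl_find() and
 * cpudl_set(), to locate candidate CPUs cheaply.
 */
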
#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define DL_MAX_TRIES 3

static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
        if (!task_running(rq, p) &&
            (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) &&
            (p->nr_cpus_allowed > 1))
                return 1;

        return 0;
}

/* Returns the second earliest -deadline task, NULL otherwise */
static struct task_struct *pick_next_earliest_dl_task(struct rq *rq, int cpu)
{
        struct rb_node *next_node = rq->dl.rb_leftmost;
        struct sched_dl_entity *dl_se;
        struct task_struct *p = NULL;

next_node:
        next_node = rb_next(next_node);
        if (next_node) {
                dl_se = rb_entry(next_node, struct sched_dl_entity, rb_node);
                p = dl_task_of(dl_se);

                if (pick_dl_task(rq, p, cpu))
                        return p;

                goto next_node;
        }

        return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);

static int find_later_rq(struct task_struct *task)
{
        struct sched_domain *sd;
        struct cpumask *later_mask = __get_cpu_var(local_cpu_mask_dl);
        int this_cpu = smp_processor_id();
        int best_cpu, cpu = task_cpu(task);

        /* Make sure the mask is initialized first */
        if (unlikely(!later_mask))
                return -1;

        if (task->nr_cpus_allowed == 1)
                return -1;

        best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
                        task, later_mask);
        if (best_cpu == -1)
                return -1;

        /*
         * If we are here, some target has been found,
         * the most suitable of which is cached in best_cpu.
         * This is, among the runqueues where the current tasks
         * have later deadlines than the task's one, the rq
         * with the latest possible one.
         *
         * Now we check how well this matches with task's
         * affinity and system topology.
         *
         * The last cpu where the task ran is our first
         * guess, since it is most likely cache-hot there.
         */
        if (cpumask_test_cpu(cpu, later_mask))
                return cpu;
        /*
         * Check if this_cpu is to be skipped (i.e., it is
         * not in the mask) or not.
         */
        if (!cpumask_test_cpu(this_cpu, later_mask))
                this_cpu = -1;

        rcu_read_lock();
        for_each_domain(cpu, sd) {
                if (sd->flags & SD_WAKE_AFFINE) {

                        /*
                         * If possible, preempting this_cpu is
                         * cheaper than migrating.
                         */
                        if (this_cpu != -1 &&
                            cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
                                rcu_read_unlock();
                                return this_cpu;
                        }

                        /*
                         * Last chance: if best_cpu is valid and is
                         * in the mask, that becomes our choice.
                         */
                        if (best_cpu < nr_cpu_ids &&
                            cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
                                rcu_read_unlock();
                                return best_cpu;
                        }
                }
        }
        rcu_read_unlock();

        /*
         * At this point, all our guesses failed, we just return
         * 'something', and let the caller sort the things out.
         */
        if (this_cpu != -1)
                return this_cpu;

        cpu = cpumask_any(later_mask);
        if (cpu < nr_cpu_ids)
                return cpu;

        return -1;
}

/* Locks the rq it finds */
static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
{
        struct rq *later_rq = NULL;
        int tries;
        int cpu;

        for (tries = 0; tries < DL_MAX_TRIES; tries++) {
                cpu = find_later_rq(task);

                if ((cpu == -1) || (cpu == rq->cpu))
                        break;

                later_rq = cpu_rq(cpu);

                /* Retry if something changed. */
                if (double_lock_balance(rq, later_rq)) {
                        if (unlikely(task_rq(task) != rq ||
                                     !cpumask_test_cpu(later_rq->cpu,
                                                       &task->cpus_allowed) ||
                                     task_running(rq, task) || !task->on_rq)) {
                                double_unlock_balance(rq, later_rq);
                                later_rq = NULL;
                                break;
                        }
                }

                /*
                 * If the rq we found has no -deadline task, or
                 * its earliest one has a later deadline than our
                 * task, the rq is a good one.
                 */
                if (!later_rq->dl.dl_nr_running ||
                    dl_time_before(task->dl.deadline,
                                   later_rq->dl.earliest_dl.curr))
                        break;

                /* Otherwise we try again. */
                double_unlock_balance(rq, later_rq);
                later_rq = NULL;
        }

        return later_rq;
}

static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
{
        struct task_struct *p;

        if (!has_pushable_dl_tasks(rq))
                return NULL;

        p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
                     struct task_struct, pushable_dl_tasks);

        BUG_ON(rq->cpu != task_cpu(p));
        BUG_ON(task_current(rq, p));
        BUG_ON(p->nr_cpus_allowed <= 1);

        BUG_ON(!p->on_rq);
        BUG_ON(!dl_task(p));

        return p;
}

/*
 * See if the non-running -deadline tasks on this rq
 * can be sent to some other CPU where they can preempt
 * and start executing.
 */
static int push_dl_task(struct rq *rq)
{
        struct task_struct *next_task;
        struct rq *later_rq;

        if (!rq->dl.overloaded)
                return 0;

        next_task = pick_next_pushable_dl_task(rq);
        if (!next_task)
                return 0;

retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
                return 0;
        }

        /*
         * If next_task preempts rq->curr, and rq->curr
         * can move away, it makes sense to just reschedule
         * without going further in pushing next_task.
         */
        if (dl_task(rq->curr) &&
            dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
            rq->curr->nr_cpus_allowed > 1) {
                resched_task(rq->curr);
                return 0;
        }

        /* We might release rq lock */
        get_task_struct(next_task);

        /* Will lock the rq it'll find */
        later_rq = find_lock_later_rq(next_task, rq);
        if (!later_rq) {
                struct task_struct *task;

                /*
                 * We must check all this again, since
                 * find_lock_later_rq releases rq->lock and it is
                 * then possible that next_task has migrated.
                 */
                task = pick_next_pushable_dl_task(rq);
                if (task_cpu(next_task) == rq->cpu && task == next_task) {
                        /*
                         * The task is still there. We don't try
                         * again, some other cpu will pull it when ready.
                         */
                        dequeue_pushable_dl_task(rq, next_task);
                        goto out;
                }

                if (!task)
                        /* No more tasks */
                        goto out;

                put_task_struct(next_task);
                next_task = task;
                goto retry;
        }

        deactivate_task(rq, next_task, 0);
        set_task_cpu(next_task, later_rq->cpu);
        activate_task(later_rq, next_task, 0);

        resched_task(later_rq->curr);

        double_unlock_balance(rq, later_rq);

out:
        put_task_struct(next_task);

        return 1;
}

static void push_dl_tasks(struct rq *rq)
{
        /* Terminates as it moves a -deadline task */
        while (push_dl_task(rq))
                ;
}

static int pull_dl_task(struct rq *this_rq)
{
        int this_cpu = this_rq->cpu, ret = 0, cpu;
        struct task_struct *p;
        struct rq *src_rq;
        u64 dmin = LONG_MAX;

        if (likely(!dl_overloaded(this_rq)))
                return 0;

        /*
         * Match the barrier from dl_set_overload(); this guarantees that
         * if we see overloaded we must also see the dlo_mask bit.
         */
        smp_rmb();

        for_each_cpu(cpu, this_rq->rd->dlo_mask) {
                if (this_cpu == cpu)
                        continue;

                src_rq = cpu_rq(cpu);

                /*
                 * It looks racy, and it is! However, as in sched_rt.c,
                 * we are fine with this.
                 */
                if (this_rq->dl.dl_nr_running &&
                    dl_time_before(this_rq->dl.earliest_dl.curr,
                                   src_rq->dl.earliest_dl.next))
                        continue;

                /* Might drop this_rq->lock */
                double_lock_balance(this_rq, src_rq);

                /*
                 * If there are no more pullable tasks on the
                 * rq, we're done with it.
                 */
                if (src_rq->dl.dl_nr_running <= 1)
                        goto skip;

                p = pick_next_earliest_dl_task(src_rq, this_cpu);

                /*
                 * We found a task to be pulled if:
                 *  - it preempts our current (if there's one),
                 *  - it will preempt the last one we pulled (if any).
                 */
                if (p && dl_time_before(p->dl.deadline, dmin) &&
                    (!this_rq->dl.dl_nr_running ||
                     dl_time_before(p->dl.deadline,
                                    this_rq->dl.earliest_dl.curr))) {
                        WARN_ON(p == src_rq->curr);
                        WARN_ON(!p->on_rq);

                        /*
                         * Then we pull iff p has actually an earlier
                         * deadline than the current task of its runqueue.
                         */
                        if (dl_time_before(p->dl.deadline,
                                           src_rq->curr->dl.deadline))
                                goto skip;

                        ret = 1;

                        deactivate_task(src_rq, p, 0);
                        set_task_cpu(p, this_cpu);
                        activate_task(this_rq, p, 0);
                        dmin = p->dl.deadline;

                        /* Is there any other task even earlier? */
                }
skip:
                double_unlock_balance(this_rq, src_rq);
        }

        return ret;
}

static void post_schedule_dl(struct rq *rq)
{
        push_dl_tasks(rq);
}

/*
 * Since the task is not running and a reschedule is not going to happen
 * anytime soon on its runqueue, we try pushing it away now.
 */
static void task_woken_dl(struct rq *rq, struct task_struct *p)
{
        if (!task_running(rq, p) &&
            !test_tsk_need_resched(rq->curr) &&
            has_pushable_dl_tasks(rq) &&
            p->nr_cpus_allowed > 1 &&
            dl_task(rq->curr) &&
            (rq->curr->nr_cpus_allowed < 2 ||
             dl_entity_preempt(&rq->curr->dl, &p->dl))) {
                push_dl_tasks(rq);
        }
}

static void set_cpus_allowed_dl(struct task_struct *p,
                                const struct cpumask *new_mask)
{
        struct rq *rq;
        int weight;

        BUG_ON(!dl_task(p));

        /*
         * Update only if the task is actually running (i.e.,
         * it is on the rq AND it is not throttled).
         */
        if (!on_dl_rq(&p->dl))
                return;

        weight = cpumask_weight(new_mask);

        /*
         * Only update if the process changes whether it can
         * migrate or not.
         */
        if ((p->nr_cpus_allowed > 1) == (weight > 1))
                return;

        rq = task_rq(p);

        /*
         * The process used to be able to migrate OR it can now migrate
         */
        if (weight <= 1) {
                if (!task_current(rq, p))
                        dequeue_pushable_dl_task(rq, p);
                BUG_ON(!rq->dl.dl_nr_migratory);
                rq->dl.dl_nr_migratory--;
        } else {
                if (!task_current(rq, p))
                        enqueue_pushable_dl_task(rq, p);
                rq->dl.dl_nr_migratory++;
        }

        update_dl_migration(&rq->dl);
}

/* Assumes rq->lock is held */
static void rq_online_dl(struct rq *rq)
{
        if (rq->dl.overloaded)
                dl_set_overload(rq);

        if (rq->dl.dl_nr_running > 0)
                cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
}

/* Assumes rq->lock is held */
static void rq_offline_dl(struct rq *rq)
{
        if (rq->dl.overloaded)
                dl_clear_overload(rq);

        cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
}

void init_sched_dl_class(void)
{
        unsigned int i;

        for_each_possible_cpu(i)
                zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
                                        GFP_KERNEL, cpu_to_node(i));
}

#endif /* CONFIG_SMP */

static void switched_from_dl(struct rq *rq, struct task_struct *p)
{
        if (hrtimer_active(&p->dl.dl_timer) && !dl_policy(p->policy))
                hrtimer_try_to_cancel(&p->dl.dl_timer);

#ifdef CONFIG_SMP
        /*
         * Since this might be the only -deadline task on the rq,
         * this is the right place to try to pull some other one
         * from an overloaded cpu, if any.
         */
        if (!rq->dl.dl_nr_running)
                pull_dl_task(rq);
#endif
}

/*
 * When switching to -deadline, we may overload the rq, then
 * we try to push someone off, if possible.
 */
static void switched_to_dl(struct rq *rq, struct task_struct *p)
{
        int check_resched = 1;

        /*
         * If p is throttled, don't consider the possibility
         * of preempting rq->curr, the check will be done right
         * after its runtime gets replenished.
         */
        if (unlikely(p->dl.dl_throttled))
                return;

        if (p->on_rq && rq->curr != p) {
#ifdef CONFIG_SMP
                if (rq->dl.overloaded && push_dl_task(rq) && rq != task_rq(p))
                        /* Only reschedule if pushing failed */
                        check_resched = 0;
#endif /* CONFIG_SMP */
                if (check_resched && task_has_dl_policy(rq->curr))
                        check_preempt_curr_dl(rq, p, 0);
        }
}

/*
 * If the scheduling parameters of a -deadline task changed,
 * a push or pull operation might be needed.
 */
static void prio_changed_dl(struct rq *rq, struct task_struct *p,
                            int oldprio)
{
        if (p->on_rq || rq->curr == p) {
#ifdef CONFIG_SMP
                /*
                 * This might be too much, but unfortunately
                 * we don't have the old deadline value, and
                 * we can't argue if the task is increasing
                 * or lowering its prio, so...
                 */
                if (!rq->dl.overloaded)
                        pull_dl_task(rq);

                /*
                 * If we now have an earlier deadline task than p,
                 * then reschedule, provided p is still on this
                 * runqueue.
                 */
                if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline) &&
                    rq->curr == p)
                        resched_task(p);
#else
                /*
                 * Again, we don't know if p has an earlier
                 * or later deadline, so let's blindly set a
                 * (maybe not needed) rescheduling point.
                 */
                resched_task(p);
#endif /* CONFIG_SMP */
        } else
                switched_to_dl(rq, p);
}

const struct sched_class dl_sched_class = {
        .next                   = &rt_sched_class,
        .enqueue_task           = enqueue_task_dl,
        .dequeue_task           = dequeue_task_dl,
        .yield_task             = yield_task_dl,

        .check_preempt_curr     = check_preempt_curr_dl,

        .pick_next_task         = pick_next_task_dl,
        .put_prev_task          = put_prev_task_dl,

#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_dl,
        .set_cpus_allowed       = set_cpus_allowed_dl,
        .rq_online              = rq_online_dl,
        .rq_offline             = rq_offline_dl,
        .post_schedule          = post_schedule_dl,
        .task_woken             = task_woken_dl,
#endif

        .set_curr_task          = set_curr_task_dl,
        .task_tick              = task_tick_dl,
        .task_fork              = task_fork_dl,
        .task_dead              = task_dead_dl,

        .prio_changed           = prio_changed_dl,
        .switched_from          = switched_from_dl,
        .switched_to            = switched_to_dl,
};