kernel/sched/deadline.c
1/*
2 * Deadline Scheduling Class (SCHED_DEADLINE)
3 *
4 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
5 *
6 * Tasks that periodically execute their instances for less than their
7 * runtime won't miss any of their deadlines.
8 * Tasks that are not periodic or sporadic or that try to execute more
9 * than their reserved bandwidth will be slowed down (and may potentially
10 * miss some of their deadlines), and won't affect any other task.
11 *
12 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
13 * Juri Lelli <juri.lelli@gmail.com>,
14 * Michael Trimarchi <michael@amarulasolutions.com>,
15 * Fabio Checconi <fchecconi@gmail.com>
16 */
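/*
 * For context: a task asks for this behaviour from user space via the
 * sched_setattr() syscall. A minimal sketch, with illustrative values only
 * (see Documentation/scheduler/sched-deadline.txt for the full story):
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	(10ms of runtime)
 *		.sched_deadline	= 30 * 1000 * 1000,	(relative deadline 30ms)
 *		.sched_period	= 100 * 1000 * 1000,	(every 100ms)
 *	};
 *	syscall(__NR_sched_setattr, 0, &attr, 0);
 */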
17#include "sched.h"
18
19#include <linux/slab.h>
20
21struct dl_bandwidth def_dl_bandwidth;
22
23static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
24{
25 return container_of(dl_se, struct task_struct, dl);
26}
27
28static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
29{
30 return container_of(dl_rq, struct rq, dl);
31}
32
33static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
34{
35 struct task_struct *p = dl_task_of(dl_se);
36 struct rq *rq = task_rq(p);
37
38 return &rq->dl;
39}
40
41static inline int on_dl_rq(struct sched_dl_entity *dl_se)
42{
43 return !RB_EMPTY_NODE(&dl_se->rb_node);
44}
45
46static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
47{
48 struct sched_dl_entity *dl_se = &p->dl;
49
50 return dl_rq->rb_leftmost == &dl_se->rb_node;
51}
52
53void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
54{
55 raw_spin_lock_init(&dl_b->dl_runtime_lock);
56 dl_b->dl_period = period;
57 dl_b->dl_runtime = runtime;
58}
59
60void init_dl_bw(struct dl_bw *dl_b)
61{
62 raw_spin_lock_init(&dl_b->lock);
63 raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
64 if (global_rt_runtime() == RUNTIME_INF)
65 dl_b->bw = -1;
66 else
67 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
68 raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
69 dl_b->total_bw = 0;
70}
71
72void init_dl_rq(struct dl_rq *dl_rq)
73{
74 dl_rq->rb_root = RB_ROOT;
75
76#ifdef CONFIG_SMP
77 /* zero means no -deadline tasks */
78 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
79
80 dl_rq->dl_nr_migratory = 0;
81 dl_rq->overloaded = 0;
82 dl_rq->pushable_dl_tasks_root = RB_ROOT;
83#else
84 init_dl_bw(&dl_rq->dl_bw);
85#endif
86}
87
88#ifdef CONFIG_SMP
89
90static inline int dl_overloaded(struct rq *rq)
91{
92 return atomic_read(&rq->rd->dlo_count);
93}
94
95static inline void dl_set_overload(struct rq *rq)
96{
97 if (!rq->online)
98 return;
99
100 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
101 /*
102 * Must be visible before the overload count is
103 * set (as in sched_rt.c).
104 *
105 * Matched by the barrier in pull_dl_task().
106 */
107 smp_wmb();
108 atomic_inc(&rq->rd->dlo_count);
109}
110
111static inline void dl_clear_overload(struct rq *rq)
112{
113 if (!rq->online)
114 return;
115
116 atomic_dec(&rq->rd->dlo_count);
117 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
118}
119
120static void update_dl_migration(struct dl_rq *dl_rq)
121{
122 if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
123 if (!dl_rq->overloaded) {
124 dl_set_overload(rq_of_dl_rq(dl_rq));
125 dl_rq->overloaded = 1;
126 }
127 } else if (dl_rq->overloaded) {
128 dl_clear_overload(rq_of_dl_rq(dl_rq));
129 dl_rq->overloaded = 0;
130 }
131}
132
133static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
134{
135 struct task_struct *p = dl_task_of(dl_se);
136
137 if (p->nr_cpus_allowed > 1)
138 dl_rq->dl_nr_migratory++;
139
140 update_dl_migration(dl_rq);
141}
142
143static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
144{
145 struct task_struct *p = dl_task_of(dl_se);
146
147 if (p->nr_cpus_allowed > 1)
148 dl_rq->dl_nr_migratory--;
149
150 update_dl_migration(dl_rq);
151}
152
153/*
154 * The list of pushable -deadline tasks is not a plist, like in
155 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
156 */
157static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
158{
159 struct dl_rq *dl_rq = &rq->dl;
160 struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_node;
161 struct rb_node *parent = NULL;
162 struct task_struct *entry;
163 int leftmost = 1;
164
165 BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
166
167 while (*link) {
168 parent = *link;
169 entry = rb_entry(parent, struct task_struct,
170 pushable_dl_tasks);
171 if (dl_entity_preempt(&p->dl, &entry->dl))
172 link = &parent->rb_left;
173 else {
174 link = &parent->rb_right;
175 leftmost = 0;
176 }
177 }
178
179 if (leftmost) {
180 dl_rq->pushable_dl_tasks_leftmost = &p->pushable_dl_tasks;
181 dl_rq->earliest_dl.next = p->dl.deadline;
182 }
183
184 rb_link_node(&p->pushable_dl_tasks, parent, link);
185 rb_insert_color(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
186}
187
188static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
189{
190 struct dl_rq *dl_rq = &rq->dl;
191
192 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
193 return;
194
195 if (dl_rq->pushable_dl_tasks_leftmost == &p->pushable_dl_tasks) {
196 struct rb_node *next_node;
197
198 next_node = rb_next(&p->pushable_dl_tasks);
199 dl_rq->pushable_dl_tasks_leftmost = next_node;
200 if (next_node) {
201 dl_rq->earliest_dl.next = rb_entry(next_node,
202 struct task_struct, pushable_dl_tasks)->dl.deadline;
203 }
204 }
205
206 rb_erase(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
207 RB_CLEAR_NODE(&p->pushable_dl_tasks);
208}
209
210static inline int has_pushable_dl_tasks(struct rq *rq)
211{
212 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root);
213}
214
215static int push_dl_task(struct rq *rq);
216
217static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
218{
219 return dl_task(prev);
220}
221
222static DEFINE_PER_CPU(struct callback_head, dl_push_head);
223static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
224
225static void push_dl_tasks(struct rq *);
226static void pull_dl_task(struct rq *);
227
228static inline void queue_push_tasks(struct rq *rq)
229{
230 if (!has_pushable_dl_tasks(rq))
231 return;
232
233 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
234}
235
236static inline void queue_pull_task(struct rq *rq)
237{
238 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
239}
240
241static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
242
243static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
244{
245 struct rq *later_rq = NULL;
246 bool fallback = false;
247
248 later_rq = find_lock_later_rq(p, rq);
249
250 if (!later_rq) {
251 int cpu;
252
253 /*
254 * If we cannot preempt any rq, fall back to pick any
255 * online cpu.
256 */
257 fallback = true;
258 cpu = cpumask_any_and(cpu_active_mask, tsk_cpus_allowed(p));
259 if (cpu >= nr_cpu_ids) {
260 /*
261 * Fail to find any suitable cpu.
262 * The task will never come back!
263 */
264 BUG_ON(dl_bandwidth_enabled());
265
266 /*
267 * If admission control is disabled we
268 * try a little harder to let the task
269 * run.
270 */
271 cpu = cpumask_any(cpu_active_mask);
272 }
273 later_rq = cpu_rq(cpu);
274 double_lock_balance(rq, later_rq);
275 }
276
277 /*
278 * By now the task is replenished and enqueued; migrate it.
279 */
280 deactivate_task(rq, p, 0);
281 set_task_cpu(p, later_rq->cpu);
282 activate_task(later_rq, p, 0);
283
284 if (!fallback)
285 resched_curr(later_rq);
286
287 double_unlock_balance(later_rq, rq);
288
289 return later_rq;
290}
291
292#else
293
294static inline
295void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
296{
297}
298
299static inline
300void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
301{
302}
303
304static inline
305void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
306{
307}
308
309static inline
310void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
311{
312}
313
314static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
315{
316 return false;
317}
318
319static inline void pull_dl_task(struct rq *rq)
320{
321}
322
323static inline void queue_push_tasks(struct rq *rq)
324{
325}
326
327static inline void queue_pull_task(struct rq *rq)
328{
329}
330#endif /* CONFIG_SMP */
331
332static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
333static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
334static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
335 int flags);
336
337/*
338 * We are being explicitly informed that a new instance is starting,
339 * and this means that:
340 * - the absolute deadline of the entity has to be placed at
341 * current time + relative deadline;
342 * - the runtime of the entity has to be set to the maximum value.
343 *
344 * The capability of specifying such an event is useful whenever a -deadline
345 * entity wants to (try to!) synchronize its behaviour with the scheduler's
346 * one, and to (try to!) reconcile itself with its own scheduling
347 * parameters.
348 */
349static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se,
350 struct sched_dl_entity *pi_se)
351{
352 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
353 struct rq *rq = rq_of_dl_rq(dl_rq);
354
355 WARN_ON(!dl_se->dl_new || dl_se->dl_throttled);
356
357 /*
358 * We use the regular wall clock time to set deadlines in the
359 * future; in fact, we must consider execution overheads (time
360 * spent on hardirq context, etc.).
361 */
362 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
363 dl_se->runtime = pi_se->dl_runtime;
364 dl_se->dl_new = 0;
365}
366
367/*
368 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
369 * possibility of an entity lasting more than what it declared, and thus
370 * exhausting its runtime.
371 *
372 * Here we are interested in making runtime overrun possible, but we do
373 * not want an entity which is misbehaving to affect the scheduling of all
374 * other entities.
375 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
376 * is used, in order to confine each entity within its own bandwidth.
377 *
378 * This function deals exactly with that, and ensures that when the runtime
379 * of an entity is replenished, its deadline is also postponed. That ensures
380 * the overrunning entity can't interfere with other entities in the system and
381 * can't make them miss their deadlines. Reasons why this kind of overrun
382 * could happen are, typically, an entity voluntarily trying to exceed its
383 * runtime, or simply underestimating it during sched_setattr().
384 */
385static void replenish_dl_entity(struct sched_dl_entity *dl_se,
386 struct sched_dl_entity *pi_se)
387{
388 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
389 struct rq *rq = rq_of_dl_rq(dl_rq);
390
391 BUG_ON(pi_se->dl_runtime <= 0);
392
393 /*
394 * This could be the case for a !-dl task that is boosted.
395 * Just go with full inherited parameters.
396 */
397 if (dl_se->dl_deadline == 0) {
398 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
399 dl_se->runtime = pi_se->dl_runtime;
400 }
401
402 if (dl_se->dl_yielded && dl_se->runtime > 0)
403 dl_se->runtime = 0;
404
405 /*
406 * We keep moving the deadline away until we get some
407 * available runtime for the entity. This ensures correct
408 * handling of situations where the runtime overrun is
409 * arbitrarily large.
410 */
411 while (dl_se->runtime <= 0) {
412 dl_se->deadline += pi_se->dl_period;
413 dl_se->runtime += pi_se->dl_runtime;
414 }
415
416 /*
417 * At this point, the deadline really should be "in
418 * the future" with respect to rq->clock. If it's
419 * not, we are, for some reason, lagging too much!
420 * Anyway, after having warned userspace about that,
421 * we still try to keep things running by
422 * resetting the deadline and the budget of the
423 * entity.
424 */
425 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
426 printk_deferred_once("sched: DL replenish lagged too much\n");
427 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
428 dl_se->runtime = pi_se->dl_runtime;
429 }
430
431 if (dl_se->dl_yielded)
432 dl_se->dl_yielded = 0;
433 if (dl_se->dl_throttled)
434 dl_se->dl_throttled = 0;
435}
436
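/*
 * A worked example of the replenishment above, with illustrative numbers
 * only: for dl_runtime = 10ms and dl_period = 100ms, an entity that ended
 * up with runtime = -25ms needs three passes through the loop, going
 * -25ms -> -15ms -> -5ms -> +5ms, while its deadline is pushed 3 periods
 * (300ms) into the future. The overrun is thus paid back by postponing
 * future service instead of stealing bandwidth from other entities.
 */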
437/*
438 * Here we check if --at time t-- an entity (which is probably being
439 * [re]activated or, in general, enqueued) can use its remaining runtime
440 * and its current deadline _without_ exceeding the bandwidth it is
441 * assigned (function returns true if it can't). We are in fact applying
442 * one of the CBS rules: when a task wakes up, if the residual runtime
443 * over residual deadline fits within the allocated bandwidth, then we
444 * can keep the current (absolute) deadline and residual budget without
445 * disrupting the schedulability of the system. Otherwise, we should
446 * refill the runtime and set the deadline a period in the future,
447 * because keeping the current (absolute) deadline of the task would
448 * result in breaking guarantees promised to other tasks (refer to
449 * Documentation/scheduler/sched-deadline.txt for more information).
450 *
451 * This function returns true if:
452 *
453 * runtime / (deadline - t) > dl_runtime / dl_period ,
454 *
455 * IOW we can't recycle current parameters.
456 *
457 * Notice that the bandwidth check is done against the period. For
458 * tasks with deadline equal to period this is the same as using
459 * dl_deadline instead of dl_period in the equation above.
460 */
461static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
462 struct sched_dl_entity *pi_se, u64 t)
463{
464 u64 left, right;
465
466 /*
467 * left and right are the two sides of the equation above,
468 * after a bit of shuffling to use multiplications instead
469 * of divisions.
470 *
471 * Note that none of the time values involved in the two
472 * multiplications are absolute: dl_deadline and dl_runtime
473 * are the relative deadline and the maximum runtime of each
474 * instance, runtime is the runtime left for the last instance
475 * and (deadline - t), since t is rq->clock, is the time left
476 * to the (absolute) deadline. Even if overflowing the u64 type
477 * is very unlikely to occur in both cases, here we scale down
478 * as we want to avoid that risk at all. Scaling down by 10
479 * means that we reduce granularity to 1us. We are fine with it,
480 * since this is only a true/false check and, anyway, thinking
481 * of anything below microseconds resolution is actually fiction
482 * (but still we want to give the user that illusion >;).
483 */
484 left = (pi_se->dl_period >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
485 right = ((dl_se->deadline - t) >> DL_SCALE) *
486 (pi_se->dl_runtime >> DL_SCALE);
487
488 return dl_time_before(right, left);
489}
490
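/*
 * A worked instance of the check above, with illustrative values and
 * ignoring the DL_SCALE shift (which only coarsens granularity): take
 * dl_runtime = 10ms, dl_period = 100ms, remaining runtime = 4ms and
 * (deadline - t) = 30ms. Then
 *
 *	left  = dl_period * runtime         = 100ms * 4ms  = 400 ms^2
 *	right = (deadline - t) * dl_runtime =  30ms * 10ms = 300 ms^2
 *
 * so right < left, i.e. 4ms/30ms > 10ms/100ms: keeping the current
 * deadline would exceed the reserved bandwidth, the function returns
 * true and the caller refills the runtime and sets a fresh deadline.
 */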
491/*
492 * When a -deadline entity is queued back on the runqueue, its runtime and
493 * deadline might need updating.
494 *
495 * The policy here is that we update the deadline of the entity only if:
496 * - the current deadline is in the past,
497 * - using the remaining runtime with the current deadline would make
498 * the entity exceed its bandwidth.
499 */
500static void update_dl_entity(struct sched_dl_entity *dl_se,
501 struct sched_dl_entity *pi_se)
502{
503 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
504 struct rq *rq = rq_of_dl_rq(dl_rq);
505
506 /*
507 * The arrival of a new instance needs special treatment, i.e.,
508 * the actual scheduling parameters have to be "renewed".
509 */
510 if (dl_se->dl_new) {
511 setup_new_dl_entity(dl_se, pi_se);
512 return;
513 }
514
515 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
516 dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {
517 dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
518 dl_se->runtime = pi_se->dl_runtime;
519 }
520}
521
522/*
523 * If the entity depleted all its runtime, and if we want it to sleep
524 * while waiting for some new execution time to become available, we
525 * set the bandwidth enforcement timer to the replenishment instant
526 * and try to activate it.
527 *
528 * Notice that it is important for the caller to know if the timer
529 * actually started or not (i.e., the replenishment instant is in
530 * the future or in the past).
531 */
532static int start_dl_timer(struct task_struct *p)
533{
534 struct sched_dl_entity *dl_se = &p->dl;
535 struct hrtimer *timer = &dl_se->dl_timer;
536 struct rq *rq = task_rq(p);
537 ktime_t now, act;
538 s64 delta;
539
540 lockdep_assert_held(&rq->lock);
541
542 /*
543 * We want the timer to fire at the deadline, but considering
544 * that it is actually coming from rq->clock and not from
545 * hrtimer's time base reading.
546 */
547 act = ns_to_ktime(dl_se->deadline);
548 now = hrtimer_cb_get_time(timer);
549 delta = ktime_to_ns(now) - rq_clock(rq);
550 act = ktime_add_ns(act, delta);
551
552 /*
553 * If the expiry time already passed, e.g., because the value
554 * chosen as the deadline is too small, don't even try to
555 * start the timer in the past!
556 */
557 if (ktime_us_delta(act, now) < 0)
558 return 0;
559
560 /*
561 * !enqueued will guarantee another callback; even if one is already in
562 * progress. This ensures a balanced {get,put}_task_struct().
563 *
564 * The race against __run_timer() clearing the enqueued state is
565 * harmless because we're holding task_rq()->lock, therefore the timer
566 * expiring after we've done the check will wait on its task_rq_lock()
567 * and observe our state.
568 */
569 if (!hrtimer_is_queued(timer)) {
570 get_task_struct(p);
571 hrtimer_start(timer, act, HRTIMER_MODE_ABS);
572 }
573
574 return 1;
575}
576
577/*
578 * This is the bandwidth enforcement timer callback. If here, we know
579 * a task is not on its dl_rq, since the fact that the timer was running
580 * means the task is throttled and needs a runtime replenishment.
581 *
582 * However, what we actually do depends on the fact the task is active,
583 * (it is on its rq) or has been removed from there by a call to
584 * dequeue_task_dl(). In the former case we must issue the runtime
585 * replenishment and add the task back to the dl_rq; in the latter, we just
586 * do nothing but clearing dl_throttled, so that runtime and deadline
587 * updating (and the queueing back to dl_rq) will be done by the
588 * next call to enqueue_task_dl().
589 */
590static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
591{
592 struct sched_dl_entity *dl_se = container_of(timer,
593 struct sched_dl_entity,
594 dl_timer);
595 struct task_struct *p = dl_task_of(dl_se);
596 unsigned long flags;
597 struct rq *rq;
598
599 rq = task_rq_lock(p, &flags);
600
601 /*
602 * The task might have changed its scheduling policy to something
603 * different than SCHED_DEADLINE (through switched_from_dl()).
604 */
605 if (!dl_task(p)) {
606 __dl_clear_params(p);
607 goto unlock;
608 }
609
610 /*
611 * This is possible if switched_from_dl() raced against a running
612 * callback that took the above !dl_task() path and we've since then
613 * switched back into SCHED_DEADLINE.
614 *
615 * There's nothing to do except drop our task reference.
616 */
617 if (dl_se->dl_new)
618 goto unlock;
619
620 /*
621 * The task might have been boosted by someone else and might be in the
622 * boosting/deboosting path; it's not throttled.
623 */
624 if (dl_se->dl_boosted)
625 goto unlock;
626
627 /*
628 * Spurious timer due to start_dl_timer() race; or we already received
629 * a replenishment from rt_mutex_setprio().
630 */
631 if (!dl_se->dl_throttled)
632 goto unlock;
633
634 sched_clock_tick();
635 update_rq_clock(rq);
636
637 /*
638 * If the throttle happened during sched-out; like:
639 *
640 * schedule()
641 * deactivate_task()
642 * dequeue_task_dl()
643 * update_curr_dl()
644 * start_dl_timer()
645 * __dequeue_task_dl()
646 * prev->on_rq = 0;
647 *
648 * We can be both throttled and !queued. Replenish the counter
649 * but do not enqueue -- wait for our wakeup to do that.
650 */
651 if (!task_on_rq_queued(p)) {
652 replenish_dl_entity(dl_se, dl_se);
653 goto unlock;
654 }
655
656 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
657 if (dl_task(rq->curr))
658 check_preempt_curr_dl(rq, p, 0);
659 else
660 resched_curr(rq);
661
662#ifdef CONFIG_SMP
663 /*
664 * Perform balancing operations here; after the replenishments. We
665 * cannot drop rq->lock before this, otherwise the assertion in
666 * start_dl_timer() about not missing updates is not true.
667 *
668 * If we find that the rq the task was on is no longer available, we
669 * need to select a new rq.
670 *
671 * XXX figure out if select_task_rq_dl() deals with offline cpus.
672 */
673 if (unlikely(!rq->online))
674 rq = dl_task_offline_migration(rq, p);
675
676 /*
677 * Queueing this task back might have overloaded rq, check if we need
678 * to kick someone away.
679 */
680 if (has_pushable_dl_tasks(rq)) {
681 /*
682 * Nothing relies on rq->lock after this, so it's safe to drop
683 * rq->lock.
684 */
685 lockdep_unpin_lock(&rq->lock);
686 push_dl_task(rq);
687 lockdep_pin_lock(&rq->lock);
688 }
689#endif
690
691unlock:
692 task_rq_unlock(rq, p, &flags);
693
694 /*
695 * This can free the task_struct, including this hrtimer, do not touch
696 * anything related to that after this.
697 */
698 put_task_struct(p);
699
700 return HRTIMER_NORESTART;
701}
702
703void init_dl_task_timer(struct sched_dl_entity *dl_se)
704{
705 struct hrtimer *timer = &dl_se->dl_timer;
706
707 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
708 timer->function = dl_task_timer;
709}
710
711static
712int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
713{
714 return (dl_se->runtime <= 0);
715}
716
717extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
718
719/*
720 * Update the current task's runtime statistics (provided it is still
721 * a -deadline task and has not been removed from the dl_rq).
722 */
723static void update_curr_dl(struct rq *rq)
724{
725 struct task_struct *curr = rq->curr;
726 struct sched_dl_entity *dl_se = &curr->dl;
727 u64 delta_exec;
728
729 if (!dl_task(curr) || !on_dl_rq(dl_se))
730 return;
731
732 /*
733 * Consumed budget is computed considering the time as
734 * observed by schedulable tasks (excluding time spent
735 * in hardirq context, etc.). Deadlines are instead
736 * computed using hard walltime. This seems to be the more
737 * natural solution, but the full ramifications of this
738 * approach need further study.
739 */
740 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
741 if (unlikely((s64)delta_exec <= 0)) {
742 if (unlikely(dl_se->dl_yielded))
743 goto throttle;
744 return;
745 }
746
747 schedstat_set(curr->se.statistics.exec_max,
748 max(curr->se.statistics.exec_max, delta_exec));
749
750 curr->se.sum_exec_runtime += delta_exec;
751 account_group_exec_runtime(curr, delta_exec);
752
753 curr->se.exec_start = rq_clock_task(rq);
754 cpuacct_charge(curr, delta_exec);
755
756 sched_rt_avg_update(rq, delta_exec);
757
758 dl_se->runtime -= delta_exec;
759
760throttle:
761 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
762 dl_se->dl_throttled = 1;
763 __dequeue_task_dl(rq, curr, 0);
764 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
765 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
766
767 if (!is_leftmost(curr, &rq->dl))
768 resched_curr(rq);
769 }
770
771 /*
772 * Because -- for now -- we share the rt bandwidth, we need to
773 * account our runtime there too, otherwise actual rt tasks
774 * would be able to exceed the shared quota.
775 *
776 * Account to the root rt group for now.
777 *
778 * The solution we're working towards is having the RT groups scheduled
779 * using deadline servers -- however there's a few nasties to figure
780 * out before that can happen.
781 */
782 if (rt_bandwidth_enabled()) {
783 struct rt_rq *rt_rq = &rq->rt;
784
785 raw_spin_lock(&rt_rq->rt_runtime_lock);
786 /*
787 * We'll let actual RT tasks worry about the overflow here, we
788 * have our own CBS to keep us inline; only account when RT
789 * bandwidth is relevant.
790 */
791 if (sched_rt_bandwidth_account(rt_rq))
792 rt_rq->rt_time += delta_exec;
793 raw_spin_unlock(&rt_rq->rt_runtime_lock);
794 }
795}
796
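/*
 * For instance (illustrative values): a task with dl_runtime = 10ms that
 * has executed 11ms since its last replenishment reaches the throttle
 * path above with runtime = -1ms. It is marked dl_throttled and dequeued,
 * and dl_task_timer() will replenish and re-enqueue it at its current
 * absolute deadline; if that deadline has already passed and the timer
 * cannot be armed, it is replenished immediately via ENQUEUE_REPLENISH.
 */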
797#ifdef CONFIG_SMP
798
799static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
800{
801 struct rq *rq = rq_of_dl_rq(dl_rq);
802
803 if (dl_rq->earliest_dl.curr == 0 ||
804 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
805 dl_rq->earliest_dl.curr = deadline;
806 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline, 1);
807 }
808}
809
810static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
811{
812 struct rq *rq = rq_of_dl_rq(dl_rq);
813
814 /*
815 * Since we may have removed our earliest (and/or next earliest)
816 * task we must recompute them.
817 */
818 if (!dl_rq->dl_nr_running) {
819 dl_rq->earliest_dl.curr = 0;
820 dl_rq->earliest_dl.next = 0;
821 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
822 } else {
823 struct rb_node *leftmost = dl_rq->rb_leftmost;
824 struct sched_dl_entity *entry;
825
826 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
827 dl_rq->earliest_dl.curr = entry->deadline;
828 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline, 1);
829 }
830}
831
832#else
833
834static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
835static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
836
837#endif /* CONFIG_SMP */
838
839static inline
840void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
841{
842 int prio = dl_task_of(dl_se)->prio;
843 u64 deadline = dl_se->deadline;
844
845 WARN_ON(!dl_prio(prio));
846 dl_rq->dl_nr_running++;
847 add_nr_running(rq_of_dl_rq(dl_rq), 1);
848
849 inc_dl_deadline(dl_rq, deadline);
850 inc_dl_migration(dl_se, dl_rq);
851}
852
853static inline
854void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
855{
856 int prio = dl_task_of(dl_se)->prio;
857
858 WARN_ON(!dl_prio(prio));
859 WARN_ON(!dl_rq->dl_nr_running);
860 dl_rq->dl_nr_running--;
861 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
862
863 dec_dl_deadline(dl_rq, dl_se->deadline);
864 dec_dl_migration(dl_se, dl_rq);
865}
866
867static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
868{
869 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
870 struct rb_node **link = &dl_rq->rb_root.rb_node;
871 struct rb_node *parent = NULL;
872 struct sched_dl_entity *entry;
873 int leftmost = 1;
874
875 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
876
877 while (*link) {
878 parent = *link;
879 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
880 if (dl_time_before(dl_se->deadline, entry->deadline))
881 link = &parent->rb_left;
882 else {
883 link = &parent->rb_right;
884 leftmost = 0;
885 }
886 }
887
888 if (leftmost)
889 dl_rq->rb_leftmost = &dl_se->rb_node;
890
891 rb_link_node(&dl_se->rb_node, parent, link);
892 rb_insert_color(&dl_se->rb_node, &dl_rq->rb_root);
893
894 inc_dl_tasks(dl_se, dl_rq);
895}
896
897static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
898{
899 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
900
901 if (RB_EMPTY_NODE(&dl_se->rb_node))
902 return;
903
904 if (dl_rq->rb_leftmost == &dl_se->rb_node) {
905 struct rb_node *next_node;
906
907 next_node = rb_next(&dl_se->rb_node);
908 dl_rq->rb_leftmost = next_node;
909 }
910
911 rb_erase(&dl_se->rb_node, &dl_rq->rb_root);
912 RB_CLEAR_NODE(&dl_se->rb_node);
913
914 dec_dl_tasks(dl_se, dl_rq);
915}
916
917static void
918enqueue_dl_entity(struct sched_dl_entity *dl_se,
919 struct sched_dl_entity *pi_se, int flags)
920{
921 BUG_ON(on_dl_rq(dl_se));
922
923 /*
924 * If this is a wakeup or a new instance, the scheduling
925 * parameters of the task might need updating. Otherwise,
926 * we want a replenishment of its runtime.
927 */
928 if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
929 update_dl_entity(dl_se, pi_se);
930 else if (flags & ENQUEUE_REPLENISH)
931 replenish_dl_entity(dl_se, pi_se);
932
933 __enqueue_dl_entity(dl_se);
934}
935
936static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
937{
938 __dequeue_dl_entity(dl_se);
939}
940
941static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
942{
943 struct task_struct *pi_task = rt_mutex_get_top_task(p);
944 struct sched_dl_entity *pi_se = &p->dl;
945
946 /*
947 * Use the scheduling parameters of the top pi-waiter
948 * task if we have one and its (absolute) deadline is
949 * smaller than ours; otherwise we keep our runtime and
950 * deadline.
951 */
952 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
953 pi_se = &pi_task->dl;
954 } else if (!dl_prio(p->normal_prio)) {
955 /*
956 * Special case in which we have a !SCHED_DEADLINE task
957 * that is going to be deboosted, but exceeds its
958 * runtime while doing so. No point in replenishing
959 * it, as it's going to return back to its original
960 * scheduling class after this.
961 */
962 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
963 return;
964 }
965
966 /*
967 * If p is throttled, we do nothing. In fact, if it exhausted
968 * its budget it needs a replenishment and, since it now is on
969 * its rq, the bandwidth timer callback (which clearly has not
970 * run yet) will take care of this.
971 */
972 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
973 return;
974
975 enqueue_dl_entity(&p->dl, pi_se, flags);
976
977 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
978 enqueue_pushable_dl_task(rq, p);
979}
980
981static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
982{
983 dequeue_dl_entity(&p->dl);
984 dequeue_pushable_dl_task(rq, p);
985}
986
987static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
988{
989 update_curr_dl(rq);
990 __dequeue_task_dl(rq, p, flags);
991}
992
993/*
994 * Yield task semantic for -deadline tasks is:
995 *
996 * get off from the CPU until our next instance, with
997 * a new runtime. This is of little use now, since we
998 * don't have a bandwidth reclaiming mechanism. Anyway,
999 * bandwidth reclaiming is planned for the future, and
1000 * yield_task_dl will indicate that some spare budget
1001 * is available for other task instances to use.
1002 */
1003static void yield_task_dl(struct rq *rq)
1004{
1005 /*
1006 * We make the task go to sleep until its current deadline by
1007 * forcing its runtime to zero. This way, update_curr_dl() stops
1008 * it and the bandwidth timer will wake it up and will give it
1009 * new scheduling parameters (thanks to dl_yielded=1).
1010 */
1011 rq->curr->dl.dl_yielded = 1;
1012
1013 update_rq_clock(rq);
1014 update_curr_dl(rq);
1015 /*
1016 * Tell update_rq_clock() that we've just updated,
1017 * so we don't do microscopic update in schedule()
1018 * and double the fastpath cost.
1019 */
1020 rq_clock_skip_update(rq, true);
1021}
1022
1023#ifdef CONFIG_SMP
1024
1025static int find_later_rq(struct task_struct *task);
1026
1027static int
1028select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1029{
1030 struct task_struct *curr;
1031 struct rq *rq;
1032
1033 if (sd_flag != SD_BALANCE_WAKE)
1034 goto out;
1035
1036 rq = cpu_rq(cpu);
1037
1038 rcu_read_lock();
1039 curr = READ_ONCE(rq->curr); /* unlocked access */
1040
1041 /*
1042 * If we are dealing with a -deadline task, we must
1043 * decide where to wake it up.
1044 * If it has a later deadline and the current task
1045 * on this rq can't move (provided the waking task
1046 * can!) we prefer to send it somewhere else. On the
1047 * other hand, if it has a shorter deadline, we
1048 * try to make it stay here, it might be important.
1049 */
1050 if (unlikely(dl_task(curr)) &&
1051 (curr->nr_cpus_allowed < 2 ||
1052 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1053 (p->nr_cpus_allowed > 1)) {
1054 int target = find_later_rq(p);
1055
1056 if (target != -1 &&
1057 (dl_time_before(p->dl.deadline,
1058 cpu_rq(target)->dl.earliest_dl.curr) ||
1059 (cpu_rq(target)->dl.dl_nr_running == 0)))
1060 cpu = target;
1061 }
1062 rcu_read_unlock();
1063
1064out:
1065 return cpu;
1066}
1067
1068static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1069{
1070 /*
1071 * Current can't be migrated, useless to reschedule,
1072 * let's hope p can move out.
1073 */
1074 if (rq->curr->nr_cpus_allowed == 1 ||
1075 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1)
1076 return;
1077
1078 /*
1079 * p is migratable, so let's not schedule it and
1080 * see if it is pushed or pulled somewhere else.
1081 */
1082 if (p->nr_cpus_allowed != 1 &&
1083 cpudl_find(&rq->rd->cpudl, p, NULL) != -1)
1084 return;
1085
1086 resched_curr(rq);
1087}
1088
1089#endif /* CONFIG_SMP */
1090
1091/*
1092 * Only called when both the current and waking task are -deadline
1093 * tasks.
1094 */
1095static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1096 int flags)
1097{
1098 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
1099 resched_curr(rq);
1100 return;
1101 }
1102
1103#ifdef CONFIG_SMP
1104 /*
1105 * In the unlikely case current and p have the same deadline
1106 * let us try to decide what's the best thing to do...
1107 */
1108 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1109 !test_tsk_need_resched(rq->curr))
1110 check_preempt_equal_dl(rq, p);
1111#endif /* CONFIG_SMP */
1112}
1113
1114#ifdef CONFIG_SCHED_HRTICK
1115static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1116{
1117 hrtick_start(rq, p->dl.runtime);
1118}
1119#else /* !CONFIG_SCHED_HRTICK */
1120static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1121{
1122}
1123#endif
1124
1125static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1126 struct dl_rq *dl_rq)
1127{
1128 struct rb_node *left = dl_rq->rb_leftmost;
1129
1130 if (!left)
1131 return NULL;
1132
1133 return rb_entry(left, struct sched_dl_entity, rb_node);
1134}
1135
1136struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev)
1137{
1138 struct sched_dl_entity *dl_se;
1139 struct task_struct *p;
1140 struct dl_rq *dl_rq;
1141
1142 dl_rq = &rq->dl;
1143
1144 if (need_pull_dl_task(rq, prev)) {
1145 /*
1146 * This is OK, because current is on_cpu, which avoids it being
1147 * picked for load-balance and preemption/IRQs are still
1148 * disabled avoiding further scheduler activity on it and we're
1149 * being very careful to re-start the picking loop.
1150 */
1151 lockdep_unpin_lock(&rq->lock);
1152 pull_dl_task(rq);
1153 lockdep_pin_lock(&rq->lock);
1154 /*
1155 * pull_dl_task() can drop (and re-acquire) rq->lock; this
1156 * means a stop task can slip in, in which case we need to
1157 * re-start task selection.
1158 */
1159 if (rq->stop && task_on_rq_queued(rq->stop))
1160 return RETRY_TASK;
1161 }
1162
734ff2a7
KT
1163 /*
1164 * When prev is DL, we may throttle it in put_prev_task().
1165 * So, we update time before we check for dl_nr_running.
1166 */
1167 if (prev->sched_class == &dl_sched_class)
1168 update_curr_dl(rq);
1169
1170 if (unlikely(!dl_rq->dl_nr_running))
1171 return NULL;
1172
1173 put_prev_task(rq, prev);
1174
1175 dl_se = pick_next_dl_entity(rq, dl_rq);
1176 BUG_ON(!dl_se);
1177
1178 p = dl_task_of(dl_se);
1179 p->se.exec_start = rq_clock_task(rq);
1180
1181 /* Running task will never be pushed. */
1182 dequeue_pushable_dl_task(rq, p);
1183
1184 if (hrtick_enabled(rq))
1185 start_hrtick_dl(rq, p);
1186
1187 queue_push_tasks(rq);
1188
1189 return p;
1190}
1191
1192static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
1193{
1194 update_curr_dl(rq);
1195
1196 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1197 enqueue_pushable_dl_task(rq, p);
1198}
1199
1200static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1201{
1202 update_curr_dl(rq);
1203
1204 /*
1205 * Even when we have runtime, update_curr_dl() might have resulted in us
1206 * not being the leftmost task anymore. In that case NEED_RESCHED will
1207 * be set and schedule() will start a new hrtick for the next task.
1208 */
1209 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1210 is_leftmost(p, &rq->dl))
1211 start_hrtick_dl(rq, p);
1212}
1213
1214static void task_fork_dl(struct task_struct *p)
1215{
1216 /*
1217 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1218 * sched_fork()
1219 */
1220}
1221
1222static void task_dead_dl(struct task_struct *p)
1223{
1224 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1225
1226 /*
1227 * Since we are TASK_DEAD we won't slip out of the domain!
1228 */
1229 raw_spin_lock_irq(&dl_b->lock);
1230 /* XXX we should retain the bw until 0-lag */
1231 dl_b->total_bw -= p->dl.dl_bw;
1232 raw_spin_unlock_irq(&dl_b->lock);
1233}
1234
1235static void set_curr_task_dl(struct rq *rq)
1236{
1237 struct task_struct *p = rq->curr;
1238
1239 p->se.exec_start = rq_clock_task(rq);
1240
1241 /* You can't push away the running task */
1242 dequeue_pushable_dl_task(rq, p);
1243}
1244
1245#ifdef CONFIG_SMP
1246
1247/* Only try algorithms three times */
1248#define DL_MAX_TRIES 3
1249
1250static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1251{
1252 if (!task_running(rq, p) &&
1253 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
1254 return 1;
1255 return 0;
1256}
1257
1258/*
1259 * Return the earliest pushable rq's task, which is suitable to be executed
1260 * on the CPU, NULL otherwise:
1261 */
1262static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1263{
1264 struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
1265 struct task_struct *p = NULL;
1266
1267 if (!has_pushable_dl_tasks(rq))
1268 return NULL;
1269
1270next_node:
1271 if (next_node) {
1272 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1273
1274 if (pick_dl_task(rq, p, cpu))
1275 return p;
1276
1277 next_node = rb_next(next_node);
1278 goto next_node;
1279 }
1280
1281 return NULL;
1282}
1283
1284static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1285
1286static int find_later_rq(struct task_struct *task)
1287{
1288 struct sched_domain *sd;
1289 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1290 int this_cpu = smp_processor_id();
1291 int best_cpu, cpu = task_cpu(task);
1292
1293 /* Make sure the mask is initialized first */
1294 if (unlikely(!later_mask))
1295 return -1;
1296
1297 if (task->nr_cpus_allowed == 1)
1298 return -1;
1299
1300 /*
1301 * We have to consider system topology and task affinity
1302 * first, then we can look for a suitable cpu.
1303 */
1304 best_cpu = cpudl_find(&task_rq(task)->rd->cpudl,
1305 task, later_mask);
1306 if (best_cpu == -1)
1307 return -1;
1308
1309 /*
1310 * If we are here, some target has been found,
1311 * the most suitable of which is cached in best_cpu.
1312 * This is, among the runqueues where the current tasks
1313 * have later deadlines than the task's one, the rq
1314 * with the latest possible one.
1315 *
1316 * Now we check how well this matches with task's
1317 * affinity and system topology.
1318 *
1320 * The last cpu where the task ran is our first
1320 * guess, since it is most likely cache-hot there.
1321 */
1322 if (cpumask_test_cpu(cpu, later_mask))
1323 return cpu;
1324 /*
1325 * Check if this_cpu is to be skipped (i.e., it is
1326 * not in the mask) or not.
1327 */
1328 if (!cpumask_test_cpu(this_cpu, later_mask))
1329 this_cpu = -1;
1330
1331 rcu_read_lock();
1332 for_each_domain(cpu, sd) {
1333 if (sd->flags & SD_WAKE_AFFINE) {
1334
1335 /*
1336 * If possible, preempting this_cpu is
1337 * cheaper than migrating.
1338 */
1339 if (this_cpu != -1 &&
1340 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1341 rcu_read_unlock();
1342 return this_cpu;
1343 }
1344
1345 /*
1346 * Last chance: if best_cpu is valid and is
1347 * in the mask, that becomes our choice.
1348 */
1349 if (best_cpu < nr_cpu_ids &&
1350 cpumask_test_cpu(best_cpu, sched_domain_span(sd))) {
1351 rcu_read_unlock();
1352 return best_cpu;
1353 }
1354 }
1355 }
1356 rcu_read_unlock();
1357
1358 /*
1359 * At this point, all our guesses failed, we just return
1360 * 'something', and let the caller sort the things out.
1361 */
1362 if (this_cpu != -1)
1363 return this_cpu;
1364
1365 cpu = cpumask_any(later_mask);
1366 if (cpu < nr_cpu_ids)
1367 return cpu;
1368
1369 return -1;
1370}
1371
1372/* Locks the rq it finds */
1373static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1374{
1375 struct rq *later_rq = NULL;
1376 int tries;
1377 int cpu;
1378
1379 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1380 cpu = find_later_rq(task);
1381
1382 if ((cpu == -1) || (cpu == rq->cpu))
1383 break;
1384
1385 later_rq = cpu_rq(cpu);
1386
1387 if (later_rq->dl.dl_nr_running &&
1388 !dl_time_before(task->dl.deadline,
1389 later_rq->dl.earliest_dl.curr)) {
1390 /*
1391 * Target rq has tasks of equal or earlier deadline,
1392 * retrying does not release any lock and is unlikely
1393 * to yield a different result.
1394 */
1395 later_rq = NULL;
1396 break;
1397 }
1398
1399 /* Retry if something changed. */
1400 if (double_lock_balance(rq, later_rq)) {
1401 if (unlikely(task_rq(task) != rq ||
1402 !cpumask_test_cpu(later_rq->cpu,
1403 &task->cpus_allowed) ||
1404 task_running(rq, task) ||
1405 !task_on_rq_queued(task))) {
1406 double_unlock_balance(rq, later_rq);
1407 later_rq = NULL;
1408 break;
1409 }
1410 }
1411
1412 /*
1413 * If the rq we found has no -deadline task, or
1414 * its earliest one has a later deadline than our
1415 * task, the rq is a good one.
1416 */
1417 if (!later_rq->dl.dl_nr_running ||
1418 dl_time_before(task->dl.deadline,
1419 later_rq->dl.earliest_dl.curr))
1420 break;
1421
1422 /* Otherwise we try again. */
1423 double_unlock_balance(rq, later_rq);
1424 later_rq = NULL;
1425 }
1426
1427 return later_rq;
1428}
1429
1430static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
1431{
1432 struct task_struct *p;
1433
1434 if (!has_pushable_dl_tasks(rq))
1435 return NULL;
1436
1437 p = rb_entry(rq->dl.pushable_dl_tasks_leftmost,
1438 struct task_struct, pushable_dl_tasks);
1439
1440 BUG_ON(rq->cpu != task_cpu(p));
1441 BUG_ON(task_current(rq, p));
1442 BUG_ON(p->nr_cpus_allowed <= 1);
1443
1444 BUG_ON(!task_on_rq_queued(p));
1445 BUG_ON(!dl_task(p));
1446
1447 return p;
1448}
1449
1450/*
1451 * See if the non running -deadline tasks on this rq
1452 * can be sent to some other CPU where they can preempt
1453 * and start executing.
1454 */
1455static int push_dl_task(struct rq *rq)
1456{
1457 struct task_struct *next_task;
1458 struct rq *later_rq;
1459 int ret = 0;
1460
1461 if (!rq->dl.overloaded)
1462 return 0;
1463
1464 next_task = pick_next_pushable_dl_task(rq);
1465 if (!next_task)
1466 return 0;
1467
1468retry:
1469 if (unlikely(next_task == rq->curr)) {
1470 WARN_ON(1);
1471 return 0;
1472 }
1473
1474 /*
1475 * If next_task preempts rq->curr, and rq->curr
1476 * can move away, it makes sense to just reschedule
1477 * without going further in pushing next_task.
1478 */
1479 if (dl_task(rq->curr) &&
1480 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
1481 rq->curr->nr_cpus_allowed > 1) {
1482 resched_curr(rq);
1483 return 0;
1484 }
1485
1486 /* We might release rq lock */
1487 get_task_struct(next_task);
1488
1489 /* Will lock the rq it'll find */
1490 later_rq = find_lock_later_rq(next_task, rq);
1491 if (!later_rq) {
1492 struct task_struct *task;
1493
1494 /*
1495 * We must check all this again, since
1496 * find_lock_later_rq releases rq->lock and it is
1497 * then possible that next_task has migrated.
1498 */
1499 task = pick_next_pushable_dl_task(rq);
1500 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1501 /*
1502 * The task is still there. We don't try
1503 * again, some other cpu will pull it when ready.
1504 */
1505 goto out;
1506 }
1507
1508 if (!task)
1509 /* No more tasks */
1510 goto out;
1511
1512 put_task_struct(next_task);
1513 next_task = task;
1514 goto retry;
1515 }
1516
1517 deactivate_task(rq, next_task, 0);
1518 set_task_cpu(next_task, later_rq->cpu);
1519 activate_task(later_rq, next_task, 0);
1520 ret = 1;
1521
1522 resched_curr(later_rq);
1523
1524 double_unlock_balance(rq, later_rq);
1525
1526out:
1527 put_task_struct(next_task);
1528
1529 return ret;
1530}
1531
1532static void push_dl_tasks(struct rq *rq)
1533{
1534 /* push_dl_task() will return true if it moved a -deadline task */
1535 while (push_dl_task(rq))
1536 ;
1537}
1538
1539static void pull_dl_task(struct rq *this_rq)
1540{
1541 int this_cpu = this_rq->cpu, cpu;
1542 struct task_struct *p;
1543 bool resched = false;
1544 struct rq *src_rq;
1545 u64 dmin = LONG_MAX;
1546
1547 if (likely(!dl_overloaded(this_rq)))
1548 return;
1549
1550 /*
1551 * Match the barrier from dl_set_overload(); this guarantees that if we
1552 * see overloaded we must also see the dlo_mask bit.
1553 */
1554 smp_rmb();
1555
1556 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
1557 if (this_cpu == cpu)
1558 continue;
1559
1560 src_rq = cpu_rq(cpu);
1561
1562 /*
1563 * It looks racy, and it is! However, as in sched_rt.c,
1564 * we are fine with this.
1565 */
1566 if (this_rq->dl.dl_nr_running &&
1567 dl_time_before(this_rq->dl.earliest_dl.curr,
1568 src_rq->dl.earliest_dl.next))
1569 continue;
1570
1571 /* Might drop this_rq->lock */
1572 double_lock_balance(this_rq, src_rq);
1573
1574 /*
1575 * If there are no more pullable tasks on the
1576 * rq, we're done with it.
1577 */
1578 if (src_rq->dl.dl_nr_running <= 1)
1579 goto skip;
1580
1581 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1582
1583 /*
1584 * We found a task to be pulled if:
1585 * - it preempts our current (if there's one),
1586 * - it will preempt the last one we pulled (if any).
1587 */
1588 if (p && dl_time_before(p->dl.deadline, dmin) &&
1589 (!this_rq->dl.dl_nr_running ||
1590 dl_time_before(p->dl.deadline,
1591 this_rq->dl.earliest_dl.curr))) {
1592 WARN_ON(p == src_rq->curr);
1593 WARN_ON(!task_on_rq_queued(p));
1594
1595 /*
1596 * Then we pull iff p has actually an earlier
1597 * deadline than the current task of its runqueue.
1598 */
1599 if (dl_time_before(p->dl.deadline,
1600 src_rq->curr->dl.deadline))
1601 goto skip;
1602
1603 resched = true;
1604
1605 deactivate_task(src_rq, p, 0);
1606 set_task_cpu(p, this_cpu);
1607 activate_task(this_rq, p, 0);
1608 dmin = p->dl.deadline;
1609
1610 /* Is there any other task even earlier? */
1611 }
1612skip:
1613 double_unlock_balance(this_rq, src_rq);
1614 }
1615
1616 if (resched)
1617 resched_curr(this_rq);
1618}
1619
1620/*
1621 * Since the task is not running and a reschedule is not going to happen
1622 * anytime soon on its runqueue, we try pushing it away now.
1623 */
1624static void task_woken_dl(struct rq *rq, struct task_struct *p)
1625{
1626 if (!task_running(rq, p) &&
1627 !test_tsk_need_resched(rq->curr) &&
1628 p->nr_cpus_allowed > 1 &&
1629 dl_task(rq->curr) &&
1630 (rq->curr->nr_cpus_allowed < 2 ||
1631 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1632 push_dl_tasks(rq);
1633 }
1634}
1635
1636static void set_cpus_allowed_dl(struct task_struct *p,
1637 const struct cpumask *new_mask)
1638{
1639 struct root_domain *src_rd;
1640 struct rq *rq;
1641
1642 BUG_ON(!dl_task(p));
1643
1644 rq = task_rq(p);
1645 src_rd = rq->rd;
1646 /*
1647 * Migrating a SCHED_DEADLINE task between exclusive
1648 * cpusets (different root_domains) entails a bandwidth
1649 * update. We already made space for us in the destination
1650 * domain (see cpuset_can_attach()).
1651 */
1652 if (!cpumask_intersects(src_rd->span, new_mask)) {
1653 struct dl_bw *src_dl_b;
1654
1655 src_dl_b = dl_bw_of(cpu_of(rq));
1656 /*
1657 * We now free resources of the root_domain we are migrating
1658 * off. In the worst case, sched_setattr() may temporarily fail
1659 * until we complete the update.
1660 */
1661 raw_spin_lock(&src_dl_b->lock);
1662 __dl_clear(src_dl_b, p->dl.dl_bw);
1663 raw_spin_unlock(&src_dl_b->lock);
1664 }
1665
1666 set_cpus_allowed_common(p, new_mask);
1667}
1668
1669/* Assumes rq->lock is held */
1670static void rq_online_dl(struct rq *rq)
1671{
1672 if (rq->dl.overloaded)
1673 dl_set_overload(rq);
1674
1675 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
1676 if (rq->dl.dl_nr_running > 0)
1677 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1);
1678}
1679
1680/* Assumes rq->lock is held */
1681static void rq_offline_dl(struct rq *rq)
1682{
1683 if (rq->dl.overloaded)
1684 dl_clear_overload(rq);
6bfd6d72
JL
1685
1686 cpudl_set(&rq->rd->cpudl, rq->cpu, 0, 0);
1687 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1688}
1689
1690void __init init_sched_dl_class(void)
1691{
1692 unsigned int i;
1693
1694 for_each_possible_cpu(i)
1695 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
1696 GFP_KERNEL, cpu_to_node(i));
1697}
1698
1699#endif /* CONFIG_SMP */
1700
1701static void switched_from_dl(struct rq *rq, struct task_struct *p)
1702{
1703 /*
1704 * Start the deadline timer; if we switch back to dl before this we'll
1705 * continue consuming our current CBS slice. If we stay outside of
1706 * SCHED_DEADLINE until the deadline passes, the timer will reset the
1707 * task.
1708 */
1709 if (!start_dl_timer(p))
1710 __dl_clear_params(p);
1711
1712 /*
1713 * Since this might be the only -deadline task on the rq,
1714 * this is the right place to try to pull some other one
1715 * from an overloaded cpu, if any.
1716 */
1717 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
1718 return;
1719
1720 queue_pull_task(rq);
1721}
1722
1723/*
1724 * When switching to -deadline, we may overload the rq, then
1725 * we try to push someone off, if possible.
1726 */
1727static void switched_to_dl(struct rq *rq, struct task_struct *p)
1728{
1729 if (task_on_rq_queued(p) && rq->curr != p) {
1730#ifdef CONFIG_SMP
1731 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
1732 queue_push_tasks(rq);
1733#else
1734 if (dl_task(rq->curr))
1735 check_preempt_curr_dl(rq, p, 0);
1736 else
1737 resched_curr(rq);
1738#endif
1739 }
1740}
1741
1742/*
1743 * If the scheduling parameters of a -deadline task changed,
1744 * a push or pull operation might be needed.
1745 */
1746static void prio_changed_dl(struct rq *rq, struct task_struct *p,
1747 int oldprio)
1748{
1749 if (task_on_rq_queued(p) || rq->curr == p) {
1750#ifdef CONFIG_SMP
1751 /*
1752 * This might be too much, but unfortunately
1753 * we don't have the old deadline value, and
1754 * we can't argue if the task is increasing
1755 * or lowering its prio, so...
1756 */
1757 if (!rq->dl.overloaded)
1758 queue_pull_task(rq);
1759
1760 /*
1761 * If we now have an earlier deadline task than p,
1762 * then reschedule, provided p is still on this
1763 * runqueue.
1764 */
1765 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
1766 resched_curr(rq);
1767#else
1768 /*
1769 * Again, we don't know if p has an earlier
1770 * or later deadline, so let's blindly set a
1771 * (maybe not needed) rescheduling point.
1772 */
1773 resched_curr(rq);
1774#endif /* CONFIG_SMP */
1775 } else
1776 switched_to_dl(rq, p);
1777}
1778
1779const struct sched_class dl_sched_class = {
1780 .next = &rt_sched_class,
1781 .enqueue_task = enqueue_task_dl,
1782 .dequeue_task = dequeue_task_dl,
1783 .yield_task = yield_task_dl,
1784
1785 .check_preempt_curr = check_preempt_curr_dl,
1786
1787 .pick_next_task = pick_next_task_dl,
1788 .put_prev_task = put_prev_task_dl,
1789
1790#ifdef CONFIG_SMP
1791 .select_task_rq = select_task_rq_dl,
1792 .set_cpus_allowed = set_cpus_allowed_dl,
1793 .rq_online = rq_online_dl,
1794 .rq_offline = rq_offline_dl,
1795 .task_woken = task_woken_dl,
1796#endif
1797
1798 .set_curr_task = set_curr_task_dl,
1799 .task_tick = task_tick_dl,
1800 .task_fork = task_fork_dl,
1801 .task_dead = task_dead_dl,
1802
1803 .prio_changed = prio_changed_dl,
1804 .switched_from = switched_from_dl,
1805 .switched_to = switched_to_dl,
1806
1807 .update_curr = update_curr_dl,
1808};
1809
1810#ifdef CONFIG_SCHED_DEBUG
1811extern void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq);
1812
1813void print_dl_stats(struct seq_file *m, int cpu)
1814{
1815 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
1816}
1817#endif /* CONFIG_SCHED_DEBUG */