kernel/sched/deadline.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Deadline Scheduling Class (SCHED_DEADLINE)
 *
 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
 *
 * Tasks that periodically execute their instances for less than their
 * runtime won't miss any of their deadlines.
 * Tasks that are not periodic or sporadic or that try to execute more
 * than their reserved bandwidth will be slowed down (and may potentially
 * miss some of their deadlines), and won't affect any other task.
 *
 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
 *                    Juri Lelli <juri.lelli@gmail.com>,
 *                    Michael Trimarchi <michael@amarulasolutions.com>,
 *                    Fabio Checconi <fchecconi@gmail.com>
 */
#include "sched.h"
#include "pelt.h"

struct dl_bandwidth def_dl_bandwidth;

static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
{
        return container_of(dl_se, struct task_struct, dl);
}

static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
        return container_of(dl_rq, struct rq, dl);
}

static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = task_rq(p);

        return &rq->dl;
}

static inline int on_dl_rq(struct sched_dl_entity *dl_se)
{
        return !RB_EMPTY_NODE(&dl_se->rb_node);
}

#ifdef CONFIG_SMP
static inline struct dl_bw *dl_bw_of(int i)
{
        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");
        return &cpu_rq(i)->rd->dl_bw;
}

static inline int dl_bw_cpus(int i)
{
        struct root_domain *rd = cpu_rq(i)->rd;
        int cpus = 0;

        RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
                         "sched RCU must be held");
        for_each_cpu_and(i, rd->span, cpu_active_mask)
                cpus++;

        return cpus;
}
#else
static inline struct dl_bw *dl_bw_of(int i)
{
        return &cpu_rq(i)->dl.dl_bw;
}

static inline int dl_bw_cpus(int i)
{
        return 1;
}
#endif

static inline
void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->running_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->running_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->running_bw -= dl_bw;
        SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
        if (dl_rq->running_bw > old)
                dl_rq->running_bw = 0;
        /* kick cpufreq (see the comment in kernel/sched/sched.h). */
        cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
}

static inline
void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->this_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->this_bw += dl_bw;
        SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
}

static inline
void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
{
        u64 old = dl_rq->this_bw;

        lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
        dl_rq->this_bw -= dl_bw;
        SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
        if (dl_rq->this_bw > old)
                dl_rq->this_bw = 0;
        SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
}

static inline
void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __add_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __sub_rq_bw(dl_se->dl_bw, dl_rq);
}

static inline
void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __add_running_bw(dl_se->dl_bw, dl_rq);
}

static inline
void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        if (!dl_entity_is_special(dl_se))
                __sub_running_bw(dl_se->dl_bw, dl_rq);
}

static void dl_change_utilization(struct task_struct *p, u64 new_bw)
{
        struct rq *rq;

        BUG_ON(p->dl.flags & SCHED_FLAG_SUGOV);

        if (task_on_rq_queued(p))
                return;

        rq = task_rq(p);
        if (p->dl.dl_non_contending) {
                sub_running_bw(&p->dl, &rq->dl);
                p->dl.dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be cancelled, inactive_task_timer()
                 * will see that dl_not_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
                        put_task_struct(p);
        }
        __sub_rq_bw(p->dl.dl_bw, &rq->dl);
        __add_rq_bw(new_bw, &rq->dl);
}

/*
 * The utilization of a task cannot be immediately removed from
 * the rq active utilization (running_bw) when the task blocks.
 * Instead, we have to wait for the so called "0-lag time".
 *
 * If a task blocks before the "0-lag time", a timer (the inactive
 * timer) is armed, and running_bw is decreased when the timer
 * fires.
 *
 * If the task wakes up again before the inactive timer fires,
 * the timer is cancelled, whereas if the task wakes up after the
 * inactive timer fired (and running_bw has been decreased) the
 * task's utilization has to be added to running_bw again.
 * A flag in the deadline scheduling entity (dl_non_contending)
 * is used to avoid race conditions between the inactive timer handler
 * and task wakeups.
 *
 * The following diagram shows how running_bw is updated. A task is
 * "ACTIVE" when its utilization contributes to running_bw; an
 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
 * time already passed, which does not contribute to running_bw anymore.
 *                              +------------------+
 *             wakeup           |    ACTIVE        |
 *          +------------------>+   contending     |
 *          | add_running_bw    |                  |
 *          |                   +----+------+------+
 *          |                        |      ^
 *          |                dequeue |      |
 * +--------+-------+                |      |
 * |                |   t >= 0-lag   |      | wakeup
 * |    INACTIVE    |<---------------+      |
 * |                | sub_running_bw |      |
 * +--------+-------+                |      |
 *          ^                        |      |
 *          |              t < 0-lag |      |
 *          |                        |      |
 *          |                        V      |
 *          |                   +----+------+------+
 *          | sub_running_bw    |    ACTIVE        |
 *          +-------------------+                  |
 *            inactive timer    |  non contending  |
 *            fired             +------------------+
 *
 * The task_non_contending() function is invoked when a task
 * blocks, and checks if the 0-lag time already passed or
 * not (in the first case, it directly updates running_bw;
 * in the second case, it arms the inactive timer).
 *
 * The task_contending() function is invoked when a task wakes
 * up, and checks if the task is still in the "ACTIVE non contending"
 * state or not (in the second case, it updates running_bw).
 */
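
/*
 * Illustrative example (not from the original source; the numbers are
 * made up): consider a task with dl_runtime = 10ms and dl_period = 100ms,
 * i.e. a utilization of 0.1, that still has runtime = 2ms left and an
 * absolute deadline at t = 80ms. Its "0-lag time" is
 *
 *   deadline - runtime * dl_period / dl_runtime
 *     = 80ms - (2ms * 100ms) / 10ms = 60ms,
 *
 * which is exactly the zerolag_time computed in task_non_contending()
 * below. If the task blocks at, say, t = 40ms, the 0-lag time is still
 * in the future, so the inactive timer is armed for t = 60ms and
 * running_bw is decreased only when it fires (or kept if the task wakes
 * up first); if the 0-lag time were already in the past, running_bw
 * would be decreased immediately.
 */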
static void task_non_contending(struct task_struct *p)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct hrtimer *timer = &dl_se->inactive_timer;
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
        s64 zerolag_time;

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing
         */
        if (dl_se->dl_runtime == 0)
                return;

        if (dl_entity_is_special(dl_se))
                return;

        WARN_ON(dl_se->dl_non_contending);

        zerolag_time = dl_se->deadline -
                 div64_long((dl_se->runtime * dl_se->dl_period),
                        dl_se->dl_runtime);

        /*
         * Using relative times instead of the absolute "0-lag time"
         * allows us to simplify the code
         */
        zerolag_time -= rq_clock(rq);

        /*
         * If the "0-lag time" already passed, decrease the active
         * utilization now, instead of starting a timer
         */
        if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
                if (dl_task(p))
                        sub_running_bw(dl_se, dl_rq);
                if (!dl_task(p) || p->state == TASK_DEAD) {
                        struct dl_bw *dl_b = dl_bw_of(task_cpu(p));

                        if (p->state == TASK_DEAD)
                                sub_rq_bw(&p->dl, &rq->dl);
                        raw_spin_lock(&dl_b->lock);
                        __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
                        __dl_clear_params(p);
                        raw_spin_unlock(&dl_b->lock);
                }

                return;
        }

        dl_se->dl_non_contending = 1;
        get_task_struct(p);
        hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
}

static void task_contending(struct sched_dl_entity *dl_se, int flags)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);

        /*
         * If this is a non-deadline task that has been boosted,
         * do nothing
         */
        if (dl_se->dl_runtime == 0)
                return;

        if (flags & ENQUEUE_MIGRATED)
                add_rq_bw(dl_se, dl_rq);

        if (dl_se->dl_non_contending) {
                dl_se->dl_non_contending = 0;
                /*
                 * If the timer handler is currently running and the
                 * timer cannot be cancelled, inactive_task_timer()
                 * will see that dl_not_contending is not set, and
                 * will not touch the rq's active utilization,
                 * so we are still safe.
                 */
                if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1)
                        put_task_struct(dl_task_of(dl_se));
        } else {
                /*
                 * Since "dl_non_contending" is not set, the
                 * task's utilization has already been removed from
                 * active utilization (either when the task blocked,
                 * or when the "inactive timer" fired).
                 * So, add it back.
                 */
                add_running_bw(dl_se, dl_rq);
        }
}

static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
{
        struct sched_dl_entity *dl_se = &p->dl;

        return dl_rq->root.rb_leftmost == &dl_se->rb_node;
}

static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);

void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
{
        raw_spin_lock_init(&dl_b->dl_runtime_lock);
        dl_b->dl_period = period;
        dl_b->dl_runtime = runtime;
}

void init_dl_bw(struct dl_bw *dl_b)
{
        raw_spin_lock_init(&dl_b->lock);
        raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
        if (global_rt_runtime() == RUNTIME_INF)
                dl_b->bw = -1;
        else
                dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
        raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
        dl_b->total_bw = 0;
}
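
/*
 * Illustrative note (not from the original source; assumes the default
 * sysctl values): with global_rt_runtime() = 0.95s and
 * global_rt_period() = 1s, to_ratio() above stores a per-root-domain
 * -deadline bandwidth of roughly 0.95 of each CPU, kept as a fixed-point
 * fraction of BW_UNIT (i.e. approximately 0.95 << BW_SHIFT).
 */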

void init_dl_rq(struct dl_rq *dl_rq)
{
        dl_rq->root = RB_ROOT_CACHED;

#ifdef CONFIG_SMP
        /* zero means no -deadline tasks */
        dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;

        dl_rq->dl_nr_migratory = 0;
        dl_rq->overloaded = 0;
        dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
#else
        init_dl_bw(&dl_rq->dl_bw);
#endif

        dl_rq->running_bw = 0;
        dl_rq->this_bw = 0;
        init_dl_rq_bw_ratio(dl_rq);
}

#ifdef CONFIG_SMP

static inline int dl_overloaded(struct rq *rq)
{
        return atomic_read(&rq->rd->dlo_count);
}

static inline void dl_set_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
        /*
         * Must be visible before the overload count is
         * set (as in sched_rt.c).
         *
         * Matched by the barrier in pull_dl_task().
         */
        smp_wmb();
        atomic_inc(&rq->rd->dlo_count);
}

static inline void dl_clear_overload(struct rq *rq)
{
        if (!rq->online)
                return;

        atomic_dec(&rq->rd->dlo_count);
        cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
}

static void update_dl_migration(struct dl_rq *dl_rq)
{
        if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
                if (!dl_rq->overloaded) {
                        dl_set_overload(rq_of_dl_rq(dl_rq));
                        dl_rq->overloaded = 1;
                }
        } else if (dl_rq->overloaded) {
                dl_clear_overload(rq_of_dl_rq(dl_rq));
                dl_rq->overloaded = 0;
        }
}

static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory++;

        update_dl_migration(dl_rq);
}

static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
        struct task_struct *p = dl_task_of(dl_se);

        if (p->nr_cpus_allowed > 1)
                dl_rq->dl_nr_migratory--;

        update_dl_migration(dl_rq);
}

/*
 * The list of pushable -deadline tasks is not a plist, like in
 * sched_rt.c, it is an rb-tree with tasks ordered by deadline.
 */
static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;
        struct rb_node **link = &dl_rq->pushable_dl_tasks_root.rb_root.rb_node;
        struct rb_node *parent = NULL;
        struct task_struct *entry;
        bool leftmost = true;

        BUG_ON(!RB_EMPTY_NODE(&p->pushable_dl_tasks));

        while (*link) {
                parent = *link;
                entry = rb_entry(parent, struct task_struct,
                                 pushable_dl_tasks);
                if (dl_entity_preempt(&p->dl, &entry->dl))
                        link = &parent->rb_left;
                else {
                        link = &parent->rb_right;
                        leftmost = false;
                }
        }

        if (leftmost)
                dl_rq->earliest_dl.next = p->dl.deadline;

        rb_link_node(&p->pushable_dl_tasks, parent, link);
        rb_insert_color_cached(&p->pushable_dl_tasks,
                               &dl_rq->pushable_dl_tasks_root, leftmost);
}

static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
        struct dl_rq *dl_rq = &rq->dl;

        if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
                return;

        if (dl_rq->pushable_dl_tasks_root.rb_leftmost == &p->pushable_dl_tasks) {
                struct rb_node *next_node;

                next_node = rb_next(&p->pushable_dl_tasks);
                if (next_node) {
                        dl_rq->earliest_dl.next = rb_entry(next_node,
                                struct task_struct, pushable_dl_tasks)->dl.deadline;
                }
        }

        rb_erase_cached(&p->pushable_dl_tasks, &dl_rq->pushable_dl_tasks_root);
        RB_CLEAR_NODE(&p->pushable_dl_tasks);
}

static inline int has_pushable_dl_tasks(struct rq *rq)
{
        return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
}

static int push_dl_task(struct rq *rq);

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return dl_task(prev);
}

static DEFINE_PER_CPU(struct callback_head, dl_push_head);
static DEFINE_PER_CPU(struct callback_head, dl_pull_head);

static void push_dl_tasks(struct rq *);
static void pull_dl_task(struct rq *);

static inline void deadline_queue_push_tasks(struct rq *rq)
{
        if (!has_pushable_dl_tasks(rq))
                return;

        queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
        queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
}

static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);

static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
{
        struct rq *later_rq = NULL;
        struct dl_bw *dl_b;

        later_rq = find_lock_later_rq(p, rq);
        if (!later_rq) {
                int cpu;

                /*
                 * If we cannot preempt any rq, fall back to pick any
                 * online CPU:
                 */
                cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
                if (cpu >= nr_cpu_ids) {
                        /*
                         * Failed to find any suitable CPU.
                         * The task will never come back!
                         */
                        BUG_ON(dl_bandwidth_enabled());

                        /*
                         * If admission control is disabled we
                         * try a little harder to let the task
                         * run.
                         */
                        cpu = cpumask_any(cpu_active_mask);
                }
                later_rq = cpu_rq(cpu);
                double_lock_balance(rq, later_rq);
        }

        if (p->dl.dl_non_contending || p->dl.dl_throttled) {
                /*
                 * Inactive timer is armed (or callback is running, but
                 * waiting for us to release rq locks). In any case, when it
                 * will fire (or continue), it will see running_bw of this
                 * task migrated to later_rq (and correctly handle it).
                 */
                sub_running_bw(&p->dl, &rq->dl);
                sub_rq_bw(&p->dl, &rq->dl);

                add_rq_bw(&p->dl, &later_rq->dl);
                add_running_bw(&p->dl, &later_rq->dl);
        } else {
                sub_rq_bw(&p->dl, &rq->dl);
                add_rq_bw(&p->dl, &later_rq->dl);
        }

        /*
         * And we finally need to fixup root_domain(s) bandwidth accounting,
         * since p is still hanging out in the old (now moved to default) root
         * domain.
         */
        dl_b = &rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);
        __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
        raw_spin_unlock(&dl_b->lock);

        dl_b = &later_rq->rd->dl_bw;
        raw_spin_lock(&dl_b->lock);
        __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
        raw_spin_unlock(&dl_b->lock);

        set_task_cpu(p, later_rq->cpu);
        double_unlock_balance(later_rq, rq);

        return later_rq;
}

#else

static inline
void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline
void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
}

static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
{
        return false;
}

static inline void pull_dl_task(struct rq *rq)
{
}

static inline void deadline_queue_push_tasks(struct rq *rq)
{
}

static inline void deadline_queue_pull_task(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags);
static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p, int flags);

/*
 * We are being explicitly informed that a new instance is starting,
 * and this means that:
 *  - the absolute deadline of the entity has to be placed at
 *    current time + relative deadline;
 *  - the runtime of the entity has to be set to the maximum value.
 *
 * The capability of specifying such an event is useful whenever a -deadline
 * entity wants to (try to!) synchronize its behaviour with the scheduler's
 * one, and to (try to!) reconcile itself with its own scheduling
 * parameters.
 */
static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        WARN_ON(dl_se->dl_boosted);
        WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));

        /*
         * We are racing with the deadline timer. So, do nothing because
         * the deadline timer handler will take care of properly recharging
         * the runtime and postponing the deadline
         */
        if (dl_se->dl_throttled)
                return;

        /*
         * We use the regular wall clock time to set deadlines in the
         * future; in fact, we must consider execution overheads (time
         * spent on hardirq context, etc.).
         */
        dl_se->deadline = rq_clock(rq) + dl_se->dl_deadline;
        dl_se->runtime = dl_se->dl_runtime;
}

/*
 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 * possibility of an entity lasting more than what it declared, and thus
 * exhausting its runtime.
 *
 * Here we are interested in making runtime overrun possible, but we do
 * not want an entity which is misbehaving to affect the scheduling of all
 * other entities.
 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 * is used, in order to confine each entity within its own bandwidth.
 *
 * This function deals exactly with that, and ensures that when the runtime
 * of an entity is replenished, its deadline is also postponed. That ensures
 * the overrunning entity can't interfere with other entities in the system and
 * can't make them miss their deadlines. Reasons why this kind of overrun
 * could happen are, typically, an entity voluntarily trying to exceed its
 * runtime, or simply having underestimated it during sched_setattr().
 */
static void replenish_dl_entity(struct sched_dl_entity *dl_se,
                                struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        BUG_ON(pi_se->dl_runtime <= 0);

        /*
         * This could be the case for a !-dl task that is boosted.
         * Just go with full inherited parameters.
         */
        if (dl_se->dl_deadline == 0) {
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }

        if (dl_se->dl_yielded && dl_se->runtime > 0)
                dl_se->runtime = 0;

        /*
         * We keep moving the deadline away until we get some
         * available runtime for the entity. This ensures correct
         * handling of situations where the runtime overrun is
         * arbitrarily large.
         */
        while (dl_se->runtime <= 0) {
                dl_se->deadline += pi_se->dl_period;
                dl_se->runtime += pi_se->dl_runtime;
        }

        /*
         * At this point, the deadline really should be "in
         * the future" with respect to rq->clock. If it's
         * not, we are, for some reason, lagging too much!
         * Anyway, after having warned userspace about that,
         * we still try to keep things running by
         * resetting the deadline and the budget of the
         * entity.
         */
        if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
                printk_deferred_once("sched: DL replenish lagged too much\n");
                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }

        if (dl_se->dl_yielded)
                dl_se->dl_yielded = 0;
        if (dl_se->dl_throttled)
                dl_se->dl_throttled = 0;
}
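
/*
 * Worked example for the replenishment loop above (not from the original
 * source; illustrative numbers only): with dl_runtime = 10ms and
 * dl_period = 100ms, a task that ends up with runtime = -25ms after an
 * overrun goes through the while loop three times,
 * -25ms -> -15ms -> -5ms -> +5ms, and its absolute deadline is pushed
 * three periods (300ms) into the future. The CBS thus turns an
 * arbitrarily large overrun into a proportionally postponed deadline
 * instead of letting it disturb other tasks.
 */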

/*
 * Here we check if --at time t-- an entity (which is probably being
 * [re]activated or, in general, enqueued) can use its remaining runtime
 * and its current deadline _without_ exceeding the bandwidth it is
 * assigned (function returns true if it can't). We are in fact applying
 * one of the CBS rules: when a task wakes up, if the residual runtime
 * over residual deadline fits within the allocated bandwidth, then we
 * can keep the current (absolute) deadline and residual budget without
 * disrupting the schedulability of the system. Otherwise, we should
 * refill the runtime and set the deadline a period in the future,
 * because keeping the current (absolute) deadline of the task would
 * result in breaking guarantees promised to other tasks (refer to
 * Documentation/scheduler/sched-deadline.rst for more information).
 *
 * This function returns true if:
 *
 *   runtime / (deadline - t) > dl_runtime / dl_deadline ,
 *
 * IOW we can't recycle current parameters.
 *
 * Notice that the bandwidth check is done against the deadline. For a
 * task with deadline equal to period this is the same as using
 * dl_period instead of dl_deadline in the equation above.
 */
static bool dl_entity_overflow(struct sched_dl_entity *dl_se,
                               struct sched_dl_entity *pi_se, u64 t)
{
        u64 left, right;

        /*
         * left and right are the two sides of the equation above,
         * after a bit of shuffling to use multiplications instead
         * of divisions.
         *
         * Note that none of the time values involved in the two
         * multiplications are absolute: dl_deadline and dl_runtime
         * are the relative deadline and the maximum runtime of each
         * instance, runtime is the runtime left for the last instance
         * and (deadline - t), since t is rq->clock, is the time left
         * to the (absolute) deadline. Even if overflowing the u64 type
         * is very unlikely to occur in both cases, here we scale down
         * as we want to avoid that risk at all. Scaling down by 10
         * means that we reduce granularity to 1us. We are fine with it,
         * since this is only a true/false check and, anyway, thinking
         * of anything below microsecond resolution is actually fiction
         * (but still we want to give the user that illusion >;).
         */
        left = (pi_se->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
        right = ((dl_se->deadline - t) >> DL_SCALE) *
                (pi_se->dl_runtime >> DL_SCALE);

        return dl_time_before(right, left);
}
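
/*
 * Worked example (not from the original source; illustrative numbers):
 * take a task with dl_runtime = 10ms and dl_deadline = 100ms, i.e. a
 * bandwidth of 0.1. If it wakes up with runtime = 5ms left and 20ms to
 * its current absolute deadline, then 5/20 = 0.25 > 0.1 and
 * dl_entity_overflow() returns true: the old deadline cannot be kept
 * without exceeding the reserved bandwidth, so the caller refills the
 * runtime and moves the deadline. With 60ms left instead, 5/60 < 0.1 and
 * the current runtime and deadline are reused as they are.
 */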

/*
 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 * re-initializing the task's runtime and deadline, the revised wakeup
 * rule adjusts the task's runtime so that the task does not overrun its
 * density.
 *
 * Reasoning: a task may overrun the density if:
 *    runtime / (deadline - t) > dl_runtime / dl_deadline
 *
 * Therefore, runtime can be adjusted to:
 *     runtime = (dl_runtime / dl_deadline) * (deadline - t)
 *
 * In this way, the runtime matches the maximum density the task
 * can use without breaking any rule.
 *
 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
 */
static void
update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
{
        u64 laxity = dl_se->deadline - rq_clock(rq);

        /*
         * If the task has deadline < period, and the deadline is in the past,
         * it should already be throttled before this check.
         *
         * See update_dl_entity() comments for further details.
         */
        WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));

        dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
}
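
/*
 * Worked example (not from the original source; illustrative numbers):
 * for a task with dl_runtime = 10ms and dl_deadline = 100ms (so
 * dl_density represents 0.1 in fixed point) that wakes up 20ms before
 * its still-valid absolute deadline, the revised rule above trims the
 * remaining runtime to 0.1 * 20ms = 2ms, so the residual density
 * 2/20 never exceeds dl_runtime / dl_deadline.
 */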

/*
 * Regarding the deadline, a task with implicit deadline has a relative
 * deadline == relative period. A task with constrained deadline has a
 * relative deadline <= relative period.
 *
 * We support constrained deadline tasks. However, there are some restrictions
 * applied only for tasks which do not have an implicit deadline. See
 * update_dl_entity() to know more about such restrictions.
 *
 * dl_is_implicit() returns true if the task has an implicit deadline.
 */
static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
{
        return dl_se->dl_deadline == dl_se->dl_period;
}

/*
 * When a deadline entity is placed in the runqueue, its runtime and deadline
 * might need to be updated. This is done by a CBS wake up rule. There are two
 * different rules: 1) the original CBS; and 2) the Revisited CBS.
 *
 * When the task is starting a new period, the Original CBS is used. In this
 * case, the runtime is replenished and a new absolute deadline is set.
 *
 * When a task is queued before the beginning of the next period, using the
 * remaining runtime and deadline could make the entity overflow, see
 * dl_entity_overflow() to find more about runtime overflow. When such a case
 * is detected, the runtime and deadline need to be updated.
 *
 * If the task has an implicit deadline, i.e., deadline == period, the Original
 * CBS is applied. The runtime is replenished and a new absolute deadline is
 * set, as in the previous cases.
 *
 * However, the Original CBS does not work properly for tasks with
 * deadline < period, which are said to have a constrained deadline. By
 * applying the Original CBS, a constrained deadline task would be able to run
 * runtime/deadline in a period. With deadline < period, the task would
 * overrun the runtime/period allowed bandwidth, breaking the admission test.
 *
 * In order to prevent this misbehavior, the Revisited CBS is used for
 * constrained deadline tasks when a runtime overflow is detected. In the
 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
 * the remaining runtime of the task is reduced to avoid runtime overflow.
 * Please refer to the comments in the update_dl_revised_wakeup() function to
 * find more about the Revised CBS rule.
 */
static void update_dl_entity(struct sched_dl_entity *dl_se,
                             struct sched_dl_entity *pi_se)
{
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
            dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) {

                if (unlikely(!dl_is_implicit(dl_se) &&
                             !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
                             !dl_se->dl_boosted)) {
                        update_dl_revised_wakeup(dl_se, rq);
                        return;
                }

                dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
                dl_se->runtime = pi_se->dl_runtime;
        }
}

static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
{
        return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
}

/*
 * If the entity depleted all its runtime, and if we want it to sleep
 * while waiting for some new execution time to become available, we
 * set the bandwidth replenishment timer to the replenishment instant
 * and try to activate it.
 *
 * Notice that it is important for the caller to know if the timer
 * actually started or not (i.e., the replenishment instant is in
 * the future or in the past).
 */
static int start_dl_timer(struct task_struct *p)
{
        struct sched_dl_entity *dl_se = &p->dl;
        struct hrtimer *timer = &dl_se->dl_timer;
        struct rq *rq = task_rq(p);
        ktime_t now, act;
        s64 delta;

        lockdep_assert_held(&rq->lock);

        /*
         * We want the timer to fire at the deadline, but considering
         * that it is actually coming from rq->clock and not from
         * hrtimer's time base reading.
         */
        act = ns_to_ktime(dl_next_period(dl_se));
        now = hrtimer_cb_get_time(timer);
        delta = ktime_to_ns(now) - rq_clock(rq);
        act = ktime_add_ns(act, delta);

        /*
         * If the expiry time already passed, e.g., because the value
         * chosen as the deadline is too small, don't even try to
         * start the timer in the past!
         */
        if (ktime_us_delta(act, now) < 0)
                return 0;

        /*
         * !enqueued will guarantee another callback; even if one is already in
         * progress. This ensures a balanced {get,put}_task_struct().
         *
         * The race against __run_timer() clearing the enqueued state is
         * harmless because we're holding task_rq()->lock, therefore the timer
         * expiring after we've done the check will wait on its task_rq_lock()
         * and observe our state.
         */
        if (!hrtimer_is_queued(timer)) {
                get_task_struct(p);
                hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
        }

        return 1;
}

/*
 * This is the bandwidth enforcement timer callback. If here, we know
 * a task is not on its dl_rq, since the fact that the timer was running
 * means the task is throttled and needs a runtime replenishment.
 *
 * However, what we actually do depends on whether the task is active
 * (it is on its rq) or has been removed from there by a call to
 * dequeue_task_dl(). In the former case we must issue the runtime
 * replenishment and add the task back to the dl_rq; in the latter, we just
 * do nothing but clear dl_throttled, so that runtime and deadline
 * updating (and the queueing back to dl_rq) will be done by the
 * next call to enqueue_task_dl().
 */
static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
{
        struct sched_dl_entity *dl_se = container_of(timer,
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
        struct rq_flags rf;
        struct rq *rq;

        rq = task_rq_lock(p, &rf);

        /*
         * The task might have changed its scheduling policy to something
         * different than SCHED_DEADLINE (through switched_from_dl()).
         */
        if (!dl_task(p))
                goto unlock;

        /*
         * The task might have been boosted by someone else and might be in the
         * boosting/deboosting path, it's not throttled.
         */
        if (dl_se->dl_boosted)
                goto unlock;

        /*
         * Spurious timer due to start_dl_timer() race; or we already received
         * a replenishment from rt_mutex_setprio().
         */
        if (!dl_se->dl_throttled)
                goto unlock;

        sched_clock_tick();
        update_rq_clock(rq);

        /*
         * If the throttle happened during sched-out; like:
         *
         *   schedule()
         *     deactivate_task()
         *       dequeue_task_dl()
         *         update_curr_dl()
         *           start_dl_timer()
         *         __dequeue_task_dl()
         *     prev->on_rq = 0;
         *
         * We can be both throttled and !queued. Replenish the counter
         * but do not enqueue -- wait for our wakeup to do that.
         */
        if (!task_on_rq_queued(p)) {
                replenish_dl_entity(dl_se, dl_se);
                goto unlock;
        }

#ifdef CONFIG_SMP
        if (unlikely(!rq->online)) {
                /*
                 * If the runqueue is no longer available, migrate the
                 * task elsewhere. This necessarily changes rq.
                 */
                lockdep_unpin_lock(&rq->lock, rf.cookie);
                rq = dl_task_offline_migration(rq, p);
                rf.cookie = lockdep_pin_lock(&rq->lock);
                update_rq_clock(rq);

                /*
                 * Now that the task has been migrated to the new RQ and we
                 * have that locked, proceed as normal and enqueue the task
                 * there.
                 */
        }
#endif

        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->curr))
                check_preempt_curr_dl(rq, p, 0);
        else
                resched_curr(rq);

#ifdef CONFIG_SMP
        /*
         * Queueing this task back might have overloaded rq, check if we need
         * to kick someone away.
         */
        if (has_pushable_dl_tasks(rq)) {
                /*
                 * Nothing relies on rq->lock after this, so it's safe to drop
                 * rq->lock.
                 */
                rq_unpin_lock(rq, &rf);
                push_dl_task(rq);
                rq_repin_lock(rq, &rf);
        }
#endif

unlock:
        task_rq_unlock(rq, p, &rf);

        /*
         * This can free the task_struct, including this hrtimer, do not touch
         * anything related to that after this.
         */
        put_task_struct(p);

        return HRTIMER_NORESTART;
}

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
        struct hrtimer *timer = &dl_se->dl_timer;

        hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
        timer->function = dl_task_timer;
}

/*
 * During the activation, CBS checks if it can reuse the current task's
 * runtime and period. If the deadline of the task is in the past, CBS
 * cannot use the runtime, and so it replenishes the task. This rule
 * works fine for implicit deadline tasks (deadline == period), and the
 * CBS was designed for implicit deadline tasks. However, a task with
 * constrained deadline (deadline < period) might be awakened after the
 * deadline, but before the next period. In this case, replenishing the
 * task would allow it to run for runtime / deadline. As in this case
 * deadline < period, CBS enables a task to run for more than the
 * runtime / period. In a very loaded system, this can cause a domino
 * effect, making other tasks miss their deadlines.
 *
 * To avoid this problem, in the activation of a constrained deadline
 * task after the deadline but before the next period, throttle the
 * task and set the replenishing timer to the beginning of the next period,
 * unless it is boosted.
 */
static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
{
        struct task_struct *p = dl_task_of(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq_of_se(dl_se));

        if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
            dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
                if (unlikely(dl_se->dl_boosted || !start_dl_timer(p)))
                        return;
                dl_se->dl_throttled = 1;
                if (dl_se->runtime > 0)
                        dl_se->runtime = 0;
        }
}
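
/*
 * Worked example (not from the original source; illustrative numbers):
 * a constrained task with dl_runtime = 5ms, dl_deadline = 50ms and
 * dl_period = 100ms whose last absolute deadline was at t = 150ms has
 * its next period starting at dl_next_period() = 150 - 50 + 100 = 200ms.
 * If it wakes up at t = 170ms (after the deadline, before the next
 * period), the check above throttles it, zeroes any leftover runtime
 * and lets the dl_timer replenish it at t = 200ms.
 */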

static
int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
{
        return (dl_se->runtime <= 0);
}

extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);

/*
 * This function implements the GRUB accounting rule:
 * according to the GRUB reclaiming algorithm, the runtime is
 * not decreased as "dq = -dt", but as
 * "dq = -max{u / Umax, (1 - Uinact - Uextra)} dt",
 * where u is the utilization of the task, Umax is the maximum reclaimable
 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
 * as the difference between the "total runqueue utilization" and the
 * runqueue active utilization, and Uextra is the (per runqueue) extra
 * reclaimable utilization.
 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations
 * multiplied by 2^BW_SHIFT, the result has to be shifted right by
 * BW_SHIFT.
 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT,
 * dl_bw is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
 * Since delta is a 64 bit variable, to have an overflow its value
 * should be larger than 2^(64 - 20 - 8), which is more than 64 seconds.
 * So, overflow is not an issue here.
 */
static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
{
        u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
        u64 u_act;
        u64 u_act_min = (dl_se->dl_bw * rq->dl.bw_ratio) >> RATIO_SHIFT;

        /*
         * Instead of computing max{u * bw_ratio, (1 - u_inact - u_extra)},
         * we compare u_inact + rq->dl.extra_bw with
         * 1 - (u * rq->dl.bw_ratio >> RATIO_SHIFT), because
         * u_inact + rq->dl.extra_bw can be larger than
         * 1 (so, 1 - u_inact - rq->dl.extra_bw would be negative,
         * leading to wrong results)
         */
        if (u_inact + rq->dl.extra_bw > BW_UNIT - u_act_min)
                u_act = u_act_min;
        else
                u_act = BW_UNIT - u_inact - rq->dl.extra_bw;

        return (delta * u_act) >> BW_SHIFT;
}
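
/*
 * Worked example (not from the original source; illustrative numbers,
 * with utilizations written as fractions of BW_UNIT): for a task with
 * u = 0.25 on a runqueue where Umax allows reclaiming up to 0.95,
 * u_act_min = u / Umax ~= 0.263. If u_inact = 0.1 and extra_bw = 0.05,
 * then 1 - u_inact - extra_bw = 0.85 >= u_act_min, so u_act = 0.85 and
 * only 85% of the elapsed time is charged against the task's runtime:
 * its budget drains slower than wall-clock time and the task reclaims
 * part of the otherwise unused bandwidth. If u_inact + extra_bw were
 * larger than 1 - u_act_min, the charge would be clamped to u_act_min,
 * so a single task can never reclaim beyond Umax.
 */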
1176
aab03e05
DF
1177/*
1178 * Update the current task's runtime statistics (provided it is still
1179 * a -deadline task and has not been removed from the dl_rq).
1180 */
1181static void update_curr_dl(struct rq *rq)
1182{
1183 struct task_struct *curr = rq->curr;
1184 struct sched_dl_entity *dl_se = &curr->dl;
07881166
JL
1185 u64 delta_exec, scaled_delta_exec;
1186 int cpu = cpu_of(rq);
6fe0ce1e 1187 u64 now;
aab03e05
DF
1188
1189 if (!dl_task(curr) || !on_dl_rq(dl_se))
1190 return;
1191
1192 /*
1193 * Consumed budget is computed considering the time as
1194 * observed by schedulable tasks (excluding time spent
1195 * in hardirq context, etc.). Deadlines are instead
1196 * computed using hard walltime. This seems to be the more
1197 * natural solution, but the full ramifications of this
1198 * approach need further study.
1199 */
6fe0ce1e
WY
1200 now = rq_clock_task(rq);
1201 delta_exec = now - curr->se.exec_start;
48be3a67
PZ
1202 if (unlikely((s64)delta_exec <= 0)) {
1203 if (unlikely(dl_se->dl_yielded))
1204 goto throttle;
734ff2a7 1205 return;
48be3a67 1206 }
aab03e05
DF
1207
1208 schedstat_set(curr->se.statistics.exec_max,
1209 max(curr->se.statistics.exec_max, delta_exec));
1210
1211 curr->se.sum_exec_runtime += delta_exec;
1212 account_group_exec_runtime(curr, delta_exec);
1213
6fe0ce1e 1214 curr->se.exec_start = now;
d2cc5ed6 1215 cgroup_account_cputime(curr, delta_exec);
aab03e05 1216
794a56eb
JL
1217 if (dl_entity_is_special(dl_se))
1218 return;
1219
07881166
JL
1220 /*
1221 * For tasks that participate in GRUB, we implement GRUB-PA: the
1222 * spare reclaimed bandwidth is used to clock down frequency.
1223 *
1224 * For the others, we still need to scale reservation parameters
1225 * according to current frequency and CPU maximum capacity.
1226 */
1227 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
1228 scaled_delta_exec = grub_reclaim(delta_exec,
1229 rq,
1230 &curr->dl);
1231 } else {
1232 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
8ec59c0f 1233 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
07881166
JL
1234
1235 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1236 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1237 }
1238
1239 dl_se->runtime -= scaled_delta_exec;
48be3a67
PZ
1240
1241throttle:
1242 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1019a359 1243 dl_se->dl_throttled = 1;
34be3930
JL
1244
1245 /* If requested, inform the user about runtime overruns. */
1246 if (dl_runtime_exceeded(dl_se) &&
1247 (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1248 dl_se->dl_overrun = 1;
1249
aab03e05 1250 __dequeue_task_dl(rq, curr, 0);
a649f237 1251 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr)))
aab03e05
DF
1252 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH);
1253
1254 if (!is_leftmost(curr, &rq->dl))
8875125e 1255 resched_curr(rq);
aab03e05 1256 }
1724813d
PZ
1257
1258 /*
1259 * Because -- for now -- we share the rt bandwidth, we need to
1260 * account our runtime there too, otherwise actual rt tasks
1261 * would be able to exceed the shared quota.
1262 *
1263 * Account to the root rt group for now.
1264 *
1265 * The solution we're working towards is having the RT groups scheduled
1266 * using deadline servers -- however there's a few nasties to figure
1267 * out before that can happen.
1268 */
1269 if (rt_bandwidth_enabled()) {
1270 struct rt_rq *rt_rq = &rq->rt;
1271
1272 raw_spin_lock(&rt_rq->rt_runtime_lock);
1724813d
PZ
1273 /*
1274 * We'll let actual RT tasks worry about the overflow here, we
faa59937
JL
1275 * have our own CBS to keep us inline; only account when RT
1276 * bandwidth is relevant.
1724813d 1277 */
faa59937
JL
1278 if (sched_rt_bandwidth_account(rt_rq))
1279 rt_rq->rt_time += delta_exec;
1724813d
PZ
1280 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1281 }
aab03e05
DF
1282}
1283
209a0cbd
LA
1284static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1285{
1286 struct sched_dl_entity *dl_se = container_of(timer,
1287 struct sched_dl_entity,
1288 inactive_timer);
1289 struct task_struct *p = dl_task_of(dl_se);
1290 struct rq_flags rf;
1291 struct rq *rq;
1292
1293 rq = task_rq_lock(p, &rf);
1294
ecda2b66
JL
1295 sched_clock_tick();
1296 update_rq_clock(rq);
1297
209a0cbd 1298 if (!dl_task(p) || p->state == TASK_DEAD) {
387e3130
LA
1299 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1300
209a0cbd 1301 if (p->state == TASK_DEAD && dl_se->dl_non_contending) {
794a56eb
JL
1302 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1303 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
209a0cbd
LA
1304 dl_se->dl_non_contending = 0;
1305 }
387e3130
LA
1306
1307 raw_spin_lock(&dl_b->lock);
8c0944ce 1308 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
387e3130 1309 raw_spin_unlock(&dl_b->lock);
209a0cbd
LA
1310 __dl_clear_params(p);
1311
1312 goto unlock;
1313 }
1314 if (dl_se->dl_non_contending == 0)
1315 goto unlock;
1316
794a56eb 1317 sub_running_bw(dl_se, &rq->dl);
209a0cbd
LA
1318 dl_se->dl_non_contending = 0;
1319unlock:
1320 task_rq_unlock(rq, p, &rf);
1321 put_task_struct(p);
1322
1323 return HRTIMER_NORESTART;
1324}
1325
1326void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1327{
1328 struct hrtimer *timer = &dl_se->inactive_timer;
1329
850377a8 1330 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
209a0cbd
LA
1331 timer->function = inactive_task_timer;
1332}
1333
1baca4ce
JL
1334#ifdef CONFIG_SMP
1335
1baca4ce
JL
1336static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1337{
1338 struct rq *rq = rq_of_dl_rq(dl_rq);
1339
1340 if (dl_rq->earliest_dl.curr == 0 ||
1341 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1baca4ce 1342 dl_rq->earliest_dl.curr = deadline;
d8206bb3 1343 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1baca4ce
JL
1344 }
1345}
1346
1347static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1348{
1349 struct rq *rq = rq_of_dl_rq(dl_rq);
1350
1351 /*
1352 * Since we may have removed our earliest (and/or next earliest)
1353 * task we must recompute them.
1354 */
1355 if (!dl_rq->dl_nr_running) {
1356 dl_rq->earliest_dl.curr = 0;
1357 dl_rq->earliest_dl.next = 0;
d8206bb3 1358 cpudl_clear(&rq->rd->cpudl, rq->cpu);
1baca4ce 1359 } else {
2161573e 1360 struct rb_node *leftmost = dl_rq->root.rb_leftmost;
1baca4ce
JL
1361 struct sched_dl_entity *entry;
1362
1363 entry = rb_entry(leftmost, struct sched_dl_entity, rb_node);
1364 dl_rq->earliest_dl.curr = entry->deadline;
d8206bb3 1365 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1baca4ce
JL
1366 }
1367}
1368
1369#else
1370
1371static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1372static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1373
1374#endif /* CONFIG_SMP */
1375
1376static inline
1377void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1378{
1379 int prio = dl_task_of(dl_se)->prio;
1380 u64 deadline = dl_se->deadline;
1381
1382 WARN_ON(!dl_prio(prio));
1383 dl_rq->dl_nr_running++;
72465447 1384 add_nr_running(rq_of_dl_rq(dl_rq), 1);
1baca4ce
JL
1385
1386 inc_dl_deadline(dl_rq, deadline);
1387 inc_dl_migration(dl_se, dl_rq);
1388}
1389
1390static inline
1391void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1392{
1393 int prio = dl_task_of(dl_se)->prio;
1394
1395 WARN_ON(!dl_prio(prio));
1396 WARN_ON(!dl_rq->dl_nr_running);
1397 dl_rq->dl_nr_running--;
72465447 1398 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1baca4ce
JL
1399
1400 dec_dl_deadline(dl_rq, dl_se->deadline);
1401 dec_dl_migration(dl_se, dl_rq);
1402}
1403
aab03e05
DF
1404static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1405{
1406 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
2161573e 1407 struct rb_node **link = &dl_rq->root.rb_root.rb_node;
aab03e05
DF
1408 struct rb_node *parent = NULL;
1409 struct sched_dl_entity *entry;
1410 int leftmost = 1;
1411
1412 BUG_ON(!RB_EMPTY_NODE(&dl_se->rb_node));
1413
1414 while (*link) {
1415 parent = *link;
1416 entry = rb_entry(parent, struct sched_dl_entity, rb_node);
1417 if (dl_time_before(dl_se->deadline, entry->deadline))
1418 link = &parent->rb_left;
1419 else {
1420 link = &parent->rb_right;
1421 leftmost = 0;
1422 }
1423 }
1424
aab03e05 1425 rb_link_node(&dl_se->rb_node, parent, link);
2161573e 1426 rb_insert_color_cached(&dl_se->rb_node, &dl_rq->root, leftmost);
aab03e05 1427
1baca4ce 1428 inc_dl_tasks(dl_se, dl_rq);
aab03e05
DF
1429}
1430
1431static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1432{
1433 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1434
1435 if (RB_EMPTY_NODE(&dl_se->rb_node))
1436 return;
1437
2161573e 1438 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
aab03e05
DF
1439 RB_CLEAR_NODE(&dl_se->rb_node);
1440
1baca4ce 1441 dec_dl_tasks(dl_se, dl_rq);
aab03e05
DF
1442}
1443
1444static void
2d3d891d
DF
1445enqueue_dl_entity(struct sched_dl_entity *dl_se,
1446 struct sched_dl_entity *pi_se, int flags)
aab03e05
DF
1447{
1448 BUG_ON(on_dl_rq(dl_se));
1449
1450 /*
1451 * If this is a wakeup or a new instance, the scheduling
1452 * parameters of the task might need updating. Otherwise,
1453 * we want a replenishment of its runtime.
1454 */
e36d8677 1455 if (flags & ENQUEUE_WAKEUP) {
8fd27231 1456 task_contending(dl_se, flags);
2d3d891d 1457 update_dl_entity(dl_se, pi_se);
e36d8677 1458 } else if (flags & ENQUEUE_REPLENISH) {
6a503c3b 1459 replenish_dl_entity(dl_se, pi_se);
295d6d5e
LA
1460 } else if ((flags & ENQUEUE_RESTORE) &&
1461 dl_time_before(dl_se->deadline,
1462 rq_clock(rq_of_dl_rq(dl_rq_of_se(dl_se))))) {
1463 setup_new_dl_entity(dl_se);
e36d8677 1464 }
aab03e05
DF
1465
1466 __enqueue_dl_entity(dl_se);
1467}
1468
1469static void dequeue_dl_entity(struct sched_dl_entity *dl_se)
1470{
1471 __dequeue_dl_entity(dl_se);
1472}
1473
1474static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1475{
2d3d891d
DF
1476 struct task_struct *pi_task = rt_mutex_get_top_task(p);
1477 struct sched_dl_entity *pi_se = &p->dl;
1478
1479 /*
193be41e
JF
1480 * Use the scheduling parameters of the top pi-waiter task if:
1481 * - we have a top pi-waiter which is a SCHED_DEADLINE task AND
1482 * - our dl_boosted is set (i.e. the pi-waiter's (absolute) deadline is
1483 * smaller than our deadline OR we are a !SCHED_DEADLINE task getting
1484 * boosted due to a SCHED_DEADLINE pi-waiter).
1485 * Otherwise we keep our runtime and deadline.
2d3d891d 1486 */
193be41e 1487 if (pi_task && dl_prio(pi_task->normal_prio) && p->dl.dl_boosted) {
2d3d891d 1488 pi_se = &pi_task->dl;
64be6f1f
JL
1489 } else if (!dl_prio(p->normal_prio)) {
1490 /*
1491 * Special case in which we have a !SCHED_DEADLINE task
193be41e 1492 * that is going to be deboosted, but exceeds its
64be6f1f
JL
1493 * runtime while doing so. No point in replenishing
1494 * it, as it's going to return back to its original
1495 * scheduling class after this.
1496 */
1497 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
1498 return;
1499 }
2d3d891d 1500
df8eac8c
DBO
1501 /*
1502 * Check if a constrained deadline task was activated
1503 * after the deadline but before the next period.
1504 * If that is the case, the task will be throttled and
1505 * the replenishment timer will be set to the next period.
1506 */
3effcb42 1507 if (!p->dl.dl_throttled && !dl_is_implicit(&p->dl))
df8eac8c
DBO
1508 dl_check_constrained_dl(&p->dl);
1509
8fd27231 1510 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & ENQUEUE_RESTORE) {
794a56eb
JL
1511 add_rq_bw(&p->dl, &rq->dl);
1512 add_running_bw(&p->dl, &rq->dl);
8fd27231 1513 }
e36d8677 1514
aab03e05 1515 /*
e36d8677 1516 * If p is throttled, we do not enqueue it. In fact, if it exhausted
aab03e05
DF
1517 * its budget it needs a replenishment and, since it now is on
1518 * its rq, the bandwidth timer callback (which clearly has not
1519 * run yet) will take care of this.
e36d8677
LA
1520 * However, the active utilization does not depend on the fact
1521 * that the task is on the runqueue or not (but depends on the
1522 * task's state - in GRUB parlance, "inactive" vs "active contending").
1523 * In other words, even if a task is throttled its utilization must
1524 * be counted in the active utilization; hence, we need to call
1525 * add_running_bw().
aab03e05 1526 */
e36d8677 1527 if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
209a0cbd 1528 if (flags & ENQUEUE_WAKEUP)
8fd27231 1529 task_contending(&p->dl, flags);
209a0cbd 1530
aab03e05 1531 return;
e36d8677 1532 }
aab03e05 1533
2d3d891d 1534 enqueue_dl_entity(&p->dl, pi_se, flags);
1baca4ce 1535
4b53a341 1536 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1baca4ce 1537 enqueue_pushable_dl_task(rq, p);
aab03e05
DF
1538}
1539
1540static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1541{
1542 dequeue_dl_entity(&p->dl);
1baca4ce 1543 dequeue_pushable_dl_task(rq, p);
aab03e05
DF
1544}
1545
1546static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1547{
1548 update_curr_dl(rq);
1549 __dequeue_task_dl(rq, p, flags);
e36d8677 1550
8fd27231 1551 if (p->on_rq == TASK_ON_RQ_MIGRATING || flags & DEQUEUE_SAVE) {
794a56eb
JL
1552 sub_running_bw(&p->dl, &rq->dl);
1553 sub_rq_bw(&p->dl, &rq->dl);
8fd27231 1554 }
e36d8677
LA
1555
1556 /*
209a0cbd
LA
1557 * This check allows to start the inactive timer (or to immediately
1558 * decrease the active utilization, if needed) in two cases:
e36d8677
LA
1559 * when the task blocks and when it is terminating
1560 * (p->state == TASK_DEAD). We can handle the two cases in the same
1561 * way, because from GRUB's point of view the same thing is happening
1562 * (the task moves from "active contending" to "active non contending"
1563 * or "inactive")
1564 */
1565 if (flags & DEQUEUE_SLEEP)
209a0cbd 1566 task_non_contending(p);
aab03e05
DF
1567}
1568
1569/*
1570 * Yield task semantic for -deadline tasks is:
1571 *
1572 * get off from the CPU until our next instance, with
1573 * a new runtime. This is of little use now, since we
1574 * don't have a bandwidth reclaiming mechanism. Anyway,
1575 * bandwidth reclaiming is planned for the future, and
1576 * yield_task_dl will indicate that some spare budget
1577 * is available for other task instances to use it.
1578 */
1579static void yield_task_dl(struct rq *rq)
1580{
aab03e05
DF
1581 /*
1582 * We make the task go to sleep until its current deadline by
1583 * forcing its runtime to zero. This way, update_curr_dl() stops
1584 * it and the bandwidth timer will wake it up and will give it
5bfd126e 1585 * new scheduling parameters (thanks to dl_yielded=1).
aab03e05 1586 */
48be3a67
PZ
1587 rq->curr->dl.dl_yielded = 1;
1588
6f1607f1 1589 update_rq_clock(rq);
aab03e05 1590 update_curr_dl(rq);
44fb085b
WL
1591 /*
1592 * Tell update_rq_clock() that we've just updated,
1593 * so we don't do microscopic update in schedule()
1594 * and double the fastpath cost.
1595 */
adcc8da8 1596 rq_clock_skip_update(rq);
aab03e05
DF
1597}
1598
1baca4ce
JL
1599#ifdef CONFIG_SMP
1600
1601static int find_later_rq(struct task_struct *task);
1baca4ce
JL
1602
1603static int
1604select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
1605{
1606 struct task_struct *curr;
1607 struct rq *rq;
1608
1d7e974c 1609 if (sd_flag != SD_BALANCE_WAKE)
1baca4ce
JL
1610 goto out;
1611
1612 rq = cpu_rq(cpu);
1613
1614 rcu_read_lock();
316c1608 1615 curr = READ_ONCE(rq->curr); /* unlocked access */
1baca4ce
JL
1616
1617 /*
1618 * If we are dealing with a -deadline task, we must
1619 * decide where to wake it up.
1620 * If it has a later deadline and the current task
1621 * on this rq can't move (provided the waking task
1622 * can!) we prefer to send it somewhere else. On the
1623 * other hand, if it has a shorter deadline, we
1624 * try to make it stay here; it might be important.
1625 */
1626 if (unlikely(dl_task(curr)) &&
4b53a341 1627 (curr->nr_cpus_allowed < 2 ||
1baca4ce 1628 !dl_entity_preempt(&p->dl, &curr->dl)) &&
4b53a341 1629 (p->nr_cpus_allowed > 1)) {
1baca4ce
JL
1630 int target = find_later_rq(p);
1631
9d514262 1632 if (target != -1 &&
5aa50507
LA
1633 (dl_time_before(p->dl.deadline,
1634 cpu_rq(target)->dl.earliest_dl.curr) ||
1635 (cpu_rq(target)->dl.dl_nr_running == 0)))
1baca4ce
JL
1636 cpu = target;
1637 }
1638 rcu_read_unlock();
1639
1640out:
1641 return cpu;
1642}
1643
1327237a 1644static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
209a0cbd
LA
1645{
1646 struct rq *rq;
1647
8fd27231 1648 if (p->state != TASK_WAKING)
209a0cbd
LA
1649 return;
1650
1651 rq = task_rq(p);
1652 /*
1653 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1654 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1655 * rq->lock is not... So, lock it
1656 */
1657 raw_spin_lock(&rq->lock);
8fd27231 1658 if (p->dl.dl_non_contending) {
794a56eb 1659 sub_running_bw(&p->dl, &rq->dl);
8fd27231
LA
1660 p->dl.dl_non_contending = 0;
1661 /*
1662 * If the timer handler is currently running and the
1663 * timer cannot be cancelled, inactive_task_timer()
1664 * will see that dl_non_contending is not set, and
1665 * will not touch the rq's active utilization,
1666 * so we are still safe.
1667 */
1668 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1669 put_task_struct(p);
1670 }
794a56eb 1671 sub_rq_bw(&p->dl, &rq->dl);
209a0cbd
LA
1672 raw_spin_unlock(&rq->lock);
1673}
1674
1baca4ce
JL
1675static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1676{
1677 /*
1678 * Current can't be migrated, useless to reschedule,
1679 * let's hope p can move out.
1680 */
4b53a341 1681 if (rq->curr->nr_cpus_allowed == 1 ||
3261ed0b 1682 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1baca4ce
JL
1683 return;
1684
1685 /*
1686 * p is migratable, so let's not schedule it and
1687 * see if it is pushed or pulled somewhere else.
1688 */
4b53a341 1689 if (p->nr_cpus_allowed != 1 &&
3261ed0b 1690 cpudl_find(&rq->rd->cpudl, p, NULL))
1baca4ce
JL
1691 return;
1692
8875125e 1693 resched_curr(rq);
1baca4ce
JL
1694}
1695
6e2df058
PZ
1696static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1697{
1698 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
1699 /*
1700 * This is OK, because current is on_cpu, which avoids it being
1701 * picked for load-balance and preemption/IRQs are still
1702 * disabled avoiding further scheduler activity on it and we've
1703 * not yet started the picking loop.
1704 */
1705 rq_unpin_lock(rq, rf);
1706 pull_dl_task(rq);
1707 rq_repin_lock(rq, rf);
1708 }
1709
1710 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
1711}
1baca4ce
JL
1712#endif /* CONFIG_SMP */
1713
aab03e05
DF
1714/*
1715 * Only called when both the current and waking task are -deadline
1716 * tasks.
1717 */
1718static void check_preempt_curr_dl(struct rq *rq, struct task_struct *p,
1719 int flags)
1720{
1baca4ce 1721 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
8875125e 1722 resched_curr(rq);
1baca4ce
JL
1723 return;
1724 }
1725
1726#ifdef CONFIG_SMP
1727 /*
1728 * In the unlikely case current and p have the same deadline
1729 * let us try to decide what's the best thing to do...
1730 */
332ac17e
DF
1731 if ((p->dl.deadline == rq->curr->dl.deadline) &&
1732 !test_tsk_need_resched(rq->curr))
1baca4ce
JL
1733 check_preempt_equal_dl(rq, p);
1734#endif /* CONFIG_SMP */
aab03e05
DF
1735}
1736
1737#ifdef CONFIG_SCHED_HRTICK
1738static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1739{
177ef2a6 1740 hrtick_start(rq, p->dl.runtime);
aab03e05 1741}
36ce9881
WL
1742#else /* !CONFIG_SCHED_HRTICK */
1743static void start_hrtick_dl(struct rq *rq, struct task_struct *p)
1744{
1745}
aab03e05
DF
1746#endif
1747
a0e813f2 1748static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
ff1cdc94
MS
1749{
1750 p->se.exec_start = rq_clock_task(rq);
1751
1752 /* You can't push away the running task */
1753 dequeue_pushable_dl_task(rq, p);
f95d4eae 1754
a0e813f2
PZ
1755 if (!first)
1756 return;
1757
f95d4eae
PZ
1758 if (hrtick_enabled(rq))
1759 start_hrtick_dl(rq, p);
1760
1761 if (rq->curr->sched_class != &dl_sched_class)
1762 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1763
1764 deadline_queue_push_tasks(rq);
ff1cdc94
MS
1765}
1766
aab03e05
DF
1767static struct sched_dl_entity *pick_next_dl_entity(struct rq *rq,
1768 struct dl_rq *dl_rq)
1769{
2161573e 1770 struct rb_node *left = rb_first_cached(&dl_rq->root);
aab03e05
DF
1771
1772 if (!left)
1773 return NULL;
1774
1775 return rb_entry(left, struct sched_dl_entity, rb_node);
1776}
1777
98c2f700 1778static struct task_struct *pick_next_task_dl(struct rq *rq)
aab03e05
DF
1779{
1780 struct sched_dl_entity *dl_se;
6e2df058 1781 struct dl_rq *dl_rq = &rq->dl;
aab03e05 1782 struct task_struct *p;
aab03e05 1783
6e2df058 1784 if (!sched_dl_runnable(rq))
aab03e05
DF
1785 return NULL;
1786
1787 dl_se = pick_next_dl_entity(rq, dl_rq);
1788 BUG_ON(!dl_se);
aab03e05 1789 p = dl_task_of(dl_se);
a0e813f2 1790 set_next_task_dl(rq, p, true);
aab03e05
DF
1791 return p;
1792}
1793
6e2df058 1794static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
aab03e05
DF
1795{
1796 update_curr_dl(rq);
1baca4ce 1797
23127296 1798 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
4b53a341 1799 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1baca4ce 1800 enqueue_pushable_dl_task(rq, p);
aab03e05
DF
1801}
1802
d84b3131
FW
1803/*
1804 * scheduler tick hitting a task of our scheduling class.
1805 *
1806 * NOTE: This function can be called remotely by the tick offload that
1807 * goes along full dynticks. Therefore no local assumption can be made
1808 * and everything must be accessed through the @rq and @curr passed in
1809 * parameters.
1810 */
aab03e05
DF
1811static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
1812{
1813 update_curr_dl(rq);
1814
23127296 1815 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
a7bebf48
WL
1816 /*
1817 * Even when we have runtime, update_curr_dl() might have resulted in us
1818 * not being the leftmost task anymore. In that case NEED_RESCHED will
1819 * be set and schedule() will start a new hrtick for the next task.
1820 */
1821 if (hrtick_enabled(rq) && queued && p->dl.runtime > 0 &&
1822 is_leftmost(p, &rq->dl))
aab03e05 1823 start_hrtick_dl(rq, p);
aab03e05
DF
1824}
1825
1826static void task_fork_dl(struct task_struct *p)
1827{
1828 /*
1829 * SCHED_DEADLINE tasks cannot fork and this is achieved through
1830 * sched_fork()
1831 */
1832}
1833
1baca4ce
JL
1834#ifdef CONFIG_SMP
1835
1836/* Only try algorithms three times */
1837#define DL_MAX_TRIES 3
1838
1839static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
1840{
1841 if (!task_running(rq, p) &&
3bd37062 1842 cpumask_test_cpu(cpu, p->cpus_ptr))
1baca4ce 1843 return 1;
1baca4ce
JL
1844 return 0;
1845}
1846
8b5e770e
WL
1847/*
1848 * Return the earliest pushable task on this rq that can run
1849 * on the given CPU, or NULL if there is none:
1850 */
1851static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
1852{
2161573e 1853 struct rb_node *next_node = rq->dl.pushable_dl_tasks_root.rb_leftmost;
8b5e770e
WL
1854 struct task_struct *p = NULL;
1855
1856 if (!has_pushable_dl_tasks(rq))
1857 return NULL;
1858
1859next_node:
1860 if (next_node) {
1861 p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
1862
1863 if (pick_dl_task(rq, p, cpu))
1864 return p;
1865
1866 next_node = rb_next(next_node);
1867 goto next_node;
1868 }
1869
1870 return NULL;
1871}
1872
1baca4ce
JL
1873static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
1874
1875static int find_later_rq(struct task_struct *task)
1876{
1877 struct sched_domain *sd;
4ba29684 1878 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1baca4ce 1879 int this_cpu = smp_processor_id();
b18c3ca1 1880 int cpu = task_cpu(task);
1baca4ce
JL
1881
1882 /* Make sure the mask is initialized first */
1883 if (unlikely(!later_mask))
1884 return -1;
1885
4b53a341 1886 if (task->nr_cpus_allowed == 1)
1baca4ce
JL
1887 return -1;
1888
91ec6778
JL
1889 /*
1890 * We have to consider system topology and task affinity
97fb7a0a 1891 * first, then we can look for a suitable CPU.
91ec6778 1892 */
3261ed0b 1893 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1baca4ce
JL
1894 return -1;
1895
1896 /*
b18c3ca1
BP
1897 * If we are here, some targets have been found, including
1898 * the most suitable one: among the runqueues whose current
1899 * tasks have later deadlines than this task's, it is the rq
1900 * with the latest possible one.
1baca4ce
JL
1901 *
1902 * Now we check how well this matches with task's
1903 * affinity and system topology.
1904 *
97fb7a0a 1905 * The last CPU where the task ran is our first
1baca4ce
JL
1906 * guess, since it is most likely cache-hot there.
1907 */
1908 if (cpumask_test_cpu(cpu, later_mask))
1909 return cpu;
1910 /*
1911 * Check if this_cpu is to be skipped (i.e., it is
1912 * not in the mask) or not.
1913 */
1914 if (!cpumask_test_cpu(this_cpu, later_mask))
1915 this_cpu = -1;
1916
1917 rcu_read_lock();
1918 for_each_domain(cpu, sd) {
1919 if (sd->flags & SD_WAKE_AFFINE) {
b18c3ca1 1920 int best_cpu;
1baca4ce
JL
1921
1922 /*
1923 * If possible, preempting this_cpu is
1924 * cheaper than migrating.
1925 */
1926 if (this_cpu != -1 &&
1927 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1928 rcu_read_unlock();
1929 return this_cpu;
1930 }
1931
b18c3ca1
BP
1932 best_cpu = cpumask_first_and(later_mask,
1933 sched_domain_span(sd));
1baca4ce 1934 /*
97fb7a0a 1935 * Last chance: if a CPU that is in both later_mask
b18c3ca1 1936 * and the current sd span is valid, that becomes our
97fb7a0a 1937 * choice. Of course, the latest possible CPU is
b18c3ca1 1938 * already under consideration through later_mask.
1baca4ce 1939 */
b18c3ca1 1940 if (best_cpu < nr_cpu_ids) {
1baca4ce
JL
1941 rcu_read_unlock();
1942 return best_cpu;
1943 }
1944 }
1945 }
1946 rcu_read_unlock();
1947
1948 /*
1949 * At this point, all our guesses failed, we just return
1950 * 'something', and let the caller sort the things out.
1951 */
1952 if (this_cpu != -1)
1953 return this_cpu;
1954
1955 cpu = cpumask_any(later_mask);
1956 if (cpu < nr_cpu_ids)
1957 return cpu;
1958
1959 return -1;
1960}
1961
1962/* Locks the rq it finds */
1963static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
1964{
1965 struct rq *later_rq = NULL;
1966 int tries;
1967 int cpu;
1968
1969 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
1970 cpu = find_later_rq(task);
1971
1972 if ((cpu == -1) || (cpu == rq->cpu))
1973 break;
1974
1975 later_rq = cpu_rq(cpu);
1976
5aa50507
LA
1977 if (later_rq->dl.dl_nr_running &&
1978 !dl_time_before(task->dl.deadline,
9d514262
WL
1979 later_rq->dl.earliest_dl.curr)) {
1980 /*
1981 * Target rq has tasks of equal or earlier deadline,
1982 * retrying does not release any lock and is unlikely
1983 * to yield a different result.
1984 */
1985 later_rq = NULL;
1986 break;
1987 }
1988
1baca4ce
JL
1989 /* Retry if something changed. */
1990 if (double_lock_balance(rq, later_rq)) {
1991 if (unlikely(task_rq(task) != rq ||
3bd37062 1992 !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
da0c1e65 1993 task_running(rq, task) ||
13b5ab02 1994 !dl_task(task) ||
da0c1e65 1995 !task_on_rq_queued(task))) {
1baca4ce
JL
1996 double_unlock_balance(rq, later_rq);
1997 later_rq = NULL;
1998 break;
1999 }
2000 }
2001
2002 /*
2003 * If the rq we found has no -deadline task, or
2004 * its earliest one has a later deadline than our
2005 * task, the rq is a good one.
2006 */
2007 if (!later_rq->dl.dl_nr_running ||
2008 dl_time_before(task->dl.deadline,
2009 later_rq->dl.earliest_dl.curr))
2010 break;
2011
2012 /* Otherwise we try again. */
2013 double_unlock_balance(rq, later_rq);
2014 later_rq = NULL;
2015 }
2016
2017 return later_rq;
2018}
2019
2020static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2021{
2022 struct task_struct *p;
2023
2024 if (!has_pushable_dl_tasks(rq))
2025 return NULL;
2026
2161573e 2027 p = rb_entry(rq->dl.pushable_dl_tasks_root.rb_leftmost,
1baca4ce
JL
2028 struct task_struct, pushable_dl_tasks);
2029
2030 BUG_ON(rq->cpu != task_cpu(p));
2031 BUG_ON(task_current(rq, p));
4b53a341 2032 BUG_ON(p->nr_cpus_allowed <= 1);
1baca4ce 2033
da0c1e65 2034 BUG_ON(!task_on_rq_queued(p));
1baca4ce
JL
2035 BUG_ON(!dl_task(p));
2036
2037 return p;
2038}
2039
2040/*
2041 * See if the non-running -deadline tasks on this rq
2042 * can be sent to some other CPU where they can preempt
2043 * and start executing.
2044 */
2045static int push_dl_task(struct rq *rq)
2046{
2047 struct task_struct *next_task;
2048 struct rq *later_rq;
c51b8ab5 2049 int ret = 0;
1baca4ce
JL
2050
2051 if (!rq->dl.overloaded)
2052 return 0;
2053
2054 next_task = pick_next_pushable_dl_task(rq);
2055 if (!next_task)
2056 return 0;
2057
2058retry:
9ebc6053 2059 if (WARN_ON(next_task == rq->curr))
1baca4ce 2060 return 0;
1baca4ce
JL
2061
2062 /*
2063 * If next_task preempts rq->curr, and rq->curr
2064 * can move away, it makes sense to just reschedule
2065 * without going further in pushing next_task.
2066 */
2067 if (dl_task(rq->curr) &&
2068 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
4b53a341 2069 rq->curr->nr_cpus_allowed > 1) {
8875125e 2070 resched_curr(rq);
1baca4ce
JL
2071 return 0;
2072 }
2073
2074 /* We might release rq lock */
2075 get_task_struct(next_task);
2076
2077 /* Will lock the rq it'll find */
2078 later_rq = find_lock_later_rq(next_task, rq);
2079 if (!later_rq) {
2080 struct task_struct *task;
2081
2082 /*
2083 * We must check all this again, since
2084 * find_lock_later_rq releases rq->lock and it is
2085 * then possible that next_task has migrated.
2086 */
2087 task = pick_next_pushable_dl_task(rq);
a776b968 2088 if (task == next_task) {
1baca4ce
JL
2089 /*
2090 * The task is still there. We don't try
97fb7a0a 2091 * again, some other CPU will pull it when ready.
1baca4ce 2092 */
1baca4ce
JL
2093 goto out;
2094 }
2095
2096 if (!task)
2097 /* No more tasks */
2098 goto out;
2099
2100 put_task_struct(next_task);
2101 next_task = task;
2102 goto retry;
2103 }
2104
2105 deactivate_task(rq, next_task, 0);
2106 set_task_cpu(next_task, later_rq->cpu);
840d7196
DBO
2107
2108 /*
2109 * Update the later_rq clock here, because the clock is used
2110 * by the cpufreq_update_util() inside __add_running_bw().
2111 */
2112 update_rq_clock(later_rq);
840d7196 2113 activate_task(later_rq, next_task, ENQUEUE_NOCLOCK);
c51b8ab5 2114 ret = 1;
1baca4ce 2115
8875125e 2116 resched_curr(later_rq);
1baca4ce
JL
2117
2118 double_unlock_balance(rq, later_rq);
2119
2120out:
2121 put_task_struct(next_task);
2122
c51b8ab5 2123 return ret;
1baca4ce
JL
2124}
2125
2126static void push_dl_tasks(struct rq *rq)
2127{
4ffa08ed 2128 /* push_dl_task() will return true if it moved a -deadline task */
1baca4ce
JL
2129 while (push_dl_task(rq))
2130 ;
aab03e05
DF
2131}
2132
0ea60c20 2133static void pull_dl_task(struct rq *this_rq)
1baca4ce 2134{
0ea60c20 2135 int this_cpu = this_rq->cpu, cpu;
1baca4ce 2136 struct task_struct *p;
0ea60c20 2137 bool resched = false;
1baca4ce
JL
2138 struct rq *src_rq;
2139 u64 dmin = LONG_MAX;
2140
2141 if (likely(!dl_overloaded(this_rq)))
0ea60c20 2142 return;
1baca4ce
JL
2143
2144 /*
2145 * Match the barrier from dl_set_overloaded; this guarantees that if we
2146 * see overloaded we must also see the dlo_mask bit.
2147 */
2148 smp_rmb();
2149
2150 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2151 if (this_cpu == cpu)
2152 continue;
2153
2154 src_rq = cpu_rq(cpu);
2155
2156 /*
2157 * It looks racy, and it is! However, as in sched_rt.c,
2158 * we are fine with this.
2159 */
2160 if (this_rq->dl.dl_nr_running &&
2161 dl_time_before(this_rq->dl.earliest_dl.curr,
2162 src_rq->dl.earliest_dl.next))
2163 continue;
2164
2165 /* Might drop this_rq->lock */
2166 double_lock_balance(this_rq, src_rq);
2167
2168 /*
2169 * If there are no more pullable tasks on the
2170 * rq, we're done with it.
2171 */
2172 if (src_rq->dl.dl_nr_running <= 1)
2173 goto skip;
2174
8b5e770e 2175 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1baca4ce
JL
2176
2177 /*
2178 * We found a task to be pulled if:
2179 * - it preempts our current (if there's one),
2180 * - it will preempt the last one we pulled (if any).
2181 */
2182 if (p && dl_time_before(p->dl.deadline, dmin) &&
2183 (!this_rq->dl.dl_nr_running ||
2184 dl_time_before(p->dl.deadline,
2185 this_rq->dl.earliest_dl.curr))) {
2186 WARN_ON(p == src_rq->curr);
da0c1e65 2187 WARN_ON(!task_on_rq_queued(p));
1baca4ce
JL
2188
2189 /*
2190 * Then we pull iff p has actually an earlier
2191 * deadline than the current task of its runqueue.
2192 */
2193 if (dl_time_before(p->dl.deadline,
2194 src_rq->curr->dl.deadline))
2195 goto skip;
2196
0ea60c20 2197 resched = true;
1baca4ce
JL
2198
2199 deactivate_task(src_rq, p, 0);
2200 set_task_cpu(p, this_cpu);
2201 activate_task(this_rq, p, 0);
2202 dmin = p->dl.deadline;
2203
2204 /* Is there any other task even earlier? */
2205 }
2206skip:
2207 double_unlock_balance(this_rq, src_rq);
2208 }
2209
0ea60c20
PZ
2210 if (resched)
2211 resched_curr(this_rq);
1baca4ce
JL
2212}
2213
2214/*
2215 * Since the task is not running and a reschedule is not going to happen
2216 * anytime soon on its runqueue, we try pushing it away now.
2217 */
2218static void task_woken_dl(struct rq *rq, struct task_struct *p)
2219{
2220 if (!task_running(rq, p) &&
2221 !test_tsk_need_resched(rq->curr) &&
4b53a341 2222 p->nr_cpus_allowed > 1 &&
1baca4ce 2223 dl_task(rq->curr) &&
4b53a341 2224 (rq->curr->nr_cpus_allowed < 2 ||
6b0a563f 2225 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1baca4ce
JL
2226 push_dl_tasks(rq);
2227 }
2228}
2229
2230static void set_cpus_allowed_dl(struct task_struct *p,
2231 const struct cpumask *new_mask)
2232{
7f51412a 2233 struct root_domain *src_rd;
6c37067e 2234 struct rq *rq;
1baca4ce
JL
2235
2236 BUG_ON(!dl_task(p));
2237
7f51412a
JL
2238 rq = task_rq(p);
2239 src_rd = rq->rd;
2240 /*
2241 * Migrating a SCHED_DEADLINE task between exclusive
2242 * cpusets (different root_domains) entails a bandwidth
2243 * update. We already made space for us in the destination
2244 * domain (see cpuset_can_attach()).
2245 */
2246 if (!cpumask_intersects(src_rd->span, new_mask)) {
2247 struct dl_bw *src_dl_b;
2248
2249 src_dl_b = dl_bw_of(cpu_of(rq));
2250 /*
2251 * We now free resources of the root_domain we are migrating
2252 * off. In the worst case, sched_setattr() may temporary fail
2253 * until we complete the update.
2254 */
2255 raw_spin_lock(&src_dl_b->lock);
8c0944ce 2256 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
7f51412a
JL
2257 raw_spin_unlock(&src_dl_b->lock);
2258 }
2259
6c37067e 2260 set_cpus_allowed_common(p, new_mask);
1baca4ce
JL
2261}
2262
2263/* Assumes rq->lock is held */
2264static void rq_online_dl(struct rq *rq)
2265{
2266 if (rq->dl.overloaded)
2267 dl_set_overload(rq);
6bfd6d72 2268
16b26943 2269 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
6bfd6d72 2270 if (rq->dl.dl_nr_running > 0)
d8206bb3 2271 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
1baca4ce
JL
2272}
2273
2274/* Assumes rq->lock is held */
2275static void rq_offline_dl(struct rq *rq)
2276{
2277 if (rq->dl.overloaded)
2278 dl_clear_overload(rq);
6bfd6d72 2279
d8206bb3 2280 cpudl_clear(&rq->rd->cpudl, rq->cpu);
16b26943 2281 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1baca4ce
JL
2282}
2283
a6c0e746 2284void __init init_sched_dl_class(void)
1baca4ce
JL
2285{
2286 unsigned int i;
2287
2288 for_each_possible_cpu(i)
2289 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2290 GFP_KERNEL, cpu_to_node(i));
2291}
2292
f9a25f77
MP
2293void dl_add_task_root_domain(struct task_struct *p)
2294{
2295 struct rq_flags rf;
2296 struct rq *rq;
2297 struct dl_bw *dl_b;
2298
2299 rq = task_rq_lock(p, &rf);
2300 if (!dl_task(p))
2301 goto unlock;
2302
2303 dl_b = &rq->rd->dl_bw;
2304 raw_spin_lock(&dl_b->lock);
2305
2306 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2307
2308 raw_spin_unlock(&dl_b->lock);
2309
2310unlock:
2311 task_rq_unlock(rq, p, &rf);
2312}
2313
2314void dl_clear_root_domain(struct root_domain *rd)
2315{
2316 unsigned long flags;
2317
2318 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2319 rd->dl_bw.total_bw = 0;
2320 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2321}
2322
1baca4ce
JL
2323#endif /* CONFIG_SMP */
2324
aab03e05
DF
2325static void switched_from_dl(struct rq *rq, struct task_struct *p)
2326{
a649f237 2327 /*
209a0cbd
LA
2328 * task_non_contending() can start the "inactive timer" (if the 0-lag
2329 * time is in the future). If the task switches back to dl before
2330 * the "inactive timer" fires, it can continue to consume its current
2331 * runtime using its current deadline. If it stays outside of
2332 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2333 * will reset the task parameters.
a649f237 2334 */
209a0cbd
LA
2335 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2336 task_non_contending(p);
2337
e117cb52
JL
2338 if (!task_on_rq_queued(p)) {
2339 /*
2340 * Inactive timer is armed. However, p is leaving DEADLINE and
2341 * might migrate away from this rq while continuing to run under
2342 * some other class. We need to remove its contribution from
2343 * this rq running_bw now, or sub_rq_bw (below) will complain.
2344 */
2345 if (p->dl.dl_non_contending)
2346 sub_running_bw(&p->dl, &rq->dl);
794a56eb 2347 sub_rq_bw(&p->dl, &rq->dl);
e117cb52 2348 }
8fd27231 2349
209a0cbd
LA
2350 /*
2351 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2352 * at the 0-lag time, because the task could have been migrated
2353 * in the meanwhile, while it was running as SCHED_OTHER.
2354 */
2355 if (p->dl.dl_non_contending)
2356 p->dl.dl_non_contending = 0;
a5e7be3b 2357
1baca4ce
JL
2358 /*
2359 * Since this might be the only -deadline task on the rq,
2360 * this is the right place to try to pull some other one
97fb7a0a 2361 * from an overloaded CPU, if any.
1baca4ce 2362 */
cd660911
WL
2363 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2364 return;
2365
02d8ec94 2366 deadline_queue_pull_task(rq);
aab03e05
DF
2367}
2368
1baca4ce
JL
2369/*
2370 * When switching to -deadline, we may overload the rq, then
2371 * we try to push someone off, if possible.
2372 */
aab03e05
DF
2373static void switched_to_dl(struct rq *rq, struct task_struct *p)
2374{
209a0cbd
LA
2375 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2376 put_task_struct(p);
98b0a857
JL
2377
2378 /* If p is not queued we will update its parameters at next wakeup. */
8fd27231 2379 if (!task_on_rq_queued(p)) {
794a56eb 2380 add_rq_bw(&p->dl, &rq->dl);
98b0a857 2381
8fd27231
LA
2382 return;
2383 }
72f9f3fd 2384
98b0a857 2385 if (rq->curr != p) {
1baca4ce 2386#ifdef CONFIG_SMP
4b53a341 2387 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
02d8ec94 2388 deadline_queue_push_tasks(rq);
619bd4a7 2389#endif
9916e214
PZ
2390 if (dl_task(rq->curr))
2391 check_preempt_curr_dl(rq, p, 0);
2392 else
2393 resched_curr(rq);
aab03e05
DF
2394 }
2395}
2396
1baca4ce
JL
2397/*
2398 * If the scheduling parameters of a -deadline task changed,
2399 * a push or pull operation might be needed.
2400 */
aab03e05
DF
2401static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2402 int oldprio)
2403{
da0c1e65 2404 if (task_on_rq_queued(p) || rq->curr == p) {
aab03e05 2405#ifdef CONFIG_SMP
1baca4ce
JL
2406 /*
2407 * This might be too much, but unfortunately
2408 * we don't have the old deadline value, and
2409 * we can't tell whether the task is increasing
2410 * or lowering its prio, so...
2411 */
2412 if (!rq->dl.overloaded)
02d8ec94 2413 deadline_queue_pull_task(rq);
1baca4ce
JL
2414
2415 /*
2416 * If we now have an earlier deadline task than p,
2417 * then reschedule, provided p is still on this
2418 * runqueue.
2419 */
9916e214 2420 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
8875125e 2421 resched_curr(rq);
1baca4ce
JL
2422#else
2423 /*
2424 * Again, we don't know if p has an earlier
2425 * or later deadline, so let's blindly set a
2426 * (maybe not needed) rescheduling point.
2427 */
8875125e 2428 resched_curr(rq);
1baca4ce 2429#endif /* CONFIG_SMP */
801ccdbf 2430 }
aab03e05 2431}
aab03e05
DF
2432
2433const struct sched_class dl_sched_class = {
2434 .next = &rt_sched_class,
2435 .enqueue_task = enqueue_task_dl,
2436 .dequeue_task = dequeue_task_dl,
2437 .yield_task = yield_task_dl,
2438
2439 .check_preempt_curr = check_preempt_curr_dl,
2440
2441 .pick_next_task = pick_next_task_dl,
2442 .put_prev_task = put_prev_task_dl,
03b7fad1 2443 .set_next_task = set_next_task_dl,
aab03e05
DF
2444
2445#ifdef CONFIG_SMP
6e2df058 2446 .balance = balance_dl,
aab03e05 2447 .select_task_rq = select_task_rq_dl,
209a0cbd 2448 .migrate_task_rq = migrate_task_rq_dl,
1baca4ce
JL
2449 .set_cpus_allowed = set_cpus_allowed_dl,
2450 .rq_online = rq_online_dl,
2451 .rq_offline = rq_offline_dl,
1baca4ce 2452 .task_woken = task_woken_dl,
aab03e05
DF
2453#endif
2454
aab03e05
DF
2455 .task_tick = task_tick_dl,
2456 .task_fork = task_fork_dl,
aab03e05
DF
2457
2458 .prio_changed = prio_changed_dl,
2459 .switched_from = switched_from_dl,
2460 .switched_to = switched_to_dl,
6e998916
SG
2461
2462 .update_curr = update_curr_dl,
aab03e05 2463};
acb32132 2464
06a76fe0
NP
2465int sched_dl_global_validate(void)
2466{
2467 u64 runtime = global_rt_runtime();
2468 u64 period = global_rt_period();
2469 u64 new_bw = to_ratio(period, runtime);
2470 struct dl_bw *dl_b;
2471 int cpu, ret = 0;
2472 unsigned long flags;
2473
2474 /*
2475 * Here we want to check that the bandwidth is not being set to a
2476 * value smaller than the currently allocated bandwidth in
2477 * any of the root_domains.
2478 *
2479 * FIXME: Cycling over all the CPUs is overkill, but simpler than
2480 * cycling over root_domains... Discussion on different/better
2481 * solutions is welcome!
2482 */
2483 for_each_possible_cpu(cpu) {
2484 rcu_read_lock_sched();
2485 dl_b = dl_bw_of(cpu);
2486
2487 raw_spin_lock_irqsave(&dl_b->lock, flags);
2488 if (new_bw < dl_b->total_bw)
2489 ret = -EBUSY;
2490 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2491
2492 rcu_read_unlock_sched();
2493
2494 if (ret)
2495 break;
2496 }
2497
2498 return ret;
2499}
2500
ba4f7bc1 2501static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
06a76fe0
NP
2502{
2503 if (global_rt_runtime() == RUNTIME_INF) {
2504 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
2505 dl_rq->extra_bw = 1 << BW_SHIFT;
2506 } else {
2507 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2508 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
2509 dl_rq->extra_bw = to_ratio(global_rt_period(),
2510 global_rt_runtime());
2511 }
2512}
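
/*
 * Illustrative numbers (not part of the kernel source), assuming the
 * default sched_rt_runtime_us = 950000, sched_rt_period_us = 1000000,
 * BW_SHIFT = 20 and RATIO_SHIFT = 8:
 *
 *	extra_bw = to_ratio(period, runtime)                             ~= 0.95 * 2^20
 *	bw_ratio = to_ratio(runtime, period) >> (BW_SHIFT - RATIO_SHIFT) ~= 1.05 * 2^8
 *
 * i.e. GRUB reclaiming may hand out up to ~95% of a CPU, and reclaimed
 * runtime is scaled by roughly 1/0.95 (see grub_reclaim()).
 */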
2513
2514void sched_dl_do_global(void)
2515{
2516 u64 new_bw = -1;
2517 struct dl_bw *dl_b;
2518 int cpu;
2519 unsigned long flags;
2520
2521 def_dl_bandwidth.dl_period = global_rt_period();
2522 def_dl_bandwidth.dl_runtime = global_rt_runtime();
2523
2524 if (global_rt_runtime() != RUNTIME_INF)
2525 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2526
2527 /*
2528 * FIXME: As above...
2529 */
2530 for_each_possible_cpu(cpu) {
2531 rcu_read_lock_sched();
2532 dl_b = dl_bw_of(cpu);
2533
2534 raw_spin_lock_irqsave(&dl_b->lock, flags);
2535 dl_b->bw = new_bw;
2536 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2537
2538 rcu_read_unlock_sched();
2539 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2540 }
2541}
2542
2543/*
2544 * We must be sure that accepting a new task (or allowing changing the
2545 * parameters of an existing one) is consistent with the bandwidth
2546 * constraints. If yes, this function also accordingly updates the currently
2547 * allocated bandwidth to reflect the new situation.
2548 *
2549 * This function is called while holding p's rq->lock.
2550 */
2551int sched_dl_overflow(struct task_struct *p, int policy,
2552 const struct sched_attr *attr)
2553{
2554 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
2555 u64 period = attr->sched_period ?: attr->sched_deadline;
2556 u64 runtime = attr->sched_runtime;
2557 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
2558 int cpus, err = -1;
2559
794a56eb
JL
2560 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2561 return 0;
2562
06a76fe0
NP
2563 /* !deadline task may carry old deadline bandwidth */
2564 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2565 return 0;
2566
2567 /*
2568 * Whether a task enters, leaves, or stays -deadline but changes
2569 * its parameters, we may need to update the total allocated
2570 * bandwidth of its container accordingly.
2571 */
2572 raw_spin_lock(&dl_b->lock);
2573 cpus = dl_bw_cpus(task_cpu(p));
2574 if (dl_policy(policy) && !task_has_dl_policy(p) &&
2575 !__dl_overflow(dl_b, cpus, 0, new_bw)) {
2576 if (hrtimer_active(&p->dl.inactive_timer))
8c0944ce 2577 __dl_sub(dl_b, p->dl.dl_bw, cpus);
06a76fe0
NP
2578 __dl_add(dl_b, new_bw, cpus);
2579 err = 0;
2580 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
2581 !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
2582 /*
2583 * XXX this is slightly incorrect: when the task
2584 * utilization decreases, we should delay the total
2585 * utilization change until the task's 0-lag point.
2586 * But this would require setting the task's "inactive
2587 * timer" when the task is not inactive.
2588 */
8c0944ce 2589 __dl_sub(dl_b, p->dl.dl_bw, cpus);
06a76fe0
NP
2590 __dl_add(dl_b, new_bw, cpus);
2591 dl_change_utilization(p, new_bw);
2592 err = 0;
2593 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2594 /*
2595 * Do not decrease the total deadline utilization here,
2596 * switched_from_dl() will take care to do it at the correct
2597 * (0-lag) time.
2598 */
2599 err = 0;
2600 }
2601 raw_spin_unlock(&dl_b->lock);
2602
2603 return err;
2604}
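
/*
 * Worked example (illustrative, not part of the kernel source): bandwidths
 * are kept in BW_SHIFT (2^20) fixed point, so a task asking for
 * sched_runtime = 10ms over sched_period = 100ms gets
 *
 *	new_bw = to_ratio(100ms, 10ms) ~= 0.10 * 2^20
 *
 * With the default 95% limit on, say, a 4-CPU root domain, the admission
 * above succeeds as long as total_bw - old_bw + new_bw stays within
 * roughly 4 * 0.95 * 2^20 (see __dl_overflow()).
 */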
2605
2606/*
2607 * This function initializes the sched_dl_entity of a newly becoming
2608 * SCHED_DEADLINE task.
2609 *
2610 * Only the static values are considered here, the actual runtime and the
2611 * absolute deadline will be properly calculated when the task is enqueued
2612 * for the first time with its new policy.
2613 */
2614void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
2615{
2616 struct sched_dl_entity *dl_se = &p->dl;
2617
2618 dl_se->dl_runtime = attr->sched_runtime;
2619 dl_se->dl_deadline = attr->sched_deadline;
2620 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
2621 dl_se->flags = attr->sched_flags;
2622 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
2623 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
2624}
2625
2626void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
2627{
2628 struct sched_dl_entity *dl_se = &p->dl;
2629
2630 attr->sched_priority = p->rt_priority;
2631 attr->sched_runtime = dl_se->dl_runtime;
2632 attr->sched_deadline = dl_se->dl_deadline;
2633 attr->sched_period = dl_se->dl_period;
2634 attr->sched_flags = dl_se->flags;
2635}
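
/*
 * Minimal userspace sketch (illustrative, not part of the kernel source)
 * of how these parameters usually reach __setparam_dl(), assuming a libc
 * without a sched_setattr() wrapper so the raw syscall is used:
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	=  10 * 1000 * 1000,	// 10ms, all values in ns
 *		.sched_deadline	= 100 * 1000 * 1000,
 *		.sched_period	= 100 * 1000 * 1000,
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))	// 0 == current thread
 *		perror("sched_setattr");
 *
 * The request is rejected unless __checkparam_dl() below accepts these
 * values and sched_dl_overflow() admits the resulting bandwidth.
 */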
2636
2637/*
2638 * This function validates the new parameters of a -deadline task.
2639 * We ask for the deadline to be non-zero and no smaller than
2640 * the runtime, and for the period to be either zero or no
2641 * smaller than the deadline. Furthermore, we have to be sure that
2642 * user parameters are above the internal resolution of 1us (we
2643 * check sched_runtime only since it is always the smaller one) and
2644 * below 2^63 ns (we have to check both sched_deadline and
2645 * sched_period, as the latter can be zero).
2646 */
2647bool __checkparam_dl(const struct sched_attr *attr)
2648{
794a56eb
JL
2649 /* special dl tasks don't actually use any parameter */
2650 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2651 return true;
2652
06a76fe0
NP
2653 /* deadline != 0 */
2654 if (attr->sched_deadline == 0)
2655 return false;
2656
2657 /*
2658 * Since we truncate DL_SCALE bits, make sure we're at least
2659 * that big.
2660 */
2661 if (attr->sched_runtime < (1ULL << DL_SCALE))
2662 return false;
2663
2664 /*
2665 * Since we use the MSB for wrap-around and sign issues, make
2666 * sure it's not set (mind that period can be equal to zero).
2667 */
2668 if (attr->sched_deadline & (1ULL << 63) ||
2669 attr->sched_period & (1ULL << 63))
2670 return false;
2671
2672 /* runtime <= deadline <= period (if period != 0) */
2673 if ((attr->sched_period != 0 &&
2674 attr->sched_period < attr->sched_deadline) ||
2675 attr->sched_deadline < attr->sched_runtime)
2676 return false;
2677
2678 return true;
2679}
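
/*
 * Illustrative examples (not part of the kernel source) of the rules above,
 * assuming DL_SCALE = 10 (i.e. a ~1us granularity):
 *
 *	runtime =   5ms, deadline = 10ms, period = 20ms  -> accepted
 *	runtime =   5ms, deadline = 10ms, period =  0    -> accepted (period defaults to deadline)
 *	runtime = 500ns, deadline = 10ms, period =  0    -> rejected (below the 2^DL_SCALE ns resolution)
 *	runtime =  15ms, deadline = 10ms, period = 20ms  -> rejected (runtime > deadline)
 *	runtime =   5ms, deadline = 30ms, period = 20ms  -> rejected (deadline > period)
 */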
2680
2681/*
2682 * This function clears the sched_dl_entity static params.
2683 */
2684void __dl_clear_params(struct task_struct *p)
2685{
2686 struct sched_dl_entity *dl_se = &p->dl;
2687
97fb7a0a
IM
2688 dl_se->dl_runtime = 0;
2689 dl_se->dl_deadline = 0;
2690 dl_se->dl_period = 0;
2691 dl_se->flags = 0;
2692 dl_se->dl_bw = 0;
2693 dl_se->dl_density = 0;
06a76fe0 2694
97fb7a0a
IM
2695 dl_se->dl_throttled = 0;
2696 dl_se->dl_yielded = 0;
2697 dl_se->dl_non_contending = 0;
2698 dl_se->dl_overrun = 0;
06a76fe0
NP
2699}
2700
2701bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
2702{
2703 struct sched_dl_entity *dl_se = &p->dl;
2704
2705 if (dl_se->dl_runtime != attr->sched_runtime ||
2706 dl_se->dl_deadline != attr->sched_deadline ||
2707 dl_se->dl_period != attr->sched_period ||
2708 dl_se->flags != attr->sched_flags)
2709 return true;
2710
2711 return false;
2712}
2713
2714#ifdef CONFIG_SMP
2715int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed)
2716{
97fb7a0a 2717 unsigned int dest_cpu;
06a76fe0
NP
2718 struct dl_bw *dl_b;
2719 bool overflow;
2720 int cpus, ret;
2721 unsigned long flags;
2722
97fb7a0a
IM
2723 dest_cpu = cpumask_any_and(cpu_active_mask, cs_cpus_allowed);
2724
06a76fe0
NP
2725 rcu_read_lock_sched();
2726 dl_b = dl_bw_of(dest_cpu);
2727 raw_spin_lock_irqsave(&dl_b->lock, flags);
2728 cpus = dl_bw_cpus(dest_cpu);
2729 overflow = __dl_overflow(dl_b, cpus, 0, p->dl.dl_bw);
97fb7a0a 2730 if (overflow) {
06a76fe0 2731 ret = -EBUSY;
97fb7a0a 2732 } else {
06a76fe0
NP
2733 /*
2734 * We reserve space for this task in the destination
2735 * root_domain, as we can't fail after this point.
2736 * We will free resources in the source root_domain
2737 * later on (see set_cpus_allowed_dl()).
2738 */
2739 __dl_add(dl_b, p->dl.dl_bw, cpus);
2740 ret = 0;
2741 }
2742 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2743 rcu_read_unlock_sched();
97fb7a0a 2744
06a76fe0
NP
2745 return ret;
2746}
2747
2748int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
2749 const struct cpumask *trial)
2750{
2751 int ret = 1, trial_cpus;
2752 struct dl_bw *cur_dl_b;
2753 unsigned long flags;
2754
2755 rcu_read_lock_sched();
2756 cur_dl_b = dl_bw_of(cpumask_any(cur));
2757 trial_cpus = cpumask_weight(trial);
2758
2759 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
2760 if (cur_dl_b->bw != -1 &&
2761 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
2762 ret = 0;
2763 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
2764 rcu_read_unlock_sched();
97fb7a0a 2765
06a76fe0
NP
2766 return ret;
2767}
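
/*
 * Worked example (illustrative, not part of the kernel source): with the
 * default per-CPU limit bw ~= 0.95 * 2^20 and two admitted tasks of 0.6
 * CPU each (total_bw ~= 1.2 * 2^20), shrinking the cpuset to a single CPU
 * is refused, since 1 * 0.95 < 1.2, while a trial mask spanning two CPUs
 * is still fine, since 2 * 0.95 >= 1.2.
 */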
2768
2769bool dl_cpu_busy(unsigned int cpu)
2770{
2771 unsigned long flags;
2772 struct dl_bw *dl_b;
2773 bool overflow;
2774 int cpus;
2775
2776 rcu_read_lock_sched();
2777 dl_b = dl_bw_of(cpu);
2778 raw_spin_lock_irqsave(&dl_b->lock, flags);
2779 cpus = dl_bw_cpus(cpu);
2780 overflow = __dl_overflow(dl_b, cpus, 0, 0);
2781 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2782 rcu_read_unlock_sched();
97fb7a0a 2783
06a76fe0
NP
2784 return overflow;
2785}
2786#endif
2787
acb32132 2788#ifdef CONFIG_SCHED_DEBUG
acb32132
WL
2789void print_dl_stats(struct seq_file *m, int cpu)
2790{
2791 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
2792}
2793#endif /* CONFIG_SCHED_DEBUG */