b2441318 1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Deadline Scheduling Class (SCHED_DEADLINE)
4 *
5 * Earliest Deadline First (EDF) + Constant Bandwidth Server (CBS).
6 *
 7 * Tasks that periodically execute their instances for less than their
 8 * runtime won't miss any of their deadlines.
 9 * Tasks that are not periodic or sporadic, or that try to execute more
 10 * than their reserved bandwidth, will be slowed down (and may potentially
 11 * miss some of their deadlines), and won't affect any other task.
12 *
13 * Copyright (C) 2012 Dario Faggioli <raistlin@linux.it>,
1baca4ce 14 * Juri Lelli <juri.lelli@gmail.com>,
15 * Michael Trimarchi <michael@amarulasolutions.com>,
16 * Fabio Checconi <fchecconi@gmail.com>
17 */
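/*
 * Illustrative only (not part of the original file): a minimal userspace
 * sketch of how a task becomes SCHED_DEADLINE, assuming struct sched_attr
 * from <linux/sched/types.h> and a raw syscall(2) since glibc has no
 * sched_setattr() wrapper. The numbers are example values, not tuning advice.
 *
 *	struct sched_attr attr = {
 *		.size		= sizeof(attr),
 *		.sched_policy	= SCHED_DEADLINE,
 *		.sched_runtime	= 10 * 1000 * 1000,	// 10 ms worst-case budget
 *		.sched_deadline	= 30 * 1000 * 1000,	// 30 ms relative deadline
 *		.sched_period	= 100 * 1000 * 1000,	// 100 ms period
 *	};
 *
 *	if (syscall(SYS_sched_setattr, 0, &attr, 0))	// pid 0 == current task
 *		perror("sched_setattr");
 */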
aab03e05 18
19#include <linux/cpuset.h>
20
21/*
22 * Default limits for DL period; on the top end we guard against small util
23 * tasks still getting ridiculously long effective runtimes, on the bottom end we
24 * guard against timer DoS.
25 */
26static unsigned int sysctl_sched_dl_period_max = 1 << 22; /* ~4 seconds */
27static unsigned int sysctl_sched_dl_period_min = 100; /* 100 us */
28#ifdef CONFIG_SYSCTL
29static struct ctl_table sched_dl_sysctls[] = {
30 {
31 .procname = "sched_deadline_period_max_us",
32 .data = &sysctl_sched_dl_period_max,
33 .maxlen = sizeof(unsigned int),
34 .mode = 0644,
35 .proc_handler = proc_douintvec_minmax,
36 .extra1 = (void *)&sysctl_sched_dl_period_min,
37 },
38 {
39 .procname = "sched_deadline_period_min_us",
40 .data = &sysctl_sched_dl_period_min,
41 .maxlen = sizeof(unsigned int),
42 .mode = 0644,
43 .proc_handler = proc_douintvec_minmax,
44 .extra2 = (void *)&sysctl_sched_dl_period_max,
84227c12 45 },
46};
47
48static int __init sched_dl_sysctl_init(void)
49{
50 register_sysctl_init("kernel", sched_dl_sysctls);
51 return 0;
52}
53late_initcall(sched_dl_sysctl_init);
54#endif
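/*
 * Example (illustrative, not from the original source): with the defaults
 * above, the limits are exposed as
 * /proc/sys/kernel/sched_deadline_period_max_us (1 << 22 = 4194304 us,
 * i.e. ~4.19 s) and /proc/sys/kernel/sched_deadline_period_min_us (100 us);
 * a relative period handed to sched_setattr() has to fall inside [min, max]
 * or admission is refused.
 */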
55
56static bool dl_server(struct sched_dl_entity *dl_se)
57{
58 return dl_se->dl_server;
59}
60
61static inline struct task_struct *dl_task_of(struct sched_dl_entity *dl_se)
62{
63ba8422 63 BUG_ON(dl_server(dl_se));
64 return container_of(dl_se, struct task_struct, dl);
65}
66
67static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
68{
69 return container_of(dl_rq, struct rq, dl);
70}
71
63ba8422 72static inline struct rq *rq_of_dl_se(struct sched_dl_entity *dl_se)
aab03e05 73{
74 struct rq *rq = dl_se->rq;
75
76 if (!dl_server(dl_se))
77 rq = task_rq(dl_task_of(dl_se));
aab03e05 78
79 return rq;
80}
81
82static inline struct dl_rq *dl_rq_of_se(struct sched_dl_entity *dl_se)
83{
84 return &rq_of_dl_se(dl_se)->dl;
85}
86
87static inline int on_dl_rq(struct sched_dl_entity *dl_se)
88{
89 return !RB_EMPTY_NODE(&dl_se->rb_node);
90}
91
92#ifdef CONFIG_RT_MUTEXES
93static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
94{
95 return dl_se->pi_se;
96}
97
98static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
99{
100 return pi_of(dl_se) != dl_se;
101}
102#else
103static inline struct sched_dl_entity *pi_of(struct sched_dl_entity *dl_se)
104{
105 return dl_se;
106}
107
108static inline bool is_dl_boosted(struct sched_dl_entity *dl_se)
109{
110 return false;
111}
112#endif
113
114#ifdef CONFIG_SMP
115static inline struct dl_bw *dl_bw_of(int i)
116{
117 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
118 "sched RCU must be held");
119 return &cpu_rq(i)->rd->dl_bw;
120}
121
122static inline int dl_bw_cpus(int i)
123{
124 struct root_domain *rd = cpu_rq(i)->rd;
c81b8932 125 int cpus;
126
127 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
128 "sched RCU must be held");
129
130 if (cpumask_subset(rd->span, cpu_active_mask))
131 return cpumask_weight(rd->span);
132
133 cpus = 0;
134
135 for_each_cpu_and(i, rd->span, cpu_active_mask)
136 cpus++;
137
138 return cpus;
139}
fc9dc698 140
6092478b 141static inline unsigned long __dl_bw_capacity(const struct cpumask *mask)
fc9dc698 142{
fc9dc698 143 unsigned long cap = 0;
6092478b 144 int i;
fc9dc698 145
6092478b 146 for_each_cpu_and(i, mask, cpu_active_mask)
7bc26384 147 cap += arch_scale_cpu_capacity(i);
148
149 return cap;
150}
151
152/*
153 * XXX Fix: If 'rq->rd == def_root_domain' perform AC against capacity
 154 * of the CPU the task is running on rather than rd's \Sum CPU capacity.
155 */
156static inline unsigned long dl_bw_capacity(int i)
157{
740cf8a7 158 if (!sched_asym_cpucap_active() &&
7bc26384 159 arch_scale_cpu_capacity(i) == SCHED_CAPACITY_SCALE) {
160 return dl_bw_cpus(i) << SCHED_CAPACITY_SHIFT;
161 } else {
162 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
163 "sched RCU must be held");
164
165 return __dl_bw_capacity(cpu_rq(i)->rd->span);
166 }
167}
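/*
 * Worked example (illustrative, with made-up CPU capacities): on a symmetric
 * 4-CPU root domain dl_bw_capacity() returns
 * dl_bw_cpus() << SCHED_CAPACITY_SHIFT = 4 * 1024 = 4096. On an asymmetric
 * system with two big CPUs (1024) and two little CPUs (512) it instead sums
 * the per-CPU capacities: 2*1024 + 2*512 = 3072.
 */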
168
169static inline bool dl_bw_visited(int cpu, u64 gen)
170{
171 struct root_domain *rd = cpu_rq(cpu)->rd;
172
173 if (rd->visit_gen == gen)
174 return true;
175
176 rd->visit_gen = gen;
177 return false;
178}
179
180static inline
181void __dl_update(struct dl_bw *dl_b, s64 bw)
182{
183 struct root_domain *rd = container_of(dl_b, struct root_domain, dl_bw);
184 int i;
185
186 RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
187 "sched RCU must be held");
188 for_each_cpu_and(i, rd->span, cpu_active_mask) {
189 struct rq *rq = cpu_rq(i);
190
191 rq->dl.extra_bw += bw;
192 }
193}
194#else
195static inline struct dl_bw *dl_bw_of(int i)
196{
197 return &cpu_rq(i)->dl.dl_bw;
198}
199
200static inline int dl_bw_cpus(int i)
201{
202 return 1;
203}
204
205static inline unsigned long dl_bw_capacity(int i)
206{
207 return SCHED_CAPACITY_SCALE;
208}
209
210static inline bool dl_bw_visited(int cpu, u64 gen)
211{
212 return false;
213}
214
215static inline
216void __dl_update(struct dl_bw *dl_b, s64 bw)
217{
218 struct dl_rq *dl = container_of(dl_b, struct dl_rq, dl_bw);
219
220 dl->extra_bw += bw;
221}
222#endif
223
f1304ecb
DE
224static inline
225void __dl_sub(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
226{
227 dl_b->total_bw -= tsk_bw;
228 __dl_update(dl_b, (s32)tsk_bw / cpus);
229}
230
231static inline
232void __dl_add(struct dl_bw *dl_b, u64 tsk_bw, int cpus)
233{
234 dl_b->total_bw += tsk_bw;
235 __dl_update(dl_b, -((s32)tsk_bw / cpus));
236}
237
238static inline bool
239__dl_overflow(struct dl_bw *dl_b, unsigned long cap, u64 old_bw, u64 new_bw)
240{
241 return dl_b->bw != -1 &&
242 cap_scale(dl_b->bw, cap) < dl_b->total_bw - old_bw + new_bw;
243}
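/*
 * Worked example (illustrative numbers): with the default 95% limit,
 * dl_b->bw = to_ratio(1s, 950ms) ~= 0.95 << BW_SHIFT = 996147. On a 4-CPU
 * symmetric root domain cap = 4096, so
 * cap_scale(dl_b->bw, cap) = (996147 * 4096) >> SCHED_CAPACITY_SHIFT
 * = 3984588, i.e. "3.8 CPUs" worth of bandwidth. A new task with
 * runtime = 10ms and period = 100ms contributes new_bw ~= 0.1 << BW_SHIFT
 * = 104857, so with total_bw = 0 and old_bw = 0 the check
 * 3984588 < 0 - 0 + 104857 is false and the task is admitted.
 */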
244
e36d8677 245static inline
794a56eb 246void __add_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
247{
248 u64 old = dl_rq->running_bw;
249
5cb9eaa3 250 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
251 dl_rq->running_bw += dl_bw;
252 SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
8fd27231 253 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
e0367b12 254 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
4042d003 255 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
256}
257
258static inline
794a56eb 259void __sub_running_bw(u64 dl_bw, struct dl_rq *dl_rq)
260{
261 u64 old = dl_rq->running_bw;
262
5cb9eaa3 263 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
264 dl_rq->running_bw -= dl_bw;
265 SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
266 if (dl_rq->running_bw > old)
267 dl_rq->running_bw = 0;
e0367b12 268 /* kick cpufreq (see the comment in kernel/sched/sched.h). */
4042d003 269 cpufreq_update_util(rq_of_dl_rq(dl_rq), 0);
270}
271
8fd27231 272static inline
794a56eb 273void __add_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
274{
275 u64 old = dl_rq->this_bw;
276
5cb9eaa3 277 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
278 dl_rq->this_bw += dl_bw;
279 SCHED_WARN_ON(dl_rq->this_bw < old); /* overflow */
280}
281
282static inline
794a56eb 283void __sub_rq_bw(u64 dl_bw, struct dl_rq *dl_rq)
284{
285 u64 old = dl_rq->this_bw;
286
5cb9eaa3 287 lockdep_assert_rq_held(rq_of_dl_rq(dl_rq));
288 dl_rq->this_bw -= dl_bw;
289 SCHED_WARN_ON(dl_rq->this_bw > old); /* underflow */
290 if (dl_rq->this_bw > old)
291 dl_rq->this_bw = 0;
292 SCHED_WARN_ON(dl_rq->running_bw > dl_rq->this_bw);
293}
294
295static inline
296void add_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
297{
298 if (!dl_entity_is_special(dl_se))
299 __add_rq_bw(dl_se->dl_bw, dl_rq);
300}
301
302static inline
303void sub_rq_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
304{
305 if (!dl_entity_is_special(dl_se))
306 __sub_rq_bw(dl_se->dl_bw, dl_rq);
307}
308
309static inline
310void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
311{
312 if (!dl_entity_is_special(dl_se))
313 __add_running_bw(dl_se->dl_bw, dl_rq);
314}
315
316static inline
317void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
318{
319 if (!dl_entity_is_special(dl_se))
320 __sub_running_bw(dl_se->dl_bw, dl_rq);
321}
322
ba4f7bc1 323static void dl_change_utilization(struct task_struct *p, u64 new_bw)
209a0cbd 324{
8fd27231 325 struct rq *rq;
209a0cbd 326
09348d75 327 WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV);
794a56eb 328
8fd27231 329 if (task_on_rq_queued(p))
330 return;
331
332 rq = task_rq(p);
333 if (p->dl.dl_non_contending) {
794a56eb 334 sub_running_bw(&p->dl, &rq->dl);
335 p->dl.dl_non_contending = 0;
336 /*
337 * If the timer handler is currently running and the
3b03706f 338 * timer cannot be canceled, inactive_task_timer()
 339 * will see that dl_non_contending is not set, and
340 * will not touch the rq's active utilization,
341 * so we are still safe.
342 */
343 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
344 put_task_struct(p);
345 }
346 __sub_rq_bw(p->dl.dl_bw, &rq->dl);
347 __add_rq_bw(new_bw, &rq->dl);
348}
349
350static void __dl_clear_params(struct sched_dl_entity *dl_se);
351
352/*
353 * The utilization of a task cannot be immediately removed from
354 * the rq active utilization (running_bw) when the task blocks.
355 * Instead, we have to wait for the so called "0-lag time".
356 *
357 * If a task blocks before the "0-lag time", a timer (the inactive
358 * timer) is armed, and running_bw is decreased when the timer
359 * fires.
360 *
361 * If the task wakes up again before the inactive timer fires,
3b03706f 362 * the timer is canceled, whereas if the task wakes up after the
363 * inactive timer fired (and running_bw has been decreased) the
364 * task's utilization has to be added to running_bw again.
365 * A flag in the deadline scheduling entity (dl_non_contending)
366 * is used to avoid race conditions between the inactive timer handler
367 * and task wakeups.
368 *
369 * The following diagram shows how running_bw is updated. A task is
370 * "ACTIVE" when its utilization contributes to running_bw; an
371 * "ACTIVE contending" task is in the TASK_RUNNING state, while an
372 * "ACTIVE non contending" task is a blocked task for which the "0-lag time"
373 * has not passed yet. An "INACTIVE" task is a task for which the "0-lag"
374 * time already passed, which does not contribute to running_bw anymore.
375 * +------------------+
376 * wakeup | ACTIVE |
377 * +------------------>+ contending |
378 * | add_running_bw | |
379 * | +----+------+------+
380 * | | ^
381 * | dequeue | |
382 * +--------+-------+ | |
383 * | | t >= 0-lag | | wakeup
384 * | INACTIVE |<---------------+ |
385 * | | sub_running_bw | |
386 * +--------+-------+ | |
387 * ^ | |
388 * | t < 0-lag | |
389 * | | |
390 * | V |
391 * | +----+------+------+
392 * | sub_running_bw | ACTIVE |
393 * +-------------------+ |
394 * inactive timer | non contending |
395 * fired +------------------+
396 *
397 * The task_non_contending() function is invoked when a task
398 * blocks, and checks if the 0-lag time already passed or
399 * not (in the first case, it directly updates running_bw;
400 * in the second case, it arms the inactive timer).
401 *
402 * The task_contending() function is invoked when a task wakes
403 * up, and checks if the task is still in the "ACTIVE non contending"
404 * state or not (in the second case, it updates running_bw).
405 */
2f7a0f58 406static void task_non_contending(struct sched_dl_entity *dl_se)
209a0cbd 407{
209a0cbd 408 struct hrtimer *timer = &dl_se->inactive_timer;
409 struct rq *rq = rq_of_dl_se(dl_se);
410 struct dl_rq *dl_rq = &rq->dl;
411 s64 zerolag_time;
412
413 /*
414 * If this is a non-deadline task that has been boosted,
415 * do nothing
416 */
417 if (dl_se->dl_runtime == 0)
418 return;
419
420 if (dl_entity_is_special(dl_se))
421 return;
422
423 WARN_ON(dl_se->dl_non_contending);
424
425 zerolag_time = dl_se->deadline -
426 div64_long((dl_se->runtime * dl_se->dl_period),
427 dl_se->dl_runtime);
428
429 /*
430 * Using relative times instead of the absolute "0-lag time"
 431 * allows us to simplify the code
432 */
433 zerolag_time -= rq_clock(rq);
434
435 /*
436 * If the "0-lag time" already passed, decrease the active
437 * utilization now, instead of starting a timer
438 */
1b02cd6a 439 if ((zerolag_time < 0) || hrtimer_active(&dl_se->inactive_timer)) {
63ba8422 440 if (dl_server(dl_se)) {
794a56eb 441 sub_running_bw(dl_se, dl_rq);
442 } else {
443 struct task_struct *p = dl_task_of(dl_se);
2f7a0f58 444
445 if (dl_task(p))
446 sub_running_bw(dl_se, dl_rq);
387e3130 447
448 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
449 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
450
451 if (READ_ONCE(p->__state) == TASK_DEAD)
452 sub_rq_bw(dl_se, &rq->dl);
453 raw_spin_lock(&dl_b->lock);
454 __dl_sub(dl_b, dl_se->dl_bw, dl_bw_cpus(task_cpu(p)));
455 raw_spin_unlock(&dl_b->lock);
456 __dl_clear_params(dl_se);
457 }
387e3130 458 }
459
460 return;
461 }
462
463 dl_se->dl_non_contending = 1;
464 if (!dl_server(dl_se))
465 get_task_struct(dl_task_of(dl_se));
466
850377a8 467 hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
468}
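/*
 * Worked example (illustrative numbers): a task with dl_runtime = 10ms and
 * dl_period = 100ms blocks with 2ms of runtime left. Its 0-lag time is
 * deadline - runtime * dl_period / dl_runtime = deadline - 20ms. If the
 * task blocks more than 20ms before its absolute deadline, the inactive
 * timer is armed for that instant; if it blocks later, the relative
 * zerolag_time is already negative and running_bw is decreased right away.
 */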
469
8fd27231 470static void task_contending(struct sched_dl_entity *dl_se, int flags)
471{
472 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
473
474 /*
475 * If this is a non-deadline task that has been boosted,
476 * do nothing
477 */
478 if (dl_se->dl_runtime == 0)
479 return;
480
8fd27231 481 if (flags & ENQUEUE_MIGRATED)
794a56eb 482 add_rq_bw(dl_se, dl_rq);
8fd27231 483
484 if (dl_se->dl_non_contending) {
485 dl_se->dl_non_contending = 0;
486 /*
487 * If the timer handler is currently running and the
3b03706f 488 * timer cannot be canceled, inactive_task_timer()
 489 * will see that dl_non_contending is not set, and
490 * will not touch the rq's active utilization,
491 * so we are still safe.
492 */
493 if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) {
494 if (!dl_server(dl_se))
495 put_task_struct(dl_task_of(dl_se));
496 }
497 } else {
498 /*
499 * Since "dl_non_contending" is not set, the
500 * task's utilization has already been removed from
501 * active utilization (either when the task blocked,
 502 * or when the "inactive timer" fired).
503 * So, add it back.
504 */
794a56eb 505 add_running_bw(dl_se, dl_rq);
506 }
507}
508
63ba8422 509static inline int is_leftmost(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
aab03e05 510{
f4478e7c 511 return rb_first_cached(&dl_rq->root) == &dl_se->rb_node;
512}
513
514static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq);
515
516void init_dl_bw(struct dl_bw *dl_b)
517{
518 raw_spin_lock_init(&dl_b->lock);
1724813d 519 if (global_rt_runtime() == RUNTIME_INF)
520 dl_b->bw = -1;
521 else
1724813d 522 dl_b->bw = to_ratio(global_rt_period(), global_rt_runtime());
523 dl_b->total_bw = 0;
524}
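/*
 * Worked example (illustrative): with the default rt limits
 * (sched_rt_runtime_us = 950000, sched_rt_period_us = 1000000),
 * dl_b->bw = to_ratio(period, runtime) = (runtime << BW_SHIFT) / period
 * ~= 0.95 * 2^20 ~= 996147, i.e. 95% of a CPU in BW_SHIFT fixed point.
 */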
525
07c54f7a 526void init_dl_rq(struct dl_rq *dl_rq)
aab03e05 527{
2161573e 528 dl_rq->root = RB_ROOT_CACHED;
529
530#ifdef CONFIG_SMP
531 /* zero means no -deadline tasks */
532 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0;
533
1baca4ce 534 dl_rq->overloaded = 0;
2161573e 535 dl_rq->pushable_dl_tasks_root = RB_ROOT_CACHED;
536#else
537 init_dl_bw(&dl_rq->dl_bw);
1baca4ce 538#endif
539
540 dl_rq->running_bw = 0;
8fd27231 541 dl_rq->this_bw = 0;
4da3abce 542 init_dl_rq_bw_ratio(dl_rq);
543}
544
545#ifdef CONFIG_SMP
546
547static inline int dl_overloaded(struct rq *rq)
548{
549 return atomic_read(&rq->rd->dlo_count);
550}
551
552static inline void dl_set_overload(struct rq *rq)
553{
554 if (!rq->online)
555 return;
556
557 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask);
558 /*
559 * Must be visible before the overload count is
560 * set (as in sched_rt.c).
561 *
562 * Matched by the barrier in pull_dl_task().
563 */
564 smp_wmb();
565 atomic_inc(&rq->rd->dlo_count);
566}
567
568static inline void dl_clear_overload(struct rq *rq)
569{
570 if (!rq->online)
571 return;
572
573 atomic_dec(&rq->rd->dlo_count);
574 cpumask_clear_cpu(rq->cpu, rq->rd->dlo_mask);
575}
576
577#define __node_2_pdl(node) \
578 rb_entry((node), struct task_struct, pushable_dl_tasks)
579
580static inline bool __pushable_less(struct rb_node *a, const struct rb_node *b)
581{
582 return dl_entity_preempt(&__node_2_pdl(a)->dl, &__node_2_pdl(b)->dl);
583}
584
585static inline int has_pushable_dl_tasks(struct rq *rq)
586{
587 return !RB_EMPTY_ROOT(&rq->dl.pushable_dl_tasks_root.rb_root);
588}
589
590/*
 591 * The list of pushable -deadline tasks is not a plist, like in
 592 * sched_rt.c; it is an rb-tree with tasks ordered by deadline.
593 */
594static void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
595{
8ecca394 596 struct rb_node *leftmost;
1baca4ce 597
09348d75 598 WARN_ON_ONCE(!RB_EMPTY_NODE(&p->pushable_dl_tasks));
1baca4ce 599
600 leftmost = rb_add_cached(&p->pushable_dl_tasks,
601 &rq->dl.pushable_dl_tasks_root,
602 __pushable_less);
2161573e 603 if (leftmost)
8ecca394 604 rq->dl.earliest_dl.next = p->dl.deadline;
605
606 if (!rq->dl.overloaded) {
607 dl_set_overload(rq);
608 rq->dl.overloaded = 1;
609 }
610}
611
612static void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
613{
614 struct dl_rq *dl_rq = &rq->dl;
615 struct rb_root_cached *root = &dl_rq->pushable_dl_tasks_root;
616 struct rb_node *leftmost;
617
618 if (RB_EMPTY_NODE(&p->pushable_dl_tasks))
619 return;
620
621 leftmost = rb_erase_cached(&p->pushable_dl_tasks, root);
622 if (leftmost)
623 dl_rq->earliest_dl.next = __node_2_pdl(leftmost)->dl.deadline;
1baca4ce 624
1baca4ce 625 RB_CLEAR_NODE(&p->pushable_dl_tasks);
1baca4ce 626
627 if (!has_pushable_dl_tasks(rq) && rq->dl.overloaded) {
628 dl_clear_overload(rq);
629 rq->dl.overloaded = 0;
630 }
631}
632
633static int push_dl_task(struct rq *rq);
634
635static inline bool need_pull_dl_task(struct rq *rq, struct task_struct *prev)
636{
120455c5 637 return rq->online && dl_task(prev);
638}
639
640static DEFINE_PER_CPU(struct balance_callback, dl_push_head);
641static DEFINE_PER_CPU(struct balance_callback, dl_pull_head);
642
643static void push_dl_tasks(struct rq *);
9916e214 644static void pull_dl_task(struct rq *);
e3fca9e7 645
02d8ec94 646static inline void deadline_queue_push_tasks(struct rq *rq)
dc877341 647{
648 if (!has_pushable_dl_tasks(rq))
649 return;
650
651 queue_balance_callback(rq, &per_cpu(dl_push_head, rq->cpu), push_dl_tasks);
652}
653
02d8ec94 654static inline void deadline_queue_pull_task(struct rq *rq)
655{
656 queue_balance_callback(rq, &per_cpu(dl_pull_head, rq->cpu), pull_dl_task);
657}
658
659static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
660
a649f237 661static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
662{
663 struct rq *later_rq = NULL;
59d06cea 664 struct dl_bw *dl_b;
665
666 later_rq = find_lock_later_rq(p, rq);
667 if (!later_rq) {
668 int cpu;
669
670 /*
671 * If we cannot preempt any rq, fall back to pick any
97fb7a0a 672 * online CPU:
fa9c9d10 673 */
3bd37062 674 cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
675 if (cpu >= nr_cpu_ids) {
676 /*
97fb7a0a 677 * Failed to find any suitable CPU.
678 * The task will never come back!
679 */
09348d75 680 WARN_ON_ONCE(dl_bandwidth_enabled());
681
682 /*
683 * If admission control is disabled we
684 * try a little harder to let the task
685 * run.
686 */
687 cpu = cpumask_any(cpu_active_mask);
688 }
689 later_rq = cpu_rq(cpu);
690 double_lock_balance(rq, later_rq);
691 }
692
693 if (p->dl.dl_non_contending || p->dl.dl_throttled) {
694 /*
695 * Inactive timer is armed (or callback is running, but
696 * waiting for us to release rq locks). In any case, when it
697 * will fire (or continue), it will see running_bw of this
698 * task migrated to later_rq (and correctly handle it).
699 */
700 sub_running_bw(&p->dl, &rq->dl);
701 sub_rq_bw(&p->dl, &rq->dl);
702
703 add_rq_bw(&p->dl, &later_rq->dl);
704 add_running_bw(&p->dl, &later_rq->dl);
705 } else {
706 sub_rq_bw(&p->dl, &rq->dl);
707 add_rq_bw(&p->dl, &later_rq->dl);
708 }
709
710 /*
711 * And we finally need to fixup root_domain(s) bandwidth accounting,
712 * since p is still hanging out in the old (now moved to default) root
713 * domain.
714 */
715 dl_b = &rq->rd->dl_bw;
716 raw_spin_lock(&dl_b->lock);
717 __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
718 raw_spin_unlock(&dl_b->lock);
719
720 dl_b = &later_rq->rd->dl_bw;
721 raw_spin_lock(&dl_b->lock);
722 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
723 raw_spin_unlock(&dl_b->lock);
724
fa9c9d10 725 set_task_cpu(p, later_rq->cpu);
726 double_unlock_balance(later_rq, rq);
727
728 return later_rq;
729}
730
731#else
732
733static inline
734void enqueue_pushable_dl_task(struct rq *rq, struct task_struct *p)
735{
736}
737
738static inline
739void dequeue_pushable_dl_task(struct rq *rq, struct task_struct *p)
740{
741}
742
743static inline
744void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
745{
746}
747
748static inline
749void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
750{
751}
752
02d8ec94 753static inline void deadline_queue_push_tasks(struct rq *rq)
dc877341 754{
755}
756
02d8ec94 757static inline void deadline_queue_pull_task(struct rq *rq)
758{
759}
760#endif /* CONFIG_SMP */
761
762static void
763enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags);
aab03e05 764static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags);
63ba8422 765static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags);
e23edc86 766static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p, int flags);
aab03e05 767
768static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se,
769 struct rq *rq)
770{
771 /* for non-boosted task, pi_of(dl_se) == dl_se */
772 dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
773 dl_se->runtime = pi_of(dl_se)->dl_runtime;
774}
775
776/*
777 * We are being explicitly informed that a new instance is starting,
778 * and this means that:
779 * - the absolute deadline of the entity has to be placed at
780 * current time + relative deadline;
781 * - the runtime of the entity has to be set to the maximum value.
782 *
 783 * The capability of specifying such an event is useful whenever a -deadline
 784 * entity wants to (try to!) synchronize its behaviour with that of the
 785 * scheduler, and to (try to!) reconcile itself with its own scheduling
786 * parameters.
787 */
98b0a857 788static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se)
789{
790 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
791 struct rq *rq = rq_of_dl_rq(dl_rq);
792
2279f540 793 WARN_ON(is_dl_boosted(dl_se));
794 WARN_ON(dl_time_before(rq_clock(rq), dl_se->deadline));
795
796 /*
797 * We are racing with the deadline timer. So, do nothing because
798 * the deadline timer handler will take care of properly recharging
799 * the runtime and postponing the deadline
800 */
801 if (dl_se->dl_throttled)
802 return;
803
804 /*
805 * We use the regular wall clock time to set deadlines in the
806 * future; in fact, we must consider execution overheads (time
807 * spent on hardirq context, etc.).
808 */
96458e7f 809 replenish_dl_new_period(dl_se, rq);
810}
811
812/*
 813 * Pure Earliest Deadline First (EDF) scheduling does not deal with the
 814 * possibility of an entity lasting more than what it declared, and thus
 815 * exhausting its runtime.
 816 *
 817 * Here we are interested in making runtime overrun possible, but we do
 818 * not want an entity that is misbehaving to affect the scheduling of all
 819 * other entities.
 820 * Therefore, a budgeting strategy called Constant Bandwidth Server (CBS)
 821 * is used, in order to confine each entity within its own bandwidth.
 822 *
 823 * This function deals exactly with that, and ensures that when the runtime
 824 * of an entity is replenished, its deadline is also postponed. That ensures
 825 * the overrunning entity can't interfere with other entities in the system and
 826 * can't make them miss their deadlines. Reasons why this kind of overrun
 827 * could happen are, typically, an entity voluntarily trying to overcome its
1b09d29b 828 * runtime, or it just underestimated it during sched_setattr().
aab03e05 829 */
2279f540 830static void replenish_dl_entity(struct sched_dl_entity *dl_se)
831{
832 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
833 struct rq *rq = rq_of_dl_rq(dl_rq);
834
09348d75 835 WARN_ON_ONCE(pi_of(dl_se)->dl_runtime <= 0);
836
837 /*
838 * This could be the case for a !-dl task that is boosted.
839 * Just go with full inherited parameters.
840 */
841 if (dl_se->dl_deadline == 0)
842 replenish_dl_new_period(dl_se, rq);
2d3d891d 843
844 if (dl_se->dl_yielded && dl_se->runtime > 0)
845 dl_se->runtime = 0;
846
847 /*
848 * We keep moving the deadline away until we get some
849 * available runtime for the entity. This ensures correct
850 * handling of situations where the runtime overrun is
 851 * arbitrarily large.
852 */
853 while (dl_se->runtime <= 0) {
854 dl_se->deadline += pi_of(dl_se)->dl_period;
855 dl_se->runtime += pi_of(dl_se)->dl_runtime;
856 }
857
858 /*
859 * At this point, the deadline really should be "in
860 * the future" with respect to rq->clock. If it's
861 * not, we are, for some reason, lagging too much!
 862 * Anyway, after having warned userspace about that,
 863 * we still try to keep things running by
864 * resetting the deadline and the budget of the
865 * entity.
866 */
867 if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
c219b7dd 868 printk_deferred_once("sched: DL replenish lagged too much\n");
96458e7f 869 replenish_dl_new_period(dl_se, rq);
aab03e05 870 }
871
872 if (dl_se->dl_yielded)
873 dl_se->dl_yielded = 0;
874 if (dl_se->dl_throttled)
875 dl_se->dl_throttled = 0;
876}
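/*
 * Worked example (illustrative): with dl_runtime = 10ms and
 * dl_period = 100ms, an entity that ends up with runtime = -15ms needs two
 * passes through the loop above: after the first, runtime = -5ms and the
 * deadline has moved 100ms away; after the second, runtime = 5ms and the
 * deadline has moved 200ms away, so the overrun is paid for by postponement.
 */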
877
878/*
879 * Here we check if --at time t-- an entity (which is probably being
880 * [re]activated or, in general, enqueued) can use its remaining runtime
881 * and its current deadline _without_ exceeding the bandwidth it is
882 * assigned (function returns true if it can't). We are in fact applying
883 * one of the CBS rules: when a task wakes up, if the residual runtime
884 * over residual deadline fits within the allocated bandwidth, then we
885 * can keep the current (absolute) deadline and residual budget without
886 * disrupting the schedulability of the system. Otherwise, we should
887 * refill the runtime and set the deadline a period in the future,
888 * because keeping the current (absolute) deadline of the task would
712e5e34 889 * result in breaking guarantees promised to other tasks (refer to
d6a3b247 890 * Documentation/scheduler/sched-deadline.rst for more information).
891 *
892 * This function returns true if:
893 *
2317d5f1 894 * runtime / (deadline - t) > dl_runtime / dl_deadline ,
895 *
896 * IOW we can't recycle current parameters.
755378a4 897 *
2317d5f1 898 * Notice that the bandwidth check is done against the deadline. For
755378a4 899 * a task with deadline equal to period this is the same as using
2317d5f1 900 * dl_period instead of dl_deadline in the equation above.
aab03e05 901 */
2279f540 902static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t)
903{
904 u64 left, right;
905
906 /*
907 * left and right are the two sides of the equation above,
908 * after a bit of shuffling to use multiplications instead
909 * of divisions.
910 *
911 * Note that none of the time values involved in the two
912 * multiplications are absolute: dl_deadline and dl_runtime
913 * are the relative deadline and the maximum runtime of each
914 * instance, runtime is the runtime left for the last instance
915 * and (deadline - t), since t is rq->clock, is the time left
916 * to the (absolute) deadline. Even if overflowing the u64 type
917 * is very unlikely to occur in both cases, here we scale down
918 * as we want to avoid that risk at all. Scaling down by 10
919 * means that we reduce granularity to 1us. We are fine with it,
920 * since this is only a true/false check and, anyway, thinking
921 * of anything below microseconds resolution is actually fiction
922 * (but still we want to give the user that illusion >;).
923 */
2279f540 924 left = (pi_of(dl_se)->dl_deadline >> DL_SCALE) * (dl_se->runtime >> DL_SCALE);
332ac17e 925 right = ((dl_se->deadline - t) >> DL_SCALE) *
2279f540 926 (pi_of(dl_se)->dl_runtime >> DL_SCALE);
927
928 return dl_time_before(right, left);
929}
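/*
 * Worked example (illustrative): dl_runtime = 10ms, dl_deadline = 100ms,
 * and at wakeup 4ms of runtime are left with 30ms to the old absolute
 * deadline. Then runtime / (deadline - t) = 4/30 ~= 0.13 is larger than
 * dl_runtime / dl_deadline = 0.10, so dl_entity_overflow() returns true
 * and the entity gets a fresh deadline and a full runtime instead of
 * recycling the old pair.
 */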
930
931/*
 932 * Revised wakeup rule [1]: For self-suspending tasks, rather than
 933 * re-initializing the task's runtime and deadline, the revised wakeup
 934 * rule adjusts the task's runtime to avoid the task overrunning its
 935 * density.
aab03e05 936 *
937 * Reasoning: a task may overrun the density if:
938 * runtime / (deadline - t) > dl_runtime / dl_deadline
939 *
940 * Therefore, runtime can be adjusted to:
941 * runtime = (dl_runtime / dl_deadline) * (deadline - t)
942 *
 943 * In such a way that runtime will be equal to the maximum density
944 * the task can use without breaking any rule.
945 *
946 * [1] Luca Abeni, Giuseppe Lipari, and Juri Lelli. 2015. Constant
947 * bandwidth server revisited. SIGBED Rev. 11, 4 (January 2015), 19-24.
948 */
949static void
950update_dl_revised_wakeup(struct sched_dl_entity *dl_se, struct rq *rq)
951{
952 u64 laxity = dl_se->deadline - rq_clock(rq);
953
954 /*
955 * If the task has deadline < period, and the deadline is in the past,
956 * it should already be throttled before this check.
957 *
958 * See update_dl_entity() comments for further details.
959 */
960 WARN_ON(dl_time_before(dl_se->deadline, rq_clock(rq)));
961
962 dl_se->runtime = (dl_se->dl_density * laxity) >> BW_SHIFT;
963}
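/*
 * Worked example (illustrative): dl_runtime = 10ms, dl_deadline = 50ms,
 * so dl_density ~= 0.2 in BW_SHIFT fixed point. If the task wakes up with
 * 20ms of laxity left to its old deadline, its runtime is clipped to
 * 0.2 * 20ms = 4ms, the most it can consume by that deadline without
 * exceeding its declared density.
 */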
964
965/*
966 * Regarding the deadline, a task with implicit deadline has a relative
967 * deadline == relative period. A task with constrained deadline has a
968 * relative deadline <= relative period.
969 *
970 * We support constrained deadline tasks. However, there are some restrictions
971 * applied only for tasks which do not have an implicit deadline. See
972 * update_dl_entity() to know more about such restrictions.
973 *
974 * The dl_is_implicit() returns true if the task has an implicit deadline.
975 */
976static inline bool dl_is_implicit(struct sched_dl_entity *dl_se)
977{
978 return dl_se->dl_deadline == dl_se->dl_period;
979}
980
981/*
982 * When a deadline entity is placed in the runqueue, its runtime and deadline
983 * might need to be updated. This is done by a CBS wake up rule. There are two
984 * different rules: 1) the original CBS; and 2) the Revisited CBS.
985 *
986 * When the task is starting a new period, the Original CBS is used. In this
987 * case, the runtime is replenished and a new absolute deadline is set.
988 *
 989 * When a task is queued before the beginning of the next period, using the
 990 * remaining runtime and deadline could make the entity overflow, see
991 * dl_entity_overflow() to find more about runtime overflow. When such case
992 * is detected, the runtime and deadline need to be updated.
993 *
994 * If the task has an implicit deadline, i.e., deadline == period, the Original
 995 * CBS is applied. The runtime is replenished and a new absolute deadline is
996 * set, as in the previous cases.
997 *
998 * However, the Original CBS does not work properly for tasks with
999 * deadline < period, which are said to have a constrained deadline. By
1000 * applying the Original CBS, a constrained deadline task would be able to run
1001 * runtime/deadline in a period. With deadline < period, the task would
1002 * overrun the runtime/period allowed bandwidth, breaking the admission test.
1003 *
 1004 * In order to prevent this misbehaviour, the Revisited CBS is used for
1005 * constrained deadline tasks when a runtime overflow is detected. In the
1006 * Revisited CBS, rather than replenishing & setting a new absolute deadline,
1007 * the remaining runtime of the task is reduced to avoid runtime overflow.
 1008 * Please refer to the comments of the update_dl_revised_wakeup() function
 1009 * to find more about the Revisited CBS rule.
aab03e05 1010 */
2279f540 1011static void update_dl_entity(struct sched_dl_entity *dl_se)
aab03e05 1012{
63ba8422 1013 struct rq *rq = rq_of_dl_se(dl_se);
aab03e05 1014
aab03e05 1015 if (dl_time_before(dl_se->deadline, rq_clock(rq)) ||
2279f540 1016 dl_entity_overflow(dl_se, rq_clock(rq))) {
1017
1018 if (unlikely(!dl_is_implicit(dl_se) &&
1019 !dl_time_before(dl_se->deadline, rq_clock(rq)) &&
2279f540 1020 !is_dl_boosted(dl_se))) {
1021 update_dl_revised_wakeup(dl_se, rq);
1022 return;
1023 }
1024
96458e7f 1025 replenish_dl_new_period(dl_se, rq);
1026 }
1027}
1028
1029static inline u64 dl_next_period(struct sched_dl_entity *dl_se)
1030{
1031 return dl_se->deadline - dl_se->dl_deadline + dl_se->dl_period;
1032}
1033
1034/*
1035 * If the entity depleted all its runtime, and if we want it to sleep
1036 * while waiting for some new execution time to become available, we
5ac69d37 1037 * set the bandwidth replenishment timer to the replenishment instant
1038 * and try to activate it.
1039 *
1040 * Notice that it is important for the caller to know if the timer
1041 * actually started or not (i.e., the replenishment instant is in
1042 * the future or in the past).
1043 */
63ba8422 1044static int start_dl_timer(struct sched_dl_entity *dl_se)
aab03e05 1045{
a649f237 1046 struct hrtimer *timer = &dl_se->dl_timer;
1047 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1048 struct rq *rq = rq_of_dl_rq(dl_rq);
aab03e05 1049 ktime_t now, act;
1050 s64 delta;
1051
5cb9eaa3 1052 lockdep_assert_rq_held(rq);
a649f237 1053
1054 /*
1055 * We want the timer to fire at the deadline, but considering
1056 * that it is actually coming from rq->clock and not from
1057 * hrtimer's time base reading.
1058 */
5ac69d37 1059 act = ns_to_ktime(dl_next_period(dl_se));
a649f237 1060 now = hrtimer_cb_get_time(timer);
1061 delta = ktime_to_ns(now) - rq_clock(rq);
1062 act = ktime_add_ns(act, delta);
1063
1064 /*
1065 * If the expiry time already passed, e.g., because the value
1066 * chosen as the deadline is too small, don't even try to
1067 * start the timer in the past!
1068 */
1069 if (ktime_us_delta(act, now) < 0)
1070 return 0;
1071
1072 /*
1073 * !enqueued will guarantee another callback; even if one is already in
1074 * progress. This ensures a balanced {get,put}_task_struct().
1075 *
1076 * The race against __run_timer() clearing the enqueued state is
1077 * harmless because we're holding task_rq()->lock, therefore the timer
1078 * expiring after we've done the check will wait on its task_rq_lock()
1079 * and observe our state.
1080 */
1081 if (!hrtimer_is_queued(timer)) {
1082 if (!dl_server(dl_se))
1083 get_task_struct(dl_task_of(dl_se));
d5096aa6 1084 hrtimer_start(timer, act, HRTIMER_MODE_ABS_HARD);
a649f237 1085 }
aab03e05 1086
cc9684d3 1087 return 1;
1088}
1089
1090static void __push_dl_task(struct rq *rq, struct rq_flags *rf)
1091{
1092#ifdef CONFIG_SMP
1093 /*
1094 * Queueing this task back might have overloaded rq, check if we need
1095 * to kick someone away.
1096 */
1097 if (has_pushable_dl_tasks(rq)) {
1098 /*
1099 * Nothing relies on rq->lock after this, so its safe to drop
1100 * rq->lock.
1101 */
1102 rq_unpin_lock(rq, rf);
1103 push_dl_task(rq);
1104 rq_repin_lock(rq, rf);
1105 }
1106#endif
1107}
1108
1109/*
1110 * This is the bandwidth enforcement timer callback. If here, we know
1111 * a task is not on its dl_rq, since the fact that the timer was running
1112 * means the task is throttled and needs a runtime replenishment.
1113 *
1114 * However, what we actually do depends on the fact the task is active,
1115 * (it is on its rq) or has been removed from there by a call to
1116 * dequeue_task_dl(). In the former case we must issue the runtime
1117 * replenishment and add the task back to the dl_rq; in the latter, we just
1118 * do nothing but clearing dl_throttled, so that runtime and deadline
1119 * updating (and the queueing back to dl_rq) will be done by the
1120 * next call to enqueue_task_dl().
1121 */
1122static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
1123{
1124 struct sched_dl_entity *dl_se = container_of(timer,
1125 struct sched_dl_entity,
1126 dl_timer);
63ba8422 1127 struct task_struct *p;
eb580751 1128 struct rq_flags rf;
0f397f2c 1129 struct rq *rq;
3960c8c0 1130
1131 if (dl_server(dl_se)) {
1132 struct rq *rq = rq_of_dl_se(dl_se);
1133 struct rq_flags rf;
1134
1135 rq_lock(rq, &rf);
1136 if (dl_se->dl_throttled) {
1137 sched_clock_tick();
1138 update_rq_clock(rq);
1139
1140 if (dl_se->server_has_tasks(dl_se)) {
1141 enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
1142 resched_curr(rq);
1143 __push_dl_task(rq, &rf);
1144 } else {
1145 replenish_dl_entity(dl_se);
1146 }
1147
1148 }
1149 rq_unlock(rq, &rf);
1150
1151 return HRTIMER_NORESTART;
1152 }
1153
1154 p = dl_task_of(dl_se);
eb580751 1155 rq = task_rq_lock(p, &rf);
0f397f2c 1156
aab03e05 1157 /*
a649f237 1158 * The task might have changed its scheduling policy to something
9846d50d 1159 * different than SCHED_DEADLINE (through switched_from_dl()).
a649f237 1160 */
209a0cbd 1161 if (!dl_task(p))
a649f237 1162 goto unlock;
a649f237 1163
1164 /*
1165 * The task might have been boosted by someone else and might be in the
 1166 * boosting/deboosting path, it's not throttled.
1167 */
2279f540 1168 if (is_dl_boosted(dl_se))
a649f237 1169 goto unlock;
a79ec89f 1170
fa9c9d10 1171 /*
1172 * Spurious timer due to start_dl_timer() race; or we already received
1173 * a replenishment from rt_mutex_setprio().
fa9c9d10 1174 */
a649f237 1175 if (!dl_se->dl_throttled)
fa9c9d10 1176 goto unlock;
1177
1178 sched_clock_tick();
1179 update_rq_clock(rq);
fa9c9d10 1180
1181 /*
1182 * If the throttle happened during sched-out; like:
1183 *
1184 * schedule()
1185 * deactivate_task()
1186 * dequeue_task_dl()
1187 * update_curr_dl()
1188 * start_dl_timer()
1189 * __dequeue_task_dl()
1190 * prev->on_rq = 0;
1191 *
1192 * We can be both throttled and !queued. Replenish the counter
1193 * but do not enqueue -- wait for our wakeup to do that.
1194 */
1195 if (!task_on_rq_queued(p)) {
2279f540 1196 replenish_dl_entity(dl_se);
a79ec89f
KT
1197 goto unlock;
1198 }
1199
1baca4ce 1200#ifdef CONFIG_SMP
c0c8c9fa 1201 if (unlikely(!rq->online)) {
1202 /*
1203 * If the runqueue is no longer available, migrate the
1204 * task elsewhere. This necessarily changes rq.
1205 */
9ef7e7e3 1206 lockdep_unpin_lock(__rq_lockp(rq), rf.cookie);
a649f237 1207 rq = dl_task_offline_migration(rq, p);
9ef7e7e3 1208 rf.cookie = lockdep_pin_lock(__rq_lockp(rq));
dcc3b5ff 1209 update_rq_clock(rq);
1210
1211 /*
1212 * Now that the task has been migrated to the new RQ and we
1213 * have that locked, proceed as normal and enqueue the task
1214 * there.
1215 */
c0c8c9fa 1216 }
61c7aca6 1217#endif
a649f237 1218
1219 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
1220 if (dl_task(rq->curr))
e23edc86 1221 wakeup_preempt_dl(rq, p, 0);
1222 else
1223 resched_curr(rq);
a649f237 1224
63ba8422 1225 __push_dl_task(rq, &rf);
a649f237 1226
aab03e05 1227unlock:
eb580751 1228 task_rq_unlock(rq, p, &rf);
aab03e05 1229
1230 /*
1231 * This can free the task_struct, including this hrtimer, do not touch
1232 * anything related to that after this.
1233 */
1234 put_task_struct(p);
1235
aab03e05
DF
1236 return HRTIMER_NORESTART;
1237}
1238
9e07d45c 1239static void init_dl_task_timer(struct sched_dl_entity *dl_se)
1240{
1241 struct hrtimer *timer = &dl_se->dl_timer;
1242
d5096aa6 1243 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1244 timer->function = dl_task_timer;
1245}
1246
1247/*
1248 * During the activation, CBS checks if it can reuse the current task's
1249 * runtime and period. If the deadline of the task is in the past, CBS
1250 * cannot use the runtime, and so it replenishes the task. This rule
1251 * works fine for implicit deadline tasks (deadline == period), and the
1252 * CBS was designed for implicit deadline tasks. However, a task with
c4969417 1253 * constrained deadline (deadline < period) might be awakened after the
1254 * deadline, but before the next period. In this case, replenishing the
1255 * task would allow it to run for runtime / deadline. As in this case
1256 * deadline < period, CBS enables a task to run for more than the
1257 * runtime / period. In a very loaded system, this can cause a domino
1258 * effect, making other tasks miss their deadlines.
1259 *
1260 * To avoid this problem, in the activation of a constrained deadline
1261 * task after the deadline but before the next period, throttle the
 1262 * task and set the replenishing timer to the beginning of the next period,
1263 * unless it is boosted.
1264 */
1265static inline void dl_check_constrained_dl(struct sched_dl_entity *dl_se)
1266{
63ba8422 1267 struct rq *rq = rq_of_dl_se(dl_se);
1268
1269 if (dl_time_before(dl_se->deadline, rq_clock(rq)) &&
1270 dl_time_before(rq_clock(rq), dl_next_period(dl_se))) {
63ba8422 1271 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se)))
1272 return;
1273 dl_se->dl_throttled = 1;
1274 if (dl_se->runtime > 0)
1275 dl_se->runtime = 0;
1276 }
1277}
1278
aab03e05 1279static
6fab5410 1280int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
aab03e05 1281{
269ad801 1282 return (dl_se->runtime <= 0);
1283}
1284
c52f14d3 1285/*
1286 * This function implements the GRUB accounting rule. According to the
1287 * GRUB reclaiming algorithm, the runtime is not decreased as "dq = -dt",
1288 * but as "dq = -(max{u, (Umax - Uinact - Uextra)} / Umax) dt",
1289 * where u is the utilization of the task, Umax is the maximum reclaimable
1290 * utilization, Uinact is the (per-runqueue) inactive utilization, computed
1291 * as the difference between the "total runqueue utilization" and the
6a9d623a 1292 * "runqueue active utilization", and Uextra is the (per runqueue) extra
daec5798 1293 * reclaimable utilization.
1294 * Since rq->dl.running_bw and rq->dl.this_bw contain utilizations multiplied
1295 * by 2^BW_SHIFT, the result has to be shifted right by BW_SHIFT.
1296 * Since rq->dl.bw_ratio contains 1 / Umax multiplied by 2^RATIO_SHIFT, dl_bw
 1297 * is multiplied by rq->dl.bw_ratio and shifted right by RATIO_SHIFT.
1298 * Since delta is a 64 bit variable, to have an overflow its value should be
1299 * larger than 2^(64 - 20 - 8), which is more than 64 seconds. So, overflow is
1300 * not an issue here.
c52f14d3 1301 */
3febfc8a 1302static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se)
c52f14d3 1303{
9f0d1a50 1304 u64 u_act;
6a9d623a 1305 u64 u_inact = rq->dl.this_bw - rq->dl.running_bw; /* Utot - Uact */
c52f14d3 1306
9f0d1a50 1307 /*
1308 * Instead of computing max{u, (u_max - u_inact - u_extra)}, we
1309 * compare u_inact + u_extra with u_max - u, because u_inact + u_extra
1310 * can be larger than u_max. So, u_max - u_inact - u_extra would be
1311 * negative leading to wrong results.
9f0d1a50 1312 */
1313 if (u_inact + rq->dl.extra_bw > rq->dl.max_bw - dl_se->dl_bw)
1314 u_act = dl_se->dl_bw;
9f0d1a50 1315 else
6a9d623a 1316 u_act = rq->dl.max_bw - u_inact - rq->dl.extra_bw;
9f0d1a50 1317
6a9d623a 1318 u_act = (u_act * rq->dl.bw_ratio) >> RATIO_SHIFT;
9f0d1a50 1319 return (delta * u_act) >> BW_SHIFT;
1320}
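/*
 * Worked example (illustrative numbers, all in utilization units): with
 * max_bw = 0.95, u_inact = 0.10, extra_bw = 0.05 and a task with
 * dl_bw = 0.30, the else branch above picks u_act = 0.95 - 0.10 - 0.05
 * = 0.80; after scaling by bw_ratio = 1/0.95 this is ~0.84, so 10ms of
 * execution is charged as ~8.4ms of runtime, letting the task reclaim
 * bandwidth that is currently unused.
 */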
1321
1322static inline void
1323update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1324 int flags);
1325static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec)
aab03e05 1326{
63ba8422 1327 s64 scaled_delta_exec;
aab03e05 1328
5d69eca5 1329 if (unlikely(delta_exec <= 0)) {
1330 if (unlikely(dl_se->dl_yielded))
1331 goto throttle;
734ff2a7 1332 return;
48be3a67 1333 }
aab03e05 1334
1335 if (dl_entity_is_special(dl_se))
1336 return;
1337
1338 /*
1339 * For tasks that participate in GRUB, we implement GRUB-PA: the
1340 * spare reclaimed bandwidth is used to clock down frequency.
1341 *
1342 * For the others, we still need to scale reservation parameters
1343 * according to current frequency and CPU maximum capacity.
1344 */
1345 if (unlikely(dl_se->flags & SCHED_FLAG_RECLAIM)) {
63ba8422 1346 scaled_delta_exec = grub_reclaim(delta_exec, rq, dl_se);
07881166 1347 } else {
63ba8422 1348 int cpu = cpu_of(rq);
07881166 1349 unsigned long scale_freq = arch_scale_freq_capacity(cpu);
8ec59c0f 1350 unsigned long scale_cpu = arch_scale_cpu_capacity(cpu);
1351
1352 scaled_delta_exec = cap_scale(delta_exec, scale_freq);
1353 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu);
1354 }
1355
1356 dl_se->runtime -= scaled_delta_exec;
1357
1358throttle:
1359 if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) {
1019a359 1360 dl_se->dl_throttled = 1;
1361
1362 /* If requested, inform the user about runtime overruns. */
1363 if (dl_runtime_exceeded(dl_se) &&
1364 (dl_se->flags & SCHED_FLAG_DL_OVERRUN))
1365 dl_se->dl_overrun = 1;
1366
1367 dequeue_dl_entity(dl_se, 0);
1368 if (!dl_server(dl_se)) {
1369 update_stats_dequeue_dl(&rq->dl, dl_se, 0);
1370 dequeue_pushable_dl_task(rq, dl_task_of(dl_se));
1371 }
aab03e05 1372
1373 if (unlikely(is_dl_boosted(dl_se) || !start_dl_timer(dl_se))) {
1374 if (dl_server(dl_se))
1375 enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH);
1376 else
1377 enqueue_task_dl(rq, dl_task_of(dl_se), ENQUEUE_REPLENISH);
1378 }
1379
1380 if (!is_leftmost(dl_se, &rq->dl))
8875125e 1381 resched_curr(rq);
aab03e05 1382 }
1383
1384 /*
1385 * Because -- for now -- we share the rt bandwidth, we need to
1386 * account our runtime there too, otherwise actual rt tasks
1387 * would be able to exceed the shared quota.
1388 *
1389 * Account to the root rt group for now.
1390 *
1391 * The solution we're working towards is having the RT groups scheduled
1392 * using deadline servers -- however there's a few nasties to figure
1393 * out before that can happen.
1394 */
1395 if (rt_bandwidth_enabled()) {
1396 struct rt_rq *rt_rq = &rq->rt;
1397
1398 raw_spin_lock(&rt_rq->rt_runtime_lock);
1399 /*
1400 * We'll let actual RT tasks worry about the overflow here, we
1401 * have our own CBS to keep us inline; only account when RT
1402 * bandwidth is relevant.
1724813d 1403 */
1404 if (sched_rt_bandwidth_account(rt_rq))
1405 rt_rq->rt_time += delta_exec;
1406 raw_spin_unlock(&rt_rq->rt_runtime_lock);
1407 }
1408}
1409
1410void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec)
1411{
1412 update_curr_dl_se(dl_se->rq, dl_se, delta_exec);
1413}
1414
1415void dl_server_start(struct sched_dl_entity *dl_se)
1416{
1417 if (!dl_server(dl_se)) {
1418 dl_se->dl_server = 1;
1419 setup_new_dl_entity(dl_se);
1420 }
1421 enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP);
1422}
1423
1424void dl_server_stop(struct sched_dl_entity *dl_se)
1425{
1426 dequeue_dl_entity(dl_se, DEQUEUE_SLEEP);
1427}
1428
1429void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq,
1430 dl_server_has_tasks_f has_tasks,
1431 dl_server_pick_f pick)
1432{
1433 dl_se->rq = rq;
1434 dl_se->server_has_tasks = has_tasks;
1435 dl_se->server_pick = pick;
1436}
1437
1438/*
1439 * Update the current task's runtime statistics (provided it is still
1440 * a -deadline task and has not been removed from the dl_rq).
1441 */
1442static void update_curr_dl(struct rq *rq)
1443{
1444 struct task_struct *curr = rq->curr;
1445 struct sched_dl_entity *dl_se = &curr->dl;
1446 s64 delta_exec;
1447
1448 if (!dl_task(curr) || !on_dl_rq(dl_se))
1449 return;
1450
1451 /*
1452 * Consumed budget is computed considering the time as
1453 * observed by schedulable tasks (excluding time spent
1454 * in hardirq context, etc.). Deadlines are instead
1455 * computed using hard walltime. This seems to be the more
1456 * natural solution, but the full ramifications of this
1457 * approach need further study.
1458 */
1459 delta_exec = update_curr_common(rq);
1460 update_curr_dl_se(rq, dl_se, delta_exec);
1461}
1462
1463static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer)
1464{
1465 struct sched_dl_entity *dl_se = container_of(timer,
1466 struct sched_dl_entity,
1467 inactive_timer);
63ba8422 1468 struct task_struct *p = NULL;
1469 struct rq_flags rf;
1470 struct rq *rq;
1471
1472 if (!dl_server(dl_se)) {
1473 p = dl_task_of(dl_se);
1474 rq = task_rq_lock(p, &rf);
1475 } else {
1476 rq = dl_se->rq;
1477 rq_lock(rq, &rf);
1478 }
209a0cbd 1479
1480 sched_clock_tick();
1481 update_rq_clock(rq);
1482
1483 if (dl_server(dl_se))
1484 goto no_task;
1485
2f064a59 1486 if (!dl_task(p) || READ_ONCE(p->__state) == TASK_DEAD) {
1487 struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
1488
2f064a59 1489 if (READ_ONCE(p->__state) == TASK_DEAD && dl_se->dl_non_contending) {
1490 sub_running_bw(&p->dl, dl_rq_of_se(&p->dl));
1491 sub_rq_bw(&p->dl, dl_rq_of_se(&p->dl));
1492 dl_se->dl_non_contending = 0;
1493 }
1494
1495 raw_spin_lock(&dl_b->lock);
8c0944ce 1496 __dl_sub(dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
387e3130 1497 raw_spin_unlock(&dl_b->lock);
9e07d45c 1498 __dl_clear_params(dl_se);
1499
1500 goto unlock;
1501 }
1502
1503no_task:
1504 if (dl_se->dl_non_contending == 0)
1505 goto unlock;
1506
794a56eb 1507 sub_running_bw(dl_se, &rq->dl);
1508 dl_se->dl_non_contending = 0;
1509unlock:
1510
1511 if (!dl_server(dl_se)) {
1512 task_rq_unlock(rq, p, &rf);
1513 put_task_struct(p);
1514 } else {
1515 rq_unlock(rq, &rf);
1516 }
1517
1518 return HRTIMER_NORESTART;
1519}
1520
9e07d45c 1521static void init_dl_inactive_task_timer(struct sched_dl_entity *dl_se)
1522{
1523 struct hrtimer *timer = &dl_se->inactive_timer;
1524
850377a8 1525 hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
1526 timer->function = inactive_task_timer;
1527}
1528
1529#define __node_2_dle(node) \
1530 rb_entry((node), struct sched_dl_entity, rb_node)
1531
1532#ifdef CONFIG_SMP
1533
1534static void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1535{
1536 struct rq *rq = rq_of_dl_rq(dl_rq);
1537
1538 if (dl_rq->earliest_dl.curr == 0 ||
1539 dl_time_before(deadline, dl_rq->earliest_dl.curr)) {
1540 if (dl_rq->earliest_dl.curr == 0)
1541 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_HIGHER);
1baca4ce 1542 dl_rq->earliest_dl.curr = deadline;
d8206bb3 1543 cpudl_set(&rq->rd->cpudl, rq->cpu, deadline);
1544 }
1545}
1546
1547static void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline)
1548{
1549 struct rq *rq = rq_of_dl_rq(dl_rq);
1550
1551 /*
1552 * Since we may have removed our earliest (and/or next earliest)
 1553 * task, we must recompute them.
1554 */
1555 if (!dl_rq->dl_nr_running) {
1556 dl_rq->earliest_dl.curr = 0;
1557 dl_rq->earliest_dl.next = 0;
d8206bb3 1558 cpudl_clear(&rq->rd->cpudl, rq->cpu);
b13772f8 1559 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
1baca4ce 1560 } else {
1561 struct rb_node *leftmost = rb_first_cached(&dl_rq->root);
1562 struct sched_dl_entity *entry = __node_2_dle(leftmost);
1baca4ce 1563
1baca4ce 1564 dl_rq->earliest_dl.curr = entry->deadline;
d8206bb3 1565 cpudl_set(&rq->rd->cpudl, rq->cpu, entry->deadline);
1566 }
1567}
1568
1569#else
1570
1571static inline void inc_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1572static inline void dec_dl_deadline(struct dl_rq *dl_rq, u64 deadline) {}
1573
1574#endif /* CONFIG_SMP */
1575
1576static inline
1577void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1578{
1579 u64 deadline = dl_se->deadline;
1580
1baca4ce 1581 dl_rq->dl_nr_running++;
72465447 1582 add_nr_running(rq_of_dl_rq(dl_rq), 1);
1583
1584 inc_dl_deadline(dl_rq, deadline);
1585}
1586
1587static inline
1588void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
1589{
1590 WARN_ON(!dl_rq->dl_nr_running);
1591 dl_rq->dl_nr_running--;
72465447 1592 sub_nr_running(rq_of_dl_rq(dl_rq), 1);
1593
1594 dec_dl_deadline(dl_rq, dl_se->deadline);
1595}
1596
1597static inline bool __dl_less(struct rb_node *a, const struct rb_node *b)
1598{
1599 return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline);
1600}
1601
1602static inline struct sched_statistics *
1603__schedstats_from_dl_se(struct sched_dl_entity *dl_se)
1604{
1605 return &dl_task_of(dl_se)->stats;
1606}
1607
1608static inline void
1609update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1610{
1611 struct sched_statistics *stats;
1612
1613 if (!schedstat_enabled())
1614 return;
1615
1616 stats = __schedstats_from_dl_se(dl_se);
1617 __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1618}
1619
1620static inline void
1621update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1622{
1623 struct sched_statistics *stats;
1624
1625 if (!schedstat_enabled())
1626 return;
1627
1628 stats = __schedstats_from_dl_se(dl_se);
1629 __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1630}
1631
1632static inline void
1633update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se)
1634{
1635 struct sched_statistics *stats;
1636
1637 if (!schedstat_enabled())
1638 return;
1639
1640 stats = __schedstats_from_dl_se(dl_se);
1641 __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats);
1642}
1643
1644static inline void
1645update_stats_enqueue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1646 int flags)
1647{
1648 if (!schedstat_enabled())
1649 return;
1650
1651 if (flags & ENQUEUE_WAKEUP)
1652 update_stats_enqueue_sleeper_dl(dl_rq, dl_se);
1653}
1654
1655static inline void
1656update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se,
1657 int flags)
1658{
1659 struct task_struct *p = dl_task_of(dl_se);
1660
1661 if (!schedstat_enabled())
1662 return;
1663
1664 if ((flags & DEQUEUE_SLEEP)) {
1665 unsigned int state;
1666
1667 state = READ_ONCE(p->__state);
1668 if (state & TASK_INTERRUPTIBLE)
1669 __schedstat_set(p->stats.sleep_start,
1670 rq_clock(rq_of_dl_rq(dl_rq)));
1671
1672 if (state & TASK_UNINTERRUPTIBLE)
1673 __schedstat_set(p->stats.block_start,
1674 rq_clock(rq_of_dl_rq(dl_rq)));
1675 }
1676}
1677
aab03e05
DF
1678static void __enqueue_dl_entity(struct sched_dl_entity *dl_se)
1679{
1680 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
aab03e05 1681
09348d75 1682 WARN_ON_ONCE(!RB_EMPTY_NODE(&dl_se->rb_node));
aab03e05 1683
8ecca394 1684 rb_add_cached(&dl_se->rb_node, &dl_rq->root, __dl_less);
aab03e05 1685
1baca4ce 1686 inc_dl_tasks(dl_se, dl_rq);
aab03e05
DF
1687}
1688
1689static void __dequeue_dl_entity(struct sched_dl_entity *dl_se)
1690{
1691 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1692
1693 if (RB_EMPTY_NODE(&dl_se->rb_node))
1694 return;
1695
2161573e 1696 rb_erase_cached(&dl_se->rb_node, &dl_rq->root);
8ecca394 1697
aab03e05
DF
1698 RB_CLEAR_NODE(&dl_se->rb_node);
1699
1baca4ce 1700 dec_dl_tasks(dl_se, dl_rq);
aab03e05
DF
1701}
1702
1703static void
2279f540 1704enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags)
aab03e05 1705{
09348d75 1706 WARN_ON_ONCE(on_dl_rq(dl_se));
aab03e05 1707
b5eb4a5f
YS
1708 update_stats_enqueue_dl(dl_rq_of_se(dl_se), dl_se, flags);
1709
2f7a0f58
PZ
1710 /*
1711 * Check if a constrained deadline task was activated
1712 * after the deadline but before the next period.
1713 * If that is the case, the task will be throttled and
1714 * the replenishment timer will be set to the next period.
1715 */
1716 if (!dl_se->dl_throttled && !dl_is_implicit(dl_se))
1717 dl_check_constrained_dl(dl_se);
1718
1719 if (flags & (ENQUEUE_RESTORE|ENQUEUE_MIGRATING)) {
1720 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1721
1722 add_rq_bw(dl_se, dl_rq);
1723 add_running_bw(dl_se, dl_rq);
1724 }
1725
1726 /*
1727 * If p is throttled, we do not enqueue it. In fact, if it exhausted
1728 * its budget it needs a replenishment and, since it now is on
1729 * its rq, the bandwidth timer callback (which clearly has not
1730 * run yet) will take care of this.
1731 * However, the active utilization does not depend on the fact
1732 * that the task is on the runqueue or not (but depends on the
1733 * task's state - in GRUB parlance, "inactive" vs "active contending").
1734 * In other words, even if a task is throttled its utilization must
1735 * be counted in the active utilization; hence, we need to call
1736 * add_running_bw().
1737 */
1738 if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
1739 if (flags & ENQUEUE_WAKEUP)
1740 task_contending(dl_se, flags);
1741
1742 return;
1743 }
1744
aab03e05
DF
1745 /*
1746 * If this is a wakeup or a new instance, the scheduling
1747 * parameters of the task might need updating. Otherwise,
1748 * we want a replenishment of its runtime.
1749 */
e36d8677 1750 if (flags & ENQUEUE_WAKEUP) {
8fd27231 1751 task_contending(dl_se, flags);
2279f540 1752 update_dl_entity(dl_se);
e36d8677 1753 } else if (flags & ENQUEUE_REPLENISH) {
2279f540 1754 replenish_dl_entity(dl_se);
295d6d5e 1755 } else if ((flags & ENQUEUE_RESTORE) &&
63ba8422 1756 dl_time_before(dl_se->deadline, rq_clock(rq_of_dl_se(dl_se)))) {
295d6d5e 1757 setup_new_dl_entity(dl_se);
e36d8677 1758 }
aab03e05
DF
1759
1760 __enqueue_dl_entity(dl_se);
1761}
1762
2f7a0f58 1763static void dequeue_dl_entity(struct sched_dl_entity *dl_se, int flags)
aab03e05
DF
1764{
1765 __dequeue_dl_entity(dl_se);
2f7a0f58
PZ
1766
1767 if (flags & (DEQUEUE_SAVE|DEQUEUE_MIGRATING)) {
1768 struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
1769
1770 sub_running_bw(dl_se, dl_rq);
1771 sub_rq_bw(dl_se, dl_rq);
1772 }
1773
1774 /*
1775 * This check allows us to start the inactive timer (or to immediately
1776 * decrease the active utilization, if needed) in two cases:
1777 * when the task blocks and when it is terminating
1778 * (p->state == TASK_DEAD). We can handle the two cases in the same
1779 * way, because from GRUB's point of view the same thing is happening
1780 * (the task moves from "active contending" to "active non contending"
1781 * or "inactive")
1782 */
1783 if (flags & DEQUEUE_SLEEP)
1784 task_non_contending(dl_se);
aab03e05
DF
1785}
1786
1787static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1788{
2279f540 1789 if (is_dl_boosted(&p->dl)) {
feff2e65
DBO
1790 /*
1791 * Because of delays in the detection of the overrun of a
1792 * thread's runtime, it might be the case that a thread
1793 * goes to sleep in an rt mutex with negative runtime. As
1794 * a consequence, the thread will be throttled.
1795 *
1796 * While waiting for the mutex, this thread can also be
1797 * boosted via PI, resulting in a thread that is throttled
1798 * and boosted at the same time.
1799 *
1800 * In this case, the boost overrides the throttle.
1801 */
1802 if (p->dl.dl_throttled) {
1803 /*
1804 * The replenish timer needs to be canceled. No
1805 * problem if it fires concurrently: boosted threads
1806 * are ignored in dl_task_timer().
1807 */
1808 hrtimer_try_to_cancel(&p->dl.dl_timer);
1809 p->dl.dl_throttled = 0;
1810 }
64be6f1f
JL
1811 } else if (!dl_prio(p->normal_prio)) {
1812 /*
46fcc4b0
LS
1813 * Special case in which we have a !SCHED_DEADLINE task that is going
1814 * to be deboosted, but exceeds its runtime while doing so. No point in
1815 * replenishing it, as it's going to return to its original
1816 * scheduling class after this. If it has been throttled, we need to
1817 * clear the flag, otherwise the task may wake up as throttled after
1818 * being boosted again with no means to replenish the runtime and clear
1819 * the throttle.
64be6f1f 1820 */
46fcc4b0 1821 p->dl.dl_throttled = 0;
ddfc7103
JL
1822 if (!(flags & ENQUEUE_REPLENISH))
1823 printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n",
1824 task_pid_nr(p));
1825
64be6f1f
JL
1826 return;
1827 }
2d3d891d 1828
b5eb4a5f
YS
1829 check_schedstat_required();
1830 update_stats_wait_start_dl(dl_rq_of_se(&p->dl), &p->dl);
1831
2f7a0f58
PZ
1832 if (p->on_rq == TASK_ON_RQ_MIGRATING)
1833 flags |= ENQUEUE_MIGRATING;
1834
2279f540 1835 enqueue_dl_entity(&p->dl, flags);
1baca4ce 1836
63ba8422
PZ
1837 if (dl_server(&p->dl))
1838 return;
1839
2f7a0f58 1840 if (!task_current(rq, p) && !p->dl.dl_throttled && p->nr_cpus_allowed > 1)
1baca4ce 1841 enqueue_pushable_dl_task(rq, p);
aab03e05
DF
1842}
1843
aab03e05
DF
1844static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
1845{
1846 update_curr_dl(rq);
e36d8677 1847
2f7a0f58
PZ
1848 if (p->on_rq == TASK_ON_RQ_MIGRATING)
1849 flags |= DEQUEUE_MIGRATING;
e36d8677 1850
63ba8422
PZ
1851 dequeue_dl_entity(&p->dl, flags);
1852 if (!p->dl.dl_throttled && !dl_server(&p->dl))
1853 dequeue_pushable_dl_task(rq, p);
aab03e05
DF
1854}
1855
1856/*
1857 * The yield semantics for -deadline tasks are:
1858 *
1859 * get off the CPU until our next instance, with
1860 * a new runtime. This is of little use now, since we
1861 * don't have a bandwidth reclaiming mechanism. Anyway,
1862 * bandwidth reclaiming is planned for the future, and
1863 * yield_task_dl will indicate that some spare budget
1864 * is available for other task instances to use.
1865 */
1866static void yield_task_dl(struct rq *rq)
1867{
aab03e05
DF
1868 /*
1869 * We make the task go to sleep until its current deadline by
1870 * forcing its runtime to zero. This way, update_curr_dl() stops
1871 * it and the bandwidth timer will wake it up and will give it
5bfd126e 1872 * new scheduling parameters (thanks to dl_yielded=1).
aab03e05 1873 */
48be3a67
PZ
1874 rq->curr->dl.dl_yielded = 1;
1875
6f1607f1 1876 update_rq_clock(rq);
aab03e05 1877 update_curr_dl(rq);
44fb085b
WL
1878 /*
1879 * Tell update_rq_clock() that we've just updated,
1880 * so we don't do microscopic update in schedule()
1881 * and double the fastpath cost.
1882 */
adcc8da8 1883 rq_clock_skip_update(rq);
aab03e05
DF
1884}
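/*
 * Editor's note (illustrative sketch, not part of the original source):
 * from user space the semantics above are reached through sched_yield().
 * A minimal sketch, assuming the calling thread has already been switched
 * to SCHED_DEADLINE (e.g. via sched_setattr(); see the sketch after
 * __checkparam_dl() further down); do_work() is a hypothetical helper.
 */
#if 0	/* user-space example, not kernel code */
#include <sched.h>

extern void do_work(void);	/* hypothetical per-instance workload */

static void dl_instance_loop(void)
{
	for (;;) {
		do_work();	/* finish this instance's work early */
		sched_yield();	/* surrender the remaining runtime until
				 * the next period (dl_yielded above) */
	}
}
#endif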
1885
1baca4ce
JL
1886#ifdef CONFIG_SMP
1887
973bee49
SX
1888static inline bool dl_task_is_earliest_deadline(struct task_struct *p,
1889 struct rq *rq)
1890{
1891 return (!rq->dl.dl_nr_running ||
1892 dl_time_before(p->dl.deadline,
1893 rq->dl.earliest_dl.curr));
1894}
1895
1baca4ce 1896static int find_later_rq(struct task_struct *task);
1baca4ce
JL
1897
1898static int
3aef1551 1899select_task_rq_dl(struct task_struct *p, int cpu, int flags)
1baca4ce
JL
1900{
1901 struct task_struct *curr;
b4118988 1902 bool select_rq;
1baca4ce
JL
1903 struct rq *rq;
1904
3aef1551 1905 if (!(flags & WF_TTWU))
1baca4ce
JL
1906 goto out;
1907
1908 rq = cpu_rq(cpu);
1909
1910 rcu_read_lock();
316c1608 1911 curr = READ_ONCE(rq->curr); /* unlocked access */
1baca4ce
JL
1912
1913 /*
1914 * If we are dealing with a -deadline task, we must
1915 * decide where to wake it up.
1916 * If it has a later deadline and the current task
1917 * on this rq can't move (provided the waking task
1918 * can!) we prefer to send it somewhere else. On the
1919 * other hand, if it has a shorter deadline, we
1920 * try to make it stay here, as it might be important.
1921 */
b4118988
LA
1922 select_rq = unlikely(dl_task(curr)) &&
1923 (curr->nr_cpus_allowed < 2 ||
1924 !dl_entity_preempt(&p->dl, &curr->dl)) &&
1925 p->nr_cpus_allowed > 1;
1926
1927 /*
1928 * Take the capacity of the CPU into account to
1929 * ensure it fits the requirement of the task.
1930 */
740cf8a7 1931 if (sched_asym_cpucap_active())
b4118988
LA
1932 select_rq |= !dl_task_fits_capacity(p, cpu);
1933
1934 if (select_rq) {
1baca4ce
JL
1935 int target = find_later_rq(p);
1936
9d514262 1937 if (target != -1 &&
973bee49 1938 dl_task_is_earliest_deadline(p, cpu_rq(target)))
1baca4ce
JL
1939 cpu = target;
1940 }
1941 rcu_read_unlock();
1942
1943out:
1944 return cpu;
1945}
1946
1327237a 1947static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
209a0cbd 1948{
2679a837 1949 struct rq_flags rf;
209a0cbd
LA
1950 struct rq *rq;
1951
2f064a59 1952 if (READ_ONCE(p->__state) != TASK_WAKING)
209a0cbd
LA
1953 return;
1954
1955 rq = task_rq(p);
1956 /*
1957 * Since p->state == TASK_WAKING, set_task_cpu() has been called
1958 * from try_to_wake_up(). Hence, p->pi_lock is locked, but
1959 * rq->lock is not... So, lock it
1960 */
2679a837 1961 rq_lock(rq, &rf);
8fd27231 1962 if (p->dl.dl_non_contending) {
b4da13aa 1963 update_rq_clock(rq);
794a56eb 1964 sub_running_bw(&p->dl, &rq->dl);
8fd27231
LA
1965 p->dl.dl_non_contending = 0;
1966 /*
1967 * If the timer handler is currently running and the
3b03706f 1968 * timer cannot be canceled, inactive_task_timer()
8fd27231
LA
1969 * will see that dl_non_contending is not set, and
1970 * will not touch the rq's active utilization,
1971 * so we are still safe.
1972 */
1973 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
1974 put_task_struct(p);
1975 }
794a56eb 1976 sub_rq_bw(&p->dl, &rq->dl);
2679a837 1977 rq_unlock(rq, &rf);
209a0cbd
LA
1978}
1979
1baca4ce
JL
1980static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
1981{
1982 /*
1983 * Current can't be migrated, useless to reschedule,
1984 * let's hope p can move out.
1985 */
4b53a341 1986 if (rq->curr->nr_cpus_allowed == 1 ||
3261ed0b 1987 !cpudl_find(&rq->rd->cpudl, rq->curr, NULL))
1baca4ce
JL
1988 return;
1989
1990 /*
1991 * p is migratable, so let's not schedule it and
1992 * see if it is pushed or pulled somewhere else.
1993 */
4b53a341 1994 if (p->nr_cpus_allowed != 1 &&
3261ed0b 1995 cpudl_find(&rq->rd->cpudl, p, NULL))
1baca4ce
JL
1996 return;
1997
8875125e 1998 resched_curr(rq);
1baca4ce
JL
1999}
2000
6e2df058
PZ
2001static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
2002{
2003 if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
2004 /*
2005 * This is OK, because current is on_cpu, which avoids it being
2006 * picked for load-balance; preemption/IRQs are still disabled,
2007 * avoiding further scheduler activity on it, and we've not yet
2008 * started the picking loop.
2009 */
2010 rq_unpin_lock(rq, rf);
2011 pull_dl_task(rq);
2012 rq_repin_lock(rq, rf);
2013 }
2014
2015 return sched_stop_runnable(rq) || sched_dl_runnable(rq);
2016}
1baca4ce
JL
2017#endif /* CONFIG_SMP */
2018
aab03e05
DF
2019/*
2020 * Only called when both the current and waking task are -deadline
2021 * tasks.
2022 */
e23edc86 2023static void wakeup_preempt_dl(struct rq *rq, struct task_struct *p,
aab03e05
DF
2024 int flags)
2025{
1baca4ce 2026 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) {
8875125e 2027 resched_curr(rq);
1baca4ce
JL
2028 return;
2029 }
2030
2031#ifdef CONFIG_SMP
2032 /*
2033 * In the unlikely case current and p have the same deadline
2034 * let us try to decide what's the best thing to do...
2035 */
332ac17e
DF
2036 if ((p->dl.deadline == rq->curr->dl.deadline) &&
2037 !test_tsk_need_resched(rq->curr))
1baca4ce
JL
2038 check_preempt_equal_dl(rq, p);
2039#endif /* CONFIG_SMP */
aab03e05
DF
2040}
2041
2042#ifdef CONFIG_SCHED_HRTICK
63ba8422 2043static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
aab03e05 2044{
63ba8422 2045 hrtick_start(rq, dl_se->runtime);
aab03e05 2046}
36ce9881 2047#else /* !CONFIG_SCHED_HRTICK */
63ba8422 2048static void start_hrtick_dl(struct rq *rq, struct sched_dl_entity *dl_se)
36ce9881
WL
2049{
2050}
aab03e05
DF
2051#endif
2052
a0e813f2 2053static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first)
ff1cdc94 2054{
b5eb4a5f
YS
2055 struct sched_dl_entity *dl_se = &p->dl;
2056 struct dl_rq *dl_rq = &rq->dl;
2057
ff1cdc94 2058 p->se.exec_start = rq_clock_task(rq);
b5eb4a5f
YS
2059 if (on_dl_rq(&p->dl))
2060 update_stats_wait_end_dl(dl_rq, dl_se);
ff1cdc94
MS
2061
2062 /* You can't push away the running task */
2063 dequeue_pushable_dl_task(rq, p);
f95d4eae 2064
a0e813f2
PZ
2065 if (!first)
2066 return;
2067
f95d4eae
PZ
2068 if (rq->curr->sched_class != &dl_sched_class)
2069 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2070
2071 deadline_queue_push_tasks(rq);
ff1cdc94
MS
2072}
2073
821aecd0 2074static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq)
aab03e05 2075{
2161573e 2076 struct rb_node *left = rb_first_cached(&dl_rq->root);
aab03e05
DF
2077
2078 if (!left)
2079 return NULL;
2080
f4478e7c 2081 return __node_2_dle(left);
aab03e05
DF
2082}
2083
21f56ffe 2084static struct task_struct *pick_task_dl(struct rq *rq)
aab03e05
DF
2085{
2086 struct sched_dl_entity *dl_se;
6e2df058 2087 struct dl_rq *dl_rq = &rq->dl;
aab03e05 2088 struct task_struct *p;
aab03e05 2089
63ba8422 2090again:
6e2df058 2091 if (!sched_dl_runnable(rq))
aab03e05
DF
2092 return NULL;
2093
821aecd0 2094 dl_se = pick_next_dl_entity(dl_rq);
09348d75 2095 WARN_ON_ONCE(!dl_se);
63ba8422
PZ
2096
2097 if (dl_server(dl_se)) {
2098 p = dl_se->server_pick(dl_se);
2099 if (!p) {
2100 WARN_ON_ONCE(1);
2101 dl_se->dl_yielded = 1;
2102 update_curr_dl_se(rq, dl_se, 0);
2103 goto again;
2104 }
2105 p->dl_server = dl_se;
2106 } else {
2107 p = dl_task_of(dl_se);
2108 }
21f56ffe
PZ
2109
2110 return p;
2111}
2112
2113static struct task_struct *pick_next_task_dl(struct rq *rq)
2114{
2115 struct task_struct *p;
2116
2117 p = pick_task_dl(rq);
63ba8422
PZ
2118 if (!p)
2119 return p;
2120
2121 if (!p->dl_server)
21f56ffe
PZ
2122 set_next_task_dl(rq, p, true);
2123
63ba8422
PZ
2124 if (hrtick_enabled(rq))
2125 start_hrtick_dl(rq, &p->dl);
2126
aab03e05
DF
2127 return p;
2128}
2129
6e2df058 2130static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
aab03e05 2131{
b5eb4a5f
YS
2132 struct sched_dl_entity *dl_se = &p->dl;
2133 struct dl_rq *dl_rq = &rq->dl;
2134
2135 if (on_dl_rq(&p->dl))
2136 update_stats_wait_start_dl(dl_rq, dl_se);
2137
aab03e05 2138 update_curr_dl(rq);
1baca4ce 2139
23127296 2140 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
4b53a341 2141 if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
1baca4ce 2142 enqueue_pushable_dl_task(rq, p);
aab03e05
DF
2143}
2144
d84b3131
FW
2145/*
2146 * scheduler tick hitting a task of our scheduling class.
2147 *
2148 * NOTE: This function can be called remotely by the tick offload that
2149 * goes along with full dynticks. Therefore no local assumption can be made
2150 * and everything must be accessed through the @rq and @curr passed in
2151 * parameters.
2152 */
aab03e05
DF
2153static void task_tick_dl(struct rq *rq, struct task_struct *p, int queued)
2154{
2155 update_curr_dl(rq);
2156
23127296 2157 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
a7bebf48
WL
2158 /*
2159 * Even when we have runtime, update_curr_dl() might have resulted in us
2160 * not being the leftmost task anymore. In that case NEED_RESCHED will
2161 * be set and schedule() will start a new hrtick for the next task.
2162 */
e0ee463c 2163 if (hrtick_enabled_dl(rq) && queued && p->dl.runtime > 0 &&
63ba8422
PZ
2164 is_leftmost(&p->dl, &rq->dl))
2165 start_hrtick_dl(rq, &p->dl);
aab03e05
DF
2166}
2167
2168static void task_fork_dl(struct task_struct *p)
2169{
2170 /*
2171 * SCHED_DEADLINE tasks cannot fork; this is enforced in
2172 * sched_fork().
2173 */
2174}
2175
1baca4ce
JL
2176#ifdef CONFIG_SMP
2177
2178/* Only try algorithms three times */
2179#define DL_MAX_TRIES 3
2180
2181static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
2182{
0b9d46fc 2183 if (!task_on_cpu(rq, p) &&
95158a89 2184 cpumask_test_cpu(cpu, &p->cpus_mask))
1baca4ce 2185 return 1;
1baca4ce
JL
2186 return 0;
2187}
2188
8b5e770e
WL
2189/*
2190 * Return the earliest pushable rq's task, which is suitable to be executed
2191 * on the CPU, NULL otherwise:
2192 */
2193static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
2194{
8b5e770e 2195 struct task_struct *p = NULL;
f4478e7c 2196 struct rb_node *next_node;
8b5e770e
WL
2197
2198 if (!has_pushable_dl_tasks(rq))
2199 return NULL;
2200
f4478e7c
DE
2201 next_node = rb_first_cached(&rq->dl.pushable_dl_tasks_root);
2202
8b5e770e
WL
2203next_node:
2204 if (next_node) {
f4478e7c 2205 p = __node_2_pdl(next_node);
8b5e770e
WL
2206
2207 if (pick_dl_task(rq, p, cpu))
2208 return p;
2209
2210 next_node = rb_next(next_node);
2211 goto next_node;
2212 }
2213
2214 return NULL;
2215}
2216
1baca4ce
JL
2217static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
2218
2219static int find_later_rq(struct task_struct *task)
2220{
2221 struct sched_domain *sd;
4ba29684 2222 struct cpumask *later_mask = this_cpu_cpumask_var_ptr(local_cpu_mask_dl);
1baca4ce 2223 int this_cpu = smp_processor_id();
b18c3ca1 2224 int cpu = task_cpu(task);
1baca4ce
JL
2225
2226 /* Make sure the mask is initialized first */
2227 if (unlikely(!later_mask))
2228 return -1;
2229
4b53a341 2230 if (task->nr_cpus_allowed == 1)
1baca4ce
JL
2231 return -1;
2232
91ec6778
JL
2233 /*
2234 * We have to consider system topology and task affinity
97fb7a0a 2235 * first, then we can look for a suitable CPU.
91ec6778 2236 */
3261ed0b 2237 if (!cpudl_find(&task_rq(task)->rd->cpudl, task, later_mask))
1baca4ce
JL
2238 return -1;
2239
2240 /*
b18c3ca1
BP
2241 * If we are here, some targets have been found, including
2242 * the most suitable one: among the runqueues whose current
2243 * tasks have a later deadline than our task's, it is the rq
2244 * with the latest possible deadline.
1baca4ce
JL
2245 *
2246 * Now we check how well this matches with task's
2247 * affinity and system topology.
2248 *
97fb7a0a 2249 * The last CPU where the task ran is our first
1baca4ce
JL
2250 * guess, since it is most likely cache-hot there.
2251 */
2252 if (cpumask_test_cpu(cpu, later_mask))
2253 return cpu;
2254 /*
2255 * Check if this_cpu is to be skipped (i.e., it is
2256 * not in the mask) or not.
2257 */
2258 if (!cpumask_test_cpu(this_cpu, later_mask))
2259 this_cpu = -1;
2260
2261 rcu_read_lock();
2262 for_each_domain(cpu, sd) {
2263 if (sd->flags & SD_WAKE_AFFINE) {
b18c3ca1 2264 int best_cpu;
1baca4ce
JL
2265
2266 /*
2267 * If possible, preempting this_cpu is
2268 * cheaper than migrating.
2269 */
2270 if (this_cpu != -1 &&
2271 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
2272 rcu_read_unlock();
2273 return this_cpu;
2274 }
2275
14e292f8
PZ
2276 best_cpu = cpumask_any_and_distribute(later_mask,
2277 sched_domain_span(sd));
1baca4ce 2278 /*
97fb7a0a 2279 * Last chance: if a CPU that is in both later_mask
b18c3ca1 2280 * and the current sd span is valid, that becomes our
97fb7a0a 2281 * choice. Of course, the latest possible CPU is
b18c3ca1 2282 * already under consideration through later_mask.
1baca4ce 2283 */
b18c3ca1 2284 if (best_cpu < nr_cpu_ids) {
1baca4ce
JL
2285 rcu_read_unlock();
2286 return best_cpu;
2287 }
2288 }
2289 }
2290 rcu_read_unlock();
2291
2292 /*
2293 * At this point, all our guesses failed, we just return
2294 * 'something', and let the caller sort the things out.
2295 */
2296 if (this_cpu != -1)
2297 return this_cpu;
2298
14e292f8 2299 cpu = cpumask_any_distribute(later_mask);
1baca4ce
JL
2300 if (cpu < nr_cpu_ids)
2301 return cpu;
2302
2303 return -1;
2304}
2305
2306/* Locks the rq it finds */
2307static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
2308{
2309 struct rq *later_rq = NULL;
2310 int tries;
2311 int cpu;
2312
2313 for (tries = 0; tries < DL_MAX_TRIES; tries++) {
2314 cpu = find_later_rq(task);
2315
2316 if ((cpu == -1) || (cpu == rq->cpu))
2317 break;
2318
2319 later_rq = cpu_rq(cpu);
2320
973bee49 2321 if (!dl_task_is_earliest_deadline(task, later_rq)) {
9d514262
WL
2322 /*
2323 * Target rq has tasks of equal or earlier deadline,
2324 * retrying does not release any lock and is unlikely
2325 * to yield a different result.
2326 */
2327 later_rq = NULL;
2328 break;
2329 }
2330
1baca4ce
JL
2331 /* Retry if something changed. */
2332 if (double_lock_balance(rq, later_rq)) {
2333 if (unlikely(task_rq(task) != rq ||
95158a89 2334 !cpumask_test_cpu(later_rq->cpu, &task->cpus_mask) ||
0b9d46fc 2335 task_on_cpu(rq, task) ||
13b5ab02 2336 !dl_task(task) ||
feffe5bb 2337 is_migration_disabled(task) ||
da0c1e65 2338 !task_on_rq_queued(task))) {
1baca4ce
JL
2339 double_unlock_balance(rq, later_rq);
2340 later_rq = NULL;
2341 break;
2342 }
2343 }
2344
2345 /*
2346 * If the rq we found has no -deadline task, or
2347 * its earliest one has a later deadline than our
2348 * task, the rq is a good one.
2349 */
973bee49 2350 if (dl_task_is_earliest_deadline(task, later_rq))
1baca4ce
JL
2351 break;
2352
2353 /* Otherwise we try again. */
2354 double_unlock_balance(rq, later_rq);
2355 later_rq = NULL;
2356 }
2357
2358 return later_rq;
2359}
2360
2361static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
2362{
2363 struct task_struct *p;
2364
2365 if (!has_pushable_dl_tasks(rq))
2366 return NULL;
2367
f4478e7c 2368 p = __node_2_pdl(rb_first_cached(&rq->dl.pushable_dl_tasks_root));
1baca4ce 2369
09348d75
IM
2370 WARN_ON_ONCE(rq->cpu != task_cpu(p));
2371 WARN_ON_ONCE(task_current(rq, p));
2372 WARN_ON_ONCE(p->nr_cpus_allowed <= 1);
1baca4ce 2373
09348d75
IM
2374 WARN_ON_ONCE(!task_on_rq_queued(p));
2375 WARN_ON_ONCE(!dl_task(p));
1baca4ce
JL
2376
2377 return p;
2378}
2379
2380/*
2381 * See if the non-running -deadline tasks on this rq
2382 * can be sent to some other CPU where they can preempt
2383 * and start executing.
2384 */
2385static int push_dl_task(struct rq *rq)
2386{
2387 struct task_struct *next_task;
2388 struct rq *later_rq;
c51b8ab5 2389 int ret = 0;
1baca4ce 2390
1baca4ce
JL
2391 next_task = pick_next_pushable_dl_task(rq);
2392 if (!next_task)
2393 return 0;
2394
2395retry:
1baca4ce
JL
2396 /*
2397 * If next_task preempts rq->curr, and rq->curr
2398 * can move away, it makes sense to just reschedule
2399 * without going further in pushing next_task.
2400 */
2401 if (dl_task(rq->curr) &&
2402 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) &&
4b53a341 2403 rq->curr->nr_cpus_allowed > 1) {
8875125e 2404 resched_curr(rq);
1baca4ce
JL
2405 return 0;
2406 }
2407
49bef33e
VS
2408 if (is_migration_disabled(next_task))
2409 return 0;
2410
2411 if (WARN_ON(next_task == rq->curr))
2412 return 0;
2413
1baca4ce
JL
2414 /* We might release rq lock */
2415 get_task_struct(next_task);
2416
2417 /* Will lock the rq it'll find */
2418 later_rq = find_lock_later_rq(next_task, rq);
2419 if (!later_rq) {
2420 struct task_struct *task;
2421
2422 /*
2423 * We must check all this again, since
2424 * find_lock_later_rq releases rq->lock and it is
2425 * then possible that next_task has migrated.
2426 */
2427 task = pick_next_pushable_dl_task(rq);
a776b968 2428 if (task == next_task) {
1baca4ce
JL
2429 /*
2430 * The task is still there. We don't try
97fb7a0a 2431 * again, some other CPU will pull it when ready.
1baca4ce 2432 */
1baca4ce
JL
2433 goto out;
2434 }
2435
2436 if (!task)
2437 /* No more tasks */
2438 goto out;
2439
2440 put_task_struct(next_task);
2441 next_task = task;
2442 goto retry;
2443 }
2444
2445 deactivate_task(rq, next_task, 0);
2446 set_task_cpu(next_task, later_rq->cpu);
734387ec 2447 activate_task(later_rq, next_task, 0);
c51b8ab5 2448 ret = 1;
1baca4ce 2449
8875125e 2450 resched_curr(later_rq);
1baca4ce
JL
2451
2452 double_unlock_balance(rq, later_rq);
2453
2454out:
2455 put_task_struct(next_task);
2456
c51b8ab5 2457 return ret;
1baca4ce
JL
2458}
2459
2460static void push_dl_tasks(struct rq *rq)
2461{
4ffa08ed 2462 /* push_dl_task() will return true if it moved a -deadline task */
1baca4ce
JL
2463 while (push_dl_task(rq))
2464 ;
aab03e05
DF
2465}
2466
0ea60c20 2467static void pull_dl_task(struct rq *this_rq)
1baca4ce 2468{
0ea60c20 2469 int this_cpu = this_rq->cpu, cpu;
a7c81556 2470 struct task_struct *p, *push_task;
0ea60c20 2471 bool resched = false;
1baca4ce
JL
2472 struct rq *src_rq;
2473 u64 dmin = LONG_MAX;
2474
2475 if (likely(!dl_overloaded(this_rq)))
0ea60c20 2476 return;
1baca4ce
JL
2477
2478 /*
2479 * Match the barrier from dl_set_overloaded; this guarantees that if we
2480 * see overloaded we must also see the dlo_mask bit.
2481 */
2482 smp_rmb();
2483
2484 for_each_cpu(cpu, this_rq->rd->dlo_mask) {
2485 if (this_cpu == cpu)
2486 continue;
2487
2488 src_rq = cpu_rq(cpu);
2489
2490 /*
2491 * It looks racy, and it is! However, as in sched_rt.c,
2492 * we are fine with this.
2493 */
2494 if (this_rq->dl.dl_nr_running &&
2495 dl_time_before(this_rq->dl.earliest_dl.curr,
2496 src_rq->dl.earliest_dl.next))
2497 continue;
2498
2499 /* Might drop this_rq->lock */
a7c81556 2500 push_task = NULL;
1baca4ce
JL
2501 double_lock_balance(this_rq, src_rq);
2502
2503 /*
2504 * If there are no more pullable tasks on the
2505 * rq, we're done with it.
2506 */
2507 if (src_rq->dl.dl_nr_running <= 1)
2508 goto skip;
2509
8b5e770e 2510 p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
1baca4ce
JL
2511
2512 /*
2513 * We found a task to be pulled if:
2514 * - it preempts our current (if there's one),
2515 * - it will preempt the last one we pulled (if any).
2516 */
2517 if (p && dl_time_before(p->dl.deadline, dmin) &&
973bee49 2518 dl_task_is_earliest_deadline(p, this_rq)) {
1baca4ce 2519 WARN_ON(p == src_rq->curr);
da0c1e65 2520 WARN_ON(!task_on_rq_queued(p));
1baca4ce
JL
2521
2522 /*
2523 * Then we pull iff p has actually an earlier
2524 * deadline than the current task of its runqueue.
2525 */
2526 if (dl_time_before(p->dl.deadline,
2527 src_rq->curr->dl.deadline))
2528 goto skip;
2529
a7c81556
PZ
2530 if (is_migration_disabled(p)) {
2531 push_task = get_push_task(src_rq);
2532 } else {
2533 deactivate_task(src_rq, p, 0);
2534 set_task_cpu(p, this_cpu);
2535 activate_task(this_rq, p, 0);
2536 dmin = p->dl.deadline;
2537 resched = true;
2538 }
1baca4ce
JL
2539
2540 /* Is there any other task even earlier? */
2541 }
2542skip:
2543 double_unlock_balance(this_rq, src_rq);
a7c81556
PZ
2544
2545 if (push_task) {
f0498d2a 2546 preempt_disable();
5cb9eaa3 2547 raw_spin_rq_unlock(this_rq);
a7c81556
PZ
2548 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2549 push_task, &src_rq->push_work);
f0498d2a 2550 preempt_enable();
5cb9eaa3 2551 raw_spin_rq_lock(this_rq);
a7c81556 2552 }
1baca4ce
JL
2553 }
2554
0ea60c20
PZ
2555 if (resched)
2556 resched_curr(this_rq);
1baca4ce
JL
2557}
2558
2559/*
2560 * Since the task is not running and a reschedule is not going to happen
2561 * anytime soon on its runqueue, we try pushing it away now.
2562 */
2563static void task_woken_dl(struct rq *rq, struct task_struct *p)
2564{
0b9d46fc 2565 if (!task_on_cpu(rq, p) &&
1baca4ce 2566 !test_tsk_need_resched(rq->curr) &&
4b53a341 2567 p->nr_cpus_allowed > 1 &&
1baca4ce 2568 dl_task(rq->curr) &&
4b53a341 2569 (rq->curr->nr_cpus_allowed < 2 ||
6b0a563f 2570 !dl_entity_preempt(&p->dl, &rq->curr->dl))) {
1baca4ce
JL
2571 push_dl_tasks(rq);
2572 }
2573}
2574
2575static void set_cpus_allowed_dl(struct task_struct *p,
713a2e21 2576 struct affinity_context *ctx)
1baca4ce 2577{
7f51412a 2578 struct root_domain *src_rd;
6c37067e 2579 struct rq *rq;
1baca4ce 2580
09348d75 2581 WARN_ON_ONCE(!dl_task(p));
1baca4ce 2582
7f51412a
JL
2583 rq = task_rq(p);
2584 src_rd = rq->rd;
2585 /*
2586 * Migrating a SCHED_DEADLINE task between exclusive
2587 * cpusets (different root_domains) entails a bandwidth
2588 * update. We already made space for us in the destination
2589 * domain (see cpuset_can_attach()).
2590 */
713a2e21 2591 if (!cpumask_intersects(src_rd->span, ctx->new_mask)) {
7f51412a
JL
2592 struct dl_bw *src_dl_b;
2593
2594 src_dl_b = dl_bw_of(cpu_of(rq));
2595 /*
2596 * We now free resources of the root_domain we are migrating
2597 * off. In the worst case, sched_setattr() may temporarily fail
2598 * until we complete the update.
2599 */
2600 raw_spin_lock(&src_dl_b->lock);
8c0944ce 2601 __dl_sub(src_dl_b, p->dl.dl_bw, dl_bw_cpus(task_cpu(p)));
7f51412a
JL
2602 raw_spin_unlock(&src_dl_b->lock);
2603 }
2604
713a2e21 2605 set_cpus_allowed_common(p, ctx);
1baca4ce
JL
2606}
2607
2608/* Assumes rq->lock is held */
2609static void rq_online_dl(struct rq *rq)
2610{
2611 if (rq->dl.overloaded)
2612 dl_set_overload(rq);
6bfd6d72 2613
16b26943 2614 cpudl_set_freecpu(&rq->rd->cpudl, rq->cpu);
6bfd6d72 2615 if (rq->dl.dl_nr_running > 0)
d8206bb3 2616 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr);
1baca4ce
JL
2617}
2618
2619/* Assumes rq->lock is held */
2620static void rq_offline_dl(struct rq *rq)
2621{
2622 if (rq->dl.overloaded)
2623 dl_clear_overload(rq);
6bfd6d72 2624
d8206bb3 2625 cpudl_clear(&rq->rd->cpudl, rq->cpu);
16b26943 2626 cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
1baca4ce
JL
2627}
2628
a6c0e746 2629void __init init_sched_dl_class(void)
1baca4ce
JL
2630{
2631 unsigned int i;
2632
2633 for_each_possible_cpu(i)
2634 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask_dl, i),
2635 GFP_KERNEL, cpu_to_node(i));
2636}
2637
f9a25f77
MP
2638void dl_add_task_root_domain(struct task_struct *p)
2639{
2640 struct rq_flags rf;
2641 struct rq *rq;
2642 struct dl_bw *dl_b;
2643
de40f33e
DE
2644 raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
2645 if (!dl_task(p)) {
2646 raw_spin_unlock_irqrestore(&p->pi_lock, rf.flags);
2647 return;
2648 }
2649
2650 rq = __task_rq_lock(p, &rf);
f9a25f77
MP
2651
2652 dl_b = &rq->rd->dl_bw;
2653 raw_spin_lock(&dl_b->lock);
2654
2655 __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
2656
2657 raw_spin_unlock(&dl_b->lock);
2658
f9a25f77
MP
2659 task_rq_unlock(rq, p, &rf);
2660}
2661
2662void dl_clear_root_domain(struct root_domain *rd)
2663{
2664 unsigned long flags;
2665
2666 raw_spin_lock_irqsave(&rd->dl_bw.lock, flags);
2667 rd->dl_bw.total_bw = 0;
2668 raw_spin_unlock_irqrestore(&rd->dl_bw.lock, flags);
2669}
2670
1baca4ce
JL
2671#endif /* CONFIG_SMP */
2672
aab03e05
DF
2673static void switched_from_dl(struct rq *rq, struct task_struct *p)
2674{
a649f237 2675 /*
209a0cbd
LA
2676 * task_non_contending() can start the "inactive timer" (if the 0-lag
2677 * time is in the future). If the task switches back to dl before
2678 * the "inactive timer" fires, it can continue to consume its current
2679 * runtime using its current deadline. If it stays outside of
2680 * SCHED_DEADLINE until the 0-lag time passes, inactive_task_timer()
2681 * will reset the task parameters.
a649f237 2682 */
209a0cbd 2683 if (task_on_rq_queued(p) && p->dl.dl_runtime)
2f7a0f58 2684 task_non_contending(&p->dl);
209a0cbd 2685
6c24849f
JL
2686 /*
2687 * In case a task is setscheduled out from SCHED_DEADLINE we need to
2688 * keep track of that on its cpuset (for correct bandwidth tracking).
2689 */
2690 dec_dl_tasks_cs(p);
2691
e117cb52
JL
2692 if (!task_on_rq_queued(p)) {
2693 /*
2694 * Inactive timer is armed. However, p is leaving DEADLINE and
2695 * might migrate away from this rq while continuing to run on
2696 * some other class. We need to remove its contribution from
2697 * this rq running_bw now, or sub_rq_bw (below) will complain.
2698 */
2699 if (p->dl.dl_non_contending)
2700 sub_running_bw(&p->dl, &rq->dl);
794a56eb 2701 sub_rq_bw(&p->dl, &rq->dl);
e117cb52 2702 }
8fd27231 2703
209a0cbd
LA
2704 /*
2705 * We cannot use inactive_task_timer() to invoke sub_running_bw()
2706 * at the 0-lag time, because the task could have been migrated
2707 * while SCHED_OTHER in the meantime.
2708 */
2709 if (p->dl.dl_non_contending)
2710 p->dl.dl_non_contending = 0;
a5e7be3b 2711
1baca4ce
JL
2712 /*
2713 * Since this might be the only -deadline task on the rq,
2714 * this is the right place to try to pull some other one
97fb7a0a 2715 * from an overloaded CPU, if any.
1baca4ce 2716 */
cd660911
WL
2717 if (!task_on_rq_queued(p) || rq->dl.dl_nr_running)
2718 return;
2719
02d8ec94 2720 deadline_queue_pull_task(rq);
aab03e05
DF
2721}
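/*
 * Editor's note (illustrative sketch, not part of the original source):
 * the "0-lag time" referenced above is assumed to follow the usual CBS
 * definition used by task_non_contending(): the instant at which the
 * remaining runtime, consumed at the reserved utilization, would run
 * out. With remaining runtime r and reservation (dl_runtime, dl_period):
 *
 *	t_0lag = deadline - r * dl_period / dl_runtime
 *
 * e.g. r = 2ms, dl_runtime = 10ms, dl_period = 100ms places the 0-lag
 * time 20ms before the current deadline. A toy helper (overflow and
 * negative-runtime handling omitted):
 */
#if 0	/* editorial sketch, not built */
static inline u64 example_zero_lag(u64 deadline, u64 runtime_left,
				   u64 dl_runtime, u64 dl_period)
{
	return deadline - div64_u64(runtime_left * dl_period, dl_runtime);
}
#endif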
2722
1baca4ce
JL
2723/*
2724 * When switching to -deadline, we may overload the rq, then
2725 * we try to push someone off, if possible.
2726 */
aab03e05
DF
2727static void switched_to_dl(struct rq *rq, struct task_struct *p)
2728{
209a0cbd
LA
2729 if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1)
2730 put_task_struct(p);
98b0a857 2731
6c24849f
JL
2732 /*
2733 * In case a task is setscheduled to SCHED_DEADLINE we need to keep
2734 * track of that on its cpuset (for correct bandwidth tracking).
2735 */
2736 inc_dl_tasks_cs(p);
2737
98b0a857 2738 /* If p is not queued we will update its parameters at next wakeup. */
8fd27231 2739 if (!task_on_rq_queued(p)) {
794a56eb 2740 add_rq_bw(&p->dl, &rq->dl);
98b0a857 2741
8fd27231
LA
2742 return;
2743 }
72f9f3fd 2744
98b0a857 2745 if (rq->curr != p) {
1baca4ce 2746#ifdef CONFIG_SMP
4b53a341 2747 if (p->nr_cpus_allowed > 1 && rq->dl.overloaded)
02d8ec94 2748 deadline_queue_push_tasks(rq);
619bd4a7 2749#endif
9916e214 2750 if (dl_task(rq->curr))
e23edc86 2751 wakeup_preempt_dl(rq, p, 0);
9916e214
PZ
2752 else
2753 resched_curr(rq);
d7d60709
VD
2754 } else {
2755 update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0);
aab03e05
DF
2756 }
2757}
2758
1baca4ce
JL
2759/*
2760 * If the scheduling parameters of a -deadline task changed,
2761 * a push or pull operation might be needed.
2762 */
aab03e05
DF
2763static void prio_changed_dl(struct rq *rq, struct task_struct *p,
2764 int oldprio)
2765{
7ea98dfa
VS
2766 if (!task_on_rq_queued(p))
2767 return;
2768
aab03e05 2769#ifdef CONFIG_SMP
7ea98dfa
VS
2770 /*
2771 * This might be too much, but unfortunately
2772 * we don't have the old deadline value, and
2773 * we can't tell if the task is increasing
2774 * or lowering its prio, so...
2775 */
2776 if (!rq->dl.overloaded)
2777 deadline_queue_pull_task(rq);
1baca4ce 2778
7ea98dfa 2779 if (task_current(rq, p)) {
1baca4ce
JL
2780 /*
2781 * If we now have an earlier deadline task than p,
2782 * then reschedule, provided p is still on this
2783 * runqueue.
2784 */
9916e214 2785 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline))
8875125e 2786 resched_curr(rq);
7ea98dfa 2787 } else {
1baca4ce 2788 /*
7ea98dfa
VS
2789 * Current may not be deadline in case p was throttled but we
2790 * have just replenished it (e.g. rt_mutex_setprio()).
2791 *
2792 * Otherwise, if p was given an earlier deadline, reschedule.
1baca4ce 2793 */
7ea98dfa
VS
2794 if (!dl_task(rq->curr) ||
2795 dl_time_before(p->dl.deadline, rq->curr->dl.deadline))
2796 resched_curr(rq);
801ccdbf 2797 }
7ea98dfa
VS
2798#else
2799 /*
2800 * We don't know if p has an earlier or later deadline, so let's blindly
2801 * set a (maybe not needed) rescheduling point.
2802 */
2803 resched_curr(rq);
2804#endif
aab03e05 2805}
aab03e05 2806
530bfad1
HJ
2807#ifdef CONFIG_SCHED_CORE
2808static int task_is_throttled_dl(struct task_struct *p, int cpu)
2809{
2810 return p->dl.dl_throttled;
2811}
2812#endif
2813
43c31ac0
PZ
2814DEFINE_SCHED_CLASS(dl) = {
2815
aab03e05
DF
2816 .enqueue_task = enqueue_task_dl,
2817 .dequeue_task = dequeue_task_dl,
2818 .yield_task = yield_task_dl,
2819
e23edc86 2820 .wakeup_preempt = wakeup_preempt_dl,
aab03e05
DF
2821
2822 .pick_next_task = pick_next_task_dl,
2823 .put_prev_task = put_prev_task_dl,
03b7fad1 2824 .set_next_task = set_next_task_dl,
aab03e05
DF
2825
2826#ifdef CONFIG_SMP
6e2df058 2827 .balance = balance_dl,
21f56ffe 2828 .pick_task = pick_task_dl,
aab03e05 2829 .select_task_rq = select_task_rq_dl,
209a0cbd 2830 .migrate_task_rq = migrate_task_rq_dl,
1baca4ce
JL
2831 .set_cpus_allowed = set_cpus_allowed_dl,
2832 .rq_online = rq_online_dl,
2833 .rq_offline = rq_offline_dl,
1baca4ce 2834 .task_woken = task_woken_dl,
a7c81556 2835 .find_lock_rq = find_lock_later_rq,
aab03e05
DF
2836#endif
2837
aab03e05
DF
2838 .task_tick = task_tick_dl,
2839 .task_fork = task_fork_dl,
aab03e05
DF
2840
2841 .prio_changed = prio_changed_dl,
2842 .switched_from = switched_from_dl,
2843 .switched_to = switched_to_dl,
6e998916
SG
2844
2845 .update_curr = update_curr_dl,
530bfad1
HJ
2846#ifdef CONFIG_SCHED_CORE
2847 .task_is_throttled = task_is_throttled_dl,
2848#endif
aab03e05 2849};
acb32132 2850
26762423
PL
2851/* Used for dl_bw check and update, used under sched_rt_handler()::mutex */
2852static u64 dl_generation;
2853
06a76fe0
NP
2854int sched_dl_global_validate(void)
2855{
2856 u64 runtime = global_rt_runtime();
2857 u64 period = global_rt_period();
2858 u64 new_bw = to_ratio(period, runtime);
26762423 2859 u64 gen = ++dl_generation;
06a76fe0 2860 struct dl_bw *dl_b;
a57415f5 2861 int cpu, cpus, ret = 0;
06a76fe0
NP
2862 unsigned long flags;
2863
2864 /*
2865 * Here we want to check that the bandwidth is not being set to
2866 * some value smaller than the currently allocated bandwidth in
2867 * any of the root_domains.
06a76fe0
NP
2868 */
2869 for_each_possible_cpu(cpu) {
2870 rcu_read_lock_sched();
26762423
PL
2871
2872 if (dl_bw_visited(cpu, gen))
2873 goto next;
2874
06a76fe0 2875 dl_b = dl_bw_of(cpu);
a57415f5 2876 cpus = dl_bw_cpus(cpu);
06a76fe0
NP
2877
2878 raw_spin_lock_irqsave(&dl_b->lock, flags);
a57415f5 2879 if (new_bw * cpus < dl_b->total_bw)
06a76fe0
NP
2880 ret = -EBUSY;
2881 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2882
26762423 2883next:
06a76fe0
NP
2884 rcu_read_unlock_sched();
2885
2886 if (ret)
2887 break;
2888 }
2889
2890 return ret;
2891}
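/*
 * Editor's note (illustrative worked example, not part of the original
 * source): the check above refuses to lower the global -deadline
 * bandwidth below what is already allocated in some root domain.
 * Assuming to_ratio(period, runtime) yields runtime/period in BW_SHIFT
 * (20 in current kernels) fixed point, the default limits
 * sched_rt_runtime_us = 950000 and sched_rt_period_us = 1000000 give:
 *
 *	new_bw = (950000 << 20) / 1000000 ~= 996147	(0.95 of a CPU)
 *
 * On a 4-CPU root domain with total_bw = 3 << 20 (three CPUs worth of
 * admitted tasks) this passes, since 996147 * 4 > 3145728; lowering
 * sched_rt_runtime_us to 700000 (new_bw ~= 734003, times 4 = 2936012)
 * would instead make the write fail with -EBUSY.
 */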
2892
ba4f7bc1 2893static void init_dl_rq_bw_ratio(struct dl_rq *dl_rq)
06a76fe0
NP
2894{
2895 if (global_rt_runtime() == RUNTIME_INF) {
2896 dl_rq->bw_ratio = 1 << RATIO_SHIFT;
6a9d623a 2897 dl_rq->max_bw = dl_rq->extra_bw = 1 << BW_SHIFT;
06a76fe0
NP
2898 } else {
2899 dl_rq->bw_ratio = to_ratio(global_rt_runtime(),
2900 global_rt_period()) >> (BW_SHIFT - RATIO_SHIFT);
6a9d623a
VP
2901 dl_rq->max_bw = dl_rq->extra_bw =
2902 to_ratio(global_rt_period(), global_rt_runtime());
06a76fe0
NP
2903 }
2904}
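/*
 * Editor's note (illustrative worked example, not part of the original
 * source): with the same assumptions as above (BW_SHIFT = 20,
 * RATIO_SHIFT = 8 in current kernels) and the default 95% limit, the
 * per-rq values set up here come out roughly as:
 *
 *	max_bw = extra_bw = to_ratio(1000000, 950000) ~= 996147  (0.95)
 *	bw_ratio = to_ratio(950000, 1000000) >> (20 - 8)
 *	         = 1103764 >> 12 ~= 269                          (~1/0.95)
 *
 * i.e. bw_ratio stores the inverse of the maximum utilization in
 * RATIO_SHIFT fixed point, kept for the bandwidth reclaiming (GRUB) code.
 */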
2905
2906void sched_dl_do_global(void)
2907{
2908 u64 new_bw = -1;
26762423 2909 u64 gen = ++dl_generation;
06a76fe0
NP
2910 struct dl_bw *dl_b;
2911 int cpu;
2912 unsigned long flags;
2913
06a76fe0
NP
2914 if (global_rt_runtime() != RUNTIME_INF)
2915 new_bw = to_ratio(global_rt_period(), global_rt_runtime());
2916
06a76fe0
NP
2917 for_each_possible_cpu(cpu) {
2918 rcu_read_lock_sched();
26762423
PL
2919
2920 if (dl_bw_visited(cpu, gen)) {
2921 rcu_read_unlock_sched();
2922 continue;
2923 }
2924
06a76fe0
NP
2925 dl_b = dl_bw_of(cpu);
2926
2927 raw_spin_lock_irqsave(&dl_b->lock, flags);
2928 dl_b->bw = new_bw;
2929 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
2930
2931 rcu_read_unlock_sched();
2932 init_dl_rq_bw_ratio(&cpu_rq(cpu)->dl);
2933 }
2934}
2935
2936/*
2937 * We must be sure that accepting a new task (or allowing changing the
2938 * parameters of an existing one) is consistent with the bandwidth
2939 * constraints. If yes, this function also accordingly updates the currently
2940 * allocated bandwidth to reflect the new situation.
2941 *
2942 * This function is called while holding p's rq->lock.
2943 */
2944int sched_dl_overflow(struct task_struct *p, int policy,
2945 const struct sched_attr *attr)
2946{
06a76fe0
NP
2947 u64 period = attr->sched_period ?: attr->sched_deadline;
2948 u64 runtime = attr->sched_runtime;
2949 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
60ffd5ed
LA
2950 int cpus, err = -1, cpu = task_cpu(p);
2951 struct dl_bw *dl_b = dl_bw_of(cpu);
2952 unsigned long cap;
06a76fe0 2953
794a56eb
JL
2954 if (attr->sched_flags & SCHED_FLAG_SUGOV)
2955 return 0;
2956
06a76fe0
NP
2957 /* !deadline task may carry old deadline bandwidth */
2958 if (new_bw == p->dl.dl_bw && task_has_dl_policy(p))
2959 return 0;
2960
2961 /*
2962 * Whether a task enters, leaves, or stays -deadline but changes
2963 * its parameters, we may need to update the total allocated
2964 * bandwidth of the container accordingly.
2965 */
2966 raw_spin_lock(&dl_b->lock);
60ffd5ed
LA
2967 cpus = dl_bw_cpus(cpu);
2968 cap = dl_bw_capacity(cpu);
2969
06a76fe0 2970 if (dl_policy(policy) && !task_has_dl_policy(p) &&
60ffd5ed 2971 !__dl_overflow(dl_b, cap, 0, new_bw)) {
06a76fe0 2972 if (hrtimer_active(&p->dl.inactive_timer))
8c0944ce 2973 __dl_sub(dl_b, p->dl.dl_bw, cpus);
06a76fe0
NP
2974 __dl_add(dl_b, new_bw, cpus);
2975 err = 0;
2976 } else if (dl_policy(policy) && task_has_dl_policy(p) &&
60ffd5ed 2977 !__dl_overflow(dl_b, cap, p->dl.dl_bw, new_bw)) {
06a76fe0
NP
2978 /*
2979 * XXX this is slightly incorrect: when the task
2980 * utilization decreases, we should delay the total
2981 * utilization change until the task's 0-lag point.
2982 * But this would require to set the task's "inactive
2983 * timer" when the task is not inactive.
2984 */
8c0944ce 2985 __dl_sub(dl_b, p->dl.dl_bw, cpus);
06a76fe0
NP
2986 __dl_add(dl_b, new_bw, cpus);
2987 dl_change_utilization(p, new_bw);
2988 err = 0;
2989 } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
2990 /*
2991 * Do not decrease the total deadline utilization here,
2992 * switched_from_dl() will take care of that at the correct
2993 * (0-lag) time.
2994 */
2995 err = 0;
2996 }
2997 raw_spin_unlock(&dl_b->lock);
2998
2999 return err;
3000}
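/*
 * Editor's note (illustrative sketch, not part of the original source):
 * a toy model of the admission test above. __dl_overflow() is assumed to
 * check, against the (capacity-scaled) bandwidth limit of the root
 * domain, whether the total still fits after swapping old_bw for new_bw;
 * the hypothetical helper below mirrors that idea with plain BW_SHIFT
 * fixed-point values and ignores capacity asymmetry.
 */
#if 0	/* editorial sketch, not built */
static inline bool example_dl_admission_ok(u64 limit_bw, u64 total_bw,
					   u64 old_bw, u64 new_bw)
{
	/* Assumes old_bw <= total_bw; admit iff the limit still holds. */
	return total_bw - old_bw + new_bw <= limit_bw;
}
#endif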
3001
3002/*
3003 * This function initializes the sched_dl_entity of a task that is
3004 * becoming SCHED_DEADLINE.
3005 *
3006 * Only the static values are considered here, the actual runtime and the
3007 * absolute deadline will be properly calculated when the task is enqueued
3008 * for the first time with its new policy.
3009 */
3010void __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
3011{
3012 struct sched_dl_entity *dl_se = &p->dl;
3013
3014 dl_se->dl_runtime = attr->sched_runtime;
3015 dl_se->dl_deadline = attr->sched_deadline;
3016 dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
f9509153 3017 dl_se->flags = attr->sched_flags & SCHED_DL_FLAGS;
06a76fe0
NP
3018 dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
3019 dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime);
3020}
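/*
 * Editor's note (illustrative worked example, not part of the original
 * source): assuming to_ratio(period, runtime) yields runtime/period in
 * BW_SHIFT (20 in current kernels) fixed point, a reservation with
 * runtime 10ms, deadline 30ms, period 100ms is stored as:
 *
 *	dl_bw      = (10000000 << 20) / 100000000 ~= 104857	(0.10 CPU)
 *	dl_density = (10000000 << 20) /  30000000 ~= 349525	(0.33 CPU)
 *
 * dl_bw is the bandwidth used for admission control over the period;
 * dl_density is the (larger) demand relative to the deadline, which the
 * wakeup-time overflow check is assumed to use for constrained-deadline
 * tasks.
 */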
3021
3022void __getparam_dl(struct task_struct *p, struct sched_attr *attr)
3023{
3024 struct sched_dl_entity *dl_se = &p->dl;
3025
3026 attr->sched_priority = p->rt_priority;
3027 attr->sched_runtime = dl_se->dl_runtime;
3028 attr->sched_deadline = dl_se->dl_deadline;
3029 attr->sched_period = dl_se->dl_period;
f9509153
QP
3030 attr->sched_flags &= ~SCHED_DL_FLAGS;
3031 attr->sched_flags |= dl_se->flags;
06a76fe0
NP
3032}
3033
3034/*
3035 * This function validates the new parameters of a -deadline task.
3036 * We require the deadline to be non-zero and greater than or equal
3037 * to the runtime, and the period to be either zero or greater than
3038 * or equal to the deadline. Furthermore, we have to be sure that
3039 * user parameters are above the internal resolution of 1us (we
3040 * check sched_runtime only since it is always the smaller one) and
3041 * below 2^63 ns (we have to check both sched_deadline and
3042 * sched_period, as the latter can be zero).
3043 */
3044bool __checkparam_dl(const struct sched_attr *attr)
3045{
b4098bfc
PZ
3046 u64 period, max, min;
3047
794a56eb
JL
3048 /* special dl tasks don't actually use any parameter */
3049 if (attr->sched_flags & SCHED_FLAG_SUGOV)
3050 return true;
3051
06a76fe0
NP
3052 /* deadline != 0 */
3053 if (attr->sched_deadline == 0)
3054 return false;
3055
3056 /*
3057 * Since we truncate DL_SCALE bits, make sure we're at least
3058 * that big.
3059 */
3060 if (attr->sched_runtime < (1ULL << DL_SCALE))
3061 return false;
3062
3063 /*
3064 * Since we use the MSB for wrap-around and sign issues, make
3065 * sure it's not set (mind that period can be equal to zero).
3066 */
3067 if (attr->sched_deadline & (1ULL << 63) ||
3068 attr->sched_period & (1ULL << 63))
3069 return false;
3070
b4098bfc
PZ
3071 period = attr->sched_period;
3072 if (!period)
3073 period = attr->sched_deadline;
3074
06a76fe0 3075 /* runtime <= deadline <= period (if period != 0) */
b4098bfc 3076 if (period < attr->sched_deadline ||
06a76fe0
NP
3077 attr->sched_deadline < attr->sched_runtime)
3078 return false;
3079
b4098bfc
PZ
3080 max = (u64)READ_ONCE(sysctl_sched_dl_period_max) * NSEC_PER_USEC;
3081 min = (u64)READ_ONCE(sysctl_sched_dl_period_min) * NSEC_PER_USEC;
3082
3083 if (period < min || period > max)
3084 return false;
3085
06a76fe0
NP
3086 return true;
3087}
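/*
 * Editor's note (illustrative sketch, not part of the original source):
 * a minimal user-space request that satisfies the rules above
 * (runtime >= ~1us, runtime <= deadline <= period, period within the
 * sched_deadline_period_{min,max}_us sysctl bounds). Older glibc does
 * not wrap sched_setattr(), hence the raw syscall; the field layout
 * follows sched_setattr(2), and the struct is renamed to avoid clashing
 * with the uapi definition.
 */
#if 0	/* user-space example, not kernel code */
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/sched.h>		/* SCHED_DEADLINE */

struct sched_attr_example {
	uint32_t size;
	uint32_t sched_policy;
	uint64_t sched_flags;
	int32_t  sched_nice;
	uint32_t sched_priority;
	uint64_t sched_runtime;		/* all three in nanoseconds */
	uint64_t sched_deadline;
	uint64_t sched_period;
};

static int become_deadline_task(void)
{
	struct sched_attr_example attr = {
		.size		= sizeof(attr),
		.sched_policy	= SCHED_DEADLINE,
		.sched_runtime	= 10 * 1000 * 1000,	/*  10 ms */
		.sched_deadline	= 30 * 1000 * 1000,	/*  30 ms */
		.sched_period	= 100 * 1000 * 1000,	/* 100 ms */
	};

	/* 0: current thread, 0: no flags */
	return syscall(SYS_sched_setattr, 0, &attr, 0);
}
#endif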
3088
3089/*
3090 * This function clears the sched_dl_entity static params.
3091 */
9e07d45c 3092static void __dl_clear_params(struct sched_dl_entity *dl_se)
06a76fe0 3093{
97fb7a0a
IM
3094 dl_se->dl_runtime = 0;
3095 dl_se->dl_deadline = 0;
3096 dl_se->dl_period = 0;
3097 dl_se->flags = 0;
3098 dl_se->dl_bw = 0;
3099 dl_se->dl_density = 0;
06a76fe0 3100
97fb7a0a
IM
3101 dl_se->dl_throttled = 0;
3102 dl_se->dl_yielded = 0;
3103 dl_se->dl_non_contending = 0;
3104 dl_se->dl_overrun = 0;
63ba8422 3105 dl_se->dl_server = 0;
2279f540
JL
3106
3107#ifdef CONFIG_RT_MUTEXES
3108 dl_se->pi_se = dl_se;
3109#endif
06a76fe0
NP
3110}
3111
9e07d45c
PZ
3112void init_dl_entity(struct sched_dl_entity *dl_se)
3113{
3114 RB_CLEAR_NODE(&dl_se->rb_node);
3115 init_dl_task_timer(dl_se);
3116 init_dl_inactive_task_timer(dl_se);
3117 __dl_clear_params(dl_se);
3118}
3119
06a76fe0
NP
3120bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
3121{
3122 struct sched_dl_entity *dl_se = &p->dl;
3123
3124 if (dl_se->dl_runtime != attr->sched_runtime ||
3125 dl_se->dl_deadline != attr->sched_deadline ||
3126 dl_se->dl_period != attr->sched_period ||
f9509153 3127 dl_se->flags != (attr->sched_flags & SCHED_DL_FLAGS))
06a76fe0
NP
3128 return true;
3129
3130 return false;
3131}
3132
3133#ifdef CONFIG_SMP
06a76fe0
NP
3134int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
3135 const struct cpumask *trial)
3136{
6092478b 3137 unsigned long flags, cap;
06a76fe0 3138 struct dl_bw *cur_dl_b;
6092478b 3139 int ret = 1;
06a76fe0
NP
3140
3141 rcu_read_lock_sched();
3142 cur_dl_b = dl_bw_of(cpumask_any(cur));
6092478b 3143 cap = __dl_bw_capacity(trial);
06a76fe0 3144 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
6092478b 3145 if (__dl_overflow(cur_dl_b, cap, 0, 0))
06a76fe0
NP
3146 ret = 0;
3147 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
3148 rcu_read_unlock_sched();
97fb7a0a 3149
06a76fe0
NP
3150 return ret;
3151}
3152
85989106
DE
3153enum dl_bw_request {
3154 dl_bw_req_check_overflow = 0,
3155 dl_bw_req_alloc,
3156 dl_bw_req_free
3157};
3158
3159static int dl_bw_manage(enum dl_bw_request req, int cpu, u64 dl_bw)
06a76fe0 3160{
85989106 3161 unsigned long flags;
06a76fe0 3162 struct dl_bw *dl_b;
85989106 3163 bool overflow = 0;
06a76fe0
NP
3164
3165 rcu_read_lock_sched();
3166 dl_b = dl_bw_of(cpu);
3167 raw_spin_lock_irqsave(&dl_b->lock, flags);
772b6539 3168
85989106
DE
3169 if (req == dl_bw_req_free) {
3170 __dl_sub(dl_b, dl_bw, dl_bw_cpus(cpu));
3171 } else {
3172 unsigned long cap = dl_bw_capacity(cpu);
3173
3174 overflow = __dl_overflow(dl_b, cap, 0, dl_bw);
3175
3176 if (req == dl_bw_req_alloc && !overflow) {
3177 /*
3178 * We reserve space in the destination
3179 * root_domain, as we can't fail after this point.
3180 * We will free resources in the source root_domain
3181 * later on (see set_cpus_allowed_dl()).
3182 */
3183 __dl_add(dl_b, dl_bw, dl_bw_cpus(cpu));
3184 }
772b6539
DE
3185 }
3186
06a76fe0
NP
3187 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
3188 rcu_read_unlock_sched();
97fb7a0a 3189
772b6539 3190 return overflow ? -EBUSY : 0;
06a76fe0 3191}
85989106
DE
3192
3193int dl_bw_check_overflow(int cpu)
3194{
3195 return dl_bw_manage(dl_bw_req_check_overflow, cpu, 0);
3196}
3197
3198int dl_bw_alloc(int cpu, u64 dl_bw)
3199{
3200 return dl_bw_manage(dl_bw_req_alloc, cpu, dl_bw);
3201}
3202
3203void dl_bw_free(int cpu, u64 dl_bw)
3204{
3205 dl_bw_manage(dl_bw_req_free, cpu, dl_bw);
3206}
06a76fe0
NP
3207#endif
3208
acb32132 3209#ifdef CONFIG_SCHED_DEBUG
acb32132
WL
3210void print_dl_stats(struct seq_file *m, int cpu)
3211{
3212 print_dl_rq(m, cpu, &cpu_rq(cpu)->dl);
3213}
3214#endif /* CONFIG_SCHED_DEBUG */