// SPDX-License-Identifier: GPL-2.0
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */
#include "sched.h"

#include "pelt.h"

int sched_rr_timeslice = RR_TIMESLICE;
int sysctl_sched_rr_timeslice = (MSEC_PER_SEC / HZ) * RR_TIMESLICE;
/* More than 4 hours if BW_SHIFT equals 20. */
static const u64 max_rt_runtime = MAX_BW;
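
/*
 * Back-of-the-envelope check of that bound (assuming the sched.h
 * definitions MAX_BW_BITS = 64 - BW_SHIFT and
 * MAX_BW = (1ULL << MAX_BW_BITS) - 1):
 *
 *	2^44 - 1 ns ~= 1.76e13 ns ~= 17592 s ~= 4.9 hours
 *
 * which is where the "more than 4 hours" above comes from.
 */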

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	int idle = 0;
	int overrun;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	for (;;) {
		overrun = hrtimer_forward_now(timer, rt_b->rt_period);
		if (!overrun)
			break;

		raw_spin_unlock(&rt_b->rt_runtime_lock);
		idle = do_sched_rt_period_timer(rt_b, overrun);
		raw_spin_lock(&rt_b->rt_runtime_lock);
	}
	if (idle)
		rt_b->rt_period_active = 0;
	raw_spin_unlock(&rt_b->rt_runtime_lock);

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}
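
/*
 * Illustrative timing sketch for the loop above (hypothetical numbers):
 * with rt_period = 1s and the callback running 2.5s late,
 * hrtimer_forward_now() pushes the expiry past "now" and returns 3, so
 * do_sched_rt_period_timer() replenishes three periods in one go; the
 * next pass returns 0 and the loop exits with the timer armed for the
 * remainder of the current period.
 */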

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL_HARD);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	if (!rt_b->rt_period_active) {
		rt_b->rt_period_active = 1;
		/*
		 * SCHED_DEADLINE updates the bandwidth, as a runaway
		 * RT task alongside a DL task could hog a CPU. But DL does
		 * not reset the period. If a deadline task was running
		 * without an RT task running, it can cause RT tasks to
		 * throttle when they start up. Kick the timer right away
		 * to update the period.
		 */
		hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
		hrtimer_start_expires(&rt_b->rt_period_timer,
				      HRTIMER_MODE_ABS_PINNED_HARD);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->highest_prio.next = MAX_RT_PRIO-1;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif /* CONFIG_SMP */
	/* We start in dequeued state, because no RT tasks are queued */
	rt_rq->rt_queued = 0;

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}
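
/*
 * Note on the bitmap sentinel above: the extra bit at MAX_RT_PRIO lets
 * sched_find_first_bit() terminate on an otherwise empty array. E.g.
 * with a single prio-10 entity queued, bits 10 and MAX_RT_PRIO are set
 * and the search returns 10; with nothing queued it returns the
 * sentinel index MAX_RT_PRIO, which pick_next_rt_entity() guards
 * against with a BUG_ON().
 */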

#ifdef CONFIG_RT_GROUP_SCHED
static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	hrtimer_cancel(&rt_b->rt_period_timer);
}

#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_SCHED_DEBUG
	WARN_ON_ONCE(!rt_entity_is_task(rt_se));
#endif
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = rt_se->rt_rq;

	return rt_rq->rq;
}

void free_rt_sched_group(struct task_group *tg)
{
	int i;

	if (tg->rt_se)
		destroy_rt_bandwidth(&tg->rt_bandwidth);

	for_each_possible_cpu(i) {
		if (tg->rt_rq)
			kfree(tg->rt_rq[i]);
		if (tg->rt_se)
			kfree(tg->rt_se[i]);
	}

	kfree(tg->rt_rq);
	kfree(tg->rt_se);
}

void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
		      struct sched_rt_entity *rt_se, int cpu,
		      struct sched_rt_entity *parent)
{
	struct rq *rq = cpu_rq(cpu);

	rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	rt_rq->rt_nr_boosted = 0;
	rt_rq->rq = rq;
	rt_rq->tg = tg;

	tg->rt_rq[cpu] = rt_rq;
	tg->rt_se[cpu] = rt_se;

	if (!rt_se)
		return;

	if (!parent)
		rt_se->rt_rq = &rq->rt;
	else
		rt_se->rt_rq = parent->my_q;

	rt_se->my_q = rt_rq;
	rt_se->parent = parent;
	INIT_LIST_HEAD(&rt_se->run_list);
}

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	struct rt_rq *rt_rq;
	struct sched_rt_entity *rt_se;
	int i;

	tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
	if (!tg->rt_rq)
		goto err;
	tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
	if (!tg->rt_se)
		goto err;

	init_rt_bandwidth(&tg->rt_bandwidth,
			ktime_to_ns(def_rt_bandwidth.rt_period), 0);

	for_each_possible_cpu(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq);
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* CONFIG_RT_GROUP_SCHED */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);

	return task_rq(p);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_SMP

static void pull_rt_task(struct rq *this_rq);

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	/* Try to pull RT tasks here if we lower this rq's prio */
	return rq->online && rq->rt.highest_prio.curr > prev->prio;
}

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 *
	 * Matched by the barrier in pull_rt_task().
	 */
	smp_wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}
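
/*
 * In other words: an rq is flagged overloaded once it has more than one
 * runnable RT task and at least one of them may migrate. Example
 * (hypothetical): two FIFO tasks queued, one pinned to this CPU and one
 * allowed on several CPUs -> rt_nr_total == 2, rt_nr_migratory == 1, so
 * the rq is marked in rd->rto_mask and rto_count is bumped for the pull
 * side to see.
 */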

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static DEFINE_PER_CPU(struct callback_head, rt_push_head);
static DEFINE_PER_CPU(struct callback_head, rt_pull_head);

static void push_rt_tasks(struct rq *);
static void pull_rt_task(struct rq *);

static inline void rt_queue_push_tasks(struct rq *rq)
{
	if (!has_pushable_tasks(rq))
		return;

	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
}

static inline void rt_queue_pull_task(struct rq *rq)
{
	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
}

static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else {
		rq->rt.highest_prio.next = MAX_RT_PRIO-1;
	}
}
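
/*
 * The pushable list is a plist sorted on p->prio (lower value == higher
 * priority), so the plist_first_entry() above is always the
 * highest-priority pushable task; highest_prio.next mirrors that in
 * O(1) for the push/pull heuristics.
 */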

#else

static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
{
}

static inline
void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline
void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
}

static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
{
	return false;
}

static inline void pull_rt_task(struct rq *this_rq)
{
}

static inline void rt_queue_push_tasks(struct rq *rq)
{
}
#endif /* CONFIG_SMP */

static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
static void dequeue_top_rt_rq(struct rt_rq *rt_rq);

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->on_rq;
}

#ifdef CONFIG_UCLAMP_TASK
/*
 * Verify the fitness of task @p to run on @cpu taking into account the uclamp
 * settings.
 *
 * This check is only important for heterogeneous systems where the uclamp_min
 * value is higher than the capacity of a @cpu. For a non-heterogeneous system
 * this function will always return true.
 *
 * The function will return true if the capacity of the @cpu is >= the
 * uclamp_min and false otherwise.
 *
 * Note that uclamp_min will be clamped to uclamp_max if
 * uclamp_min > uclamp_max.
 */
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	unsigned int min_cap;
	unsigned int max_cap;
	unsigned int cpu_cap;

	/* Only heterogeneous systems can benefit from this check */
	if (!static_branch_unlikely(&sched_asym_cpucapacity))
		return true;

	min_cap = uclamp_eff_value(p, UCLAMP_MIN);
	max_cap = uclamp_eff_value(p, UCLAMP_MAX);

	cpu_cap = capacity_orig_of(cpu);

	return cpu_cap >= min(min_cap, max_cap);
}
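
/*
 * Worked example (hypothetical big.LITTLE capacities): a task with
 * uclamp_min = 512 and uclamp_max = 1024 does not fit a little CPU of
 * capacity_orig 446 (446 < min(512, 1024)) but fits a big CPU of
 * capacity_orig 1024. If uclamp_max is lowered to 256, the effective
 * request becomes min(512, 256) = 256 and the little CPU fits again.
 */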
#else
static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
{
	return true;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return RUNTIME_INF;

	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
}

typedef struct task_group *rt_rq_iter_t;

static inline struct task_group *next_task_group(struct task_group *tg)
{
	do {
		tg = list_entry_rcu(tg->list.next,
			typeof(struct task_group), list);
	} while (&tg->list != &task_groups && task_group_is_autogroup(tg));

	if (&tg->list == &task_groups)
		tg = NULL;

	return tg;
}

#define for_each_rt_rq(rt_rq, iter, rq)					\
	for (iter = container_of(&task_groups, typeof(*iter), list);	\
		(iter = next_task_group(iter)) &&			\
		(rt_rq = iter->rt_rq[cpu_of(rq)]);)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);

static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
	struct rq *rq = rq_of_rt_rq(rt_rq);
	struct sched_rt_entity *rt_se;

	int cpu = cpu_of(rq);

	rt_se = rt_rq->tg->rt_se[cpu];

	if (rt_rq->rt_nr_running) {
		if (!rt_se)
			enqueue_top_rt_rq(rt_rq);
		else if (!on_rt_rq(rt_se))
			enqueue_rt_entity(rt_se, 0);

		if (rt_rq->highest_prio.curr < curr->prio)
			resched_curr(rq);
	}
}

static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se;
	int cpu = cpu_of(rq_of_rt_rq(rt_rq));

	rt_se = rt_rq->tg->rt_se[cpu];

	if (!rt_se) {
		dequeue_top_rt_rq(rt_rq);
		/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
		cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
	} else if (on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se, 0);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
}

static int rt_se_boosted(struct sched_rt_entity *rt_se)
{
	struct rt_rq *rt_rq = group_rt_rq(rt_se);
	struct task_struct *p;

	if (rt_rq)
		return !!rt_rq->rt_nr_boosted;

	p = rt_task_of(rt_se);
	return p->prio != p->normal_prio;
}

#ifdef CONFIG_SMP
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return this_rq()->rd->span;
}
#else
static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}
#endif

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &rt_rq->tg->rt_bandwidth;
}

#else /* !CONFIG_RT_GROUP_SCHED */

static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
{
	return rt_rq->rt_runtime;
}

static inline u64 sched_rt_period(struct rt_rq *rt_rq)
{
	return ktime_to_ns(def_rt_bandwidth.rt_period);
}

typedef struct rt_rq *rt_rq_iter_t;

#define for_each_rt_rq(rt_rq, iter, rq) \
	for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	if (!rt_rq->rt_nr_running)
		return;

	enqueue_top_rt_rq(rt_rq);
	resched_curr(rq);
}

static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
{
	dequeue_top_rt_rq(rt_rq);
}

static inline int rt_rq_throttled(struct rt_rq *rt_rq)
{
	return rt_rq->rt_throttled;
}

static inline const struct cpumask *sched_rt_period_mask(void)
{
	return cpu_online_mask;
}

static inline
struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
{
	return &cpu_rq(cpu)->rt;
}

static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
{
	return &def_rt_bandwidth;
}

#endif /* CONFIG_RT_GROUP_SCHED */

bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

	return (hrtimer_active(&rt_b->rt_period_timer) ||
		rt_rq->rt_time < rt_b->rt_runtime);
}

#ifdef CONFIG_SMP
/*
 * We ran out of runtime, see if we can borrow some from our neighbours.
 */
static void do_balance_runtime(struct rt_rq *rt_rq)
{
	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
	int i, weight;
	u64 rt_period;

	weight = cpumask_weight(rd->span);

	raw_spin_lock(&rt_b->rt_runtime_lock);
	rt_period = ktime_to_ns(rt_b->rt_period);
	for_each_cpu(i, rd->span) {
		struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
		s64 diff;

		if (iter == rt_rq)
			continue;

		raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
		if (iter->rt_runtime == RUNTIME_INF)
			goto next;

		/*
		 * From runqueues with spare time, take 1/n part of their
		 * spare time, but no more than our period.
		 */
		diff = iter->rt_runtime - iter->rt_time;
		if (diff > 0) {
			diff = div_u64((u64)diff, weight);
			if (rt_rq->rt_runtime + diff > rt_period)
				diff = rt_period - rt_rq->rt_runtime;
			iter->rt_runtime -= diff;
			rt_rq->rt_runtime += diff;
			if (rt_rq->rt_runtime == rt_period) {
				raw_spin_unlock(&iter->rt_runtime_lock);
				break;
			}
		}
next:
		raw_spin_unlock(&iter->rt_runtime_lock);
	}
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}
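
/*
 * Borrowing sketch (hypothetical numbers): on a 4-CPU root domain
 * (weight == 4), a neighbour with 40ms of spare runtime gives up
 * diff = 40ms / 4 = 10ms, capped so that this rq's rt_runtime never
 * exceeds rt_period. Repeated over rd->span, a starved rq can collect
 * up to a full period's worth of borrowed runtime.
 */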

/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
static void __disable_runtime(struct rq *rq)
{
	struct root_domain *rd = rq->rd;
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
		s64 want;
		int i;

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * Either we're all inf and nobody needs to borrow, or we're
		 * already disabled and thus have nothing to do, or we have
		 * exactly the right amount of runtime to take out.
		 */
		if (rt_rq->rt_runtime == RUNTIME_INF ||
				rt_rq->rt_runtime == rt_b->rt_runtime)
			goto balanced;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);

		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have, that's the amount of runtime
		 * we lent and now have to reclaim.
		 */
		want = rt_b->rt_runtime - rt_rq->rt_runtime;

		/*
		 * Greedy reclaim, take back as much as we can.
		 */
		for_each_cpu(i, rd->span) {
			struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
			s64 diff;

			/*
			 * Can't reclaim from ourselves or disabled runqueues.
			 */
			if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
				continue;

			raw_spin_lock(&iter->rt_runtime_lock);
			if (want > 0) {
				diff = min_t(s64, iter->rt_runtime, want);
				iter->rt_runtime -= diff;
				want -= diff;
			} else {
				iter->rt_runtime -= want;
				want -= want;
			}
			raw_spin_unlock(&iter->rt_runtime_lock);

			if (!want)
				break;
		}

		raw_spin_lock(&rt_rq->rt_runtime_lock);
		/*
		 * We cannot be left wanting - that would mean some runtime
		 * leaked out of the system.
		 */
		BUG_ON(want);
balanced:
		/*
		 * Disable all the borrow logic by pretending we have inf
		 * runtime - in which case borrowing doesn't make sense.
		 */
		rt_rq->rt_runtime = RUNTIME_INF;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);

		/* Make rt_rq available for pick_next_task() */
		sched_rt_rq_enqueue(rt_rq);
	}
}

static void __enable_runtime(struct rq *rq)
{
	rt_rq_iter_t iter;
	struct rt_rq *rt_rq;

	if (unlikely(!scheduler_running))
		return;

	/*
	 * Reset each runqueue's bandwidth settings
	 */
	for_each_rt_rq(rt_rq, iter, rq) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		raw_spin_lock(&rt_b->rt_runtime_lock);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		rt_rq->rt_runtime = rt_b->rt_runtime;
		rt_rq->rt_time = 0;
		rt_rq->rt_throttled = 0;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		raw_spin_unlock(&rt_b->rt_runtime_lock);
	}
}

static void balance_runtime(struct rt_rq *rt_rq)
{
	if (!sched_feat(RT_RUNTIME_SHARE))
		return;

	if (rt_rq->rt_time > rt_rq->rt_runtime) {
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		do_balance_runtime(rt_rq);
		raw_spin_lock(&rt_rq->rt_runtime_lock);
	}
}
#else /* !CONFIG_SMP */
static inline void balance_runtime(struct rt_rq *rt_rq) {}
#endif /* CONFIG_SMP */

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
{
	int i, idle = 1, throttled = 0;
	const struct cpumask *span;

	span = sched_rt_period_mask();
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * FIXME: isolated CPUs should really leave the root task group,
	 * whether they are isolcpus or were isolated via cpusets, lest
	 * the timer run on a CPU which does not service all runqueues,
	 * potentially leaving other CPUs indefinitely throttled. If
	 * isolation is really required, the user will turn the throttle
	 * off to kill the perturbations it causes anyway. Meanwhile,
	 * this maintains functionality for boot and/or troubleshooting.
	 */
	if (rt_b == &root_task_group.rt_bandwidth)
		span = cpu_online_mask;
#endif
	for_each_cpu(i, span) {
		int enqueue = 0;
		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
		struct rq *rq = rq_of_rt_rq(rt_rq);
		int skip;

		/*
		 * When span == cpu_online_mask, taking each rq->lock
		 * can be time-consuming. Try to avoid it when possible.
		 */
		raw_spin_lock(&rt_rq->rt_runtime_lock);
		if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
			rt_rq->rt_runtime = rt_b->rt_runtime;
		skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
		raw_spin_unlock(&rt_rq->rt_runtime_lock);
		if (skip)
			continue;

		raw_spin_rq_lock(rq);
		update_rq_clock(rq);

		if (rt_rq->rt_time) {
			u64 runtime;

			raw_spin_lock(&rt_rq->rt_runtime_lock);
			if (rt_rq->rt_throttled)
				balance_runtime(rt_rq);
			runtime = rt_rq->rt_runtime;
			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
			if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
				rt_rq->rt_throttled = 0;
				enqueue = 1;

				/*
				 * When we're idle and a woken (rt) task is
				 * throttled, check_preempt_curr() will set
				 * skip_update and the time between the wakeup
				 * and this unthrottle will get accounted as
				 * 'runtime'.
				 */
				if (rt_rq->rt_nr_running && rq->curr == rq->idle)
					rq_clock_cancel_skipupdate(rq);
			}
			if (rt_rq->rt_time || rt_rq->rt_nr_running)
				idle = 0;
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		} else if (rt_rq->rt_nr_running) {
			idle = 0;
			if (!rt_rq_throttled(rt_rq))
				enqueue = 1;
		}
		if (rt_rq->rt_throttled)
			throttled = 1;

		if (enqueue)
			sched_rt_rq_enqueue(rt_rq);
		raw_spin_rq_unlock(rq);
	}

	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
		return 1;

	return idle;
}

static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_RT_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio.curr;
#endif

	return rt_task_of(rt_se)->prio;
}

static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
{
	u64 runtime = sched_rt_runtime(rt_rq);

	if (rt_rq->rt_throttled)
		return rt_rq_throttled(rt_rq);

	if (runtime >= sched_rt_period(rt_rq))
		return 0;

	balance_runtime(rt_rq);
	runtime = sched_rt_runtime(rt_rq);
	if (runtime == RUNTIME_INF)
		return 0;

	if (rt_rq->rt_time > runtime) {
		struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);

		/*
		 * Don't actually throttle groups that have no runtime assigned
		 * but accrue some time due to boosting.
		 */
		if (likely(rt_b->rt_runtime)) {
			rt_rq->rt_throttled = 1;
			printk_deferred_once("sched: RT throttling activated\n");
		} else {
			/*
			 * In case we did anyway, make it go away:
			 * replenishment is a joke, since it will replenish us
			 * with exactly 0 ns.
			 */
			rt_rq->rt_time = 0;
		}

		if (rt_rq_throttled(rt_rq)) {
			sched_rt_rq_dequeue(rt_rq);
			return 1;
		}
	}

	return 0;
}
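
/*
 * Throttling example with the default knobs (sched_rt_period_us =
 * 1000000, sched_rt_runtime_us = 950000): once an rq accrues more than
 * 950ms of RT execution inside the 1s period (and nothing could be
 * borrowed), rt_throttled is set and the rq is dequeued until the
 * period timer replenishes it, leaving roughly 5% of CPU time for
 * non-RT tasks.
 */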

/*
 * Update the current task's runtime statistics. Skip current tasks that
 * are not in our scheduling class.
 */
static void update_curr_rt(struct rq *rq)
{
	struct task_struct *curr = rq->curr;
	struct sched_rt_entity *rt_se = &curr->rt;
	u64 delta_exec;
	u64 now;

	if (curr->sched_class != &rt_sched_class)
		return;

	now = rq_clock_task(rq);
	delta_exec = now - curr->se.exec_start;
	if (unlikely((s64)delta_exec <= 0))
		return;

	schedstat_set(curr->se.statistics.exec_max,
		      max(curr->se.statistics.exec_max, delta_exec));

	curr->se.sum_exec_runtime += delta_exec;
	account_group_exec_runtime(curr, delta_exec);

	curr->se.exec_start = now;
	cgroup_account_cputime(curr, delta_exec);

	if (!rt_bandwidth_enabled())
		return;

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);

		if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
			raw_spin_lock(&rt_rq->rt_runtime_lock);
			rt_rq->rt_time += delta_exec;
			if (sched_rt_runtime_exceeded(rt_rq))
				resched_curr(rq);
			raw_spin_unlock(&rt_rq->rt_runtime_lock);
		}
	}
}

static void
dequeue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (!rt_rq->rt_queued)
		return;

	BUG_ON(!rq->nr_running);

	sub_nr_running(rq, rt_rq->rt_nr_running);
	rt_rq->rt_queued = 0;
}

static void
enqueue_top_rt_rq(struct rt_rq *rt_rq)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

	BUG_ON(&rq->rt != rt_rq);

	if (rt_rq->rt_queued)
		return;

	if (rt_rq_throttled(rt_rq))
		return;

	if (rt_rq->rt_nr_running) {
		add_nr_running(rq, rt_rq->rt_nr_running);
		rt_rq->rt_queued = 1;
	}

	/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
	cpufreq_update_util(rq, 0);
}

#if defined CONFIG_SMP

static void
inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && prio < prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}

static void
dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
	struct rq *rq = rq_of_rt_rq(rt_rq);

#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Change rq's cpupri only if rt_rq is the top queue.
	 */
	if (&rq->rt != rt_rq)
		return;
#endif
	if (rq->online && rt_rq->highest_prio.curr != prev_prio)
		cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}

#else /* CONFIG_SMP */

static inline
void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
static inline
void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}

#endif /* CONFIG_SMP */

#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
static void
inc_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (prio < prev_prio)
		rt_rq->highest_prio.curr = prio;

	inc_rt_prio_smp(rt_rq, prio, prev_prio);
}

static void
dec_rt_prio(struct rt_rq *rt_rq, int prio)
{
	int prev_prio = rt_rq->highest_prio.curr;

	if (rt_rq->rt_nr_running) {

		WARN_ON(prio < prev_prio);

		/*
		 * This may have been our highest task, and therefore
		 * we may have some recomputation to do
		 */
		if (prio == prev_prio) {
			struct rt_prio_array *array = &rt_rq->active;

			rt_rq->highest_prio.curr =
				sched_find_first_bit(array->bitmap);
		}

	} else {
		rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
	}

	dec_rt_prio_smp(rt_rq, prio, prev_prio);
}

#else

static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}

#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */

#ifdef CONFIG_RT_GROUP_SCHED

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted++;

	if (rt_rq->tg)
		start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
}

static void
dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	if (rt_se_boosted(rt_se))
		rt_rq->rt_nr_boosted--;

	WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
}

#else /* CONFIG_RT_GROUP_SCHED */

static void
inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	start_rt_bandwidth(&def_rt_bandwidth);
}

static inline
void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}

#endif /* CONFIG_RT_GROUP_SCHED */

static inline
unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);

	if (group_rq)
		return group_rq->rt_nr_running;
	else
		return 1;
}

static inline
unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
{
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct task_struct *tsk;

	if (group_rq)
		return group_rq->rr_nr_running;

	tsk = rt_task_of(rt_se);

	return (tsk->policy == SCHED_RR) ? 1 : 0;
}

static inline
void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	int prio = rt_se_prio(rt_se);

	WARN_ON(!rt_prio(prio));
	rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);

	inc_rt_prio(rt_rq, prio);
	inc_rt_migration(rt_se, rt_rq);
	inc_rt_group(rt_se, rt_rq);
}

static inline
void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	WARN_ON(!rt_prio(rt_se_prio(rt_se)));
	WARN_ON(!rt_rq->rt_nr_running);
	rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
	rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);

	dec_rt_prio(rt_rq, rt_se_prio(rt_se));
	dec_rt_migration(rt_se, rt_rq);
	dec_rt_group(rt_se, rt_rq);
}

/*
 * Change rt_se->run_list location unless SAVE && !MOVE
 *
 * assumes ENQUEUE/DEQUEUE flags match
 */
static inline bool move_entity(unsigned int flags)
{
	if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
		return false;

	return true;
}
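
/*
 * I.e. the only combination that must not touch rt_se->run_list is
 * SAVE without MOVE: e.g. a sched_setscheduler()-style operation that
 * does not change the effective priority dequeues and requeues the
 * entity in place, and it should keep its position within its
 * priority queue.
 */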

static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
{
	list_del_init(&rt_se->run_list);

	if (list_empty(array->queue + rt_se_prio(rt_se)))
		__clear_bit(rt_se_prio(rt_se), array->bitmap);

	rt_se->on_list = 0;
}

static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;
	struct rt_rq *group_rq = group_rt_rq(rt_se);
	struct list_head *queue = array->queue + rt_se_prio(rt_se);

	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
	if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
		if (rt_se->on_list)
			__delist_rt_entity(rt_se, array);
		return;
	}

	if (move_entity(flags)) {
		WARN_ON_ONCE(rt_se->on_list);
		if (flags & ENQUEUE_HEAD)
			list_add(&rt_se->run_list, queue);
		else
			list_add_tail(&rt_se->run_list, queue);

		__set_bit(rt_se_prio(rt_se), array->bitmap);
		rt_se->on_list = 1;
	}
	rt_se->on_rq = 1;

	inc_rt_tasks(rt_se, rt_rq);
}

static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
	struct rt_prio_array *array = &rt_rq->active;

	if (move_entity(flags)) {
		WARN_ON_ONCE(!rt_se->on_list);
		__delist_rt_entity(rt_se, array);
	}
	rt_se->on_rq = 0;

	dec_rt_tasks(rt_se, rt_rq);
}

/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top - down.
 */
static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct sched_rt_entity *back = NULL;

	for_each_sched_rt_entity(rt_se) {
		rt_se->back = back;
		back = rt_se;
	}

	dequeue_top_rt_rq(rt_rq_of_se(back));

	for (rt_se = back; rt_se; rt_se = rt_se->back) {
		if (on_rt_rq(rt_se))
			__dequeue_rt_entity(rt_se, flags);
	}
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	dequeue_rt_stack(rt_se, flags);
	for_each_sched_rt_entity(rt_se)
		__enqueue_rt_entity(rt_se, flags);
	enqueue_top_rt_rq(&rq->rt);
}

static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
{
	struct rq *rq = rq_of_rt_se(rt_se);

	dequeue_rt_stack(rt_se, flags);

	for_each_sched_rt_entity(rt_se) {
		struct rt_rq *rt_rq = group_rt_rq(rt_se);

		if (rt_rq && rt_rq->rt_nr_running)
			__enqueue_rt_entity(rt_se, flags);
	}
	enqueue_top_rt_rq(&rq->rt);
}

/*
 * Adding/removing a task to/from a priority array:
 */
static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	if (flags & ENQUEUE_WAKEUP)
		rt_se->timeout = 0;

	enqueue_rt_entity(rt_se, flags);

	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
	struct sched_rt_entity *rt_se = &p->rt;

	update_curr_rt(rq);
	dequeue_rt_entity(rt_se, flags);

	dequeue_pushable_task(rq, p);
}

/*
 * Put the task at the head or the tail of its run list without the
 * overhead of a dequeue followed by an enqueue.
 */
static void
requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
{
	if (on_rt_rq(rt_se)) {
		struct rt_prio_array *array = &rt_rq->active;
		struct list_head *queue = array->queue + rt_se_prio(rt_se);

		if (head)
			list_move(&rt_se->run_list, queue);
		else
			list_move_tail(&rt_se->run_list, queue);
	}
}

static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
{
	struct sched_rt_entity *rt_se = &p->rt;
	struct rt_rq *rt_rq;

	for_each_sched_rt_entity(rt_se) {
		rt_rq = rt_rq_of_se(rt_se);
		requeue_rt_entity(rt_rq, rt_se, head);
	}
}

static void yield_task_rt(struct rq *rq)
{
	requeue_task_rt(rq, rq->curr, 0);
}
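
/*
 * So sched_yield() from a SCHED_FIFO/SCHED_RR task simply rotates the
 * task to the tail of its own priority queue (head == 0 above): it
 * yields to equal-priority tasks only, and is effectively a no-op if
 * the task is alone at its priority.
 */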

#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);

static int
select_task_rq_rt(struct task_struct *p, int cpu, int flags)
{
	struct task_struct *curr;
	struct rq *rq;
	bool test;

	/* For anything but wake ups, just return the task_cpu */
	if (!(flags & (WF_TTWU | WF_FORK)))
		goto out;

	rq = cpu_rq(cpu);

	rcu_read_lock();
	curr = READ_ONCE(rq->curr); /* unlocked access */

	/*
	 * If the current task on @p's runqueue is an RT task, then
	 * try to see if we can wake this RT task up on another
	 * runqueue. Otherwise simply start this RT task
	 * on its current runqueue.
	 *
	 * We want to avoid overloading runqueues. If the woken
	 * task is of higher priority, then it will stay on this CPU
	 * and the lower prio task should be moved to another CPU.
	 * Even though this will probably make the lower prio task
	 * lose its cache, we do not want to bounce a higher-priority
	 * task around just because it gave up its CPU, perhaps for a
	 * lock?
	 *
	 * For equal prio tasks, we just let the scheduler sort it out.
	 *
	 * Otherwise, just let it ride on the affined RQ and the
	 * post-schedule router will push the preempted task away.
	 *
	 * This test is optimistic, if we get it wrong the load-balancer
	 * will have to sort it out.
	 *
	 * We take into account the capacity of the CPU to ensure it fits the
	 * requirement of the task - which is only important on heterogeneous
	 * systems like big.LITTLE.
	 */
	test = curr &&
	       unlikely(rt_task(curr)) &&
	       (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio);

	if (test || !rt_task_fits_capacity(p, cpu)) {
		int target = find_lowest_rq(p);

		/*
		 * Bail out if we were forcing a migration to find a better
		 * fitting CPU but our search failed.
		 */
		if (!test && target != -1 && !rt_task_fits_capacity(p, target))
			goto out_unlock;

		/*
		 * Don't bother moving it if the destination CPU is
		 * not running a lower priority task.
		 */
		if (target != -1 &&
		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
			cpu = target;
	}

out_unlock:
	rcu_read_unlock();

out:
	return cpu;
}

static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
	/*
	 * Current can't be migrated, useless to reschedule,
	 * let's hope p can move out.
	 */
	if (rq->curr->nr_cpus_allowed == 1 ||
	    !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
		return;

	/*
	 * p is migratable, so let's not schedule it and
	 * see if it is pushed or pulled somewhere else.
	 */
	if (p->nr_cpus_allowed != 1 &&
	    cpupri_find(&rq->rd->cpupri, p, NULL))
		return;

	/*
	 * There appear to be other CPUs that can accept
	 * the current task but none can run 'p', so let's reschedule
	 * to try and push the current task away:
	 */
	requeue_task_rt(rq, p, 1);
	resched_curr(rq);
}

static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
		/*
		 * This is OK, because current is on_cpu, which avoids it being
		 * picked for load-balance and preemption/IRQs are still
		 * disabled avoiding further scheduler activity on it and we've
		 * not yet started the picking loop.
		 */
		rq_unpin_lock(rq, rf);
		pull_rt_task(rq);
		rq_repin_lock(rq, rf);
	}

	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
}
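
/*
 * balance_rt() runs on the pick_next_task() path, before the RT pick:
 * if @p (the task about to be put) outranks everything still queued
 * here (need_pull_rt_task()), this rq's priority is about to drop, so
 * we try to pull runnable RT tasks from overloaded neighbours first,
 * ensuring the highest-priority runnable task ends up being picked.
 */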
#endif /* CONFIG_SMP */

/*
 * Preempt the current task with a newly woken task if needed:
 */
static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
{
	if (p->prio < rq->curr->prio) {
		resched_curr(rq);
		return;
	}

#ifdef CONFIG_SMP
	/*
	 * If:
	 *
	 * - the newly woken task is of equal priority to the current task
	 * - the newly woken task is non-migratable while current is migratable
	 * - current will be preempted on the next reschedule
	 *
	 * we should check to see if current can readily move to a different
	 * cpu. If so, we will reschedule to allow the push logic to try
	 * to move current somewhere else, making room for our non-migratable
	 * task.
	 */
	if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
		check_preempt_equal_prio(rq, p);
#endif
}

static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
{
	p->se.exec_start = rq_clock_task(rq);

	/* The running task is never eligible for pushing */
	dequeue_pushable_task(rq, p);

	if (!first)
		return;

	/*
	 * If the previous task was an RT task, put_prev_task() has already
	 * updated the utilization. We only care about the case where we
	 * start to schedule an RT task.
	 */
	if (rq->curr->sched_class != &rt_sched_class)
		update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);

	rt_queue_push_tasks(rq);
}

static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
						   struct rt_rq *rt_rq)
{
	struct rt_prio_array *array = &rt_rq->active;
	struct sched_rt_entity *next = NULL;
	struct list_head *queue;
	int idx;

	idx = sched_find_first_bit(array->bitmap);
	BUG_ON(idx >= MAX_RT_PRIO);

	queue = array->queue + idx;
	next = list_entry(queue->next, struct sched_rt_entity, run_list);

	return next;
}

static struct task_struct *_pick_next_task_rt(struct rq *rq)
{
	struct sched_rt_entity *rt_se;
	struct rt_rq *rt_rq = &rq->rt;

	do {
		rt_se = pick_next_rt_entity(rq, rt_rq);
		BUG_ON(!rt_se);
		rt_rq = group_rt_rq(rt_se);
	} while (rt_rq);

	return rt_task_of(rt_se);
}

static struct task_struct *pick_task_rt(struct rq *rq)
{
	struct task_struct *p;

	if (!sched_rt_runnable(rq))
		return NULL;

	p = _pick_next_task_rt(rq);

	return p;
}

static struct task_struct *pick_next_task_rt(struct rq *rq)
{
	struct task_struct *p = pick_task_rt(rq);

	if (p)
		set_next_task_rt(rq, p, true);

	return p;
}

static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
{
	update_curr_rt(rq);

	update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);

	/*
	 * The previous task needs to be made eligible for pushing
	 * if it is still active
	 */
	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
		enqueue_pushable_task(rq, p);
}

#ifdef CONFIG_SMP

/* Only try algorithms three times */
#define RT_MAX_TRIES 3

static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
	if (!task_running(rq, p) &&
	    cpumask_test_cpu(cpu, &p->cpus_mask))
		return 1;

	return 0;
}

/*
 * Return the highest-priority pushable task of @rq that can run on
 * @cpu, or NULL if there is none.
 */
static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
	struct plist_head *head = &rq->rt.pushable_tasks;
	struct task_struct *p;

	if (!has_pushable_tasks(rq))
		return NULL;

	plist_for_each_entry(p, head, pushable_tasks) {
		if (pick_rt_task(rq, p, cpu))
			return p;
	}

	return NULL;
}

static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);

static int find_lowest_rq(struct task_struct *task)
{
	struct sched_domain *sd;
	struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
	int this_cpu = smp_processor_id();
	int cpu = task_cpu(task);
	int ret;

	/* Make sure the mask is initialized first */
	if (unlikely(!lowest_mask))
		return -1;

	if (task->nr_cpus_allowed == 1)
		return -1; /* No other targets possible */

	/*
	 * If we're on an asym system, ensure we consider the different
	 * capacities of the CPUs when searching for the lowest_mask.
	 */
	if (static_branch_unlikely(&sched_asym_cpucapacity)) {

		ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
					  task, lowest_mask,
					  rt_task_fits_capacity);
	} else {

		ret = cpupri_find(&task_rq(task)->rd->cpupri,
				  task, lowest_mask);
	}

	if (!ret)
		return -1; /* No targets found */

	/*
	 * At this point we have built a mask of CPUs representing the
	 * lowest priority tasks in the system.  Now we want to elect
	 * the best one based on our affinity and topology.
	 *
	 * We prioritize the last CPU that the task executed on since
	 * it is most likely cache-hot in that location.
	 */
	if (cpumask_test_cpu(cpu, lowest_mask))
		return cpu;

	/*
	 * Otherwise, we consult the sched_domains span maps to figure
	 * out which CPU is logically closest to our hot cache data.
	 */
	if (!cpumask_test_cpu(this_cpu, lowest_mask))
		this_cpu = -1; /* Skip this_cpu opt if not among lowest */

	rcu_read_lock();
	for_each_domain(cpu, sd) {
		if (sd->flags & SD_WAKE_AFFINE) {
			int best_cpu;

			/*
			 * "this_cpu" is cheaper to preempt than a
			 * remote processor.
			 */
			if (this_cpu != -1 &&
			    cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
				rcu_read_unlock();
				return this_cpu;
			}

			best_cpu = cpumask_any_and_distribute(lowest_mask,
							      sched_domain_span(sd));
			if (best_cpu < nr_cpu_ids) {
				rcu_read_unlock();
				return best_cpu;
			}
		}
	}
	rcu_read_unlock();

	/*
	 * And finally, if there were no matches within the domains
	 * just give the caller *something* to work with from the compatible
	 * locations.
	 */
	if (this_cpu != -1)
		return this_cpu;

	cpu = cpumask_any_distribute(lowest_mask);
	if (cpu < nr_cpu_ids)
		return cpu;

	return -1;
}
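
/*
 * Selection order recap for the function above: (1) the task's current
 * CPU if it is in lowest_mask (cache-hot), (2) this_cpu if it shares a
 * WAKE_AFFINE domain with the task's CPU, (3) any lowest_mask CPU
 * inside such a domain, (4) this_cpu or any lowest_mask CPU as a last
 * resort, else -1.
 */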
1791
1792/* Will lock the rq it finds */
4df64c0b 1793static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
07b4032c
GH
1794{
1795 struct rq *lowest_rq = NULL;
07b4032c 1796 int tries;
4df64c0b 1797 int cpu;
e8fa1362 1798
07b4032c
GH
1799 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1800 cpu = find_lowest_rq(task);
1801
2de0b463 1802 if ((cpu == -1) || (cpu == rq->cpu))
e8fa1362
SR
1803 break;
1804
07b4032c
GH
1805 lowest_rq = cpu_rq(cpu);
1806
80e3d87b
TC
1807 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1808 /*
1809 * Target rq has tasks of equal or higher priority,
1810 * retrying does not release any lock and is unlikely
1811 * to yield a different result.
1812 */
1813 lowest_rq = NULL;
1814 break;
1815 }
1816
e8fa1362 1817 /* if the prio of this runqueue changed, try again */
07b4032c 1818 if (double_lock_balance(rq, lowest_rq)) {
e8fa1362
SR
1819 /*
1820 * We had to unlock the run queue. In
1821 * the mean time, task could have
1822 * migrated already or had its affinity changed.
1823 * Also make sure that it wasn't scheduled on its rq.
1824 */
07b4032c 1825 if (unlikely(task_rq(task) != rq ||
95158a89 1826 !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
07b4032c 1827 task_running(rq, task) ||
13b5ab02 1828 !rt_task(task) ||
da0c1e65 1829 !task_on_rq_queued(task))) {
4df64c0b 1830
7f1b4393 1831 double_unlock_balance(rq, lowest_rq);
e8fa1362
SR
1832 lowest_rq = NULL;
1833 break;
1834 }
1835 }
1836
1837 /* If this rq is still suitable use it. */
e864c499 1838 if (lowest_rq->rt.highest_prio.curr > task->prio)
e8fa1362
SR
1839 break;
1840
1841 /* try again */
1b12bbc7 1842 double_unlock_balance(rq, lowest_rq);
e8fa1362
SR
1843 lowest_rq = NULL;
1844 }
1845
1846 return lowest_rq;
1847}
1848
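/*
 * Editorial sketch (not kernel code): the lock-ordering-plus-revalidate
 * pattern that find_lock_lowest_rq() relies on, reduced to two pthread
 * mutexes. Like double_lock_balance(), taking the second lock may force
 * us to drop the first, so the caller must re-check its assumptions
 * afterwards. All names here are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>

/* Returns true if @a was dropped while acquiring @b (state may have changed). */
static bool double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b)
		return false;
	if (pthread_mutex_trylock(b) == 0)
		return false;		/* got both without dropping @a */
	/* Avoid deadlock: release @a, then take both in address order. */
	pthread_mutex_unlock(a);
	if (a < b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
	return true;			/* caller must revalidate its state */
}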
917b627d
GH
1849static struct task_struct *pick_next_pushable_task(struct rq *rq)
1850{
1851 struct task_struct *p;
1852
1853 if (!has_pushable_tasks(rq))
1854 return NULL;
1855
1856 p = plist_first_entry(&rq->rt.pushable_tasks,
1857 struct task_struct, pushable_tasks);
1858
1859 BUG_ON(rq->cpu != task_cpu(p));
1860 BUG_ON(task_current(rq, p));
4b53a341 1861 BUG_ON(p->nr_cpus_allowed <= 1);
917b627d 1862
da0c1e65 1863 BUG_ON(!task_on_rq_queued(p));
917b627d
GH
1864 BUG_ON(!rt_task(p));
1865
1866 return p;
1867}
1868
e8fa1362
SR
1869/*
1870 * If the current CPU has more than one RT task, see if the
1871 * non-running task can migrate over to a CPU that is running a task
1872 * of lesser priority.
1873 */
a7c81556 1874static int push_rt_task(struct rq *rq, bool pull)
e8fa1362
SR
1875{
1876 struct task_struct *next_task;
1877 struct rq *lowest_rq;
311e800e 1878 int ret = 0;
e8fa1362 1879
a22d7fc1
GH
1880 if (!rq->rt.overloaded)
1881 return 0;
1882
917b627d 1883 next_task = pick_next_pushable_task(rq);
e8fa1362
SR
1884 if (!next_task)
1885 return 0;
1886
49246274 1887retry:
a7c81556
PZ
1888 if (is_migration_disabled(next_task)) {
1889 struct task_struct *push_task = NULL;
1890 int cpu;
1891
1892 if (!pull || rq->push_busy)
1893 return 0;
1894
1895 cpu = find_lowest_rq(rq->curr);
1896 if (cpu == -1 || cpu == rq->cpu)
1897 return 0;
1898
1899 /*
1900 * We found a CPU with lower priority than @next_task, so
1901 * it should be running. However we cannot migrate it to
1902 * this other CPU; instead attempt to push away the task
1903 * currently running on this CPU.
1904 */
1905 push_task = get_push_task(rq);
1906 if (push_task) {
5cb9eaa3 1907 raw_spin_rq_unlock(rq);
a7c81556
PZ
1908 stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
1909 push_task, &rq->push_work);
5cb9eaa3 1910 raw_spin_rq_lock(rq);
a7c81556
PZ
1911 }
1912
1913 return 0;
1914 }
1915
9ebc6053 1916 if (WARN_ON(next_task == rq->curr))
e8fa1362
SR
1917 return 0;
1918
1919 /*
1920 * It's possible that the next_task slipped in with a
1921 * higher priority than current. If that's the case
1922 * just reschedule current.
1923 */
697f0a48 1924 if (unlikely(next_task->prio < rq->curr->prio)) {
8875125e 1925 resched_curr(rq);
e8fa1362
SR
1926 return 0;
1927 }
1928
697f0a48 1929 /* We might release rq lock */
e8fa1362
SR
1930 get_task_struct(next_task);
1931
1932 /* find_lock_lowest_rq locks the rq if found */
697f0a48 1933 lowest_rq = find_lock_lowest_rq(next_task, rq);
e8fa1362
SR
1934 if (!lowest_rq) {
1935 struct task_struct *task;
1936 /*
311e800e 1937 * find_lock_lowest_rq releases rq->lock
1563513d
GH
1938 * so it is possible that next_task has migrated.
1939 *
1940 * We need to make sure that the task is still on the same
1941 * run-queue and is also still the next task eligible for
1942 * pushing.
e8fa1362 1943 */
917b627d 1944 task = pick_next_pushable_task(rq);
de16b91e 1945 if (task == next_task) {
1563513d 1946 /*
311e800e
HD
1947 * The task hasn't migrated, and is still the next
1948 * eligible task, but we failed to find a run-queue
1949 * to push it to. Do not retry in this case, since
97fb7a0a 1950 * other CPUs will pull from us when ready.
1563513d 1951 */
1563513d 1952 goto out;
e8fa1362 1953 }
917b627d 1954
1563513d
GH
1955 if (!task)
1956 /* No more tasks, just exit */
1957 goto out;
1958
917b627d 1959 /*
1563513d 1960 * Something has shifted, try again.
917b627d 1961 */
1563513d
GH
1962 put_task_struct(next_task);
1963 next_task = task;
1964 goto retry;
e8fa1362
SR
1965 }
1966
697f0a48 1967 deactivate_task(rq, next_task, 0);
e8fa1362
SR
1968 set_task_cpu(next_task, lowest_rq->cpu);
1969 activate_task(lowest_rq, next_task, 0);
8875125e 1970 resched_curr(lowest_rq);
a7c81556 1971 ret = 1;
e8fa1362 1972
1b12bbc7 1973 double_unlock_balance(rq, lowest_rq);
e8fa1362
SR
1974out:
1975 put_task_struct(next_task);
1976
311e800e 1977 return ret;
e8fa1362
SR
1978}
1979
e8fa1362
SR
1980static void push_rt_tasks(struct rq *rq)
1981{
1982 /* push_rt_task() will return true if it moved an RT task */
a7c81556 1983 while (push_rt_task(rq, false))
e8fa1362
SR
1984 ;
1985}
1986
b6366f04 1987#ifdef HAVE_RT_PUSH_IPI
4bdced5c 1988
b6366f04 1989/*
4bdced5c
SRRH
1990 * When a high priority task schedules out from a CPU and a lower priority
1991 * task is scheduled in, a check is made to see if there's any RT tasks
1992 * on other CPUs that are waiting to run because a higher priority RT task
1993 * is currently running on its CPU. In this case, the CPU with multiple RT
1994 * tasks queued on it (overloaded) needs to be notified that a CPU has opened
1995 * up that may be able to run one of its non-running queued RT tasks.
1996 *
1997 * All CPUs with overloaded RT tasks need to be notified as there is currently
1998 * no way to know which of these CPUs have the highest priority task waiting
1999 * to run. Instead of trying to take a spinlock on each of these CPUs,
2000 * which has been shown to cause large latency when done on machines with
2001 * many CPUs, an IPI is sent to the CPUs to have them push off the
2002 * overloaded RT tasks waiting to run.
2003 *
2004 * Just sending an IPI to each of the CPUs is also an issue, as on large
2005 * count CPU machines, this can cause an IPI storm on a CPU, especially
2006 * if it's the only CPU with multiple RT tasks queued, and a large number
2007 * of CPUs are scheduling a lower priority task at the same time.
2008 *
2009 * Each root domain has its own irq work function that can iterate over
2010 * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
3b03706f 2011 * tasks must be checked whenever one or more CPUs are lowering
4bdced5c
SRRH
2012 * their priority, there's a single irq work iterator that will try to
2013 * push off RT tasks that are waiting to run.
2014 *
2015 * When a CPU schedules a lower priority task, it will kick off the
2016 * irq work iterator that will jump to each CPU with overloaded RT tasks.
2017 * As it only takes the first CPU that schedules a lower priority task
2018 * to start the process, the rto_start variable is incremented and if
2019 * the atomic result is one, then that CPU will try to take the rto_lock.
2020 * This prevents high contention on the lock as the process handles all
2021 * CPUs scheduling lower priority tasks.
2022 *
2023 * All CPUs that are scheduling a lower priority task will increment the
2024 * rto_loop_next variable. This will make sure that the irq work iterator
2025 * checks all RT overloaded CPUs whenever a CPU schedules a new lower
2026 * priority task, even if the iterator is in the middle of a scan. Incrementing
2027 * rto_loop_next will cause the iterator to perform another scan.
b6366f04 2028 *
b6366f04 2029 */
ad0f1d9d 2030static int rto_next_cpu(struct root_domain *rd)
b6366f04 2031{
4bdced5c 2032 int next;
b6366f04
SR
2033 int cpu;
2034
b6366f04 2035 /*
4bdced5c
SRRH
2036 * When starting the IPI RT pushing, rto_cpu is set to -1, and
2037 * rto_next_cpu() will simply return the first CPU found in
2038 * the rto_mask.
2039 *
97fb7a0a 2040 * If rto_next_cpu() is called while rto_cpu is a valid CPU, it
4bdced5c
SRRH
2041 * will return the next CPU found in the rto_mask.
2042 *
2043 * If there are no more CPUs left in the rto_mask, then a check is made
2044 * against rto_loop and rto_loop_next. rto_loop is only updated with
2045 * the rto_lock held, but any CPU may increment the rto_loop_next
2046 * without any locking.
b6366f04 2047 */
4bdced5c 2048 for (;;) {
b6366f04 2049
4bdced5c
SRRH
2050 /* When rto_cpu is -1 this acts like cpumask_first() */
2051 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
b6366f04 2052
4bdced5c 2053 rd->rto_cpu = cpu;
b6366f04 2054
4bdced5c
SRRH
2055 if (cpu < nr_cpu_ids)
2056 return cpu;
b6366f04 2057
4bdced5c
SRRH
2058 rd->rto_cpu = -1;
2059
2060 /*
2061 * ACQUIRE ensures we see the @rto_mask changes
2062 * made prior to the @next value observed.
2063 *
2064 * Matches WMB in rt_set_overload().
2065 */
2066 next = atomic_read_acquire(&rd->rto_loop_next);
b6366f04 2067
4bdced5c 2068 if (rd->rto_loop == next)
b6366f04 2069 break;
4bdced5c
SRRH
2070
2071 rd->rto_loop = next;
b6366f04
SR
2072 }
2073
4bdced5c 2074 return -1;
b6366f04
SR
2075}
2076
4bdced5c
SRRH
2077static inline bool rto_start_trylock(atomic_t *v)
2078{
2079 return !atomic_cmpxchg_acquire(v, 0, 1);
2080}
b6366f04 2081
4bdced5c 2082static inline void rto_start_unlock(atomic_t *v)
b6366f04 2083{
4bdced5c
SRRH
2084 atomic_set_release(v, 0);
2085}
b6366f04 2086
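/*
 * Editorial sketch (not kernel code): the same trylock idiom expressed
 * with C11 atomics. A successful 0->1 cmpxchg with acquire ordering
 * "locks"; a release store of 0 "unlocks". Failing the cmpxchg means
 * another CPU already owns the loop, which is exactly what
 * tell_cpu_to_push() wants to detect cheaply.
 */
#include <stdatomic.h>
#include <stdbool.h>

static bool start_trylock(atomic_int *v)
{
	int expected = 0;

	return atomic_compare_exchange_strong_explicit(v, &expected, 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static void start_unlock(atomic_int *v)
{
	atomic_store_explicit(v, 0, memory_order_release);
}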
4bdced5c
SRRH
2087static void tell_cpu_to_push(struct rq *rq)
2088{
2089 int cpu = -1;
b6366f04 2090
4bdced5c
SRRH
2091 /* Keep the loop going if the IPI is currently active */
2092 atomic_inc(&rq->rd->rto_loop_next);
b6366f04 2093
4bdced5c
SRRH
2094 /* Only one CPU can initiate a loop at a time */
2095 if (!rto_start_trylock(&rq->rd->rto_loop_start))
b6366f04
SR
2096 return;
2097
4bdced5c 2098 raw_spin_lock(&rq->rd->rto_lock);
b6366f04 2099
4bdced5c 2100 /*
97fb7a0a 2101 * The rto_cpu is updated under the lock; if it has a valid CPU
4bdced5c
SRRH
2102 * then the IPI is still running and will continue due to the
2103 * update to loop_next, and nothing needs to be done here.
2104 * Otherwise it is finishing up and an IPI needs to be sent.
2105 */
2106 if (rq->rd->rto_cpu < 0)
ad0f1d9d 2107 cpu = rto_next_cpu(rq->rd);
4bdced5c
SRRH
2108
2109 raw_spin_unlock(&rq->rd->rto_lock);
2110
2111 rto_start_unlock(&rq->rd->rto_loop_start);
2112
364f5665
SRV
2113 if (cpu >= 0) {
2114 /* Make sure the rd does not get freed while pushing */
2115 sched_get_rd(rq->rd);
4bdced5c 2116 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
364f5665 2117 }
b6366f04
SR
2118}
2119
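/*
 * Editorial sketch (not kernel code): the rto_loop/rto_loop_next rescan
 * protocol in miniature. Producers bump a version counter; the single
 * worker keeps scanning until the version it started with is still
 * current, so an update that races with a scan triggers another pass.
 * Names and the do_scan() hook are hypothetical.
 */
#include <stdatomic.h>

static atomic_int loop_next;

static void producer_kick(void)
{
	atomic_fetch_add_explicit(&loop_next, 1, memory_order_release);
}

static void worker(void (*do_scan)(void))
{
	int seen = atomic_load_explicit(&loop_next, memory_order_acquire);

	for (;;) {
		int now;

		do_scan();
		now = atomic_load_explicit(&loop_next, memory_order_acquire);
		if (now == seen)
			break;		/* nothing changed mid-scan: done */
		seen = now;		/* raced with a producer: rescan */
	}
}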
2120/* Called from hardirq context */
4bdced5c 2121void rto_push_irq_work_func(struct irq_work *work)
b6366f04 2122{
ad0f1d9d
SRV
2123 struct root_domain *rd =
2124 container_of(work, struct root_domain, rto_push_work);
4bdced5c 2125 struct rq *rq;
b6366f04
SR
2126 int cpu;
2127
4bdced5c 2128 rq = this_rq();
b6366f04 2129
4bdced5c
SRRH
2130 /*
2131 * We do not need to grab the lock to check for has_pushable_tasks.
2132 * When it gets updated, a check is made to see if a push is possible.
2133 */
b6366f04 2134 if (has_pushable_tasks(rq)) {
5cb9eaa3 2135 raw_spin_rq_lock(rq);
a7c81556
PZ
2136 while (push_rt_task(rq, true))
2137 ;
5cb9eaa3 2138 raw_spin_rq_unlock(rq);
b6366f04
SR
2139 }
2140
ad0f1d9d 2141 raw_spin_lock(&rd->rto_lock);
b6366f04 2142
4bdced5c 2143 /* Pass the IPI to the next rt overloaded queue */
ad0f1d9d 2144 cpu = rto_next_cpu(rd);
b6366f04 2145
ad0f1d9d 2146 raw_spin_unlock(&rd->rto_lock);
b6366f04 2147
364f5665
SRV
2148 if (cpu < 0) {
2149 sched_put_rd(rd);
b6366f04 2150 return;
364f5665 2151 }
b6366f04 2152
b6366f04 2153 /* Try the next RT overloaded CPU */
ad0f1d9d 2154 irq_work_queue_on(&rd->rto_push_work, cpu);
b6366f04
SR
2155}
2156#endif /* HAVE_RT_PUSH_IPI */
2157
8046d680 2158static void pull_rt_task(struct rq *this_rq)
f65eda4f 2159{
8046d680
PZ
2160 int this_cpu = this_rq->cpu, cpu;
2161 bool resched = false;
a7c81556 2162 struct task_struct *p, *push_task;
f65eda4f 2163 struct rq *src_rq;
f73c52a5 2164 int rt_overload_count = rt_overloaded(this_rq);
f65eda4f 2165
f73c52a5 2166 if (likely(!rt_overload_count))
8046d680 2167 return;
f65eda4f 2168
7c3f2ab7
PZ
2169 /*
2170 * Match the barrier from rt_set_overload(); this guarantees that if we
2171 * see overloaded we must also see the rto_mask bit.
2172 */
2173 smp_rmb();
2174
f73c52a5
SR
2175 /* If we are the only overloaded CPU do nothing */
2176 if (rt_overload_count == 1 &&
2177 cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2178 return;
2179
b6366f04
SR
2180#ifdef HAVE_RT_PUSH_IPI
2181 if (sched_feat(RT_PUSH_IPI)) {
2182 tell_cpu_to_push(this_rq);
8046d680 2183 return;
b6366f04
SR
2184 }
2185#endif
2186
c6c4927b 2187 for_each_cpu(cpu, this_rq->rd->rto_mask) {
f65eda4f
SR
2188 if (this_cpu == cpu)
2189 continue;
2190
2191 src_rq = cpu_rq(cpu);
74ab8e4f
GH
2192
2193 /*
2194 * Don't bother taking the src_rq->lock if the next highest
2195 * task is known to be lower-priority than our current task.
2196 * This may look racy, but if this value is about to go
2197 * logically higher, the src_rq will push this task away.
2198 * And if it's going logically lower, we do not care.
2199 */
2200 if (src_rq->rt.highest_prio.next >=
2201 this_rq->rt.highest_prio.curr)
2202 continue;
2203
f65eda4f
SR
2204 /*
2205 * We can potentially drop this_rq's lock in
2206 * double_lock_balance, and another CPU could
a8728944 2207 * alter this_rq
f65eda4f 2208 */
a7c81556 2209 push_task = NULL;
a8728944 2210 double_lock_balance(this_rq, src_rq);
f65eda4f
SR
2211
2212 /*
e23ee747
KT
2213 * We can only pull a task that is pushable
2214 * on its rq, and no others.
f65eda4f 2215 */
e23ee747 2216 p = pick_highest_pushable_task(src_rq, this_cpu);
f65eda4f
SR
2217
2218 /*
2219 * Do we have an RT task that preempts
2220 * the to-be-scheduled task?
2221 */
a8728944 2222 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
f65eda4f 2223 WARN_ON(p == src_rq->curr);
da0c1e65 2224 WARN_ON(!task_on_rq_queued(p));
f65eda4f
SR
2225
2226 /*
2227 * There's a chance that p is higher in priority
97fb7a0a 2228 * than what's currently running on its CPU.
3b03706f 2229 * This happens when p is waking up and hasn't
f65eda4f
SR
2230 * had a chance to schedule. We only pull
2231 * p if it is lower in priority than the
a8728944 2232 * current task on the run queue
f65eda4f 2233 */
a8728944 2234 if (p->prio < src_rq->curr->prio)
614ee1f6 2235 goto skip;
f65eda4f 2236
a7c81556
PZ
2237 if (is_migration_disabled(p)) {
2238 push_task = get_push_task(src_rq);
2239 } else {
2240 deactivate_task(src_rq, p, 0);
2241 set_task_cpu(p, this_cpu);
2242 activate_task(this_rq, p, 0);
2243 resched = true;
2244 }
f65eda4f
SR
2245 /*
2246 * We continue with the search, just in
2247 * case there's an even higher prio task
25985edc 2248 * in another runqueue. (low likelihood
f65eda4f 2249 * but possible)
f65eda4f 2250 */
f65eda4f 2251 }
49246274 2252skip:
1b12bbc7 2253 double_unlock_balance(this_rq, src_rq);
a7c81556
PZ
2254
2255 if (push_task) {
5cb9eaa3 2256 raw_spin_rq_unlock(this_rq);
a7c81556
PZ
2257 stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2258 push_task, &src_rq->push_work);
5cb9eaa3 2259 raw_spin_rq_lock(this_rq);
a7c81556 2260 }
f65eda4f
SR
2261 }
2262
8046d680
PZ
2263 if (resched)
2264 resched_curr(this_rq);
f65eda4f
SR
2265}
2266
8ae121ac
GH
2267/*
2268 * If we are not running and we are not going to reschedule soon, we should
2269 * try to push tasks away now
2270 */
efbbd05a 2271static void task_woken_rt(struct rq *rq, struct task_struct *p)
4642dafd 2272{
804d402f
QY
2273 bool need_to_push = !task_running(rq, p) &&
2274 !test_tsk_need_resched(rq->curr) &&
2275 p->nr_cpus_allowed > 1 &&
2276 (dl_task(rq->curr) || rt_task(rq->curr)) &&
2277 (rq->curr->nr_cpus_allowed < 2 ||
2278 rq->curr->prio <= p->prio);
2279
d94a9df4 2280 if (need_to_push)
4642dafd
SR
2281 push_rt_tasks(rq);
2282}
2283
bdd7c81b 2284/* Assumes rq->lock is held */
1f11eb6a 2285static void rq_online_rt(struct rq *rq)
bdd7c81b
IM
2286{
2287 if (rq->rt.overloaded)
2288 rt_set_overload(rq);
6e0534f2 2289
7def2be1
PZ
2290 __enable_runtime(rq);
2291
e864c499 2292 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
bdd7c81b
IM
2293}
2294
2295/* Assumes rq->lock is held */
1f11eb6a 2296static void rq_offline_rt(struct rq *rq)
bdd7c81b
IM
2297{
2298 if (rq->rt.overloaded)
2299 rt_clear_overload(rq);
6e0534f2 2300
7def2be1
PZ
2301 __disable_runtime(rq);
2302
6e0534f2 2303 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
bdd7c81b 2304}
cb469845
SR
2305
2306/*
2307 * When switching from the rt queue, we bring ourselves to a position
2308 * where we might want to pull RT tasks from other runqueues.
2309 */
da7a735e 2310static void switched_from_rt(struct rq *rq, struct task_struct *p)
cb469845
SR
2311{
2312 /*
2313 * If there are other RT tasks then we will reschedule
2314 * and the scheduling of the other RT tasks will handle
2315 * the balancing. But if we are the last RT task
2316 * we may need to handle the pulling of RT tasks
2317 * now.
2318 */
da0c1e65 2319 if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
1158ddb5
KT
2320 return;
2321
02d8ec94 2322 rt_queue_pull_task(rq);
cb469845 2323}
3d8cbdf8 2324
11c785b7 2325void __init init_sched_rt_class(void)
3d8cbdf8
RR
2326{
2327 unsigned int i;
2328
029632fb 2329 for_each_possible_cpu(i) {
eaa95840 2330 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
6ca09dfc 2331 GFP_KERNEL, cpu_to_node(i));
029632fb 2332 }
3d8cbdf8 2333}
cb469845
SR
2334#endif /* CONFIG_SMP */
2335
2336/*
2337 * When switching a task to RT, we may overload the runqueue
2338 * with RT tasks. In this case we try to push them off to
2339 * other runqueues.
2340 */
da7a735e 2341static void switched_to_rt(struct rq *rq, struct task_struct *p)
cb469845 2342{
cb469845 2343 /*
fecfcbc2
VD
2344 * If we are running, update the avg_rt tracking, as the running time
2345 * will from now on be accounted there.
2346 */
2347 if (task_current(rq, p)) {
2348 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2349 return;
2350 }
2351
2352 /*
2353 * If we are not running we may need to preempt the current
2354 * running task. If that current running task is also an RT task
cb469845
SR
2355 * then see if we can move to another run queue.
2356 */
fecfcbc2 2357 if (task_on_rq_queued(p)) {
cb469845 2358#ifdef CONFIG_SMP
d94a9df4 2359 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
02d8ec94 2360 rt_queue_push_tasks(rq);
619bd4a7 2361#endif /* CONFIG_SMP */
2fe25826 2362 if (p->prio < rq->curr->prio && cpu_online(cpu_of(rq)))
8875125e 2363 resched_curr(rq);
cb469845
SR
2364 }
2365}
2366
2367/*
2368 * Priority of the task has changed. This may cause
2369 * us to initiate a push or pull.
2370 */
da7a735e
PZ
2371static void
2372prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 2373{
da0c1e65 2374 if (!task_on_rq_queued(p))
da7a735e
PZ
2375 return;
2376
65bcf072 2377 if (task_current(rq, p)) {
cb469845
SR
2378#ifdef CONFIG_SMP
2379 /*
2380 * If our priority decreases while running, we
2381 * may need to pull tasks to this runqueue.
2382 */
2383 if (oldprio < p->prio)
02d8ec94 2384 rt_queue_pull_task(rq);
fd7a4bed 2385
cb469845
SR
2386 /*
2387 * If there's a higher priority task waiting to run
fd7a4bed 2388 * then reschedule.
cb469845 2389 */
fd7a4bed 2390 if (p->prio > rq->rt.highest_prio.curr)
8875125e 2391 resched_curr(rq);
cb469845
SR
2392#else
2393 /* For UP simply resched on drop of prio */
2394 if (oldprio < p->prio)
8875125e 2395 resched_curr(rq);
e8fa1362 2396#endif /* CONFIG_SMP */
cb469845
SR
2397 } else {
2398 /*
2399 * This task is not running, but if it is
2400 * greater than the current running task
2401 * then reschedule.
2402 */
2403 if (p->prio < rq->curr->prio)
8875125e 2404 resched_curr(rq);
cb469845
SR
2405 }
2406}
2407
b18b6a9c 2408#ifdef CONFIG_POSIX_TIMERS
78f2c7db
PZ
2409static void watchdog(struct rq *rq, struct task_struct *p)
2410{
2411 unsigned long soft, hard;
2412
78d7d407
JS
2413 /* max may change after cur was read; this will be fixed next tick */
2414 soft = task_rlimit(p, RLIMIT_RTTIME);
2415 hard = task_rlimit_max(p, RLIMIT_RTTIME);
78f2c7db
PZ
2416
2417 if (soft != RLIM_INFINITY) {
2418 unsigned long next;
2419
57d2aa00
YX
2420 if (p->rt.watchdog_stamp != jiffies) {
2421 p->rt.timeout++;
2422 p->rt.watchdog_stamp = jiffies;
2423 }
2424
78f2c7db 2425 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
3a245c0f
TG
2426 if (p->rt.timeout > next) {
2427 posix_cputimers_rt_watchdog(&p->posix_cputimers,
2428 p->se.sum_exec_runtime);
2429 }
78f2c7db
PZ
2430 }
2431}
b18b6a9c
NP
2432#else
2433static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2434#endif
bb44e5d1 2435
d84b3131
FW
2436/*
2437 * scheduler tick hitting a task of our scheduling class.
2438 *
2439 * NOTE: This function can be called remotely by the tick offload that
2440 * goes along full dynticks. Therefore no local assumption can be made
2441 * and everything must be accessed through the @rq and @curr passed in
2442 * parameters.
2443 */
8f4d37ec 2444static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
bb44e5d1 2445{
454c7999
CC
2446 struct sched_rt_entity *rt_se = &p->rt;
2447
67e2be02 2448 update_curr_rt(rq);
23127296 2449 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
67e2be02 2450
78f2c7db
PZ
2451 watchdog(rq, p);
2452
bb44e5d1
IM
2453 /*
2454 * RR tasks need a special form of timeslice management.
2455 * FIFO tasks have no timeslices.
2456 */
2457 if (p->policy != SCHED_RR)
2458 return;
2459
fa717060 2460 if (--p->rt.time_slice)
bb44e5d1
IM
2461 return;
2462
ce0dbbbb 2463 p->rt.time_slice = sched_rr_timeslice;
bb44e5d1 2464
98fbc798 2465 /*
e9aa39bb
LB
2466 * Requeue to the end of the queue if we (and all of our ancestors) are not
2467 * the only element on the queue
98fbc798 2468 */
454c7999
CC
2469 for_each_sched_rt_entity(rt_se) {
2470 if (rt_se->run_list.prev != rt_se->run_list.next) {
2471 requeue_task_rt(rq, p, 0);
8aa6f0eb 2472 resched_curr(rq);
454c7999
CC
2473 return;
2474 }
98fbc798 2475 }
bb44e5d1
IM
2476}
2477
6d686f45 2478static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
0d721cea
PW
2479{
2480 /*
2481 * Time slice is 0 for SCHED_FIFO tasks
2482 */
2483 if (task->policy == SCHED_RR)
ce0dbbbb 2484 return sched_rr_timeslice;
0d721cea
PW
2485 else
2486 return 0;
2487}
2488
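/*
 * Editorial aside: this hook is what user space observes through the
 * sched_rr_get_interval(2) syscall. A minimal demonstration -- a
 * SCHED_RR task sees the RR timeslice, a SCHED_FIFO task sees zero.
 */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts))	/* 0 == calling thread */
		perror("sched_rr_get_interval");
	else
		printf("timeslice: %ld.%09ld s\n",
		       (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}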
43c31ac0
PZ
2489DEFINE_SCHED_CLASS(rt) = {
2490
bb44e5d1
IM
2491 .enqueue_task = enqueue_task_rt,
2492 .dequeue_task = dequeue_task_rt,
2493 .yield_task = yield_task_rt,
2494
2495 .check_preempt_curr = check_preempt_curr_rt,
2496
2497 .pick_next_task = pick_next_task_rt,
2498 .put_prev_task = put_prev_task_rt,
03b7fad1 2499 .set_next_task = set_next_task_rt,
bb44e5d1 2500
681f3e68 2501#ifdef CONFIG_SMP
6e2df058 2502 .balance = balance_rt,
21f56ffe 2503 .pick_task = pick_task_rt,
4ce72a2c 2504 .select_task_rq = select_task_rq_rt,
6c37067e 2505 .set_cpus_allowed = set_cpus_allowed_common,
1f11eb6a
GH
2506 .rq_online = rq_online_rt,
2507 .rq_offline = rq_offline_rt,
efbbd05a 2508 .task_woken = task_woken_rt,
cb469845 2509 .switched_from = switched_from_rt,
a7c81556 2510 .find_lock_rq = find_lock_lowest_rq,
681f3e68 2511#endif
bb44e5d1
IM
2512
2513 .task_tick = task_tick_rt,
cb469845 2514
0d721cea
PW
2515 .get_rr_interval = get_rr_interval_rt,
2516
cb469845
SR
2517 .prio_changed = prio_changed_rt,
2518 .switched_to = switched_to_rt,
6e998916
SG
2519
2520 .update_curr = update_curr_rt,
982d9cdc
PB
2521
2522#ifdef CONFIG_UCLAMP_TASK
2523 .uclamp_enabled = 1,
2524#endif
bb44e5d1 2525};
ada18de2 2526
8887cd99
NP
2527#ifdef CONFIG_RT_GROUP_SCHED
2528/*
2529 * Ensure that the real time constraints are schedulable.
2530 */
2531static DEFINE_MUTEX(rt_constraints_mutex);
2532
8887cd99
NP
2533static inline int tg_has_rt_tasks(struct task_group *tg)
2534{
b4fb015e
KK
2535 struct task_struct *task;
2536 struct css_task_iter it;
2537 int ret = 0;
8887cd99
NP
2538
2539 /*
2540 * Autogroups do not have RT tasks; see autogroup_create().
2541 */
2542 if (task_group_is_autogroup(tg))
2543 return 0;
2544
b4fb015e
KK
2545 css_task_iter_start(&tg->css, 0, &it);
2546 while (!ret && (task = css_task_iter_next(&it)))
2547 ret |= rt_task(task);
2548 css_task_iter_end(&it);
8887cd99 2549
b4fb015e 2550 return ret;
8887cd99
NP
2551}
2552
2553struct rt_schedulable_data {
2554 struct task_group *tg;
2555 u64 rt_period;
2556 u64 rt_runtime;
2557};
2558
2559static int tg_rt_schedulable(struct task_group *tg, void *data)
2560{
2561 struct rt_schedulable_data *d = data;
2562 struct task_group *child;
2563 unsigned long total, sum = 0;
2564 u64 period, runtime;
2565
2566 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2567 runtime = tg->rt_bandwidth.rt_runtime;
2568
2569 if (tg == d->tg) {
2570 period = d->rt_period;
2571 runtime = d->rt_runtime;
2572 }
2573
2574 /*
2575 * Cannot have more runtime than the period.
2576 */
2577 if (runtime > period && runtime != RUNTIME_INF)
2578 return -EINVAL;
2579
2580 /*
b4fb015e 2581 * Ensure we don't starve existing RT tasks if runtime turns zero.
8887cd99 2582 */
b4fb015e
KK
2583 if (rt_bandwidth_enabled() && !runtime &&
2584 tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
8887cd99
NP
2585 return -EBUSY;
2586
2587 total = to_ratio(period, runtime);
2588
2589 /*
2590 * Nobody can have more than the global setting allows.
2591 */
2592 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2593 return -EINVAL;
2594
2595 /*
2596 * The sum of our children's runtime should not exceed our own.
2597 */
2598 list_for_each_entry_rcu(child, &tg->children, siblings) {
2599 period = ktime_to_ns(child->rt_bandwidth.rt_period);
2600 runtime = child->rt_bandwidth.rt_runtime;
2601
2602 if (child == d->tg) {
2603 period = d->rt_period;
2604 runtime = d->rt_runtime;
2605 }
2606
2607 sum += to_ratio(period, runtime);
2608 }
2609
2610 if (sum > total)
2611 return -EINVAL;
2612
2613 return 0;
2614}
2615
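/*
 * Editorial sketch (not kernel code): the fixed-point arithmetic behind
 * the checks above, assuming to_ratio() computes
 * (runtime << BW_SHIFT) / period with BW_SHIFT == 20. With the default
 * global limit of 0.95s runtime per 1s period, two sibling groups each
 * asking for 0.5s/1s would sum past the global ratio and be rejected.
 */
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT 20

static uint64_t to_ratio_sketch(uint64_t period_ns, uint64_t runtime_ns)
{
	return (runtime_ns << BW_SHIFT) / period_ns;
}

int main(void)
{
	uint64_t global = to_ratio_sketch(1000000000ULL, 950000000ULL);
	uint64_t child  = to_ratio_sketch(1000000000ULL, 500000000ULL);

	/* prints: global=996147 children=1048576 -> -EINVAL */
	printf("global=%llu children=%llu -> %s\n",
	       (unsigned long long)global,
	       (unsigned long long)(2 * child),
	       2 * child > global ? "-EINVAL" : "ok");
	return 0;
}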
2616static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2617{
2618 int ret;
2619
2620 struct rt_schedulable_data data = {
2621 .tg = tg,
2622 .rt_period = period,
2623 .rt_runtime = runtime,
2624 };
2625
2626 rcu_read_lock();
2627 ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2628 rcu_read_unlock();
2629
2630 return ret;
2631}
2632
2633static int tg_set_rt_bandwidth(struct task_group *tg,
2634 u64 rt_period, u64 rt_runtime)
2635{
2636 int i, err = 0;
2637
2638 /*
2639 * Disallowing the root group RT runtime is BAD; it would disallow the
2640 * kernel creating (and/or operating) RT threads.
2641 */
2642 if (tg == &root_task_group && rt_runtime == 0)
2643 return -EINVAL;
2644
2645 /* A zero period doesn't make any sense. */
2646 if (rt_period == 0)
2647 return -EINVAL;
2648
d505b8af
HC
2649 /*
2650 * Bound the quota to defend against overflow during the bandwidth shift.
2651 */
2652 if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2653 return -EINVAL;
2654
8887cd99 2655 mutex_lock(&rt_constraints_mutex);
8887cd99
NP
2656 err = __rt_schedulable(tg, rt_period, rt_runtime);
2657 if (err)
2658 goto unlock;
2659
2660 raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2661 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2662 tg->rt_bandwidth.rt_runtime = rt_runtime;
2663
2664 for_each_possible_cpu(i) {
2665 struct rt_rq *rt_rq = tg->rt_rq[i];
2666
2667 raw_spin_lock(&rt_rq->rt_runtime_lock);
2668 rt_rq->rt_runtime = rt_runtime;
2669 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2670 }
2671 raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2672unlock:
8887cd99
NP
2673 mutex_unlock(&rt_constraints_mutex);
2674
2675 return err;
2676}
2677
2678int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2679{
2680 u64 rt_runtime, rt_period;
2681
2682 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2683 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2684 if (rt_runtime_us < 0)
2685 rt_runtime = RUNTIME_INF;
1a010e29
KK
2686 else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2687 return -EINVAL;
8887cd99
NP
2688
2689 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2690}
2691
2692long sched_group_rt_runtime(struct task_group *tg)
2693{
2694 u64 rt_runtime_us;
2695
2696 if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2697 return -1;
2698
2699 rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2700 do_div(rt_runtime_us, NSEC_PER_USEC);
2701 return rt_runtime_us;
2702}
2703
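/*
 * Editorial aside: with CONFIG_RT_GROUP_SCHED these setters back the
 * cgroup v1 cpu controller files. A hedged sketch, assuming a v1
 * hierarchy mounted at /sys/fs/cgroup/cpu and an existing group "rtg":
 * writing cpu.rt_runtime_us ends up in sched_group_set_rt_runtime().
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/cpu/rtg/cpu.rt_runtime_us", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 500000);	/* 0.5s of RT runtime per period */
	return fclose(f) ? 1 : 0;
}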
2704int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2705{
2706 u64 rt_runtime, rt_period;
2707
1a010e29
KK
2708 if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2709 return -EINVAL;
2710
8887cd99
NP
2711 rt_period = rt_period_us * NSEC_PER_USEC;
2712 rt_runtime = tg->rt_bandwidth.rt_runtime;
2713
2714 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2715}
2716
2717long sched_group_rt_period(struct task_group *tg)
2718{
2719 u64 rt_period_us;
2720
2721 rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2722 do_div(rt_period_us, NSEC_PER_USEC);
2723 return rt_period_us;
2724}
2725
2726static int sched_rt_global_constraints(void)
2727{
2728 int ret = 0;
2729
2730 mutex_lock(&rt_constraints_mutex);
8887cd99 2731 ret = __rt_schedulable(NULL, 0, 0);
8887cd99
NP
2732 mutex_unlock(&rt_constraints_mutex);
2733
2734 return ret;
2735}
2736
2737int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2738{
2739 /* Don't accept realtime tasks when there is no way for them to run */
2740 if (rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2741 return 0;
2742
2743 return 1;
2744}
2745
2746#else /* !CONFIG_RT_GROUP_SCHED */
2747static int sched_rt_global_constraints(void)
2748{
2749 unsigned long flags;
2750 int i;
2751
2752 raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
2753 for_each_possible_cpu(i) {
2754 struct rt_rq *rt_rq = &cpu_rq(i)->rt;
2755
2756 raw_spin_lock(&rt_rq->rt_runtime_lock);
2757 rt_rq->rt_runtime = global_rt_runtime();
2758 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2759 }
2760 raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
2761
2762 return 0;
2763}
2764#endif /* CONFIG_RT_GROUP_SCHED */
2765
2766static int sched_rt_global_validate(void)
2767{
2768 if (sysctl_sched_rt_period <= 0)
2769 return -EINVAL;
2770
2771 if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
d505b8af
HC
2772 ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2773 ((u64)sysctl_sched_rt_runtime *
2774 NSEC_PER_USEC > max_rt_runtime)))
8887cd99
NP
2775 return -EINVAL;
2776
2777 return 0;
2778}
2779
2780static void sched_rt_do_global(void)
2781{
2782 def_rt_bandwidth.rt_runtime = global_rt_runtime();
2783 def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
2784}
2785
32927393
CH
2786int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
2787 size_t *lenp, loff_t *ppos)
8887cd99
NP
2788{
2789 int old_period, old_runtime;
2790 static DEFINE_MUTEX(mutex);
2791 int ret;
2792
2793 mutex_lock(&mutex);
2794 old_period = sysctl_sched_rt_period;
2795 old_runtime = sysctl_sched_rt_runtime;
2796
2797 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2798
2799 if (!ret && write) {
2800 ret = sched_rt_global_validate();
2801 if (ret)
2802 goto undo;
2803
2804 ret = sched_dl_global_validate();
2805 if (ret)
2806 goto undo;
2807
2808 ret = sched_rt_global_constraints();
2809 if (ret)
2810 goto undo;
2811
2812 sched_rt_do_global();
2813 sched_dl_do_global();
2814 }
2815 if (0) {
2816undo:
2817 sysctl_sched_rt_period = old_period;
2818 sysctl_sched_rt_runtime = old_runtime;
2819 }
2820 mutex_unlock(&mutex);
2821
2822 return ret;
2823}
2824
32927393
CH
2825int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
2826 size_t *lenp, loff_t *ppos)
8887cd99
NP
2827{
2828 int ret;
2829 static DEFINE_MUTEX(mutex);
2830
2831 mutex_lock(&mutex);
2832 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2833 /*
2834 * Make sure that internally we keep jiffies.
2835 * Also, writing zero resets the timeslice to the default:
2836 */
2837 if (!ret && write) {
2838 sched_rr_timeslice =
2839 sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
2840 msecs_to_jiffies(sysctl_sched_rr_timeslice);
2841 }
2842 mutex_unlock(&mutex);
97fb7a0a 2843
8887cd99
NP
2844 return ret;
2845}
2846
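/*
 * Editorial aside: sched_rr_handler() is reached through procfs. A
 * hedged user-space sketch: writing a millisecond value to
 * /proc/sys/kernel/sched_rr_timeslice_ms updates sched_rr_timeslice
 * (converted to jiffies), and writing 0 restores RR_TIMESLICE.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/sched_rr_timeslice_ms", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "%d\n", 100);	/* 100ms RR timeslice; 0 = default */
	return fclose(f) ? 1 : 0;
}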
ada18de2 2847#ifdef CONFIG_SCHED_DEBUG
029632fb 2848void print_rt_stats(struct seq_file *m, int cpu)
ada18de2 2849{
ec514c48 2850 rt_rq_iter_t iter;
ada18de2
PZ
2851 struct rt_rq *rt_rq;
2852
2853 rcu_read_lock();
ec514c48 2854 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
ada18de2
PZ
2855 print_rt_rq(m, cpu, rt_rq);
2856 rcu_read_unlock();
2857}
55e12e5e 2858#endif /* CONFIG_SCHED_DEBUG */