kernel/sched/rt.c (linux-block.git, commit e40422c3703352e72f3c61145fdc20fd70738ecc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
4  * policies)
5  */
6
7 int sched_rr_timeslice = RR_TIMESLICE;
8 /* More than 4 hours if BW_SHIFT equals 20. */
9 static const u64 max_rt_runtime = MAX_BW;
10
11 /*
12  * period over which we measure -rt task CPU usage in us.
13  * default: 1s
14  */
15 int sysctl_sched_rt_period = 1000000;
16
17 /*
18  * part of the period that we allow rt tasks to run in us.
19  * default: 0.95s
20  */
21 int sysctl_sched_rt_runtime = 950000;
22
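/*
 * Editor's note (illustrative, not part of the kernel source): with the
 * defaults above, RT tasks as a whole may consume at most 0.95s of CPU time
 * in every 1s period, leaving 5% of each CPU for non-RT work.  Both knobs
 * are exposed through the sysctl table below, e.g. from a shell:
 *
 *   # cat /proc/sys/kernel/sched_rt_period_us          -> 1000000
 *   # cat /proc/sys/kernel/sched_rt_runtime_us         -> 950000
 *   # echo -1 > /proc/sys/kernel/sched_rt_runtime_us      (disable throttling)
 */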
23 #ifdef CONFIG_SYSCTL
24 static int sysctl_sched_rr_timeslice = (MSEC_PER_SEC * RR_TIMESLICE) / HZ;
25 static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
26                 size_t *lenp, loff_t *ppos);
27 static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
28                 size_t *lenp, loff_t *ppos);
29 static const struct ctl_table sched_rt_sysctls[] = {
30         {
31                 .procname       = "sched_rt_period_us",
32                 .data           = &sysctl_sched_rt_period,
33                 .maxlen         = sizeof(int),
34                 .mode           = 0644,
35                 .proc_handler   = sched_rt_handler,
36                 .extra1         = SYSCTL_ONE,
37                 .extra2         = SYSCTL_INT_MAX,
38         },
39         {
40                 .procname       = "sched_rt_runtime_us",
41                 .data           = &sysctl_sched_rt_runtime,
42                 .maxlen         = sizeof(int),
43                 .mode           = 0644,
44                 .proc_handler   = sched_rt_handler,
45                 .extra1         = SYSCTL_NEG_ONE,
46                 .extra2         = (void *)&sysctl_sched_rt_period,
47         },
48         {
49                 .procname       = "sched_rr_timeslice_ms",
50                 .data           = &sysctl_sched_rr_timeslice,
51                 .maxlen         = sizeof(int),
52                 .mode           = 0644,
53                 .proc_handler   = sched_rr_handler,
54         },
55 };
56
57 static int __init sched_rt_sysctl_init(void)
58 {
59         register_sysctl_init("kernel", sched_rt_sysctls);
60         return 0;
61 }
62 late_initcall(sched_rt_sysctl_init);
63 #endif
64
65 void init_rt_rq(struct rt_rq *rt_rq)
66 {
67         struct rt_prio_array *array;
68         int i;
69
70         array = &rt_rq->active;
71         for (i = 0; i < MAX_RT_PRIO; i++) {
72                 INIT_LIST_HEAD(array->queue + i);
73                 __clear_bit(i, array->bitmap);
74         }
75         /* delimiter for bitsearch: */
76         __set_bit(MAX_RT_PRIO, array->bitmap);
77
78 #if defined CONFIG_SMP
79         rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
80         rt_rq->highest_prio.next = MAX_RT_PRIO-1;
81         rt_rq->overloaded = 0;
82         plist_head_init(&rt_rq->pushable_tasks);
83 #endif /* CONFIG_SMP */
84         /* We start in dequeued state, because no RT tasks are queued */
85         rt_rq->rt_queued = 0;
86
87 #ifdef CONFIG_RT_GROUP_SCHED
88         rt_rq->rt_time = 0;
89         rt_rq->rt_throttled = 0;
90         rt_rq->rt_runtime = 0;
91         raw_spin_lock_init(&rt_rq->rt_runtime_lock);
92         rt_rq->tg = &root_task_group;
93 #endif
94 }
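/*
 * Editor's note (hedged sketch, not kernel code): the extra bit set at
 * MAX_RT_PRIO above is a sentinel, so a search over the priority bitmap
 * always terminates on a set bit.  Roughly:
 *
 *   int idx = sched_find_first_bit(array->bitmap);
 *   if (idx < MAX_RT_PRIO)
 *           ;       // idx = best (numerically lowest) queued RT priority
 *   else
 *           ;       // only the delimiter found: no RT task is queued here
 */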
95
96 #ifdef CONFIG_RT_GROUP_SCHED
97
98 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);
99
100 static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
101 {
102         struct rt_bandwidth *rt_b =
103                 container_of(timer, struct rt_bandwidth, rt_period_timer);
104         int idle = 0;
105         int overrun;
106
107         raw_spin_lock(&rt_b->rt_runtime_lock);
108         for (;;) {
109                 overrun = hrtimer_forward_now(timer, rt_b->rt_period);
110                 if (!overrun)
111                         break;
112
113                 raw_spin_unlock(&rt_b->rt_runtime_lock);
114                 idle = do_sched_rt_period_timer(rt_b, overrun);
115                 raw_spin_lock(&rt_b->rt_runtime_lock);
116         }
117         if (idle)
118                 rt_b->rt_period_active = 0;
119         raw_spin_unlock(&rt_b->rt_runtime_lock);
120
121         return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
122 }
123
124 void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
125 {
126         rt_b->rt_period = ns_to_ktime(period);
127         rt_b->rt_runtime = runtime;
128
129         raw_spin_lock_init(&rt_b->rt_runtime_lock);
130
131         hrtimer_setup(&rt_b->rt_period_timer, sched_rt_period_timer, CLOCK_MONOTONIC,
132                       HRTIMER_MODE_REL_HARD);
133 }
134
135 static inline void do_start_rt_bandwidth(struct rt_bandwidth *rt_b)
136 {
137         raw_spin_lock(&rt_b->rt_runtime_lock);
138         if (!rt_b->rt_period_active) {
139                 rt_b->rt_period_active = 1;
140                 /*
141                  * SCHED_DEADLINE updates the bandwidth, as a runaway
142                  * RT task running alongside a DL task could hog a CPU. But DL does
143                  * not reset the period. If a deadline task was running
144                  * without an RT task running, it can cause RT tasks to
145                  * throttle when they start up. Kick the timer right away
146                  * to update the period.
147                  */
148                 hrtimer_forward_now(&rt_b->rt_period_timer, ns_to_ktime(0));
149                 hrtimer_start_expires(&rt_b->rt_period_timer,
150                                       HRTIMER_MODE_ABS_PINNED_HARD);
151         }
152         raw_spin_unlock(&rt_b->rt_runtime_lock);
153 }
154
155 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
156 {
157         if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
158                 return;
159
160         do_start_rt_bandwidth(rt_b);
161 }
162
163 static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
164 {
165         hrtimer_cancel(&rt_b->rt_period_timer);
166 }
167
168 #define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
169
170 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
171 {
172         WARN_ON_ONCE(!rt_entity_is_task(rt_se));
173
174         return container_of(rt_se, struct task_struct, rt);
175 }
176
177 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
178 {
179         /* Cannot fold with non-CONFIG_RT_GROUP_SCHED version, layout */
180         WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
181         return rt_rq->rq;
182 }
183
184 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185 {
186         WARN_ON(!rt_group_sched_enabled() && rt_se->rt_rq->tg != &root_task_group);
187         return rt_se->rt_rq;
188 }
189
190 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
191 {
192         struct rt_rq *rt_rq = rt_se->rt_rq;
193
194         WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
195         return rt_rq->rq;
196 }
197
198 void unregister_rt_sched_group(struct task_group *tg)
199 {
200         if (!rt_group_sched_enabled())
201                 return;
202
203         if (tg->rt_se)
204                 destroy_rt_bandwidth(&tg->rt_bandwidth);
205 }
206
207 void free_rt_sched_group(struct task_group *tg)
208 {
209         int i;
210
211         if (!rt_group_sched_enabled())
212                 return;
213
214         for_each_possible_cpu(i) {
215                 if (tg->rt_rq)
216                         kfree(tg->rt_rq[i]);
217                 if (tg->rt_se)
218                         kfree(tg->rt_se[i]);
219         }
220
221         kfree(tg->rt_rq);
222         kfree(tg->rt_se);
223 }
224
225 void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
226                 struct sched_rt_entity *rt_se, int cpu,
227                 struct sched_rt_entity *parent)
228 {
229         struct rq *rq = cpu_rq(cpu);
230
231         rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
232         rt_rq->rt_nr_boosted = 0;
233         rt_rq->rq = rq;
234         rt_rq->tg = tg;
235
236         tg->rt_rq[cpu] = rt_rq;
237         tg->rt_se[cpu] = rt_se;
238
239         if (!rt_se)
240                 return;
241
242         if (!parent)
243                 rt_se->rt_rq = &rq->rt;
244         else
245                 rt_se->rt_rq = parent->my_q;
246
247         rt_se->my_q = rt_rq;
248         rt_se->parent = parent;
249         INIT_LIST_HEAD(&rt_se->run_list);
250 }
251
252 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
253 {
254         struct rt_rq *rt_rq;
255         struct sched_rt_entity *rt_se;
256         int i;
257
258         if (!rt_group_sched_enabled())
259                 return 1;
260
261         tg->rt_rq = kcalloc(nr_cpu_ids, sizeof(rt_rq), GFP_KERNEL);
262         if (!tg->rt_rq)
263                 goto err;
264         tg->rt_se = kcalloc(nr_cpu_ids, sizeof(rt_se), GFP_KERNEL);
265         if (!tg->rt_se)
266                 goto err;
267
268         init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(global_rt_period()), 0);
269
270         for_each_possible_cpu(i) {
271                 rt_rq = kzalloc_node(sizeof(struct rt_rq),
272                                      GFP_KERNEL, cpu_to_node(i));
273                 if (!rt_rq)
274                         goto err;
275
276                 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
277                                      GFP_KERNEL, cpu_to_node(i));
278                 if (!rt_se)
279                         goto err_free_rq;
280
281                 init_rt_rq(rt_rq);
282                 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
283                 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
284         }
285
286         return 1;
287
288 err_free_rq:
289         kfree(rt_rq);
290 err:
291         return 0;
292 }
293
294 #else /* CONFIG_RT_GROUP_SCHED */
295
296 #define rt_entity_is_task(rt_se) (1)
297
298 static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
299 {
300         return container_of(rt_se, struct task_struct, rt);
301 }
302
303 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
304 {
305         return container_of(rt_rq, struct rq, rt);
306 }
307
308 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se)
309 {
310         struct task_struct *p = rt_task_of(rt_se);
311
312         return task_rq(p);
313 }
314
315 static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
316 {
317         struct rq *rq = rq_of_rt_se(rt_se);
318
319         return &rq->rt;
320 }
321
322 void unregister_rt_sched_group(struct task_group *tg) { }
323
324 void free_rt_sched_group(struct task_group *tg) { }
325
326 int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
327 {
328         return 1;
329 }
330 #endif /* CONFIG_RT_GROUP_SCHED */
331
332 #ifdef CONFIG_SMP
333
334 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
335 {
336         /* Try to pull RT tasks here if we lower this rq's prio */
337         return rq->online && rq->rt.highest_prio.curr > prev->prio;
338 }
339
340 static inline int rt_overloaded(struct rq *rq)
341 {
342         return atomic_read(&rq->rd->rto_count);
343 }
344
345 static inline void rt_set_overload(struct rq *rq)
346 {
347         if (!rq->online)
348                 return;
349
350         cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
351         /*
352          * Make sure the mask is visible before we set
353          * the overload count. That is checked to determine
354          * if we should look at the mask. It would be a shame
355          * if we looked at the mask, but the mask was not
356          * updated yet.
357          *
358          * Matched by the barrier in pull_rt_task().
359          */
360         smp_wmb();
361         atomic_inc(&rq->rd->rto_count);
362 }
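/*
 * Editor's note (hedged, describing the pull path that is not shown in this
 * excerpt): the smp_wmb() above pairs with a read barrier on the consumer
 * side, roughly:
 *
 *   if (likely(!rt_overloaded(this_rq)))
 *           return;
 *   smp_rmb();      // if rto_count was seen, rto_mask must be visible too
 *   for_each_cpu(cpu, this_rq->rd->rto_mask) { ... }
 *
 * so a CPU that observes the incremented rto_count is guaranteed to also
 * observe the corresponding bit in rto_mask.
 */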
363
364 static inline void rt_clear_overload(struct rq *rq)
365 {
366         if (!rq->online)
367                 return;
368
369         /* the order here really doesn't matter */
370         atomic_dec(&rq->rd->rto_count);
371         cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
372 }
373
374 static inline int has_pushable_tasks(struct rq *rq)
375 {
376         return !plist_head_empty(&rq->rt.pushable_tasks);
377 }
378
379 static DEFINE_PER_CPU(struct balance_callback, rt_push_head);
380 static DEFINE_PER_CPU(struct balance_callback, rt_pull_head);
381
382 static void push_rt_tasks(struct rq *);
383 static void pull_rt_task(struct rq *);
384
385 static inline void rt_queue_push_tasks(struct rq *rq)
386 {
387         if (!has_pushable_tasks(rq))
388                 return;
389
390         queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
391 }
392
393 static inline void rt_queue_pull_task(struct rq *rq)
394 {
395         queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
396 }
397
398 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
399 {
400         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
401         plist_node_init(&p->pushable_tasks, p->prio);
402         plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
403
404         /* Update the highest prio pushable task */
405         if (p->prio < rq->rt.highest_prio.next)
406                 rq->rt.highest_prio.next = p->prio;
407
408         if (!rq->rt.overloaded) {
409                 rt_set_overload(rq);
410                 rq->rt.overloaded = 1;
411         }
412 }
413
414 static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
415 {
416         plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
417
418         /* Update the new highest prio pushable task */
419         if (has_pushable_tasks(rq)) {
420                 p = plist_first_entry(&rq->rt.pushable_tasks,
421                                       struct task_struct, pushable_tasks);
422                 rq->rt.highest_prio.next = p->prio;
423         } else {
424                 rq->rt.highest_prio.next = MAX_RT_PRIO-1;
425
426                 if (rq->rt.overloaded) {
427                         rt_clear_overload(rq);
428                         rq->rt.overloaded = 0;
429                 }
430         }
431 }
432
433 #else
434
435 static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
436 {
437 }
438
439 static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
440 {
441 }
442
443 static inline void rt_queue_push_tasks(struct rq *rq)
444 {
445 }
446 #endif /* CONFIG_SMP */
447
448 static void enqueue_top_rt_rq(struct rt_rq *rt_rq);
449 static void dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count);
450
451 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
452 {
453         return rt_se->on_rq;
454 }
455
456 #ifdef CONFIG_UCLAMP_TASK
457 /*
458  * Verify the fitness of task @p to run on @cpu taking into account the uclamp
459  * settings.
460  *
461  * This check is only important for heterogeneous systems where the uclamp_min
462  * value is higher than the capacity of @cpu. For non-heterogeneous systems this
463  * function will always return true.
464  *
465  * The function will return true if the capacity of the @cpu is >= the
466  * uclamp_min and false otherwise.
467  *
468  * Note that uclamp_min will be clamped to uclamp_max if uclamp_min
469  * > uclamp_max.
470  */
471 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
472 {
473         unsigned int min_cap;
474         unsigned int max_cap;
475         unsigned int cpu_cap;
476
477         /* Only heterogeneous systems can benefit from this check */
478         if (!sched_asym_cpucap_active())
479                 return true;
480
481         min_cap = uclamp_eff_value(p, UCLAMP_MIN);
482         max_cap = uclamp_eff_value(p, UCLAMP_MAX);
483
484         cpu_cap = arch_scale_cpu_capacity(cpu);
485
486         return cpu_cap >= min(min_cap, max_cap);
487 }
488 #else
489 static inline bool rt_task_fits_capacity(struct task_struct *p, int cpu)
490 {
491         return true;
492 }
493 #endif
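/*
 * Editor's note (worked example with hypothetical capacities): on an
 * asymmetric system with a little CPU of capacity 446 and a big CPU of
 * capacity 1024, a task with uclamp_min = 512 and uclamp_max = 1024 fits
 * only the big CPU (1024 >= 512 but 446 < 512).  If the same task is capped
 * to uclamp_max = 300, then min(512, 300) = 300 and it fits both CPUs.
 */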
494
495 #ifdef CONFIG_RT_GROUP_SCHED
496
497 static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
498 {
499         return rt_rq->rt_runtime;
500 }
501
502 static inline u64 sched_rt_period(struct rt_rq *rt_rq)
503 {
504         return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
505 }
506
507 typedef struct task_group *rt_rq_iter_t;
508
509 static inline struct task_group *next_task_group(struct task_group *tg)
510 {
511         if (!rt_group_sched_enabled()) {
512                 WARN_ON(tg != &root_task_group);
513                 return NULL;
514         }
515
516         do {
517                 tg = list_entry_rcu(tg->list.next,
518                         typeof(struct task_group), list);
519         } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
520
521         if (&tg->list == &task_groups)
522                 tg = NULL;
523
524         return tg;
525 }
526
527 #define for_each_rt_rq(rt_rq, iter, rq)                                 \
528         for (iter = &root_task_group;                                   \
529                 iter && (rt_rq = iter->rt_rq[cpu_of(rq)]);              \
530                 iter = next_task_group(iter))
531
532 #define for_each_sched_rt_entity(rt_se) \
533         for (; rt_se; rt_se = rt_se->parent)
534
535 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
536 {
537         return rt_se->my_q;
538 }
539
540 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
541 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags);
542
543 static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
544 {
545         struct task_struct *donor = rq_of_rt_rq(rt_rq)->donor;
546         struct rq *rq = rq_of_rt_rq(rt_rq);
547         struct sched_rt_entity *rt_se;
548
549         int cpu = cpu_of(rq);
550
551         rt_se = rt_rq->tg->rt_se[cpu];
552
553         if (rt_rq->rt_nr_running) {
554                 if (!rt_se)
555                         enqueue_top_rt_rq(rt_rq);
556                 else if (!on_rt_rq(rt_se))
557                         enqueue_rt_entity(rt_se, 0);
558
559                 if (rt_rq->highest_prio.curr < donor->prio)
560                         resched_curr(rq);
561         }
562 }
563
564 static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
565 {
566         struct sched_rt_entity *rt_se;
567         int cpu = cpu_of(rq_of_rt_rq(rt_rq));
568
569         rt_se = rt_rq->tg->rt_se[cpu];
570
571         if (!rt_se) {
572                 dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
573                 /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
574                 cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
575         }
576         else if (on_rt_rq(rt_se))
577                 dequeue_rt_entity(rt_se, 0);
578 }
579
580 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
581 {
582         return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
583 }
584
585 static int rt_se_boosted(struct sched_rt_entity *rt_se)
586 {
587         struct rt_rq *rt_rq = group_rt_rq(rt_se);
588         struct task_struct *p;
589
590         if (rt_rq)
591                 return !!rt_rq->rt_nr_boosted;
592
593         p = rt_task_of(rt_se);
594         return p->prio != p->normal_prio;
595 }
596
597 #ifdef CONFIG_SMP
598 static inline const struct cpumask *sched_rt_period_mask(void)
599 {
600         return this_rq()->rd->span;
601 }
602 #else
603 static inline const struct cpumask *sched_rt_period_mask(void)
604 {
605         return cpu_online_mask;
606 }
607 #endif
608
609 static inline
610 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
611 {
612         return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
613 }
614
615 static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
616 {
617         return &rt_rq->tg->rt_bandwidth;
618 }
619
620 bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
621 {
622         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
623
624         return (hrtimer_active(&rt_b->rt_period_timer) ||
625                 rt_rq->rt_time < rt_b->rt_runtime);
626 }
627
628 #ifdef CONFIG_SMP
629 /*
630  * We ran out of runtime, see if we can borrow some from our neighbours.
631  */
632 static void do_balance_runtime(struct rt_rq *rt_rq)
633 {
634         struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
635         struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
636         int i, weight;
637         u64 rt_period;
638
639         weight = cpumask_weight(rd->span);
640
641         raw_spin_lock(&rt_b->rt_runtime_lock);
642         rt_period = ktime_to_ns(rt_b->rt_period);
643         for_each_cpu(i, rd->span) {
644                 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
645                 s64 diff;
646
647                 if (iter == rt_rq)
648                         continue;
649
650                 raw_spin_lock(&iter->rt_runtime_lock);
651                 /*
652                  * Either all rqs have inf runtime and there's nothing to steal
653                  * or __disable_runtime() below sets a specific rq to inf to
654                  * indicate it's been disabled and disallow stealing.
655                  */
656                 if (iter->rt_runtime == RUNTIME_INF)
657                         goto next;
658
659                 /*
660                  * From runqueues with spare time, take 1/n part of their
661                  * spare time, but no more than our period.
662                  */
663                 diff = iter->rt_runtime - iter->rt_time;
664                 if (diff > 0) {
665                         diff = div_u64((u64)diff, weight);
666                         if (rt_rq->rt_runtime + diff > rt_period)
667                                 diff = rt_period - rt_rq->rt_runtime;
668                         iter->rt_runtime -= diff;
669                         rt_rq->rt_runtime += diff;
670                         if (rt_rq->rt_runtime == rt_period) {
671                                 raw_spin_unlock(&iter->rt_runtime_lock);
672                                 break;
673                         }
674                 }
675 next:
676                 raw_spin_unlock(&iter->rt_runtime_lock);
677         }
678         raw_spin_unlock(&rt_b->rt_runtime_lock);
679 }
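/*
 * Editor's note (worked example, hypothetical numbers): in a 4-CPU root
 * domain (weight = 4) with a 1000ms period, suppose our rt_rq has
 * rt_runtime = 950ms, fully consumed, and a neighbour has rt_runtime = 950ms
 * with rt_time = 550ms, i.e. diff = 400ms of spare time.  We would take
 * diff / weight = 100ms, but the transfer is clamped so our runtime never
 * exceeds the period, so we take only 1000 - 950 = 50ms: the neighbour drops
 * to 900ms and we stop the scan at a full 1000ms of runtime.
 */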
680
681 /*
682  * Ensure this RQ takes back all the runtime it lent to its neighbours.
683  */
684 static void __disable_runtime(struct rq *rq)
685 {
686         struct root_domain *rd = rq->rd;
687         rt_rq_iter_t iter;
688         struct rt_rq *rt_rq;
689
690         if (unlikely(!scheduler_running))
691                 return;
692
693         for_each_rt_rq(rt_rq, iter, rq) {
694                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
695                 s64 want;
696                 int i;
697
698                 raw_spin_lock(&rt_b->rt_runtime_lock);
699                 raw_spin_lock(&rt_rq->rt_runtime_lock);
700                 /*
701                  * Either we're all inf and nobody needs to borrow, or we're
702                  * already disabled and thus have nothing to do, or we have
703                  * exactly the right amount of runtime to take out.
704                  */
705                 if (rt_rq->rt_runtime == RUNTIME_INF ||
706                                 rt_rq->rt_runtime == rt_b->rt_runtime)
707                         goto balanced;
708                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
709
710                 /*
711                  * Calculate the difference between what we started out with
712                  * and what we currently have; that's the amount of runtime
713                  * we lent out and now have to reclaim.
714                  */
715                 want = rt_b->rt_runtime - rt_rq->rt_runtime;
716
717                 /*
718                  * Greedy reclaim, take back as much as we can.
719                  */
720                 for_each_cpu(i, rd->span) {
721                         struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
722                         s64 diff;
723
724                         /*
725                          * Can't reclaim from ourselves or disabled runqueues.
726                          */
727                         if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
728                                 continue;
729
730                         raw_spin_lock(&iter->rt_runtime_lock);
731                         if (want > 0) {
732                                 diff = min_t(s64, iter->rt_runtime, want);
733                                 iter->rt_runtime -= diff;
734                                 want -= diff;
735                         } else {
736                                 iter->rt_runtime -= want;
737                                 want -= want;
738                         }
739                         raw_spin_unlock(&iter->rt_runtime_lock);
740
741                         if (!want)
742                                 break;
743                 }
744
745                 raw_spin_lock(&rt_rq->rt_runtime_lock);
746                 /*
747                  * We cannot be left wanting - that would mean some runtime
748                  * leaked out of the system.
749                  */
750                 WARN_ON_ONCE(want);
751 balanced:
752                 /*
753                  * Disable all the borrow logic by pretending we have inf
754                  * runtime - in which case borrowing doesn't make sense.
755                  */
756                 rt_rq->rt_runtime = RUNTIME_INF;
757                 rt_rq->rt_throttled = 0;
758                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
759                 raw_spin_unlock(&rt_b->rt_runtime_lock);
760
761                 /* Make rt_rq available for pick_next_task() */
762                 sched_rt_rq_enqueue(rt_rq);
763         }
764 }
765
766 static void __enable_runtime(struct rq *rq)
767 {
768         rt_rq_iter_t iter;
769         struct rt_rq *rt_rq;
770
771         if (unlikely(!scheduler_running))
772                 return;
773
774         /*
775          * Reset each runqueue's bandwidth settings
776          */
777         for_each_rt_rq(rt_rq, iter, rq) {
778                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
779
780                 raw_spin_lock(&rt_b->rt_runtime_lock);
781                 raw_spin_lock(&rt_rq->rt_runtime_lock);
782                 rt_rq->rt_runtime = rt_b->rt_runtime;
783                 rt_rq->rt_time = 0;
784                 rt_rq->rt_throttled = 0;
785                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
786                 raw_spin_unlock(&rt_b->rt_runtime_lock);
787         }
788 }
789
790 static void balance_runtime(struct rt_rq *rt_rq)
791 {
792         if (!sched_feat(RT_RUNTIME_SHARE))
793                 return;
794
795         if (rt_rq->rt_time > rt_rq->rt_runtime) {
796                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
797                 do_balance_runtime(rt_rq);
798                 raw_spin_lock(&rt_rq->rt_runtime_lock);
799         }
800 }
801 #else /* !CONFIG_SMP */
802 static inline void balance_runtime(struct rt_rq *rt_rq) {}
803 #endif /* CONFIG_SMP */
804
805 static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
806 {
807         int i, idle = 1, throttled = 0;
808         const struct cpumask *span;
809
810         span = sched_rt_period_mask();
811
812         /*
813          * FIXME: isolated CPUs should really leave the root task group,
814          * whether they are isolcpus or were isolated via cpusets, lest
815          * the timer run on a CPU which does not service all runqueues,
816          * potentially leaving other CPUs indefinitely throttled.  If
817          * isolation is really required, the user will turn the throttle
818          * off to kill the perturbations it causes anyway.  Meanwhile,
819          * this maintains functionality for boot and/or troubleshooting.
820          */
821         if (rt_b == &root_task_group.rt_bandwidth)
822                 span = cpu_online_mask;
823
824         for_each_cpu(i, span) {
825                 int enqueue = 0;
826                 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
827                 struct rq *rq = rq_of_rt_rq(rt_rq);
828                 struct rq_flags rf;
829                 int skip;
830
831                 /*
832                  * When span == cpu_online_mask, taking each rq->lock
833                  * can be time-consuming. Try to avoid it when possible.
834                  */
835                 raw_spin_lock(&rt_rq->rt_runtime_lock);
836                 if (!sched_feat(RT_RUNTIME_SHARE) && rt_rq->rt_runtime != RUNTIME_INF)
837                         rt_rq->rt_runtime = rt_b->rt_runtime;
838                 skip = !rt_rq->rt_time && !rt_rq->rt_nr_running;
839                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
840                 if (skip)
841                         continue;
842
843                 rq_lock(rq, &rf);
844                 update_rq_clock(rq);
845
846                 if (rt_rq->rt_time) {
847                         u64 runtime;
848
849                         raw_spin_lock(&rt_rq->rt_runtime_lock);
850                         if (rt_rq->rt_throttled)
851                                 balance_runtime(rt_rq);
852                         runtime = rt_rq->rt_runtime;
853                         rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
854                         if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
855                                 rt_rq->rt_throttled = 0;
856                                 enqueue = 1;
857
858                                 /*
859                                  * When we're idle and a woken (rt) task is
860                                  * throttled, wakeup_preempt() will set
861                                  * skip_update and the time between the wakeup
862                                  * and this unthrottle will get accounted as
863                                  * 'runtime'.
864                                  */
865                                 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
866                                         rq_clock_cancel_skipupdate(rq);
867                         }
868                         if (rt_rq->rt_time || rt_rq->rt_nr_running)
869                                 idle = 0;
870                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
871                 } else if (rt_rq->rt_nr_running) {
872                         idle = 0;
873                         if (!rt_rq_throttled(rt_rq))
874                                 enqueue = 1;
875                 }
876                 if (rt_rq->rt_throttled)
877                         throttled = 1;
878
879                 if (enqueue)
880                         sched_rt_rq_enqueue(rt_rq);
881                 rq_unlock(rq, &rf);
882         }
883
884         if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
885                 return 1;
886
887         return idle;
888 }
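/*
 * Editor's note (worked example, hypothetical numbers): say rt_runtime is
 * 950ms per 1000ms period and a runqueue was throttled with rt_time = 960ms.
 * On the next timer expiry (overrun = 1) the loop above computes
 *
 *   rt_time -= min(rt_time, overrun * runtime);    // 960 - 950 = 10ms
 *
 * and because the runqueue was throttled and rt_time (10ms) is now below
 * runtime (950ms), it is unthrottled and re-enqueued via sched_rt_rq_enqueue().
 */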
889
890 static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
891 {
892         u64 runtime = sched_rt_runtime(rt_rq);
893
894         if (rt_rq->rt_throttled)
895                 return rt_rq_throttled(rt_rq);
896
897         if (runtime >= sched_rt_period(rt_rq))
898                 return 0;
899
900         balance_runtime(rt_rq);
901         runtime = sched_rt_runtime(rt_rq);
902         if (runtime == RUNTIME_INF)
903                 return 0;
904
905         if (rt_rq->rt_time > runtime) {
906                 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
907
908                 /*
909                  * Don't actually throttle groups that have no runtime assigned
910                  * but accrue some time due to boosting.
911                  */
912                 if (likely(rt_b->rt_runtime)) {
913                         rt_rq->rt_throttled = 1;
914                         printk_deferred_once("sched: RT throttling activated\n");
915                 } else {
916                         /*
917                          * In case we did anyway, make it go away:
918                          * replenishment is a joke, since it will replenish us
919                          * with exactly 0 ns.
920                          */
921                         rt_rq->rt_time = 0;
922                 }
923
924                 if (rt_rq_throttled(rt_rq)) {
925                         sched_rt_rq_dequeue(rt_rq);
926                         return 1;
927                 }
928         }
929
930         return 0;
931 }
932
933 #else /* !CONFIG_RT_GROUP_SCHED */
934
935 typedef struct rt_rq *rt_rq_iter_t;
936
937 #define for_each_rt_rq(rt_rq, iter, rq) \
938         for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
939
940 #define for_each_sched_rt_entity(rt_se) \
941         for (; rt_se; rt_se = NULL)
942
943 static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
944 {
945         return NULL;
946 }
947
948 static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
949 {
950         struct rq *rq = rq_of_rt_rq(rt_rq);
951
952         if (!rt_rq->rt_nr_running)
953                 return;
954
955         enqueue_top_rt_rq(rt_rq);
956         resched_curr(rq);
957 }
958
959 static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
960 {
961         dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running);
962 }
963
964 static inline int rt_rq_throttled(struct rt_rq *rt_rq)
965 {
966         return false;
967 }
968
969 static inline const struct cpumask *sched_rt_period_mask(void)
970 {
971         return cpu_online_mask;
972 }
973
974 static inline
975 struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
976 {
977         return &cpu_rq(cpu)->rt;
978 }
979
980 #ifdef CONFIG_SMP
981 static void __enable_runtime(struct rq *rq) { }
982 static void __disable_runtime(struct rq *rq) { }
983 #endif
984
985 #endif /* CONFIG_RT_GROUP_SCHED */
986
987 static inline int rt_se_prio(struct sched_rt_entity *rt_se)
988 {
989 #ifdef CONFIG_RT_GROUP_SCHED
990         struct rt_rq *rt_rq = group_rt_rq(rt_se);
991
992         if (rt_rq)
993                 return rt_rq->highest_prio.curr;
994 #endif
995
996         return rt_task_of(rt_se)->prio;
997 }
998
999 /*
1000  * Update the current task's runtime statistics. Skip current tasks that
1001  * are not in our scheduling class.
1002  */
1003 static void update_curr_rt(struct rq *rq)
1004 {
1005         struct task_struct *donor = rq->donor;
1006         s64 delta_exec;
1007
1008         if (donor->sched_class != &rt_sched_class)
1009                 return;
1010
1011         delta_exec = update_curr_common(rq);
1012         if (unlikely(delta_exec <= 0))
1013                 return;
1014
1015 #ifdef CONFIG_RT_GROUP_SCHED
1016         struct sched_rt_entity *rt_se = &donor->rt;
1017
1018         if (!rt_bandwidth_enabled())
1019                 return;
1020
1021         for_each_sched_rt_entity(rt_se) {
1022                 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1023                 int exceeded;
1024
1025                 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
1026                         raw_spin_lock(&rt_rq->rt_runtime_lock);
1027                         rt_rq->rt_time += delta_exec;
1028                         exceeded = sched_rt_runtime_exceeded(rt_rq);
1029                         if (exceeded)
1030                                 resched_curr(rq);
1031                         raw_spin_unlock(&rt_rq->rt_runtime_lock);
1032                         if (exceeded)
1033                                 do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq));
1034                 }
1035         }
1036 #endif
1037 }
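/*
 * Editor's note (hedged illustration of the loop above, using a hypothetical
 * group "A" that is a child of the root group): for a task running in "A",
 * for_each_sched_rt_entity() visits two entities, roughly:
 *
 *   &p->rt                 -> rt_rq_of_se() is A's per-CPU rt_rq
 *   A's sched_rt_entity    -> rt_rq_of_se() is the root rt_rq
 *
 * so delta_exec is charged at both levels, and either level can throttle
 * independently once its own rt_runtime is exceeded.
 */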
1038
1039 static void
1040 dequeue_top_rt_rq(struct rt_rq *rt_rq, unsigned int count)
1041 {
1042         struct rq *rq = rq_of_rt_rq(rt_rq);
1043
1044         BUG_ON(&rq->rt != rt_rq);
1045
1046         if (!rt_rq->rt_queued)
1047                 return;
1048
1049         BUG_ON(!rq->nr_running);
1050
1051         sub_nr_running(rq, count);
1052         rt_rq->rt_queued = 0;
1053
1054 }
1055
1056 static void
1057 enqueue_top_rt_rq(struct rt_rq *rt_rq)
1058 {
1059         struct rq *rq = rq_of_rt_rq(rt_rq);
1060
1061         BUG_ON(&rq->rt != rt_rq);
1062
1063         if (rt_rq->rt_queued)
1064                 return;
1065
1066         if (rt_rq_throttled(rt_rq))
1067                 return;
1068
1069         if (rt_rq->rt_nr_running) {
1070                 add_nr_running(rq, rt_rq->rt_nr_running);
1071                 rt_rq->rt_queued = 1;
1072         }
1073
1074         /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
1075         cpufreq_update_util(rq, 0);
1076 }
1077
1078 #if defined CONFIG_SMP
1079
1080 static void
1081 inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1082 {
1083         struct rq *rq = rq_of_rt_rq(rt_rq);
1084
1085         /*
1086          * Change rq's cpupri only if rt_rq is the top queue.
1087          */
1088         if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && &rq->rt != rt_rq)
1089                 return;
1090
1091         if (rq->online && prio < prev_prio)
1092                 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
1093 }
1094
1095 static void
1096 dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
1097 {
1098         struct rq *rq = rq_of_rt_rq(rt_rq);
1099
1100         /*
1101          * Change rq's cpupri only if rt_rq is the top queue.
1102          */
1103         if (IS_ENABLED(CONFIG_RT_GROUP_SCHED) && &rq->rt != rt_rq)
1104                 return;
1105
1106         if (rq->online && rt_rq->highest_prio.curr != prev_prio)
1107                 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
1108 }
1109
1110 #else /* CONFIG_SMP */
1111
1112 static inline
1113 void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1114 static inline
1115 void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
1116
1117 #endif /* CONFIG_SMP */
1118
1119 #if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
1120 static void
1121 inc_rt_prio(struct rt_rq *rt_rq, int prio)
1122 {
1123         int prev_prio = rt_rq->highest_prio.curr;
1124
1125         if (prio < prev_prio)
1126                 rt_rq->highest_prio.curr = prio;
1127
1128         inc_rt_prio_smp(rt_rq, prio, prev_prio);
1129 }
1130
1131 static void
1132 dec_rt_prio(struct rt_rq *rt_rq, int prio)
1133 {
1134         int prev_prio = rt_rq->highest_prio.curr;
1135
1136         if (rt_rq->rt_nr_running) {
1137
1138                 WARN_ON(prio < prev_prio);
1139
1140                 /*
1141                  * This may have been our highest task, and therefore
1142                  * we may have some re-computation to do
1143                  */
1144                 if (prio == prev_prio) {
1145                         struct rt_prio_array *array = &rt_rq->active;
1146
1147                         rt_rq->highest_prio.curr =
1148                                 sched_find_first_bit(array->bitmap);
1149                 }
1150
1151         } else {
1152                 rt_rq->highest_prio.curr = MAX_RT_PRIO-1;
1153         }
1154
1155         dec_rt_prio_smp(rt_rq, prio, prev_prio);
1156 }
1157
1158 #else
1159
1160 static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
1161 static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
1162
1163 #endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
1164
1165 #ifdef CONFIG_RT_GROUP_SCHED
1166
1167 static void
1168 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1169 {
1170         if (rt_se_boosted(rt_se))
1171                 rt_rq->rt_nr_boosted++;
1172
1173         start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1174 }
1175
1176 static void
1177 dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1178 {
1179         if (rt_se_boosted(rt_se))
1180                 rt_rq->rt_nr_boosted--;
1181
1182         WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
1183 }
1184
1185 #else /* CONFIG_RT_GROUP_SCHED */
1186
1187 static void
1188 inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1189 {
1190 }
1191
1192 static inline
1193 void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1194
1195 #endif /* CONFIG_RT_GROUP_SCHED */
1196
1197 static inline
1198 unsigned int rt_se_nr_running(struct sched_rt_entity *rt_se)
1199 {
1200         struct rt_rq *group_rq = group_rt_rq(rt_se);
1201
1202         if (group_rq)
1203                 return group_rq->rt_nr_running;
1204         else
1205                 return 1;
1206 }
1207
1208 static inline
1209 unsigned int rt_se_rr_nr_running(struct sched_rt_entity *rt_se)
1210 {
1211         struct rt_rq *group_rq = group_rt_rq(rt_se);
1212         struct task_struct *tsk;
1213
1214         if (group_rq)
1215                 return group_rq->rr_nr_running;
1216
1217         tsk = rt_task_of(rt_se);
1218
1219         return (tsk->policy == SCHED_RR) ? 1 : 0;
1220 }
1221
1222 static inline
1223 void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1224 {
1225         int prio = rt_se_prio(rt_se);
1226
1227         WARN_ON(!rt_prio(prio));
1228         rt_rq->rt_nr_running += rt_se_nr_running(rt_se);
1229         rt_rq->rr_nr_running += rt_se_rr_nr_running(rt_se);
1230
1231         inc_rt_prio(rt_rq, prio);
1232         inc_rt_group(rt_se, rt_rq);
1233 }
1234
1235 static inline
1236 void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1237 {
1238         WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1239         WARN_ON(!rt_rq->rt_nr_running);
1240         rt_rq->rt_nr_running -= rt_se_nr_running(rt_se);
1241         rt_rq->rr_nr_running -= rt_se_rr_nr_running(rt_se);
1242
1243         dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1244         dec_rt_group(rt_se, rt_rq);
1245 }
1246
1247 /*
1248  * Change rt_se->run_list location unless SAVE && !MOVE
1249  *
1250  * assumes ENQUEUE/DEQUEUE flags match
1251  */
1252 static inline bool move_entity(unsigned int flags)
1253 {
1254         if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
1255                 return false;
1256
1257         return true;
1258 }
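/*
 * Editor's note (hedged summary of the helper above): the only flag
 * combination that leaves rt_se->run_list untouched is SAVE without MOVE:
 *
 *   (flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE   -> keep position
 *   neither flag (plain dequeue/enqueue)                      -> move
 *   DEQUEUE_SAVE | DEQUEUE_MOVE                               -> move
 *
 * which lets dequeue/enqueue pairs preserve a task's position in its queue
 * while attributes are changed in place.
 */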
1259
1260 static void __delist_rt_entity(struct sched_rt_entity *rt_se, struct rt_prio_array *array)
1261 {
1262         list_del_init(&rt_se->run_list);
1263
1264         if (list_empty(array->queue + rt_se_prio(rt_se)))
1265                 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1266
1267         rt_se->on_list = 0;
1268 }
1269
1270 static inline struct sched_statistics *
1271 __schedstats_from_rt_se(struct sched_rt_entity *rt_se)
1272 {
1273         /* schedstats is not supported for rt group. */
1274         if (!rt_entity_is_task(rt_se))
1275                 return NULL;
1276
1277         return &rt_task_of(rt_se)->stats;
1278 }
1279
1280 static inline void
1281 update_stats_wait_start_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1282 {
1283         struct sched_statistics *stats;
1284         struct task_struct *p = NULL;
1285
1286         if (!schedstat_enabled())
1287                 return;
1288
1289         if (rt_entity_is_task(rt_se))
1290                 p = rt_task_of(rt_se);
1291
1292         stats = __schedstats_from_rt_se(rt_se);
1293         if (!stats)
1294                 return;
1295
1296         __update_stats_wait_start(rq_of_rt_rq(rt_rq), p, stats);
1297 }
1298
1299 static inline void
1300 update_stats_enqueue_sleeper_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1301 {
1302         struct sched_statistics *stats;
1303         struct task_struct *p = NULL;
1304
1305         if (!schedstat_enabled())
1306                 return;
1307
1308         if (rt_entity_is_task(rt_se))
1309                 p = rt_task_of(rt_se);
1310
1311         stats = __schedstats_from_rt_se(rt_se);
1312         if (!stats)
1313                 return;
1314
1315         __update_stats_enqueue_sleeper(rq_of_rt_rq(rt_rq), p, stats);
1316 }
1317
1318 static inline void
1319 update_stats_enqueue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1320                         int flags)
1321 {
1322         if (!schedstat_enabled())
1323                 return;
1324
1325         if (flags & ENQUEUE_WAKEUP)
1326                 update_stats_enqueue_sleeper_rt(rt_rq, rt_se);
1327 }
1328
1329 static inline void
1330 update_stats_wait_end_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
1331 {
1332         struct sched_statistics *stats;
1333         struct task_struct *p = NULL;
1334
1335         if (!schedstat_enabled())
1336                 return;
1337
1338         if (rt_entity_is_task(rt_se))
1339                 p = rt_task_of(rt_se);
1340
1341         stats = __schedstats_from_rt_se(rt_se);
1342         if (!stats)
1343                 return;
1344
1345         __update_stats_wait_end(rq_of_rt_rq(rt_rq), p, stats);
1346 }
1347
1348 static inline void
1349 update_stats_dequeue_rt(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se,
1350                         int flags)
1351 {
1352         struct task_struct *p = NULL;
1353
1354         if (!schedstat_enabled())
1355                 return;
1356
1357         if (rt_entity_is_task(rt_se))
1358                 p = rt_task_of(rt_se);
1359
1360         if ((flags & DEQUEUE_SLEEP) && p) {
1361                 unsigned int state;
1362
1363                 state = READ_ONCE(p->__state);
1364                 if (state & TASK_INTERRUPTIBLE)
1365                         __schedstat_set(p->stats.sleep_start,
1366                                         rq_clock(rq_of_rt_rq(rt_rq)));
1367
1368                 if (state & TASK_UNINTERRUPTIBLE)
1369                         __schedstat_set(p->stats.block_start,
1370                                         rq_clock(rq_of_rt_rq(rt_rq)));
1371         }
1372 }
1373
1374 static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1375 {
1376         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1377         struct rt_prio_array *array = &rt_rq->active;
1378         struct rt_rq *group_rq = group_rt_rq(rt_se);
1379         struct list_head *queue = array->queue + rt_se_prio(rt_se);
1380
1381         /*
1382          * Don't enqueue the group if it's throttled, or when empty.
1383          * The latter is a consequence of the former when a child group
1384          * gets throttled and the current group doesn't have any other
1385          * active members.
1386          */
1387         if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) {
1388                 if (rt_se->on_list)
1389                         __delist_rt_entity(rt_se, array);
1390                 return;
1391         }
1392
1393         if (move_entity(flags)) {
1394                 WARN_ON_ONCE(rt_se->on_list);
1395                 if (flags & ENQUEUE_HEAD)
1396                         list_add(&rt_se->run_list, queue);
1397                 else
1398                         list_add_tail(&rt_se->run_list, queue);
1399
1400                 __set_bit(rt_se_prio(rt_se), array->bitmap);
1401                 rt_se->on_list = 1;
1402         }
1403         rt_se->on_rq = 1;
1404
1405         inc_rt_tasks(rt_se, rt_rq);
1406 }
1407
1408 static void __dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1409 {
1410         struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1411         struct rt_prio_array *array = &rt_rq->active;
1412
1413         if (move_entity(flags)) {
1414                 WARN_ON_ONCE(!rt_se->on_list);
1415                 __delist_rt_entity(rt_se, array);
1416         }
1417         rt_se->on_rq = 0;
1418
1419         dec_rt_tasks(rt_se, rt_rq);
1420 }
1421
1422 /*
1423  * Because the prio of an upper entry depends on the lower
1424  * entries, we must remove entries top-down.
1425  */
1426 static void dequeue_rt_stack(struct sched_rt_entity *rt_se, unsigned int flags)
1427 {
1428         struct sched_rt_entity *back = NULL;
1429         unsigned int rt_nr_running;
1430
1431         for_each_sched_rt_entity(rt_se) {
1432                 rt_se->back = back;
1433                 back = rt_se;
1434         }
1435
1436         rt_nr_running = rt_rq_of_se(back)->rt_nr_running;
1437
1438         for (rt_se = back; rt_se; rt_se = rt_se->back) {
1439                 if (on_rt_rq(rt_se))
1440                         __dequeue_rt_entity(rt_se, flags);
1441         }
1442
1443         dequeue_top_rt_rq(rt_rq_of_se(back), rt_nr_running);
1444 }
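/*
 * Editor's note (hedged illustration, hypothetical group "A" under the root):
 * for a task queued in "A", the back-pointer walk above dequeues top-down,
 * roughly:
 *
 *   1) A's sched_rt_entity is removed from the root rt_rq
 *   2) the task's sched_rt_entity is removed from A's rt_rq
 *
 * since a group entity's priority is derived from its group's highest queued
 * child, removing parents first keeps those recorded priorities consistent.
 */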
1445
1446 static void enqueue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1447 {
1448         struct rq *rq = rq_of_rt_se(rt_se);
1449
1450         update_stats_enqueue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1451
1452         dequeue_rt_stack(rt_se, flags);
1453         for_each_sched_rt_entity(rt_se)
1454                 __enqueue_rt_entity(rt_se, flags);
1455         enqueue_top_rt_rq(&rq->rt);
1456 }
1457
1458 static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
1459 {
1460         struct rq *rq = rq_of_rt_se(rt_se);
1461
1462         update_stats_dequeue_rt(rt_rq_of_se(rt_se), rt_se, flags);
1463
1464         dequeue_rt_stack(rt_se, flags);
1465
1466         for_each_sched_rt_entity(rt_se) {
1467                 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1468
1469                 if (rt_rq && rt_rq->rt_nr_running)
1470                         __enqueue_rt_entity(rt_se, flags);
1471         }
1472         enqueue_top_rt_rq(&rq->rt);
1473 }
1474
1475 /*
1476  * Adding/removing a task to/from a priority array:
1477  */
1478 static void
1479 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1480 {
1481         struct sched_rt_entity *rt_se = &p->rt;
1482
1483         if (flags & ENQUEUE_WAKEUP)
1484                 rt_se->timeout = 0;
1485
1486         check_schedstat_required();
1487         update_stats_wait_start_rt(rt_rq_of_se(rt_se), rt_se);
1488
1489         enqueue_rt_entity(rt_se, flags);
1490
1491         if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1492                 enqueue_pushable_task(rq, p);
1493 }
1494
1495 static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1496 {
1497         struct sched_rt_entity *rt_se = &p->rt;
1498
1499         update_curr_rt(rq);
1500         dequeue_rt_entity(rt_se, flags);
1501
1502         dequeue_pushable_task(rq, p);
1503
1504         return true;
1505 }
1506
1507 /*
1508  * Put task to the head or the end of the run list without the overhead of
1509  * dequeue followed by enqueue.
1510  */
1511 static void
1512 requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
1513 {
1514         if (on_rt_rq(rt_se)) {
1515                 struct rt_prio_array *array = &rt_rq->active;
1516                 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1517
1518                 if (head)
1519                         list_move(&rt_se->run_list, queue);
1520                 else
1521                         list_move_tail(&rt_se->run_list, queue);
1522         }
1523 }
1524
1525 static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
1526 {
1527         struct sched_rt_entity *rt_se = &p->rt;
1528         struct rt_rq *rt_rq;
1529
1530         for_each_sched_rt_entity(rt_se) {
1531                 rt_rq = rt_rq_of_se(rt_se);
1532                 requeue_rt_entity(rt_rq, rt_se, head);
1533         }
1534 }
1535
1536 static void yield_task_rt(struct rq *rq)
1537 {
1538         requeue_task_rt(rq, rq->curr, 0);
1539 }
1540
1541 #ifdef CONFIG_SMP
1542 static int find_lowest_rq(struct task_struct *task);
1543
1544 static int
1545 select_task_rq_rt(struct task_struct *p, int cpu, int flags)
1546 {
1547         struct task_struct *curr, *donor;
1548         struct rq *rq;
1549         bool test;
1550
1551         /* For anything but wake ups, just return the task_cpu */
1552         if (!(flags & (WF_TTWU | WF_FORK)))
1553                 goto out;
1554
1555         rq = cpu_rq(cpu);
1556
1557         rcu_read_lock();
1558         curr = READ_ONCE(rq->curr); /* unlocked access */
1559         donor = READ_ONCE(rq->donor);
1560
1561         /*
1562          * If the current task on @p's runqueue is an RT task, then
1563          * try to see if we can wake this RT task up on another
1564          * runqueue. Otherwise simply start this RT task
1565          * on its current runqueue.
1566          *
1567          * We want to avoid overloading runqueues. If the woken
1568          * task is of higher priority, then it will stay on this CPU
1569          * and the lower prio task should be moved to another CPU.
1570          * Even though this will probably make the lower prio task
1571          * lose its cache, we do not want to bounce a higher priority task
1572          * around just because it gave up its CPU, perhaps for a
1573          * lock?
1574          *
1575          * For equal prio tasks, we just let the scheduler sort it out.
1576          *
1577          * Otherwise, just let it ride on the affine RQ and the
1578          * post-schedule router will push the preempted task away
1579          *
1580          * This test is optimistic, if we get it wrong the load-balancer
1581          * will have to sort it out.
1582          *
1583          * We take into account the capacity of the CPU to ensure it fits the
1584          * requirement of the task - which is only important on heterogeneous
1585          * systems like big.LITTLE.
1586          */
1587         test = curr &&
1588                unlikely(rt_task(donor)) &&
1589                (curr->nr_cpus_allowed < 2 || donor->prio <= p->prio);
1590
1591         if (test || !rt_task_fits_capacity(p, cpu)) {
1592                 int target = find_lowest_rq(p);
1593
1594                 /*
1595                  * Bail out if we were forcing a migration to find a better
1596                  * fitting CPU but our search failed.
1597                  */
1598                 if (!test && target != -1 && !rt_task_fits_capacity(p, target))
1599                         goto out_unlock;
1600
1601                 /*
1602                  * Don't bother moving it if the destination CPU is
1603                  * not running a lower priority task.
1604                  */
1605                 if (target != -1 &&
1606                     p->prio < cpu_rq(target)->rt.highest_prio.curr)
1607                         cpu = target;
1608         }
1609
1610 out_unlock:
1611         rcu_read_unlock();
1612
1613 out:
1614         return cpu;
1615 }
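/*
 * Editor's note (worked example, hypothetical priorities; lower value means
 * higher priority): task p (prio 20) wakes on a CPU whose donor is an RT
 * task of prio 10.  Since 10 <= 20, "test" above is true and find_lowest_rq()
 * is consulted; if it returns a CPU whose rt.highest_prio.curr is 99 (no RT
 * tasks queued there), then 20 < 99 and p is redirected to that CPU instead
 * of queueing behind the higher-priority task on its wakeup CPU.
 */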
1616
1617 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1618 {
1619         if (rq->curr->nr_cpus_allowed == 1 ||
1620             !cpupri_find(&rq->rd->cpupri, rq->donor, NULL))
1621                 return;
1622
1623         /*
1624          * p is migratable, so let's not schedule it and
1625          * see if it is pushed or pulled somewhere else.
1626          */
1627         if (p->nr_cpus_allowed != 1 &&
1628             cpupri_find(&rq->rd->cpupri, p, NULL))
1629                 return;
1630
1631         /*
1632          * There appear to be other CPUs that can accept
1633          * the current task but none can run 'p', so let's reschedule
1634          * to try and push the current task away:
1635          */
1636         requeue_task_rt(rq, p, 1);
1637         resched_curr(rq);
1638 }
1639
1640 static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
1641 {
1642         if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
1643                 /*
1644                  * This is OK, because current is on_cpu, which avoids it being
1645                  * picked for load-balance; preemption/IRQs are still
1646                  * disabled, avoiding further scheduler activity on it, and we've
1647                  * not yet started the picking loop.
1648                  */
1649                 rq_unpin_lock(rq, rf);
1650                 pull_rt_task(rq);
1651                 rq_repin_lock(rq, rf);
1652         }
1653
1654         return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
1655 }
1656 #endif /* CONFIG_SMP */
1657
1658 /*
1659  * Preempt the current task with a newly woken task if needed:
1660  */
1661 static void wakeup_preempt_rt(struct rq *rq, struct task_struct *p, int flags)
1662 {
1663         struct task_struct *donor = rq->donor;
1664
1665         if (p->prio < donor->prio) {
1666                 resched_curr(rq);
1667                 return;
1668         }
1669
1670 #ifdef CONFIG_SMP
1671         /*
1672          * If:
1673          *
1674          * - the newly woken task is of equal priority to the current task
1675          * - the newly woken task is non-migratable while current is migratable
1676          * - current will be preempted on the next reschedule
1677          *
1678          * we should check to see if current can readily move to a different
1679          * cpu.  If so, we will reschedule to allow the push logic to try
1680          * to move current somewhere else, making room for our non-migratable
1681          * task.
1682          */
1683         if (p->prio == donor->prio && !test_tsk_need_resched(rq->curr))
1684                 check_preempt_equal_prio(rq, p);
1685 #endif
1686 }
1687
1688 static inline void set_next_task_rt(struct rq *rq, struct task_struct *p, bool first)
1689 {
1690         struct sched_rt_entity *rt_se = &p->rt;
1691         struct rt_rq *rt_rq = &rq->rt;
1692
1693         p->se.exec_start = rq_clock_task(rq);
1694         if (on_rt_rq(&p->rt))
1695                 update_stats_wait_end_rt(rt_rq, rt_se);
1696
1697         /* The running task is never eligible for pushing */
1698         dequeue_pushable_task(rq, p);
1699
1700         if (!first)
1701                 return;
1702
1703         /*
1704          * If the prev task was RT, put_prev_task() has already updated the
1705          * utilization. We only care about the case where we start to schedule
1706          * an RT task.
1707          */
1708         if (rq->donor->sched_class != &rt_sched_class)
1709                 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
1710
1711         rt_queue_push_tasks(rq);
1712 }
1713
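/*
 * Return the first queued entity of the highest set priority in the
 * rt_rq's active array, or NULL if the bitmap and queues are inconsistent.
 */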
1714 static struct sched_rt_entity *pick_next_rt_entity(struct rt_rq *rt_rq)
1715 {
1716         struct rt_prio_array *array = &rt_rq->active;
1717         struct sched_rt_entity *next = NULL;
1718         struct list_head *queue;
1719         int idx;
1720
1721         idx = sched_find_first_bit(array->bitmap);
1722         BUG_ON(idx >= MAX_RT_PRIO);
1723
1724         queue = array->queue + idx;
1725         if (WARN_ON_ONCE(list_empty(queue)))
1726                 return NULL;
1727         next = list_entry(queue->next, struct sched_rt_entity, run_list);
1728
1729         return next;
1730 }
1731
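/*
 * Walk down the RT group hierarchy, picking the next entity at each level,
 * until a task-level entity is reached; return the corresponding task.
 * Returns NULL if pick_next_rt_entity() fails.
 */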
1732 static struct task_struct *_pick_next_task_rt(struct rq *rq)
1733 {
1734         struct sched_rt_entity *rt_se;
1735         struct rt_rq *rt_rq  = &rq->rt;
1736
1737         do {
1738                 rt_se = pick_next_rt_entity(rt_rq);
1739                 if (unlikely(!rt_se))
1740                         return NULL;
1741                 rt_rq = group_rt_rq(rt_se);
1742         } while (rt_rq);
1743
1744         return rt_task_of(rt_se);
1745 }
1746
1747 static struct task_struct *pick_task_rt(struct rq *rq)
1748 {
1749         struct task_struct *p;
1750
1751         if (!sched_rt_runnable(rq))
1752                 return NULL;
1753
1754         p = _pick_next_task_rt(rq);
1755
1756         return p;
1757 }
1758
1759 static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next)
1760 {
1761         struct sched_rt_entity *rt_se = &p->rt;
1762         struct rt_rq *rt_rq = &rq->rt;
1763
1764         if (on_rt_rq(&p->rt))
1765                 update_stats_wait_start_rt(rt_rq, rt_se);
1766
1767         update_curr_rt(rq);
1768
1769         update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
1770
1771         /*
1772          * The previous task needs to be made eligible for pushing
1773          * if it is still active.
1774          */
1775         if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1776                 enqueue_pushable_task(rq, p);
1777 }
1778
1779 #ifdef CONFIG_SMP
1780
1781 /* Only try algorithms three times */
1782 #define RT_MAX_TRIES 3
1783
1784 /*
1785  * Return the highest-priority task on the rq's pushable list that is
1786  * suitable to be executed on the given CPU, or NULL otherwise.
1787  */
1788 static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
1789 {
1790         struct plist_head *head = &rq->rt.pushable_tasks;
1791         struct task_struct *p;
1792
1793         if (!has_pushable_tasks(rq))
1794                 return NULL;
1795
1796         plist_for_each_entry(p, head, pushable_tasks) {
1797                 if (task_is_pushable(rq, p, cpu))
1798                         return p;
1799         }
1800
1801         return NULL;
1802 }
1803
1804 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
1805
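/*
 * Find a CPU running only lower-priority tasks that @task is allowed on,
 * as reported by cpupri. Prefer, in order: the task's previous CPU, a CPU
 * close in the sched-domain topology (this_cpu first), and finally any CPU
 * in the resulting mask. Returns -1 if there is no candidate.
 */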
1806 static int find_lowest_rq(struct task_struct *task)
1807 {
1808         struct sched_domain *sd;
1809         struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
1810         int this_cpu = smp_processor_id();
1811         int cpu      = task_cpu(task);
1812         int ret;
1813
1814         /* Make sure the mask is initialized first */
1815         if (unlikely(!lowest_mask))
1816                 return -1;
1817
1818         if (task->nr_cpus_allowed == 1)
1819                 return -1; /* No other targets possible */
1820
1821         /*
1822          * If we're on an asymmetric-capacity system, ensure we consider the
1823          * different capacities of the CPUs when searching for the lowest_mask.
1824          */
1825         if (sched_asym_cpucap_active()) {
1826
1827                 ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
1828                                           task, lowest_mask,
1829                                           rt_task_fits_capacity);
1830         } else {
1831
1832                 ret = cpupri_find(&task_rq(task)->rd->cpupri,
1833                                   task, lowest_mask);
1834         }
1835
1836         if (!ret)
1837                 return -1; /* No targets found */
1838
1839         /*
1840          * At this point we have built a mask of CPUs representing the
1841          * lowest priority tasks in the system.  Now we want to elect
1842          * the best one based on our affinity and topology.
1843          *
1844          * We prioritize the last CPU that the task executed on since
1845          * it is most likely cache-hot in that location.
1846          */
1847         if (cpumask_test_cpu(cpu, lowest_mask))
1848                 return cpu;
1849
1850         /*
1851          * Otherwise, we consult the sched_domains span maps to figure
1852          * out which CPU is logically closest to our hot cache data.
1853          */
1854         if (!cpumask_test_cpu(this_cpu, lowest_mask))
1855                 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
1856
1857         rcu_read_lock();
1858         for_each_domain(cpu, sd) {
1859                 if (sd->flags & SD_WAKE_AFFINE) {
1860                         int best_cpu;
1861
1862                         /*
1863                          * "this_cpu" is cheaper to preempt than a
1864                          * remote processor.
1865                          */
1866                         if (this_cpu != -1 &&
1867                             cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1868                                 rcu_read_unlock();
1869                                 return this_cpu;
1870                         }
1871
1872                         best_cpu = cpumask_any_and_distribute(lowest_mask,
1873                                                               sched_domain_span(sd));
1874                         if (best_cpu < nr_cpu_ids) {
1875                                 rcu_read_unlock();
1876                                 return best_cpu;
1877                         }
1878                 }
1879         }
1880         rcu_read_unlock();
1881
1882         /*
1883          * And finally, if there were no matches within the domains
1884          * just give the caller *something* to work with from the compatible
1885          * locations.
1886          */
1887         if (this_cpu != -1)
1888                 return this_cpu;
1889
1890         cpu = cpumask_any_distribute(lowest_mask);
1891         if (cpu < nr_cpu_ids)
1892                 return cpu;
1893
1894         return -1;
1895 }
1896
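/*
 * Return the highest-priority task on the rq's pushable list, sanity-checking
 * that it is queued on this rq, not currently running, an RT task and
 * allowed on more than one CPU. Returns NULL if the list is empty.
 */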
1897 static struct task_struct *pick_next_pushable_task(struct rq *rq)
1898 {
1899         struct task_struct *p;
1900
1901         if (!has_pushable_tasks(rq))
1902                 return NULL;
1903
1904         p = plist_first_entry(&rq->rt.pushable_tasks,
1905                               struct task_struct, pushable_tasks);
1906
1907         BUG_ON(rq->cpu != task_cpu(p));
1908         BUG_ON(task_current(rq, p));
1909         BUG_ON(task_current_donor(rq, p));
1910         BUG_ON(p->nr_cpus_allowed <= 1);
1911
1912         BUG_ON(!task_on_rq_queued(p));
1913         BUG_ON(!rt_task(p));
1914
1915         return p;
1916 }
1917
1918 /* Will lock the rq it finds */
1919 static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1920 {
1921         struct rq *lowest_rq = NULL;
1922         int tries;
1923         int cpu;
1924
1925         for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1926                 cpu = find_lowest_rq(task);
1927
1928                 if ((cpu == -1) || (cpu == rq->cpu))
1929                         break;
1930
1931                 lowest_rq = cpu_rq(cpu);
1932
1933                 if (lowest_rq->rt.highest_prio.curr <= task->prio) {
1934                         /*
1935                          * Target rq has tasks of equal or higher priority;
1936                          * retrying does not release any lock and is unlikely
1937                          * to yield a different result.
1938                          */
1939                         lowest_rq = NULL;
1940                         break;
1941                 }
1942
1943                 /* if the prio of this runqueue changed, try again */
1944                 if (double_lock_balance(rq, lowest_rq)) {
1945                         /*
1946                          * We had to unlock the run queue. In the
1947                          * meantime, the task could have migrated
1948                          * already or had its affinity changed;
1949                          * therefore check if the task is still at the
1950                          * head of the pushable tasks list.
1951                          * It is also possible the task was scheduled,
1952                          * got migration disabled and then preempted, so
1953                          * we must check the migration-disabled flag here too.
1954                          */
1955                         if (unlikely(is_migration_disabled(task) ||
1956                                      !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_mask) ||
1957                                      task != pick_next_pushable_task(rq))) {
1958
1959                                 double_unlock_balance(rq, lowest_rq);
1960                                 lowest_rq = NULL;
1961                                 break;
1962                         }
1963                 }
1964
1965                 /* If this rq is still suitable use it. */
1966                 if (lowest_rq->rt.highest_prio.curr > task->prio)
1967                         break;
1968
1969                 /* try again */
1970                 double_unlock_balance(rq, lowest_rq);
1971                 lowest_rq = NULL;
1972         }
1973
1974         return lowest_rq;
1975 }
1976
1977 /*
1978  * If the current CPU has more than one RT task, see if the non-running
1979  * task can migrate over to a CPU that is running a task of lesser
1980  * priority.
1981  */
1982 static int push_rt_task(struct rq *rq, bool pull)
1983 {
1984         struct task_struct *next_task;
1985         struct rq *lowest_rq;
1986         int ret = 0;
1987
1988         if (!rq->rt.overloaded)
1989                 return 0;
1990
1991         next_task = pick_next_pushable_task(rq);
1992         if (!next_task)
1993                 return 0;
1994
1995 retry:
1996         /*
1997          * It's possible that the next_task slipped in with a
1998          * higher priority than current. If that's the case
1999          * just reschedule current.
2000          */
2001         if (unlikely(next_task->prio < rq->donor->prio)) {
2002                 resched_curr(rq);
2003                 return 0;
2004         }
2005
2006         if (is_migration_disabled(next_task)) {
2007                 struct task_struct *push_task = NULL;
2008                 int cpu;
2009
2010                 if (!pull || rq->push_busy)
2011                         return 0;
2012
2013                 /*
2014                  * Invoking find_lowest_rq() on anything but an RT task doesn't
2015                  * make sense. Per the above priority check, curr has to
2016                  * be of higher priority than next_task, so no need to
2017                  * reschedule when bailing out.
2018                  *
2019                  * Note that the stoppers are masqueraded as SCHED_FIFO
2020                  * (cf. sched_set_stop_task()), so we can't rely on rt_task().
2021                  */
2022                 if (rq->donor->sched_class != &rt_sched_class)
2023                         return 0;
2024
2025                 cpu = find_lowest_rq(rq->curr);
2026                 if (cpu == -1 || cpu == rq->cpu)
2027                         return 0;
2028
2029                 /*
2030                  * Since we found a CPU with lower priority than @next_task,
2031                  * @next_task should be running. However, we cannot migrate it
2032                  * to that CPU while it is migration-disabled; instead, attempt
2033                  * to push away the task currently running on this CPU.
2034                  */
2035                 push_task = get_push_task(rq);
2036                 if (push_task) {
2037                         preempt_disable();
2038                         raw_spin_rq_unlock(rq);
2039                         stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
2040                                             push_task, &rq->push_work);
2041                         preempt_enable();
2042                         raw_spin_rq_lock(rq);
2043                 }
2044
2045                 return 0;
2046         }
2047
2048         if (WARN_ON(next_task == rq->curr))
2049                 return 0;
2050
2051         /* We might release rq lock */
2052         get_task_struct(next_task);
2053
2054         /* find_lock_lowest_rq locks the rq if found */
2055         lowest_rq = find_lock_lowest_rq(next_task, rq);
2056         if (!lowest_rq) {
2057                 struct task_struct *task;
2058                 /*
2059                  * find_lock_lowest_rq releases rq->lock
2060                  * so it is possible that next_task has migrated.
2061                  *
2062                  * We need to make sure that the task is still on the same
2063                  * run-queue and is also still the next task eligible for
2064                  * pushing.
2065                  */
2066                 task = pick_next_pushable_task(rq);
2067                 if (task == next_task) {
2068                         /*
2069                          * The task hasn't migrated, and is still the next
2070                          * eligible task, but we failed to find a run-queue
2071                          * to push it to.  Do not retry in this case, since
2072                          * other CPUs will pull from us when ready.
2073                          */
2074                         goto out;
2075                 }
2076
2077                 if (!task)
2078                         /* No more tasks, just exit */
2079                         goto out;
2080
2081                 /*
2082                  * Something has shifted, try again.
2083                  */
2084                 put_task_struct(next_task);
2085                 next_task = task;
2086                 goto retry;
2087         }
2088
2089         move_queued_task_locked(rq, lowest_rq, next_task);
2090         resched_curr(lowest_rq);
2091         ret = 1;
2092
2093         double_unlock_balance(rq, lowest_rq);
2094 out:
2095         put_task_struct(next_task);
2096
2097         return ret;
2098 }
2099
2100 static void push_rt_tasks(struct rq *rq)
2101 {
2102         /* push_rt_task() will return true if it moved an RT task */
2103         while (push_rt_task(rq, false))
2104                 ;
2105 }
2106
2107 #ifdef HAVE_RT_PUSH_IPI
2108
2109 /*
2110  * When a high priority task schedules out from a CPU and a lower priority
2111  * task is scheduled in, a check is made to see if there are any RT tasks
2112  * on other CPUs that are waiting to run because a higher priority RT task
2113  * is currently running on its CPU. In this case, the CPU with multiple RT
2114  * tasks queued on it (overloaded) needs to be notified that a CPU has opened
2115  * up that may be able to run one of its non-running queued RT tasks.
2116  *
2117  * All CPUs with overloaded RT tasks need to be notified as there is currently
2118  * no way to know which of these CPUs have the highest priority task waiting
2119  * to run. Instead of trying to take a spinlock on each of these CPUs,
2120  * which has been shown to cause large latency when done on machines with
2121  * many CPUs, an IPI is sent instead to have those CPUs push off their
2122  * overloaded RT tasks waiting to run.
2123  *
2124  * Just sending an IPI to each of the CPUs is also an issue: on machines
2125  * with a large CPU count this can cause an IPI storm on a CPU, especially
2126  * if it's the only CPU with multiple RT tasks queued and a large number
2127  * of CPUs schedule a lower priority task at the same time.
2128  *
2129  * Each root domain has its own IRQ work function that can iterate over
2130  * all CPUs with RT overloaded tasks. Since every RT overloaded CPU must
2131  * be checked whether one or many CPUs are lowering their priority, a
2132  * single IRQ work iterator is used to try to
2133  * push off RT tasks that are waiting to run.
2134  *
2135  * When a CPU schedules a lower priority task, it will kick off the
2136  * IRQ work iterator that will jump to each CPU with overloaded RT tasks.
2137  * As it only takes the first CPU that schedules a lower priority task
2138  * to start the process, the rto_loop_start variable is set atomically and
2139  * only the CPU that wins that race goes on to take the rto_lock.
2140  * This prevents high contention on the lock as the process handles all
2141  * CPUs scheduling lower priority tasks.
2142  *
2143  * All CPUs that are scheduling a lower priority task will increment the
2144  * rto_loop_next variable. This will make sure that the IRQ work iterator
2145  * checks all RT overloaded CPUs whenever a CPU schedules a new lower
2146  * priority task, even if the iterator is in the middle of a scan.
2147  * Incrementing rto_loop_next will cause the iterator to perform another scan.
2148  *
2149  */
2150 static int rto_next_cpu(struct root_domain *rd)
2151 {
2152         int next;
2153         int cpu;
2154
2155         /*
2156          * When starting the IPI RT pushing, the rto_cpu is set to -1,
2157          * and rto_next_cpu() will simply return the first CPU found in
2158          * the rto_mask.
2159          *
2160          * If rto_next_cpu() is called when rto_cpu is a valid CPU, it
2161          * will return the next CPU found in the rto_mask.
2162          *
2163          * If there are no more CPUs left in the rto_mask, then a check is made
2164          * against rto_loop and rto_loop_next. rto_loop is only updated with
2165          * the rto_lock held, but any CPU may increment the rto_loop_next
2166          * without any locking.
2167          */
2168         for (;;) {
2169
2170                 /* When rto_cpu is -1 this acts like cpumask_first() */
2171                 cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
2172
2173                 rd->rto_cpu = cpu;
2174
2175                 if (cpu < nr_cpu_ids)
2176                         return cpu;
2177
2178                 rd->rto_cpu = -1;
2179
2180                 /*
2181                  * ACQUIRE ensures we see the @rto_mask changes
2182                  * made prior to the @next value observed.
2183                  *
2184                  * Matches WMB in rt_set_overload().
2185                  */
2186                 next = atomic_read_acquire(&rd->rto_loop_next);
2187
2188                 if (rd->rto_loop == next)
2189                         break;
2190
2191                 rd->rto_loop = next;
2192         }
2193
2194         return -1;
2195 }
2196
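/*
 * rto_loop_start is used as a simple test-and-set lock: the trylock
 * succeeds only if the value was 0, and the unlock stores 0 with release
 * semantics so the next initiator sees all prior updates.
 */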
2197 static inline bool rto_start_trylock(atomic_t *v)
2198 {
2199         return !atomic_cmpxchg_acquire(v, 0, 1);
2200 }
2201
2202 static inline void rto_start_unlock(atomic_t *v)
2203 {
2204         atomic_set_release(v, 0);
2205 }
2206
2207 static void tell_cpu_to_push(struct rq *rq)
2208 {
2209         int cpu = -1;
2210
2211         /* Keep the loop going if the IPI is currently active */
2212         atomic_inc(&rq->rd->rto_loop_next);
2213
2214         /* Only one CPU can initiate a loop at a time */
2215         if (!rto_start_trylock(&rq->rd->rto_loop_start))
2216                 return;
2217
2218         raw_spin_lock(&rq->rd->rto_lock);
2219
2220         /*
2221          * The rto_cpu is updated under the lock. If it holds a valid CPU,
2222          * then the IPI is still running and will continue due to the
2223          * update to rto_loop_next, and nothing needs to be done here.
2224          * Otherwise it is finishing up and an IPI needs to be sent.
2225          */
2226         if (rq->rd->rto_cpu < 0)
2227                 cpu = rto_next_cpu(rq->rd);
2228
2229         raw_spin_unlock(&rq->rd->rto_lock);
2230
2231         rto_start_unlock(&rq->rd->rto_loop_start);
2232
2233         if (cpu >= 0) {
2234                 /* Make sure the rd does not get freed while pushing */
2235                 sched_get_rd(rq->rd);
2236                 irq_work_queue_on(&rq->rd->rto_push_work, cpu);
2237         }
2238 }
2239
2240 /* Called from hardirq context */
2241 void rto_push_irq_work_func(struct irq_work *work)
2242 {
2243         struct root_domain *rd =
2244                 container_of(work, struct root_domain, rto_push_work);
2245         struct rq *rq;
2246         int cpu;
2247
2248         rq = this_rq();
2249
2250         /*
2251          * We do not need to grab the lock to check for has_pushable_tasks.
2252          * When it gets updated, a check is made whether a push is possible.
2253          */
2254         if (has_pushable_tasks(rq)) {
2255                 raw_spin_rq_lock(rq);
2256                 while (push_rt_task(rq, true))
2257                         ;
2258                 raw_spin_rq_unlock(rq);
2259         }
2260
2261         raw_spin_lock(&rd->rto_lock);
2262
2263         /* Pass the IPI to the next rt overloaded queue */
2264         cpu = rto_next_cpu(rd);
2265
2266         raw_spin_unlock(&rd->rto_lock);
2267
2268         if (cpu < 0) {
2269                 sched_put_rd(rd);
2270                 return;
2271         }
2272
2273         /* Try the next RT overloaded CPU */
2274         irq_work_queue_on(&rd->rto_push_work, cpu);
2275 }
2276 #endif /* HAVE_RT_PUSH_IPI */
2277
2278 static void pull_rt_task(struct rq *this_rq)
2279 {
2280         int this_cpu = this_rq->cpu, cpu;
2281         bool resched = false;
2282         struct task_struct *p, *push_task;
2283         struct rq *src_rq;
2284         int rt_overload_count = rt_overloaded(this_rq);
2285
2286         if (likely(!rt_overload_count))
2287                 return;
2288
2289         /*
2290          * Match the barrier from rt_set_overload(); this guarantees that if we
2291          * see overloaded we must also see the rto_mask bit.
2292          */
2293         smp_rmb();
2294
2295         /* If we are the only overloaded CPU do nothing */
2296         if (rt_overload_count == 1 &&
2297             cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask))
2298                 return;
2299
2300 #ifdef HAVE_RT_PUSH_IPI
2301         if (sched_feat(RT_PUSH_IPI)) {
2302                 tell_cpu_to_push(this_rq);
2303                 return;
2304         }
2305 #endif
2306
2307         for_each_cpu(cpu, this_rq->rd->rto_mask) {
2308                 if (this_cpu == cpu)
2309                         continue;
2310
2311                 src_rq = cpu_rq(cpu);
2312
2313                 /*
2314                  * Don't bother taking the src_rq->lock if the next highest
2315                  * task is known to be of equal or lower priority than our
2316                  * current task. This may look racy, but if this value is
2317                  * about to go logically higher, the src_rq will push this
2318                  * task away. And if it's going logically lower, we do not care.
2319                  */
2320                 if (src_rq->rt.highest_prio.next >=
2321                     this_rq->rt.highest_prio.curr)
2322                         continue;
2323
2324                 /*
2325                  * We can potentially drop this_rq's lock in
2326                  * double_lock_balance, and another CPU could
2327                  * alter this_rq
2328                  */
2329                 push_task = NULL;
2330                 double_lock_balance(this_rq, src_rq);
2331
2332                 /*
2333                  * We can only pull a task that is pushable
2334                  * on its rq, and no others.
2335                  */
2336                 p = pick_highest_pushable_task(src_rq, this_cpu);
2337
2338                 /*
2339                  * Do we have an RT task that preempts
2340                  * the to-be-scheduled task?
2341                  */
2342                 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
2343                         WARN_ON(p == src_rq->curr);
2344                         WARN_ON(!task_on_rq_queued(p));
2345
2346                         /*
2347                          * There's a chance that p is higher in priority
2348                          * than what's currently running on its CPU.
2349                          * This can happen when p is waking up and hasn't
2350                          * had a chance to schedule yet. We only pull
2351                          * p if it is lower in priority than the
2352                          * current task on the run queue.
2353                          */
2354                         if (p->prio < src_rq->donor->prio)
2355                                 goto skip;
2356
2357                         if (is_migration_disabled(p)) {
2358                                 push_task = get_push_task(src_rq);
2359                         } else {
2360                                 move_queued_task_locked(src_rq, this_rq, p);
2361                                 resched = true;
2362                         }
2363                         /*
2364                          * We continue with the search, just in
2365                          * case there's an even higher prio task
2366                          * in another runqueue. (low likelihood
2367                          * but possible)
2368                          */
2369                 }
2370 skip:
2371                 double_unlock_balance(this_rq, src_rq);
2372
2373                 if (push_task) {
2374                         preempt_disable();
2375                         raw_spin_rq_unlock(this_rq);
2376                         stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
2377                                             push_task, &src_rq->push_work);
2378                         preempt_enable();
2379                         raw_spin_rq_lock(this_rq);
2380                 }
2381         }
2382
2383         if (resched)
2384                 resched_curr(this_rq);
2385 }
2386
2387 /*
2388  * If the woken task is not running and we are not going to reschedule soon,
2389  * we should try to push tasks away now.
2390  */
2391 static void task_woken_rt(struct rq *rq, struct task_struct *p)
2392 {
2393         bool need_to_push = !task_on_cpu(rq, p) &&
2394                             !test_tsk_need_resched(rq->curr) &&
2395                             p->nr_cpus_allowed > 1 &&
2396                             (dl_task(rq->donor) || rt_task(rq->donor)) &&
2397                             (rq->curr->nr_cpus_allowed < 2 ||
2398                              rq->donor->prio <= p->prio);
2399
2400         if (need_to_push)
2401                 push_rt_tasks(rq);
2402 }
2403
2404 /* Assumes rq->lock is held */
2405 static void rq_online_rt(struct rq *rq)
2406 {
2407         if (rq->rt.overloaded)
2408                 rt_set_overload(rq);
2409
2410         __enable_runtime(rq);
2411
2412         cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
2413 }
2414
2415 /* Assumes rq->lock is held */
2416 static void rq_offline_rt(struct rq *rq)
2417 {
2418         if (rq->rt.overloaded)
2419                 rt_clear_overload(rq);
2420
2421         __disable_runtime(rq);
2422
2423         cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
2424 }
2425
2426 /*
2427  * When we switch away from the RT queue, we bring ourselves to a position
2428  * where we might want to pull RT tasks from other runqueues.
2429  */
2430 static void switched_from_rt(struct rq *rq, struct task_struct *p)
2431 {
2432         /*
2433          * If there are other RT tasks then we will reschedule
2434          * and the scheduling of the other RT tasks will handle
2435          * the balancing. But if we are the last RT task
2436          * we may need to handle the pulling of RT tasks
2437          * now.
2438          */
2439         if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
2440                 return;
2441
2442         rt_queue_pull_task(rq);
2443 }
2444
2445 void __init init_sched_rt_class(void)
2446 {
2447         unsigned int i;
2448
2449         for_each_possible_cpu(i) {
2450                 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
2451                                         GFP_KERNEL, cpu_to_node(i));
2452         }
2453 }
2454 #endif /* CONFIG_SMP */
2455
2456 /*
2457  * When switching a task to RT, we may overload the runqueue
2458  * with RT tasks. In this case we try to push them off to
2459  * other runqueues.
2460  */
2461 static void switched_to_rt(struct rq *rq, struct task_struct *p)
2462 {
2463         /*
2464          * If we are running, update the avg_rt tracking, as the running time
2465          * will from now on be accounted there.
2466          */
2467         if (task_current(rq, p)) {
2468                 update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 0);
2469                 return;
2470         }
2471
2472         /*
2473          * If we are not running we may need to preempt the current
2474          * running task. If that current running task is also an RT task
2475          * then see if we can move to another run queue.
2476          */
2477         if (task_on_rq_queued(p)) {
2478 #ifdef CONFIG_SMP
2479                 if (p->nr_cpus_allowed > 1 && rq->rt.overloaded)
2480                         rt_queue_push_tasks(rq);
2481 #endif /* CONFIG_SMP */
2482                 if (p->prio < rq->donor->prio && cpu_online(cpu_of(rq)))
2483                         resched_curr(rq);
2484         }
2485 }
2486
2487 /*
2488  * Priority of the task has changed. This may cause
2489  * us to initiate a push or pull.
2490  */
2491 static void
2492 prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
2493 {
2494         if (!task_on_rq_queued(p))
2495                 return;
2496
2497         if (task_current_donor(rq, p)) {
2498 #ifdef CONFIG_SMP
2499                 /*
2500                  * If our priority decreases while running, we
2501                  * may need to pull tasks to this runqueue.
2502                  */
2503                 if (oldprio < p->prio)
2504                         rt_queue_pull_task(rq);
2505
2506                 /*
2507                  * If there's a higher priority task waiting to run
2508                  * then reschedule.
2509                  */
2510                 if (p->prio > rq->rt.highest_prio.curr)
2511                         resched_curr(rq);
2512 #else
2513                 /* For UP simply resched on drop of prio */
2514                 if (oldprio < p->prio)
2515                         resched_curr(rq);
2516 #endif /* CONFIG_SMP */
2517         } else {
2518                 /*
2519                  * This task is not running, but if it has a
2520                  * higher priority than the current running task
2521                  * then reschedule.
2522                  */
2523                 if (p->prio < rq->donor->prio)
2524                         resched_curr(rq);
2525         }
2526 }
2527
2528 #ifdef CONFIG_POSIX_TIMERS
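/*
 * Enforce RLIMIT_RTTIME: count in ticks how long this task has been
 * running and, once the count exceeds the tighter of the soft and hard
 * limits (converted from microseconds to ticks), report the task's
 * runtime to the POSIX CPU timers RT watchdog.
 */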
2529 static void watchdog(struct rq *rq, struct task_struct *p)
2530 {
2531         unsigned long soft, hard;
2532
2533         /* max may change after cur was read; this will be fixed next tick */
2534         soft = task_rlimit(p, RLIMIT_RTTIME);
2535         hard = task_rlimit_max(p, RLIMIT_RTTIME);
2536
2537         if (soft != RLIM_INFINITY) {
2538                 unsigned long next;
2539
2540                 if (p->rt.watchdog_stamp != jiffies) {
2541                         p->rt.timeout++;
2542                         p->rt.watchdog_stamp = jiffies;
2543                 }
2544
2545                 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
2546                 if (p->rt.timeout > next) {
2547                         posix_cputimers_rt_watchdog(&p->posix_cputimers,
2548                                                     p->se.sum_exec_runtime);
2549                 }
2550         }
2551 }
2552 #else
2553 static inline void watchdog(struct rq *rq, struct task_struct *p) { }
2554 #endif
2555
2556 /*
2557  * scheduler tick hitting a task of our scheduling class.
2558  *
2559  * NOTE: This function can be called remotely by the tick offload that
2560  * goes along full dynticks. Therefore no local assumption can be made
2561  * and everything must be accessed through the @rq and @curr passed in
2562  * parameters.
2563  */
2564 static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
2565 {
2566         struct sched_rt_entity *rt_se = &p->rt;
2567
2568         update_curr_rt(rq);
2569         update_rt_rq_load_avg(rq_clock_pelt(rq), rq, 1);
2570
2571         watchdog(rq, p);
2572
2573         /*
2574          * RR tasks need a special form of time-slice management.
2575          * FIFO tasks have no timeslices.
2576          */
2577         if (p->policy != SCHED_RR)
2578                 return;
2579
2580         if (--p->rt.time_slice)
2581                 return;
2582
2583         p->rt.time_slice = sched_rr_timeslice;
2584
2585         /*
2586          * Requeue to the end of the queue if we (and all of our ancestors) are not
2587          * the only element on the queue
2588          */
2589         for_each_sched_rt_entity(rt_se) {
2590                 if (rt_se->run_list.prev != rt_se->run_list.next) {
2591                         requeue_task_rt(rq, p, 0);
2592                         resched_curr(rq);
2593                         return;
2594                 }
2595         }
2596 }
2597
2598 static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
2599 {
2600         /*
2601          * Time slice is 0 for SCHED_FIFO tasks
2602          */
2603         if (task->policy == SCHED_RR)
2604                 return sched_rr_timeslice;
2605         else
2606                 return 0;
2607 }
2608
2609 #ifdef CONFIG_SCHED_CORE
2610 static int task_is_throttled_rt(struct task_struct *p, int cpu)
2611 {
2612         struct rt_rq *rt_rq;
2613
2614 #ifdef CONFIG_RT_GROUP_SCHED // XXX maybe add task_rt_rq(), see also sched_rt_period_rt_rq
2615         rt_rq = task_group(p)->rt_rq[cpu];
2616         WARN_ON(!rt_group_sched_enabled() && rt_rq->tg != &root_task_group);
2617 #else
2618         rt_rq = &cpu_rq(cpu)->rt;
2619 #endif
2620
2621         return rt_rq_throttled(rt_rq);
2622 }
2623 #endif
2624
2625 DEFINE_SCHED_CLASS(rt) = {
2626
2627         .enqueue_task           = enqueue_task_rt,
2628         .dequeue_task           = dequeue_task_rt,
2629         .yield_task             = yield_task_rt,
2630
2631         .wakeup_preempt         = wakeup_preempt_rt,
2632
2633         .pick_task              = pick_task_rt,
2634         .put_prev_task          = put_prev_task_rt,
2635         .set_next_task          = set_next_task_rt,
2636
2637 #ifdef CONFIG_SMP
2638         .balance                = balance_rt,
2639         .select_task_rq         = select_task_rq_rt,
2640         .set_cpus_allowed       = set_cpus_allowed_common,
2641         .rq_online              = rq_online_rt,
2642         .rq_offline             = rq_offline_rt,
2643         .task_woken             = task_woken_rt,
2644         .switched_from          = switched_from_rt,
2645         .find_lock_rq           = find_lock_lowest_rq,
2646 #endif
2647
2648         .task_tick              = task_tick_rt,
2649
2650         .get_rr_interval        = get_rr_interval_rt,
2651
2652         .prio_changed           = prio_changed_rt,
2653         .switched_to            = switched_to_rt,
2654
2655         .update_curr            = update_curr_rt,
2656
2657 #ifdef CONFIG_SCHED_CORE
2658         .task_is_throttled      = task_is_throttled_rt,
2659 #endif
2660
2661 #ifdef CONFIG_UCLAMP_TASK
2662         .uclamp_enabled         = 1,
2663 #endif
2664 };
2665
2666 #ifdef CONFIG_RT_GROUP_SCHED
2667 /*
2668  * Ensure that the real time constraints are schedulable.
2669  */
2670 static DEFINE_MUTEX(rt_constraints_mutex);
2671
2672 static inline int tg_has_rt_tasks(struct task_group *tg)
2673 {
2674         struct task_struct *task;
2675         struct css_task_iter it;
2676         int ret = 0;
2677
2678         /*
2679          * Autogroups do not have RT tasks; see autogroup_create().
2680          */
2681         if (task_group_is_autogroup(tg))
2682                 return 0;
2683
2684         css_task_iter_start(&tg->css, 0, &it);
2685         while (!ret && (task = css_task_iter_next(&it)))
2686                 ret |= rt_task(task);
2687         css_task_iter_end(&it);
2688
2689         return ret;
2690 }
2691
2692 struct rt_schedulable_data {
2693         struct task_group *tg;
2694         u64 rt_period;
2695         u64 rt_runtime;
2696 };
2697
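/*
 * Check one node of the task-group tree: with the proposed period/runtime
 * from @data applied to the target group, the runtime must not exceed the
 * period, the group's bandwidth must not exceed the global limit, and the
 * combined bandwidth of its children must not exceed its own.
 */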
2698 static int tg_rt_schedulable(struct task_group *tg, void *data)
2699 {
2700         struct rt_schedulable_data *d = data;
2701         struct task_group *child;
2702         unsigned long total, sum = 0;
2703         u64 period, runtime;
2704
2705         period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2706         runtime = tg->rt_bandwidth.rt_runtime;
2707
2708         if (tg == d->tg) {
2709                 period = d->rt_period;
2710                 runtime = d->rt_runtime;
2711         }
2712
2713         /*
2714          * Cannot have more runtime than the period.
2715          */
2716         if (runtime > period && runtime != RUNTIME_INF)
2717                 return -EINVAL;
2718
2719         /*
2720          * Ensure we don't starve existing RT tasks if runtime turns zero.
2721          */
2722         if (rt_bandwidth_enabled() && !runtime &&
2723             tg->rt_bandwidth.rt_runtime && tg_has_rt_tasks(tg))
2724                 return -EBUSY;
2725
2726         if (WARN_ON(!rt_group_sched_enabled() && tg != &root_task_group))
2727                 return -EBUSY;
2728
2729         total = to_ratio(period, runtime);
2730
2731         /*
2732          * Nobody can have more than the global setting allows.
2733          */
2734         if (total > to_ratio(global_rt_period(), global_rt_runtime()))
2735                 return -EINVAL;
2736
2737         /*
2738          * The sum of our children's runtime should not exceed our own.
2739          */
2740         list_for_each_entry_rcu(child, &tg->children, siblings) {
2741                 period = ktime_to_ns(child->rt_bandwidth.rt_period);
2742                 runtime = child->rt_bandwidth.rt_runtime;
2743
2744                 if (child == d->tg) {
2745                         period = d->rt_period;
2746                         runtime = d->rt_runtime;
2747                 }
2748
2749                 sum += to_ratio(period, runtime);
2750         }
2751
2752         if (sum > total)
2753                 return -EINVAL;
2754
2755         return 0;
2756 }
2757
2758 static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
2759 {
2760         int ret;
2761
2762         struct rt_schedulable_data data = {
2763                 .tg = tg,
2764                 .rt_period = period,
2765                 .rt_runtime = runtime,
2766         };
2767
2768         rcu_read_lock();
2769         ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data);
2770         rcu_read_unlock();
2771
2772         return ret;
2773 }
2774
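/*
 * Install a new period/runtime pair for @tg: validate it against the
 * whole hierarchy under rt_constraints_mutex, then update the group's
 * bandwidth and propagate the runtime to each of its per-CPU rt_rqs.
 */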
2775 static int tg_set_rt_bandwidth(struct task_group *tg,
2776                 u64 rt_period, u64 rt_runtime)
2777 {
2778         int i, err = 0;
2779
2780         /*
2781          * Disallowing the root group RT runtime is BAD; it would disallow the
2782          * kernel from creating (and/or operating) RT threads.
2783          */
2784         if (tg == &root_task_group && rt_runtime == 0)
2785                 return -EINVAL;
2786
2787         /* A zero period doesn't make any sense. */
2788         if (rt_period == 0)
2789                 return -EINVAL;
2790
2791         /*
2792          * Bound the quota to defend against overflow during the bandwidth shift.
2793          */
2794         if (rt_runtime != RUNTIME_INF && rt_runtime > max_rt_runtime)
2795                 return -EINVAL;
2796
2797         mutex_lock(&rt_constraints_mutex);
2798         err = __rt_schedulable(tg, rt_period, rt_runtime);
2799         if (err)
2800                 goto unlock;
2801
2802         raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2803         tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
2804         tg->rt_bandwidth.rt_runtime = rt_runtime;
2805
2806         for_each_possible_cpu(i) {
2807                 struct rt_rq *rt_rq = tg->rt_rq[i];
2808
2809                 raw_spin_lock(&rt_rq->rt_runtime_lock);
2810                 rt_rq->rt_runtime = rt_runtime;
2811                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
2812         }
2813         raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
2814 unlock:
2815         mutex_unlock(&rt_constraints_mutex);
2816
2817         return err;
2818 }
2819
2820 int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
2821 {
2822         u64 rt_runtime, rt_period;
2823
2824         rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period);
2825         rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
2826         if (rt_runtime_us < 0)
2827                 rt_runtime = RUNTIME_INF;
2828         else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
2829                 return -EINVAL;
2830
2831         return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2832 }
2833
2834 long sched_group_rt_runtime(struct task_group *tg)
2835 {
2836         u64 rt_runtime_us;
2837
2838         if (tg->rt_bandwidth.rt_runtime == RUNTIME_INF)
2839                 return -1;
2840
2841         rt_runtime_us = tg->rt_bandwidth.rt_runtime;
2842         do_div(rt_runtime_us, NSEC_PER_USEC);
2843         return rt_runtime_us;
2844 }
2845
2846 int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
2847 {
2848         u64 rt_runtime, rt_period;
2849
2850         if (rt_period_us > U64_MAX / NSEC_PER_USEC)
2851                 return -EINVAL;
2852
2853         rt_period = rt_period_us * NSEC_PER_USEC;
2854         rt_runtime = tg->rt_bandwidth.rt_runtime;
2855
2856         return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
2857 }
2858
2859 long sched_group_rt_period(struct task_group *tg)
2860 {
2861         u64 rt_period_us;
2862
2863         rt_period_us = ktime_to_ns(tg->rt_bandwidth.rt_period);
2864         do_div(rt_period_us, NSEC_PER_USEC);
2865         return rt_period_us;
2866 }
2867
2868 #ifdef CONFIG_SYSCTL
2869 static int sched_rt_global_constraints(void)
2870 {
2871         int ret = 0;
2872
2873         mutex_lock(&rt_constraints_mutex);
2874         ret = __rt_schedulable(NULL, 0, 0);
2875         mutex_unlock(&rt_constraints_mutex);
2876
2877         return ret;
2878 }
2879 #endif /* CONFIG_SYSCTL */
2880
2881 int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
2882 {
2883         /* Don't accept real-time tasks when there is no way for them to run */
2884         if (rt_group_sched_enabled() && rt_task(tsk) && tg->rt_bandwidth.rt_runtime == 0)
2885                 return 0;
2886
2887         return 1;
2888 }
2889
2890 #else /* !CONFIG_RT_GROUP_SCHED */
2891
2892 #ifdef CONFIG_SYSCTL
2893 static int sched_rt_global_constraints(void)
2894 {
2895         return 0;
2896 }
2897 #endif /* CONFIG_SYSCTL */
2898 #endif /* CONFIG_RT_GROUP_SCHED */
2899
2900 #ifdef CONFIG_SYSCTL
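/*
 * Validate the global sysctl values: a finite runtime must not exceed the
 * period and, converted to nanoseconds, must not exceed max_rt_runtime so
 * later bandwidth-shift arithmetic cannot overflow.
 */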
2901 static int sched_rt_global_validate(void)
2902 {
2903         if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
2904                 ((sysctl_sched_rt_runtime > sysctl_sched_rt_period) ||
2905                  ((u64)sysctl_sched_rt_runtime *
2906                         NSEC_PER_USEC > max_rt_runtime)))
2907                 return -EINVAL;
2908
2909         return 0;
2910 }
2911
2912 static void sched_rt_do_global(void)
2913 {
2914 }
2915
2916 static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer,
2917                 size_t *lenp, loff_t *ppos)
2918 {
2919         int old_period, old_runtime;
2920         static DEFINE_MUTEX(mutex);
2921         int ret;
2922
2923         mutex_lock(&mutex);
2924         sched_domains_mutex_lock();
2925         old_period = sysctl_sched_rt_period;
2926         old_runtime = sysctl_sched_rt_runtime;
2927
2928         ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2929
2930         if (!ret && write) {
2931                 ret = sched_rt_global_validate();
2932                 if (ret)
2933                         goto undo;
2934
2935                 ret = sched_dl_global_validate();
2936                 if (ret)
2937                         goto undo;
2938
2939                 ret = sched_rt_global_constraints();
2940                 if (ret)
2941                         goto undo;
2942
2943                 sched_rt_do_global();
2944                 sched_dl_do_global();
2945         }
2946         if (0) {
2947 undo:
2948                 sysctl_sched_rt_period = old_period;
2949                 sysctl_sched_rt_runtime = old_runtime;
2950         }
2951         sched_domains_mutex_unlock();
2952         mutex_unlock(&mutex);
2953
2954         return ret;
2955 }
2956
2957 static int sched_rr_handler(const struct ctl_table *table, int write, void *buffer,
2958                 size_t *lenp, loff_t *ppos)
2959 {
2960         int ret;
2961         static DEFINE_MUTEX(mutex);
2962
2963         mutex_lock(&mutex);
2964         ret = proc_dointvec(table, write, buffer, lenp, ppos);
2965         /*
2966          * Make sure that internally we keep jiffies.
2967          * Also, writing zero resets the time-slice to the default:
2968          */
2969         if (!ret && write) {
2970                 sched_rr_timeslice =
2971                         sysctl_sched_rr_timeslice <= 0 ? RR_TIMESLICE :
2972                         msecs_to_jiffies(sysctl_sched_rr_timeslice);
2973
2974                 if (sysctl_sched_rr_timeslice <= 0)
2975                         sysctl_sched_rr_timeslice = jiffies_to_msecs(RR_TIMESLICE);
2976         }
2977         mutex_unlock(&mutex);
2978
2979         return ret;
2980 }
2981 #endif /* CONFIG_SYSCTL */
2982
2983 void print_rt_stats(struct seq_file *m, int cpu)
2984 {
2985         rt_rq_iter_t iter;
2986         struct rt_rq *rt_rq;
2987
2988         rcu_read_lock();
2989         for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
2990                 print_rt_rq(m, cpu, rt_rq);
2991         rcu_read_unlock();
2992 }