kernel/sched/rt.c
/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

8f48894f 89#ifdef CONFIG_RT_GROUP_SCHED
029632fb
PZ
90static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
91{
92 hrtimer_cancel(&rt_b->rt_period_timer);
93}
8f48894f
PZ
94
95#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
96
398a153b
GH
97static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
98{
8f48894f
PZ
99#ifdef CONFIG_SCHED_DEBUG
100 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
101#endif
398a153b
GH
102 return container_of(rt_se, struct task_struct, rt);
103}
104
398a153b
GH
105static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
106{
107 return rt_rq->rq;
108}
109
110static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
111{
112 return rt_se->rt_rq;
113}
114
029632fb
PZ
115void free_rt_sched_group(struct task_group *tg)
116{
117 int i;
118
119 if (tg->rt_se)
120 destroy_rt_bandwidth(&tg->rt_bandwidth);
121
122 for_each_possible_cpu(i) {
123 if (tg->rt_rq)
124 kfree(tg->rt_rq[i]);
125 if (tg->rt_se)
126 kfree(tg->rt_se[i]);
127 }
128
129 kfree(tg->rt_rq);
130 kfree(tg->rt_se);
131}
132
133void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
134 struct sched_rt_entity *rt_se, int cpu,
135 struct sched_rt_entity *parent)
136{
137 struct rq *rq = cpu_rq(cpu);
138
139 rt_rq->highest_prio.curr = MAX_RT_PRIO;
140 rt_rq->rt_nr_boosted = 0;
141 rt_rq->rq = rq;
142 rt_rq->tg = tg;
143
144 tg->rt_rq[cpu] = rt_rq;
145 tg->rt_se[cpu] = rt_se;
146
147 if (!rt_se)
148 return;
149
150 if (!parent)
151 rt_se->rt_rq = &rq->rt;
152 else
153 rt_se->rt_rq = parent->my_q;
154
155 rt_se->my_q = rt_rq;
156 rt_se->parent = parent;
157 INIT_LIST_HEAD(&rt_se->run_list);
158}
159
160int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
161{
162 struct rt_rq *rt_rq;
163 struct sched_rt_entity *rt_se;
164 int i;
165
166 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
167 if (!tg->rt_rq)
168 goto err;
169 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
170 if (!tg->rt_se)
171 goto err;
172
173 init_rt_bandwidth(&tg->rt_bandwidth,
174 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
175
176 for_each_possible_cpu(i) {
177 rt_rq = kzalloc_node(sizeof(struct rt_rq),
178 GFP_KERNEL, cpu_to_node(i));
179 if (!rt_rq)
180 goto err;
181
182 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
183 GFP_KERNEL, cpu_to_node(i));
184 if (!rt_se)
185 goto err_free_rq;
186
187 init_rt_rq(rt_rq, cpu_rq(i));
188 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
189 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
190 }
191
192 return 1;
193
194err_free_rq:
195 kfree(rt_rq);
196err:
197 return 0;
198}
199
398a153b
GH
200#else /* CONFIG_RT_GROUP_SCHED */
201
a1ba4d8b
PZ
202#define rt_entity_is_task(rt_se) (1)
203
8f48894f
PZ
204static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
205{
206 return container_of(rt_se, struct task_struct, rt);
207}
208
398a153b
GH
209static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
210{
211 return container_of(rt_rq, struct rq, rt);
212}
213
214static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
215{
216 struct task_struct *p = rt_task_of(rt_se);
217 struct rq *rq = task_rq(p);
218
219 return &rq->rt;
220}
221
029632fb
PZ
222void free_rt_sched_group(struct task_group *tg) { }
223
224int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
225{
226 return 1;
227}
398a153b
GH
228#endif /* CONFIG_RT_GROUP_SCHED */
229
4fd29176 230#ifdef CONFIG_SMP
84de4274 231
637f5085 232static inline int rt_overloaded(struct rq *rq)
4fd29176 233{
637f5085 234 return atomic_read(&rq->rd->rto_count);
4fd29176 235}
84de4274 236
4fd29176
SR
237static inline void rt_set_overload(struct rq *rq)
238{
1f11eb6a
GH
239 if (!rq->online)
240 return;
241
c6c4927b 242 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
4fd29176
SR
243 /*
244 * Make sure the mask is visible before we set
245 * the overload count. That is checked to determine
246 * if we should look at the mask. It would be a shame
247 * if we looked at the mask, but the mask was not
248 * updated yet.
7c3f2ab7
PZ
249 *
250 * Matched by the barrier in pull_rt_task().
4fd29176 251 */
7c3f2ab7 252 smp_wmb();
637f5085 253 atomic_inc(&rq->rd->rto_count);
4fd29176 254}
84de4274 255
4fd29176
SR
256static inline void rt_clear_overload(struct rq *rq)
257{
1f11eb6a
GH
258 if (!rq->online)
259 return;
260
4fd29176 261 /* the order here really doesn't matter */
637f5085 262 atomic_dec(&rq->rd->rto_count);
c6c4927b 263 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
4fd29176 264}
73fe6aae 265
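/*
 * Track whether this runqueue is "RT overloaded": it has more than one
 * runnable RT task and at least one of them can migrate, so other CPUs
 * may want to pull from it.
 */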
398a153b 266static void update_rt_migration(struct rt_rq *rt_rq)
73fe6aae 267{
a1ba4d8b 268 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
398a153b
GH
269 if (!rt_rq->overloaded) {
270 rt_set_overload(rq_of_rt_rq(rt_rq));
271 rt_rq->overloaded = 1;
cdc8eb98 272 }
398a153b
GH
273 } else if (rt_rq->overloaded) {
274 rt_clear_overload(rq_of_rt_rq(rt_rq));
275 rt_rq->overloaded = 0;
637f5085 276 }
73fe6aae 277}
4fd29176 278
398a153b
GH
279static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
280{
29baa747
PZ
281 struct task_struct *p;
282
a1ba4d8b
PZ
283 if (!rt_entity_is_task(rt_se))
284 return;
285
29baa747 286 p = rt_task_of(rt_se);
a1ba4d8b
PZ
287 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
288
289 rt_rq->rt_nr_total++;
29baa747 290 if (p->nr_cpus_allowed > 1)
398a153b
GH
291 rt_rq->rt_nr_migratory++;
292
293 update_rt_migration(rt_rq);
294}
295
296static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
297{
29baa747
PZ
298 struct task_struct *p;
299
a1ba4d8b
PZ
300 if (!rt_entity_is_task(rt_se))
301 return;
302
29baa747 303 p = rt_task_of(rt_se);
a1ba4d8b
PZ
304 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
305
306 rt_rq->rt_nr_total--;
29baa747 307 if (p->nr_cpus_allowed > 1)
398a153b
GH
308 rt_rq->rt_nr_migratory--;
309
310 update_rt_migration(rt_rq);
311}
312
5181f4a4
SR
313static inline int has_pushable_tasks(struct rq *rq)
314{
315 return !plist_head_empty(&rq->rt.pushable_tasks);
316}
317
917b627d
GH
318static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
319{
320 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
321 plist_node_init(&p->pushable_tasks, p->prio);
322 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
5181f4a4
SR
323
324 /* Update the highest prio pushable task */
325 if (p->prio < rq->rt.highest_prio.next)
326 rq->rt.highest_prio.next = p->prio;
917b627d
GH
327}
328
329static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
330{
331 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
917b627d 332
5181f4a4
SR
333 /* Update the new highest prio pushable task */
334 if (has_pushable_tasks(rq)) {
335 p = plist_first_entry(&rq->rt.pushable_tasks,
336 struct task_struct, pushable_tasks);
337 rq->rt.highest_prio.next = p->prio;
338 } else
339 rq->rt.highest_prio.next = MAX_RT_PRIO;
bcf08df3
IM
340}
341
917b627d
GH
342#else
343
ceacc2c1 344static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
fa85ae24 345{
6f505b16
PZ
346}
347
ceacc2c1
PZ
348static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
349{
350}
351
b07430ac 352static inline
ceacc2c1
PZ
353void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
354{
355}
356
398a153b 357static inline
ceacc2c1
PZ
358void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
359{
360}
917b627d 361
4fd29176
SR
362#endif /* CONFIG_SMP */
363
6f505b16
PZ
364static inline int on_rt_rq(struct sched_rt_entity *rt_se)
365{
366 return !list_empty(&rt_se->run_list);
367}
368
052f1dc7 369#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 370
9f0c1e56 371static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
6f505b16
PZ
372{
373 if (!rt_rq->tg)
9f0c1e56 374 return RUNTIME_INF;
6f505b16 375
ac086bc2
PZ
376 return rt_rq->rt_runtime;
377}
378
379static inline u64 sched_rt_period(struct rt_rq *rt_rq)
380{
381 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
6f505b16
PZ
382}
383
ec514c48
CX
384typedef struct task_group *rt_rq_iter_t;
385
1c09ab0d
YZ
386static inline struct task_group *next_task_group(struct task_group *tg)
387{
388 do {
389 tg = list_entry_rcu(tg->list.next,
390 typeof(struct task_group), list);
391 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
392
393 if (&tg->list == &task_groups)
394 tg = NULL;
395
396 return tg;
397}
398
399#define for_each_rt_rq(rt_rq, iter, rq) \
400 for (iter = container_of(&task_groups, typeof(*iter), list); \
401 (iter = next_task_group(iter)) && \
402 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
ec514c48 403
6f505b16
PZ
404#define for_each_sched_rt_entity(rt_se) \
405 for (; rt_se; rt_se = rt_se->parent)
406
407static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
408{
409 return rt_se->my_q;
410}
411
37dad3fc 412static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
6f505b16
PZ
413static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
414
9f0c1e56 415static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
6f505b16 416{
f6121f4f 417 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
74b7eb58
YZ
418 struct sched_rt_entity *rt_se;
419
0c3b9168
BS
420 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
421
422 rt_se = rt_rq->tg->rt_se[cpu];
6f505b16 423
f6121f4f
DF
424 if (rt_rq->rt_nr_running) {
425 if (rt_se && !on_rt_rq(rt_se))
37dad3fc 426 enqueue_rt_entity(rt_se, false);
e864c499 427 if (rt_rq->highest_prio.curr < curr->prio)
1020387f 428 resched_task(curr);
6f505b16
PZ
429 }
430}
431
9f0c1e56 432static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
6f505b16 433{
74b7eb58 434 struct sched_rt_entity *rt_se;
0c3b9168 435 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
74b7eb58 436
0c3b9168 437 rt_se = rt_rq->tg->rt_se[cpu];
6f505b16
PZ
438
439 if (rt_se && on_rt_rq(rt_se))
440 dequeue_rt_entity(rt_se);
441}
442
23b0fdfc
PZ
443static inline int rt_rq_throttled(struct rt_rq *rt_rq)
444{
445 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
446}
447
448static int rt_se_boosted(struct sched_rt_entity *rt_se)
449{
450 struct rt_rq *rt_rq = group_rt_rq(rt_se);
451 struct task_struct *p;
452
453 if (rt_rq)
454 return !!rt_rq->rt_nr_boosted;
455
456 p = rt_task_of(rt_se);
457 return p->prio != p->normal_prio;
458}
459
d0b27fa7 460#ifdef CONFIG_SMP
c6c4927b 461static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa7 462{
424c93fe 463 return this_rq()->rd->span;
d0b27fa7 464}
6f505b16 465#else
c6c4927b 466static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa7 467{
c6c4927b 468 return cpu_online_mask;
d0b27fa7
PZ
469}
470#endif
6f505b16 471
d0b27fa7
PZ
472static inline
473struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
6f505b16 474{
d0b27fa7
PZ
475 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
476}
9f0c1e56 477
ac086bc2
PZ
478static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
479{
480 return &rt_rq->tg->rt_bandwidth;
481}
482
55e12e5e 483#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
484
485static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
486{
ac086bc2
PZ
487 return rt_rq->rt_runtime;
488}
489
490static inline u64 sched_rt_period(struct rt_rq *rt_rq)
491{
492 return ktime_to_ns(def_rt_bandwidth.rt_period);
6f505b16
PZ
493}
494
ec514c48
CX
495typedef struct rt_rq *rt_rq_iter_t;
496
497#define for_each_rt_rq(rt_rq, iter, rq) \
498 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
499
6f505b16
PZ
500#define for_each_sched_rt_entity(rt_se) \
501 for (; rt_se; rt_se = NULL)
502
503static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
504{
505 return NULL;
506}
507
9f0c1e56 508static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
6f505b16 509{
f3ade837
JB
510 if (rt_rq->rt_nr_running)
511 resched_task(rq_of_rt_rq(rt_rq)->curr);
6f505b16
PZ
512}
513
9f0c1e56 514static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
6f505b16
PZ
515{
516}
517
23b0fdfc
PZ
518static inline int rt_rq_throttled(struct rt_rq *rt_rq)
519{
520 return rt_rq->rt_throttled;
521}
d0b27fa7 522
c6c4927b 523static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa7 524{
c6c4927b 525 return cpu_online_mask;
d0b27fa7
PZ
526}
527
528static inline
529struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
530{
531 return &cpu_rq(cpu)->rt;
532}
533
ac086bc2
PZ
534static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
535{
536 return &def_rt_bandwidth;
537}
538
55e12e5e 539#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 540
ac086bc2 541#ifdef CONFIG_SMP
78333cdd
PZ
542/*
543 * We ran out of runtime, see if we can borrow some from our neighbours.
544 */
b79f3833 545static int do_balance_runtime(struct rt_rq *rt_rq)
ac086bc2
PZ
546{
547 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
aa7f6730 548 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
ac086bc2
PZ
549 int i, weight, more = 0;
550 u64 rt_period;
551
c6c4927b 552 weight = cpumask_weight(rd->span);
ac086bc2 553
0986b11b 554 raw_spin_lock(&rt_b->rt_runtime_lock);
ac086bc2 555 rt_period = ktime_to_ns(rt_b->rt_period);
c6c4927b 556 for_each_cpu(i, rd->span) {
ac086bc2
PZ
557 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
558 s64 diff;
559
560 if (iter == rt_rq)
561 continue;
562
0986b11b 563 raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it's been disabled and disallow stealing.
		 */
7def2be1
PZ
569 if (iter->rt_runtime == RUNTIME_INF)
570 goto next;
571
78333cdd
PZ
572 /*
573 * From runqueues with spare time, take 1/n part of their
574 * spare time, but no more than our period.
575 */
ac086bc2
PZ
576 diff = iter->rt_runtime - iter->rt_time;
577 if (diff > 0) {
58838cf3 578 diff = div_u64((u64)diff, weight);
ac086bc2
PZ
579 if (rt_rq->rt_runtime + diff > rt_period)
580 diff = rt_period - rt_rq->rt_runtime;
581 iter->rt_runtime -= diff;
582 rt_rq->rt_runtime += diff;
583 more = 1;
584 if (rt_rq->rt_runtime == rt_period) {
0986b11b 585 raw_spin_unlock(&iter->rt_runtime_lock);
ac086bc2
PZ
586 break;
587 }
588 }
7def2be1 589next:
0986b11b 590 raw_spin_unlock(&iter->rt_runtime_lock);
ac086bc2 591 }
0986b11b 592 raw_spin_unlock(&rt_b->rt_runtime_lock);
ac086bc2
PZ
593
594 return more;
595}
7def2be1 596
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
7def2be1
PZ
600static void __disable_runtime(struct rq *rq)
601{
602 struct root_domain *rd = rq->rd;
ec514c48 603 rt_rq_iter_t iter;
7def2be1
PZ
604 struct rt_rq *rt_rq;
605
606 if (unlikely(!scheduler_running))
607 return;
608
ec514c48 609 for_each_rt_rq(rt_rq, iter, rq) {
7def2be1
PZ
610 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
611 s64 want;
612 int i;
613
0986b11b
TG
614 raw_spin_lock(&rt_b->rt_runtime_lock);
615 raw_spin_lock(&rt_rq->rt_runtime_lock);
78333cdd
PZ
616 /*
617 * Either we're all inf and nobody needs to borrow, or we're
618 * already disabled and thus have nothing to do, or we have
619 * exactly the right amount of runtime to take out.
620 */
7def2be1
PZ
621 if (rt_rq->rt_runtime == RUNTIME_INF ||
622 rt_rq->rt_runtime == rt_b->rt_runtime)
623 goto balanced;
0986b11b 624 raw_spin_unlock(&rt_rq->rt_runtime_lock);
7def2be1 625
		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent out and now have to reclaim.
		 */
7def2be1
PZ
631 want = rt_b->rt_runtime - rt_rq->rt_runtime;
632
78333cdd
PZ
633 /*
634 * Greedy reclaim, take back as much as we can.
635 */
c6c4927b 636 for_each_cpu(i, rd->span) {
7def2be1
PZ
637 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
638 s64 diff;
639
78333cdd
PZ
640 /*
641 * Can't reclaim from ourselves or disabled runqueues.
642 */
f1679d08 643 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
7def2be1
PZ
644 continue;
645
0986b11b 646 raw_spin_lock(&iter->rt_runtime_lock);
7def2be1
PZ
647 if (want > 0) {
648 diff = min_t(s64, iter->rt_runtime, want);
649 iter->rt_runtime -= diff;
650 want -= diff;
651 } else {
652 iter->rt_runtime -= want;
653 want -= want;
654 }
0986b11b 655 raw_spin_unlock(&iter->rt_runtime_lock);
7def2be1
PZ
656
657 if (!want)
658 break;
659 }
660
0986b11b 661 raw_spin_lock(&rt_rq->rt_runtime_lock);
78333cdd
PZ
662 /*
663 * We cannot be left wanting - that would mean some runtime
664 * leaked out of the system.
665 */
7def2be1
PZ
666 BUG_ON(want);
667balanced:
78333cdd
PZ
668 /*
669 * Disable all the borrow logic by pretending we have inf
670 * runtime - in which case borrowing doesn't make sense.
671 */
7def2be1 672 rt_rq->rt_runtime = RUNTIME_INF;
a4c96ae3 673 rt_rq->rt_throttled = 0;
0986b11b
TG
674 raw_spin_unlock(&rt_rq->rt_runtime_lock);
675 raw_spin_unlock(&rt_b->rt_runtime_lock);
7def2be1
PZ
676 }
677}
678
7def2be1
PZ
679static void __enable_runtime(struct rq *rq)
680{
ec514c48 681 rt_rq_iter_t iter;
7def2be1
PZ
682 struct rt_rq *rt_rq;
683
684 if (unlikely(!scheduler_running))
685 return;
686
78333cdd
PZ
687 /*
688 * Reset each runqueue's bandwidth settings
689 */
ec514c48 690 for_each_rt_rq(rt_rq, iter, rq) {
7def2be1
PZ
691 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
692
0986b11b
TG
693 raw_spin_lock(&rt_b->rt_runtime_lock);
694 raw_spin_lock(&rt_rq->rt_runtime_lock);
7def2be1
PZ
695 rt_rq->rt_runtime = rt_b->rt_runtime;
696 rt_rq->rt_time = 0;
baf25731 697 rt_rq->rt_throttled = 0;
0986b11b
TG
698 raw_spin_unlock(&rt_rq->rt_runtime_lock);
699 raw_spin_unlock(&rt_b->rt_runtime_lock);
7def2be1
PZ
700 }
701}
702
eff6549b
PZ
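/*
 * Called with rt_rq->rt_runtime_lock held; temporarily drops it around
 * do_balance_runtime() while trying to borrow runtime from siblings.
 */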
703static int balance_runtime(struct rt_rq *rt_rq)
704{
705 int more = 0;
706
4a6184ce
PZ
707 if (!sched_feat(RT_RUNTIME_SHARE))
708 return more;
709
eff6549b 710 if (rt_rq->rt_time > rt_rq->rt_runtime) {
0986b11b 711 raw_spin_unlock(&rt_rq->rt_runtime_lock);
eff6549b 712 more = do_balance_runtime(rt_rq);
0986b11b 713 raw_spin_lock(&rt_rq->rt_runtime_lock);
eff6549b
PZ
714 }
715
716 return more;
717}
55e12e5e 718#else /* !CONFIG_SMP */
eff6549b
PZ
719static inline int balance_runtime(struct rt_rq *rt_rq)
720{
721 return 0;
722}
55e12e5e 723#endif /* CONFIG_SMP */
ac086bc2 724
eff6549b
PZ
725static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
726{
42c62a58 727 int i, idle = 1, throttled = 0;
c6c4927b 728 const struct cpumask *span;
eff6549b 729
eff6549b 730 span = sched_rt_period_mask();
e221d028
MG
731#ifdef CONFIG_RT_GROUP_SCHED
732 /*
733 * FIXME: isolated CPUs should really leave the root task group,
734 * whether they are isolcpus or were isolated via cpusets, lest
735 * the timer run on a CPU which does not service all runqueues,
736 * potentially leaving other CPUs indefinitely throttled. If
737 * isolation is really required, the user will turn the throttle
738 * off to kill the perturbations it causes anyway. Meanwhile,
739 * this maintains functionality for boot and/or troubleshooting.
740 */
741 if (rt_b == &root_task_group.rt_bandwidth)
742 span = cpu_online_mask;
743#endif
c6c4927b 744 for_each_cpu(i, span) {
eff6549b
PZ
745 int enqueue = 0;
746 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
747 struct rq *rq = rq_of_rt_rq(rt_rq);
748
05fa785c 749 raw_spin_lock(&rq->lock);
eff6549b
PZ
750 if (rt_rq->rt_time) {
751 u64 runtime;
752
0986b11b 753 raw_spin_lock(&rt_rq->rt_runtime_lock);
eff6549b
PZ
754 if (rt_rq->rt_throttled)
755 balance_runtime(rt_rq);
756 runtime = rt_rq->rt_runtime;
757 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
758 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
759 rt_rq->rt_throttled = 0;
760 enqueue = 1;
61eadef6
MG
761
762 /*
763 * Force a clock update if the CPU was idle,
764 * lest wakeup -> unthrottle time accumulate.
765 */
766 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
767 rq->skip_clock_update = -1;
eff6549b
PZ
768 }
769 if (rt_rq->rt_time || rt_rq->rt_nr_running)
770 idle = 0;
0986b11b 771 raw_spin_unlock(&rt_rq->rt_runtime_lock);
0c3b9168 772 } else if (rt_rq->rt_nr_running) {
6c3df255 773 idle = 0;
0c3b9168
BS
774 if (!rt_rq_throttled(rt_rq))
775 enqueue = 1;
776 }
42c62a58
PZ
777 if (rt_rq->rt_throttled)
778 throttled = 1;
eff6549b
PZ
779
780 if (enqueue)
781 sched_rt_rq_enqueue(rt_rq);
05fa785c 782 raw_spin_unlock(&rq->lock);
eff6549b
PZ
783 }
784
42c62a58
PZ
785 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
786 return 1;
787
eff6549b
PZ
788 return idle;
789}
ac086bc2 790
6f505b16
PZ
791static inline int rt_se_prio(struct sched_rt_entity *rt_se)
792{
052f1dc7 793#ifdef CONFIG_RT_GROUP_SCHED
6f505b16
PZ
794 struct rt_rq *rt_rq = group_rt_rq(rt_se);
795
796 if (rt_rq)
e864c499 797 return rt_rq->highest_prio.curr;
6f505b16
PZ
798#endif
799
800 return rt_task_of(rt_se)->prio;
801}
802
9f0c1e56 803static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
6f505b16 804{
9f0c1e56 805 u64 runtime = sched_rt_runtime(rt_rq);
fa85ae24 806
fa85ae24 807 if (rt_rq->rt_throttled)
23b0fdfc 808 return rt_rq_throttled(rt_rq);
fa85ae24 809
5b680fd6 810 if (runtime >= sched_rt_period(rt_rq))
ac086bc2
PZ
811 return 0;
812
b79f3833
PZ
813 balance_runtime(rt_rq);
814 runtime = sched_rt_runtime(rt_rq);
815 if (runtime == RUNTIME_INF)
816 return 0;
ac086bc2 817
9f0c1e56 818 if (rt_rq->rt_time > runtime) {
7abc63b1
PZ
819 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
820
821 /*
822 * Don't actually throttle groups that have no runtime assigned
823 * but accrue some time due to boosting.
824 */
825 if (likely(rt_b->rt_runtime)) {
3ccf3e83
PZ
826 static bool once = false;
827
7abc63b1 828 rt_rq->rt_throttled = 1;
3ccf3e83
PZ
829
830 if (!once) {
831 once = true;
832 printk_sched("sched: RT throttling activated\n");
833 }
7abc63b1
PZ
834 } else {
835 /*
836 * In case we did anyway, make it go away,
837 * replenishment is a joke, since it will replenish us
838 * with exactly 0 ns.
839 */
840 rt_rq->rt_time = 0;
841 }
842
23b0fdfc 843 if (rt_rq_throttled(rt_rq)) {
9f0c1e56 844 sched_rt_rq_dequeue(rt_rq);
23b0fdfc
PZ
845 return 1;
846 }
fa85ae24
PZ
847 }
848
849 return 0;
850}
851
bb44e5d1
IM
852/*
853 * Update the current task's runtime statistics. Skip current tasks that
854 * are not in our scheduling class.
855 */
a9957449 856static void update_curr_rt(struct rq *rq)
bb44e5d1
IM
857{
858 struct task_struct *curr = rq->curr;
6f505b16
PZ
859 struct sched_rt_entity *rt_se = &curr->rt;
860 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
bb44e5d1
IM
861 u64 delta_exec;
862
06c3bc65 863 if (curr->sched_class != &rt_sched_class)
bb44e5d1
IM
864 return;
865
78becc27 866 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
fc79e240
KT
867 if (unlikely((s64)delta_exec <= 0))
868 return;
6cfb0d5d 869
42c62a58
PZ
870 schedstat_set(curr->se.statistics.exec_max,
871 max(curr->se.statistics.exec_max, delta_exec));
bb44e5d1
IM
872
873 curr->se.sum_exec_runtime += delta_exec;
f06febc9
FM
874 account_group_exec_runtime(curr, delta_exec);
875
78becc27 876 curr->se.exec_start = rq_clock_task(rq);
d842de87 877 cpuacct_charge(curr, delta_exec);
fa85ae24 878
e9e9250b
PZ
879 sched_rt_avg_update(rq, delta_exec);
880
0b148fa0
PZ
881 if (!rt_bandwidth_enabled())
882 return;
883
354d60c2
DG
884 for_each_sched_rt_entity(rt_se) {
885 rt_rq = rt_rq_of_se(rt_se);
886
cc2991cf 887 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
0986b11b 888 raw_spin_lock(&rt_rq->rt_runtime_lock);
cc2991cf
PZ
889 rt_rq->rt_time += delta_exec;
890 if (sched_rt_runtime_exceeded(rt_rq))
891 resched_task(curr);
0986b11b 892 raw_spin_unlock(&rt_rq->rt_runtime_lock);
cc2991cf 893 }
354d60c2 894 }
bb44e5d1
IM
895}
896
398a153b 897#if defined CONFIG_SMP
e864c499 898
398a153b
GH
899static void
900inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
63489e45 901{
4d984277 902 struct rq *rq = rq_of_rt_rq(rt_rq);
1f11eb6a 903
757dfcaa
KT
904#ifdef CONFIG_RT_GROUP_SCHED
905 /*
906 * Change rq's cpupri only if rt_rq is the top queue.
907 */
908 if (&rq->rt != rt_rq)
909 return;
910#endif
5181f4a4
SR
911 if (rq->online && prio < prev_prio)
912 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
398a153b 913}
73fe6aae 914
398a153b
GH
915static void
916dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
917{
918 struct rq *rq = rq_of_rt_rq(rt_rq);
d0b27fa7 919
757dfcaa
KT
920#ifdef CONFIG_RT_GROUP_SCHED
921 /*
922 * Change rq's cpupri only if rt_rq is the top queue.
923 */
924 if (&rq->rt != rt_rq)
925 return;
926#endif
398a153b
GH
927 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
928 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
63489e45
SR
929}
930
398a153b
GH
931#else /* CONFIG_SMP */
932
6f505b16 933static inline
398a153b
GH
934void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
935static inline
936void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
937
938#endif /* CONFIG_SMP */
6e0534f2 939
052f1dc7 940#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
398a153b
GH
941static void
942inc_rt_prio(struct rt_rq *rt_rq, int prio)
943{
944 int prev_prio = rt_rq->highest_prio.curr;
945
946 if (prio < prev_prio)
947 rt_rq->highest_prio.curr = prio;
948
949 inc_rt_prio_smp(rt_rq, prio, prev_prio);
950}
951
952static void
953dec_rt_prio(struct rt_rq *rt_rq, int prio)
954{
955 int prev_prio = rt_rq->highest_prio.curr;
956
6f505b16 957 if (rt_rq->rt_nr_running) {
764a9d6f 958
398a153b 959 WARN_ON(prio < prev_prio);
764a9d6f 960
e864c499 961 /*
398a153b
GH
962 * This may have been our highest task, and therefore
963 * we may have some recomputation to do
e864c499 964 */
398a153b 965 if (prio == prev_prio) {
e864c499
GH
966 struct rt_prio_array *array = &rt_rq->active;
967
968 rt_rq->highest_prio.curr =
764a9d6f 969 sched_find_first_bit(array->bitmap);
e864c499
GH
970 }
971
764a9d6f 972 } else
e864c499 973 rt_rq->highest_prio.curr = MAX_RT_PRIO;
73fe6aae 974
398a153b
GH
975 dec_rt_prio_smp(rt_rq, prio, prev_prio);
976}
1f11eb6a 977
398a153b
GH
978#else
979
980static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
981static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
982
983#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
6e0534f2 984
052f1dc7 985#ifdef CONFIG_RT_GROUP_SCHED
398a153b
GH
986
987static void
988inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
989{
990 if (rt_se_boosted(rt_se))
991 rt_rq->rt_nr_boosted++;
992
993 if (rt_rq->tg)
994 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
995}
996
997static void
998dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
999{
23b0fdfc
PZ
1000 if (rt_se_boosted(rt_se))
1001 rt_rq->rt_nr_boosted--;
1002
1003 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
398a153b
GH
1004}
1005
1006#else /* CONFIG_RT_GROUP_SCHED */
1007
1008static void
1009inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1010{
1011 start_rt_bandwidth(&def_rt_bandwidth);
1012}
1013
1014static inline
1015void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1016
1017#endif /* CONFIG_RT_GROUP_SCHED */
1018
1019static inline
1020void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1021{
1022 int prio = rt_se_prio(rt_se);
1023
1024 WARN_ON(!rt_prio(prio));
1025 rt_rq->rt_nr_running++;
1026
1027 inc_rt_prio(rt_rq, prio);
1028 inc_rt_migration(rt_se, rt_rq);
1029 inc_rt_group(rt_se, rt_rq);
1030}
1031
1032static inline
1033void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1034{
1035 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1036 WARN_ON(!rt_rq->rt_nr_running);
1037 rt_rq->rt_nr_running--;
1038
1039 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1040 dec_rt_migration(rt_se, rt_rq);
1041 dec_rt_group(rt_se, rt_rq);
63489e45
SR
1042}
1043
37dad3fc 1044static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
bb44e5d1 1045{
6f505b16
PZ
1046 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1047 struct rt_prio_array *array = &rt_rq->active;
1048 struct rt_rq *group_rq = group_rt_rq(rt_se);
20b6331b 1049 struct list_head *queue = array->queue + rt_se_prio(rt_se);
bb44e5d1 1050
	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
1057 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
6f505b16 1058 return;
63489e45 1059
37dad3fc
TG
1060 if (head)
1061 list_add(&rt_se->run_list, queue);
1062 else
1063 list_add_tail(&rt_se->run_list, queue);
6f505b16 1064 __set_bit(rt_se_prio(rt_se), array->bitmap);
78f2c7db 1065
6f505b16
PZ
1066 inc_rt_tasks(rt_se, rt_rq);
1067}
1068
ad2a3f13 1069static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
6f505b16
PZ
1070{
1071 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1072 struct rt_prio_array *array = &rt_rq->active;
1073
1074 list_del_init(&rt_se->run_list);
1075 if (list_empty(array->queue + rt_se_prio(rt_se)))
1076 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1077
1078 dec_rt_tasks(rt_se, rt_rq);
1079}
1080
/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
ad2a3f13 1085static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
6f505b16 1086{
ad2a3f13 1087 struct sched_rt_entity *back = NULL;
6f505b16 1088
58d6c2d7
PZ
1089 for_each_sched_rt_entity(rt_se) {
1090 rt_se->back = back;
1091 back = rt_se;
1092 }
1093
1094 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1095 if (on_rt_rq(rt_se))
ad2a3f13
PZ
1096 __dequeue_rt_entity(rt_se);
1097 }
1098}
1099
37dad3fc 1100static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
ad2a3f13
PZ
1101{
1102 dequeue_rt_stack(rt_se);
1103 for_each_sched_rt_entity(rt_se)
37dad3fc 1104 __enqueue_rt_entity(rt_se, head);
ad2a3f13
PZ
1105}
1106
1107static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1108{
1109 dequeue_rt_stack(rt_se);
1110
1111 for_each_sched_rt_entity(rt_se) {
1112 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1113
1114 if (rt_rq && rt_rq->rt_nr_running)
37dad3fc 1115 __enqueue_rt_entity(rt_se, false);
58d6c2d7 1116 }
bb44e5d1
IM
1117}
1118
1119/*
1120 * Adding/removing a task to/from a priority array:
1121 */
ea87bb78 1122static void
371fd7e7 1123enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
6f505b16
PZ
1124{
1125 struct sched_rt_entity *rt_se = &p->rt;
1126
371fd7e7 1127 if (flags & ENQUEUE_WAKEUP)
6f505b16
PZ
1128 rt_se->timeout = 0;
1129
371fd7e7 1130 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
c09595f6 1131
29baa747 1132 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
917b627d 1133 enqueue_pushable_task(rq, p);
953bfcd1
PT
1134
1135 inc_nr_running(rq);
6f505b16
PZ
1136}
1137
371fd7e7 1138static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
bb44e5d1 1139{
6f505b16 1140 struct sched_rt_entity *rt_se = &p->rt;
bb44e5d1 1141
f1e14ef6 1142 update_curr_rt(rq);
ad2a3f13 1143 dequeue_rt_entity(rt_se);
c09595f6 1144
917b627d 1145 dequeue_pushable_task(rq, p);
953bfcd1
PT
1146
1147 dec_nr_running(rq);
bb44e5d1
IM
1148}
1149
/*
 * Put the task at the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
7ebefa8c
DA
1154static void
1155requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
6f505b16 1156{
1cdad715 1157 if (on_rt_rq(rt_se)) {
7ebefa8c
DA
1158 struct rt_prio_array *array = &rt_rq->active;
1159 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1160
1161 if (head)
1162 list_move(&rt_se->run_list, queue);
1163 else
1164 list_move_tail(&rt_se->run_list, queue);
1cdad715 1165 }
6f505b16
PZ
1166}
1167
7ebefa8c 1168static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
bb44e5d1 1169{
6f505b16
PZ
1170 struct sched_rt_entity *rt_se = &p->rt;
1171 struct rt_rq *rt_rq;
bb44e5d1 1172
6f505b16
PZ
1173 for_each_sched_rt_entity(rt_se) {
1174 rt_rq = rt_rq_of_se(rt_se);
7ebefa8c 1175 requeue_rt_entity(rt_rq, rt_se, head);
6f505b16 1176 }
bb44e5d1
IM
1177}
1178
6f505b16 1179static void yield_task_rt(struct rq *rq)
bb44e5d1 1180{
7ebefa8c 1181 requeue_task_rt(rq, rq->curr, 0);
bb44e5d1
IM
1182}
1183
e7693a36 1184#ifdef CONFIG_SMP
318e0893
GH
1185static int find_lowest_rq(struct task_struct *task);
1186
0017d735 1187static int
ac66f547 1188select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
e7693a36 1189{
7608dec2
PZ
1190 struct task_struct *curr;
1191 struct rq *rq;
c37495fd 1192
29baa747 1193 if (p->nr_cpus_allowed == 1)
76854c7e
MG
1194 goto out;
1195
c37495fd
SR
1196 /* For anything but wake ups, just return the task_cpu */
1197 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1198 goto out;
1199
7608dec2
PZ
1200 rq = cpu_rq(cpu);
1201
1202 rcu_read_lock();
1203 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1204
318e0893 1205 /*
7608dec2 1206 * If the current task on @p's runqueue is an RT task, then
e1f47d89
SR
1207 * try to see if we can wake this RT task up on another
1208 * runqueue. Otherwise simply start this RT task
1209 * on its current runqueue.
1210 *
43fa5460
SR
1211 * We want to avoid overloading runqueues. If the woken
1212 * task is a higher priority, then it will stay on this CPU
1213 * and the lower prio task should be moved to another CPU.
1214 * Even though this will probably make the lower prio task
1215 * lose its cache, we do not want to bounce a higher task
1216 * around just because it gave up its CPU, perhaps for a
1217 * lock?
1218 *
1219 * For equal prio tasks, we just let the scheduler sort it out.
7608dec2
PZ
1220 *
1221 * Otherwise, just let it ride on the affined RQ and the
1222 * post-schedule router will push the preempted task away
1223 *
1224 * This test is optimistic, if we get it wrong the load-balancer
1225 * will have to sort it out.
318e0893 1226 */
7608dec2 1227 if (curr && unlikely(rt_task(curr)) &&
29baa747 1228 (curr->nr_cpus_allowed < 2 ||
6bfa687c 1229 curr->prio <= p->prio)) {
7608dec2 1230 int target = find_lowest_rq(p);
318e0893 1231
7608dec2
PZ
1232 if (target != -1)
1233 cpu = target;
318e0893 1234 }
7608dec2 1235 rcu_read_unlock();
318e0893 1236
c37495fd 1237out:
7608dec2 1238 return cpu;
e7693a36 1239}
7ebefa8c
DA
1240
1241static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1242{
29baa747 1243 if (rq->curr->nr_cpus_allowed == 1)
7ebefa8c
DA
1244 return;
1245
29baa747 1246 if (p->nr_cpus_allowed != 1
13b8bd0a
RR
1247 && cpupri_find(&rq->rd->cpupri, p, NULL))
1248 return;
24600ce8 1249
13b8bd0a
RR
1250 if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1251 return;
7ebefa8c
DA
1252
	/*
	 * There appear to be other CPUs that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
1258 requeue_task_rt(rq, p, 1);
1259 resched_task(rq->curr);
1260}
1261
e7693a36
GH
1262#endif /* CONFIG_SMP */
1263
bb44e5d1
IM
1264/*
1265 * Preempt the current task with a newly woken task if needed:
1266 */
7d478721 1267static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
bb44e5d1 1268{
45c01e82 1269 if (p->prio < rq->curr->prio) {
bb44e5d1 1270 resched_task(rq->curr);
45c01e82
GH
1271 return;
1272 }
1273
1274#ifdef CONFIG_SMP
1275 /*
1276 * If:
1277 *
1278 * - the newly woken task is of equal priority to the current task
1279 * - the newly woken task is non-migratable while current is migratable
1280 * - current will be preempted on the next reschedule
1281 *
1282 * we should check to see if current can readily move to a different
1283 * cpu. If so, we will reschedule to allow the push logic to try
1284 * to move current somewhere else, making room for our non-migratable
1285 * task.
1286 */
8dd0de8b 1287 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
7ebefa8c 1288 check_preempt_equal_prio(rq, p);
45c01e82 1289#endif
bb44e5d1
IM
1290}
1291
6f505b16
PZ
1292static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1293 struct rt_rq *rt_rq)
bb44e5d1 1294{
6f505b16
PZ
1295 struct rt_prio_array *array = &rt_rq->active;
1296 struct sched_rt_entity *next = NULL;
bb44e5d1
IM
1297 struct list_head *queue;
1298 int idx;
1299
1300 idx = sched_find_first_bit(array->bitmap);
6f505b16 1301 BUG_ON(idx >= MAX_RT_PRIO);
bb44e5d1
IM
1302
1303 queue = array->queue + idx;
6f505b16 1304 next = list_entry(queue->next, struct sched_rt_entity, run_list);
326587b8 1305
6f505b16
PZ
1306 return next;
1307}
bb44e5d1 1308
917b627d 1309static struct task_struct *_pick_next_task_rt(struct rq *rq)
6f505b16
PZ
1310{
1311 struct sched_rt_entity *rt_se;
1312 struct task_struct *p;
1313 struct rt_rq *rt_rq;
bb44e5d1 1314
6f505b16
PZ
1315 rt_rq = &rq->rt;
1316
8e54a2c0 1317 if (!rt_rq->rt_nr_running)
6f505b16
PZ
1318 return NULL;
1319
23b0fdfc 1320 if (rt_rq_throttled(rt_rq))
6f505b16
PZ
1321 return NULL;
1322
1323 do {
1324 rt_se = pick_next_rt_entity(rq, rt_rq);
326587b8 1325 BUG_ON(!rt_se);
6f505b16
PZ
1326 rt_rq = group_rt_rq(rt_se);
1327 } while (rt_rq);
1328
1329 p = rt_task_of(rt_se);
78becc27 1330 p->se.exec_start = rq_clock_task(rq);
917b627d
GH
1331
1332 return p;
1333}
1334
1335static struct task_struct *pick_next_task_rt(struct rq *rq)
1336{
1337 struct task_struct *p = _pick_next_task_rt(rq);
1338
1339 /* The running task is never eligible for pushing */
1340 if (p)
1341 dequeue_pushable_task(rq, p);
1342
bcf08df3 1343#ifdef CONFIG_SMP
3f029d3c
GH
1344 /*
1345 * We detect this state here so that we can avoid taking the RQ
1346 * lock again later if there is no need to push
1347 */
1348 rq->post_schedule = has_pushable_tasks(rq);
bcf08df3 1349#endif
3f029d3c 1350
6f505b16 1351 return p;
bb44e5d1
IM
1352}
1353
31ee529c 1354static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
bb44e5d1 1355{
f1e14ef6 1356 update_curr_rt(rq);
917b627d
GH
1357
1358 /*
1359 * The previous task needs to be made eligible for pushing
1360 * if it is still active
1361 */
29baa747 1362 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
917b627d 1363 enqueue_pushable_task(rq, p);
bb44e5d1
IM
1364}
1365
681f3e68 1366#ifdef CONFIG_SMP
6f505b16 1367
e8fa1362
SR
1368/* Only try algorithms three times */
1369#define RT_MAX_TRIES 3
1370
f65eda4f
SR
1371static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1372{
1373 if (!task_running(rq, p) &&
60334caf 1374 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
f65eda4f
SR
1375 return 1;
1376 return 0;
1377}
1378
e23ee747
KT
1379/*
1380 * Return the highest pushable rq's task, which is suitable to be executed
1381 * on the cpu, NULL otherwise
1382 */
1383static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
e8fa1362 1384{
e23ee747
KT
1385 struct plist_head *head = &rq->rt.pushable_tasks;
1386 struct task_struct *p;
3d07467b 1387
e23ee747
KT
1388 if (!has_pushable_tasks(rq))
1389 return NULL;
3d07467b 1390
e23ee747
KT
1391 plist_for_each_entry(p, head, pushable_tasks) {
1392 if (pick_rt_task(rq, p, cpu))
1393 return p;
f65eda4f
SR
1394 }
1395
e23ee747 1396 return NULL;
e8fa1362
SR
1397}
1398
0e3900e6 1399static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
e8fa1362 1400
6e1254d2
GH
1401static int find_lowest_rq(struct task_struct *task)
1402{
1403 struct sched_domain *sd;
96f874e2 1404 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
6e1254d2
GH
1405 int this_cpu = smp_processor_id();
1406 int cpu = task_cpu(task);
06f90dbd 1407
0da938c4
SR
1408 /* Make sure the mask is initialized first */
1409 if (unlikely(!lowest_mask))
1410 return -1;
1411
29baa747 1412 if (task->nr_cpus_allowed == 1)
6e0534f2 1413 return -1; /* No other targets possible */
6e1254d2 1414
6e0534f2
GH
1415 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1416 return -1; /* No targets found */
6e1254d2
GH
1417
1418 /*
1419 * At this point we have built a mask of cpus representing the
1420 * lowest priority tasks in the system. Now we want to elect
1421 * the best one based on our affinity and topology.
1422 *
1423 * We prioritize the last cpu that the task executed on since
1424 * it is most likely cache-hot in that location.
1425 */
96f874e2 1426 if (cpumask_test_cpu(cpu, lowest_mask))
6e1254d2
GH
1427 return cpu;
1428
1429 /*
1430 * Otherwise, we consult the sched_domains span maps to figure
1431 * out which cpu is logically closest to our hot cache data.
1432 */
e2c88063
RR
1433 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1434 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
6e1254d2 1435
cd4ae6ad 1436 rcu_read_lock();
e2c88063
RR
1437 for_each_domain(cpu, sd) {
1438 if (sd->flags & SD_WAKE_AFFINE) {
1439 int best_cpu;
6e1254d2 1440
e2c88063
RR
1441 /*
1442 * "this_cpu" is cheaper to preempt than a
1443 * remote processor.
1444 */
1445 if (this_cpu != -1 &&
cd4ae6ad
XF
1446 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1447 rcu_read_unlock();
e2c88063 1448 return this_cpu;
cd4ae6ad 1449 }
e2c88063
RR
1450
1451 best_cpu = cpumask_first_and(lowest_mask,
1452 sched_domain_span(sd));
cd4ae6ad
XF
1453 if (best_cpu < nr_cpu_ids) {
1454 rcu_read_unlock();
e2c88063 1455 return best_cpu;
cd4ae6ad 1456 }
6e1254d2
GH
1457 }
1458 }
cd4ae6ad 1459 rcu_read_unlock();
6e1254d2
GH
1460
1461 /*
1462 * And finally, if there were no matches within the domains
1463 * just give the caller *something* to work with from the compatible
1464 * locations.
1465 */
e2c88063
RR
1466 if (this_cpu != -1)
1467 return this_cpu;
1468
1469 cpu = cpumask_any(lowest_mask);
1470 if (cpu < nr_cpu_ids)
1471 return cpu;
1472 return -1;
07b4032c
GH
1473}
1474
1475/* Will lock the rq it finds */
4df64c0b 1476static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
07b4032c
GH
1477{
1478 struct rq *lowest_rq = NULL;
07b4032c 1479 int tries;
4df64c0b 1480 int cpu;
e8fa1362 1481
07b4032c
GH
1482 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1483 cpu = find_lowest_rq(task);
1484
2de0b463 1485 if ((cpu == -1) || (cpu == rq->cpu))
e8fa1362
SR
1486 break;
1487
07b4032c
GH
1488 lowest_rq = cpu_rq(cpu);
1489
e8fa1362 1490 /* if the prio of this runqueue changed, try again */
07b4032c 1491 if (double_lock_balance(rq, lowest_rq)) {
e8fa1362
SR
1492 /*
			 * We had to unlock the run queue. In
			 * the meantime, the task could have
			 * migrated already or had its affinity changed.
1496 * Also make sure that it wasn't scheduled on its rq.
1497 */
07b4032c 1498 if (unlikely(task_rq(task) != rq ||
96f874e2 1499 !cpumask_test_cpu(lowest_rq->cpu,
fa17b507 1500 tsk_cpus_allowed(task)) ||
07b4032c 1501 task_running(rq, task) ||
fd2f4419 1502 !task->on_rq)) {
4df64c0b 1503
7f1b4393 1504 double_unlock_balance(rq, lowest_rq);
e8fa1362
SR
1505 lowest_rq = NULL;
1506 break;
1507 }
1508 }
1509
1510 /* If this rq is still suitable use it. */
e864c499 1511 if (lowest_rq->rt.highest_prio.curr > task->prio)
e8fa1362
SR
1512 break;
1513
1514 /* try again */
1b12bbc7 1515 double_unlock_balance(rq, lowest_rq);
e8fa1362
SR
1516 lowest_rq = NULL;
1517 }
1518
1519 return lowest_rq;
1520}
1521
917b627d
GH
1522static struct task_struct *pick_next_pushable_task(struct rq *rq)
1523{
1524 struct task_struct *p;
1525
1526 if (!has_pushable_tasks(rq))
1527 return NULL;
1528
1529 p = plist_first_entry(&rq->rt.pushable_tasks,
1530 struct task_struct, pushable_tasks);
1531
1532 BUG_ON(rq->cpu != task_cpu(p));
1533 BUG_ON(task_current(rq, p));
29baa747 1534 BUG_ON(p->nr_cpus_allowed <= 1);
917b627d 1535
fd2f4419 1536 BUG_ON(!p->on_rq);
917b627d
GH
1537 BUG_ON(!rt_task(p));
1538
1539 return p;
1540}
1541
e8fa1362
SR
1542/*
1543 * If the current CPU has more than one RT task, see if the non
1544 * running task can migrate over to a CPU that is running a task
1545 * of lesser priority.
1546 */
697f0a48 1547static int push_rt_task(struct rq *rq)
e8fa1362
SR
1548{
1549 struct task_struct *next_task;
1550 struct rq *lowest_rq;
311e800e 1551 int ret = 0;
e8fa1362 1552
a22d7fc1
GH
1553 if (!rq->rt.overloaded)
1554 return 0;
1555
917b627d 1556 next_task = pick_next_pushable_task(rq);
e8fa1362
SR
1557 if (!next_task)
1558 return 0;
1559
49246274 1560retry:
697f0a48 1561 if (unlikely(next_task == rq->curr)) {
f65eda4f 1562 WARN_ON(1);
e8fa1362 1563 return 0;
f65eda4f 1564 }
e8fa1362
SR
1565
1566 /*
1567 * It's possible that the next_task slipped in of
1568 * higher priority than current. If that's the case
1569 * just reschedule current.
1570 */
697f0a48
GH
1571 if (unlikely(next_task->prio < rq->curr->prio)) {
1572 resched_task(rq->curr);
e8fa1362
SR
1573 return 0;
1574 }
1575
697f0a48 1576 /* We might release rq lock */
e8fa1362
SR
1577 get_task_struct(next_task);
1578
1579 /* find_lock_lowest_rq locks the rq if found */
697f0a48 1580 lowest_rq = find_lock_lowest_rq(next_task, rq);
e8fa1362
SR
1581 if (!lowest_rq) {
1582 struct task_struct *task;
1583 /*
311e800e 1584 * find_lock_lowest_rq releases rq->lock
1563513d
GH
1585 * so it is possible that next_task has migrated.
1586 *
1587 * We need to make sure that the task is still on the same
1588 * run-queue and is also still the next task eligible for
1589 * pushing.
e8fa1362 1590 */
917b627d 1591 task = pick_next_pushable_task(rq);
1563513d
GH
1592 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1593 /*
311e800e
HD
1594 * The task hasn't migrated, and is still the next
1595 * eligible task, but we failed to find a run-queue
1596 * to push it to. Do not retry in this case, since
1597 * other cpus will pull from us when ready.
1563513d 1598 */
1563513d 1599 goto out;
e8fa1362 1600 }
917b627d 1601
1563513d
GH
1602 if (!task)
1603 /* No more tasks, just exit */
1604 goto out;
1605
917b627d 1606 /*
1563513d 1607 * Something has shifted, try again.
917b627d 1608 */
1563513d
GH
1609 put_task_struct(next_task);
1610 next_task = task;
1611 goto retry;
e8fa1362
SR
1612 }
1613
697f0a48 1614 deactivate_task(rq, next_task, 0);
e8fa1362
SR
1615 set_task_cpu(next_task, lowest_rq->cpu);
1616 activate_task(lowest_rq, next_task, 0);
311e800e 1617 ret = 1;
e8fa1362
SR
1618
1619 resched_task(lowest_rq->curr);
1620
1b12bbc7 1621 double_unlock_balance(rq, lowest_rq);
e8fa1362 1622
e8fa1362
SR
1623out:
1624 put_task_struct(next_task);
1625
311e800e 1626 return ret;
e8fa1362
SR
1627}
1628
e8fa1362
SR
1629static void push_rt_tasks(struct rq *rq)
1630{
1631 /* push_rt_task will return true if it moved an RT */
1632 while (push_rt_task(rq))
1633 ;
1634}
1635
f65eda4f
SR
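/*
 * Look at the other runqueues in our root domain that are RT overloaded
 * and pull over a pushable task that preempts what this_rq would run next.
 */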
1636static int pull_rt_task(struct rq *this_rq)
1637{
80bf3171 1638 int this_cpu = this_rq->cpu, ret = 0, cpu;
a8728944 1639 struct task_struct *p;
f65eda4f 1640 struct rq *src_rq;
f65eda4f 1641
637f5085 1642 if (likely(!rt_overloaded(this_rq)))
f65eda4f
SR
1643 return 0;
1644
7c3f2ab7
PZ
1645 /*
	 * Match the barrier from rt_set_overload(); this guarantees that if we
	 * see overloaded we must also see the rto_mask bit.
1648 */
1649 smp_rmb();
1650
c6c4927b 1651 for_each_cpu(cpu, this_rq->rd->rto_mask) {
f65eda4f
SR
1652 if (this_cpu == cpu)
1653 continue;
1654
1655 src_rq = cpu_rq(cpu);
74ab8e4f
GH
1656
1657 /*
1658 * Don't bother taking the src_rq->lock if the next highest
1659 * task is known to be lower-priority than our current task.
1660 * This may look racy, but if this value is about to go
1661 * logically higher, the src_rq will push this task away.
1662 * And if its going logically lower, we do not care
1663 */
1664 if (src_rq->rt.highest_prio.next >=
1665 this_rq->rt.highest_prio.curr)
1666 continue;
1667
f65eda4f
SR
1668 /*
1669 * We can potentially drop this_rq's lock in
1670 * double_lock_balance, and another CPU could
a8728944 1671 * alter this_rq
f65eda4f 1672 */
a8728944 1673 double_lock_balance(this_rq, src_rq);
f65eda4f
SR
1674
1675 /*
e23ee747
KT
1676 * We can pull only a task, which is pushable
1677 * on its rq, and no others.
f65eda4f 1678 */
e23ee747 1679 p = pick_highest_pushable_task(src_rq, this_cpu);
f65eda4f
SR
1680
1681 /*
1682 * Do we have an RT task that preempts
1683 * the to-be-scheduled task?
1684 */
a8728944 1685 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
f65eda4f 1686 WARN_ON(p == src_rq->curr);
fd2f4419 1687 WARN_ON(!p->on_rq);
f65eda4f
SR
1688
1689 /*
1690 * There's a chance that p is higher in priority
1691 * than what's currently running on its cpu.
1692 * This is just that p is wakeing up and hasn't
1693 * had a chance to schedule. We only pull
1694 * p if it is lower in priority than the
a8728944 1695 * current task on the run queue
f65eda4f 1696 */
a8728944 1697 if (p->prio < src_rq->curr->prio)
614ee1f6 1698 goto skip;
f65eda4f
SR
1699
1700 ret = 1;
1701
1702 deactivate_task(src_rq, p, 0);
1703 set_task_cpu(p, this_cpu);
1704 activate_task(this_rq, p, 0);
1705 /*
1706 * We continue with the search, just in
1707 * case there's an even higher prio task
25985edc 1708 * in another runqueue. (low likelihood
f65eda4f 1709 * but possible)
f65eda4f 1710 */
f65eda4f 1711 }
49246274 1712skip:
1b12bbc7 1713 double_unlock_balance(this_rq, src_rq);
f65eda4f
SR
1714 }
1715
1716 return ret;
1717}
1718
9a897c5a 1719static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
f65eda4f
SR
1720{
1721 /* Try to pull RT tasks here if we lower this rq's prio */
33c3d6c6 1722 if (rq->rt.highest_prio.curr > prev->prio)
f65eda4f
SR
1723 pull_rt_task(rq);
1724}
1725
9a897c5a 1726static void post_schedule_rt(struct rq *rq)
e8fa1362 1727{
967fc046 1728 push_rt_tasks(rq);
e8fa1362
SR
1729}
1730
8ae121ac
GH
1731/*
1732 * If we are not running and we are not going to reschedule soon, we should
1733 * try to push tasks away now
1734 */
efbbd05a 1735static void task_woken_rt(struct rq *rq, struct task_struct *p)
4642dafd 1736{
9a897c5a 1737 if (!task_running(rq, p) &&
8ae121ac 1738 !test_tsk_need_resched(rq->curr) &&
917b627d 1739 has_pushable_tasks(rq) &&
29baa747 1740 p->nr_cpus_allowed > 1 &&
1baca4ce 1741 (dl_task(rq->curr) || rt_task(rq->curr)) &&
29baa747 1742 (rq->curr->nr_cpus_allowed < 2 ||
3be209a8 1743 rq->curr->prio <= p->prio))
4642dafd
SR
1744 push_rt_tasks(rq);
1745}
1746
cd8ba7cd 1747static void set_cpus_allowed_rt(struct task_struct *p,
96f874e2 1748 const struct cpumask *new_mask)
73fe6aae 1749{
8d3d5ada
KT
1750 struct rq *rq;
1751 int weight;
73fe6aae
GH
1752
1753 BUG_ON(!rt_task(p));
1754
8d3d5ada
KT
1755 if (!p->on_rq)
1756 return;
917b627d 1757
8d3d5ada 1758 weight = cpumask_weight(new_mask);
917b627d 1759
8d3d5ada
KT
1760 /*
	 * Only update if the process actually changes whether or not it
	 * can migrate.
1763 */
29baa747 1764 if ((p->nr_cpus_allowed > 1) == (weight > 1))
8d3d5ada 1765 return;
917b627d 1766
8d3d5ada 1767 rq = task_rq(p);
73fe6aae 1768
8d3d5ada
KT
1769 /*
1770 * The process used to be able to migrate OR it can now migrate
1771 */
1772 if (weight <= 1) {
1773 if (!task_current(rq, p))
1774 dequeue_pushable_task(rq, p);
1775 BUG_ON(!rq->rt.rt_nr_migratory);
1776 rq->rt.rt_nr_migratory--;
1777 } else {
1778 if (!task_current(rq, p))
1779 enqueue_pushable_task(rq, p);
1780 rq->rt.rt_nr_migratory++;
73fe6aae 1781 }
8d3d5ada
KT
1782
1783 update_rt_migration(&rq->rt);
73fe6aae 1784}
deeeccd4 1785
bdd7c81b 1786/* Assumes rq->lock is held */
1f11eb6a 1787static void rq_online_rt(struct rq *rq)
bdd7c81b
IM
1788{
1789 if (rq->rt.overloaded)
1790 rt_set_overload(rq);
6e0534f2 1791
7def2be1
PZ
1792 __enable_runtime(rq);
1793
e864c499 1794 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
bdd7c81b
IM
1795}
1796
1797/* Assumes rq->lock is held */
1f11eb6a 1798static void rq_offline_rt(struct rq *rq)
bdd7c81b
IM
1799{
1800 if (rq->rt.overloaded)
1801 rt_clear_overload(rq);
6e0534f2 1802
7def2be1
PZ
1803 __disable_runtime(rq);
1804
6e0534f2 1805 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
bdd7c81b 1806}
cb469845
SR
1807
/*
 * When switching from the rt queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
da7a735e 1812static void switched_from_rt(struct rq *rq, struct task_struct *p)
cb469845
SR
1813{
1814 /*
1815 * If there are other RT tasks then we will reschedule
1816 * and the scheduling of the other RT tasks will handle
1817 * the balancing. But if we are the last RT task
1818 * we may need to handle the pulling of RT tasks
1819 * now.
1820 */
1158ddb5
KT
1821 if (!p->on_rq || rq->rt.rt_nr_running)
1822 return;
1823
1824 if (pull_rt_task(rq))
1825 resched_task(rq->curr);
cb469845 1826}
3d8cbdf8 1827
029632fb 1828void init_sched_rt_class(void)
3d8cbdf8
RR
1829{
1830 unsigned int i;
1831
029632fb 1832 for_each_possible_cpu(i) {
eaa95840 1833 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
6ca09dfc 1834 GFP_KERNEL, cpu_to_node(i));
029632fb 1835 }
3d8cbdf8 1836}
cb469845
SR
1837#endif /* CONFIG_SMP */
1838
1839/*
1840 * When switching a task to RT, we may overload the runqueue
1841 * with RT tasks. In this case we try to push them off to
1842 * other runqueues.
1843 */
da7a735e 1844static void switched_to_rt(struct rq *rq, struct task_struct *p)
cb469845
SR
1845{
1846 int check_resched = 1;
1847
1848 /*
1849 * If we are already running, then there's nothing
1850 * that needs to be done. But if we are not running
1851 * we may need to preempt the current running task.
1852 * If that current running task is also an RT task
1853 * then see if we can move to another run queue.
1854 */
fd2f4419 1855 if (p->on_rq && rq->curr != p) {
cb469845
SR
1856#ifdef CONFIG_SMP
1857 if (rq->rt.overloaded && push_rt_task(rq) &&
1858 /* Don't resched if we changed runqueues */
1859 rq != task_rq(p))
1860 check_resched = 0;
1861#endif /* CONFIG_SMP */
1862 if (check_resched && p->prio < rq->curr->prio)
1863 resched_task(rq->curr);
1864 }
1865}
1866
1867/*
1868 * Priority of the task has changed. This may cause
1869 * us to initiate a push or pull.
1870 */
da7a735e
PZ
1871static void
1872prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 1873{
fd2f4419 1874 if (!p->on_rq)
da7a735e
PZ
1875 return;
1876
1877 if (rq->curr == p) {
cb469845
SR
1878#ifdef CONFIG_SMP
1879 /*
1880 * If our priority decreases while running, we
1881 * may need to pull tasks to this runqueue.
1882 */
1883 if (oldprio < p->prio)
1884 pull_rt_task(rq);
1885 /*
1886 * If there's a higher priority task waiting to run
6fa46fa5
SR
1887 * then reschedule. Note, the above pull_rt_task
1888 * can release the rq lock and p could migrate.
1889 * Only reschedule if p is still on the same runqueue.
cb469845 1890 */
e864c499 1891 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
cb469845
SR
1892 resched_task(p);
1893#else
1894 /* For UP simply resched on drop of prio */
1895 if (oldprio < p->prio)
1896 resched_task(p);
e8fa1362 1897#endif /* CONFIG_SMP */
cb469845
SR
1898 } else {
1899 /*
1900 * This task is not running, but if it is
1901 * greater than the current running task
1902 * then reschedule.
1903 */
1904 if (p->prio < rq->curr->prio)
1905 resched_task(rq->curr);
1906 }
1907}
1908
78f2c7db
PZ
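/*
 * RLIMIT_RTTIME watchdog: count the ticks this task has spent running as
 * RT and arm a cputime expiry once the soft limit is reached.
 */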
1909static void watchdog(struct rq *rq, struct task_struct *p)
1910{
1911 unsigned long soft, hard;
1912
78d7d407
JS
1913 /* max may change after cur was read, this will be fixed next tick */
1914 soft = task_rlimit(p, RLIMIT_RTTIME);
1915 hard = task_rlimit_max(p, RLIMIT_RTTIME);
78f2c7db
PZ
1916
1917 if (soft != RLIM_INFINITY) {
1918 unsigned long next;
1919
57d2aa00
YX
1920 if (p->rt.watchdog_stamp != jiffies) {
1921 p->rt.timeout++;
1922 p->rt.watchdog_stamp = jiffies;
1923 }
1924
78f2c7db 1925 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
5a52dd50 1926 if (p->rt.timeout > next)
f06febc9 1927 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
78f2c7db
PZ
1928 }
1929}
bb44e5d1 1930
8f4d37ec 1931static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
bb44e5d1 1932{
454c7999
CC
1933 struct sched_rt_entity *rt_se = &p->rt;
1934
67e2be02
PZ
1935 update_curr_rt(rq);
1936
78f2c7db
PZ
1937 watchdog(rq, p);
1938
bb44e5d1
IM
1939 /*
1940 * RR tasks need a special form of timeslice management.
1941 * FIFO tasks have no timeslices.
1942 */
1943 if (p->policy != SCHED_RR)
1944 return;
1945
fa717060 1946 if (--p->rt.time_slice)
bb44e5d1
IM
1947 return;
1948
ce0dbbbb 1949 p->rt.time_slice = sched_rr_timeslice;
bb44e5d1 1950
98fbc798 1951 /*
e9aa39bb
LB
1952 * Requeue to the end of queue if we (and all of our ancestors) are not
1953 * the only element on the queue
98fbc798 1954 */
454c7999
CC
1955 for_each_sched_rt_entity(rt_se) {
1956 if (rt_se->run_list.prev != rt_se->run_list.next) {
1957 requeue_task_rt(rq, p, 0);
1958 set_tsk_need_resched(p);
1959 return;
1960 }
98fbc798 1961 }
bb44e5d1
IM
1962}
1963
83b699ed
SV
1964static void set_curr_task_rt(struct rq *rq)
1965{
1966 struct task_struct *p = rq->curr;
1967
78becc27 1968 p->se.exec_start = rq_clock_task(rq);
917b627d
GH
1969
1970 /* The running task is never eligible for pushing */
1971 dequeue_pushable_task(rq, p);
83b699ed
SV
1972}
1973
6d686f45 1974static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
0d721cea
PW
1975{
1976 /*
1977 * Time slice is 0 for SCHED_FIFO tasks
1978 */
1979 if (task->policy == SCHED_RR)
ce0dbbbb 1980 return sched_rr_timeslice;
0d721cea
PW
1981 else
1982 return 0;
1983}
1984
029632fb 1985const struct sched_class rt_sched_class = {
5522d5d5 1986 .next = &fair_sched_class,
bb44e5d1
IM
1987 .enqueue_task = enqueue_task_rt,
1988 .dequeue_task = dequeue_task_rt,
1989 .yield_task = yield_task_rt,
1990
1991 .check_preempt_curr = check_preempt_curr_rt,
1992
1993 .pick_next_task = pick_next_task_rt,
1994 .put_prev_task = put_prev_task_rt,
1995
681f3e68 1996#ifdef CONFIG_SMP
4ce72a2c
LZ
1997 .select_task_rq = select_task_rq_rt,
1998
73fe6aae 1999 .set_cpus_allowed = set_cpus_allowed_rt,
1f11eb6a
GH
2000 .rq_online = rq_online_rt,
2001 .rq_offline = rq_offline_rt,
9a897c5a
SR
2002 .pre_schedule = pre_schedule_rt,
2003 .post_schedule = post_schedule_rt,
efbbd05a 2004 .task_woken = task_woken_rt,
cb469845 2005 .switched_from = switched_from_rt,
681f3e68 2006#endif
bb44e5d1 2007
83b699ed 2008 .set_curr_task = set_curr_task_rt,
bb44e5d1 2009 .task_tick = task_tick_rt,
cb469845 2010
0d721cea
PW
2011 .get_rr_interval = get_rr_interval_rt,
2012
cb469845
SR
2013 .prio_changed = prio_changed_rt,
2014 .switched_to = switched_to_rt,
bb44e5d1 2015};
ada18de2
PZ
2016
2017#ifdef CONFIG_SCHED_DEBUG
2018extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2019
029632fb 2020void print_rt_stats(struct seq_file *m, int cpu)
ada18de2 2021{
ec514c48 2022 rt_rq_iter_t iter;
ada18de2
PZ
2023 struct rt_rq *rt_rq;
2024
2025 rcu_read_lock();
ec514c48 2026 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
ada18de2
PZ
2027 print_rt_rq(m, cpu, rt_rq);
2028 rcu_read_unlock();
2029}
55e12e5e 2030#endif /* CONFIG_SCHED_DEBUG */