/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#include "sched.h"

#include <linux/slab.h>

int sched_rr_timeslice = RR_TIMESLICE;

static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun);

struct rt_bandwidth def_rt_bandwidth;

static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
{
	struct rt_bandwidth *rt_b =
		container_of(timer, struct rt_bandwidth, rt_period_timer);
	ktime_t now;
	int overrun;
	int idle = 0;

	for (;;) {
		now = hrtimer_cb_get_time(timer);
		overrun = hrtimer_forward(timer, now, rt_b->rt_period);

		if (!overrun)
			break;

		idle = do_sched_rt_period_timer(rt_b, overrun);
	}

	return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
}

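The period timer above is what replenishes RT runtime: on every expiry it forwards itself by rt_period and calls do_sched_rt_period_timer(). The global budget it enforces is exposed to userspace through /proc/sys/kernel/sched_rt_period_us and /proc/sys/kernel/sched_rt_runtime_us (defaults 1000000 and 950000). A minimal userspace sketch that reads both knobs (only the procfs paths are relied on; error handling kept short):

```c
/* Userspace sketch: read the global RT bandwidth knobs enforced above. */
#include <stdio.h>

static long read_long(const char *path)
{
	FILE *f = fopen(path, "r");
	long v = -1;

	if (f) {
		if (fscanf(f, "%ld", &v) != 1)
			v = -1;
		fclose(f);
	}
	return v;
}

int main(void)
{
	long period = read_long("/proc/sys/kernel/sched_rt_period_us");
	long runtime = read_long("/proc/sys/kernel/sched_rt_runtime_us");

	printf("RT tasks may consume %ld us out of every %ld us period\n",
	       runtime, period);
	return 0;
}
```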
void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
{
	rt_b->rt_period = ns_to_ktime(period);
	rt_b->rt_runtime = runtime;

	raw_spin_lock_init(&rt_b->rt_runtime_lock);

	hrtimer_init(&rt_b->rt_period_timer,
			CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rt_b->rt_period_timer.function = sched_rt_period_timer;
}

static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
{
	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
		return;

	if (hrtimer_active(&rt_b->rt_period_timer))
		return;

	raw_spin_lock(&rt_b->rt_runtime_lock);
	start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
	raw_spin_unlock(&rt_b->rt_runtime_lock);
}

void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
{
	struct rt_prio_array *array;
	int i;

	array = &rt_rq->active;
	for (i = 0; i < MAX_RT_PRIO; i++) {
		INIT_LIST_HEAD(array->queue + i);
		__clear_bit(i, array->bitmap);
	}
	/* delimiter for bitsearch: */
	__set_bit(MAX_RT_PRIO, array->bitmap);

#if defined CONFIG_SMP
	rt_rq->highest_prio.curr = MAX_RT_PRIO;
	rt_rq->highest_prio.next = MAX_RT_PRIO;
	rt_rq->rt_nr_migratory = 0;
	rt_rq->overloaded = 0;
	plist_head_init(&rt_rq->pushable_tasks);
#endif

	rt_rq->rt_time = 0;
	rt_rq->rt_throttled = 0;
	rt_rq->rt_runtime = 0;
	raw_spin_lock_init(&rt_rq->rt_runtime_lock);
}

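init_rt_rq() sets up the per-runqueue priority array: one run list per RT priority plus a bitmap, with a sentinel bit at MAX_RT_PRIO so bit searches always terminate. A rough userspace sketch of the same "bitmap + per-priority bucket" idea (illustrative only; none of these names are kernel API):

```c
/* Illustrative bitmap-indexed priority queue (none of this is kernel API). */
#include <stdio.h>
#include <string.h>

#define NR_PRIO 100				/* stand-in for MAX_RT_PRIO */
#define NR_WORDS ((NR_PRIO + 1 + 63) / 64)	/* +1 for the delimiter bit */

struct prio_array {
	unsigned long long bitmap[NR_WORDS];
	int count[NR_PRIO];			/* stand-in for the run lists */
};

static void array_init(struct prio_array *a)
{
	memset(a, 0, sizeof(*a));
	/* Delimiter bit: a search for the first set bit always terminates. */
	a->bitmap[NR_PRIO / 64] |= 1ULL << (NR_PRIO % 64);
}

static void array_add(struct prio_array *a, int prio)
{
	a->bitmap[prio / 64] |= 1ULL << (prio % 64);
	a->count[prio]++;
}

static int array_first(const struct prio_array *a)
{
	for (int i = 0; ; i++)			/* bounded by the delimiter */
		if (a->bitmap[i])
			return i * 64 + __builtin_ctzll(a->bitmap[i]);
}

int main(void)
{
	struct prio_array a;

	array_init(&a);
	array_add(&a, 42);
	array_add(&a, 7);
	printf("highest-priority queue: %d\n", array_first(&a));	/* prints 7 */
	return 0;
}
```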
8f48894f 89#ifdef CONFIG_RT_GROUP_SCHED
029632fb
PZ
90static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
91{
92 hrtimer_cancel(&rt_b->rt_period_timer);
93}
8f48894f
PZ
94
95#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
96
398a153b
GH
97static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
98{
8f48894f
PZ
99#ifdef CONFIG_SCHED_DEBUG
100 WARN_ON_ONCE(!rt_entity_is_task(rt_se));
101#endif
398a153b
GH
102 return container_of(rt_se, struct task_struct, rt);
103}
104
398a153b
GH
105static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
106{
107 return rt_rq->rq;
108}
109
110static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
111{
112 return rt_se->rt_rq;
113}
114
029632fb
PZ
115void free_rt_sched_group(struct task_group *tg)
116{
117 int i;
118
119 if (tg->rt_se)
120 destroy_rt_bandwidth(&tg->rt_bandwidth);
121
122 for_each_possible_cpu(i) {
123 if (tg->rt_rq)
124 kfree(tg->rt_rq[i]);
125 if (tg->rt_se)
126 kfree(tg->rt_se[i]);
127 }
128
129 kfree(tg->rt_rq);
130 kfree(tg->rt_se);
131}
132
133void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq,
134 struct sched_rt_entity *rt_se, int cpu,
135 struct sched_rt_entity *parent)
136{
137 struct rq *rq = cpu_rq(cpu);
138
139 rt_rq->highest_prio.curr = MAX_RT_PRIO;
140 rt_rq->rt_nr_boosted = 0;
141 rt_rq->rq = rq;
142 rt_rq->tg = tg;
143
144 tg->rt_rq[cpu] = rt_rq;
145 tg->rt_se[cpu] = rt_se;
146
147 if (!rt_se)
148 return;
149
150 if (!parent)
151 rt_se->rt_rq = &rq->rt;
152 else
153 rt_se->rt_rq = parent->my_q;
154
155 rt_se->my_q = rt_rq;
156 rt_se->parent = parent;
157 INIT_LIST_HEAD(&rt_se->run_list);
158}
159
160int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
161{
162 struct rt_rq *rt_rq;
163 struct sched_rt_entity *rt_se;
164 int i;
165
166 tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL);
167 if (!tg->rt_rq)
168 goto err;
169 tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL);
170 if (!tg->rt_se)
171 goto err;
172
173 init_rt_bandwidth(&tg->rt_bandwidth,
174 ktime_to_ns(def_rt_bandwidth.rt_period), 0);
175
176 for_each_possible_cpu(i) {
177 rt_rq = kzalloc_node(sizeof(struct rt_rq),
178 GFP_KERNEL, cpu_to_node(i));
179 if (!rt_rq)
180 goto err;
181
182 rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
183 GFP_KERNEL, cpu_to_node(i));
184 if (!rt_se)
185 goto err_free_rq;
186
187 init_rt_rq(rt_rq, cpu_rq(i));
188 rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
189 init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
190 }
191
192 return 1;
193
194err_free_rq:
195 kfree(rt_rq);
196err:
197 return 0;
198}
199
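With CONFIG_RT_GROUP_SCHED, every task group gets its own rt_bandwidth, and alloc_rt_sched_group() starts each new group with a runtime of 0, so RT tasks cannot run in a group until it has been granted budget through the cgroup cpu controller (cpu.rt_runtime_us / cpu.rt_period_us). A sketch of granting such a budget, assuming a v1 cpu controller mounted at /sys/fs/cgroup/cpu and an already-created group named "rtgroup" (both paths are assumptions):

```c
/* Sketch: grant an RT budget to a cgroup (both paths are assumptions). */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/fs/cgroup/cpu/rtgroup/cpu.rt_runtime_us";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* Allow tasks in this group 300 ms of RT time per (default) 1 s period. */
	fprintf(f, "300000\n");
	fclose(f);
	return 0;
}
```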
398a153b
GH
200#else /* CONFIG_RT_GROUP_SCHED */
201
a1ba4d8b
PZ
202#define rt_entity_is_task(rt_se) (1)
203
8f48894f
PZ
204static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
205{
206 return container_of(rt_se, struct task_struct, rt);
207}
208
398a153b
GH
209static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
210{
211 return container_of(rt_rq, struct rq, rt);
212}
213
214static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
215{
216 struct task_struct *p = rt_task_of(rt_se);
217 struct rq *rq = task_rq(p);
218
219 return &rq->rt;
220}
221
029632fb
PZ
222void free_rt_sched_group(struct task_group *tg) { }
223
224int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
225{
226 return 1;
227}
398a153b
GH
228#endif /* CONFIG_RT_GROUP_SCHED */
229
4fd29176 230#ifdef CONFIG_SMP
84de4274 231
637f5085 232static inline int rt_overloaded(struct rq *rq)
4fd29176 233{
637f5085 234 return atomic_read(&rq->rd->rto_count);
4fd29176 235}
84de4274 236
4fd29176
SR
237static inline void rt_set_overload(struct rq *rq)
238{
1f11eb6a
GH
239 if (!rq->online)
240 return;
241
c6c4927b 242 cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
4fd29176
SR
	/*
	 * Make sure the mask is visible before we set the overload
	 * count. The count is what is checked to decide whether the
	 * mask needs to be looked at at all; it would be a shame to
	 * look at the mask before it has been updated.
	 */
250 wmb();
637f5085 251 atomic_inc(&rq->rd->rto_count);
4fd29176 252}
84de4274 253
4fd29176
SR
254static inline void rt_clear_overload(struct rq *rq)
255{
1f11eb6a
GH
256 if (!rq->online)
257 return;
258
4fd29176 259 /* the order here really doesn't matter */
637f5085 260 atomic_dec(&rq->rd->rto_count);
c6c4927b 261 cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
4fd29176 262}
73fe6aae 263
398a153b 264static void update_rt_migration(struct rt_rq *rt_rq)
73fe6aae 265{
a1ba4d8b 266 if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
398a153b
GH
267 if (!rt_rq->overloaded) {
268 rt_set_overload(rq_of_rt_rq(rt_rq));
269 rt_rq->overloaded = 1;
cdc8eb98 270 }
398a153b
GH
271 } else if (rt_rq->overloaded) {
272 rt_clear_overload(rq_of_rt_rq(rt_rq));
273 rt_rq->overloaded = 0;
637f5085 274 }
73fe6aae 275}
4fd29176 276
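rt_set_overload()/rt_clear_overload() publish "this runqueue has pushable RT tasks" in two steps: set the CPU's bit in rd->rto_mask, then, after the wmb(), bump rd->rto_count; readers do the reverse and check the count before touching the mask. A userspace sketch of that publish/observe ordering using C11 atomics (illustrative only; these are not the kernel primitives):

```c
/* Illustrative publish/observe ordering, mirroring rt_set_overload(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

static _Atomic unsigned long rto_mask;		/* stand-in for rd->rto_mask  */
static atomic_int rto_count;			/* stand-in for rd->rto_count */

static void set_overload(int cpu)
{
	atomic_fetch_or_explicit(&rto_mask, 1UL << cpu, memory_order_relaxed);
	/* Publish the mask before the count becomes visible (the wmb() above). */
	atomic_fetch_add_explicit(&rto_count, 1, memory_order_release);
}

static bool find_overloaded(int *cpu)
{
	unsigned long mask;

	/* Readers check the count first and only then look at the mask. */
	if (atomic_load_explicit(&rto_count, memory_order_acquire) == 0)
		return false;

	mask = atomic_load_explicit(&rto_mask, memory_order_relaxed);
	for (int i = 0; i < NR_CPUS; i++) {
		if (mask & (1UL << i)) {
			*cpu = i;
			return true;
		}
	}
	return false;
}

int main(void)
{
	int cpu;

	set_overload(3);
	if (find_overloaded(&cpu))
		printf("overloaded runqueue on cpu %d\n", cpu);
	return 0;
}
```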
398a153b
GH
277static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
278{
29baa747
PZ
279 struct task_struct *p;
280
a1ba4d8b
PZ
281 if (!rt_entity_is_task(rt_se))
282 return;
283
29baa747 284 p = rt_task_of(rt_se);
a1ba4d8b
PZ
285 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
286
287 rt_rq->rt_nr_total++;
29baa747 288 if (p->nr_cpus_allowed > 1)
398a153b
GH
289 rt_rq->rt_nr_migratory++;
290
291 update_rt_migration(rt_rq);
292}
293
294static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
295{
29baa747
PZ
296 struct task_struct *p;
297
a1ba4d8b
PZ
298 if (!rt_entity_is_task(rt_se))
299 return;
300
29baa747 301 p = rt_task_of(rt_se);
a1ba4d8b
PZ
302 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
303
304 rt_rq->rt_nr_total--;
29baa747 305 if (p->nr_cpus_allowed > 1)
398a153b
GH
306 rt_rq->rt_nr_migratory--;
307
308 update_rt_migration(rt_rq);
309}
310
5181f4a4
SR
311static inline int has_pushable_tasks(struct rq *rq)
312{
313 return !plist_head_empty(&rq->rt.pushable_tasks);
314}
315
917b627d
GH
316static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
317{
318 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
319 plist_node_init(&p->pushable_tasks, p->prio);
320 plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);
5181f4a4
SR
321
322 /* Update the highest prio pushable task */
323 if (p->prio < rq->rt.highest_prio.next)
324 rq->rt.highest_prio.next = p->prio;
917b627d
GH
325}
326
327static void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
328{
329 plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
917b627d 330
5181f4a4
SR
331 /* Update the new highest prio pushable task */
332 if (has_pushable_tasks(rq)) {
333 p = plist_first_entry(&rq->rt.pushable_tasks,
334 struct task_struct, pushable_tasks);
335 rq->rt.highest_prio.next = p->prio;
336 } else
337 rq->rt.highest_prio.next = MAX_RT_PRIO;
bcf08df3
IM
338}
339
917b627d
GH
340#else
341
ceacc2c1 342static inline void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
fa85ae24 343{
6f505b16
PZ
344}
345
ceacc2c1
PZ
346static inline void dequeue_pushable_task(struct rq *rq, struct task_struct *p)
347{
348}
349
b07430ac 350static inline
ceacc2c1
PZ
351void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
352{
353}
354
398a153b 355static inline
ceacc2c1
PZ
356void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
357{
358}
917b627d 359
4fd29176
SR
360#endif /* CONFIG_SMP */
361
6f505b16
PZ
362static inline int on_rt_rq(struct sched_rt_entity *rt_se)
363{
364 return !list_empty(&rt_se->run_list);
365}
366
052f1dc7 367#ifdef CONFIG_RT_GROUP_SCHED
6f505b16 368
9f0c1e56 369static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
6f505b16
PZ
370{
371 if (!rt_rq->tg)
9f0c1e56 372 return RUNTIME_INF;
6f505b16 373
ac086bc2
PZ
374 return rt_rq->rt_runtime;
375}
376
377static inline u64 sched_rt_period(struct rt_rq *rt_rq)
378{
379 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
6f505b16
PZ
380}
381
ec514c48
CX
382typedef struct task_group *rt_rq_iter_t;
383
1c09ab0d
YZ
384static inline struct task_group *next_task_group(struct task_group *tg)
385{
386 do {
387 tg = list_entry_rcu(tg->list.next,
388 typeof(struct task_group), list);
389 } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
390
391 if (&tg->list == &task_groups)
392 tg = NULL;
393
394 return tg;
395}
396
397#define for_each_rt_rq(rt_rq, iter, rq) \
398 for (iter = container_of(&task_groups, typeof(*iter), list); \
399 (iter = next_task_group(iter)) && \
400 (rt_rq = iter->rt_rq[cpu_of(rq)]);)
ec514c48 401
3d4b47b4
PZ
402static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
403{
404 list_add_rcu(&rt_rq->leaf_rt_rq_list,
405 &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list);
406}
407
408static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
409{
410 list_del_rcu(&rt_rq->leaf_rt_rq_list);
411}
412
6f505b16 413#define for_each_leaf_rt_rq(rt_rq, rq) \
80f40ee4 414 list_for_each_entry_rcu(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
6f505b16 415
6f505b16
PZ
416#define for_each_sched_rt_entity(rt_se) \
417 for (; rt_se; rt_se = rt_se->parent)
418
419static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
420{
421 return rt_se->my_q;
422}
423
37dad3fc 424static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head);
6f505b16
PZ
425static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
426
9f0c1e56 427static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
6f505b16 428{
f6121f4f 429 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
74b7eb58
YZ
430 struct sched_rt_entity *rt_se;
431
0c3b9168
BS
432 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
433
434 rt_se = rt_rq->tg->rt_se[cpu];
6f505b16 435
f6121f4f
DF
436 if (rt_rq->rt_nr_running) {
437 if (rt_se && !on_rt_rq(rt_se))
37dad3fc 438 enqueue_rt_entity(rt_se, false);
e864c499 439 if (rt_rq->highest_prio.curr < curr->prio)
1020387f 440 resched_task(curr);
6f505b16
PZ
441 }
442}
443
9f0c1e56 444static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
6f505b16 445{
74b7eb58 446 struct sched_rt_entity *rt_se;
0c3b9168 447 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
74b7eb58 448
0c3b9168 449 rt_se = rt_rq->tg->rt_se[cpu];
6f505b16
PZ
450
451 if (rt_se && on_rt_rq(rt_se))
452 dequeue_rt_entity(rt_se);
453}
454
23b0fdfc
PZ
455static inline int rt_rq_throttled(struct rt_rq *rt_rq)
456{
457 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
458}
459
460static int rt_se_boosted(struct sched_rt_entity *rt_se)
461{
462 struct rt_rq *rt_rq = group_rt_rq(rt_se);
463 struct task_struct *p;
464
465 if (rt_rq)
466 return !!rt_rq->rt_nr_boosted;
467
468 p = rt_task_of(rt_se);
469 return p->prio != p->normal_prio;
470}
471
d0b27fa7 472#ifdef CONFIG_SMP
c6c4927b 473static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa7 474{
424c93fe 475 return this_rq()->rd->span;
d0b27fa7 476}
6f505b16 477#else
c6c4927b 478static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa7 479{
c6c4927b 480 return cpu_online_mask;
d0b27fa7
PZ
481}
482#endif
6f505b16 483
d0b27fa7
PZ
484static inline
485struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
6f505b16 486{
d0b27fa7
PZ
487 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
488}
9f0c1e56 489
ac086bc2
PZ
490static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
491{
492 return &rt_rq->tg->rt_bandwidth;
493}
494
55e12e5e 495#else /* !CONFIG_RT_GROUP_SCHED */
d0b27fa7
PZ
496
497static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
498{
ac086bc2
PZ
499 return rt_rq->rt_runtime;
500}
501
502static inline u64 sched_rt_period(struct rt_rq *rt_rq)
503{
504 return ktime_to_ns(def_rt_bandwidth.rt_period);
6f505b16
PZ
505}
506
ec514c48
CX
507typedef struct rt_rq *rt_rq_iter_t;
508
509#define for_each_rt_rq(rt_rq, iter, rq) \
510 for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
511
3d4b47b4
PZ
512static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq)
513{
514}
515
516static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq)
517{
518}
519
6f505b16
PZ
520#define for_each_leaf_rt_rq(rt_rq, rq) \
521 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
522
6f505b16
PZ
523#define for_each_sched_rt_entity(rt_se) \
524 for (; rt_se; rt_se = NULL)
525
526static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
527{
528 return NULL;
529}
530
9f0c1e56 531static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
6f505b16 532{
f3ade837
JB
533 if (rt_rq->rt_nr_running)
534 resched_task(rq_of_rt_rq(rt_rq)->curr);
6f505b16
PZ
535}
536
9f0c1e56 537static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
6f505b16
PZ
538{
539}
540
23b0fdfc
PZ
541static inline int rt_rq_throttled(struct rt_rq *rt_rq)
542{
543 return rt_rq->rt_throttled;
544}
d0b27fa7 545
c6c4927b 546static inline const struct cpumask *sched_rt_period_mask(void)
d0b27fa7 547{
c6c4927b 548 return cpu_online_mask;
d0b27fa7
PZ
549}
550
551static inline
552struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
553{
554 return &cpu_rq(cpu)->rt;
555}
556
ac086bc2
PZ
557static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
558{
559 return &def_rt_bandwidth;
560}
561
55e12e5e 562#endif /* CONFIG_RT_GROUP_SCHED */
d0b27fa7 563
ac086bc2 564#ifdef CONFIG_SMP
78333cdd
PZ
565/*
566 * We ran out of runtime, see if we can borrow some from our neighbours.
567 */
b79f3833 568static int do_balance_runtime(struct rt_rq *rt_rq)
ac086bc2
PZ
569{
570 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
aa7f6730 571 struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
ac086bc2
PZ
572 int i, weight, more = 0;
573 u64 rt_period;
574
c6c4927b 575 weight = cpumask_weight(rd->span);
ac086bc2 576
0986b11b 577 raw_spin_lock(&rt_b->rt_runtime_lock);
ac086bc2 578 rt_period = ktime_to_ns(rt_b->rt_period);
c6c4927b 579 for_each_cpu(i, rd->span) {
ac086bc2
PZ
580 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
581 s64 diff;
582
583 if (iter == rt_rq)
584 continue;
585
0986b11b 586 raw_spin_lock(&iter->rt_runtime_lock);
		/*
		 * Either all rqs have inf runtime and there's nothing to steal
		 * or __disable_runtime() below sets a specific rq to inf to
		 * indicate it has been disabled and disallow stealing.
		 */
7def2be1
PZ
592 if (iter->rt_runtime == RUNTIME_INF)
593 goto next;
594
78333cdd
PZ
595 /*
596 * From runqueues with spare time, take 1/n part of their
597 * spare time, but no more than our period.
598 */
ac086bc2
PZ
599 diff = iter->rt_runtime - iter->rt_time;
600 if (diff > 0) {
58838cf3 601 diff = div_u64((u64)diff, weight);
ac086bc2
PZ
602 if (rt_rq->rt_runtime + diff > rt_period)
603 diff = rt_period - rt_rq->rt_runtime;
604 iter->rt_runtime -= diff;
605 rt_rq->rt_runtime += diff;
606 more = 1;
607 if (rt_rq->rt_runtime == rt_period) {
0986b11b 608 raw_spin_unlock(&iter->rt_runtime_lock);
ac086bc2
PZ
609 break;
610 }
611 }
7def2be1 612next:
0986b11b 613 raw_spin_unlock(&iter->rt_runtime_lock);
ac086bc2 614 }
0986b11b 615 raw_spin_unlock(&rt_b->rt_runtime_lock);
ac086bc2
PZ
616
617 return more;
618}
7def2be1 619
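do_balance_runtime() lets a runqueue that ran out of budget borrow from its neighbours: from every other runqueue in the root domain it takes at most 1/n of that queue's spare runtime, and it never grows its own budget past the period. A simplified, single-threaded sketch of that arithmetic (the real code also skips RUNTIME_INF queues and holds the per-rq runtime locks):

```c
/* Illustrative sketch of the borrowing rule used by do_balance_runtime(). */
#include <stdio.h>

#define NR_RQ 4		/* stand-in for the number of CPUs in the root domain */

struct fake_rq {
	long long rt_runtime;	/* budget for this period, in ns */
	long long rt_time;	/* time already consumed, in ns */
};

/* Borrow spare budget for rq[dst] from the other runqueues. */
static void balance_runtime(struct fake_rq rq[NR_RQ], int dst,
			    long long rt_period)
{
	for (int i = 0; i < NR_RQ; i++) {
		long long diff;

		if (i == dst)
			continue;

		diff = rq[i].rt_runtime - rq[i].rt_time;	/* spare time   */
		if (diff <= 0)
			continue;

		diff /= NR_RQ;					/* take 1/n     */
		if (rq[dst].rt_runtime + diff > rt_period)
			diff = rt_period - rq[dst].rt_runtime;	/* cap @ period */

		rq[i].rt_runtime -= diff;
		rq[dst].rt_runtime += diff;
		if (rq[dst].rt_runtime == rt_period)
			break;
	}
}

int main(void)
{
	struct fake_rq rq[NR_RQ] = {
		{ 950000, 950000 }, { 950000, 100000 },
		{ 950000, 200000 }, { 950000, 0 },
	};

	balance_runtime(rq, 0, 1000000);
	printf("rq0 runtime after borrowing: %lld ns\n", rq[0].rt_runtime);
	return 0;
}
```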
/*
 * Ensure this RQ takes back all the runtime it lent to its neighbours.
 */
7def2be1
PZ
623static void __disable_runtime(struct rq *rq)
624{
625 struct root_domain *rd = rq->rd;
ec514c48 626 rt_rq_iter_t iter;
7def2be1
PZ
627 struct rt_rq *rt_rq;
628
629 if (unlikely(!scheduler_running))
630 return;
631
ec514c48 632 for_each_rt_rq(rt_rq, iter, rq) {
7def2be1
PZ
633 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
634 s64 want;
635 int i;
636
0986b11b
TG
637 raw_spin_lock(&rt_b->rt_runtime_lock);
638 raw_spin_lock(&rt_rq->rt_runtime_lock);
78333cdd
PZ
639 /*
640 * Either we're all inf and nobody needs to borrow, or we're
641 * already disabled and thus have nothing to do, or we have
642 * exactly the right amount of runtime to take out.
643 */
7def2be1
PZ
644 if (rt_rq->rt_runtime == RUNTIME_INF ||
645 rt_rq->rt_runtime == rt_b->rt_runtime)
646 goto balanced;
0986b11b 647 raw_spin_unlock(&rt_rq->rt_runtime_lock);
7def2be1 648
		/*
		 * Calculate the difference between what we started out with
		 * and what we currently have; that's the amount of runtime
		 * we lent out and now have to reclaim.
		 */
7def2be1
PZ
654 want = rt_b->rt_runtime - rt_rq->rt_runtime;
655
78333cdd
PZ
656 /*
657 * Greedy reclaim, take back as much as we can.
658 */
c6c4927b 659 for_each_cpu(i, rd->span) {
7def2be1
PZ
660 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
661 s64 diff;
662
78333cdd
PZ
663 /*
664 * Can't reclaim from ourselves or disabled runqueues.
665 */
f1679d08 666 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
7def2be1
PZ
667 continue;
668
0986b11b 669 raw_spin_lock(&iter->rt_runtime_lock);
7def2be1
PZ
670 if (want > 0) {
671 diff = min_t(s64, iter->rt_runtime, want);
672 iter->rt_runtime -= diff;
673 want -= diff;
674 } else {
675 iter->rt_runtime -= want;
676 want -= want;
677 }
0986b11b 678 raw_spin_unlock(&iter->rt_runtime_lock);
7def2be1
PZ
679
680 if (!want)
681 break;
682 }
683
0986b11b 684 raw_spin_lock(&rt_rq->rt_runtime_lock);
78333cdd
PZ
685 /*
686 * We cannot be left wanting - that would mean some runtime
687 * leaked out of the system.
688 */
7def2be1
PZ
689 BUG_ON(want);
690balanced:
78333cdd
PZ
691 /*
692 * Disable all the borrow logic by pretending we have inf
693 * runtime - in which case borrowing doesn't make sense.
694 */
7def2be1 695 rt_rq->rt_runtime = RUNTIME_INF;
a4c96ae3 696 rt_rq->rt_throttled = 0;
0986b11b
TG
697 raw_spin_unlock(&rt_rq->rt_runtime_lock);
698 raw_spin_unlock(&rt_b->rt_runtime_lock);
7def2be1
PZ
699 }
700}
701
7def2be1
PZ
702static void __enable_runtime(struct rq *rq)
703{
ec514c48 704 rt_rq_iter_t iter;
7def2be1
PZ
705 struct rt_rq *rt_rq;
706
707 if (unlikely(!scheduler_running))
708 return;
709
78333cdd
PZ
710 /*
711 * Reset each runqueue's bandwidth settings
712 */
ec514c48 713 for_each_rt_rq(rt_rq, iter, rq) {
7def2be1
PZ
714 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
715
0986b11b
TG
716 raw_spin_lock(&rt_b->rt_runtime_lock);
717 raw_spin_lock(&rt_rq->rt_runtime_lock);
7def2be1
PZ
718 rt_rq->rt_runtime = rt_b->rt_runtime;
719 rt_rq->rt_time = 0;
baf25731 720 rt_rq->rt_throttled = 0;
0986b11b
TG
721 raw_spin_unlock(&rt_rq->rt_runtime_lock);
722 raw_spin_unlock(&rt_b->rt_runtime_lock);
7def2be1
PZ
723 }
724}
725
eff6549b
PZ
726static int balance_runtime(struct rt_rq *rt_rq)
727{
728 int more = 0;
729
4a6184ce
PZ
730 if (!sched_feat(RT_RUNTIME_SHARE))
731 return more;
732
eff6549b 733 if (rt_rq->rt_time > rt_rq->rt_runtime) {
0986b11b 734 raw_spin_unlock(&rt_rq->rt_runtime_lock);
eff6549b 735 more = do_balance_runtime(rt_rq);
0986b11b 736 raw_spin_lock(&rt_rq->rt_runtime_lock);
eff6549b
PZ
737 }
738
739 return more;
740}
55e12e5e 741#else /* !CONFIG_SMP */
eff6549b
PZ
742static inline int balance_runtime(struct rt_rq *rt_rq)
743{
744 return 0;
745}
55e12e5e 746#endif /* CONFIG_SMP */
ac086bc2 747
eff6549b
PZ
748static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
749{
42c62a58 750 int i, idle = 1, throttled = 0;
c6c4927b 751 const struct cpumask *span;
eff6549b 752
eff6549b 753 span = sched_rt_period_mask();
e221d028
MG
754#ifdef CONFIG_RT_GROUP_SCHED
755 /*
756 * FIXME: isolated CPUs should really leave the root task group,
757 * whether they are isolcpus or were isolated via cpusets, lest
758 * the timer run on a CPU which does not service all runqueues,
759 * potentially leaving other CPUs indefinitely throttled. If
760 * isolation is really required, the user will turn the throttle
761 * off to kill the perturbations it causes anyway. Meanwhile,
762 * this maintains functionality for boot and/or troubleshooting.
763 */
764 if (rt_b == &root_task_group.rt_bandwidth)
765 span = cpu_online_mask;
766#endif
c6c4927b 767 for_each_cpu(i, span) {
eff6549b
PZ
768 int enqueue = 0;
769 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
770 struct rq *rq = rq_of_rt_rq(rt_rq);
771
05fa785c 772 raw_spin_lock(&rq->lock);
eff6549b
PZ
773 if (rt_rq->rt_time) {
774 u64 runtime;
775
0986b11b 776 raw_spin_lock(&rt_rq->rt_runtime_lock);
eff6549b
PZ
777 if (rt_rq->rt_throttled)
778 balance_runtime(rt_rq);
779 runtime = rt_rq->rt_runtime;
780 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
781 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
782 rt_rq->rt_throttled = 0;
783 enqueue = 1;
61eadef6
MG
784
785 /*
786 * Force a clock update if the CPU was idle,
787 * lest wakeup -> unthrottle time accumulate.
788 */
789 if (rt_rq->rt_nr_running && rq->curr == rq->idle)
790 rq->skip_clock_update = -1;
eff6549b
PZ
791 }
792 if (rt_rq->rt_time || rt_rq->rt_nr_running)
793 idle = 0;
0986b11b 794 raw_spin_unlock(&rt_rq->rt_runtime_lock);
0c3b9168 795 } else if (rt_rq->rt_nr_running) {
6c3df255 796 idle = 0;
0c3b9168
BS
797 if (!rt_rq_throttled(rt_rq))
798 enqueue = 1;
799 }
42c62a58
PZ
800 if (rt_rq->rt_throttled)
801 throttled = 1;
eff6549b
PZ
802
803 if (enqueue)
804 sched_rt_rq_enqueue(rt_rq);
05fa785c 805 raw_spin_unlock(&rq->lock);
eff6549b
PZ
806 }
807
42c62a58
PZ
808 if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
809 return 1;
810
eff6549b
PZ
811 return idle;
812}
ac086bc2 813
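Each time the period timer fires, do_sched_rt_period_timer() pays back overrun*runtime worth of accumulated rt_time and unthrottles a queue once it fits inside its budget again. The core arithmetic, as a tiny standalone sketch:

```c
/* Sketch of the per-period replenishment done in do_sched_rt_period_timer(). */
#include <stdio.h>

static unsigned long long min_u64(unsigned long long a, unsigned long long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long long rt_time = 2300000;	/* ns consumed so far        */
	unsigned long long runtime = 950000;	/* ns allowed per period     */
	int throttled = 1;
	int overrun = 2;			/* periods missed by the timer */

	rt_time -= min_u64(rt_time, (unsigned long long)overrun * runtime);
	if (throttled && rt_time < runtime)
		throttled = 0;			/* the queue may run again */

	printf("rt_time=%llu throttled=%d\n", rt_time, throttled);
	return 0;
}
```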
6f505b16
PZ
814static inline int rt_se_prio(struct sched_rt_entity *rt_se)
815{
052f1dc7 816#ifdef CONFIG_RT_GROUP_SCHED
6f505b16
PZ
817 struct rt_rq *rt_rq = group_rt_rq(rt_se);
818
819 if (rt_rq)
e864c499 820 return rt_rq->highest_prio.curr;
6f505b16
PZ
821#endif
822
823 return rt_task_of(rt_se)->prio;
824}
825
9f0c1e56 826static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
6f505b16 827{
9f0c1e56 828 u64 runtime = sched_rt_runtime(rt_rq);
fa85ae24 829
fa85ae24 830 if (rt_rq->rt_throttled)
23b0fdfc 831 return rt_rq_throttled(rt_rq);
fa85ae24 832
5b680fd6 833 if (runtime >= sched_rt_period(rt_rq))
ac086bc2
PZ
834 return 0;
835
b79f3833
PZ
836 balance_runtime(rt_rq);
837 runtime = sched_rt_runtime(rt_rq);
838 if (runtime == RUNTIME_INF)
839 return 0;
ac086bc2 840
9f0c1e56 841 if (rt_rq->rt_time > runtime) {
7abc63b1
PZ
842 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
843
844 /*
845 * Don't actually throttle groups that have no runtime assigned
846 * but accrue some time due to boosting.
847 */
848 if (likely(rt_b->rt_runtime)) {
3ccf3e83
PZ
849 static bool once = false;
850
7abc63b1 851 rt_rq->rt_throttled = 1;
3ccf3e83
PZ
852
853 if (!once) {
854 once = true;
855 printk_sched("sched: RT throttling activated\n");
856 }
7abc63b1
PZ
857 } else {
858 /*
859 * In case we did anyway, make it go away,
860 * replenishment is a joke, since it will replenish us
861 * with exactly 0 ns.
862 */
863 rt_rq->rt_time = 0;
864 }
865
23b0fdfc 866 if (rt_rq_throttled(rt_rq)) {
9f0c1e56 867 sched_rt_rq_dequeue(rt_rq);
23b0fdfc
PZ
868 return 1;
869 }
fa85ae24
PZ
870 }
871
872 return 0;
873}
874
bb44e5d1
IM
875/*
876 * Update the current task's runtime statistics. Skip current tasks that
877 * are not in our scheduling class.
878 */
a9957449 879static void update_curr_rt(struct rq *rq)
bb44e5d1
IM
880{
881 struct task_struct *curr = rq->curr;
6f505b16
PZ
882 struct sched_rt_entity *rt_se = &curr->rt;
883 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
bb44e5d1
IM
884 u64 delta_exec;
885
06c3bc65 886 if (curr->sched_class != &rt_sched_class)
bb44e5d1
IM
887 return;
888
78becc27 889 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
fc79e240
KT
890 if (unlikely((s64)delta_exec <= 0))
891 return;
6cfb0d5d 892
42c62a58
PZ
893 schedstat_set(curr->se.statistics.exec_max,
894 max(curr->se.statistics.exec_max, delta_exec));
bb44e5d1
IM
895
896 curr->se.sum_exec_runtime += delta_exec;
f06febc9
FM
897 account_group_exec_runtime(curr, delta_exec);
898
78becc27 899 curr->se.exec_start = rq_clock_task(rq);
d842de87 900 cpuacct_charge(curr, delta_exec);
fa85ae24 901
e9e9250b
PZ
902 sched_rt_avg_update(rq, delta_exec);
903
0b148fa0
PZ
904 if (!rt_bandwidth_enabled())
905 return;
906
354d60c2
DG
907 for_each_sched_rt_entity(rt_se) {
908 rt_rq = rt_rq_of_se(rt_se);
909
cc2991cf 910 if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
0986b11b 911 raw_spin_lock(&rt_rq->rt_runtime_lock);
cc2991cf
PZ
912 rt_rq->rt_time += delta_exec;
913 if (sched_rt_runtime_exceeded(rt_rq))
914 resched_task(curr);
0986b11b 915 raw_spin_unlock(&rt_rq->rt_runtime_lock);
cc2991cf 916 }
354d60c2 917 }
bb44e5d1
IM
918}
919
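update_curr_rt() charges the elapsed rq clock to the task (sum_exec_runtime) and to every rt_rq in its hierarchy, which is what the throttling check above consumes. The same on-CPU time is visible from userspace; one way, when schedstats are available in the running kernel, is /proc/&lt;pid&gt;/schedstat, whose first field is nanoseconds spent on the CPU:

```c
/* Read this task's on-CPU time as accounted by the scheduler. */
#include <stdio.h>

int main(void)
{
	unsigned long long oncpu = 0, wait = 0, slices = 0;
	FILE *f = fopen("/proc/self/schedstat", "r");

	if (!f) {
		perror("/proc/self/schedstat");
		return 1;
	}
	if (fscanf(f, "%llu %llu %llu", &oncpu, &wait, &slices) != 3) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("on-cpu: %llu ns, waiting: %llu ns, timeslices: %llu\n",
	       oncpu, wait, slices);
	return 0;
}
```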
398a153b 920#if defined CONFIG_SMP
e864c499 921
398a153b
GH
922static void
923inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
63489e45 924{
4d984277 925 struct rq *rq = rq_of_rt_rq(rt_rq);
1f11eb6a 926
5181f4a4
SR
927 if (rq->online && prio < prev_prio)
928 cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
398a153b 929}
73fe6aae 930
398a153b
GH
931static void
932dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
933{
934 struct rq *rq = rq_of_rt_rq(rt_rq);
d0b27fa7 935
398a153b
GH
936 if (rq->online && rt_rq->highest_prio.curr != prev_prio)
937 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
63489e45
SR
938}
939
398a153b
GH
940#else /* CONFIG_SMP */
941
6f505b16 942static inline
398a153b
GH
943void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
944static inline
945void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {}
946
947#endif /* CONFIG_SMP */
6e0534f2 948
052f1dc7 949#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
398a153b
GH
950static void
951inc_rt_prio(struct rt_rq *rt_rq, int prio)
952{
953 int prev_prio = rt_rq->highest_prio.curr;
954
955 if (prio < prev_prio)
956 rt_rq->highest_prio.curr = prio;
957
958 inc_rt_prio_smp(rt_rq, prio, prev_prio);
959}
960
961static void
962dec_rt_prio(struct rt_rq *rt_rq, int prio)
963{
964 int prev_prio = rt_rq->highest_prio.curr;
965
6f505b16 966 if (rt_rq->rt_nr_running) {
764a9d6f 967
398a153b 968 WARN_ON(prio < prev_prio);
764a9d6f 969
e864c499 970 /*
398a153b
GH
971 * This may have been our highest task, and therefore
972 * we may have some recomputation to do
e864c499 973 */
398a153b 974 if (prio == prev_prio) {
e864c499
GH
975 struct rt_prio_array *array = &rt_rq->active;
976
977 rt_rq->highest_prio.curr =
764a9d6f 978 sched_find_first_bit(array->bitmap);
e864c499
GH
979 }
980
764a9d6f 981 } else
e864c499 982 rt_rq->highest_prio.curr = MAX_RT_PRIO;
73fe6aae 983
398a153b
GH
984 dec_rt_prio_smp(rt_rq, prio, prev_prio);
985}
1f11eb6a 986
398a153b
GH
987#else
988
989static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {}
990static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {}
991
992#endif /* CONFIG_SMP || CONFIG_RT_GROUP_SCHED */
6e0534f2 993
052f1dc7 994#ifdef CONFIG_RT_GROUP_SCHED
398a153b
GH
995
996static void
997inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
998{
999 if (rt_se_boosted(rt_se))
1000 rt_rq->rt_nr_boosted++;
1001
1002 if (rt_rq->tg)
1003 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
1004}
1005
1006static void
1007dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1008{
23b0fdfc
PZ
1009 if (rt_se_boosted(rt_se))
1010 rt_rq->rt_nr_boosted--;
1011
1012 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
398a153b
GH
1013}
1014
1015#else /* CONFIG_RT_GROUP_SCHED */
1016
1017static void
1018inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1019{
1020 start_rt_bandwidth(&def_rt_bandwidth);
1021}
1022
1023static inline
1024void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {}
1025
1026#endif /* CONFIG_RT_GROUP_SCHED */
1027
1028static inline
1029void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1030{
1031 int prio = rt_se_prio(rt_se);
1032
1033 WARN_ON(!rt_prio(prio));
1034 rt_rq->rt_nr_running++;
1035
1036 inc_rt_prio(rt_rq, prio);
1037 inc_rt_migration(rt_se, rt_rq);
1038 inc_rt_group(rt_se, rt_rq);
1039}
1040
1041static inline
1042void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
1043{
1044 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
1045 WARN_ON(!rt_rq->rt_nr_running);
1046 rt_rq->rt_nr_running--;
1047
1048 dec_rt_prio(rt_rq, rt_se_prio(rt_se));
1049 dec_rt_migration(rt_se, rt_rq);
1050 dec_rt_group(rt_se, rt_rq);
63489e45
SR
1051}
1052
37dad3fc 1053static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
bb44e5d1 1054{
6f505b16
PZ
1055 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1056 struct rt_prio_array *array = &rt_rq->active;
1057 struct rt_rq *group_rq = group_rt_rq(rt_se);
20b6331b 1058 struct list_head *queue = array->queue + rt_se_prio(rt_se);
bb44e5d1 1059
	/*
	 * Don't enqueue the group if it's throttled, or when empty.
	 * The latter is a consequence of the former when a child group
	 * gets throttled and the current group doesn't have any other
	 * active members.
	 */
1066 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
6f505b16 1067 return;
63489e45 1068
3d4b47b4
PZ
1069 if (!rt_rq->rt_nr_running)
1070 list_add_leaf_rt_rq(rt_rq);
1071
37dad3fc
TG
1072 if (head)
1073 list_add(&rt_se->run_list, queue);
1074 else
1075 list_add_tail(&rt_se->run_list, queue);
6f505b16 1076 __set_bit(rt_se_prio(rt_se), array->bitmap);
78f2c7db 1077
6f505b16
PZ
1078 inc_rt_tasks(rt_se, rt_rq);
1079}
1080
ad2a3f13 1081static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
6f505b16
PZ
1082{
1083 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
1084 struct rt_prio_array *array = &rt_rq->active;
1085
1086 list_del_init(&rt_se->run_list);
1087 if (list_empty(array->queue + rt_se_prio(rt_se)))
1088 __clear_bit(rt_se_prio(rt_se), array->bitmap);
1089
1090 dec_rt_tasks(rt_se, rt_rq);
3d4b47b4
PZ
1091 if (!rt_rq->rt_nr_running)
1092 list_del_leaf_rt_rq(rt_rq);
6f505b16
PZ
1093}
1094
/*
 * Because the prio of an upper entry depends on the lower
 * entries, we must remove entries top-down.
 */
ad2a3f13 1099static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
6f505b16 1100{
ad2a3f13 1101 struct sched_rt_entity *back = NULL;
6f505b16 1102
58d6c2d7
PZ
1103 for_each_sched_rt_entity(rt_se) {
1104 rt_se->back = back;
1105 back = rt_se;
1106 }
1107
1108 for (rt_se = back; rt_se; rt_se = rt_se->back) {
1109 if (on_rt_rq(rt_se))
ad2a3f13
PZ
1110 __dequeue_rt_entity(rt_se);
1111 }
1112}
1113
37dad3fc 1114static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head)
ad2a3f13
PZ
1115{
1116 dequeue_rt_stack(rt_se);
1117 for_each_sched_rt_entity(rt_se)
37dad3fc 1118 __enqueue_rt_entity(rt_se, head);
ad2a3f13
PZ
1119}
1120
1121static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
1122{
1123 dequeue_rt_stack(rt_se);
1124
1125 for_each_sched_rt_entity(rt_se) {
1126 struct rt_rq *rt_rq = group_rt_rq(rt_se);
1127
1128 if (rt_rq && rt_rq->rt_nr_running)
37dad3fc 1129 __enqueue_rt_entity(rt_se, false);
58d6c2d7 1130 }
bb44e5d1
IM
1131}
1132
1133/*
1134 * Adding/removing a task to/from a priority array:
1135 */
ea87bb78 1136static void
371fd7e7 1137enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
6f505b16
PZ
1138{
1139 struct sched_rt_entity *rt_se = &p->rt;
1140
371fd7e7 1141 if (flags & ENQUEUE_WAKEUP)
6f505b16
PZ
1142 rt_se->timeout = 0;
1143
371fd7e7 1144 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
c09595f6 1145
29baa747 1146 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
917b627d 1147 enqueue_pushable_task(rq, p);
953bfcd1
PT
1148
1149 inc_nr_running(rq);
6f505b16
PZ
1150}
1151
371fd7e7 1152static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
bb44e5d1 1153{
6f505b16 1154 struct sched_rt_entity *rt_se = &p->rt;
bb44e5d1 1155
f1e14ef6 1156 update_curr_rt(rq);
ad2a3f13 1157 dequeue_rt_entity(rt_se);
c09595f6 1158
917b627d 1159 dequeue_pushable_task(rq, p);
953bfcd1
PT
1160
1161 dec_nr_running(rq);
bb44e5d1
IM
1162}
1163
/*
 * Put a task at the head or the end of the run list without the overhead of
 * dequeue followed by enqueue.
 */
7ebefa8c
DA
1168static void
1169requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head)
6f505b16 1170{
1cdad715 1171 if (on_rt_rq(rt_se)) {
7ebefa8c
DA
1172 struct rt_prio_array *array = &rt_rq->active;
1173 struct list_head *queue = array->queue + rt_se_prio(rt_se);
1174
1175 if (head)
1176 list_move(&rt_se->run_list, queue);
1177 else
1178 list_move_tail(&rt_se->run_list, queue);
1cdad715 1179 }
6f505b16
PZ
1180}
1181
7ebefa8c 1182static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head)
bb44e5d1 1183{
6f505b16
PZ
1184 struct sched_rt_entity *rt_se = &p->rt;
1185 struct rt_rq *rt_rq;
bb44e5d1 1186
6f505b16
PZ
1187 for_each_sched_rt_entity(rt_se) {
1188 rt_rq = rt_rq_of_se(rt_se);
7ebefa8c 1189 requeue_rt_entity(rt_rq, rt_se, head);
6f505b16 1190 }
bb44e5d1
IM
1191}
1192
6f505b16 1193static void yield_task_rt(struct rq *rq)
bb44e5d1 1194{
7ebefa8c 1195 requeue_task_rt(rq, rq->curr, 0);
bb44e5d1
IM
1196}
1197
e7693a36 1198#ifdef CONFIG_SMP
318e0893
GH
1199static int find_lowest_rq(struct task_struct *task);
1200
0017d735 1201static int
7608dec2 1202select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
e7693a36 1203{
7608dec2
PZ
1204 struct task_struct *curr;
1205 struct rq *rq;
1206 int cpu;
1207
7608dec2 1208 cpu = task_cpu(p);
c37495fd 1209
29baa747 1210 if (p->nr_cpus_allowed == 1)
76854c7e
MG
1211 goto out;
1212
c37495fd
SR
1213 /* For anything but wake ups, just return the task_cpu */
1214 if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK)
1215 goto out;
1216
7608dec2
PZ
1217 rq = cpu_rq(cpu);
1218
1219 rcu_read_lock();
1220 curr = ACCESS_ONCE(rq->curr); /* unlocked access */
1221
318e0893 1222 /*
7608dec2 1223 * If the current task on @p's runqueue is an RT task, then
e1f47d89
SR
1224 * try to see if we can wake this RT task up on another
1225 * runqueue. Otherwise simply start this RT task
1226 * on its current runqueue.
1227 *
43fa5460
SR
1228 * We want to avoid overloading runqueues. If the woken
1229 * task is a higher priority, then it will stay on this CPU
1230 * and the lower prio task should be moved to another CPU.
1231 * Even though this will probably make the lower prio task
1232 * lose its cache, we do not want to bounce a higher task
1233 * around just because it gave up its CPU, perhaps for a
1234 * lock?
1235 *
1236 * For equal prio tasks, we just let the scheduler sort it out.
7608dec2
PZ
1237 *
1238 * Otherwise, just let it ride on the affined RQ and the
1239 * post-schedule router will push the preempted task away
1240 *
1241 * This test is optimistic, if we get it wrong the load-balancer
1242 * will have to sort it out.
318e0893 1243 */
7608dec2 1244 if (curr && unlikely(rt_task(curr)) &&
29baa747 1245 (curr->nr_cpus_allowed < 2 ||
3be209a8 1246 curr->prio <= p->prio) &&
29baa747 1247 (p->nr_cpus_allowed > 1)) {
7608dec2 1248 int target = find_lowest_rq(p);
318e0893 1249
7608dec2
PZ
1250 if (target != -1)
1251 cpu = target;
318e0893 1252 }
7608dec2 1253 rcu_read_unlock();
318e0893 1254
c37495fd 1255out:
7608dec2 1256 return cpu;
e7693a36 1257}
7ebefa8c
DA
1258
1259static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1260{
29baa747 1261 if (rq->curr->nr_cpus_allowed == 1)
7ebefa8c
DA
1262 return;
1263
29baa747 1264 if (p->nr_cpus_allowed != 1
13b8bd0a
RR
1265 && cpupri_find(&rq->rd->cpupri, p, NULL))
1266 return;
24600ce8 1267
13b8bd0a
RR
1268 if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
1269 return;
7ebefa8c
DA
1270
	/*
	 * There appear to be other CPUs that can accept
	 * current and none to run 'p', so let's reschedule
	 * to try and push current away:
	 */
1276 requeue_task_rt(rq, p, 1);
1277 resched_task(rq->curr);
1278}
1279
e7693a36
GH
1280#endif /* CONFIG_SMP */
1281
bb44e5d1
IM
1282/*
1283 * Preempt the current task with a newly woken task if needed:
1284 */
7d478721 1285static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flags)
bb44e5d1 1286{
45c01e82 1287 if (p->prio < rq->curr->prio) {
bb44e5d1 1288 resched_task(rq->curr);
45c01e82
GH
1289 return;
1290 }
1291
1292#ifdef CONFIG_SMP
1293 /*
1294 * If:
1295 *
1296 * - the newly woken task is of equal priority to the current task
1297 * - the newly woken task is non-migratable while current is migratable
1298 * - current will be preempted on the next reschedule
1299 *
1300 * we should check to see if current can readily move to a different
1301 * cpu. If so, we will reschedule to allow the push logic to try
1302 * to move current somewhere else, making room for our non-migratable
1303 * task.
1304 */
8dd0de8b 1305 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
7ebefa8c 1306 check_preempt_equal_prio(rq, p);
45c01e82 1307#endif
bb44e5d1
IM
1308}
1309
6f505b16
PZ
1310static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
1311 struct rt_rq *rt_rq)
bb44e5d1 1312{
6f505b16
PZ
1313 struct rt_prio_array *array = &rt_rq->active;
1314 struct sched_rt_entity *next = NULL;
bb44e5d1
IM
1315 struct list_head *queue;
1316 int idx;
1317
1318 idx = sched_find_first_bit(array->bitmap);
6f505b16 1319 BUG_ON(idx >= MAX_RT_PRIO);
bb44e5d1
IM
1320
1321 queue = array->queue + idx;
6f505b16 1322 next = list_entry(queue->next, struct sched_rt_entity, run_list);
326587b8 1323
6f505b16
PZ
1324 return next;
1325}
bb44e5d1 1326
917b627d 1327static struct task_struct *_pick_next_task_rt(struct rq *rq)
6f505b16
PZ
1328{
1329 struct sched_rt_entity *rt_se;
1330 struct task_struct *p;
1331 struct rt_rq *rt_rq;
bb44e5d1 1332
6f505b16
PZ
1333 rt_rq = &rq->rt;
1334
8e54a2c0 1335 if (!rt_rq->rt_nr_running)
6f505b16
PZ
1336 return NULL;
1337
23b0fdfc 1338 if (rt_rq_throttled(rt_rq))
6f505b16
PZ
1339 return NULL;
1340
1341 do {
1342 rt_se = pick_next_rt_entity(rq, rt_rq);
326587b8 1343 BUG_ON(!rt_se);
6f505b16
PZ
1344 rt_rq = group_rt_rq(rt_se);
1345 } while (rt_rq);
1346
1347 p = rt_task_of(rt_se);
78becc27 1348 p->se.exec_start = rq_clock_task(rq);
917b627d
GH
1349
1350 return p;
1351}
1352
1353static struct task_struct *pick_next_task_rt(struct rq *rq)
1354{
1355 struct task_struct *p = _pick_next_task_rt(rq);
1356
1357 /* The running task is never eligible for pushing */
1358 if (p)
1359 dequeue_pushable_task(rq, p);
1360
bcf08df3 1361#ifdef CONFIG_SMP
3f029d3c
GH
1362 /*
1363 * We detect this state here so that we can avoid taking the RQ
1364 * lock again later if there is no need to push
1365 */
1366 rq->post_schedule = has_pushable_tasks(rq);
bcf08df3 1367#endif
3f029d3c 1368
6f505b16 1369 return p;
bb44e5d1
IM
1370}
1371
31ee529c 1372static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
bb44e5d1 1373{
f1e14ef6 1374 update_curr_rt(rq);
917b627d
GH
1375
1376 /*
1377 * The previous task needs to be made eligible for pushing
1378 * if it is still active
1379 */
29baa747 1380 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
917b627d 1381 enqueue_pushable_task(rq, p);
bb44e5d1
IM
1382}
1383
681f3e68 1384#ifdef CONFIG_SMP
6f505b16 1385
e8fa1362
SR
1386/* Only try algorithms three times */
1387#define RT_MAX_TRIES 3
1388
f65eda4f
SR
1389static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1390{
1391 if (!task_running(rq, p) &&
60334caf 1392 cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
f65eda4f
SR
1393 return 1;
1394 return 0;
1395}
1396
e8fa1362 1397/* Return the second highest RT task, NULL otherwise */
79064fbf 1398static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
e8fa1362 1399{
6f505b16
PZ
1400 struct task_struct *next = NULL;
1401 struct sched_rt_entity *rt_se;
1402 struct rt_prio_array *array;
1403 struct rt_rq *rt_rq;
e8fa1362
SR
1404 int idx;
1405
6f505b16
PZ
1406 for_each_leaf_rt_rq(rt_rq, rq) {
1407 array = &rt_rq->active;
1408 idx = sched_find_first_bit(array->bitmap);
49246274 1409next_idx:
6f505b16
PZ
1410 if (idx >= MAX_RT_PRIO)
1411 continue;
1b028abc 1412 if (next && next->prio <= idx)
6f505b16
PZ
1413 continue;
1414 list_for_each_entry(rt_se, array->queue + idx, run_list) {
3d07467b
PZ
1415 struct task_struct *p;
1416
1417 if (!rt_entity_is_task(rt_se))
1418 continue;
1419
1420 p = rt_task_of(rt_se);
6f505b16
PZ
1421 if (pick_rt_task(rq, p, cpu)) {
1422 next = p;
1423 break;
1424 }
1425 }
1426 if (!next) {
1427 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
1428 goto next_idx;
1429 }
f65eda4f
SR
1430 }
1431
e8fa1362
SR
1432 return next;
1433}
1434
0e3900e6 1435static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
e8fa1362 1436
6e1254d2
GH
1437static int find_lowest_rq(struct task_struct *task)
1438{
1439 struct sched_domain *sd;
96f874e2 1440 struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask);
6e1254d2
GH
1441 int this_cpu = smp_processor_id();
1442 int cpu = task_cpu(task);
06f90dbd 1443
0da938c4
SR
1444 /* Make sure the mask is initialized first */
1445 if (unlikely(!lowest_mask))
1446 return -1;
1447
29baa747 1448 if (task->nr_cpus_allowed == 1)
6e0534f2 1449 return -1; /* No other targets possible */
6e1254d2 1450
6e0534f2
GH
1451 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
1452 return -1; /* No targets found */
6e1254d2
GH
1453
1454 /*
1455 * At this point we have built a mask of cpus representing the
1456 * lowest priority tasks in the system. Now we want to elect
1457 * the best one based on our affinity and topology.
1458 *
1459 * We prioritize the last cpu that the task executed on since
1460 * it is most likely cache-hot in that location.
1461 */
96f874e2 1462 if (cpumask_test_cpu(cpu, lowest_mask))
6e1254d2
GH
1463 return cpu;
1464
1465 /*
1466 * Otherwise, we consult the sched_domains span maps to figure
1467 * out which cpu is logically closest to our hot cache data.
1468 */
e2c88063
RR
1469 if (!cpumask_test_cpu(this_cpu, lowest_mask))
1470 this_cpu = -1; /* Skip this_cpu opt if not among lowest */
6e1254d2 1471
cd4ae6ad 1472 rcu_read_lock();
e2c88063
RR
1473 for_each_domain(cpu, sd) {
1474 if (sd->flags & SD_WAKE_AFFINE) {
1475 int best_cpu;
6e1254d2 1476
e2c88063
RR
1477 /*
1478 * "this_cpu" is cheaper to preempt than a
1479 * remote processor.
1480 */
1481 if (this_cpu != -1 &&
cd4ae6ad
XF
1482 cpumask_test_cpu(this_cpu, sched_domain_span(sd))) {
1483 rcu_read_unlock();
e2c88063 1484 return this_cpu;
cd4ae6ad 1485 }
e2c88063
RR
1486
1487 best_cpu = cpumask_first_and(lowest_mask,
1488 sched_domain_span(sd));
cd4ae6ad
XF
1489 if (best_cpu < nr_cpu_ids) {
1490 rcu_read_unlock();
e2c88063 1491 return best_cpu;
cd4ae6ad 1492 }
6e1254d2
GH
1493 }
1494 }
cd4ae6ad 1495 rcu_read_unlock();
6e1254d2
GH
1496
1497 /*
1498 * And finally, if there were no matches within the domains
1499 * just give the caller *something* to work with from the compatible
1500 * locations.
1501 */
e2c88063
RR
1502 if (this_cpu != -1)
1503 return this_cpu;
1504
1505 cpu = cpumask_any(lowest_mask);
1506 if (cpu < nr_cpu_ids)
1507 return cpu;
1508 return -1;
07b4032c
GH
1509}
1510
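find_lowest_rq() asks cpupri for the set of CPUs currently running the lowest-priority work, then prefers the task's previous CPU (likely cache-hot), then the local CPU, and only then walks the sched domains for something topologically close. A userspace sketch of just that preference order over a cpu_set_t (illustrative, not the kernel algorithm in full):

```c
/* Sketch of the CPU preference order used by find_lowest_rq(). */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int pick_cpu(const cpu_set_t *lowest_mask, int prev_cpu, int this_cpu)
{
	/* 1) Previous CPU: most likely cache-hot for the task. */
	if (CPU_ISSET(prev_cpu, lowest_mask))
		return prev_cpu;

	/* 2) The CPU doing the selection: cheapest to preempt. */
	if (CPU_ISSET(this_cpu, lowest_mask))
		return this_cpu;

	/* 3) Otherwise any candidate (the kernel walks sched domains here). */
	for (int cpu = 0; cpu < CPU_SETSIZE; cpu++)
		if (CPU_ISSET(cpu, lowest_mask))
			return cpu;

	return -1;			/* no suitable CPU */
}

int main(void)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(2, &mask);
	CPU_SET(5, &mask);
	printf("picked cpu %d\n", pick_cpu(&mask, 5, 0));	/* prints 5 */
	return 0;
}
```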
1511/* Will lock the rq it finds */
4df64c0b 1512static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
07b4032c
GH
1513{
1514 struct rq *lowest_rq = NULL;
07b4032c 1515 int tries;
4df64c0b 1516 int cpu;
e8fa1362 1517
07b4032c
GH
1518 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
1519 cpu = find_lowest_rq(task);
1520
2de0b463 1521 if ((cpu == -1) || (cpu == rq->cpu))
e8fa1362
SR
1522 break;
1523
07b4032c
GH
1524 lowest_rq = cpu_rq(cpu);
1525
e8fa1362 1526 /* if the prio of this runqueue changed, try again */
07b4032c 1527 if (double_lock_balance(rq, lowest_rq)) {
e8fa1362
SR
			/*
			 * We had to unlock the run queue. In
			 * the meantime, the task could have
			 * migrated already or had its affinity changed.
			 * Also make sure that it wasn't scheduled on its rq.
			 */
07b4032c 1534 if (unlikely(task_rq(task) != rq ||
96f874e2 1535 !cpumask_test_cpu(lowest_rq->cpu,
fa17b507 1536 tsk_cpus_allowed(task)) ||
07b4032c 1537 task_running(rq, task) ||
fd2f4419 1538 !task->on_rq)) {
4df64c0b 1539
7f1b4393 1540 double_unlock_balance(rq, lowest_rq);
e8fa1362
SR
1541 lowest_rq = NULL;
1542 break;
1543 }
1544 }
1545
1546 /* If this rq is still suitable use it. */
e864c499 1547 if (lowest_rq->rt.highest_prio.curr > task->prio)
e8fa1362
SR
1548 break;
1549
1550 /* try again */
1b12bbc7 1551 double_unlock_balance(rq, lowest_rq);
e8fa1362
SR
1552 lowest_rq = NULL;
1553 }
1554
1555 return lowest_rq;
1556}
1557
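find_lock_lowest_rq() has to hold two runqueue locks at once; double_lock_balance() avoids ABBA deadlock by falling back to a fixed lock order, which can mean dropping and re-taking rq->lock, and that is why the task is revalidated afterwards. A sketch of the same idea with pthread mutexes ordered by address (illustrative; the kernel primitive differs in detail):

```c
/* Illustrative two-lock ordering, mirroring double_lock_balance(). */
#include <pthread.h>

struct fake_rq {
	pthread_mutex_t lock;
};

/* Caller holds this_rq->lock; acquire busiest->lock without ABBA deadlock. */
static void double_lock(struct fake_rq *this_rq, struct fake_rq *busiest)
{
	if (pthread_mutex_trylock(&busiest->lock) == 0)
		return;				/* fast path: no contention */

	if (busiest < this_rq) {
		/* Respect the global (address) order: drop and re-take. */
		pthread_mutex_unlock(&this_rq->lock);
		pthread_mutex_lock(&busiest->lock);
		pthread_mutex_lock(&this_rq->lock);
		/* State observed before this point must be revalidated. */
	} else {
		pthread_mutex_lock(&busiest->lock);
	}
}

int main(void)
{
	struct fake_rq a = { PTHREAD_MUTEX_INITIALIZER };
	struct fake_rq b = { PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&a.lock);
	double_lock(&a, &b);
	pthread_mutex_unlock(&b.lock);
	pthread_mutex_unlock(&a.lock);
	return 0;
}
```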
917b627d
GH
1558static struct task_struct *pick_next_pushable_task(struct rq *rq)
1559{
1560 struct task_struct *p;
1561
1562 if (!has_pushable_tasks(rq))
1563 return NULL;
1564
1565 p = plist_first_entry(&rq->rt.pushable_tasks,
1566 struct task_struct, pushable_tasks);
1567
1568 BUG_ON(rq->cpu != task_cpu(p));
1569 BUG_ON(task_current(rq, p));
29baa747 1570 BUG_ON(p->nr_cpus_allowed <= 1);
917b627d 1571
fd2f4419 1572 BUG_ON(!p->on_rq);
917b627d
GH
1573 BUG_ON(!rt_task(p));
1574
1575 return p;
1576}
1577
e8fa1362
SR
1578/*
1579 * If the current CPU has more than one RT task, see if the non
1580 * running task can migrate over to a CPU that is running a task
1581 * of lesser priority.
1582 */
697f0a48 1583static int push_rt_task(struct rq *rq)
e8fa1362
SR
1584{
1585 struct task_struct *next_task;
1586 struct rq *lowest_rq;
311e800e 1587 int ret = 0;
e8fa1362 1588
a22d7fc1
GH
1589 if (!rq->rt.overloaded)
1590 return 0;
1591
917b627d 1592 next_task = pick_next_pushable_task(rq);
e8fa1362
SR
1593 if (!next_task)
1594 return 0;
1595
49246274 1596retry:
697f0a48 1597 if (unlikely(next_task == rq->curr)) {
f65eda4f 1598 WARN_ON(1);
e8fa1362 1599 return 0;
f65eda4f 1600 }
e8fa1362
SR
1601
	/*
	 * It's possible that the next_task slipped in with a
	 * higher priority than current. If that's the case
	 * just reschedule current.
	 */
697f0a48
GH
1607 if (unlikely(next_task->prio < rq->curr->prio)) {
1608 resched_task(rq->curr);
e8fa1362
SR
1609 return 0;
1610 }
1611
697f0a48 1612 /* We might release rq lock */
e8fa1362
SR
1613 get_task_struct(next_task);
1614
1615 /* find_lock_lowest_rq locks the rq if found */
697f0a48 1616 lowest_rq = find_lock_lowest_rq(next_task, rq);
e8fa1362
SR
1617 if (!lowest_rq) {
1618 struct task_struct *task;
1619 /*
311e800e 1620 * find_lock_lowest_rq releases rq->lock
1563513d
GH
1621 * so it is possible that next_task has migrated.
1622 *
1623 * We need to make sure that the task is still on the same
1624 * run-queue and is also still the next task eligible for
1625 * pushing.
e8fa1362 1626 */
917b627d 1627 task = pick_next_pushable_task(rq);
1563513d
GH
1628 if (task_cpu(next_task) == rq->cpu && task == next_task) {
1629 /*
311e800e
HD
1630 * The task hasn't migrated, and is still the next
1631 * eligible task, but we failed to find a run-queue
1632 * to push it to. Do not retry in this case, since
1633 * other cpus will pull from us when ready.
1563513d 1634 */
1563513d 1635 goto out;
e8fa1362 1636 }
917b627d 1637
1563513d
GH
1638 if (!task)
1639 /* No more tasks, just exit */
1640 goto out;
1641
917b627d 1642 /*
1563513d 1643 * Something has shifted, try again.
917b627d 1644 */
1563513d
GH
1645 put_task_struct(next_task);
1646 next_task = task;
1647 goto retry;
e8fa1362
SR
1648 }
1649
697f0a48 1650 deactivate_task(rq, next_task, 0);
e8fa1362
SR
1651 set_task_cpu(next_task, lowest_rq->cpu);
1652 activate_task(lowest_rq, next_task, 0);
311e800e 1653 ret = 1;
e8fa1362
SR
1654
1655 resched_task(lowest_rq->curr);
1656
1b12bbc7 1657 double_unlock_balance(rq, lowest_rq);
e8fa1362 1658
e8fa1362
SR
1659out:
1660 put_task_struct(next_task);
1661
311e800e 1662 return ret;
e8fa1362
SR
1663}
1664
e8fa1362
SR
1665static void push_rt_tasks(struct rq *rq)
1666{
1667 /* push_rt_task will return true if it moved an RT */
1668 while (push_rt_task(rq))
1669 ;
1670}
1671
f65eda4f
SR
1672static int pull_rt_task(struct rq *this_rq)
1673{
80bf3171 1674 int this_cpu = this_rq->cpu, ret = 0, cpu;
a8728944 1675 struct task_struct *p;
f65eda4f 1676 struct rq *src_rq;
f65eda4f 1677
637f5085 1678 if (likely(!rt_overloaded(this_rq)))
f65eda4f
SR
1679 return 0;
1680
c6c4927b 1681 for_each_cpu(cpu, this_rq->rd->rto_mask) {
f65eda4f
SR
1682 if (this_cpu == cpu)
1683 continue;
1684
1685 src_rq = cpu_rq(cpu);
74ab8e4f
GH
1686
		/*
		 * Don't bother taking the src_rq->lock if the next highest
		 * task is known to be lower-priority than our current task.
		 * This may look racy, but if this value is about to go
		 * logically higher, the src_rq will push this task away.
		 * And if it's going logically lower, we do not care.
		 */
1694 if (src_rq->rt.highest_prio.next >=
1695 this_rq->rt.highest_prio.curr)
1696 continue;
1697
f65eda4f
SR
1698 /*
1699 * We can potentially drop this_rq's lock in
1700 * double_lock_balance, and another CPU could
a8728944 1701 * alter this_rq
f65eda4f 1702 */
a8728944 1703 double_lock_balance(this_rq, src_rq);
f65eda4f
SR
1704
1705 /*
1706 * Are there still pullable RT tasks?
1707 */
614ee1f6
MG
1708 if (src_rq->rt.rt_nr_running <= 1)
1709 goto skip;
f65eda4f 1710
f65eda4f
SR
1711 p = pick_next_highest_task_rt(src_rq, this_cpu);
1712
1713 /*
1714 * Do we have an RT task that preempts
1715 * the to-be-scheduled task?
1716 */
a8728944 1717 if (p && (p->prio < this_rq->rt.highest_prio.curr)) {
f65eda4f 1718 WARN_ON(p == src_rq->curr);
fd2f4419 1719 WARN_ON(!p->on_rq);
f65eda4f
SR
1720
1721 /*
1722 * There's a chance that p is higher in priority
1723 * than what's currently running on its cpu.
1724 * This is just that p is wakeing up and hasn't
1725 * had a chance to schedule. We only pull
1726 * p if it is lower in priority than the
a8728944 1727 * current task on the run queue
f65eda4f 1728 */
a8728944 1729 if (p->prio < src_rq->curr->prio)
614ee1f6 1730 goto skip;
f65eda4f
SR
1731
1732 ret = 1;
1733
1734 deactivate_task(src_rq, p, 0);
1735 set_task_cpu(p, this_cpu);
1736 activate_task(this_rq, p, 0);
1737 /*
1738 * We continue with the search, just in
1739 * case there's an even higher prio task
25985edc 1740 * in another runqueue. (low likelihood
f65eda4f 1741 * but possible)
f65eda4f 1742 */
f65eda4f 1743 }
49246274 1744skip:
1b12bbc7 1745 double_unlock_balance(this_rq, src_rq);
f65eda4f
SR
1746 }
1747
1748 return ret;
1749}
1750
9a897c5a 1751static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
f65eda4f
SR
1752{
1753 /* Try to pull RT tasks here if we lower this rq's prio */
33c3d6c6 1754 if (rq->rt.highest_prio.curr > prev->prio)
f65eda4f
SR
1755 pull_rt_task(rq);
1756}
1757
9a897c5a 1758static void post_schedule_rt(struct rq *rq)
e8fa1362 1759{
967fc046 1760 push_rt_tasks(rq);
e8fa1362
SR
1761}
1762
8ae121ac
GH
1763/*
1764 * If we are not running and we are not going to reschedule soon, we should
1765 * try to push tasks away now
1766 */
efbbd05a 1767static void task_woken_rt(struct rq *rq, struct task_struct *p)
4642dafd 1768{
9a897c5a 1769 if (!task_running(rq, p) &&
8ae121ac 1770 !test_tsk_need_resched(rq->curr) &&
917b627d 1771 has_pushable_tasks(rq) &&
29baa747 1772 p->nr_cpus_allowed > 1 &&
43fa5460 1773 rt_task(rq->curr) &&
29baa747 1774 (rq->curr->nr_cpus_allowed < 2 ||
3be209a8 1775 rq->curr->prio <= p->prio))
4642dafd
SR
1776 push_rt_tasks(rq);
1777}
1778
cd8ba7cd 1779static void set_cpus_allowed_rt(struct task_struct *p,
96f874e2 1780 const struct cpumask *new_mask)
73fe6aae 1781{
8d3d5ada
KT
1782 struct rq *rq;
1783 int weight;
73fe6aae
GH
1784
1785 BUG_ON(!rt_task(p));
1786
8d3d5ada
KT
1787 if (!p->on_rq)
1788 return;
917b627d 1789
8d3d5ada 1790 weight = cpumask_weight(new_mask);
917b627d 1791
8d3d5ada
KT
1792 /*
1793 * Only update if the process changes its state from whether it
1794 * can migrate or not.
1795 */
29baa747 1796 if ((p->nr_cpus_allowed > 1) == (weight > 1))
8d3d5ada 1797 return;
917b627d 1798
8d3d5ada 1799 rq = task_rq(p);
73fe6aae 1800
8d3d5ada
KT
1801 /*
1802 * The process used to be able to migrate OR it can now migrate
1803 */
1804 if (weight <= 1) {
1805 if (!task_current(rq, p))
1806 dequeue_pushable_task(rq, p);
1807 BUG_ON(!rq->rt.rt_nr_migratory);
1808 rq->rt.rt_nr_migratory--;
1809 } else {
1810 if (!task_current(rq, p))
1811 enqueue_pushable_task(rq, p);
1812 rq->rt.rt_nr_migratory++;
73fe6aae 1813 }
8d3d5ada
KT
1814
1815 update_rt_migration(&rq->rt);
73fe6aae 1816}
deeeccd4 1817
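set_cpus_allowed_rt() only updates rt_nr_migratory when the task crosses the one-CPU boundary, i.e. when it changes between "pinned" and "free to migrate". From userspace that boundary is crossed with an affinity call such as:

```c
/* Pinning a task to one CPU makes it non-migratory for the push/pull logic. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t one;

	CPU_ZERO(&one);
	CPU_SET(0, &one);
	if (sched_setaffinity(0, sizeof(one), &one)) {	/* pid 0 == self */
		perror("sched_setaffinity");
		return 1;
	}
	printf("now runnable on CPU 0 only; nr_cpus_allowed == 1\n");
	return 0;
}
```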
bdd7c81b 1818/* Assumes rq->lock is held */
1f11eb6a 1819static void rq_online_rt(struct rq *rq)
bdd7c81b
IM
1820{
1821 if (rq->rt.overloaded)
1822 rt_set_overload(rq);
6e0534f2 1823
7def2be1
PZ
1824 __enable_runtime(rq);
1825
e864c499 1826 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr);
bdd7c81b
IM
1827}
1828
1829/* Assumes rq->lock is held */
1f11eb6a 1830static void rq_offline_rt(struct rq *rq)
bdd7c81b
IM
1831{
1832 if (rq->rt.overloaded)
1833 rt_clear_overload(rq);
6e0534f2 1834
7def2be1
PZ
1835 __disable_runtime(rq);
1836
6e0534f2 1837 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
bdd7c81b 1838}
cb469845
SR
1839
/*
 * When we switch away from the rt queue, we bring ourselves to a position
 * where we might want to pull RT tasks from other runqueues.
 */
da7a735e 1844static void switched_from_rt(struct rq *rq, struct task_struct *p)
cb469845
SR
1845{
1846 /*
1847 * If there are other RT tasks then we will reschedule
1848 * and the scheduling of the other RT tasks will handle
1849 * the balancing. But if we are the last RT task
1850 * we may need to handle the pulling of RT tasks
1851 * now.
1852 */
1158ddb5
KT
1853 if (!p->on_rq || rq->rt.rt_nr_running)
1854 return;
1855
1856 if (pull_rt_task(rq))
1857 resched_task(rq->curr);
cb469845 1858}
3d8cbdf8 1859
029632fb 1860void init_sched_rt_class(void)
3d8cbdf8
RR
1861{
1862 unsigned int i;
1863
029632fb 1864 for_each_possible_cpu(i) {
eaa95840 1865 zalloc_cpumask_var_node(&per_cpu(local_cpu_mask, i),
6ca09dfc 1866 GFP_KERNEL, cpu_to_node(i));
029632fb 1867 }
3d8cbdf8 1868}
cb469845
SR
1869#endif /* CONFIG_SMP */
1870
1871/*
1872 * When switching a task to RT, we may overload the runqueue
1873 * with RT tasks. In this case we try to push them off to
1874 * other runqueues.
1875 */
da7a735e 1876static void switched_to_rt(struct rq *rq, struct task_struct *p)
cb469845
SR
1877{
1878 int check_resched = 1;
1879
1880 /*
1881 * If we are already running, then there's nothing
1882 * that needs to be done. But if we are not running
1883 * we may need to preempt the current running task.
1884 * If that current running task is also an RT task
1885 * then see if we can move to another run queue.
1886 */
fd2f4419 1887 if (p->on_rq && rq->curr != p) {
cb469845
SR
1888#ifdef CONFIG_SMP
1889 if (rq->rt.overloaded && push_rt_task(rq) &&
1890 /* Don't resched if we changed runqueues */
1891 rq != task_rq(p))
1892 check_resched = 0;
1893#endif /* CONFIG_SMP */
1894 if (check_resched && p->prio < rq->curr->prio)
1895 resched_task(rq->curr);
1896 }
1897}
1898
1899/*
1900 * Priority of the task has changed. This may cause
1901 * us to initiate a push or pull.
1902 */
da7a735e
PZ
1903static void
1904prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
cb469845 1905{
fd2f4419 1906 if (!p->on_rq)
da7a735e
PZ
1907 return;
1908
1909 if (rq->curr == p) {
cb469845
SR
1910#ifdef CONFIG_SMP
1911 /*
1912 * If our priority decreases while running, we
1913 * may need to pull tasks to this runqueue.
1914 */
1915 if (oldprio < p->prio)
1916 pull_rt_task(rq);
1917 /*
1918 * If there's a higher priority task waiting to run
6fa46fa5
SR
1919 * then reschedule. Note, the above pull_rt_task
1920 * can release the rq lock and p could migrate.
1921 * Only reschedule if p is still on the same runqueue.
cb469845 1922 */
e864c499 1923 if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
cb469845
SR
1924 resched_task(p);
1925#else
1926 /* For UP simply resched on drop of prio */
1927 if (oldprio < p->prio)
1928 resched_task(p);
e8fa1362 1929#endif /* CONFIG_SMP */
cb469845
SR
1930 } else {
1931 /*
1932 * This task is not running, but if it is
1933 * greater than the current running task
1934 * then reschedule.
1935 */
1936 if (p->prio < rq->curr->prio)
1937 resched_task(rq->curr);
1938 }
1939}
1940
78f2c7db
PZ
1941static void watchdog(struct rq *rq, struct task_struct *p)
1942{
1943 unsigned long soft, hard;
1944
78d7d407
JS
1945 /* max may change after cur was read, this will be fixed next tick */
1946 soft = task_rlimit(p, RLIMIT_RTTIME);
1947 hard = task_rlimit_max(p, RLIMIT_RTTIME);
78f2c7db
PZ
1948
1949 if (soft != RLIM_INFINITY) {
1950 unsigned long next;
1951
57d2aa00
YX
1952 if (p->rt.watchdog_stamp != jiffies) {
1953 p->rt.timeout++;
1954 p->rt.watchdog_stamp = jiffies;
1955 }
1956
78f2c7db 1957 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
5a52dd50 1958 if (p->rt.timeout > next)
f06febc9 1959 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
78f2c7db
PZ
1960 }
1961}
bb44e5d1 1962
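The watchdog() above enforces RLIMIT_RTTIME: rt.timeout counts ticks of RT execution without a sleep, and once the accumulated time passes the soft limit the task receives SIGXCPU (and SIGKILL at the hard limit). Userspace arms it like this:

```c
/* Cap how long this task may run at RT priority without blocking. */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = {
		.rlim_cur = 500000,	/* 500 ms: SIGXCPU when exceeded */
		.rlim_max = 1000000,	/* 1 s: SIGKILL when exceeded    */
	};

	if (setrlimit(RLIMIT_RTTIME, &rl)) {
		perror("setrlimit");
		return 1;
	}
	printf("RLIMIT_RTTIME armed: %llu/%llu us\n",
	       (unsigned long long)rl.rlim_cur,
	       (unsigned long long)rl.rlim_max);
	return 0;
}
```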
8f4d37ec 1963static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
bb44e5d1 1964{
454c7999
CC
1965 struct sched_rt_entity *rt_se = &p->rt;
1966
67e2be02
PZ
1967 update_curr_rt(rq);
1968
78f2c7db
PZ
1969 watchdog(rq, p);
1970
bb44e5d1
IM
1971 /*
1972 * RR tasks need a special form of timeslice management.
1973 * FIFO tasks have no timeslices.
1974 */
1975 if (p->policy != SCHED_RR)
1976 return;
1977
fa717060 1978 if (--p->rt.time_slice)
bb44e5d1
IM
1979 return;
1980
ce0dbbbb 1981 p->rt.time_slice = sched_rr_timeslice;
bb44e5d1 1982
	/*
	 * Requeue to the end of the queue if we (and all of our ancestors)
	 * are the only entity on the queue.
	 */
454c7999
CC
1987 for_each_sched_rt_entity(rt_se) {
1988 if (rt_se->run_list.prev != rt_se->run_list.next) {
1989 requeue_task_rt(rq, p, 0);
1990 set_tsk_need_resched(p);
1991 return;
1992 }
98fbc798 1993 }
bb44e5d1
IM
1994}
1995
83b699ed
SV
1996static void set_curr_task_rt(struct rq *rq)
1997{
1998 struct task_struct *p = rq->curr;
1999
78becc27 2000 p->se.exec_start = rq_clock_task(rq);
917b627d
GH
2001
2002 /* The running task is never eligible for pushing */
2003 dequeue_pushable_task(rq, p);
83b699ed
SV
2004}
2005
6d686f45 2006static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
0d721cea
PW
2007{
2008 /*
2009 * Time slice is 0 for SCHED_FIFO tasks
2010 */
2011 if (task->policy == SCHED_RR)
ce0dbbbb 2012 return sched_rr_timeslice;
0d721cea
PW
2013 else
2014 return 0;
2015}
2016
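get_rr_interval_rt() is what backs the sched_rr_get_interval() syscall: SCHED_RR tasks report sched_rr_timeslice, SCHED_FIFO tasks report 0. A caller that is already running as SCHED_RR can read its own quantum like this:

```c
/* Query the round-robin quantum that task_tick_rt() refills. */
#include <sched.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	if (sched_rr_get_interval(0, &ts)) {	/* pid 0 == current task */
		perror("sched_rr_get_interval");
		return 1;
	}
	printf("RR timeslice: %ld.%09ld s\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}
```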
029632fb 2017const struct sched_class rt_sched_class = {
5522d5d5 2018 .next = &fair_sched_class,
bb44e5d1
IM
2019 .enqueue_task = enqueue_task_rt,
2020 .dequeue_task = dequeue_task_rt,
2021 .yield_task = yield_task_rt,
2022
2023 .check_preempt_curr = check_preempt_curr_rt,
2024
2025 .pick_next_task = pick_next_task_rt,
2026 .put_prev_task = put_prev_task_rt,
2027
681f3e68 2028#ifdef CONFIG_SMP
4ce72a2c
LZ
2029 .select_task_rq = select_task_rq_rt,
2030
73fe6aae 2031 .set_cpus_allowed = set_cpus_allowed_rt,
1f11eb6a
GH
2032 .rq_online = rq_online_rt,
2033 .rq_offline = rq_offline_rt,
9a897c5a
SR
2034 .pre_schedule = pre_schedule_rt,
2035 .post_schedule = post_schedule_rt,
efbbd05a 2036 .task_woken = task_woken_rt,
cb469845 2037 .switched_from = switched_from_rt,
681f3e68 2038#endif
bb44e5d1 2039
83b699ed 2040 .set_curr_task = set_curr_task_rt,
bb44e5d1 2041 .task_tick = task_tick_rt,
cb469845 2042
0d721cea
PW
2043 .get_rr_interval = get_rr_interval_rt,
2044
cb469845
SR
2045 .prio_changed = prio_changed_rt,
2046 .switched_to = switched_to_rt,
bb44e5d1 2047};
ada18de2
PZ
2048
2049#ifdef CONFIG_SCHED_DEBUG
2050extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
2051
029632fb 2052void print_rt_stats(struct seq_file *m, int cpu)
ada18de2 2053{
ec514c48 2054 rt_rq_iter_t iter;
ada18de2
PZ
2055 struct rt_rq *rt_rq;
2056
2057 rcu_read_lock();
ec514c48 2058 for_each_rt_rq(rt_rq, iter, cpu_rq(cpu))
ada18de2
PZ
2059 print_rt_rq(m, cpu, rt_rq);
2060 rcu_read_unlock();
2061}
55e12e5e 2062#endif /* CONFIG_SCHED_DEBUG */