sched: rt: fix the bandwidth constraint computations
[linux-2.6-block.git] / kernel / sched_rt.c
1/*
2 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
3 * policies)
4 */
5
6#ifdef CONFIG_SMP
7
8static inline int rt_overloaded(struct rq *rq)
9{
10 return atomic_read(&rq->rd->rto_count);
11}
12
13static inline void rt_set_overload(struct rq *rq)
14{
15 if (!rq->online)
16 return;
17
18 cpu_set(rq->cpu, rq->rd->rto_mask);
19 /*
20 * Make sure the mask is visible before we set
21 * the overload count. That is checked to determine
22 * if we should look at the mask. It would be a shame
23 * if we looked at the mask, but the mask was not
24 * updated yet.
25 */
26 wmb();
27 atomic_inc(&rq->rd->rto_count);
28}
29
30static inline void rt_clear_overload(struct rq *rq)
31{
32 if (!rq->online)
33 return;
34
35 /* the order here really doesn't matter */
36 atomic_dec(&rq->rd->rto_count);
37 cpu_clear(rq->cpu, rq->rd->rto_mask);
38}
39
40static void update_rt_migration(struct rq *rq)
41{
42 if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
43 if (!rq->rt.overloaded) {
44 rt_set_overload(rq);
45 rq->rt.overloaded = 1;
46 }
47 } else if (rq->rt.overloaded) {
48 rt_clear_overload(rq);
49 rq->rt.overloaded = 0;
50 }
51}
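/*
 * Note that a runqueue only counts as "RT overloaded" when it has more
 * than one runnable RT task and at least one of them is allowed to run
 * elsewhere (rt_nr_migratory); overload marks this rq as a source for
 * the push/pull balancing further down in this file.
 */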
52#endif /* CONFIG_SMP */
53
54static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
55{
56 return container_of(rt_se, struct task_struct, rt);
57}
58
59static inline int on_rt_rq(struct sched_rt_entity *rt_se)
60{
61 return !list_empty(&rt_se->run_list);
62}
63
64#ifdef CONFIG_RT_GROUP_SCHED
65
66static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
67{
68 if (!rt_rq->tg)
69 return RUNTIME_INF;
70
71 return rt_rq->rt_runtime;
72}
73
74static inline u64 sched_rt_period(struct rt_rq *rt_rq)
75{
76 return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period);
77}
78
79#define for_each_leaf_rt_rq(rt_rq, rq) \
80 list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)
81
82static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
83{
84 return rt_rq->rq;
85}
86
87static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
88{
89 return rt_se->rt_rq;
90}
91
92#define for_each_sched_rt_entity(rt_se) \
93 for (; rt_se; rt_se = rt_se->parent)
94
95static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
96{
97 return rt_se->my_q;
98}
99
100static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
101static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
102
103static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
104{
105 struct sched_rt_entity *rt_se = rt_rq->rt_se;
106
107 if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
108 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
109
110 enqueue_rt_entity(rt_se);
111 if (rt_rq->highest_prio < curr->prio)
112 resched_task(curr);
113 }
114}
115
116static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
117{
118 struct sched_rt_entity *rt_se = rt_rq->rt_se;
119
120 if (rt_se && on_rt_rq(rt_se))
121 dequeue_rt_entity(rt_se);
122}
123
124static inline int rt_rq_throttled(struct rt_rq *rt_rq)
125{
126 return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted;
127}
128
129static int rt_se_boosted(struct sched_rt_entity *rt_se)
130{
131 struct rt_rq *rt_rq = group_rt_rq(rt_se);
132 struct task_struct *p;
133
134 if (rt_rq)
135 return !!rt_rq->rt_nr_boosted;
136
137 p = rt_task_of(rt_se);
138 return p->prio != p->normal_prio;
139}
140
141#ifdef CONFIG_SMP
142static inline cpumask_t sched_rt_period_mask(void)
143{
144 return cpu_rq(smp_processor_id())->rd->span;
145}
146#else
147static inline cpumask_t sched_rt_period_mask(void)
148{
149 return cpu_online_map;
150}
151#endif
152
153static inline
154struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
155{
156 return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu];
157}
158
159static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
160{
161 return &rt_rq->tg->rt_bandwidth;
162}
163
164#else
165
166static inline u64 sched_rt_runtime(struct rt_rq *rt_rq)
167{
168 return rt_rq->rt_runtime;
169}
170
171static inline u64 sched_rt_period(struct rt_rq *rt_rq)
172{
173 return ktime_to_ns(def_rt_bandwidth.rt_period);
174}
175
176#define for_each_leaf_rt_rq(rt_rq, rq) \
177 for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
178
179static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
180{
181 return container_of(rt_rq, struct rq, rt);
182}
183
184static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
185{
186 struct task_struct *p = rt_task_of(rt_se);
187 struct rq *rq = task_rq(p);
188
189 return &rq->rt;
190}
191
192#define for_each_sched_rt_entity(rt_se) \
193 for (; rt_se; rt_se = NULL)
194
195static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
196{
197 return NULL;
198}
199
200static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
201{
202}
203
204static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
205{
206}
207
208static inline int rt_rq_throttled(struct rt_rq *rt_rq)
209{
210 return rt_rq->rt_throttled;
211}
212
213static inline cpumask_t sched_rt_period_mask(void)
214{
215 return cpu_online_map;
216}
217
218static inline
219struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu)
220{
221 return &cpu_rq(cpu)->rt;
222}
223
224static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
225{
226 return &def_rt_bandwidth;
227}
228
229#endif
230
231#ifdef CONFIG_SMP
232static int do_balance_runtime(struct rt_rq *rt_rq)
233{
234 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
235 struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
236 int i, weight, more = 0;
237 u64 rt_period;
238
239 weight = cpus_weight(rd->span);
240
241 spin_lock(&rt_b->rt_runtime_lock);
242 rt_period = ktime_to_ns(rt_b->rt_period);
243 for_each_cpu_mask(i, rd->span) {
244 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
245 s64 diff;
246
247 if (iter == rt_rq)
248 continue;
249
250 spin_lock(&iter->rt_runtime_lock);
251 if (iter->rt_runtime == RUNTIME_INF)
252 goto next;
253
254 diff = iter->rt_runtime - iter->rt_time;
255 if (diff > 0) {
256 do_div(diff, weight);
257 if (rt_rq->rt_runtime + diff > rt_period)
258 diff = rt_period - rt_rq->rt_runtime;
259 iter->rt_runtime -= diff;
260 rt_rq->rt_runtime += diff;
261 more = 1;
262 if (rt_rq->rt_runtime == rt_period) {
263 spin_unlock(&iter->rt_runtime_lock);
264 break;
265 }
266 }
267next:
268 spin_unlock(&iter->rt_runtime_lock);
269 }
270 spin_unlock(&rt_b->rt_runtime_lock);
271
272 return more;
273}
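/*
 * Illustrative numbers (assuming the default 1000ms period and 950ms
 * runtime per rt_rq): if this rt_rq has used up its 950ms and a peer
 * CPU has only consumed 150ms, the peer has 800ms to spare.  In a
 * 4-cpu root domain the per-iteration share is 800ms / 4 = 200ms, but
 * since 950ms + 200ms would exceed the period the transfer is clamped
 * to 50ms, so rt_runtime can never grow beyond rt_period, i.e. no
 * single CPU is ever granted more than 100% RT bandwidth.
 */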
274
275static void __disable_runtime(struct rq *rq)
276{
277 struct root_domain *rd = rq->rd;
278 struct rt_rq *rt_rq;
279
280 if (unlikely(!scheduler_running))
281 return;
282
283 for_each_leaf_rt_rq(rt_rq, rq) {
284 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
285 s64 want;
286 int i;
287
288 spin_lock(&rt_b->rt_runtime_lock);
289 spin_lock(&rt_rq->rt_runtime_lock);
290 if (rt_rq->rt_runtime == RUNTIME_INF ||
291 rt_rq->rt_runtime == rt_b->rt_runtime)
292 goto balanced;
293 spin_unlock(&rt_rq->rt_runtime_lock);
294
295 want = rt_b->rt_runtime - rt_rq->rt_runtime;
296
297 for_each_cpu_mask(i, rd->span) {
298 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
299 s64 diff;
300
301 if (iter == rt_rq)
302 continue;
303
304 spin_lock(&iter->rt_runtime_lock);
305 if (want > 0) {
306 diff = min_t(s64, iter->rt_runtime, want);
307 iter->rt_runtime -= diff;
308 want -= diff;
309 } else {
310 iter->rt_runtime -= want;
311 want -= want;
312 }
313 spin_unlock(&iter->rt_runtime_lock);
314
315 if (!want)
316 break;
317 }
318
319 spin_lock(&rt_rq->rt_runtime_lock);
320 BUG_ON(want);
321balanced:
322 rt_rq->rt_runtime = RUNTIME_INF;
323 spin_unlock(&rt_rq->rt_runtime_lock);
324 spin_unlock(&rt_b->rt_runtime_lock);
325 }
326}
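/*
 * __disable_runtime() is called when this rq leaves its root domain:
 * the difference between the base rt_b->rt_runtime and whatever this
 * rt_rq currently holds (it may have lent out or borrowed runtime via
 * do_balance_runtime()) is settled with the remaining CPUs, and the
 * rt_rq is then left at RUNTIME_INF until __enable_runtime() restores
 * the configured value when the rq comes back online.
 */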
327
328static void disable_runtime(struct rq *rq)
329{
330 unsigned long flags;
331
332 spin_lock_irqsave(&rq->lock, flags);
333 __disable_runtime(rq);
334 spin_unlock_irqrestore(&rq->lock, flags);
335}
336
337static void __enable_runtime(struct rq *rq)
338{
339 struct root_domain *rd = rq->rd;
340 struct rt_rq *rt_rq;
341
342 if (unlikely(!scheduler_running))
343 return;
344
345 for_each_leaf_rt_rq(rt_rq, rq) {
346 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
347
348 spin_lock(&rt_b->rt_runtime_lock);
349 spin_lock(&rt_rq->rt_runtime_lock);
350 rt_rq->rt_runtime = rt_b->rt_runtime;
351 rt_rq->rt_time = 0;
352 spin_unlock(&rt_rq->rt_runtime_lock);
353 spin_unlock(&rt_b->rt_runtime_lock);
354 }
355}
356
357static void enable_runtime(struct rq *rq)
358{
359 unsigned long flags;
360
361 spin_lock_irqsave(&rq->lock, flags);
362 __enable_runtime(rq);
363 spin_unlock_irqrestore(&rq->lock, flags);
364}
365
366static int balance_runtime(struct rt_rq *rt_rq)
367{
368 int more = 0;
369
370 if (rt_rq->rt_time > rt_rq->rt_runtime) {
371 spin_unlock(&rt_rq->rt_runtime_lock);
372 more = do_balance_runtime(rt_rq);
373 spin_lock(&rt_rq->rt_runtime_lock);
374 }
375
376 return more;
377}
378#else
379static inline int balance_runtime(struct rt_rq *rt_rq)
380{
381 return 0;
382}
383#endif
384
385static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
386{
387 int i, idle = 1;
388 cpumask_t span;
389
390 if (rt_b->rt_runtime == RUNTIME_INF)
391 return 1;
392
393 span = sched_rt_period_mask();
394 for_each_cpu_mask(i, span) {
395 int enqueue = 0;
396 struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
397 struct rq *rq = rq_of_rt_rq(rt_rq);
398
399 spin_lock(&rq->lock);
400 if (rt_rq->rt_time) {
401 u64 runtime;
402
403 spin_lock(&rt_rq->rt_runtime_lock);
404 if (rt_rq->rt_throttled)
405 balance_runtime(rt_rq);
406 runtime = rt_rq->rt_runtime;
407 rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
408 if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) {
409 rt_rq->rt_throttled = 0;
410 enqueue = 1;
411 }
412 if (rt_rq->rt_time || rt_rq->rt_nr_running)
413 idle = 0;
414 spin_unlock(&rt_rq->rt_runtime_lock);
415 }
416
417 if (enqueue)
418 sched_rt_rq_enqueue(rt_rq);
419 spin_unlock(&rq->lock);
420 }
421
422 return idle;
423}
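/*
 * This runs from the rt_period hrtimer: each period, every rt_rq in
 * the span has up to 'overrun * runtime' subtracted from its
 * accumulated rt_time, and a throttled rt_rq whose rt_time has dropped
 * back below its runtime is unthrottled and re-enqueued.  The return
 * value tells the timer whether all rt_rqs are idle, in which case it
 * can stop firing until RT activity resumes.
 */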
424
425static inline int rt_se_prio(struct sched_rt_entity *rt_se)
426{
427#ifdef CONFIG_RT_GROUP_SCHED
428 struct rt_rq *rt_rq = group_rt_rq(rt_se);
429
430 if (rt_rq)
431 return rt_rq->highest_prio;
432#endif
433
434 return rt_task_of(rt_se)->prio;
435}
436
437static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
438{
439 u64 runtime = sched_rt_runtime(rt_rq);
440
441 if (runtime == RUNTIME_INF)
442 return 0;
443
444 if (rt_rq->rt_throttled)
445 return rt_rq_throttled(rt_rq);
446
447 if (sched_rt_runtime(rt_rq) >= sched_rt_period(rt_rq))
448 return 0;
449
450 balance_runtime(rt_rq);
451 runtime = sched_rt_runtime(rt_rq);
452 if (runtime == RUNTIME_INF)
453 return 0;
454
455 if (rt_rq->rt_time > runtime) {
456 rt_rq->rt_throttled = 1;
457 if (rt_rq_throttled(rt_rq)) {
458 sched_rt_rq_dequeue(rt_rq);
459 return 1;
460 }
461 }
462
463 return 0;
464}
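/*
 * The runtime/period pair enforced here comes from the
 * sched_rt_runtime_us / sched_rt_period_us sysctls (or, with
 * RT_GROUP_SCHED, from the group's rt_bandwidth).  For example, with
 * the default 950000us/1000000us an rt_rq that accumulates more than
 * 950ms of rt_time within one period is throttled and dequeued until
 * the period timer above replenishes it.
 */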
465
466/*
467 * Update the current task's runtime statistics. Skip current tasks that
468 * are not in our scheduling class.
469 */
470static void update_curr_rt(struct rq *rq)
471{
472 struct task_struct *curr = rq->curr;
473 struct sched_rt_entity *rt_se = &curr->rt;
474 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
475 u64 delta_exec;
476
477 if (!task_has_rt_policy(curr))
478 return;
479
480 delta_exec = rq->clock - curr->se.exec_start;
481 if (unlikely((s64)delta_exec < 0))
482 delta_exec = 0;
483
484 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
485
486 curr->se.sum_exec_runtime += delta_exec;
487 curr->se.exec_start = rq->clock;
488 cpuacct_charge(curr, delta_exec);
489
490 for_each_sched_rt_entity(rt_se) {
491 rt_rq = rt_rq_of_se(rt_se);
492
493 spin_lock(&rt_rq->rt_runtime_lock);
494 rt_rq->rt_time += delta_exec;
495 if (sched_rt_runtime_exceeded(rt_rq))
496 resched_task(curr);
497 spin_unlock(&rt_rq->rt_runtime_lock);
498 }
499}
500
501static inline
502void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
503{
504 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
505 rt_rq->rt_nr_running++;
506#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
507 if (rt_se_prio(rt_se) < rt_rq->highest_prio) {
508 struct rq *rq = rq_of_rt_rq(rt_rq);
509
510 rt_rq->highest_prio = rt_se_prio(rt_se);
511#ifdef CONFIG_SMP
512 if (rq->online)
513 cpupri_set(&rq->rd->cpupri, rq->cpu,
514 rt_se_prio(rt_se));
515#endif
516 }
517#endif
518#ifdef CONFIG_SMP
519 if (rt_se->nr_cpus_allowed > 1) {
520 struct rq *rq = rq_of_rt_rq(rt_rq);
521
522 rq->rt.rt_nr_migratory++;
523 }
524
525 update_rt_migration(rq_of_rt_rq(rt_rq));
526#endif
527#ifdef CONFIG_RT_GROUP_SCHED
528 if (rt_se_boosted(rt_se))
529 rt_rq->rt_nr_boosted++;
530
531 if (rt_rq->tg)
532 start_rt_bandwidth(&rt_rq->tg->rt_bandwidth);
533#else
534 start_rt_bandwidth(&def_rt_bandwidth);
535#endif
536}
537
538static inline
539void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
540{
541#ifdef CONFIG_SMP
542 int highest_prio = rt_rq->highest_prio;
543#endif
544
545 WARN_ON(!rt_prio(rt_se_prio(rt_se)));
546 WARN_ON(!rt_rq->rt_nr_running);
547 rt_rq->rt_nr_running--;
548#if defined CONFIG_SMP || defined CONFIG_RT_GROUP_SCHED
549 if (rt_rq->rt_nr_running) {
550 struct rt_prio_array *array;
551
552 WARN_ON(rt_se_prio(rt_se) < rt_rq->highest_prio);
553 if (rt_se_prio(rt_se) == rt_rq->highest_prio) {
554 /* recalculate */
555 array = &rt_rq->active;
556 rt_rq->highest_prio =
557 sched_find_first_bit(array->bitmap);
558 } /* otherwise leave rt_rq->highest_prio alone */
559 } else
560 rt_rq->highest_prio = MAX_RT_PRIO;
561#endif
562#ifdef CONFIG_SMP
563 if (rt_se->nr_cpus_allowed > 1) {
564 struct rq *rq = rq_of_rt_rq(rt_rq);
565 rq->rt.rt_nr_migratory--;
566 }
567
568 if (rt_rq->highest_prio != highest_prio) {
569 struct rq *rq = rq_of_rt_rq(rt_rq);
570
571 if (rq->online)
572 cpupri_set(&rq->rd->cpupri, rq->cpu,
573 rt_rq->highest_prio);
574 }
575
576 update_rt_migration(rq_of_rt_rq(rt_rq));
577#endif /* CONFIG_SMP */
578#ifdef CONFIG_RT_GROUP_SCHED
579 if (rt_se_boosted(rt_se))
580 rt_rq->rt_nr_boosted--;
581
582 WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted);
583#endif
584}
585
586static void __enqueue_rt_entity(struct sched_rt_entity *rt_se)
587{
588 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
589 struct rt_prio_array *array = &rt_rq->active;
590 struct rt_rq *group_rq = group_rt_rq(rt_se);
591 struct list_head *queue = array->queue + rt_se_prio(rt_se);
592
593 /*
594 * Don't enqueue the group if it's throttled, or when empty.
595 * The latter is a consequence of the former when a child group
596 * gets throttled and the current group doesn't have any other
597 * active members.
598 */
599 if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running))
600 return;
601
602 if (rt_se->nr_cpus_allowed == 1)
603 list_add(&rt_se->run_list, queue);
604 else
605 list_add_tail(&rt_se->run_list, queue);
606
607 __set_bit(rt_se_prio(rt_se), array->bitmap);
608
609 inc_rt_tasks(rt_se, rt_rq);
610}
611
612static void __dequeue_rt_entity(struct sched_rt_entity *rt_se)
613{
614 struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
615 struct rt_prio_array *array = &rt_rq->active;
616
617 list_del_init(&rt_se->run_list);
618 if (list_empty(array->queue + rt_se_prio(rt_se)))
619 __clear_bit(rt_se_prio(rt_se), array->bitmap);
620
621 dec_rt_tasks(rt_se, rt_rq);
622}
623
624/*
625 * Because the prio of an upper entry depends on the lower
626 * entries, we must remove entries top - down.
627 */
628static void dequeue_rt_stack(struct sched_rt_entity *rt_se)
629{
630 struct sched_rt_entity *back = NULL;
631
632 for_each_sched_rt_entity(rt_se) {
633 rt_se->back = back;
634 back = rt_se;
635 }
636
637 for (rt_se = back; rt_se; rt_se = rt_se->back) {
638 if (on_rt_rq(rt_se))
639 __dequeue_rt_entity(rt_se);
640 }
641}
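/*
 * Example: with nested groups A->B->task, dequeueing only the task
 * could leave B queued but empty, and B's (and hence A's) effective
 * priority stale.  So the whole path is taken off the queues here,
 * top-down via the 'back' chain, and the callers re-enqueue whatever
 * should remain, bottom-up.
 */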
642
643static void enqueue_rt_entity(struct sched_rt_entity *rt_se)
644{
645 dequeue_rt_stack(rt_se);
646 for_each_sched_rt_entity(rt_se)
647 __enqueue_rt_entity(rt_se);
648}
649
650static void dequeue_rt_entity(struct sched_rt_entity *rt_se)
651{
652 dequeue_rt_stack(rt_se);
653
654 for_each_sched_rt_entity(rt_se) {
655 struct rt_rq *rt_rq = group_rt_rq(rt_se);
656
657 if (rt_rq && rt_rq->rt_nr_running)
658 __enqueue_rt_entity(rt_se);
659 }
660}
661
662/*
663 * Adding/removing a task to/from a priority array:
664 */
665static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int wakeup)
666{
667 struct sched_rt_entity *rt_se = &p->rt;
668
669 if (wakeup)
670 rt_se->timeout = 0;
671
672 enqueue_rt_entity(rt_se);
673}
674
675static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int sleep)
676{
677 struct sched_rt_entity *rt_se = &p->rt;
678
679 update_curr_rt(rq);
680 dequeue_rt_entity(rt_se);
681}
682
683/*
684 * Put task to the end of the run list without the overhead of dequeue
685 * followed by enqueue.
686 */
687static
688void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se)
689{
690 struct rt_prio_array *array = &rt_rq->active;
691 struct list_head *queue = array->queue + rt_se_prio(rt_se);
692
693 if (on_rt_rq(rt_se)) {
694 list_del_init(&rt_se->run_list);
695 list_add_tail(&rt_se->run_list,
696 array->queue + rt_se_prio(rt_se));
697 }
698}
699
700static void requeue_task_rt(struct rq *rq, struct task_struct *p)
701{
702 struct sched_rt_entity *rt_se = &p->rt;
703 struct rt_rq *rt_rq;
704
705 for_each_sched_rt_entity(rt_se) {
706 rt_rq = rt_rq_of_se(rt_se);
707 requeue_rt_entity(rt_rq, rt_se);
708 }
709}
710
711static void yield_task_rt(struct rq *rq)
712{
713 requeue_task_rt(rq, rq->curr);
714}
715
716#ifdef CONFIG_SMP
717static int find_lowest_rq(struct task_struct *task);
718
719static int select_task_rq_rt(struct task_struct *p, int sync)
720{
721 struct rq *rq = task_rq(p);
722
723 /*
724 * If the current task is an RT task, then
725 * try to see if we can wake this RT task up on another
726 * runqueue. Otherwise simply start this RT task
727 * on its current runqueue.
728 *
729 * We want to avoid overloading runqueues, even if
730 * the woken RT task is of higher priority than the current RT task.
731 * RT tasks behave differently from other tasks: if
732 * one gets preempted, we try to push it off to another queue.
733 * So trying to keep a preempting RT task on the same
734 * cache-hot CPU will force the running RT task off to
735 * a cold CPU, and we waste all that cache on the lower-priority
736 * RT task in the hope of saving some of it for the RT task
737 * that is just being woken and probably will have a
738 * cold cache anyway.
739 */
740 if (unlikely(rt_task(rq->curr)) &&
741 (p->rt.nr_cpus_allowed > 1)) {
742 int cpu = find_lowest_rq(p);
743
744 return (cpu == -1) ? task_cpu(p) : cpu;
745 }
746
747 /*
748 * Otherwise, just let it ride on the affined RQ and the
749 * post-schedule router will push the preempted task away
750 */
751 return task_cpu(p);
752}
753#endif /* CONFIG_SMP */
754
755/*
756 * Preempt the current task with a newly woken task if needed:
757 */
758static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p)
759{
760 if (p->prio < rq->curr->prio) {
761 resched_task(rq->curr);
762 return;
763 }
764
765#ifdef CONFIG_SMP
766 /*
767 * If:
768 *
769 * - the newly woken task is of equal priority to the current task
770 * - the newly woken task is non-migratable while current is migratable
771 * - current will be preempted on the next reschedule
772 *
773 * we should check to see if current can readily move to a different
774 * cpu. If so, we will reschedule to allow the push logic to try
775 * to move current somewhere else, making room for our non-migratable
776 * task.
777 */
778 if ((p->prio == rq->curr->prio)
779 && p->rt.nr_cpus_allowed == 1
780 && rq->curr->rt.nr_cpus_allowed != 1) {
781 cpumask_t mask;
782
783 if (cpupri_find(&rq->rd->cpupri, rq->curr, &mask))
784 /*
785 * There appear to be other cpus that can accept
786 * current, so let's reschedule to try and push it away
787 */
788 resched_task(rq->curr);
789 }
790#endif
791}
792
793static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq,
794 struct rt_rq *rt_rq)
795{
796 struct rt_prio_array *array = &rt_rq->active;
797 struct sched_rt_entity *next = NULL;
798 struct list_head *queue;
799 int idx;
800
801 idx = sched_find_first_bit(array->bitmap);
802 BUG_ON(idx >= MAX_RT_PRIO);
803
804 queue = array->queue + idx;
805 next = list_entry(queue->next, struct sched_rt_entity, run_list);
806
807 return next;
808}
809
810static struct task_struct *pick_next_task_rt(struct rq *rq)
811{
812 struct sched_rt_entity *rt_se;
813 struct task_struct *p;
814 struct rt_rq *rt_rq;
815
816 rt_rq = &rq->rt;
817
818 if (unlikely(!rt_rq->rt_nr_running))
819 return NULL;
820
821 if (rt_rq_throttled(rt_rq))
822 return NULL;
823
824 do {
825 rt_se = pick_next_rt_entity(rq, rt_rq);
826 BUG_ON(!rt_se);
827 rt_rq = group_rt_rq(rt_se);
828 } while (rt_rq);
829
830 p = rt_task_of(rt_se);
831 p->se.exec_start = rq->clock;
832 return p;
833}
834
835static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
836{
837 update_curr_rt(rq);
838 p->se.exec_start = 0;
839}
840
841#ifdef CONFIG_SMP
842
843/* Only try algorithms three times */
844#define RT_MAX_TRIES 3
845
846static int double_lock_balance(struct rq *this_rq, struct rq *busiest);
847static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep);
848
849static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
850{
851 if (!task_running(rq, p) &&
852 (cpu < 0 || cpu_isset(cpu, p->cpus_allowed)) &&
853 (p->rt.nr_cpus_allowed > 1))
854 return 1;
855 return 0;
856}
857
858/* Return the second highest RT task, NULL otherwise */
859static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu)
860{
861 struct task_struct *next = NULL;
862 struct sched_rt_entity *rt_se;
863 struct rt_prio_array *array;
864 struct rt_rq *rt_rq;
865 int idx;
866
867 for_each_leaf_rt_rq(rt_rq, rq) {
868 array = &rt_rq->active;
869 idx = sched_find_first_bit(array->bitmap);
870 next_idx:
871 if (idx >= MAX_RT_PRIO)
872 continue;
873 if (next && next->prio < idx)
874 continue;
875 list_for_each_entry(rt_se, array->queue + idx, run_list) {
876 struct task_struct *p = rt_task_of(rt_se);
877 if (pick_rt_task(rq, p, cpu)) {
878 next = p;
879 break;
880 }
881 }
882 if (!next) {
883 idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1);
884 goto next_idx;
885 }
886 }
887
888 return next;
889}
890
891static DEFINE_PER_CPU(cpumask_t, local_cpu_mask);
892
893static inline int pick_optimal_cpu(int this_cpu, cpumask_t *mask)
894{
895 int first;
896
897 /* "this_cpu" is cheaper to preempt than a remote processor */
898 if ((this_cpu != -1) && cpu_isset(this_cpu, *mask))
899 return this_cpu;
900
901 first = first_cpu(*mask);
902 if (first != NR_CPUS)
903 return first;
904
905 return -1;
906}
907
908static int find_lowest_rq(struct task_struct *task)
909{
910 struct sched_domain *sd;
911 cpumask_t *lowest_mask = &__get_cpu_var(local_cpu_mask);
912 int this_cpu = smp_processor_id();
913 int cpu = task_cpu(task);
914
915 if (task->rt.nr_cpus_allowed == 1)
916 return -1; /* No other targets possible */
917
918 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
919 return -1; /* No targets found */
920
921 /*
922 * At this point we have built a mask of cpus representing the
923 * lowest priority tasks in the system. Now we want to elect
924 * the best one based on our affinity and topology.
925 *
926 * We prioritize the last cpu that the task executed on since
927 * it is most likely cache-hot in that location.
928 */
929 if (cpu_isset(cpu, *lowest_mask))
930 return cpu;
931
932 /*
933 * Otherwise, we consult the sched_domains span maps to figure
934 * out which cpu is logically closest to our hot cache data.
935 */
936 if (this_cpu == cpu)
937 this_cpu = -1; /* Skip this_cpu opt if the same */
938
939 for_each_domain(cpu, sd) {
940 if (sd->flags & SD_WAKE_AFFINE) {
941 cpumask_t domain_mask;
942 int best_cpu;
943
944 cpus_and(domain_mask, sd->span, *lowest_mask);
945
946 best_cpu = pick_optimal_cpu(this_cpu,
947 &domain_mask);
948 if (best_cpu != -1)
949 return best_cpu;
950 }
951 }
952
953 /*
954 * And finally, if there were no matches within the domains
955 * just give the caller *something* to work with from the compatible
956 * locations.
957 */
958 return pick_optimal_cpu(this_cpu, lowest_mask);
959}
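/*
 * To summarize the selection order: prefer the task's current cpu if
 * it is in the lowest-priority mask (cache-hot), then the closest cpu
 * found by walking the SD_WAKE_AFFINE domains (preferring this_cpu
 * within each intersection), and finally any remaining cpu in the
 * mask.  A return of -1 means there is no better place to push to.
 */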
960
961/* Will lock the rq it finds */
962static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
963{
964 struct rq *lowest_rq = NULL;
965 int tries;
966 int cpu;
967
968 for (tries = 0; tries < RT_MAX_TRIES; tries++) {
969 cpu = find_lowest_rq(task);
970
971 if ((cpu == -1) || (cpu == rq->cpu))
972 break;
973
974 lowest_rq = cpu_rq(cpu);
975
976 /* if the prio of this runqueue changed, try again */
977 if (double_lock_balance(rq, lowest_rq)) {
978 /*
979 * We had to unlock the run queue. In
980 * the meantime, the task could have
981 * migrated already or had its affinity changed.
982 * Also make sure that it wasn't scheduled on its rq.
983 */
984 if (unlikely(task_rq(task) != rq ||
985 !cpu_isset(lowest_rq->cpu,
986 task->cpus_allowed) ||
987 task_running(rq, task) ||
988 !task->se.on_rq)) {
989
990 spin_unlock(&lowest_rq->lock);
991 lowest_rq = NULL;
992 break;
993 }
994 }
995
996 /* If this rq is still suitable use it. */
997 if (lowest_rq->rt.highest_prio > task->prio)
998 break;
999
1000 /* try again */
1001 spin_unlock(&lowest_rq->lock);
1002 lowest_rq = NULL;
1003 }
1004
1005 return lowest_rq;
1006}
1007
1008/*
1009 * If the current CPU has more than one RT task, see if the
1010 * non-running task can migrate over to a CPU that is running a task
1011 * of lesser priority.
1012 */
1013static int push_rt_task(struct rq *rq)
1014{
1015 struct task_struct *next_task;
1016 struct rq *lowest_rq;
1017 int ret = 0;
1018 int paranoid = RT_MAX_TRIES;
1019
1020 if (!rq->rt.overloaded)
1021 return 0;
1022
1023 next_task = pick_next_highest_task_rt(rq, -1);
1024 if (!next_task)
1025 return 0;
1026
1027 retry:
1028 if (unlikely(next_task == rq->curr)) {
1029 WARN_ON(1);
1030 return 0;
1031 }
1032
1033 /*
1034 * It's possible that the next_task slipped in at a
1035 * higher priority than current. If that's the case
1036 * just reschedule current.
1037 */
1038 if (unlikely(next_task->prio < rq->curr->prio)) {
1039 resched_task(rq->curr);
1040 return 0;
1041 }
1042
1043 /* We might release rq lock */
1044 get_task_struct(next_task);
1045
1046 /* find_lock_lowest_rq locks the rq if found */
1047 lowest_rq = find_lock_lowest_rq(next_task, rq);
1048 if (!lowest_rq) {
1049 struct task_struct *task;
1050 /*
1051 * find_lock_lowest_rq releases rq->lock
1052 * so it is possible that next_task has changed.
1053 * If it has, then try again.
1054 */
1055 task = pick_next_highest_task_rt(rq, -1);
1056 if (unlikely(task != next_task) && task && paranoid--) {
1057 put_task_struct(next_task);
1058 next_task = task;
1059 goto retry;
1060 }
1061 goto out;
1062 }
1063
1064 deactivate_task(rq, next_task, 0);
1065 set_task_cpu(next_task, lowest_rq->cpu);
1066 activate_task(lowest_rq, next_task, 0);
1067
1068 resched_task(lowest_rq->curr);
1069
1070 spin_unlock(&lowest_rq->lock);
1071
1072 ret = 1;
1073out:
1074 put_task_struct(next_task);
1075
1076 return ret;
1077}
1078
1079/*
1080 * TODO: Currently we just use the second highest prio task on
1081 * the queue, and stop when it can't migrate (or there's
1082 * no more RT tasks). There may be a case where a lower
1083 * priority RT task has a different affinity than the
1084 * higher RT task. In this case the lower RT task could
1085 * possibly be able to migrate whereas the higher priority
1086 * RT task could not. We currently ignore this issue.
1087 * Enhancements are welcome!
1088 */
1089static void push_rt_tasks(struct rq *rq)
1090{
1091 /* push_rt_task will return true if it moved an RT */
1092 while (push_rt_task(rq))
1093 ;
1094}
1095
1096static int pull_rt_task(struct rq *this_rq)
1097{
1098 int this_cpu = this_rq->cpu, ret = 0, cpu;
1099 struct task_struct *p, *next;
1100 struct rq *src_rq;
1101
1102 if (likely(!rt_overloaded(this_rq)))
1103 return 0;
1104
1105 next = pick_next_task_rt(this_rq);
1106
1107 for_each_cpu_mask(cpu, this_rq->rd->rto_mask) {
1108 if (this_cpu == cpu)
1109 continue;
1110
1111 src_rq = cpu_rq(cpu);
1112 /*
1113 * We can potentially drop this_rq's lock in
1114 * double_lock_balance, and another CPU could
1115 * steal our next task - hence we must cause
1116 * the caller to recalculate the next task
1117 * in that case:
1118 */
1119 if (double_lock_balance(this_rq, src_rq)) {
1120 struct task_struct *old_next = next;
1121
1122 next = pick_next_task_rt(this_rq);
1123 if (next != old_next)
1124 ret = 1;
1125 }
1126
1127 /*
1128 * Are there still pullable RT tasks?
1129 */
1130 if (src_rq->rt.rt_nr_running <= 1)
1131 goto skip;
1132
1133 p = pick_next_highest_task_rt(src_rq, this_cpu);
1134
1135 /*
1136 * Do we have an RT task that preempts
1137 * the to-be-scheduled task?
1138 */
1139 if (p && (!next || (p->prio < next->prio))) {
1140 WARN_ON(p == src_rq->curr);
1141 WARN_ON(!p->se.on_rq);
1142
1143 /*
1144 * There's a chance that p is higher in priority
1145 * than what's currently running on its cpu.
1146 * This is just that p is waking up and hasn't
1147 * had a chance to schedule. We only pull
1148 * p if it is lower in priority than the
1149 * current task on the run queue or
1150 * this_rq next task is lower in prio than
1151 * the current task on that rq.
1152 */
1153 if (p->prio < src_rq->curr->prio ||
1154 (next && next->prio < src_rq->curr->prio))
1155 goto skip;
1156
1157 ret = 1;
1158
1159 deactivate_task(src_rq, p, 0);
1160 set_task_cpu(p, this_cpu);
1161 activate_task(this_rq, p, 0);
1162 /*
1163 * We continue with the search, just in
1164 * case there's an even higher prio task
1165 * in another runqueue. (low likelihood
1166 * but possible)
1167 *
1168 * Update next so that we won't pick a task
1169 * on another cpu with a priority lower (or equal)
1170 * than the one we just picked.
1171 */
1172 next = p;
1173
1174 }
1175 skip:
1176 spin_unlock(&src_rq->lock);
1177 }
1178
1179 return ret;
1180}
1181
1182static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
1183{
1184 /* Try to pull RT tasks here if we lower this rq's prio */
1185 if (unlikely(rt_task(prev)) && rq->rt.highest_prio > prev->prio)
1186 pull_rt_task(rq);
1187}
1188
1189static void post_schedule_rt(struct rq *rq)
1190{
1191 /*
1192 * If we have more than one rt_task queued, then
1193 * see if we can push the other rt_tasks off to other CPUS.
1194 * Note we may release the rq lock, and since
1195 * the lock was owned by prev, we need to release it
1196 * first via finish_lock_switch and then reacquire it here.
1197 */
1198 if (unlikely(rq->rt.overloaded)) {
1199 spin_lock_irq(&rq->lock);
1200 push_rt_tasks(rq);
1201 spin_unlock_irq(&rq->lock);
1202 }
1203}
1204
1205/*
1206 * If we are not running and we are not going to reschedule soon, we should
1207 * try to push tasks away now
1208 */
1209static void task_wake_up_rt(struct rq *rq, struct task_struct *p)
1210{
1211 if (!task_running(rq, p) &&
1212 !test_tsk_need_resched(rq->curr) &&
1213 rq->rt.overloaded)
1214 push_rt_tasks(rq);
1215}
1216
1217static unsigned long
1218load_balance_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1219 unsigned long max_load_move,
1220 struct sched_domain *sd, enum cpu_idle_type idle,
1221 int *all_pinned, int *this_best_prio)
1222{
1223 /* don't touch RT tasks */
1224 return 0;
1225}
1226
1227static int
1228move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest,
1229 struct sched_domain *sd, enum cpu_idle_type idle)
1230{
1231 /* don't touch RT tasks */
1232 return 0;
1233}
1234
1235static void set_cpus_allowed_rt(struct task_struct *p,
1236 const cpumask_t *new_mask)
1237{
1238 int weight = cpus_weight(*new_mask);
1239
1240 BUG_ON(!rt_task(p));
1241
1242 /*
1243 * Update the migration status of the RQ if we have an RT task
1244 * which is running AND changing its weight value.
1245 */
1246 if (p->se.on_rq && (weight != p->rt.nr_cpus_allowed)) {
1247 struct rq *rq = task_rq(p);
1248
1249 if ((p->rt.nr_cpus_allowed <= 1) && (weight > 1)) {
1250 rq->rt.rt_nr_migratory++;
1251 } else if ((p->rt.nr_cpus_allowed > 1) && (weight <= 1)) {
1252 BUG_ON(!rq->rt.rt_nr_migratory);
1253 rq->rt.rt_nr_migratory--;
1254 }
1255
1256 update_rt_migration(rq);
1257 }
1258
1259 p->cpus_allowed = *new_mask;
1260 p->rt.nr_cpus_allowed = weight;
1261}
1262
1263/* Assumes rq->lock is held */
1264static void rq_online_rt(struct rq *rq)
1265{
1266 if (rq->rt.overloaded)
1267 rt_set_overload(rq);
1268
1269 __enable_runtime(rq);
1270
1271 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio);
1272}
1273
1274/* Assumes rq->lock is held */
1275static void rq_offline_rt(struct rq *rq)
1276{
1277 if (rq->rt.overloaded)
1278 rt_clear_overload(rq);
1279
1280 __disable_runtime(rq);
1281
1282 cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID);
1283}
1284
1285/*
1286 * When we switch away from the rt queue, we end up in a position
1287 * where we might want to pull RT tasks from other runqueues.
1288 */
1289static void switched_from_rt(struct rq *rq, struct task_struct *p,
1290 int running)
1291{
1292 /*
1293 * If there are other RT tasks then we will reschedule
1294 * and the scheduling of the other RT tasks will handle
1295 * the balancing. But if we are the last RT task
1296 * we may need to handle the pulling of RT tasks
1297 * now.
1298 */
1299 if (!rq->rt.rt_nr_running)
1300 pull_rt_task(rq);
1301}
1302#endif /* CONFIG_SMP */
1303
1304/*
1305 * When switching a task to RT, we may overload the runqueue
1306 * with RT tasks. In this case we try to push them off to
1307 * other runqueues.
1308 */
1309static void switched_to_rt(struct rq *rq, struct task_struct *p,
1310 int running)
1311{
1312 int check_resched = 1;
1313
1314 /*
1315 * If we are already running, then there's nothing
1316 * that needs to be done. But if we are not running
1317 * we may need to preempt the current running task.
1318 * If that current running task is also an RT task
1319 * then see if we can move to another run queue.
1320 */
1321 if (!running) {
1322#ifdef CONFIG_SMP
1323 if (rq->rt.overloaded && push_rt_task(rq) &&
1324 /* Don't resched if we changed runqueues */
1325 rq != task_rq(p))
1326 check_resched = 0;
1327#endif /* CONFIG_SMP */
1328 if (check_resched && p->prio < rq->curr->prio)
1329 resched_task(rq->curr);
1330 }
1331}
1332
1333/*
1334 * Priority of the task has changed. This may cause
1335 * us to initiate a push or pull.
1336 */
1337static void prio_changed_rt(struct rq *rq, struct task_struct *p,
1338 int oldprio, int running)
1339{
1340 if (running) {
1341#ifdef CONFIG_SMP
1342 /*
1343 * If our priority decreases while running, we
1344 * may need to pull tasks to this runqueue.
1345 */
1346 if (oldprio < p->prio)
1347 pull_rt_task(rq);
1348 /*
1349 * If there's a higher priority task waiting to run
1350 * then reschedule. Note, the above pull_rt_task
1351 * can release the rq lock and p could migrate.
1352 * Only reschedule if p is still on the same runqueue.
1353 */
1354 if (p->prio > rq->rt.highest_prio && rq->curr == p)
1355 resched_task(p);
1356#else
1357 /* For UP simply resched on drop of prio */
1358 if (oldprio < p->prio)
1359 resched_task(p);
1360#endif /* CONFIG_SMP */
1361 } else {
1362 /*
1363 * This task is not running, but if its
1364 * priority is higher than that of the current
1365 * running task, then reschedule.
1366 */
1367 if (p->prio < rq->curr->prio)
1368 resched_task(rq->curr);
1369 }
1370}
1371
1372static void watchdog(struct rq *rq, struct task_struct *p)
1373{
1374 unsigned long soft, hard;
1375
1376 if (!p->signal)
1377 return;
1378
1379 soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
1380 hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
1381
1382 if (soft != RLIM_INFINITY) {
1383 unsigned long next;
1384
1385 p->rt.timeout++;
1386 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1387 if (p->rt.timeout > next)
1388 p->it_sched_expires = p->se.sum_exec_runtime;
1389 }
1390}
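/*
 * RLIMIT_RTTIME is expressed in microseconds while p->rt.timeout
 * counts scheduler ticks, hence the DIV_ROUND_UP(min(soft, hard),
 * USEC_PER_SEC/HZ) conversion; e.g. with HZ=1000 a soft limit of
 * 500000us becomes 500 ticks.  Once the timeout exceeds that,
 * it_sched_expires is set so that the posix cpu-timer code notices the
 * overrun and enforces the limit (SIGXCPU at the soft limit, SIGKILL
 * at the hard limit, as documented for RLIMIT_RTTIME).
 */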
1391
1392static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1393{
1394 update_curr_rt(rq);
1395
1396 watchdog(rq, p);
1397
1398 /*
1399 * RR tasks need a special form of timeslice management.
1400 * FIFO tasks have no timeslices.
1401 */
1402 if (p->policy != SCHED_RR)
1403 return;
1404
1405 if (--p->rt.time_slice)
1406 return;
1407
1408 p->rt.time_slice = DEF_TIMESLICE;
1409
1410 /*
1411 * Requeue to the end of queue if we are not the only element
1412 * on the queue:
1413 */
1414 if (p->rt.run_list.prev != p->rt.run_list.next) {
1415 requeue_task_rt(rq, p);
1416 set_tsk_need_resched(p);
1417 }
1418}
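/*
 * Only SCHED_RR tasks round-robin here: the slice is refilled with
 * DEF_TIMESLICE (100ms expressed in ticks) and the task is moved to
 * the tail of its priority list so that equal-priority RR tasks take
 * turns.  SCHED_FIFO tasks have no timeslice and run until they
 * block, yield or are preempted by a higher-priority task.
 */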
1419
1420static void set_curr_task_rt(struct rq *rq)
1421{
1422 struct task_struct *p = rq->curr;
1423
1424 p->se.exec_start = rq->clock;
1425}
1426
1427static const struct sched_class rt_sched_class = {
1428 .next = &fair_sched_class,
1429 .enqueue_task = enqueue_task_rt,
1430 .dequeue_task = dequeue_task_rt,
1431 .yield_task = yield_task_rt,
1432#ifdef CONFIG_SMP
1433 .select_task_rq = select_task_rq_rt,
1434#endif /* CONFIG_SMP */
1435
1436 .check_preempt_curr = check_preempt_curr_rt,
1437
1438 .pick_next_task = pick_next_task_rt,
1439 .put_prev_task = put_prev_task_rt,
1440
1441#ifdef CONFIG_SMP
1442 .load_balance = load_balance_rt,
1443 .move_one_task = move_one_task_rt,
1444 .set_cpus_allowed = set_cpus_allowed_rt,
1445 .rq_online = rq_online_rt,
1446 .rq_offline = rq_offline_rt,
1447 .pre_schedule = pre_schedule_rt,
1448 .post_schedule = post_schedule_rt,
1449 .task_wake_up = task_wake_up_rt,
1450 .switched_from = switched_from_rt,
1451#endif
1452
1453 .set_curr_task = set_curr_task_rt,
1454 .task_tick = task_tick_rt,
1455
1456 .prio_changed = prio_changed_rt,
1457 .switched_to = switched_to_rt,
1458};
1459
1460#ifdef CONFIG_SCHED_DEBUG
1461extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq);
1462
1463static void print_rt_stats(struct seq_file *m, int cpu)
1464{
1465 struct rt_rq *rt_rq;
1466
1467 rcu_read_lock();
1468 for_each_leaf_rt_rq(rt_rq, cpu_rq(cpu))
1469 print_rt_rq(m, cpu, rt_rq);
1470 rcu_read_unlock();
1471}
1472#endif