sched/fair: Push down check for high priority class task into idle_balance()
[linux-2.6-block.git] / kernel / sched / idle_task.c
CommitLineData
029632fb
PZ
1#include "sched.h"
2
fa72e9e4
IM
3/*
4 * idle-task scheduling class.
5 *
6 * (NOTE: these are not related to SCHED_IDLE tasks which are
489a71b0 7 * handled in sched/fair.c)
fa72e9e4
IM
8 */
9
#ifdef CONFIG_SMP
/*
 * Wake-up CPU selection for the idle task: there is exactly one idle
 * task per CPU, so it always stays where it is.
 */
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
	return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif /* CONFIG_SMP */
38033c37 17
fa72e9e4
IM
/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
	/* Any newly runnable task always preempts the idle thread. */
	resched_task(rq->idle);
}
25
/*
 * Pick the per-CPU idle thread as the next task to run.
 * Called with rq->lock held.
 */
static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev)
{
	/* Retire the outgoing task before handing the CPU to the idle thread. */
	put_prev_task(rq, prev);

	/* Account one transition to idle in the schedstats. */
	schedstat_inc(rq, sched_goidle);
	return rq->idle;
}
34
/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
	/*
	 * Drop rq->lock around printk()/dump_stack(): the warning path may
	 * sleep or take other locks, and we must not do that while holding
	 * the runqueue lock with interrupts disabled.
	 */
	raw_spin_unlock_irq(&rq->lock);
	printk(KERN_ERR "bad: scheduling from the idle thread!\n");
	dump_stack();
	raw_spin_lock_irq(&rq->lock);
}
47
/*
 * The idle thread is being switched out: update the load-tracking
 * bookkeeping and reset the last-tick state used by NO_HZ full.
 */
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
	idle_exit_fair(rq);
	rq_last_tick_reset(rq);
}
53
/*
 * Scheduler tick while the idle task is running: intentionally a no-op,
 * the idle task has no timeslice or vruntime to account.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}
57
/* Nothing to (re)initialize when the idle task becomes rq->curr. */
static void set_curr_task_idle(struct rq *rq)
{
}
61
/*
 * No task may ever be moved into the idle scheduling class
 * (there is no enqueue path) - getting here is a kernel bug.
 */
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
	BUG();
}
66
/*
 * The idle task's priority can never change - getting here is a
 * kernel bug.
 */
static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
	BUG();
}
72
/* The idle task has no round-robin timeslice: report 0. */
static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
	return 0;
}
77
/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
	/* .next is NULL */
	/* no enqueue/yield_task for idle tasks */

	/* dequeue is not valid, we print a debug message there: */
	.dequeue_task		= dequeue_task_idle,

	.check_preempt_curr	= check_preempt_curr_idle,

	.pick_next_task		= pick_next_task_idle,
	.put_prev_task		= put_prev_task_idle,

#ifdef CONFIG_SMP
	.select_task_rq		= select_task_rq_idle,
#endif

	.set_curr_task          = set_curr_task_idle,
	.task_tick		= task_tick_idle,

	.get_rr_interval	= get_rr_interval_idle,

	/* prio changes / class switches into idle are bugs (see BUG() above) */
	.prio_changed		= prio_changed_idle,
	.switched_to		= switched_to_idle,
};