[linux-2.6-block.git] / kernel/sched/idle.c
/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

static int __read_mostly cpu_idle_force_poll;

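/*
 * cpu_idle_poll_ctrl - enable or disable forced polling idle
 *
 * Each enable call bumps cpu_idle_force_poll and each disable call drops
 * it; while the count is non-zero the idle loop spins in cpu_idle_poll()
 * instead of calling into cpuidle.
 */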
void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

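/*
 * Boot-time overrides: "nohlt" forces polling idle, "hlt" restores the
 * default (cpuidle / arch idle) behaviour.
 */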
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

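/*
 * Polling idle: with interrupts enabled, spin on cpu_relax() until a
 * reschedule is pending; RCU is told the CPU is idle for the duration.
 */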
static inline int cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	while (!tif_need_resched())
		cpu_relax();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
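/*
 * Default arch_cpu_idle(): used when the architecture provides no idle
 * routine; fall back to forced polling and re-enable interrupts.
 */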
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called with
 * polling set, and it returns with polling set. If it ever stops polling,
 * it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;
	unsigned int broadcast;

	/*
	 * Check if the idle task must be rescheduled. If so, exit the
	 * function after re-enabling the local irq.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * During the idle period, stop measuring the latencies of
	 * critical sections that have irqs disabled.
	 */
	stop_critical_timings();

	/*
	 * Tell the RCU framework we are entering an idle section,
	 * so no more rcu read side critical sections and one more
	 * step to the grace period
	 */
	rcu_idle_enter();

	/*
	 * Ask the cpuidle framework to choose a convenient idle state.
	 * Fall back to the default arch idle method on errors.
	 */
	next_state = cpuidle_select(drv, dev);
	if (next_state < 0) {
use_default:
		/*
		 * We can't use the cpuidle framework, let's use the default
		 * idle routine.
		 */
		if (current_clr_polling_and_test())
			local_irq_enable();
		else
			arch_cpu_idle();

		goto exit_idle;
	}

	/*
	 * The idle task must be rescheduled: it is pointless to go idle.
	 * Record a zero idle residency and get out of this function.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		entered_state = next_state;
		local_irq_enable();
		goto exit_idle;
	}

	broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;

	/*
	 * Tell the time framework to switch to a broadcast timer because
	 * our local timer will be shut down. If a local timer is used from
	 * another cpu as a broadcast timer, this call may fail if it is
	 * not available.
	 */
	if (broadcast &&
	    clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &dev->cpu))
		goto use_default;

	/*
	 * Enter the idle state previously returned by the governor decision.
	 * This function will block until an interrupt occurs and will take
	 * care of re-enabling the local interrupts.
	 */
	entered_state = cpuidle_enter(drv, dev, next_state);

	if (broadcast)
		clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &dev->cpu);

	/*
	 * Give the governor an opportunity to reflect on the outcome.
	 */
	cpuidle_reflect(dev, entered_state);

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to reenable local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
	start_critical_timings();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle). This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		tick_nohz_idle_enter();

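		/*
		 * Idle until a reschedule is required: each pass either
		 * polls or lets cpuidle pick a low-power state, bracketed
		 * by the arch enter/exit hooks.
		 */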
		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(smp_processor_id()))
				arch_cpu_idle_dead();

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we reenable interrupts and spin.
			 *
			 * Also, if we detected in the wakeup-from-idle path
			 * that the tick broadcast device expired for us, we
			 * don't want to go into deep idle, as we know the
			 * IPI is going to arrive right away.
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set; propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending() and reschedule
		 * if need_resched() is set while polling is set. That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb__after_atomic();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}

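/*
 * cpu_startup_entry - enter the generic idle loop on this CPU
 *
 * Runs the arch prepare hook and then cpu_idle_loop(), which never
 * returns.
 */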
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non boot cpus!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpu_idle_loop();
}