cpuidle: Allow enforcing deepest idle state selection
linux-2.6-block.git: kernel/sched/idle.c
/*
 * Generic entry point for the idle threads
 */
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpuhotplug.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>

#include <asm/tlb.h>

#include <trace/events/power.h>

#include "sched.h"

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
	idle_set_state(this_rq(), idle_state);
}

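/*
 * Usage sketch (not part of this file): the cpuidle core is expected to
 * bracket the low-level state entry with these calls so the scheduler
 * always knows which idle state, if any, the CPU is in.  Roughly
 * (simplified, from the cpuidle driver side):
 *
 *	sched_idle_set_state(target_state);
 *	entered = target_state->enter(dev, drv, index);
 *	sched_idle_set_state(NULL);
 */
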
static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
	if (enable) {
		cpu_idle_force_poll++;
	} else {
		cpu_idle_force_poll--;
		WARN_ON_ONCE(cpu_idle_force_poll < 0);
	}
}

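/*
 * Usage sketch (not part of this file): cpu_idle_force_poll is a
 * reference count, so callers of cpu_idle_poll_ctrl() must balance
 * every enable with a disable.  A hypothetical driver needing very fast
 * wakeups around a critical window might do:
 *
 *	cpu_idle_poll_ctrl(true);
 *	... latency-critical work, idle CPUs keep polling ...
 *	cpu_idle_poll_ctrl(false);
 */
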
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
	cpu_idle_force_poll = 1;
	return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
	cpu_idle_force_poll = 0;
	return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

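/*
 * Example (not part of this file): on architectures that select
 * CONFIG_GENERIC_IDLE_POLL_SETUP, the two handlers above are wired to
 * the kernel command line, e.g. booting with
 *
 *	... root=/dev/sda1 nohlt
 *
 * forces the polling idle loop, while "hlt" restores the default
 * behaviour.
 */
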
static noinline int __cpuidle cpu_idle_poll(void)
{
	rcu_idle_enter();
	trace_cpu_idle_rcuidle(0, smp_processor_id());
	local_irq_enable();
	stop_critical_timings();
	while (!tif_need_resched() &&
	       (cpu_idle_force_poll || tick_check_broadcast_expired()))
		cpu_relax();
	start_critical_timings();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
	rcu_idle_exit();
	return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
void __weak arch_cpu_idle(void)
{
	cpu_idle_force_poll = 1;
	local_irq_enable();
}

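/*
 * Hypothetical sketch (not part of this file): an architecture normally
 * overrides the weak arch_cpu_idle() above with its low-power wait
 * instruction; the contract is that it returns with interrupts enabled.
 * arch_wait_for_interrupt() is a made-up placeholder for hlt/wfi/wait
 * or similar.
 */
#if 0	/* illustration only */
void arch_cpu_idle(void)
{
	arch_wait_for_interrupt();	/* sleep until the next interrupt */
	local_irq_enable();
}
#endif
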
/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
	if (current_clr_polling_and_test()) {
		local_irq_enable();
	} else {
		stop_critical_timings();
		arch_cpu_idle();
		start_critical_timings();
	}
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			int next_state)
{
	/*
	 * The idle task must be rescheduled, so it is pointless to enter
	 * idle; record a zero idle residency and return.
	 */
	if (current_clr_polling_and_test()) {
		dev->last_residency = 0;
		local_irq_enable();
		return -EBUSY;
	}

	/*
	 * Enter the idle state previously returned by the governor's
	 * decision.  This function will block until an interrupt occurs
	 * and will take care of re-enabling the local interrupts.
	 */
	return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called
 * with polling set, and it returns with polling set. If it ever stops
 * polling, it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
	struct cpuidle_device *dev = cpuidle_get_device();
	struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
	int next_state, entered_state;

	/*
	 * Check if the idle task must be rescheduled. If so, exit the
	 * function after re-enabling the local interrupts.
	 */
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	/*
	 * Tell the RCU framework we are entering an idle section: no
	 * more RCU read-side critical sections, and one step closer to
	 * the next grace period.
	 */
	rcu_idle_enter();

	if (cpuidle_not_available(drv, dev)) {
		default_idle_call();
		goto exit_idle;
	}

	/*
	 * Suspend-to-idle ("freeze") is a system state in which all user space
	 * has been frozen, all I/O devices have been suspended and the only
	 * activity happens here and in interrupts (if any). In that case bypass
	 * the cpuidle governor and go straight for the deepest idle state
	 * available. Possibly also suspend the local tick and the entire
	 * timekeeping to prevent timer interrupts from kicking us out of idle
	 * until a proper wakeup interrupt happens.
	 */
	if (idle_should_freeze() || dev->use_deepest_state) {
		if (idle_should_freeze()) {
			entered_state = cpuidle_enter_freeze(drv, dev);
			if (entered_state > 0) {
				local_irq_enable();
				goto exit_idle;
			}
		}

		next_state = cpuidle_find_deepest_state(drv, dev);
		call_cpuidle(drv, dev, next_state);
	} else {
		/*
		 * Ask the cpuidle framework to choose a convenient idle state.
		 */
		next_state = cpuidle_select(drv, dev);
		entered_state = call_cpuidle(drv, dev, next_state);
		/*
		 * Give the governor an opportunity to reflect on the outcome.
		 */
		cpuidle_reflect(dev, entered_state);
	}

exit_idle:
	__current_set_polling();

	/*
	 * It is up to the idle functions to re-enable the local interrupts.
	 */
	if (WARN_ON_ONCE(irqs_disabled()))
		local_irq_enable();

	rcu_idle_exit();
}

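/*
 * Usage sketch (not part of this file): the dev->use_deepest_state check
 * above is what the commit named in the title enables.  A kernel-side
 * user that wants a CPU parked in its deepest available idle state,
 * bypassing the governor, is expected to set that flag through the
 * cpuidle core, roughly:
 *
 *	cpuidle_use_deepest_state(true);
 *	... keep the CPU in the idle loop for the quiescent period ...
 *	cpuidle_use_deepest_state(false);
 *
 * (The helper name is from the corresponding cpuidle-side patch; treat
 * it as an assumption here.)
 */
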
/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void cpu_idle_loop(void)
{
	int cpu = smp_processor_id();

	while (1) {
		/*
		 * If the arch has a polling bit, we maintain an invariant:
		 *
		 * Our polling bit is clear if we're not scheduled (i.e. if
		 * rq->curr != rq->idle). This means that, if rq->idle has
		 * the polling bit set, then setting need_resched is
		 * guaranteed to cause the cpu to reschedule.
		 */

		__current_set_polling();
		quiet_vmstat();
		tick_nohz_idle_enter();

		while (!need_resched()) {
			check_pgt_cache();
			rmb();

			if (cpu_is_offline(cpu)) {
				cpuhp_report_idle_dead();
				arch_cpu_idle_dead();
			}

			local_irq_disable();
			arch_cpu_idle_enter();

			/*
			 * In poll mode we re-enable interrupts and spin.
			 *
			 * Also, if we detected in the wakeup-from-idle path
			 * that the tick broadcast device expired for us, we
			 * don't want to go deep idle, as we know that the
			 * IPI is going to arrive right away.
			 */
			if (cpu_idle_force_poll || tick_check_broadcast_expired())
				cpu_idle_poll();
			else
				cpuidle_idle_call();

			arch_cpu_idle_exit();
		}

		/*
		 * Since we fell out of the loop above, we know
		 * TIF_NEED_RESCHED must be set, propagate it into
		 * PREEMPT_NEED_RESCHED.
		 *
		 * This is required because for polling idle loops we will
		 * not have had an IPI to fold the state for us.
		 */
		preempt_set_need_resched();
		tick_nohz_idle_exit();
		__current_clr_polling();

		/*
		 * We promise to call sched_ttwu_pending() and reschedule
		 * if need_resched() is set while polling is set. That
		 * means that clearing polling needs to be visible
		 * before doing these things.
		 */
		smp_mb__after_atomic();

		sched_ttwu_pending();
		schedule_preempt_disabled();
	}
}

bool cpu_in_idle(unsigned long pc)
{
	return pc >= (unsigned long)__cpuidle_text_start &&
		pc < (unsigned long)__cpuidle_text_end;
}

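/*
 * Usage sketch (not part of this file): cpu_in_idle() lets diagnostic
 * code such as an NMI backtrace handler treat CPUs sitting in the
 * __cpuidle text section as "just idling" instead of dumping a stack
 * for them, roughly:
 *
 *	if (regs && cpu_in_idle(instruction_pointer(regs)))
 *		pr_warn("CPU %d idling at pc %#lx, backtrace skipped\n",
 *			cpu, instruction_pointer(regs));
 */
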
void cpu_startup_entry(enum cpuhp_state state)
{
	/*
	 * This #ifdef needs to die, but it's too late in the cycle to
	 * make this generic (arm and sh have never invoked the canary
	 * init for the non-boot CPUs!). Will be fixed in 3.11
	 */
#ifdef CONFIG_X86
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. The boot CPU already has it initialized but no harm
	 * in doing it again. This is a good place for updating it, as
	 * we won't ever return from this function (so the invalid
	 * canaries already on the stack won't ever trigger).
	 */
	boot_init_stack_canary();
#endif
	arch_cpu_idle_prepare();
	cpuhp_online_idle(state);
	cpu_idle_loop();
}