[linux-2.6-block.git] / kernel/sched/idle.c
/*
 * Generic entry points for the idle threads and
 * implementation of the idle task scheduling class.
 *
 * (NOTE: these are not related to SCHED_IDLE batch scheduled
 *        tasks which are handled in sched/fair.c )
 */
#include "sched.h"

#include <trace/events/power.h>

/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];

/**
 * sched_idle_set_state - Record idle state for the current CPU.
 * @idle_state: State to record.
 */
void sched_idle_set_state(struct cpuidle_state *idle_state)
{
        idle_set_state(this_rq(), idle_state);
}

static int __read_mostly cpu_idle_force_poll;

void cpu_idle_poll_ctrl(bool enable)
{
        if (enable) {
                cpu_idle_force_poll++;
        } else {
                cpu_idle_force_poll--;
                WARN_ON_ONCE(cpu_idle_force_poll < 0);
        }
}
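
/*
 * Nesting example (hypothetical caller): a driver that cannot tolerate
 * deep-idle wakeup latency may bracket a critical window with:
 *
 *      cpu_idle_poll_ctrl(true);
 *      ... latency-critical section ...
 *      cpu_idle_poll_ctrl(false);
 *
 * Because the control is a counter rather than a flag, enable/disable
 * pairs from independent users nest correctly.
 */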
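
/*
 * Boot-time control: passing "nohlt" on the kernel command line forces
 * the polling idle loop; "hlt" restores the default (halting) idle path.
 */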
#ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
static int __init cpu_idle_poll_setup(char *__unused)
{
        cpu_idle_force_poll = 1;

        return 1;
}
__setup("nohlt", cpu_idle_poll_setup);

static int __init cpu_idle_nopoll_setup(char *__unused)
{
        cpu_idle_force_poll = 0;

        return 1;
}
__setup("hlt", cpu_idle_nopoll_setup);
#endif

static noinline int __cpuidle cpu_idle_poll(void)
{
        rcu_idle_enter();
        trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        stop_critical_timings();

        while (!tif_need_resched() &&
               (cpu_idle_force_poll || tick_check_broadcast_expired()))
                cpu_relax();
        start_critical_timings();
        trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        rcu_idle_exit();

        return 1;
}

/* Weak implementations for optional arch specific functions */
void __weak arch_cpu_idle_prepare(void) { }
void __weak arch_cpu_idle_enter(void) { }
void __weak arch_cpu_idle_exit(void) { }
void __weak arch_cpu_idle_dead(void) { }
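/*
 * Fallback for architectures that do not provide a real idle routine:
 * switch to the polling idle loop and leave interrupts enabled.
 */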
void __weak arch_cpu_idle(void)
{
        cpu_idle_force_poll = 1;
        local_irq_enable();
}

/**
 * default_idle_call - Default CPU idle routine.
 *
 * To use when the cpuidle framework cannot be used.
 */
void __cpuidle default_idle_call(void)
{
        if (current_clr_polling_and_test()) {
                local_irq_enable();
        } else {
                stop_critical_timings();
                arch_cpu_idle();
                start_critical_timings();
        }
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
                        int next_state)
{
        /*
         * If the idle task must be rescheduled, entering idle is pointless:
         * record zero idle residency and return.
         */
        if (current_clr_polling_and_test()) {
                dev->last_residency = 0;
                local_irq_enable();
                return -EBUSY;
        }

        /*
         * Enter the idle state previously returned by the governor decision.
         * This function will block until an interrupt occurs and will take
         * care of re-enabling the local interrupts.
         */
        return cpuidle_enter(drv, dev, next_state);
}

/**
 * cpuidle_idle_call - the main idle function
 *
 * NOTE: no locks or semaphores should be used here
 *
 * On archs that support TIF_POLLING_NRFLAG, this function is called with
 * polling set, and it returns with polling set. If it ever stops polling,
 * it must clear the polling bit.
 */
static void cpuidle_idle_call(void)
{
        struct cpuidle_device *dev = cpuidle_get_device();
        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
        int next_state, entered_state;

        /*
         * Check if the idle task must be rescheduled. If it is the
         * case, exit the function after re-enabling the local irq.
         */
        if (need_resched()) {
                local_irq_enable();
                return;
        }

        /*
         * Tell the RCU framework that we are entering an idle section,
         * so no more rcu read side critical sections and one more step
         * to the grace period.
         */

        if (cpuidle_not_available(drv, dev)) {
                tick_nohz_idle_stop_tick();
                rcu_idle_enter();

                default_idle_call();
                goto exit_idle;
        }

        /*
         * Suspend-to-idle ("s2idle") is a system state in which all user space
         * has been frozen, all I/O devices have been suspended and the only
         * activity happens here and in interrupts (if any). In that case,
         * bypass the cpuidle governor and go straight for the deepest idle
         * state available. Possibly also suspend the local tick and the entire
         * timekeeping to prevent timer interrupts from kicking us out of idle
         * until a proper wakeup interrupt happens.
         */

        if (idle_should_enter_s2idle() || dev->use_deepest_state) {
                if (idle_should_enter_s2idle()) {
                        rcu_idle_enter();

                        entered_state = cpuidle_enter_s2idle(drv, dev);
                        if (entered_state > 0) {
                                local_irq_enable();
                                goto exit_idle;
                        }

                        rcu_idle_exit();
                }

                tick_nohz_idle_stop_tick();
                rcu_idle_enter();

                next_state = cpuidle_find_deepest_state(drv, dev);
                call_cpuidle(drv, dev, next_state);
        } else {
                tick_nohz_idle_stop_tick();
                rcu_idle_enter();

                /*
                 * Ask the cpuidle framework to choose a convenient idle state.
                 */
                next_state = cpuidle_select(drv, dev);
                entered_state = call_cpuidle(drv, dev, next_state);
                /*
                 * Give the governor an opportunity to reflect on the outcome.
                 */
                cpuidle_reflect(dev, entered_state);
        }

exit_idle:
        __current_set_polling();

        /*
         * It is up to the idle functions to re-enable local interrupts.
         */
        if (WARN_ON_ONCE(irqs_disabled()))
                local_irq_enable();

        rcu_idle_exit();
}

/*
 * Generic idle loop implementation
 *
 * Called with polling cleared.
 */
static void do_idle(void)
{
        int cpu = smp_processor_id();
        /*
         * If the arch has a polling bit, we maintain an invariant:
         *
         * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
         * rq->idle). This means that, if rq->idle has the polling bit set,
         * then setting need_resched is guaranteed to cause the CPU to
         * reschedule.
         */

        __current_set_polling();
        tick_nohz_idle_enter();

        while (!need_resched()) {
                check_pgt_cache();
                rmb();

                if (cpu_is_offline(cpu)) {
                        tick_nohz_idle_stop_tick_protected();
                        cpuhp_report_idle_dead();
                        arch_cpu_idle_dead();
                }

                local_irq_disable();
                arch_cpu_idle_enter();

                /*
                 * In poll mode we re-enable interrupts and spin. Also if we
                 * detected in the wakeup from idle path that the tick
                 * broadcast device expired for us, we don't want to go deep
                 * idle as we know that the IPI is going to arrive right away.
                 */
                if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
                        tick_nohz_idle_restart_tick();
                        cpu_idle_poll();
                } else {
                        cpuidle_idle_call();
                }
                arch_cpu_idle_exit();
        }

        /*
         * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
         * be set, propagate it into PREEMPT_NEED_RESCHED.
         *
         * This is required because for polling idle loops we will not have had
         * an IPI to fold the state for us.
         */
        preempt_set_need_resched();
        tick_nohz_idle_exit();
        __current_clr_polling();

        /*
         * We promise to call sched_ttwu_pending() and reschedule if
         * need_resched() is set while polling is set. That means that clearing
         * polling needs to be visible before doing these things.
         */
        smp_mb__after_atomic();

        sched_ttwu_pending();
        schedule_idle();

        if (unlikely(klp_patch_pending(current)))
                klp_update_patch_state(current);
}
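
/*
 * cpu_in_idle - report whether @pc lies within the __cpuidle text
 * section, i.e. whether the CPU was executing a low-level idle routine.
 */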
bool cpu_in_idle(unsigned long pc)
{
        return pc >= (unsigned long)__cpuidle_text_start &&
               pc < (unsigned long)__cpuidle_text_end;
}
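
/*
 * Helpers for play_idle(): an on-stack hrtimer ends the forced idle
 * period by flagging completion and setting NEED_RESCHED on the idle
 * injection task.
 */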
struct idle_timer {
        struct hrtimer timer;
        int done;
};

static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
{
        struct idle_timer *it = container_of(timer, struct idle_timer, timer);

        WRITE_ONCE(it->done, 1);
        set_tsk_need_resched(current);

        return HRTIMER_NORESTART;
}
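
/**
 * play_idle - Inject an idle period on the current CPU.
 * @duration_ms: Length of the forced idle period, in milliseconds.
 *
 * Must be called from a per-CPU SCHED_FIFO kthread, as the
 * WARN_ON_ONCE() checks below document.
 */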
void play_idle(unsigned long duration_ms)
{
        struct idle_timer it;

        /*
         * Only FIFO tasks can disable the tick since they don't need the forced
         * preemption.
         */
        WARN_ON_ONCE(current->policy != SCHED_FIFO);
        WARN_ON_ONCE(current->nr_cpus_allowed != 1);
        WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
        WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
        WARN_ON_ONCE(!duration_ms);

        rcu_sleep_check();
        preempt_disable();
        current->flags |= PF_IDLE;
        cpuidle_use_deepest_state(true);

        it.done = 0;
        hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        it.timer.function = idle_inject_timer_fn;
        hrtimer_start(&it.timer, ms_to_ktime(duration_ms),
                      HRTIMER_MODE_REL_PINNED);

        while (!READ_ONCE(it.done))
                do_idle();

        cpuidle_use_deepest_state(false);
        current->flags &= ~PF_IDLE;

        preempt_fold_need_resched();
        preempt_enable();
}
EXPORT_SYMBOL_GPL(play_idle);
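
/*
 * Usage sketch (hypothetical caller, in the style of idle-injection
 * drivers): a pinned SCHED_FIFO kthread could alternate forced idle
 * with normal operation to cap power or temperature:
 *
 *      while (!kthread_should_stop()) {
 *              play_idle(24);          // ~24 ms forced idle
 *              msleep(76);             // ~76 ms of normal operation
 *      }
 */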

void cpu_startup_entry(enum cpuhp_state state)
{
        /*
         * This #ifdef needs to die, but it's too late in the cycle to
         * make this generic (ARM and SH have never invoked the canary
         * init for the non boot CPUs!). Will be fixed in 3.11
         */
#ifdef CONFIG_X86
        /*
         * If we're the non-boot CPU, nothing set the stack canary up
         * for us. The boot CPU already has it initialized but no harm
         * in doing it again. This is a good place for updating it, as
         * we won't ever return from this function (so the invalid
         * canaries already on the stack won't ever trigger).
         */
        boot_init_stack_canary();
#endif
        arch_cpu_idle_prepare();
        cpuhp_online_idle(state);
        while (1)
                do_idle();
}

/*
 * idle-task scheduling class.
 */

#ifdef CONFIG_SMP
static int
select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
{
        return task_cpu(p); /* IDLE tasks are never migrated */
}
#endif

/*
 * Idle tasks are unconditionally rescheduled:
 */
static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
{
        resched_curr(rq);
}

static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
        put_prev_task(rq, prev);
        update_idle_core(rq);
        schedstat_inc(rq->sched_goidle);

        return rq->idle;
}

/*
 * It is not legal to sleep in the idle task - print a warning
 * message if some code attempts to do it:
 */
static void
dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
{
        raw_spin_unlock_irq(&rq->lock);
        printk(KERN_ERR "bad: scheduling from the idle thread!\n");
        dump_stack();
        raw_spin_lock_irq(&rq->lock);
}

static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
{
}

/*
 * scheduler tick hitting a task of our scheduling class.
 *
 * NOTE: This function can be called remotely by the tick offload that
 * goes along full dynticks. Therefore no local assumption can be made
 * and everything must be accessed through the @rq and @curr passed in
 * parameters.
 */
static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

static void set_curr_task_idle(struct rq *rq)
{
}

static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
        BUG();
}

static void
prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
{
        BUG();
}

static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task)
{
        return 0;
}

static void update_curr_idle(struct rq *rq)
{
}

/*
 * Simple, special scheduling class for the per-CPU idle tasks:
 */
const struct sched_class idle_sched_class = {
        /* .next is NULL */
        /* no enqueue/yield_task for idle tasks */

        /* dequeue is not valid, we print a debug message there: */
        .dequeue_task           = dequeue_task_idle,

        .check_preempt_curr     = check_preempt_curr_idle,

        .pick_next_task         = pick_next_task_idle,
        .put_prev_task          = put_prev_task_idle,

#ifdef CONFIG_SMP
        .select_task_rq         = select_task_rq_idle,
        .set_cpus_allowed       = set_cpus_allowed_common,
#endif

        .set_curr_task          = set_curr_task_idle,
        .task_tick              = task_tick_idle,

        .get_rr_interval        = get_rr_interval_idle,

        .prio_changed           = prio_changed_idle,
        .switched_to            = switched_to_idle,
        .update_curr            = update_curr_idle,
};