sh: TLB fast path optimizations for load/store exceptions.
[linux-block.git] / arch/sh/kernel/idle.c
/*
 * The idle loop for all SuperH platforms.
 *
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/preempt.h>
#include <linux/thread_info.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/atomic.h>

/* Non-zero disables use of the CPU sleep instruction in the idle loop */
static int hlt_counter;

void (*pm_idle)(void);
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

/* "nohlt" on the kernel command line: idle by polling instead of sleeping */
static int __init nohlt_setup(char *__unused)
{
        hlt_counter = 1;
        return 1;
}
__setup("nohlt", nohlt_setup);

/* "hlt" on the kernel command line: allow the idle loop to sleep (default) */
static int __init hlt_setup(char *__unused)
{
        hlt_counter = 0;
        return 1;
}
__setup("hlt", hlt_setup);

void default_idle(void)
{
        if (!hlt_counter) {
                /* Stop polling so a remote reschedule sends a wakeup IPI */
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();
                set_bl_bit();
                stop_critical_timings();

                /* Sleep the CPU until a reschedule is required */
                while (!need_resched())
                        cpu_sleep();

                start_critical_timings();
                clear_bl_bit();
                set_thread_flag(TIF_POLLING_NRFLAG);
        } else
                /* "nohlt": busy-poll instead of sleeping */
                while (!need_resched())
                        cpu_relax();
}

void cpu_idle(void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);

        /* endless idle loop with no priority at all */
        while (1) {
                void (*idle)(void) = pm_idle;

                if (!idle)
                        idle = default_idle;

                tick_nohz_stop_sched_tick(1);
                while (!need_resched())
                        idle();
                tick_nohz_restart_sched_tick();

                preempt_enable_no_resched();
                schedule();
                preempt_disable();
                check_pgt_cache();
        }
}

static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Ensure that all CPUs discard the old value of pm_idle
 * and pick up the new one. Required when changing the pm_idle handler on
 * SMP systems.
 *
 * The caller must have changed pm_idle to the new value before the call.
 * The old pm_idle value will not be used by any CPU after this function
 * returns.
 */
void cpu_idle_wait(void)
{
        smp_mb();
        /* kick all the CPUs so that they exit out of pm_idle */
        smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
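
For illustration, a minimal sketch (not part of this file) of how platform code might install its own pm_idle handler and use cpu_idle_wait(). The names my_platform_idle and my_platform_idle_init are hypothetical, and the headers such code would need (linux/init.h, linux/pm.h, linux/sched.h) are omitted. Per the comment above, pm_idle must be updated before cpu_idle_wait() is called so that no CPU keeps running the old handler once it returns.

/* Hypothetical platform idle handler: sleep until a reschedule is needed. */
static void my_platform_idle(void)
{
        while (!need_resched())
                cpu_sleep();
}

/* Hypothetical init hook: publish the new handler, then flush the old one. */
static int __init my_platform_idle_init(void)
{
        pm_idle = my_platform_idle;     /* change pm_idle first ... */
        cpu_idle_wait();                /* ... then wait until no CPU still runs the old handler */
        return 0;
}
arch_initcall(my_platform_idle_init);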