Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc
[linux-2.6-block.git] / arch / arm / mach-omap2 / cpuidle44xx.c
CommitLineData
/*
 * OMAP4+ CPU idle Routines
 *
 * Copyright (C) 2011-2013 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 * Rajendra Nayak <rnayak@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12
13#include <linux/sched.h>
14#include <linux/cpuidle.h>
15#include <linux/cpu_pm.h>
16#include <linux/export.h>
fa8589fe 17#include <linux/tick.h>
98272660 18
0e9e8b4b 19#include <asm/cpuidle.h>
98272660
SS
20
21#include "common.h"
22#include "pm.h"
23#include "prm.h"
7abdb0e2 24#include "soc.h"
dd3ad97c 25#include "clockdomain.h"
98272660 26
865da01c
SS
#define MAX_CPUS	2

/* Machine specific information */
struct idle_statedata {
	u32 cpu_state;		/* CPUx power domain target state */
	u32 mpu_logic_state;	/* MPUSS logic state, programmed via pwrdm_set_logic_retst() */
	u32 mpu_state;		/* MPUSS power domain target state */
	u32 mpu_state_vote;	/* CPUs voting for this MPU state; protected by mpu_lock */
};
36
/* OMAP4 C-state table; indexed by the cpuidle state index (C1..C3) */
static struct idle_statedata omap4_idle_data[] = {
	{
		/* C1: CPUx ON, MPUSS ON */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C2: CPUx OFF, MPUSS CSWR (RET with logic retained) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
	{
		/* C3: CPUx OFF, MPUSS OSWR (RET with logic off) */
		.cpu_state = PWRDM_POWER_OFF,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_OFF,
	},
};
98272660 54
7abdb0e2
SS
/* OMAP5 C-state table; indexed by the cpuidle state index (C1..C2) */
static struct idle_statedata omap5_idle_data[] = {
	{
		/* C1: CPUx WFI, MPUSS ON */
		.cpu_state = PWRDM_POWER_ON,
		.mpu_state = PWRDM_POWER_ON,
		.mpu_logic_state = PWRDM_POWER_ON,
	},
	{
		/* C2: CPUx CSWR, MPUSS CSWR */
		.cpu_state = PWRDM_POWER_RET,
		.mpu_state = PWRDM_POWER_RET,
		.mpu_logic_state = PWRDM_POWER_RET,
	},
};
67
865da01c
SS
/* Power/clock domain handles resolved once in omap4_idle_init() */
static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
static struct clockdomain *cpu_clkdm[MAX_CPUS];

/* Rendezvous barrier for the coupled-state abort path ("fail:" label) */
static atomic_t abort_barrier;
/* Per-CPU flag: this CPU has completed its low-power attempt this cycle */
static bool cpu_done[MAX_CPUS];
/* Active C-state table; switched to omap5_idle_data on OMAP5 at init */
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
/* Serializes mpu_state_vote updates and MPU powerdomain programming */
static DEFINE_RAW_SPINLOCK(mpu_lock);
98272660 75
9db316b6
PW
/* Private functions */

/**
 * omap_enter_idle_[simple/coupled] - OMAP4PLUS cpuidle entry functions
 * @dev: cpuidle device
 * @drv: cpuidle driver
 * @index: the index of state to be entered
 *
 * Called from the CPUidle framework to program the device to the
 * specified low power state selected by the governor.
 * Returns the amount of time spent in the low power state.
 */
static int omap_enter_idle_simple(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	/* Shallowest state: just execute WFI, no domain reprogramming */
	omap_do_wfi();
	return index;
}
95
7abdb0e2
SS
/*
 * SMP entry path (OMAP5): each CPU votes for the MPU domain state; the
 * domain is only programmed when all online CPUs have voted, and is
 * restored to ON by the first CPU to wake.
 */
static int omap_enter_idle_smp(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv,
			       int index)
{
	struct idle_statedata *cx = state_ptr + index;
	unsigned long flag;

	raw_spin_lock_irqsave(&mpu_lock, flag);
	cx->mpu_state_vote++;
	/* Last CPU in: program MPUSS logic and power state for this C-state */
	if (cx->mpu_state_vote == num_online_cpus()) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
	}
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);

	raw_spin_lock_irqsave(&mpu_lock, flag);
	/* First CPU out: restore MPUSS to ON before dropping our vote */
	if (cx->mpu_state_vote == num_online_cpus())
		omap_set_pwrdm_state(mpu_pd, PWRDM_POWER_ON);
	cx->mpu_state_vote--;
	raw_spin_unlock_irqrestore(&mpu_lock, flag);

	return index;
}
121
/*
 * Coupled entry path (OMAP4): both CPUs enter together; CPU0 orchestrates
 * MPUSS programming and CPU1 wakeup. See the kernel-doc above
 * omap_enter_idle_simple() for the common parameter/return contract.
 */
static int omap_enter_idle_coupled(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	struct idle_statedata *cx = state_ptr + index;
	u32 mpuss_can_lose_context = 0;

	/*
	 * CPU0 has to wait and stay ON until CPU1 is OFF state.
	 * This is necessary to honour hardware recommendation
	 * of triggering all the possible low power modes once CPU1 is
	 * out of coherency and in OFF mode.
	 */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
		while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
			cpu_relax();

			/*
			 * CPU1 could have already entered & exited idle
			 * without hitting off because of a wakeup
			 * or a failed attempt to hit off mode. Check for
			 * that here, otherwise we could spin forever
			 * waiting for CPU1 off.
			 */
			if (cpu_done[1])
				goto fail;

		}
	}

	/* Context is lost only in OSWR: MPU RET with logic OFF (C3) */
	mpuss_can_lose_context = (cx->mpu_state == PWRDM_POWER_RET) &&
				 (cx->mpu_logic_state == PWRDM_POWER_OFF);

	/* Enter broadcast mode for periodic timers */
	tick_broadcast_enable();

	/* Enter broadcast mode for one-shot timers */
	tick_broadcast_enter();

	/*
	 * Call idle CPU PM enter notifier chain so that
	 * VFP and per CPU interrupt context is saved.
	 */
	cpu_pm_enter();

	/* Only CPU0 programs the shared MPUSS domain */
	if (dev->cpu == 0) {
		pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
		omap_set_pwrdm_state(mpu_pd, cx->mpu_state);

		/*
		 * Call idle CPU cluster PM enter notifier chain
		 * to save GIC and wakeupgen context.
		 */
		if (mpuss_can_lose_context)
			cpu_cluster_pm_enter();
	}

	omap4_enter_lowpower(dev->cpu, cx->cpu_state);
	cpu_done[dev->cpu] = true;

	/* Wakeup CPU1 only if it is not offlined */
	if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {

		/*
		 * ROM SMP boot erratum: keep the GIC distributor disabled
		 * across CPU1 wakeup when cluster context was lost.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context)
			gic_dist_disable();

		/* Force CPU1's clockdomain active while powering it back ON */
		clkdm_deny_idle(cpu_clkdm[1]);
		omap_set_pwrdm_state(cpu_pd[1], PWRDM_POWER_ON);
		clkdm_allow_idle(cpu_clkdm[1]);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD) &&
		    mpuss_can_lose_context) {
			/* Wait until CPU1's boot code re-enables the GIC */
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
		}
	}

	/*
	 * Call idle CPU PM exit notifier chain to restore
	 * VFP and per CPU IRQ context.
	 */
	cpu_pm_exit();

	/*
	 * Call idle CPU cluster PM exit notifier chain
	 * to restore GIC and wakeupgen context.
	 */
	if (dev->cpu == 0 && mpuss_can_lose_context)
		cpu_cluster_pm_exit();

	tick_broadcast_exit();

fail:
	/* Both CPUs rendezvous here (including the abort path) before exit */
	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
	cpu_done[dev->cpu] = false;

	return index;
}
224
/* Latency/residency figures below are in microseconds (cpuidle convention) */
static struct cpuidle_driver omap4_idle_driver = {
	.name = "omap4_idle",
	.owner = THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx ON, MPUSS ON"
		},
		{
			/* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
			.exit_latency = 328 + 440,
			.target_residency = 960,
			/* COUPLED: both CPUs must enter this state together */
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C2",
			.desc = "CPUx OFF, MPUSS CSWR",
		},
		{
			/* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
			.exit_latency = 460 + 518,
			.target_residency = 1100,
			.flags = CPUIDLE_FLAG_COUPLED,
			.enter = omap_enter_idle_coupled,
			.name = "C3",
			.desc = "CPUx OFF, MPUSS OSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap4_idle_data),
	.safe_state_index = 0,
};
259
7abdb0e2
SS
/* Latency/residency figures below are in microseconds (cpuidle convention) */
static struct cpuidle_driver omap5_idle_driver = {
	.name = "omap5_idle",
	.owner = THIS_MODULE,
	.states = {
		{
			/* C1 - CPU0 ON + CPU1 ON + MPU ON */
			.exit_latency = 2 + 2,
			.target_residency = 5,
			.enter = omap_enter_idle_simple,
			.name = "C1",
			.desc = "CPUx WFI, MPUSS ON"
		},
		{
			/* C2 - CPU0 RET + CPU1 RET + MPU CSWR */
			.exit_latency = 48 + 60,
			.target_residency = 100,
			/* TIMER_STOP: local timer stops; broadcast handled by core */
			.flags = CPUIDLE_FLAG_TIMER_STOP,
			.enter = omap_enter_idle_smp,
			.name = "C2",
			.desc = "CPUx CSWR, MPUSS CSWR",
		},
	},
	.state_count = ARRAY_SIZE(omap5_idle_data),
	.safe_state_index = 0,
};
285
9db316b6 286/* Public functions */
b93d70ae 287
98272660 288/**
db4f3dab 289 * omap4_idle_init - Init routine for OMAP4+ idle
98272660 290 *
db4f3dab 291 * Registers the OMAP4+ specific cpuidle driver to the cpuidle
98272660
SS
292 * framework with the valid set of states.
293 */
294int __init omap4_idle_init(void)
295{
7abdb0e2
SS
296 struct cpuidle_driver *idle_driver;
297
298 if (soc_is_omap54xx()) {
299 state_ptr = &omap5_idle_data[0];
300 idle_driver = &omap5_idle_driver;
301 } else {
302 state_ptr = &omap4_idle_data[0];
303 idle_driver = &omap4_idle_driver;
304 }
305
98272660 306 mpu_pd = pwrdm_lookup("mpu_pwrdm");
dd3ad97c
SS
307 cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
308 cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
309 if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
98272660
SS
310 return -ENODEV;
311
dd3ad97c
SS
312 cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
313 cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
314 if (!cpu_clkdm[0] || !cpu_clkdm[1])
98272660
SS
315 return -ENODEV;
316
7abdb0e2 317 return cpuidle_register(idle_driver, cpu_online_mask);
98272660 318}