arch/arm/mach-omap2/sleep44xx.S
/*
 * OMAP44xx sleep code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Santosh Shilimkar <santosh.shilimkar@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/linkage.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>

#include "omap-secure.h"

#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"

#if defined(CONFIG_SMP) && defined(CONFIG_PM)

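/*
 * DO_SMC: secure monitor call. The DSBs on either side of the SMC
 * ensure that all outstanding memory accesses complete before entering
 * and after returning from the secure world.
 */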
.macro DO_SMC
	dsb
	smc	#0
	dsb
.endm

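/*
 * Empty parameter block for PPA (TI's secure-side Primary Protected
 * Application) services that take no arguments.
 */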
ppa_zero_params:
	.word		0x0

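/*
 * Parameter block for the PPA L2 POR service: likely a parameter count
 * followed by the POR value. The second word is filled at run time with
 * the PL310 prefetch control setting saved in SAR RAM (see the MPUSS
 * OFF wakeup path below).
 */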
ppa_por_params:
	.word		1, 0

/*
 * =============================
 * == CPU suspend finisher ==
 * =============================
 *
 * void omap4_finish_suspend(unsigned long cpu_state)
 *
 * This function saves the CPU context and performs the CPU
 * power down sequence. Calling WFI effectively changes the CPU
 * power domain state to the desired target power state.
 *
 * @cpu_state : contains context save state (r0)
 *	0 - No context lost
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for CPU OFF and DORMANT power states.
 * Post WFI, the CPU transitions to the DORMANT or OFF power state and on
 * wake-up follows a full CPU reset path via ROM code to the CPU restore code.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 * It returns to the caller for the CPU INACTIVE and ON power states, or in
 * case the CPU failed to transition to the targeted OFF/DORMANT state.
 *
 * omap4_finish_suspend() calls v7_flush_dcache_all(), which doesn't save
 * a stack frame and expects the caller to take care of it. Hence the
 * entire stack frame is saved to avoid possible stack corruption.
 */
ENTRY(omap4_finish_suspend)
	stmfd	sp!, {r4-r12, lr}
	cmp	r0, #0x0
	beq	do_WFI				@ No lowpower state, jump to WFI

	/*
	 * Flush all data from the L1 data cache before disabling
	 * the SCTLR.C bit.
	 */
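	/*
	 * On high-security (HS) devices, the secure-side L1 contents
	 * must first be cleaned through a monitor call; a non-secure
	 * flush by set/way cannot reach secure cache lines.
	 */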
	bl	omap4_get_sar_ram_base
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	skip_secure_l1_clean
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF			@ clean secure L1
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
skip_secure_l1_clean:
	bl	v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C makes all data accesses
	 * strongly ordered so that they do not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)		@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate the L1 data cache. Even though only an invalidate
	 * is necessary, the exported flush API is used here. A clean
	 * on an already clean cache is almost a NOP.
	 */
	bl	v7_flush_dcache_all

	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This takes the CPU out of coherency by preventing it from
	 * receiving cache, TLB, or BTB maintenance operations
	 * broadcast by other CPUs in the cluster.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_set
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r0, [r8, #SCU_OFFSET0]
	ldrne	r0, [r8, #SCU_OFFSET1]
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_set
scu_gp_set:
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f
	ldreq	r1, [r8, #SCU_OFFSET0]
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base
	bl	scu_power_mode
skip_scu_gp_set:
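	/*
	 * NSACR bit 18 (NS_SMP) indicates whether the non-secure world
	 * may write the ACTLR SMP bit; only in that case can this code
	 * take the CPU out of SMP coherency directly.
	 */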
	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
	tst	r0, #(1 << 18)
	mrcne	p15, 0, r0, c1, c0, 1
	bicne	r0, r0, #(1 << 6)		@ Disable SMP bit
	mcrne	p15, 0, r0, c1, c0, 1
	isb
	dsb
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * The common cache-l2x0.c functions can't be used here because
	 * they take spinlocks. At this point the CPU is out of coherency
	 * with the data cache disabled. The spinlock implementation uses
	 * exclusive load/store instructions, which can fail with the data
	 * cache disabled, and OMAP4 hardware has no external exclusive
	 * monitor to make up for it, so taking a spinlock here could
	 * deadlock the CPU.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
	ands	r5, r5, #0x0f
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
	cmp	r0, #3
	bne	do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
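	/*
	 * PL310 erratum 727915: a background clean & invalidate by way
	 * can corrupt data. As the documented workaround, the monitor
	 * debug-control service sets the PL310 debug register around
	 * the operation.
	 */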
	mov	r0, #0x03
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
	bl	omap4_get_l2cache_base
	mov	r2, r0
	ldr	r0, =0xffff
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
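	/* Poll until all way bits clear: the clean & invalidate is done. */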
wait:
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r1, =0xffff
	ands	r0, r0, r1
	bne	wait
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
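	/*
	 * Write the Cache Sync register and poll bit 0 until it clears,
	 * which drains the PL310 buffers and guarantees that all previous
	 * maintenance operations have completed.
	 */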
l2x_sync:
	bl	omap4_get_l2cache_base
	mov	r2, r0
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]
sync:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1
	bne	sync
#endif

do_WFI:
	bl	omap_do_wfi

	/*
	 * The CPU reaches this point when it failed to enter the
	 * OFF/DORMANT state or when no low power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)			@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)		@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Set the CPU power state back to NORMAL in the SCU so that
	 * the CPU rejoins coherency. Left in non-coherent mode, the
	 * CPU can lock up and deadlock the system.
	 */
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_clear
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_clear
scu_gp_clear:
	bl	omap4_get_scu_base
	mov	r1, #SCU_PM_NORMAL
	bl	scu_power_mode
skip_scu_gp_clear:
	isb
	dsb
	ldmfd	sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)

/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. The physical address of the function is
 * stored in SAR RAM while entering OFF or DORMANT mode.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 */
ENTRY(omap4_cpu_resume)
	/*
	 * Configure ACTRL and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init and for CPU1 a secure PPA API is provided. CPU0 must be ON
	 * while executing the NS_SMP API on CPU1, and the PPA version must
	 * be 1.4.0 or later.
	 * OMAP443X GP devices - SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Skip if GP device
	bne	skip_ns_smp_enable
	mrc	p15, 0, r0, c0, c0, 5
	ands	r0, r0, #0x0f
	beq	skip_ns_smp_enable
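	/* Retry the PPA service until it reports success (returns 0). */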
ppa_actrl_retry:
	mov	r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
	adr	r3, ppa_zero_params		@ Pointer to parameters
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	cmp	r0, #0x0			@ API returns 0 on success.
	beq	enable_smp_bit
	b	ppa_actrl_retry
enable_smp_bit:
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
	 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using the L2X0 CTRL
	 * register; r0 contains the value to be programmed.
	 * The L2 cache has already been invalidated by ROM code as
	 * part of the MPUSS OFF wakeup path.
	 */
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	and	r0, #0x0f
	cmp	r0, #1
	beq	skip_l2en			@ Skip if already enabled
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1			@ Check for HS device
	bne	set_gp_por
	ldr	r0, =OMAP4_PPA_L2_POR_INDEX
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	adr	r3, ppa_por_params
	str	r4, [r3, #0x04]
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	b	set_aux_ctrl
set_gp_por:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
	DO_SMC
set_aux_ctrl:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
	DO_SMC
	mov	r0, #0x1
	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX		@ Enable L2 cache
	DO_SMC
skip_l2en:
#endif

	b	cpu_resume			@ Jump to generic resume
ENDPROC(omap4_cpu_resume)
#endif

#ifndef CONFIG_OMAP4_ERRATA_I688
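/*
 * With the i688 erratum workaround disabled there is no dedicated
 * interconnect write-buffer drain, so omap_bus_sync reduces to a
 * plain return.
 */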
ENTRY(omap_bus_sync)
	mov	pc, lr
ENDPROC(omap_bus_sync)
#endif

ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}
	/* Drain interconnect write buffers. */
	bl	omap_bus_sync

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in an idle, low power state. The CPU can
	 * speculatively prefetch instructions, so add NOPs after
	 * the WFI: sixteen, matching the Cortex-A9 pipeline.
	 */
	wfi					@ Wait For Interrupt
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	ldmfd	sp!, {pc}
ENDPROC(omap_do_wfi)