/*
 * Low-level PXA250/210 sleep/wakeup support
 *
 * Initial SA1110 code:
 * Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
 *
 * Adapted for PXA by Nicolas Pitre:
 * Copyright (c) 2002 Monta Vista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <mach/hardware.h>

#include <mach/pxa2xx-regs.h>

#define MDREFR_KDIV	0x200a4000	// all banks
#define CCCR_SLEEP	0x00000107	// L=7 2N=2 A=0 PPDIS=0 CPDIS=0
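@ Field decoding above is an assumption based on the usual PXA27x CCCR
@ layout (L in bits [4:0], 2N in bits [9:7]): 0x00000107 selects L=7 and
@ 2N=2, i.e. a 91 MHz (7 x 13 MHz) run frequency, matching the
@ "keep the core at or below 91 MHz" erratum workaround applied in
@ pxa27x_cpu_suspend() below.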

	.text

pxa_cpu_save_cp:
	@ get coprocessor registers
	mrc	p14, 0, r3, c6, c0, 0		@ clock configuration, for turbo mode
	mrc	p15, 0, r4, c15, c1, 0		@ CP access reg
	mrc	p15, 0, r5, c13, c0, 0		@ PID
	mrc	p15, 0, r6, c3, c0, 0		@ domain ID
	mrc	p15, 0, r7, c2, c0, 0		@ translation table base addr
	mrc	p15, 0, r8, c1, c1, 0		@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0		@ control reg

	bic	r3, r3, #2			@ clear frequency change bit

	@ store them plus current virtual stack ptr on stack
	mov	r10, sp
	stmfd	sp!, {r3 - r10}

	mov	pc, lr

pxa_cpu_save_sp:
	@ preserve phys address of stack
	mov	r0, sp
	str	lr, [sp, #-4]!
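	@ sleep_phys_sp() is assumed to be a small C helper (in generic.c)
	@ that returns, in r0, the physical address of the virtual stack
	@ pointer passed in r0, so the saved state can be found again
	@ before the MMU is re-enabled on resume.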
	bl	sleep_phys_sp
	ldr	r1, =sleep_save_sp
	str	r0, [r1]
	ldr	pc, [sp], #4

#ifdef CONFIG_PXA3xx
/*
 * pxa3xx_cpu_suspend() - forces CPU into sleep state (S2D3C4)
 *
 * NOTE: unfortunately, pxa_cpu_save_cp can not be reused here since
 * the auxiliary control register address is different between pxa3xx
 * and pxa{25x,27x}
 */

ENTRY(pxa3xx_cpu_suspend)

#ifndef CONFIG_IWMMXT
	mra	r2, r3, acc0
#endif
	stmfd	sp!, {r2 - r12, lr}		@ save registers on stack

	mrc	p14, 0, r3, c6, c0, 0		@ clock configuration, for turbo mode
	mrc	p15, 0, r4, c15, c1, 0		@ CP access reg
	mrc	p15, 0, r5, c13, c0, 0		@ PID
	mrc	p15, 0, r6, c3, c0, 0		@ domain ID
	mrc	p15, 0, r7, c2, c0, 0		@ translation table base addr
	mrc	p15, 0, r8, c1, c0, 1		@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0		@ control reg

	bic	r3, r3, #2			@ clear frequency change bit

	@ store them plus current virtual stack ptr on stack
	mov	r10, sp
	stmfd	sp!, {r3 - r10}

	@ store physical address of stack pointer
	mov	r0, sp
	bl	sleep_phys_sp
	ldr	r1, =sleep_save_sp
	str	r0, [r1]

	@ clean data cache
	bl	xsc3_flush_kern_cache_all

	mov	r0, #0x06			@ S2D3C4 mode
	mcr	p14, 0, r0, c7, c0, 0		@ enter sleep

20:	b	20b				@ waiting for sleep

	.data
	.align 5
/*
 * pxa3xx_cpu_resume
 */

ENTRY(pxa3xx_cpu_resume)

	mov	r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE	@ set SVC, irqs off
	msr	cpsr_c, r0

	ldr	r0, sleep_save_sp		@ stack phys addr
	ldmfd	r0, {r3 - r9, sp}		@ CP regs + virt stack ptr

	mov	r1, #0
	mcr	p15, 0, r1, c7, c7, 0		@ invalidate I & D caches, BTB
	mcr	p15, 0, r1, c7, c10, 4		@ drain write (&fill) buffer
	mcr	p15, 0, r1, c7, c5, 4		@ flush prefetch buffer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs

	mcr	p14, 0, r3, c6, c0, 0		@ clock configuration, turbo mode.
	mcr	p15, 0, r4, c15, c1, 0		@ CP access reg
	mcr	p15, 0, r5, c13, c0, 0		@ PID
	mcr	p15, 0, r6, c3, c0, 0		@ domain ID
	mcr	p15, 0, r7, c2, c0, 0		@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1		@ auxiliary control reg

	@ temporarily map resume_turn_on_mmu into the page table,
	@ otherwise prefetch abort occurs after MMU is turned on
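	@ How the temporary mapping below is built (field breakdown is an
	@ assumption based on the first-level section descriptor format):
	@ 0x542e has bits [1:0] = 0b10, i.e. a 1MB section entry, with the
	@ cacheable/bufferable and access permission bits set as needed for
	@ this identity mapping.  "address lsr #20" gives the 1MB section
	@ index used to index the first-level table at r1; the original
	@ entries are kept in r5 and r8 and put back in
	@ pxa3xx_resume_after_mmu once the MMU is on.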
	mov	r1, r7
	bic	r1, r1, #0x00ff
	bic	r1, r1, #0x3f00
	ldr	r2, =0x542e

	adr	r3, resume_turn_on_mmu
	mov	r3, r3, lsr #20
	orr	r4, r2, r3, lsl #20
	ldr	r5, [r1, r3, lsl #2]
	str	r4, [r1, r3, lsl #2]

	@ Mapping page table address in the page table
	mov	r6, r1, lsr #20
	orr	r7, r2, r6, lsl #20
	ldr	r8, [r1, r6, lsl #2]
	str	r7, [r1, r6, lsl #2]

	ldr	r2, =pxa3xx_resume_after_mmu	@ absolute virtual address
	b	resume_turn_on_mmu		@ cache align execution

	.text
pxa3xx_resume_after_mmu:
	/* restore the temporary mapping */
	str	r5, [r1, r3, lsl #2]
	str	r8, [r1, r6, lsl #2]
	b	resume_after_mmu

#endif /* CONFIG_PXA3xx */

#ifdef CONFIG_PXA27x
/*
 * pxa27x_cpu_suspend()
 *
 * Forces CPU into sleep state.
 *
 * r0 = value for PWRMODE M field for desired sleep state
 */

ENTRY(pxa27x_cpu_suspend)

#ifndef CONFIG_IWMMXT
	mra	r2, r3, acc0
#endif
	stmfd	sp!, {r2 - r12, lr}		@ save registers on stack

	bl	pxa_cpu_save_cp

	mov	r5, r0				@ save sleep mode
	bl	pxa_cpu_save_sp

	@ clean data cache
	bl	xscale_flush_kern_cache_all

	@ Put the processor to sleep
	@ (also workaround for sighting 28071)

	@ prepare value for sleep mode
	mov	r1, r5				@ sleep mode

	@ prepare pointer to physical address 0 (virtual mapping in generic.c)
	mov	r2, #UNCACHED_PHYS_0

	@ prepare SDRAM refresh settings
	ldr	r4, =MDREFR
	ldr	r5, [r4]

	@ enable SDRAM self-refresh mode
	orr	r5, r5, #MDREFR_SLFRSH

	@ set SDCLKx divide-by-2 bits (this is part of a workaround for Errata 50)
	ldr	r6, =MDREFR_KDIV
	orr	r5, r5, r6

	@ Intel PXA270 Specification Update notes problems sleeping
	@ with core operating above 91 MHz
	@ (see Errata 50, ...processor does not exit from sleep...)

	ldr	r6, =CCCR
	ldr	r8, [r6]			@ keep original value for resume

	ldr	r7, =CCCR_SLEEP			@ prepare CCCR sleep value
	mov	r0, #0x2			@ prepare value for CLKCFG
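	@ The 0x2 is assumed to be the CLKCFG F (frequency-change) bit, so
	@ writing it via cp14 in pxa_cpu_do_suspend triggers a change to
	@ the CCCR_SLEEP settings loaded above.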

	@ align execution to a cache line
	b	pxa_cpu_do_suspend
#endif

#ifdef CONFIG_PXA25x
/*
 * pxa25x_cpu_suspend()
 *
 * Forces CPU into sleep state.
 *
 * r0 = value for PWRMODE M field for desired sleep state
 */

ENTRY(pxa25x_cpu_suspend)
	stmfd	sp!, {r2 - r12, lr}		@ save registers on stack

	bl	pxa_cpu_save_cp

	mov	r5, r0				@ save sleep mode
	bl	pxa_cpu_save_sp

	@ clean data cache
	bl	xscale_flush_kern_cache_all

	@ prepare value for sleep mode
	mov	r1, r5				@ sleep mode

	@ prepare pointer to physical address 0 (virtual mapping in generic.c)
	mov	r2, #UNCACHED_PHYS_0

	@ prepare SDRAM refresh settings
	ldr	r4, =MDREFR
	ldr	r5, [r4]

	@ enable SDRAM self-refresh mode
	orr	r5, r5, #MDREFR_SLFRSH

	@ Intel PXA255 Specification Update notes problems
	@ about suspending with PXBus operating above 133MHz
	@ (see Errata 31, GPIO output signals, ... unpredictable in sleep)
	@
	@ We keep the change-down as close to the actual suspend of SDRAM
	@ as possible, to eliminate messing about with the refresh clock,
	@ as the system will restore with the original speed settings
	@
	@ Ben Dooks, 13-Sep-2004

	ldr	r6, =CCCR
	ldr	r8, [r6]			@ keep original value for resume

	@ ensure x1 for run and turbo mode with memory clock
	bic	r7, r8, #CCCR_M_MASK | CCCR_N_MASK
	orr	r7, r7, #(1<<5) | (2<<7)

	@ check that the memory frequency is within limits
	and	r14, r7, #CCCR_L_MASK
	teq	r14, #1
	bicne	r7, r7, #CCCR_L_MASK
	orrne	r7, r7, #1			@@ 99.53MHz
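	@ Assuming the PXA25x CCCR layout (L in bits [4:0], M in [6:5],
	@ N in [9:7]): (1<<5) | (2<<7) above selects M=1 (run = memory
	@ clock x1) and N=2 (turbo = run x1), and forcing L=1 gives the
	@ 99.53MHz (27 x 3.6864MHz crystal) memory clock noted above.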

	@ get ready for the change

	@ note, turbo is not preserved over sleep so there is no
	@ point in preserving it here. we save it on the stack with the
	@ other CP registers instead.
	mov	r0, #0
	mcr	p14, 0, r0, c6, c0, 0
	orr	r0, r0, #2			@ initiate change bit
	b	pxa_cpu_do_suspend
#endif

	.ltorg
	.align 5
pxa_cpu_do_suspend:

	@ All needed values are now in registers.
	@ These last instructions should be in cache

	@ initiate the frequency change...
	str	r7, [r6]
	mcr	p14, 0, r0, c6, c0, 0

	@ restore the original cpu speed value for resume
	str	r8, [r6]

	@ need 6 13-MHz cycles before changing PWRMODE
	@ just set frequency to 91-MHz... 6*91/13 = 42

	mov	r0, #42
10:	subs	r0, r0, #1
	bne	10b
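	@ Loop-count arithmetic: at the 91MHz (L=7) rate set up above, six
	@ cycles of the 13MHz clock last 6 x 91/13 = 42 core cycles, and
	@ the subs/bne pair is assumed to take at least one core cycle per
	@ iteration, so 42 iterations cover the required delay.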

	@ Do not reorder...
	@ Intel PXA270 Specification Update notes problems performing
	@ external accesses after SDRAM is put in self-refresh mode
	@ (see Errata 39 ...hangs when entering self-refresh mode)

	@ force address lines low by reading at physical address 0
	ldr	r3, [r2]

	@ put SDRAM into self-refresh
	str	r5, [r4]

	@ enter sleep mode
	mcr	p14, 0, r1, c7, c0, 0		@ PWRMODE

20:	b	20b				@ loop waiting for sleep

/*
 * pxa_cpu_resume()
 *
 * entry point from bootloader into kernel during resume
 *
 * Note: Yes, part of the following code is located in the .data section.
 * This is to allow sleep_save_sp to be accessed with a relative load
 * while we can't rely on any MMU translation.  We could have put
 * sleep_save_sp in the .text section as well, but some setups might
 * insist on .text being truly read-only.
 */

	.data
	.align 5
ENTRY(pxa_cpu_resume)
	mov	r0, #PSR_I_BIT | PSR_F_BIT | SVC_MODE	@ set SVC, irqs off
	msr	cpsr_c, r0

	ldr	r0, sleep_save_sp		@ stack phys addr
	ldr	r2, =resume_after_mmu		@ its absolute virtual address
	ldmfd	r0, {r3 - r9, sp}		@ CP regs + virt stack ptr

	mov	r1, #0
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
	mcr	p15, 0, r1, c7, c7, 0		@ invalidate I & D caches, BTB

#ifdef CONFIG_XSCALE_CACHE_ERRATA
	bic	r9, r9, #0x0004			@ see cpu_xscale_proc_init
#endif

	mcr	p14, 0, r3, c6, c0, 0		@ clock configuration, turbo mode.
	mcr	p15, 0, r4, c15, c1, 0		@ CP access reg
	mcr	p15, 0, r5, c13, c0, 0		@ PID
	mcr	p15, 0, r6, c3, c0, 0		@ domain ID
	mcr	p15, 0, r7, c2, c0, 0		@ translation table base addr
	mcr	p15, 0, r8, c1, c1, 0		@ auxiliary control reg
	b	resume_turn_on_mmu		@ cache align execution

	.align 5
resume_turn_on_mmu:
	mcr	p15, 0, r9, c1, c0, 0		@ turn on MMU, caches, etc.

	@ Let us ensure we jump to resume_after_mmu only when the mcr above
	@ actually took effect.  They call it the "cpwait" operation.
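	@ (The "sub pc, r2, r1, lsr #32" below works because an LSR by 32
	@ yields zero, so the jump target is simply r2; the read of r1 is
	@ there only to create a data dependency on the preceding CP15
	@ read, stalling until the MMU enable has taken effect.)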
	mrc	p15, 0, r1, c2, c0, 0		@ queue a dependency on CP15
	sub	pc, r2, r1, lsr #32		@ jump to virtual addr
	nop
	nop
	nop

sleep_save_sp:
	.word	0				@ preserve stack phys ptr here

	.text
resume_after_mmu:
#ifdef CONFIG_XSCALE_CACHE_ERRATA
	bl	cpu_xscale_proc_init
#endif
	ldmfd	sp!, {r2, r3}
#ifndef CONFIG_IWMMXT
	mar	acc0, r2, r3
#endif
	ldmfd	sp!, {r4 - r12, pc}		@ return to caller