/* arch/arm/kernel/entry-header.S */
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/thread_info.h>
#include <asm/v7m.h>

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH		0
#define BAD_DATA		1
#define BAD_ADDREXCPTN		2
#define BAD_IRQ			3
#define BAD_UNDEFINSTR		4

@
@ Most of the stack format comes from struct pt_regs, but with
@ the addition of 8 bytes for storing syscall args 5 and 6.
@ This _must_ remain a multiple of 8 for EABI.
@
#define S_OFF		8

/*
 * The SWI code relies on the fact that R0 is at the bottom of the stack
 * (due to slow/fast restore user regs).
 */
#if S_R0 != 0
#error "Please fix"
#endif

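/*
 * Clear the frame pointer (when frame pointers are enabled) so that
 * kernel backtraces terminate cleanly at the exception boundary.
 */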
	.macro	zero_fp
#ifdef CONFIG_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

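/*
 * Reload the CP15 control register from the kernel's cached copy (the
 * word at \label holds its address), re-asserting the alignment trap
 * configuration on the entry path.
 */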
	.macro	alignment_trap, rtemp, label
#ifdef CONFIG_ALIGNMENT_TRAP
	ldr	\rtemp, \label
	ldr	\rtemp, [\rtemp]
	mcr	p15, 0, \rtemp, c1, c0
#endif
	.endm

#ifdef CONFIG_CPU_V7M
/*
 * ARMv7-M exception entry/exit macros.
 *
 * xPSR, ReturnAddress(), LR (R14), R12, R3, R2, R1, and R0 are
 * automatically saved on the current stack (32 bytes) before
 * switching to the exception stack (SP_main).
 *
 * If an exception is taken while in user mode, SP_main is
 * empty. Otherwise, SP_main is automatically aligned to 64 bits
 * (CCR.STKALIGN set).
 *
 * Linux assumes that interrupts are disabled when entering an
 * exception handler and it may BUG if this is not the case. Interrupts
 * are disabled during entry and re-enabled in the exit macro.
 *
 * v7m_exception_slow_exit is used when returning from SVC or PendSV.
 * When returning to kernel mode, we don't return from exception.
 */
	.macro	v7m_exception_entry
	@ determine the location of the registers saved by the core during
	@ exception entry. Depending on the mode the CPU was in when the
	@ exception happened, that is either on the main or the process stack.
	@ Bit 2 of EXC_RETURN stored in the lr register specifies which stack
	@ was used.
	tst	lr, #EXC_RET_STACK_MASK
	mrsne	r12, psp
	moveq	r12, sp

	@ we cannot rely on r0-r3 and r12 matching the values saved in the
	@ exception frame because of tail-chaining. So these have to be
	@ reloaded.
	ldmia	r12!, {r0-r3}

	@ Linux expects to have irqs off. Do it here before taking stack space
	cpsid	i

	sub	sp, #S_FRAME_SIZE-S_IP
	stmdb	sp!, {r0-r11}

	@ load saved r12, lr, return address and xPSR.
	@ r0-r7 are used for signals and never touched from now on. Clobbering
	@ r8-r12 is OK.
	mov	r9, r12
	ldmia	r9!, {r8, r10-r12}

	@ calculate the original stack pointer value.
	@ r9 currently points to the memory location just above the auto-saved
	@ xPSR.
	@ The CPU might automatically 8-byte align the stack. Bit 9 of the
	@ saved xPSR specifies whether stack aligning took place. In that case
	@ an extra 32-bit padding word is included in the stacked frame.

	tst	r12, V7M_xPSR_FRAMEPTRALIGN
	addne	r9, r9, #4

	@ store saved r12 using str to have a register to hold the base for stm
	str	r8, [sp, #S_IP]
	add	r8, sp, #S_SP
	@ store r13-r15, xPSR
	stmia	r8!, {r9-r12}
	@ store old_r0
	str	r0, [r8]
	.endm

	/*
	 * PENDSV and SVCALL are configured to have the same exception
	 * priorities. As a kernel thread runs at SVCALL execution priority it
	 * can never be preempted and so we will never have to return to a
	 * kernel thread here.
	 */
	.macro	v7m_exception_slow_exit ret_r0
	cpsid	i
	ldr	lr, =EXC_RET_THREADMODE_PROCESSSTACK

	@ read original r12, sp, lr, pc and xPSR
	add	r12, sp, #S_IP
	ldmia	r12, {r1-r5}

	@ an exception frame is always 8-byte aligned. To tell the hardware
	@ whether the sp to be restored is aligned or not, set bit 9 of the
	@ saved xPSR accordingly.
	tst	r2, #4
	subne	r2, r2, #4
	orrne	r5, V7M_xPSR_FRAMEPTRALIGN
	biceq	r5, V7M_xPSR_FRAMEPTRALIGN

	@ ensure bit 0 is cleared in the PC, otherwise behaviour is
	@ unpredictable
	bic	r4, #1

	@ write basic exception frame
	stmdb	r2!, {r1, r3-r5}
	ldmia	sp, {r1, r3-r5}
	.if	\ret_r0
	stmdb	r2!, {r0, r3-r5}
	.else
	stmdb	r2!, {r1, r3-r5}
	.endif

	@ restore process sp
	msr	psp, r2

	@ restore original r4-r11
	ldmia	sp!, {r0-r11}

	@ restore main sp
	add	sp, sp, #S_FRAME_SIZE-S_IP

	cpsie	i
	bx	lr
	.endm
#endif	/* CONFIG_CPU_V7M */

	@
	@ Store/load the USER SP and LR registers by switching to the SYS
	@ mode. Useful in Thumb-2 mode where "stm/ldm rd, {sp, lr}^" is not
	@ available. Should only be called from SVC mode.
	@
	.macro	store_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	str	sp, [\rd, #\offset]		@ save sp_usr
	str	lr, [\rd, #\offset + 4]		@ save lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

	.macro	load_user_sp_lr, rd, rtemp, offset = 0
	mrs	\rtemp, cpsr
	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch to the SYS mode

	ldr	sp, [\rd, #\offset]		@ load sp_usr
	ldr	lr, [\rd, #\offset + 4]		@ load lr_usr

	eor	\rtemp, \rtemp, #(SVC_MODE ^ SYSTEM_MODE)
	msr	cpsr_c, \rtemp			@ switch back to the SVC mode
	.endm

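	/*
	 * svc_exit: return from an exception taken in SVC mode.  Pass
	 * "irq = 1" when returning from IRQ context, where interrupts are
	 * already disabled.
	 */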
#ifndef CONFIG_THUMB2_KERNEL
	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	msr	spsr_cxsf, \rpsr
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	sub	r0, sp, #4			@ uninhabited address
	strex	r1, r2, [r0]			@ clear the exclusive monitor
#endif
	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
	.endm

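	/*
	 * restore_user_regs: return to user space.  With "fast = 1", r0 is
	 * not reloaded from the frame; it already holds the value to be
	 * returned (e.g. the syscall return value).
	 */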
	.macro	restore_user_regs, fast = 0, offset = 0
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
	msr	spsr_cxsf, r1			@ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [sp]			@ clear the exclusive monitor
#endif
	.if	\fast
	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
	.else
	ldmdb	sp, {r0 - lr}^			@ get calling r0 - lr
	.endif
	mov	r0, r0				@ ARMv5T and earlier require a nop
						@ after ldm {}^
	add	sp, sp, #S_FRAME_SIZE - S_PC
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm

#else	/* CONFIG_THUMB2_KERNEL */
	.macro	svc_exit, rpsr, irq = 0
	.if	\irq != 0
	@ IRQs already off
#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	.else
	@ IRQs off again before pulling preserved data off the stack
	disable_irq_notrace
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	\rpsr, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	\rpsr, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	.endif
	ldr	lr, [sp, #S_SP]			@ top of the stack
	ldrd	r0, r1, [sp, #S_LR]		@ calling lr and pc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r2, r1, [sp, #S_LR]		@ clear the exclusive monitor

	stmdb	lr!, {r0, r1, \rpsr}		@ calling lr and rfe context
	ldmia	sp, {r0 - r12}
	mov	sp, lr
	ldr	lr, [sp], #4
	rfeia	sp!
	.endm

#ifdef CONFIG_CPU_V7M
	/*
	 * Note we don't need to do clrex here as clearing the local monitor is
	 * part of each exception entry and exit sequence.
	 */
	.macro	restore_user_regs, fast = 0, offset = 0
	.if	\offset
	add	sp, #\offset
	.endif
	v7m_exception_slow_exit ret_r0 = \fast
	.endm
#else	/* ifdef CONFIG_CPU_V7M */
	.macro	restore_user_regs, fast = 0, offset = 0
	mov	r2, sp
	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
	ldr	lr, [sp, #\offset + S_PC]	@ get pc
	add	sp, sp, #\offset + S_SP
	msr	spsr_cxsf, r1			@ save in spsr_svc

	@ We must avoid clrex due to Cortex-A15 erratum #830321
	strex	r1, r2, [sp]			@ clear the exclusive monitor

	.if	\fast
	ldmdb	sp, {r1 - r12}			@ get calling r1 - r12
	.else
	ldmdb	sp, {r0 - r12}			@ get calling r0 - r12
	.endif
	add	sp, sp, #S_FRAME_SIZE - S_SP
	movs	pc, lr				@ return & move spsr_svc into cpsr
	.endm
#endif	/* ifdef CONFIG_CPU_V7M / else */
#endif	/* !CONFIG_THUMB2_KERNEL */

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
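/*
 * The "save" argument preserves r0-r3, ip and lr across the call, for
 * callers that still need their values afterwards.
 */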
	.macro	ct_user_exit, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_exit
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_exit
	.endif
#endif
	.endm

	.macro	ct_user_enter, save = 1
#ifdef CONFIG_CONTEXT_TRACKING
	.if	\save
	stmdb	sp!, {r0-r3, ip, lr}
	bl	context_tracking_user_enter
	ldmia	sp!, {r0-r3, ip, lr}
	.else
	bl	context_tracking_user_enter
	.endif
#endif
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * r7 is reserved for the system call number for thumb mode.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info