/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note: there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
#ifndef CONFIG_MULTI_IRQ_HANDLER
#include <mach/entry-macro.S>
#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/system_info.h>

#include "entry-header.S"
#include <asm/entry-macro-multi.S>
/*
 * Interrupt handling.
 */
	.macro	irq_handler
#ifdef CONFIG_MULTI_IRQ_HANDLER
	ldr	r1, =handle_arch_irq
	mov	r0, sp
	adr	lr, BSYM(9997f)
	ldr	pc, [r1]
#else
	arch_irq_handler_default
#endif
9997:
	.endm
	.macro	pabt_helper
	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
#ifdef MULTI_PABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
#else
	bl	CPU_PABORT_HANDLER
#endif
	.endm
	.macro	dabt_helper

	@
	@ Call the processor-specific abort handler:
	@
	@  r2 - pt_regs
	@  r4 - aborted context pc
	@  r5 - aborted context psr
	@
	@ The abort handler must return the aborted address in r0, and
	@ the fault status register in r1.  r9 must be preserved.
	@
#ifdef MULTI_DABORT
	ldr	ip, .LCprocfns
	mov	lr, pc
	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
#else
	bl	CPU_DABORT_HANDLER
#endif
	.endm
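
/*
 * Illustration (not from the original source): the address/FSR pair that
 * the ABI above requires is ultimately consumed by the C-level fault
 * handler in arch/arm/mm/fault.c, declared roughly as:
 *
 *	void do_DataAbort(unsigned long addr, unsigned int fsr,
 *			  struct pt_regs *regs);
 */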
#ifdef CONFIG_KPROBES
	.section	.kprobes.text,"ax",%progbits
#else
	.text
#endif
/*
 * Invalid mode handlers
 */
	.macro	inv_entry, reason
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - lr}		)
 THUMB(	stmia	sp, {r0 - r12}		)
 THUMB(	str	sp, [sp, #S_SP]		)
 THUMB(	str	lr, [sp, #S_LR]		)
	mov	r1, #\reason
	.endm
__pabt_invalid:
	inv_entry BAD_PREFETCH
	b	common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
	inv_entry BAD_DATA
	b	common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
	inv_entry BAD_IRQ
	b	common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
	inv_entry BAD_UNDEFINSTR

	@
	@ XXX fall through to common_invalid
	@
@
@ common_invalid - generic code for failed exception
@ (re-entrant version of handlers)
@
common_invalid:
	zero_fp

	ldmia	r0, {r4 - r6}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r7, #-1			@  ""   ""    ""        ""
	str	r4, [sp]		@ save preserved r0
	stmia	r0, {r5 - r7}		@ lr_<exception>,
					@ cpsr_<exception>, "old_r0"

	mov	r0, sp
	b	bad_mode
ENDPROC(__und_invalid)
/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif
	.macro	svc_entry, stack_hole=0
 UNWIND(.fnstart		)
 UNWIND(.save {r0 - pc}		)
	sub	sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX(	str	r0, [sp]	)	@ temporarily saved
 SPFIX(	mov	r0, sp		)
 SPFIX(	tst	r0, #4		)	@ test original stack alignment
 SPFIX(	ldr	r0, [sp]	)	@ restored
#else
 SPFIX(	tst	sp, #4		)
#endif
 SPFIX(	subeq	sp, sp, #4	)
	stmia	sp, {r1 - r12}

	ldmia	r0, {r3 - r5}
	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""      ""       ""
	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX(	addeq	r2, r2, #4	)
	str	r3, [sp, #-4]!		@ save the "real" r0 copied
					@ from the exception stack
	mov	r3, lr

	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r2 - sp_svc
	@  r3 - lr_svc
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	stmia	r7, {r2 - r6}

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	.endm
	.align	5
__dabt_svc:
	svc_entry
	mov	r2, sp
	dabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
ENDPROC(__dabt_svc)
	.align	5
__irq_svc:
	svc_entry
	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
	teq	r8, #0				@ if preempt count != 0
	movne	r0, #0				@ force flags to 0
	tst	r0, #_TIF_NEED_RESCHED
	blne	svc_preempt
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	@ The parent context IRQs must have been enabled to get here in
	@ the first place, so there's no point checking the PSR I bit.
	bl	trace_hardirqs_on
#endif
	svc_exit r5				@ return from exception
ENDPROC(__irq_svc)
#ifdef CONFIG_PREEMPT
svc_preempt:
	mov	r8, lr
1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
	ldr	r0, [tsk, #TI_FLAGS]		@ get new task's TI_FLAGS
	tst	r0, #_TIF_NEED_RESCHED
	moveq	pc, r8				@ go again
	b	1b
#endif
	.align	5
__und_svc:
#ifdef CONFIG_KPROBES
	@ If a kprobe is about to simulate a "stmdb sp..." instruction,
	@ it obviously needs free stack space which then will belong to
	@ the saved context.
	svc_entry 64
#else
	svc_entry
#endif
	@
	@ call emulation code, which returns using r9 if it has emulated
	@ the instruction, or the more conventional lr if we are to treat
	@ this as a real undefined instruction
	@
	@  r0 - instruction
	@
#ifndef CONFIG_THUMB2_KERNEL
	ldr	r0, [r4, #-4]			@ ARM instruction at LR - 4
#else
	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
	cmp	r0, #0xe800			@ 32-bit instruction if >= 0xe800
	ldrhhs	r9, [r4]			@ bottom 16 bits
	orrhs	r0, r9, r0, lsl #16
#endif
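
/*
 * Illustration (not from the original source): Thumb-2 uses a 32-bit
 * encoding when the first halfword's top five bits are 0b11101, 0b11110
 * or 0b11111, i.e. when it is >= 0xe800.  The assembly above is roughly:
 *
 *	u32 insn = first_hw;			// halfword at lr - 2
 *	if (first_hw >= 0xe800)			// 32-bit encoding?
 *		insn = (first_hw << 16) | second_hw;
 */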
	adr	r9, BSYM(1f)
	mov	r2, r4
	bl	call_fpe

	mov	r0, sp				@ struct pt_regs *regs
	bl	do_undefinstr

	@
	@ IRQs off again before pulling preserved data off the stack
	@
1:	disable_irq_notrace

	@
	@ restore SPSR and restart the instruction
	@
	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
ENDPROC(__und_svc)
	.align	5
__pabt_svc:
	svc_entry
	mov	r2, sp				@ regs
	pabt_helper

	@
	@ IRQs off again before pulling preserved data off the stack
	@
	disable_irq_notrace

#ifdef CONFIG_TRACE_IRQFLAGS
	tst	r5, #PSR_I_BIT
	bleq	trace_hardirqs_on
	tst	r5, #PSR_I_BIT
	blne	trace_hardirqs_off
#endif
	svc_exit r5				@ return from exception
ENDPROC(__pabt_svc)

	.align	5
.LCcralign:
	.word	cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
	.word	processor
#endif
.LCfp:
	.word	fp_enter
/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE should
 * be a multiple of 8 as well.
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif
	.macro	usr_entry
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)	@ don't unwind the user space
	sub	sp, sp, #S_FRAME_SIZE
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)

	ldmia	r0, {r3 - r5}
	add	r0, sp, #S_PC		@ here for interlock avoidance
	mov	r6, #-1			@  ""  ""     ""        ""

	str	r3, [sp]		@ save the "real" r0 copied
					@ from the exception stack
	@
	@ We are now ready to fill in the remaining blanks on the stack:
	@
	@  r4 - lr_<exception>, already fixed up for correct return/restart
	@  r5 - spsr_<exception>
	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
	@
	@ Also, separately save sp_usr and lr_usr
	@
	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)

	@
	@ Enable the alignment trap while in kernel mode
	@
	alignment_trap r0

	@
	@ Clear FP to mark the first stack frame
	@
	zero_fp

#ifdef CONFIG_IRQSOFF_TRACER
	bl	trace_hardirqs_off
#endif
	.endm
	.macro	kuser_cmpxchg_check
#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
	@ Make sure our user space atomic helper is restarted
	@ if it was interrupted in a critical region.  Here we
	@ perform a quick test inline since it should be false
	@ 99.9999% of the time.  The rest is done out of line.
	cmp	r4, #TASK_SIZE
	blhs	kuser_cmpxchg64_fixup
#endif
#endif
	.endm
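
/*
 * Illustration (not from the original source): in C, the inline test
 * above is roughly
 *
 *	if (regs->ARM_pc >= TASK_SIZE)		// only kuser helpers live there
 *		kuser_cmpxchg64_fixup();	// out-of-line slow path
 *
 * r4 holds the aborted pc at this point, so almost every ordinary user
 * fault skips the out-of-line fixup.
 */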
	.align	5
__irq_usr:
	usr_entry
	kuser_cmpxchg_check
	irq_handler
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user_from_irq
ENDPROC(__irq_usr)
	.align	5
__und_usr:
	usr_entry

	mov	r2, r4
	mov	r3, r5

	@
	@ fall through to the emulation code, which returns using r9 if
	@ it has emulated the instruction, or the more conventional lr
	@ if we are to treat this as a real undefined instruction
	@
	@  r0 - instruction
	@
	adr	r9, BSYM(ret_from_exception)
	adr	lr, BSYM(__und_usr_unknown)
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	itet	eq				@ explicit IT needed for the 1f label
	subeq	r4, r2, #4			@ ARM instr at LR - 4
	subne	r4, r2, #2			@ Thumb instr at LR - 2
1:	ldreqt	r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
	reveq	r0, r0				@ little endian instruction
#endif
	beq	call_fpe
	@ Thumb instruction
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
/*
 * Thumb-2 instruction handling.  Note that because pre-v6 and >= v6 platforms
 * can never be supported in a single kernel, this code is not applicable at
 * all when __LINUX_ARM_ARCH__ < 6.  This allows simplifying assumptions to be
 * made about .arch directives.
 */
#if __LINUX_ARM_ARCH__ < 7
/* If the target CPU may not be Thumb-2-capable, a run-time check is needed: */
#define NEED_CPU_ARCHITECTURE
	ldr	r5, .LCcpu_architecture
	ldr	r5, [r5]
	cmp	r5, #CPU_ARCH_ARMv7
	blo	__und_usr_unknown
/*
 * The following code won't get run unless the running CPU really is v7, so
 * coding round the lack of ldrht on older arches is pointless.  Temporarily
 * override the assembler target arch with the minimum required instead:
 */
	.arch	armv6t2
#endif
2:
 ARM(	ldrht	r5, [r4], #2	)
 THUMB(	ldrht	r5, [r4]	)
 THUMB(	add	r4, r4, #2	)
	cmp	r5, #0xe800			@ 32-bit instruction if >= 0xe800
	blo	__und_usr_unknown
3:	ldrht	r0, [r4]
	add	r2, r2, #2			@ r2 is PC + 2, make it PC + 4
	str	r2, [sp, #S_PC]			@ it's a 2x16bit instr, update
	orr	r0, r0, r5, lsl #16

#if __LINUX_ARM_ARCH__ < 7
/* If the target arch was overridden, change it back: */
#ifdef CONFIG_CPU_32v6K
	.arch	armv6k
#else
	.arch	armv6
#endif
#endif /* __LINUX_ARM_ARCH__ < 7 */
#else	/* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
	b	__und_usr_unknown
#endif
ENDPROC(__und_usr)

	@
	@ fallthrough to call_fpe
	@
/*
 * The out of line fixup for the ldrt instructions above.
 */
	.pushsection .fixup, "ax"
4:	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
	.long	2b, 4b
	.long	3b, 4b
#endif
	.popsection
/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure
 *  lr  = unrecognised instruction return address
 */
	@
	@ Fall-through from Thumb-2 __und_usr
	@
#ifdef CONFIG_NEON
	adr	r6, .LCneon_thumb_opcodes	@ check for NEON in Thumb mode
	b	2f
#endif
call_fpe:
#ifdef CONFIG_NEON
	adr	r6, .LCneon_arm_opcodes		@ check for NEON in ARM mode
2:	ldr	r7, [r6], #4			@ mask value
	cmp	r7, #0				@ end of mask table?
	beq	1f
	and	r8, r0, r7			@ mask out the opcode bits
	ldr	r7, [r6], #4			@ opcode bits matching in mask
	cmp	r8, r7				@ NEON instruction?
	bne	2b
	get_thread_info r10
	mov	r7, #1
	strb	r7, [r10, #TI_USED_CP + 10]	@ mark CP#10 as used
	strb	r7, [r10, #TI_USED_CP + 11]	@ mark CP#11 as used
	b	do_vfp				@ let VFP handler handle this
1:
#endif
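
/*
 * Illustration (not from the original source): the table walk above is
 * roughly
 *
 *	struct { u32 mask, opcode; } *p;
 *	for (p = neon_opcodes; p->mask; p++)
 *		if ((insn & p->mask) == p->opcode)
 *			goto handle_neon;	// NEON is handled by the VFP code
 */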
	tst	r0, #0x08000000			@ only CDP/CPRT/LDC/STC have bit 27
	tstne	r0, #0x04000000			@ bit 26 set on both ARM and Thumb-2
	moveq	pc, lr				@ not a co-processor instruction
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
	and	r8, r0, #0x0f000000		@ mask out op-code bits
	teqne	r8, #0x0f000000			@ SWI (ARM6/7 bug)?
	moveq	pc, lr
#endif
	get_thread_info r10			@ get current thread
	and	r8, r0, #0x00000f00		@ mask out CP number
 THUMB(	lsr	r8, r8, #8		)
	mov	r7, #1
	add	r6, r10, #TI_USED_CP
 ARM(	strb	r7, [r6, r8, lsr #8]	)	@ set appropriate used_cp[]
 THUMB(	strb	r7, [r6, r8]		)	@ set appropriate used_cp[]
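
/*
 * Illustration (not from the original source): the bookkeeping above is
 * roughly
 *
 *	unsigned int cp = (insn >> 8) & 0xf;		// coprocessor number
 *	current_thread_info()->used_cp[cp] = 1;		// flag for context switch
 */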
#ifdef CONFIG_IWMMXT
	@ Test if we need to give access to iWMMXt coprocessors
	ldr	r5, [r10, #TI_FLAGS]
	rsbs	r7, r8, #(1 << 8)		@ CP 0 or 1 only
	movcss	r7, r5, lsr #(TIF_USING_IWMMXT + 1)
	bcs	iwmmxt_task_enable
#endif
 ARM(	add	pc, pc, r8, lsr #6	)	@ call corresponding handler
 THUMB(	lsl	r8, r8, #2		)
 THUMB(	add	pc, r8			)
	nop

	movw_pc	lr				@ CP#0
	W(b)	do_fpe				@ CP#1 (FPE)
	W(b)	do_fpe				@ CP#2 (FPE)
	movw_pc	lr				@ CP#3
#ifdef CONFIG_CRUNCH
	b	crunch_task_enable		@ CP#4 (MaverickCrunch)
	b	crunch_task_enable		@ CP#5 (MaverickCrunch)
	b	crunch_task_enable		@ CP#6 (MaverickCrunch)
#else
	movw_pc	lr				@ CP#4
	movw_pc	lr				@ CP#5
	movw_pc	lr				@ CP#6
#endif
	movw_pc	lr				@ CP#7
	movw_pc	lr				@ CP#8
	movw_pc	lr				@ CP#9
#ifdef CONFIG_VFP
	W(b)	do_vfp				@ CP#10 (VFP)
	W(b)	do_vfp				@ CP#11 (VFP)
#else
	movw_pc	lr				@ CP#10 (VFP)
	movw_pc	lr				@ CP#11 (VFP)
#endif
	movw_pc	lr				@ CP#12
	movw_pc	lr				@ CP#13
	movw_pc	lr				@ CP#14 (Debug)
	movw_pc	lr				@ CP#15 (Control)
#ifdef NEED_CPU_ARCHITECTURE
	.align	2
.LCcpu_architecture:
	.word	__cpu_architecture
#endif
#ifdef CONFIG_NEON
	.align	6

.LCneon_arm_opcodes:
	.word	0xfe000000			@ mask
	.word	0xf2000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf4000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode

.LCneon_thumb_opcodes:
	.word	0xef000000			@ mask
	.word	0xef000000			@ opcode

	.word	0xff100000			@ mask
	.word	0xf9000000			@ opcode

	.word	0x00000000			@ mask
	.word	0x00000000			@ opcode
#endif
do_fpe:
	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point
/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */
__und_usr_unknown:
	enable_irq
	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	do_undefinstr
ENDPROC(__und_usr_unknown)
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
	get_thread_info tsk
	mov	why, #0
	b	ret_to_user
ENDPROC(ret_from_exception)
/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
	add	ip, r1, #TI_CPU_SAVE
	ldr	r3, [r2, #TI_TP_VALUE]
 ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
 THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
 THUMB(	str	sp, [ip], #4		   )
 THUMB(	str	lr, [ip], #4		   )
#ifdef CONFIG_CPU_USE_DOMAINS
	ldr	r6, [r2, #TI_CPU_DOMAIN]
#endif
	set_tls	r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	ldr	r7, [r2, #TI_TASK]
	ldr	r8, =__stack_chk_guard
	ldr	r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_CPU_USE_DOMAINS
	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
#endif
	mov	r5, r0
	add	r4, r2, #TI_CPU_SAVE
	ldr	r0, =thread_notify_head
	mov	r1, #THREAD_NOTIFY_SWITCH
	bl	atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
	str	r7, [r8]
#endif
 THUMB(	mov	ip, r4			   )
	mov	r0, r5
 ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
 THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
 THUMB(	ldr	sp, [ip], #4		   )
 THUMB(	ldr	pc, [ip]		   )
ENDPROC(__switch_to)
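
/*
 * Illustration (not from the original source): this entry point is
 * declared on the C side along the lines of
 *
 *	struct task_struct *__switch_to(struct task_struct *prev,
 *					struct thread_info *prev_ti,
 *					struct thread_info *next_ti);
 */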
/*
 * User helpers.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
 *
 * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
 */
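
/*
 * Illustration (not from the original source): user space invokes a
 * helper by calling its fixed address directly, e.g. for the TLS helper
 * at 0xffff0fe0:
 *
 *	typedef void *(__kuser_get_tls_t)(void);
 *	#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)
 *
 *	void *tls = __kuser_get_tls();
 */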
 THUMB(	.arm	)

	.macro	usr_ret, reg
#ifdef CONFIG_ARM_THUMB
	bx	\reg
#else
	mov	pc, \reg
#endif
	.endm
	.align	5
	.globl	__kuser_helper_start
__kuser_helper_start:
/*
 * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
 * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
 */

__kuser_cmpxchg64:				@ 0xffff0f60

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg64
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg64
#elif defined(CONFIG_CPU_32v6K)

	stmfd	sp!, {r4, r5, r6, r7}
	ldrd	r4, r5, [r0]			@ load old val
	ldrd	r6, r7, [r1]			@ load new val
	smp_dmb	arm
1:	ldrexd	r0, r1, [r2]			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
	strexdeq r3, r6, r7, [r2]		@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if not, retry
	smp_dmb	arm
	rsbs	r0, r3, #0			@ set returned val and C flag
	ldmfd	sp!, {r4, r5, r6, r7}
	usr_ret	lr
#elif !defined(CONFIG_SMP)

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg64
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle of
	 * the critical sequence.  The same strategy as for cmpxchg is used.
	 */
	stmfd	sp!, {r4, r5, r6, lr}
	ldmia	r0, {r4, r5}			@ load old val
	ldmia	r1, {r6, lr}			@ load new val
1:	ldmia	r2, {r0, r1}			@ load current val
	eors	r3, r0, r4			@ compare with oldval (1)
	eoreqs	r3, r1, r5			@ compare with oldval (2)
2:	stmeqia	r2, {r6, lr}			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	ldmfd	sp!, {r4, r5, r6, pc}
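
/*
 * Illustration (not from the original source): whichever variant is
 * built, the user-visible contract of this helper is roughly
 *
 *	int __kernel_cmpxchg64(const int64_t *oldval, const int64_t *newval,
 *			       volatile int64_t *ptr);
 *
 * returning 0 with the C flag set if *ptr was updated, and non-zero with
 * the C flag clear otherwise.
 */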
	.text
kuser_cmpxchg64_fixup:
	@ Called from the kuser_cmpxchg_check macro.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
#if __LINUX_ARM_ARCH__ < 6
	bcc	kuser_cmpxchg32_fixup
#endif
	mov	pc, lr
	.previous
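
/*
 * Illustration (not from the original source): in C the fixup check above
 * is a single unsigned range comparison,
 *
 *	if (addr - first_insn <= last_insn - first_insn)
 *		regs->ARM_pc = first_insn;	// restart critical section
 *
 * where first_insn/last_insn are the user-space addresses of 1b and 2b.
 */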
#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif

#else
#error "incoherent kernel configuration"
#endif

	/* pad to next slot */
	.rept	(16 - (. - __kuser_cmpxchg64)/4)
	.word	0
	.endr
	.align	5

__kuser_memory_barrier:				@ 0xffff0fa0
	smp_dmb	arm
	usr_ret	lr

	.align	5
__kuser_cmpxchg:				@ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

	/*
	 * Poor you.  No fast solution possible...
	 * The kernel itself must perform the operation.
	 * A special ghost syscall is used for that (see traps.c).
	 */
	stmfd	sp!, {r7, lr}
	ldr	r7, 1f			@ it's 20 bits
	swi	__ARM_NR_cmpxchg
	ldmfd	sp!, {r7, pc}
1:	.word	__ARM_NR_cmpxchg
#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

	/*
	 * The only thing that can break atomicity in this cmpxchg
	 * implementation is either an IRQ or a data abort exception
	 * causing another process/thread to be scheduled in the middle
	 * of the critical sequence.  To prevent this, code is added to
	 * the IRQ and data abort exception handlers to set the pc back
	 * to the beginning of the critical section if it is found to be
	 * within that critical section (see kuser_cmpxchg32_fixup).
	 */
1:	ldr	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
2:	streq	r1, [r2]			@ store newval if eq
	rsbs	r0, r3, #0			@ set return val and C flag
	usr_ret	lr
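
/*
 * Illustration (not from the original source): the user-visible contract
 * of this helper is roughly
 *
 *	int __kernel_cmpxchg(int oldval, int newval, volatile int *ptr);
 *
 * returning 0 with the C flag set if *ptr was updated, and non-zero with
 * the C flag clear otherwise.
 */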
	.text
kuser_cmpxchg32_fixup:
	@ Called from kuser_cmpxchg64_fixup above.
	@ r4 = address of interrupted insn (must be preserved).
	@ sp = saved regs. r7 and r8 are clobbered.
	@ 1b = first critical insn, 2b = last critical insn.
	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
	mov	r7, #0xffff0fff
	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
	subs	r8, r4, r7
	rsbcss	r8, r8, #(2b - 1b)
	strcs	r7, [sp, #S_PC]
	mov	pc, lr
	.previous

#else
#warning "NPTL on non MMU needs fixing"
	mov	r0, #-1
	adds	r0, r0, #0
	usr_ret	lr
#endif
#else

	smp_dmb	arm
1:	ldrex	r3, [r2]			@ load current val
	subs	r3, r3, r0			@ compare with oldval
	strexeq	r3, r1, [r2]			@ store newval if eq
	teqeq	r3, #1				@ success?
	beq	1b				@ if not, retry
	rsbs	r0, r3, #0			@ set return val and C flag
	/* beware -- each __kuser slot must be 8 instructions max */
	ALT_SMP(b	__kuser_memory_barrier)
	ALT_UP(usr_ret	lr)

#endif

	.align	5
__kuser_get_tls:				@ 0xffff0fe0
	ldr	r0, [pc, #(16 - 8)]	@ read TLS, set in kuser_get_tls_init
	usr_ret	lr
	mrc	p15, 0, r0, c13, c0, 3	@ 0xffff0fe8 hardware TLS code
	.rep	4
	.word	0			@ 0xffff0ff0 software TLS value, then
	.endr				@ pad up to __kuser_helper_version
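
/*
 * Illustration (not from the original source): to user space this slot
 * behaves as
 *
 *	void *__kernel_get_tls(void);
 *
 * returning the TLS pointer either from the word patched at boot or from
 * the hardware TLS register, depending on the CPU.
 */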
__kuser_helper_version:				@ 0xffff0ffc
	.word	((__kuser_helper_end - __kuser_helper_start) >> 5)

	.globl	__kuser_helper_end
__kuser_helper_end:

 THUMB(	.thumb	)
/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.  Note that this code must not
 * exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
	.macro	vector_stub, name, mode, correction=0
	.align	5

vector_\name:
	.if \correction
	sub	lr, lr, #\correction
	.endif

	@
	@ Save r0, lr_<exception> (parent PC) and spsr_<exception>
	@ (parent CPSR)
	@
	stmia	sp, {r0, lr}		@ save r0, lr
	mrs	lr, spsr
	str	lr, [sp, #8]		@ save spsr

	@
	@ Prepare for SVC32 mode.  IRQs remain disabled.
	@
	mrs	r0, cpsr
	eor	r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
	msr	spsr_cxsf, r0

	@
	@ the branch table must immediately follow this code
	@
	and	lr, lr, #0x0f
 THUMB(	adr	r0, 1f			)
 THUMB(	ldr	lr, [r0, lr, lsl #2]	)
	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
	movs	pc, lr			@ branch to handler in SVC mode
ENDPROC(vector_\name)

	.align	2
	@ handler addresses follow this label
1:
	.endm
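
/*
 * Illustration (not from the original source): the computed load above
 * indexes the table that follows each stub by the exception-mode bits of
 * the interrupted context, roughly
 *
 *	handler = branch_table[spsr & 0xf];	// 0 = USR, 3 = SVC, ...
 */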
	.globl	__stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
	vector_stub	irq, IRQ_MODE, 4

	.long	__irq_usr			@  0  (USR_26 / USR_32)
	.long	__irq_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__irq_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__irq_svc			@  3  (SVC_26 / SVC_32)
	.long	__irq_invalid			@  4
	.long	__irq_invalid			@  5
	.long	__irq_invalid			@  6
	.long	__irq_invalid			@  7
	.long	__irq_invalid			@  8
	.long	__irq_invalid			@  9
	.long	__irq_invalid			@  a
	.long	__irq_invalid			@  b
	.long	__irq_invalid			@  c
	.long	__irq_invalid			@  d
	.long	__irq_invalid			@  e
	.long	__irq_invalid			@  f
/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	dabt, ABT_MODE, 8

	.long	__dabt_usr			@  0  (USR_26 / USR_32)
	.long	__dabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__dabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__dabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__dabt_invalid			@  4
	.long	__dabt_invalid			@  5
	.long	__dabt_invalid			@  6
	.long	__dabt_invalid			@  7
	.long	__dabt_invalid			@  8
	.long	__dabt_invalid			@  9
	.long	__dabt_invalid			@  a
	.long	__dabt_invalid			@  b
	.long	__dabt_invalid			@  c
	.long	__dabt_invalid			@  d
	.long	__dabt_invalid			@  e
	.long	__dabt_invalid			@  f
/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
	vector_stub	pabt, ABT_MODE, 4

	.long	__pabt_usr			@  0  (USR_26 / USR_32)
	.long	__pabt_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__pabt_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__pabt_svc			@  3  (SVC_26 / SVC_32)
	.long	__pabt_invalid			@  4
	.long	__pabt_invalid			@  5
	.long	__pabt_invalid			@  6
	.long	__pabt_invalid			@  7
	.long	__pabt_invalid			@  8
	.long	__pabt_invalid			@  9
	.long	__pabt_invalid			@  a
	.long	__pabt_invalid			@  b
	.long	__pabt_invalid			@  c
	.long	__pabt_invalid			@  d
	.long	__pabt_invalid			@  e
	.long	__pabt_invalid			@  f
/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
	vector_stub	und, UND_MODE

	.long	__und_usr			@  0  (USR_26 / USR_32)
	.long	__und_invalid			@  1  (FIQ_26 / FIQ_32)
	.long	__und_invalid			@  2  (IRQ_26 / IRQ_32)
	.long	__und_svc			@  3  (SVC_26 / SVC_32)
	.long	__und_invalid			@  4
	.long	__und_invalid			@  5
	.long	__und_invalid			@  6
	.long	__und_invalid			@  7
	.long	__und_invalid			@  8
	.long	__und_invalid			@  9
	.long	__und_invalid			@  a
	.long	__und_invalid			@  b
	.long	__und_invalid			@  c
	.long	__und_invalid			@  d
	.long	__und_invalid			@  e
	.long	__und_invalid			@  f
	.align	5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
	subs	pc, lr, #4
/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
	b	vector_addrexcptn
/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
	.align	5
.LCvswi:
	.word	vector_swi

	.globl	__stubs_end
__stubs_end:

	.equ	stubs_offset, __vectors_start + 0x200 - __stubs_start
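
/*
 * Illustration (not from the original source): a branch written as
 * "W(b) vector_irq + stubs_offset" is assembled at its link address inside
 * __vectors_start, but both the vectors and the stubs run from copies at
 * 0xffff0000 and 0xffff0200 respectively.  Since
 *
 *	stubs_offset = __vectors_start + 0x200 - __stubs_start
 *
 * adding it to each branch target makes the PC-relative displacement
 * correct for the copied code.
 */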
	.globl	__vectors_start
__vectors_start:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
	W(b)	vector_und + stubs_offset
	W(ldr)	pc, .LCvswi + stubs_offset
	W(b)	vector_pabt + stubs_offset
	W(b)	vector_dabt + stubs_offset
	W(b)	vector_addrexcptn + stubs_offset
	W(b)	vector_irq + stubs_offset
	W(b)	vector_fiq + stubs_offset

	.globl	__vectors_end
__vectors_end:
	.data

	.globl	cr_alignment
	.globl	cr_no_alignment
cr_alignment:
	.space	4
cr_no_alignment:
	.space	4
#ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
handle_arch_irq:
	.space	4
#endif