/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3
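// These values index the handler name table used by bad_mode() in traps.c.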

	.macro	kernel_entry, el, regsize = 64
	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	push	x28, x29
	push	x26, x27
	push	x24, x25
	push	x22, x23
	push	x20, x21
	push	x18, x19
	push	x16, x17
	push	x14, x15
	push	x12, x13
	push	x10, x11
	push	x8, x9
	push	x6, x7
	push	x4, x5
	push	x2, x3
	push	x0, x1
	.if	\el == 0
	mrs	x21, sp_el0
	get_thread_info tsk			// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TI_FLAGS]		// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.
	.else
	add	x21, sp, #S_FRAME_SIZE
	.endif
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]
	stp	x22, x23, [sp, #S_PC]

	/*
	 * Set syscallno to -1 by default (overridden later if real syscall).
	 */
	.if	\el == 0
	mvn	x21, xzr
	str	x21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm
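
/*
 * Note: the frame laid out by kernel_entry mirrors struct pt_regs:
 * regs[0..30], then sp, pc, pstate, orig_x0 and syscallno. The S_*
 * offsets are generated from that struct in asm-offsets.c.
 */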

	.macro	kernel_exit, el, ret = 0
	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	.endif
	.if	\ret
	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
	add	sp, sp, S_X2
	.else
	pop	x0, x1
	.endif
	pop	x2, x3				// load the rest of the registers
	pop	x4, x5
	pop	x6, x7
	pop	x8, x9
	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	.if	\el == 0
	msr	sp_el0, x23
	.endif
	pop	x10, x11
	pop	x12, x13
	pop	x14, x15
	pop	x16, x17
	pop	x18, x19
	pop	x20, x21
	pop	x22, x23
	pop	x24, x25
	pop	x26, x27
	pop	x28, x29
	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
	eret					// return to the saved context
	.endm

	.macro	get_thread_info, rd
	mov	\rd, sp
	and	\rd, \rd, #~(THREAD_SIZE - 1)	// top of stack
	.endm
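
/*
 * The kernel stack is THREAD_SIZE aligned with struct thread_info at
 * its lowest address, so clearing the low bits of the current SP is
 * enough to locate it.
 */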

/*
 * These are the registers used in the syscall handler; they allow, in
 * theory, up to seven arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr	.req	x25		// number of system calls
scno	.req	x26		// syscall number
stbl	.req	x27		// syscall table pointer
tsk	.req	x28		// current thread_info
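// Note: x25-x28 are callee-saved, so their contents survive the C
// functions called out of this file.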

/*
 * Interrupt handling.
 */
	.macro	irq_handler
	ldr	x1, handle_arch_irq
	mov	x0, sp
	blr	x1
	.endm
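
/*
 * handle_arch_irq (the .quad at the bottom of this file) is a function
 * pointer filled in at boot by the interrupt controller driver, e.g.
 * the GIC, via set_handle_irq().
 */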

	.text

/*
 * Exception vectors.
 */

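/*
 * VBAR_EL1 must be 2KB aligned, hence the .align 11 below. Each of the
 * 16 vector slots is 128 bytes wide; the ventry macro (see
 * <asm/assembler.h>) provides that spacing.
 */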
	.align	11
ENTRY(vectors)
	ventry	el1_sync_invalid		// Synchronous EL1t
	ventry	el1_irq_invalid			// IRQ EL1t
	ventry	el1_fiq_invalid			// FIQ EL1t
	ventry	el1_error_invalid		// Error EL1t

	ventry	el1_sync			// Synchronous EL1h
	ventry	el1_irq				// IRQ EL1h
	ventry	el1_fiq_invalid			// FIQ EL1h
	ventry	el1_error_invalid		// Error EL1h

	ventry	el0_sync			// Synchronous 64-bit EL0
	ventry	el0_irq				// IRQ 64-bit EL0
	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
	ventry	el0_error_invalid		// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	ventry	el0_sync_compat			// Synchronous 32-bit EL0
	ventry	el0_irq_compat			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
	ventry	el0_error_invalid_compat	// Error 32-bit EL0
#else
	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
	ventry	el0_irq_invalid			// IRQ 32-bit EL0
	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
	ventry	el0_error_invalid		// Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	b	bad_mode
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
	inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv
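	/*
	 * The debug exception classes are the largest EC values tested
	 * above, so a single b.ge after the ESR_EL1_EC_BREAKPT_EL1
	 * compare routes breakpoints, single-step, watchpoints and BRK
	 * instructions to el1_dbg.
	 */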
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
	enable_dbg
	// re-enable interrupts if they were enabled in the aborted context
	tbnz	x23, #7, 1f			// PSR_I_BIT
	enable_irq
1:
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	// disable interrupts before pulling preserved data off the stack
	disable_irq
	kernel_exit 1
el1_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	enable_dbg
	mov	x2, sp
	b	do_sp_pc_abort
el1_undef:
	/*
	 * Undefined instruction
	 */
	enable_dbg
	mov	x0, sp
	b	do_undefinstr
el1_dbg:
	/*
	 * Debug exception handling
	 */
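	// EC values for hardware debug exceptions taken from the current
	// EL are odd (bit 0 set). BRK64 (0x3C) is even, so bump it here
	// to let the tbz below accept BRK from EL1 as well.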
	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	enable_dbg
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	get_thread_info tsk
	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
	cbnz	w24, 1f				// preempt count != 0
	ldr	x0, [tsk, #TI_FLAGS]		// get flags
	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
	bl	el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
	mov	x24, lr
1:	bl	preempt_schedule_irq		// irq en/disable is done inside
	ldr	x0, [tsk, #TI_FLAGS]		// get new task's TI_FLAGS
	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
	ret	x24
#endif

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	adr	lr, ret_to_user
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	adr	lr, ret_to_user
	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	/*
	 * AArch32 syscall handling
	 */
	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
	uxtw	scno, w7			// syscall number in w7 (r7)
	mov	sc_nr, #__NR_compat_syscalls
	b	el0_svc_naked

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x0, far_el1
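	// far_el1 may carry an address tag in bits 63:56 (TBI is enabled
	// for userspace), so clear them to leave the real fault address.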
	bic	x0, x0, #(0xff << 56)
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x1, x25
	mov	x2, sp
	b	do_mem_abort
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x0, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
	mov	x2, sp
	b	do_mem_abort
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_dbg
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_acc
el0_fpsimd_exc:
	/*
	 * Floating Point or Advanced SIMD exception
	 */
	enable_dbg
	mov	x0, x25
	mov	x1, sp
	b	do_fpsimd_exc
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	mrs	x0, far_el1
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x1, x25
	mov	x2, sp
	b	do_sp_pc_abort
el0_undef:
	/*
	 * Undefined instruction
	 */
	// enable interrupts before calling the main handler
	enable_dbg_and_irq
	mov	x0, sp
	b	do_undefinstr
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_dbg
	b	ret_to_user
el0_inv:
	enable_dbg
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mrs	x2, esr_el1
	b	bad_mode
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 * x0 = previous task_struct (must be preserved across the switch)
 * x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
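/*
 * The stp/ldp sequence below must match the layout of struct cpu_context
 * in <asm/processor.h> (x19-x28, fp, sp, pc), which is the field that
 * THREAD_CPU_CONTEXT points at inside task_struct.
 */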
ENTRY(cpu_switch_to)
	add	x8, x0, #THREAD_CPU_CONTEXT
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, #THREAD_CPU_CONTEXT
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path. We do as little as possible here:
 * x0 stays in its register and is only written back to the kernel stack
 * if we have to take the slow path.
 */
ret_fast_syscall:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, fast_work_pending
	enable_step_tsk x1, x2
	kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	x0, [sp, #S_X0]			// returned x0
work_pending:
	tbnz	x1, #TIF_NEED_RESCHED, work_resched
	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
	ldr	x2, [sp, #S_PSTATE]
	mov	x0, sp				// 'regs'
	tst	x2, #PSR_MODE_MASK		// user mode regs?
	b.ne	no_work_pending			// returning to kernel
	enable_irq				// enable interrupts for do_notify_resume()
	bl	do_notify_resume
	b	ret_to_user
work_resched:
	bl	schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_irq				// disable interrupts
	ldr	x1, [tsk, #TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
	enable_step_tsk x1, x2
no_work_pending:
	kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
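// For kernel threads, copy_thread() places the thread function in x19
// and its argument in x20; x19 is zero for a user fork.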
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_thread_info tsk
	b	ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	adrp	stbl, sys_call_table		// load syscall table pointer
	uxtw	scno, w8			// syscall number in w8
	mov	sc_nr, #__NR_syscalls
el0_svc_naked:					// compat entry point
	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
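	// orig_x0 and syscallno sit next to each other in pt_regs; the
	// signal code uses them when restarting an interrupted syscall.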
	enable_dbg_and_irq

	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall hooks
	tst	x16, #_TIF_SYSCALL_WORK
	b.ne	__sys_trace
	adr	lr, ret_fast_syscall		// return address
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine
ni_sys:
	mov	x0, sp
	b	do_ni_syscall
ENDPROC(el0_svc)

	/*
	 * This is the really slow path.  We're going to be doing context
	 * switches, and waiting for our parent to respond.
	 */
__sys_trace:
	mov	x0, sp
	bl	syscall_trace_enter
	adr	lr, __sys_trace_return		// return address
	uxtw	scno, w0			// syscall number (possibly new)
	mov	x1, sp				// pointer to regs
	cmp	scno, sc_nr			// check upper syscall limit
	b.hs	ni_sys
	ldp	x0, x1, [sp]			// restore the syscall args
	ldp	x2, x3, [sp, #S_X2]
	ldp	x4, x5, [sp, #S_X4]
	ldp	x6, x7, [sp, #S_X6]
	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
	br	x16				// call sys_* routine

__sys_trace_return:
	str	x0, [sp]			// save returned x0
	mov	x0, sp
	bl	syscall_trace_exit
	b	ret_to_user
649
650/*
651 * Special system call wrappers.
652 */
60ffc30d
CM
653ENTRY(sys_rt_sigreturn_wrapper)
654 mov x0, sp
655 b sys_rt_sigreturn
656ENDPROC(sys_rt_sigreturn_wrapper)
657
60ffc30d
CM
658ENTRY(handle_arch_irq)
659 .quad 0