/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/kernel/entry-common.S
 *
 *  Copyright (C) 2000 Russell King
 */

#include <asm/assembler.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/unwind.h>
#include <asm/page.h>
#ifdef CONFIG_AEABI
#include <asm/unistd-oabi.h>
#endif

	.equ	NR_syscalls, __NR_syscalls

#include "entry-header.S"

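/*
 * When tracing or user context tracking is enabled, the SWI entry path
 * calls into C code before dispatching the syscall; those calls clobber
 * lr, so the saved return address has to live in callee-saved r9 rather
 * than staying in lr.
 */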
saved_psr	.req	r8
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING_USER)
saved_pc	.req	r9
#define TRACE(x...) x
#else
saved_pc	.req	lr
#define TRACE(x...)
#endif

	.section .entry.text,"ax",%progbits
	.align	5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING_USER) || \
	IS_ENABLED(CONFIG_DEBUG_RSEQ))
/*
 * This is the fast syscall return path.  We do as little as possible here,
 * such as avoiding writing r0 to the stack.  We only use this path if we
 * have tracing, context tracking and rseq debug disabled - the overheads
 * from those features make this path too inefficient.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
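	@ all work-pending TIF flags are allocated in the low 16 bits, so
	@ shifting them into the top half and testing for non-zero checks
	@ them all in one go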
	movs	r1, r1, lsl #16
	bne	fast_work_pending

	restore_user_regs fast = 1, offset = S_OFF
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Ok, we need to do extra processing, enter the slow path. */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
	/* fall through to work_pending */
#else
/*
 * The "replacement" ret_fast_syscall for when tracing, context tracking,
 * or rseq debug is enabled.  As we will need to call out to some C functions,
 * we save r0 first to avoid needing to save registers around each C function
 * call.
 */
ret_fast_syscall:
__ret_fast_syscall:
 UNWIND(.fnstart	)
 UNWIND(.cantunwind	)
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
	movs	r1, r1, lsl #16
	beq	no_work_pending
 UNWIND(.fnend		)
ENDPROC(ret_fast_syscall)

	/* Slower path - fall through to work_pending */
#endif

	tst	r1, #_TIF_SYSCALL_WORK
	bne	__sys_trace_return_nosave
slow_work_pending:
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_work_pending
	cmp	r0, #0
	beq	no_work_pending
	movlt	scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
	str	scno, [tsk, #TI_ABI_SYSCALL]	@ make sure tracers see update
	ldmia	sp, {r0 - r6}			@ have to reload r0 - r6
	b	local_restart			@ ... and off we go
ENDPROC(ret_fast_syscall)

/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 * IRQs may be enabled here, so always disable them.  Note that we use the
 * "notrace" version to avoid calling into the tracing code unnecessarily.
 * do_work_pending() will update this state if necessary.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
#if IS_ENABLED(CONFIG_DEBUG_RSEQ)
	/* do_rseq_syscall needs interrupts enabled. */
	enable_irq_notrace			@ enable interrupts
	mov	r0, sp				@ 'regs'
	bl	do_rseq_syscall
#endif
	disable_irq_notrace			@ disable interrupts
ENTRY(ret_to_user_from_irq)
	ldr	r1, [tsk, #TI_FLAGS]
	movs	r1, r1, lsl #16
	bne	slow_work_pending
no_work_pending:
	asm_trace_hardirqs_on save = 0

	ct_user_enter save = 0

#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase_on_task_stack
#endif
	restore_user_regs fast = 0, offset = 0
ENDPROC(ret_to_user_from_irq)
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
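	@ copy_thread() leaves the child's saved r5 zeroed for a user fork and
	@ sets it to the thread function for a kernel thread; in the latter
	@ case call that function with the argument from r4, returning to 1: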
	cmp	r5, #0
	movne	r0, r4
	badrne	lr, 1f
	retne	r5
1:	get_thread_info tsk
	b	ret_slow_syscall
ENDPROC(ret_from_fork)

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

	.align	5
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
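/*
 * SWI entry points used on CPUs that need Spectre-BHB mitigation: one
 * variant overwrites the branch history with a loop of eight taken
 * branches, the other invalidates the branch predictor with BPIALL.
 * Both then join the common handler at label 3: in vector_swi.
 */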
ENTRY(vector_bhb_loop8_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mov	r8, #8
1:	b	2f
2:	subs	r8, r8, #1
	bne	1b
	dsb	nsh
	isb
	b	3f
ENDPROC(vector_bhb_loop8_swi)

	.align	5
ENTRY(vector_bhb_bpiall_swi)
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}
	mcr	p15, 0, r8, c7, c5, 6	@ BPIALL
	isb
	b	3f
ENDPROC(vector_bhb_bpiall_swi)
#endif
	.align	5
ENTRY(vector_swi)
#ifdef CONFIG_CPU_V7M
	v7m_exception_entry
#else
	sub	sp, sp, #PT_REGS_SIZE
	stmia	sp, {r0 - r12}			@ Calling r0 - r12
3:
 ARM(	add	r8, sp, #S_PC		)
 ARM(	stmdb	r8, {sp, lr}^		)	@ Calling sp, lr
 THUMB(	mov	r8, sp			)
 THUMB(	store_user_sp_lr r8, r10, S_SP	)	@ calling sp, lr
	mrs	saved_psr, spsr			@ called from non-FIQ mode, so ok.
 TRACE(	mov	saved_pc, lr		)
	str	saved_pc, [sp, #S_PC]		@ Save calling PC
	str	saved_psr, [sp, #S_PSR]		@ Save CPSR
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
#endif
	reload_current r10, ip
	zero_fp
	alignment_trap r10, ip, cr_alignment
	asm_trace_hardirqs_on save=0
	enable_irq_notrace
	ct_user_exit save=0

	/*
	 * Get the system call number.
	 */

#if defined(CONFIG_OABI_COMPAT)

	/*
	 * If we have CONFIG_OABI_COMPAT then we need to look at the swi
	 * value to determine if it is an EABI or an old ABI call.
	 */
#ifdef CONFIG_ARM_THUMB
	tst	saved_psr, #PSR_T_BIT
	movne	r10, #0				@ no thumb OABI emulation
 USER(	ldreq	r10, [saved_pc, #-4]	)	@ get SWI instruction
#else
 USER(	ldr	r10, [saved_pc, #-4]	)	@ get SWI instruction
#endif
 ARM_BE8(rev	r10, r10)			@ little endian instruction

#elif defined(CONFIG_AEABI)

	/*
	 * Pure EABI user space always puts the syscall number into scno (r7).
	 */
#elif defined(CONFIG_ARM_THUMB)
	/* Legacy ABI only, possibly thumb mode. */
	tst	saved_psr, #PSR_T_BIT		@ this is SPSR from save_user_regs
	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
 USER(	ldreq	scno, [saved_pc, #-4]	)

#else
	/* Legacy ABI only. */
 USER(	ldr	scno, [saved_pc, #-4]	)	@ get SWI instruction
#endif

	/* saved_psr and saved_pc are now dead */

	uaccess_disable tbl
	get_thread_info tsk

	adr	tbl, sys_call_table		@ load syscall table pointer

#if defined(CONFIG_OABI_COMPAT)
	/*
	 * If the swi argument is zero, this is an EABI call and we do nothing.
	 *
	 * If this is an old ABI call, get the syscall number into scno and
	 * get the old ABI syscall table address.
	 */
	bics	r10, r10, #0xff000000
	strne	r10, [tsk, #TI_ABI_SYSCALL]
	streq	scno, [tsk, #TI_ABI_SYSCALL]
	eorne	scno, r10, #__NR_OABI_SYSCALL_BASE
	ldrne	tbl, =sys_oabi_call_table
#elif !defined(CONFIG_AEABI)
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	str	scno, [tsk, #TI_ABI_SYSCALL]
	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
#else
	str	scno, [tsk, #TI_ABI_SYSCALL]
#endif
	/*
	 * Reload the registers that may have been corrupted on entry to
	 * the syscall assembly (by tracing or context tracking.)
	 */
 TRACE(	ldmia sp, {r0 - r3}		)

local_restart:
	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
	stmdb	sp!, {r4, r5}			@ push fifth and sixth args

	tst	r10, #_TIF_SYSCALL_WORK		@ are we tracing syscalls?
	bne	__sys_trace

	invoke_syscall tbl, scno, r10, __ret_fast_syscall

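	/*
	 * We only get here when the syscall number is outside the table:
	 * numbers at or above __ARM_NR_BASE are ARM-private calls handled
	 * by arm_syscall(), anything else is not implemented.
	 */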
	add	r1, sp, #S_OFF
2:	cmp	scno, #(__ARM_NR_BASE - __NR_SYSCALL_BASE)
	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
	bcs	arm_syscall
	mov	why, #0				@ no longer a real syscall
	b	sys_ni_syscall			@ not private func

#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
	/*
	 * We failed to handle a fault trying to access the page
	 * containing the swi instruction, but we're not really in a
	 * position to return -EFAULT.  Instead, return back to the
	 * instruction and re-enter the user fault handling path trying
	 * to page it in.  This will likely result in sending SEGV to the
	 * current task.
	 */
9001:
	sub	lr, saved_pc, #4
	str	lr, [sp, #S_PC]
	get_thread_info tsk
	b	ret_fast_syscall
#endif
ENDPROC(vector_swi)
	.ltorg

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	add	r0, sp, #S_OFF
	bl	syscall_trace_enter
	mov	scno, r0
	invoke_syscall tbl, scno, r10, __sys_trace_return, reload=1
	cmp	scno, #-1			@ skip the syscall?
	bne	2b
	add	sp, sp, #S_OFF			@ restore stack

__sys_trace_return_nosave:
	enable_irq_notrace
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r0, sp
	bl	syscall_trace_exit
	b	ret_slow_syscall

	.macro	syscall_table_start, sym
	.equ	__sys_nr, 0
	.type	\sym, #object
ENTRY(\sym)
	.endm

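	/*
	 * Emit the table entry for syscall number \nr, padding any gap
	 * since the previous entry with sys_ni_syscall so that the table
	 * index always matches the syscall number.
	 */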
	.macro	syscall, nr, func
	.ifgt	__sys_nr - \nr
	.error	"Duplicated/unordered system call entry"
	.endif
	.rept	\nr - __sys_nr
	.long	sys_ni_syscall
	.endr
	.long	\func
	.equ	__sys_nr, \nr + 1
	.endm

	.macro	syscall_table_end, sym
	.ifgt	__sys_nr - __NR_syscalls
	.error	"System call table too big"
	.endif
	.rept	__NR_syscalls - __sys_nr
	.long	sys_ni_syscall
	.endr
	.size	\sym, . - \sym
	.endm

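/*
 * calls-eabi.S and calls-oabi.S are generated from the architecture's
 * syscall table and expand to one __SYSCALL() or __SYSCALL_WITH_COMPAT()
 * line per entry; map those onto the 'syscall' macro above, using the
 * native entry here.
 */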
#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
#define __SYSCALL(nr, func) syscall nr, func

/*
 * This is the syscall table declaration for native ABI syscalls.
 * With EABI a couple of syscalls are obsolete and defined as sys_ni_syscall.
 */
	syscall_table_start sys_call_table
#ifdef CONFIG_AEABI
#include <calls-eabi.S>
#else
#include <calls-oabi.S>
#endif
	syscall_table_end sys_call_table

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
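@ sys_syscall is the indirect "syscall(number, ...)" entry point: r0 holds
@ the number of the call to make, so shift the remaining arguments down one
@ register each and re-dispatch through the table.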
sys_syscall:
	bic	scno, r0, #__NR_OABI_SYSCALL_BASE
	cmp	scno, #__NR_syscall - __NR_SYSCALL_BASE
	cmpne	scno, #NR_syscalls		@ check range
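	@ Spectre v1 hardening: out-of-range numbers already fail the check
	@ above, but clamp scno and add a speculation barrier so a
	@ mispredicted bounds check cannot index beyond the table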
#ifdef CONFIG_CPU_SPECTRE
	movhs	scno, #0
	csdb
#endif
	stmialo	sp, {r5, r6}		@ shuffle args
	movlo	r0, r1
	movlo	r1, r2
	movlo	r2, r3
	movlo	r3, r4
	ldrlo	pc, [tbl, scno, lsl #2]
	b	sys_ni_syscall
ENDPROC(sys_syscall)

sys_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)

sys_rt_sigreturn_wrapper:
	add	r0, sp, #S_OFF
	mov	why, #0		@ prevent syscall restart handling
	b	sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

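/*
 * EABI user space passes sizeof(struct statfs64) == 88 because of 64-bit
 * alignment padding, while the kernel's packed definition is 84 bytes;
 * fix up the size argument so the sys_[f]statfs64() size check passes.
 */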
sys_statfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_statfs64
ENDPROC(sys_statfs64_wrapper)

sys_fstatfs64_wrapper:
	teq	r1, #88
	moveq	r1, #84
	b	sys_fstatfs64
ENDPROC(sys_fstatfs64_wrapper)

/*
 * Note: off_4k (r5) is always units of 4K.  If we can't do the requested
 * offset, we return EINVAL.
 */
sys_mmap2:
	str	r5, [sp, #4]
	b	sys_mmap_pgoff
ENDPROC(sys_mmap2)

#ifdef CONFIG_OABI_COMPAT

/*
 * These are syscalls with argument register differences
 */

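/*
 * OABI does not align 64-bit arguments to even/odd register pairs the way
 * EABI does, so the loff_t arguments arrive one register earlier; shuffle
 * the registers and the stacked fifth/sixth arguments to match what the
 * regular sys_* entry points expect.
 */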
sys_oabi_pread64:
	stmia	sp, {r3, r4}
	b	sys_pread64
ENDPROC(sys_oabi_pread64)

sys_oabi_pwrite64:
	stmia	sp, {r3, r4}
	b	sys_pwrite64
ENDPROC(sys_oabi_pwrite64)

sys_oabi_truncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_truncate64
ENDPROC(sys_oabi_truncate64)

sys_oabi_ftruncate64:
	mov	r3, r2
	mov	r2, r1
	b	sys_ftruncate64
ENDPROC(sys_oabi_ftruncate64)

sys_oabi_readahead:
	str	r3, [sp]
	mov	r3, r2
	mov	r2, r1
	b	sys_readahead
ENDPROC(sys_oabi_readahead)

/*
 * Let's declare a second syscall table for old ABI binaries
 * using the compatibility syscall entries.
 */
	syscall_table_start sys_oabi_call_table
#undef __SYSCALL_WITH_COMPAT
#define __SYSCALL_WITH_COMPAT(nr, native, compat)	__SYSCALL(nr, compat)
#include <calls-oabi.S>
	syscall_table_end sys_oabi_call_table

#endif