Remove bogus BUG() in kernel/exit.c
[linux-2.6-block.git] / arch / i386 / kernel / entry.S
CommitLineData
1da177e4
LT
1/*
2 * linux/arch/i386/entry.S
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 */
6
7/*
8 * entry.S contains the system-call and fault low-level handling routines.
9 * This also contains the timer-interrupt handler, as well as all interrupts
10 * and faults that can result in a task-switch.
11 *
12 * NOTE: This code handles signal-recognition, which happens every time
13 * after a timer-interrupt and after each system call.
14 *
15 * I changed all the .align's to 4 (16 byte alignment), as that's faster
16 * on a 486.
17 *
18 * Stack layout in 'ret_from_system_call':
19 * ptrace needs to have all regs on the stack.
20 * if the order here is changed, it needs to be
21 * updated in fork.c:copy_process, signal.c:do_signal,
22 * ptrace.c and ptrace.h
23 *
24 * 0(%esp) - %ebx
25 * 4(%esp) - %ecx
26 * 8(%esp) - %edx
27 * C(%esp) - %esi
28 * 10(%esp) - %edi
29 * 14(%esp) - %ebp
30 * 18(%esp) - %eax
31 * 1C(%esp) - %ds
32 * 20(%esp) - %es
33 * 24(%esp) - orig_eax
34 * 28(%esp) - %eip
35 * 2C(%esp) - %cs
36 * 30(%esp) - %eflags
37 * 34(%esp) - %oldesp
38 * 38(%esp) - %oldss
39 *
40 * "current" is in register %ebx during any slow entries.
41 */
42
43#include <linux/config.h>
44#include <linux/linkage.h>
45#include <asm/thread_info.h>
46#include <asm/errno.h>
47#include <asm/segment.h>
48#include <asm/smp.h>
49#include <asm/page.h>
50#include <asm/desc.h>
51#include "irq_vectors.h"
52
53#define nr_syscalls ((syscall_table_size)/4)
54
	# Byte offsets of the saved registers within struct pt_regs on the
	# kernel stack.  These must match the SAVE_ALL push order below and
	# the consumers listed in the header comment (fork.c, signal.c,
	# ptrace.c, ptrace.h).
55EBX = 0x00
56ECX = 0x04
57EDX = 0x08
58ESI = 0x0C
59EDI = 0x10
60EBP = 0x14
61EAX = 0x18
62DS = 0x1C
63ES = 0x20
64ORIG_EAX = 0x24
65EIP = 0x28
66CS = 0x2C
67EFLAGS = 0x30
68OLDESP = 0x34
69OLDSS = 0x38
70
	# Selected EFLAGS bits used below (carry, trap, interrupt-enable,
	# direction, nested-task, virtual-8086 mode).
71CF_MASK = 0x00000001
72TF_MASK = 0x00000100
73IF_MASK = 0x00000200
74DF_MASK = 0x00000400
75NT_MASK = 0x00004000
76VM_MASK = 0x00020000
77
	# With CONFIG_PREEMPT we must re-disable interrupts on the exception
	# return path (preempt_stop) so a preemption point is not missed;
	# without preemption, resume_kernel degenerates to a plain restore.
78#ifdef CONFIG_PREEMPT
79#define preempt_stop cli
80#else
81#define preempt_stop
82#define resume_kernel restore_nocheck
83#endif
84
	# Save all registers in pt_regs layout (see offsets above) and load
	# the kernel's flat data segments into %ds/%es.  cld first: the
	# kernel ABI assumes the direction flag is clear.
85#define SAVE_ALL \
86 cld; \
87 pushl %es; \
88 pushl %ds; \
89 pushl %eax; \
90 pushl %ebp; \
91 pushl %edi; \
92 pushl %esi; \
93 pushl %edx; \
94 pushl %ecx; \
95 pushl %ebx; \
96 movl $(__USER_DS), %edx; \
97 movl %edx, %ds; \
98 movl %edx, %es;
99
	# Pop the general registers pushed by SAVE_ALL (segments excluded).
100#define RESTORE_INT_REGS \
101 popl %ebx; \
102 popl %ecx; \
103 popl %edx; \
104 popl %esi; \
105 popl %edi; \
106 popl %ebp; \
107 popl %eax
108
	# Restore everything including %ds/%es.  If a segment pop faults
	# (user space left an invalid selector), the __ex_table entries
	# route to the .fixup code, which replaces the bad selector on the
	# stack with 0 and retries the pop.
109#define RESTORE_REGS \
110 RESTORE_INT_REGS; \
1111: popl %ds; \
1122: popl %es; \
113.section .fixup,"ax"; \
1143: movl $0,(%esp); \
115 jmp 1b; \
1164: movl $0,(%esp); \
117 jmp 2b; \
118.previous; \
119.section __ex_table,"a";\
120 .align 4; \
121 .long 1b,3b; \
122 .long 2b,4b; \
123.previous
124
125
126ENTRY(ret_from_fork)
	# First code a newly forked task runs.  %eax is pushed as the
	# (cdecl) argument to schedule_tail() — presumably the previous
	# task from the context switch; see copy_thread()/switch_to.
127 pushl %eax
128 call schedule_tail
129 GET_THREAD_INFO(%ebp)
130 popl %eax
131 jmp syscall_exit
132
133/*
134 * Return to user mode is not as complex as all this looks,
135 * but we want the default path for a system call return to
136 * go as quickly as possible which is why some of this is
137 * less clear than it otherwise should be.
138 */
139
140 # userspace resumption stub bypassing syscall exit tracing
141 ALIGN
142ret_from_exception:
143 preempt_stop
144ret_from_intr:
145 GET_THREAD_INFO(%ebp)
146 movl EFLAGS(%esp), %eax # mix EFLAGS and CS
147 movb CS(%esp), %al
148 testl $(VM_MASK | 3), %eax # returning to vm86 mode or non-ring-0?
149 jz resume_kernel # interrupted kernel code: kernel resume path
150ENTRY(resume_userspace)
151 cli # make sure we don't miss an interrupt
152 # setting need_resched or sigpending
153 # between sampling and the iret
154 movl TI_flags(%ebp), %ecx
155 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
156 # int/exception return?
157 jne work_pending
158 jmp restore_all
159
160#ifdef CONFIG_PREEMPT
161ENTRY(resume_kernel)
162 cli
163 cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
164 jnz restore_nocheck
165need_resched:
166 movl TI_flags(%ebp), %ecx # need_resched set ?
167 testb $_TIF_NEED_RESCHED, %cl
168 jz restore_all
169 testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
170 jz restore_all
	# Preempt the interrupted kernel code; recheck, since new work may
	# have arrived while we were scheduled away.
171 call preempt_schedule_irq
172 jmp need_resched
173#endif
174
175/* SYSENTER_RETURN points to after the "sysenter" instruction in
176 the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
177
178 # sysenter call handler stub
179ENTRY(sysenter_entry)
	# On sysenter, %esp was loaded from the SYSENTER_ESP MSR, which
	# points into the TSS; fetch the real kernel stack from there.
180 movl TSS_sysenter_esp0(%esp),%esp
181sysenter_past_esp:
182 sti
	# Hand-build the bottom of the iret frame (ss, esp, eflags, cs,
	# eip) so a sysenter syscall can be returned from with iret too.
183 pushl $(__USER_DS)
184 pushl %ebp
185 pushfl
186 pushl $(__USER_CS)
187 pushl $SYSENTER_RETURN
188
189/*
190 * Load the potential sixth argument from user stack.
191 * Careful about security.
192 */
193 cmpl $__PAGE_OFFSET-3,%ebp
194 jae syscall_fault
1951: movl (%ebp),%ebp
196.section __ex_table,"a"
197 .align 4
198 .long 1b,syscall_fault
199.previous
200
201 pushl %eax
202 SAVE_ALL
203 GET_THREAD_INFO(%ebp)
204
205 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
206 testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
207 jnz syscall_trace_entry
208 cmpl $(nr_syscalls), %eax
209 jae syscall_badsys
210 call *sys_call_table(,%eax,4)
211 movl %eax,EAX(%esp)
212 cli
213 movl TI_flags(%ebp), %ecx
214 testw $_TIF_ALLWORK_MASK, %cx
215 jne syscall_exit_work
216/* if something modifies registers it must also disable sysexit */
	# Fast exit: sysexit resumes at %edx with user stack %ecx.
217 movl EIP(%esp), %edx
218 movl OLDESP(%esp), %ecx
219 xorl %ebp,%ebp
220 sti
221 sysexit
222
223
224 # system call handler stub
225ENTRY(system_call)
226 pushl %eax # save orig_eax
227 SAVE_ALL
228 GET_THREAD_INFO(%ebp)
229 # system call tracing in operation
230 /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
231 testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
232 jnz syscall_trace_entry
233 cmpl $(nr_syscalls), %eax # jae also rejects negative %eax (unsigned)
234 jae syscall_badsys
235syscall_call:
236 call *sys_call_table(,%eax,4)
237 movl %eax,EAX(%esp) # store the return value
238syscall_exit:
239 cli # make sure we don't miss an interrupt
240 # setting need_resched or sigpending
241 # between sampling and the iret
242 movl TI_flags(%ebp), %ecx
243 testw $_TIF_ALLWORK_MASK, %cx # current->work
244 jne syscall_exit_work
245
246restore_all:
247 movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
5df24082
SS
248 # Warning: OLDSS(%esp) contains the wrong/random values if we
249 # are returning to the kernel.
250 # See comments in process.c:copy_thread() for details.
1da177e4
LT
251 movb OLDSS(%esp), %ah
252 movb CS(%esp), %al
	# Take the slow ldt_ss path only when returning to user space
	# (CPL 3, not vm86) with an LDT-based stack segment (TI bit set).
253 andl $(VM_MASK | (4 << 8) | 3), %eax
254 cmpl $((4 << 8) | 3), %eax
255 je ldt_ss # returning to user-space with LDT SS
256restore_nocheck:
257 RESTORE_REGS
258 addl $4, %esp # skip the orig_eax/error-code slot
2591: iret
260.section .fixup,"ax"
	# iret itself faulted: the saved user context is unusable, so
	# reload sane data segments and terminate the task.
261iret_exc:
262 sti
263 movl $__USER_DS, %edx
264 movl %edx, %ds
265 movl %edx, %es
266 movl $11,%eax # exit code 11 — presumably SIGSEGV-style; confirm
267 call do_exit
268.previous
269.section __ex_table,"a"
270 .align 4
271 .long 1b,iret_exc
272.previous
273
274ldt_ss:
275 larl OLDSS(%esp), %eax # verify the saved SS is loadable
276 jnz restore_nocheck
277 testl $0x00400000, %eax # returning to 32bit stack?
278 jnz restore_nocheck # allright, normal return
279 /* If returning to userspace with 16bit stack,
280 * try to fix the higher word of ESP, as the CPU
281 * won't restore it.
282 * This is an "official" bug of all the x86-compatible
283 * CPUs, which we can try to work around to make
284 * dosemu and wine happy. */
285 subl $8, %esp # reserve space for switch16 pointer
286 cli
287 movl %esp, %eax
288 /* Set up the 16bit stack frame with switch32 pointer on top,
289 * and a switch16 pointer on top of the current frame. */
290 call setup_x86_bogus_stack
291 RESTORE_REGS
292 lss 20+4(%esp), %esp # switch to 16bit stack
2931: iret
294.section __ex_table,"a"
295 .align 4
296 .long 1b,iret_exc
297.previous
298
299 # perform work that needs to be done immediately before resumption
300 ALIGN
301work_pending:
302 testb $_TIF_NEED_RESCHED, %cl
303 jz work_notifysig
304work_resched:
305 call schedule
306 cli # make sure we don't miss an interrupt
307 # setting need_resched or sigpending
308 # between sampling and the iret
309 movl TI_flags(%ebp), %ecx
310 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
311 # than syscall tracing?
312 jz restore_all
313 testb $_TIF_NEED_RESCHED, %cl
314 jnz work_resched
315
316work_notifysig: # deal with pending signals and
317 # notify-resume requests
318 testl $VM_MASK, EFLAGS(%esp)
319 movl %esp, %eax
320 jne work_notifysig_v86 # returning to kernel-space or
321 # vm86-space
322 xorl %edx, %edx # no oldset; %eax = pt_regs for do_notify_resume
323 call do_notify_resume
324 jmp restore_all
325
326 ALIGN
327work_notifysig_v86:
328 pushl %ecx # save ti_flags for do_notify_resume
329 call save_v86_state # %eax contains pt_regs pointer
330 popl %ecx
331 movl %eax, %esp # continue on the regular (non-vm86) pt_regs
332 xorl %edx, %edx
333 call do_notify_resume
334 jmp restore_all
335
336 # perform syscall entry tracing
337 ALIGN
338syscall_trace_entry:
339 movl $-ENOSYS,EAX(%esp) # default result if tracer skips the call
340 movl %esp, %eax
341 xorl %edx,%edx # %edx = 0: this is syscall entry
342 call do_syscall_trace
343 movl ORIG_EAX(%esp), %eax # reload: the tracer may change the nr
344 cmpl $(nr_syscalls), %eax
345 jnae syscall_call
346 jmp syscall_exit
347
348 # perform syscall exit tracing
349 ALIGN
350syscall_exit_work:
351 testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
352 jz work_pending
353 sti # could let do_syscall_trace() call
354 # schedule() instead
355 movl %esp, %eax
356 movl $1, %edx # %edx = 1: this is syscall exit
357 call do_syscall_trace
358 jmp resume_userspace
359
360 ALIGN
361syscall_fault:
	# Reached before SAVE_ALL (from the sysenter sixth-arg fetch), so
	# build the pt_regs frame here before reporting -EFAULT.
362 pushl %eax # save orig_eax
363 SAVE_ALL
364 GET_THREAD_INFO(%ebp)
365 movl $-EFAULT,EAX(%esp)
366 jmp resume_userspace
367
368 ALIGN
369syscall_badsys:
370 movl $-ENOSYS,EAX(%esp)
371 jmp resume_userspace
372
	# Switch from the per-CPU 16-bit espfix stack back to the real
	# 32-bit stack (pointer stored at the top of the 16-bit stack) and
	# copy the frame across.  %eax carries the pt_regs pointer through.
373#define FIXUP_ESPFIX_STACK \
374 movl %esp, %eax; \
375 /* switch to 32bit stack using the pointer on top of 16bit stack */ \
376 lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
377 /* copy data from 16bit stack to 32bit stack */ \
378 call fixup_x86_bogus_stack; \
379 /* put ESP to the proper location */ \
380 movl %eax, %esp;
	# If we were interrupted while on the 16-bit espfix stack, move to
	# the 32-bit stack first; otherwise this is a no-op.
381#define UNWIND_ESPFIX_STACK \
382 pushl %eax; \
383 movl %ss, %eax; \
384 /* see if on 16bit stack */ \
385 cmpw $__ESPFIX_SS, %ax; \
386 jne 28f; \
387 movl $__KERNEL_DS, %edx; \
388 movl %edx, %ds; \
389 movl %edx, %es; \
390 /* switch to 32bit stack */ \
391 FIXUP_ESPFIX_STACK \
39228: popl %eax;
393
393
394/*
395 * Build the entry stubs and pointer table with
396 * some assembler magic.
397 */
398.data
399ENTRY(interrupt)
400.text
401
402vector=0
403ENTRY(irq_entries_start)
404.rept NR_IRQS
405 ALIGN
	# The vector is pushed into the orig_eax slot as (vector - 256), a
	# negative value, which distinguishes IRQs from system calls.
4061: pushl $vector-256
407 jmp common_interrupt
408.data
409 .long 1b # record the stub's address in the interrupt[] table
410.text
411vector=vector+1
412.endr
413
414 ALIGN
415common_interrupt:
416 SAVE_ALL
417 movl %esp,%eax # pt_regs pointer is do_IRQ's argument
418 call do_IRQ
419 jmp ret_from_intr
420
	# Build an entry point for a dedicated (non-device) interrupt that
	# dispatches to the matching smp_<name> C handler.
421#define BUILD_INTERRUPT(name, nr) \
422ENTRY(name) \
423 pushl $nr-256; \
424 SAVE_ALL \
425 movl %esp,%eax; \
426 call smp_/**/name; \
427 jmp ret_from_intr;
428
429/* The include is where all of the SMP etc. interrupts come from */
430#include "entry_arch.h"
431
432ENTRY(divide_error)
433 pushl $0 # no error code
434 pushl $do_divide_error
435 ALIGN
	# Common exception path.  On entry the stack holds (top down):
	# handler address, error code (or 0), then the CPU iret frame.
	# The pushes below are interleaved with other work but produce the
	# usual pt_regs layout; ES/ORIG_EAX slots are then swapped so the
	# handler address and error code come out of the frame correctly.
436error_code:
437 pushl %ds
438 pushl %eax
439 xorl %eax, %eax
440 pushl %ebp
441 pushl %edi
442 pushl %esi
443 pushl %edx
444 decl %eax # eax = -1
445 pushl %ecx
446 pushl %ebx
447 cld
448 pushl %es
449 UNWIND_ESPFIX_STACK
450 popl %ecx
451 movl ES(%esp), %edi # get the function address
452 movl ORIG_EAX(%esp), %edx # get the error code
453 movl %eax, ORIG_EAX(%esp) # mark frame as not-a-syscall (-1)
454 movl %ecx, ES(%esp)
455 movl $(__USER_DS), %ecx
456 movl %ecx, %ds
457 movl %ecx, %es
458 movl %esp,%eax # pt_regs pointer
459 call *%edi # do_<trap>(regs, error_code)
460 jmp ret_from_exception
461
462ENTRY(coprocessor_error)
463 pushl $0
464 pushl $do_coprocessor_error
465 jmp error_code
466
467ENTRY(simd_coprocessor_error)
468 pushl $0
469 pushl $do_simd_coprocessor_error
470 jmp error_code
471
472ENTRY(device_not_available)
473 pushl $-1 # mark this as an int
474 SAVE_ALL
475 movl %cr0, %eax
476 testl $0x4, %eax # EM (math emulation bit)
477 jne device_not_available_emulate
	# Real FPU present: lazily restore this task's FPU state.
478 preempt_stop
479 call math_state_restore
480 jmp ret_from_exception
481device_not_available_emulate:
482 pushl $0 # temporary storage for ORIG_EIP
483 call math_emulate
484 addl $4, %esp
485 jmp ret_from_exception
486
487/*
488 * Debug traps and NMI can happen at the one SYSENTER instruction
489 * that sets up the real kernel stack. Check here, since we can't
490 * allow the wrong stack to be used.
491 *
492 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
493 * already pushed 3 words if it hits on the sysenter instruction:
494 * eflags, cs and eip.
495 *
496 * We just load the right stack, and push the three (known) values
497 * by hand onto the new stack - while updating the return eip past
498 * the instruction that would have done it for sysenter.
499 */
500#define FIX_STACK(offset, ok, label) \
501 cmpw $__KERNEL_CS,4(%esp); \
502 jne ok; \
503label: \
504 movl TSS_sysenter_esp0+offset(%esp),%esp; \
505 pushfl; \
506 pushl $__KERNEL_CS; \
507 pushl $sysenter_past_esp
508
509ENTRY(debug)
	# If the trap hit exactly on the sysenter instruction, %esp still
	# points into the TSS; switch to the real stack first.
510 cmpl $sysenter_entry,(%esp)
511 jne debug_stack_correct
512 FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
513debug_stack_correct:
514 pushl $-1 # mark this as an int
515 SAVE_ALL
516 xorl %edx,%edx # error code 0
517 movl %esp,%eax # pt_regs pointer
518 call do_debug
519 testl %eax,%eax # non-zero return: skip signal work on exit
520 jnz restore_all
521 jmp ret_from_exception
522
523/*
524 * NMI is doubly nasty. It can happen _while_ we're handling
525 * a debug fault, and the debug fault hasn't yet been able to
526 * clear up the stack. So we first check whether we got an
527 * NMI on the sysenter entry path, but after that we need to
528 * check whether we got an NMI on the debug path where the debug
529 * fault happened on the sysenter path.
530 */
531ENTRY(nmi)
532 pushl %eax
533 movl %ss, %eax
534 cmpw $__ESPFIX_SS, %ax # NMI while on the 16-bit espfix stack?
535 popl %eax
536 je nmi_16bit_stack
537 cmpl $sysenter_entry,(%esp)
538 je nmi_stack_fixup
539 pushl %eax
540 movl %esp,%eax
541 /* Do not access memory above the end of our stack page,
542 * it might not exist.
543 */
544 andl $(THREAD_SIZE-1),%eax
545 cmpl $(THREAD_SIZE-20),%eax
546 popl %eax
547 jae nmi_stack_correct
548 cmpl $sysenter_entry,12(%esp)
549 je nmi_debug_stack_check
550nmi_stack_correct:
551 pushl %eax
552 SAVE_ALL
553 xorl %edx,%edx # zero error code
554 movl %esp,%eax # pt_regs pointer
555 call do_nmi
556 jmp restore_all
557
558nmi_stack_fixup:
559 FIX_STACK(12,nmi_stack_correct, 1)
560 jmp nmi_stack_correct
561nmi_debug_stack_check:
562 cmpw $__KERNEL_CS,16(%esp)
563 jne nmi_stack_correct
564 cmpl $debug - 1,(%esp)
565 jle nmi_stack_correct
566 cmpl $debug_esp_fix_insn,(%esp)
567 jle nmi_debug_stack_fixup
	# NOTE(review): if %eip is above debug_esp_fix_insn, the jle above
	# is not taken and we still fall through into the fixup, so this
	# upper-bound check is ineffective; later kernels rewrote this as
	# jb/ja with a bail-out to nmi_stack_correct — confirm intent.
568nmi_debug_stack_fixup:
569 FIX_STACK(24,nmi_stack_correct, 1)
570 jmp nmi_stack_correct
571
572nmi_16bit_stack:
573 /* create the pointer to lss back */
574 pushl %ss
575 pushl %esp
576 movzwl %sp, %esp
577 addw $4, (%esp)
578 /* copy the iret frame of 12 bytes */
579 .rept 3
580 pushl 16(%esp)
581 .endr
582 pushl %eax
583 SAVE_ALL
584 FIXUP_ESPFIX_STACK # %eax == %esp
585 xorl %edx,%edx # zero error code
586 call do_nmi
587 RESTORE_REGS
588 lss 12+4(%esp), %esp # back to 16bit stack
5891: iret
590.section __ex_table,"a"
591 .align 4
592 .long 1b,iret_exc
593.previous
594
595ENTRY(int3)
596 pushl $-1 # mark this as an int
597 SAVE_ALL
598 xorl %edx,%edx # zero error code
599 movl %esp,%eax # pt_regs pointer
600 call do_int3
601 testl %eax,%eax # non-zero return: skip signal work on exit
602 jnz restore_all
603 jmp ret_from_exception
604
	# Exception stubs funneling into error_code.  Traps for which the
	# CPU pushes a hardware error code (invalid_TSS and later) push
	# only the handler; the others push a 0 placeholder first.
605ENTRY(overflow)
606 pushl $0
607 pushl $do_overflow
608 jmp error_code
609
610ENTRY(bounds)
611 pushl $0
612 pushl $do_bounds
613 jmp error_code
614
615ENTRY(invalid_op)
616 pushl $0
617 pushl $do_invalid_op
618 jmp error_code
619
620ENTRY(coprocessor_segment_overrun)
621 pushl $0
622 pushl $do_coprocessor_segment_overrun
623 jmp error_code
624
625ENTRY(invalid_TSS)
626 pushl $do_invalid_TSS
627 jmp error_code
628
629ENTRY(segment_not_present)
630 pushl $do_segment_not_present
631 jmp error_code
632
633ENTRY(stack_segment)
634 pushl $do_stack_segment
635 jmp error_code
636
637ENTRY(general_protection)
638 pushl $do_general_protection
639 jmp error_code
640
641ENTRY(alignment_check)
642 pushl $do_alignment_check
643 jmp error_code
644
645ENTRY(page_fault)
646 pushl $do_page_fault
647 jmp error_code
648
649#ifdef CONFIG_X86_MCE
650ENTRY(machine_check)
651 pushl $0
	# Indirect: machine_check_vector is a variable holding the handler.
652 pushl machine_check_vector
653 jmp error_code
654#endif
655
656ENTRY(spurious_interrupt_bug)
657 pushl $0
658 pushl $do_spurious_interrupt_bug
659 jmp error_code
660
661.data
	# System call dispatch table, indexed by syscall number in %eax
	# (see "call *sys_call_table(,%eax,4)" above).  Entry order is ABI:
	# never renumber; retired slots keep sys_ni_syscall placeholders.
662ENTRY(sys_call_table)
663 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
664 .long sys_exit
665 .long sys_fork
666 .long sys_read
667 .long sys_write
668 .long sys_open /* 5 */
669 .long sys_close
670 .long sys_waitpid
671 .long sys_creat
672 .long sys_link
673 .long sys_unlink /* 10 */
674 .long sys_execve
675 .long sys_chdir
676 .long sys_time
677 .long sys_mknod
678 .long sys_chmod /* 15 */
679 .long sys_lchown16
680 .long sys_ni_syscall /* old break syscall holder */
681 .long sys_stat
682 .long sys_lseek
683 .long sys_getpid /* 20 */
684 .long sys_mount
685 .long sys_oldumount
686 .long sys_setuid16
687 .long sys_getuid16
688 .long sys_stime /* 25 */
689 .long sys_ptrace
690 .long sys_alarm
691 .long sys_fstat
692 .long sys_pause
693 .long sys_utime /* 30 */
694 .long sys_ni_syscall /* old stty syscall holder */
695 .long sys_ni_syscall /* old gtty syscall holder */
696 .long sys_access
697 .long sys_nice
698 .long sys_ni_syscall /* 35 - old ftime syscall holder */
699 .long sys_sync
700 .long sys_kill
701 .long sys_rename
702 .long sys_mkdir
703 .long sys_rmdir /* 40 */
704 .long sys_dup
705 .long sys_pipe
706 .long sys_times
707 .long sys_ni_syscall /* old prof syscall holder */
708 .long sys_brk /* 45 */
709 .long sys_setgid16
710 .long sys_getgid16
711 .long sys_signal
712 .long sys_geteuid16
713 .long sys_getegid16 /* 50 */
714 .long sys_acct
715 .long sys_umount /* recycled never used phys() */
716 .long sys_ni_syscall /* old lock syscall holder */
717 .long sys_ioctl
718 .long sys_fcntl /* 55 */
719 .long sys_ni_syscall /* old mpx syscall holder */
720 .long sys_setpgid
721 .long sys_ni_syscall /* old ulimit syscall holder */
722 .long sys_olduname
723 .long sys_umask /* 60 */
724 .long sys_chroot
725 .long sys_ustat
726 .long sys_dup2
727 .long sys_getppid
728 .long sys_getpgrp /* 65 */
729 .long sys_setsid
730 .long sys_sigaction
731 .long sys_sgetmask
732 .long sys_ssetmask
733 .long sys_setreuid16 /* 70 */
734 .long sys_setregid16
735 .long sys_sigsuspend
736 .long sys_sigpending
737 .long sys_sethostname
738 .long sys_setrlimit /* 75 */
739 .long sys_old_getrlimit
740 .long sys_getrusage
741 .long sys_gettimeofday
742 .long sys_settimeofday
743 .long sys_getgroups16 /* 80 */
744 .long sys_setgroups16
745 .long old_select
746 .long sys_symlink
747 .long sys_lstat
748 .long sys_readlink /* 85 */
749 .long sys_uselib
750 .long sys_swapon
751 .long sys_reboot
752 .long old_readdir
753 .long old_mmap /* 90 */
754 .long sys_munmap
755 .long sys_truncate
756 .long sys_ftruncate
757 .long sys_fchmod
758 .long sys_fchown16 /* 95 */
759 .long sys_getpriority
760 .long sys_setpriority
761 .long sys_ni_syscall /* old profil syscall holder */
762 .long sys_statfs
763 .long sys_fstatfs /* 100 */
764 .long sys_ioperm
765 .long sys_socketcall
766 .long sys_syslog
767 .long sys_setitimer
768 .long sys_getitimer /* 105 */
769 .long sys_newstat
770 .long sys_newlstat
771 .long sys_newfstat
772 .long sys_uname
773 .long sys_iopl /* 110 */
774 .long sys_vhangup
775 .long sys_ni_syscall /* old "idle" system call */
776 .long sys_vm86old
777 .long sys_wait4
778 .long sys_swapoff /* 115 */
779 .long sys_sysinfo
780 .long sys_ipc
781 .long sys_fsync
782 .long sys_sigreturn
783 .long sys_clone /* 120 */
784 .long sys_setdomainname
785 .long sys_newuname
786 .long sys_modify_ldt
787 .long sys_adjtimex
788 .long sys_mprotect /* 125 */
789 .long sys_sigprocmask
790 .long sys_ni_syscall /* old "create_module" */
791 .long sys_init_module
792 .long sys_delete_module
793 .long sys_ni_syscall /* 130: old "get_kernel_syms" */
794 .long sys_quotactl
795 .long sys_getpgid
796 .long sys_fchdir
797 .long sys_bdflush
798 .long sys_sysfs /* 135 */
799 .long sys_personality
800 .long sys_ni_syscall /* reserved for afs_syscall */
801 .long sys_setfsuid16
802 .long sys_setfsgid16
803 .long sys_llseek /* 140 */
804 .long sys_getdents
805 .long sys_select
806 .long sys_flock
807 .long sys_msync
808 .long sys_readv /* 145 */
809 .long sys_writev
810 .long sys_getsid
811 .long sys_fdatasync
812 .long sys_sysctl
813 .long sys_mlock /* 150 */
814 .long sys_munlock
815 .long sys_mlockall
816 .long sys_munlockall
817 .long sys_sched_setparam
818 .long sys_sched_getparam /* 155 */
819 .long sys_sched_setscheduler
820 .long sys_sched_getscheduler
821 .long sys_sched_yield
822 .long sys_sched_get_priority_max
823 .long sys_sched_get_priority_min /* 160 */
824 .long sys_sched_rr_get_interval
825 .long sys_nanosleep
826 .long sys_mremap
827 .long sys_setresuid16
828 .long sys_getresuid16 /* 165 */
829 .long sys_vm86
830 .long sys_ni_syscall /* Old sys_query_module */
831 .long sys_poll
832 .long sys_nfsservctl
833 .long sys_setresgid16 /* 170 */
834 .long sys_getresgid16
835 .long sys_prctl
836 .long sys_rt_sigreturn
837 .long sys_rt_sigaction
838 .long sys_rt_sigprocmask /* 175 */
839 .long sys_rt_sigpending
840 .long sys_rt_sigtimedwait
841 .long sys_rt_sigqueueinfo
842 .long sys_rt_sigsuspend
843 .long sys_pread64 /* 180 */
844 .long sys_pwrite64
845 .long sys_chown16
846 .long sys_getcwd
847 .long sys_capget
848 .long sys_capset /* 185 */
849 .long sys_sigaltstack
850 .long sys_sendfile
851 .long sys_ni_syscall /* reserved for streams1 */
852 .long sys_ni_syscall /* reserved for streams2 */
853 .long sys_vfork /* 190 */
854 .long sys_getrlimit
855 .long sys_mmap2
856 .long sys_truncate64
857 .long sys_ftruncate64
858 .long sys_stat64 /* 195 */
859 .long sys_lstat64
860 .long sys_fstat64
861 .long sys_lchown
862 .long sys_getuid
863 .long sys_getgid /* 200 */
864 .long sys_geteuid
865 .long sys_getegid
866 .long sys_setreuid
867 .long sys_setregid
868 .long sys_getgroups /* 205 */
869 .long sys_setgroups
870 .long sys_fchown
871 .long sys_setresuid
872 .long sys_getresuid
873 .long sys_setresgid /* 210 */
874 .long sys_getresgid
875 .long sys_chown
876 .long sys_setuid
877 .long sys_setgid
878 .long sys_setfsuid /* 215 */
879 .long sys_setfsgid
880 .long sys_pivot_root
881 .long sys_mincore
882 .long sys_madvise
883 .long sys_getdents64 /* 220 */
884 .long sys_fcntl64
885 .long sys_ni_syscall /* reserved for TUX */
886 .long sys_ni_syscall
887 .long sys_gettid
888 .long sys_readahead /* 225 */
889 .long sys_setxattr
890 .long sys_lsetxattr
891 .long sys_fsetxattr
892 .long sys_getxattr
893 .long sys_lgetxattr /* 230 */
894 .long sys_fgetxattr
895 .long sys_listxattr
896 .long sys_llistxattr
897 .long sys_flistxattr
898 .long sys_removexattr /* 235 */
899 .long sys_lremovexattr
900 .long sys_fremovexattr
901 .long sys_tkill
902 .long sys_sendfile64
903 .long sys_futex /* 240 */
904 .long sys_sched_setaffinity
905 .long sys_sched_getaffinity
906 .long sys_set_thread_area
907 .long sys_get_thread_area
908 .long sys_io_setup /* 245 */
909 .long sys_io_destroy
910 .long sys_io_getevents
911 .long sys_io_submit
912 .long sys_io_cancel
913 .long sys_fadvise64 /* 250 */
914 .long sys_ni_syscall
915 .long sys_exit_group
916 .long sys_lookup_dcookie
917 .long sys_epoll_create
918 .long sys_epoll_ctl /* 255 */
919 .long sys_epoll_wait
920 .long sys_remap_file_pages
921 .long sys_set_tid_address
922 .long sys_timer_create
923 .long sys_timer_settime /* 260 */
924 .long sys_timer_gettime
925 .long sys_timer_getoverrun
926 .long sys_timer_delete
927 .long sys_clock_settime
928 .long sys_clock_gettime /* 265 */
929 .long sys_clock_getres
930 .long sys_clock_nanosleep
931 .long sys_statfs64
932 .long sys_fstatfs64
933 .long sys_tgkill /* 270 */
934 .long sys_utimes
935 .long sys_fadvise64_64
936 .long sys_ni_syscall /* sys_vserver */
937 .long sys_mbind
938 .long sys_get_mempolicy
939 .long sys_set_mempolicy
940 .long sys_mq_open
941 .long sys_mq_unlink
942 .long sys_mq_timedsend
943 .long sys_mq_timedreceive /* 280 */
944 .long sys_mq_notify
945 .long sys_mq_getsetattr
946 .long sys_ni_syscall /* reserved for kexec */
947 .long sys_waitid
948 .long sys_ni_syscall /* 285 */ /* available */
949 .long sys_add_key
950 .long sys_request_key
951 .long sys_keyctl
952
	# Table size in bytes; nr_syscalls (above) derives the entry count.
953syscall_table_size=(.-sys_call_table)