2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/tracehook.h>
26 #include <linux/capability.h>
27 #include <linux/freezer.h>
28 #include <linux/pid_namespace.h>
29 #include <linux/nsproxy.h>
30 #include <trace/sched.h>
32 #include <asm/param.h>
33 #include <asm/uaccess.h>
34 #include <asm/unistd.h>
35 #include <asm/siginfo.h>
36 #include "audit.h" /* audit_signal_info() */
39 * SLAB caches for signal bits.
42 static struct kmem_cache *sigqueue_cachep;
44 static void __user *sig_handler(struct task_struct *t, int sig)
46 return t->sighand->action[sig - 1].sa.sa_handler;
49 static int sig_handler_ignored(void __user *handler, int sig)
51 /* Is it explicitly or implicitly ignored? */
52 return handler == SIG_IGN ||
53 (handler == SIG_DFL && sig_kernel_ignore(sig));
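/*
 * Worked example (illustrative): for SIGCHLD, whose default action is
 * to ignore, a SIG_DFL handler counts as ignored here; for SIGTERM,
 * whose default action is termination, it does not:
 *
 *	sig_handler_ignored(SIG_DFL, SIGCHLD);	// 1, sig_kernel_ignore() is true
 *	sig_handler_ignored(SIG_DFL, SIGTERM);	// 0, default is termination
 *	sig_handler_ignored(SIG_IGN, SIGTERM);	// 1, explicitly ignored
 */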
56 static int sig_ignored(struct task_struct *t, int sig)
61 * Blocked signals are never ignored, since the
62 * signal handler may change by the time it is unblocked.
65 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
68 handler = sig_handler(t, sig);
69 if (!sig_handler_ignored(handler, sig))
73 * Tracers may want to know about even ignored signals.
75 return !tracehook_consider_ignored_signal(t, sig, handler);
79 * Re-calculate pending state from the set of locally pending
80 * signals, globally pending signals, and blocked signals.
82 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
87 switch (_NSIG_WORDS) {
89 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
90 ready |= signal->sig[i] &~ blocked->sig[i];
93 case 4: ready = signal->sig[3] &~ blocked->sig[3];
94 ready |= signal->sig[2] &~ blocked->sig[2];
95 ready |= signal->sig[1] &~ blocked->sig[1];
96 ready |= signal->sig[0] &~ blocked->sig[0];
99 case 2: ready = signal->sig[1] &~ blocked->sig[1];
100 ready |= signal->sig[0] &~ blocked->sig[0];
103 case 1: ready = signal->sig[0] &~ blocked->sig[0];
108 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
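/*
 * A minimal sketch (plain C, not the kernel sigset types) of the
 * per-word AND-NOT that has_pending_signals() performs, shown for a
 * hypothetical two-word set:
 *
 *	unsigned long sig[2] = { 0x5, 0x0 };	// pending: signals 1 and 3
 *	unsigned long blk[2] = { 0x4, 0x0 };	// blocked: signal 3
 *	long ready = (sig[1] & ~blk[1]) | (sig[0] & ~blk[0]);
 *	// ready == 0x1: only signal 1 is both pending and unblocked
 */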
110 static int recalc_sigpending_tsk(struct task_struct *t)
112 if (t->signal->group_stop_count > 0 ||
113 PENDING(&t->pending, &t->blocked) ||
114 PENDING(&t->signal->shared_pending, &t->blocked)) {
115 set_tsk_thread_flag(t, TIF_SIGPENDING);
119 * We must never clear the flag in another thread, or in current
120 * when it's possible the current syscall is returning -ERESTART*.
121 * So we don't clear it here; only callers that know they should clear it do so.
127 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
128 * This is superfluous when called on current; the wakeup is a harmless no-op.
130 void recalc_sigpending_and_wake(struct task_struct *t)
132 if (recalc_sigpending_tsk(t))
133 signal_wake_up(t, 0);
136 void recalc_sigpending(void)
138 if (unlikely(tracehook_force_sigpending()))
139 set_thread_flag(TIF_SIGPENDING);
140 else if (!recalc_sigpending_tsk(current) && !freezing(current))
141 clear_thread_flag(TIF_SIGPENDING);
145 /* Given the mask, find the first available signal that should be serviced. */
147 int next_signal(struct sigpending *pending, sigset_t *mask)
149 unsigned long i, *s, *m, x;
152 s = pending->signal.sig;
154 switch (_NSIG_WORDS) {
156 for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
157 if ((x = *s &~ *m) != 0) {
158 sig = ffz(~x) + i*_NSIG_BPW + 1;
163 case 2: if ((x = s[0] &~ m[0]) != 0)
165 else if ((x = s[1] &~ m[1]) != 0)
172 case 1: if ((x = *s &~ *m) != 0)
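/*
 * Worked example of the ffz() arithmetic used above (illustrative):
 * ffz(~x) is the index of the lowest set bit in x, and signal numbers
 * are 1-based, hence the "+ i*_NSIG_BPW + 1":
 *
 *	x = 0x90;				// bits 4 and 7 set in word 0
 *	sig = ffz(~x) + 0*_NSIG_BPW + 1;	// ffz(~0x90) == 4, so sig == 5
 */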
181 * allocate a new signal queue record
182 * - this may be called without locks if and only if t == current, otherwise an
183 * appropriate lock must be held to protect t's user_struct
185 static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
188 struct sigqueue *q = NULL;
189 struct user_struct *user;
192 * We won't get problems with the target's UID changing under us
193 * because changing it requires RCU be used, and if t != current, the
194 * caller must be holding the RCU readlock (by way of a spinlock) and
195 * we use RCU protection here
197 user = __task_cred(t)->user;
198 atomic_inc(&user->sigpending);
199 if (override_rlimit ||
200 atomic_read(&user->sigpending) <=
201 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
202 q = kmem_cache_alloc(sigqueue_cachep, flags);
203 if (unlikely(q == NULL)) {
204 atomic_dec(&user->sigpending);
206 INIT_LIST_HEAD(&q->list);
208 q->user = get_uid(user);
213 static void __sigqueue_free(struct sigqueue *q)
215 if (q->flags & SIGQUEUE_PREALLOC)
217 atomic_dec(&q->user->sigpending);
219 kmem_cache_free(sigqueue_cachep, q);
222 void flush_sigqueue(struct sigpending *queue)
226 sigemptyset(&queue->signal);
227 while (!list_empty(&queue->list)) {
228 q = list_entry(queue->list.next, struct sigqueue, list);
229 list_del_init(&q->list);
235 * Flush all pending signals for a task.
237 void flush_signals(struct task_struct *t)
241 spin_lock_irqsave(&t->sighand->siglock, flags);
242 clear_tsk_thread_flag(t, TIF_SIGPENDING);
243 flush_sigqueue(&t->pending);
244 flush_sigqueue(&t->signal->shared_pending);
245 spin_unlock_irqrestore(&t->sighand->siglock, flags);
248 static void __flush_itimer_signals(struct sigpending *pending)
250 sigset_t signal, retain;
251 struct sigqueue *q, *n;
253 signal = pending->signal;
254 sigemptyset(&retain);
256 list_for_each_entry_safe(q, n, &pending->list, list) {
257 int sig = q->info.si_signo;
259 if (likely(q->info.si_code != SI_TIMER)) {
260 sigaddset(&retain, sig);
262 sigdelset(&signal, sig);
263 list_del_init(&q->list);
268 sigorsets(&pending->signal, &signal, &retain);
271 void flush_itimer_signals(void)
273 struct task_struct *tsk = current;
276 spin_lock_irqsave(&tsk->sighand->siglock, flags);
277 __flush_itimer_signals(&tsk->pending);
278 __flush_itimer_signals(&tsk->signal->shared_pending);
279 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
282 void ignore_signals(struct task_struct *t)
286 for (i = 0; i < _NSIG; ++i)
287 t->sighand->action[i].sa.sa_handler = SIG_IGN;
293 * Flush all handlers for a task.
297 flush_signal_handlers(struct task_struct *t, int force_default)
300 struct k_sigaction *ka = &t->sighand->action[0];
301 for (i = _NSIG ; i != 0 ; i--) {
302 if (force_default || ka->sa.sa_handler != SIG_IGN)
303 ka->sa.sa_handler = SIG_DFL;
305 sigemptyset(&ka->sa.sa_mask);
310 int unhandled_signal(struct task_struct *tsk, int sig)
312 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
313 if (is_global_init(tsk))
315 if (handler != SIG_IGN && handler != SIG_DFL)
317 return !tracehook_consider_fatal_signal(tsk, sig, handler);
321 /* Notify the system that a driver wants to block all signals for this
322 * process, and wants to be notified if any signals at all were to be
323 * sent/acted upon. If the notifier routine returns non-zero, then the
324 * signal will be acted upon after all. If the notifier routine returns 0,
325 * then the signal will be blocked. Only one block per process is
326 * allowed. priv is a pointer to private data that the notifier routine
327 * can use to determine if the signal should be blocked or not. */
330 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
334 spin_lock_irqsave(&current->sighand->siglock, flags);
335 current->notifier_mask = mask;
336 current->notifier_data = priv;
337 current->notifier = notifier;
338 spin_unlock_irqrestore(&current->sighand->siglock, flags);
341 /* Notify the system that blocking has ended. */
344 unblock_all_signals(void)
348 spin_lock_irqsave(&current->sighand->siglock, flags);
349 current->notifier = NULL;
350 current->notifier_data = NULL;
352 spin_unlock_irqrestore(&current->sighand->siglock, flags);
355 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
357 struct sigqueue *q, *first = NULL;
360 * Collect the siginfo appropriate to this signal. Check if
361 * there is another siginfo for the same signal.
363 list_for_each_entry(q, &list->list, list) {
364 if (q->info.si_signo == sig) {
371 sigdelset(&list->signal, sig);
375 list_del_init(&first->list);
376 copy_siginfo(info, &first->info);
377 __sigqueue_free(first);
379 /* Ok, it wasn't in the queue. This must be
380 a fast-pathed signal or we must have been
381 out of queue space. So zero out the info.
383 info->si_signo = sig;
391 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
394 int sig = next_signal(pending, mask);
397 if (current->notifier) {
398 if (sigismember(current->notifier_mask, sig)) {
399 if (!(current->notifier)(current->notifier_data)) {
400 clear_thread_flag(TIF_SIGPENDING);
406 collect_signal(sig, pending, info);
413 * Dequeue a signal and return the element to the caller, which is
414 * expected to free it.
416 * All callers have to hold the siglock.
418 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
422 /* We only dequeue private signals from ourselves, we don't let
423 * signalfd steal them
425 signr = __dequeue_signal(&tsk->pending, mask, info);
427 signr = __dequeue_signal(&tsk->signal->shared_pending,
432 * itimers are process shared and we restart periodic
433 * itimers in the signal delivery path to prevent DoS
434 * attacks in the high resolution timer case. This is
435 * compliant with the old way of self restarting
436 * itimers, as the SIGALRM is a legacy signal and only
437 * queued once. Restarting the timer in the signal
438 * dequeue path also reduces the timer noise on
439 * heavily loaded !highres systems.
442 if (unlikely(signr == SIGALRM)) {
443 struct hrtimer *tmr = &tsk->signal->real_timer;
445 if (!hrtimer_is_queued(tmr) &&
446 tsk->signal->it_real_incr.tv64 != 0) {
447 hrtimer_forward(tmr, tmr->base->get_time(),
448 tsk->signal->it_real_incr);
449 hrtimer_restart(tmr);
458 if (unlikely(sig_kernel_stop(signr))) {
460 * Set a marker that we have dequeued a stop signal. Our
461 * caller might release the siglock and then the pending
462 * stop signal it is about to process is no longer in the
463 * pending bitmasks, but must still be cleared by a SIGCONT
464 * (and overruled by a SIGKILL). So those cases clear this
465 * shared flag after we've set it. Note that this flag may
466 * remain set after the signal we return is ignored or
467 * handled. That doesn't matter because its only purpose
468 * is to alert stop-signal processing code when another
469 * processor has come along and cleared the flag.
471 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
473 if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
475 * Release the siglock to ensure proper locking order
476 * of timer locks outside of siglocks. Note, we leave
477 * irqs disabled here, since the posix-timers code is
478 * about to disable them again anyway.
480 spin_unlock(&tsk->sighand->siglock);
481 do_schedule_next_timer(info);
482 spin_lock(&tsk->sighand->siglock);
488 * Tell a process that it has a new active signal..
490 * NOTE! we rely on the previous spin_lock to
491 * lock interrupts for us! We can only be called with
492 * "siglock" held, and the local interrupt must
493 * have been disabled when that got acquired!
495 * No need to set need_resched since signal event passing
496 * goes through ->blocked
498 void signal_wake_up(struct task_struct *t, int resume)
502 set_tsk_thread_flag(t, TIF_SIGPENDING);
505 * For SIGKILL, we want to wake it up in the stopped/traced/killable
506 * case. We don't check t->state here because there is a race with it
507 * executing on another processor and just now entering stopped state.
508 * By using wake_up_state, we ensure the process will wake up and
509 * handle its death signal.
511 mask = TASK_INTERRUPTIBLE;
513 mask |= TASK_WAKEKILL;
514 if (!wake_up_state(t, mask))
519 * Remove signals in mask from the pending set and queue.
520 * Returns 1 if any signals were found.
522 * All callers must be holding the siglock.
524 * This version takes a sigset mask and looks at all signals,
525 * not just those in the first mask word.
527 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
529 struct sigqueue *q, *n;
532 sigandsets(&m, mask, &s->signal);
533 if (sigisemptyset(&m))
536 signandsets(&s->signal, &s->signal, mask);
537 list_for_each_entry_safe(q, n, &s->list, list) {
538 if (sigismember(mask, q->info.si_signo)) {
539 list_del_init(&q->list);
546 * Remove signals in mask from the pending set and queue.
547 * Returns 1 if any signals were found.
549 * All callers must be holding the siglock.
551 static int rm_from_queue(unsigned long mask, struct sigpending *s)
553 struct sigqueue *q, *n;
555 if (!sigtestsetmask(&s->signal, mask))
558 sigdelsetmask(&s->signal, mask);
559 list_for_each_entry_safe(q, n, &s->list, list) {
560 if (q->info.si_signo < SIGRTMIN &&
561 (mask & sigmask(q->info.si_signo))) {
562 list_del_init(&q->list);
570 * Bad permissions for sending the signal
571 * - the caller must hold at least the RCU read lock
573 static int check_kill_permission(int sig, struct siginfo *info,
574 struct task_struct *t)
576 const struct cred *cred = current_cred(), *tcred;
580 if (!valid_signal(sig))
583 if (info != SEND_SIG_NOINFO && (is_si_special(info) || SI_FROMKERNEL(info)))
586 error = audit_signal_info(sig, t); /* Let audit system see the signal */
590 tcred = __task_cred(t);
591 if ((cred->euid ^ tcred->suid) &&
592 (cred->euid ^ tcred->uid) &&
593 (cred->uid ^ tcred->suid) &&
594 (cred->uid ^ tcred->uid) &&
595 !capable(CAP_KILL)) {
598 sid = task_session(t);
600 * We don't return the error if sid == NULL. The
601 * task was unhashed, the caller must notice this.
603 if (!sid || sid == task_session(current))
610 return security_task_kill(t, info, sig, 0);
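/*
 * Worked example of the euid/uid cross-check above (illustrative):
 * the &&-chain of XORs denies only when all four sender/target id
 * pairs differ and CAP_KILL is absent. A sender with euid 1000 may
 * signal a target with uid 1000 and suid 0, because
 * (cred->euid ^ tcred->uid) == 0 makes the whole condition false.
 */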
614 * Handle magic process-wide effects of stop/continue signals. Unlike
615 * the signal actions, these happen immediately at signal-generation
616 * time regardless of blocking, ignoring, or handling. This does the
617 * actual continuing for SIGCONT, but not the actual stopping for stop
618 * signals. The process stop is done as a signal action for SIG_DFL.
620 * Returns true if the signal should be actually delivered, otherwise
621 * it should be dropped.
623 static int prepare_signal(int sig, struct task_struct *p)
625 struct signal_struct *signal = p->signal;
626 struct task_struct *t;
628 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
630 * The process is in the middle of dying, nothing to do.
632 } else if (sig_kernel_stop(sig)) {
634 * This is a stop signal. Remove SIGCONT from all queues.
636 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
639 rm_from_queue(sigmask(SIGCONT), &t->pending);
640 } while_each_thread(p, t);
641 } else if (sig == SIGCONT) {
644 * Remove all stop signals from all queues,
645 * and wake all threads.
647 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
651 rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
653 * If there is a handler for SIGCONT, we must make
654 * sure that no thread returns to user mode before
655 * we post the signal, in case it was the only
656 * thread eligible to run the signal handler--then
657 * it must not do anything between resuming and
658 * running the handler. With the TIF_SIGPENDING
659 * flag set, the thread will pause and acquire the
660 * siglock that we hold now and until we've queued
661 * the pending signal.
663 * Wake up the stopped thread _after_ setting
666 state = __TASK_STOPPED;
667 if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
668 set_tsk_thread_flag(t, TIF_SIGPENDING);
669 state |= TASK_INTERRUPTIBLE;
671 wake_up_state(t, state);
672 } while_each_thread(p, t);
675 * Notify the parent with CLD_CONTINUED if we were stopped.
677 * If we were in the middle of a group stop, we pretend it
678 * was already finished, and then continued. Since SIGCHLD
679 * doesn't queue we report only CLD_STOPPED, as if the next
680 * CLD_CONTINUED was dropped.
683 if (signal->flags & SIGNAL_STOP_STOPPED)
684 why |= SIGNAL_CLD_CONTINUED;
685 else if (signal->group_stop_count)
686 why |= SIGNAL_CLD_STOPPED;
690 * The first thread which returns from finish_stop()
691 * will take ->siglock, notice SIGNAL_CLD_MASK, and
692 * notify its parent. See get_signal_to_deliver().
694 signal->flags = why | SIGNAL_STOP_CONTINUED;
695 signal->group_stop_count = 0;
696 signal->group_exit_code = 0;
699 * We are not stopped, but there could be a stop
700 * signal in the middle of being processed after
701 * being removed from the queue. Clear that too.
703 signal->flags &= ~SIGNAL_STOP_DEQUEUED;
707 return !sig_ignored(p, sig);
711 * Test if P wants to take SIG. After we've checked all threads with this,
712 * it's equivalent to finding no threads not blocking SIG. Any threads not
713 * blocking SIG were ruled out because they are not running and already
714 * have pending signals. Such threads will dequeue from the shared queue
715 * as soon as they're available, so putting the signal on the shared queue
716 * will be equivalent to sending it to one such thread.
718 static inline int wants_signal(int sig, struct task_struct *p)
720 if (sigismember(&p->blocked, sig))
722 if (p->flags & PF_EXITING)
726 if (task_is_stopped_or_traced(p))
728 return task_curr(p) || !signal_pending(p);
731 static void complete_signal(int sig, struct task_struct *p, int group)
733 struct signal_struct *signal = p->signal;
734 struct task_struct *t;
737 * Now find a thread we can wake up to take the signal off the queue.
739 * If the main thread wants the signal, it gets first crack.
740 * Probably the least surprising to the average bear.
742 if (wants_signal(sig, p))
744 else if (!group || thread_group_empty(p))
746 * There is just one thread and it does not need to be woken.
747 * It will dequeue unblocked signals before it runs again.
752 * Otherwise try to find a suitable thread.
754 t = signal->curr_target;
755 while (!wants_signal(sig, t)) {
757 if (t == signal->curr_target)
759 * No thread needs to be woken.
760 * Any eligible threads will see
761 * the signal in the queue soon.
765 signal->curr_target = t;
769 * Found a killable thread. If the signal will be fatal,
770 * then start taking the whole group down immediately.
772 if (sig_fatal(p, sig) &&
773 !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
774 !sigismember(&t->real_blocked, sig) &&
776 !tracehook_consider_fatal_signal(t, sig, SIG_DFL))) {
778 * This signal will be fatal to the whole group.
780 if (!sig_kernel_coredump(sig)) {
782 * Start a group exit and wake everybody up.
783 * This way we don't have other threads
784 * running and doing things after a slower
785 * thread has the fatal signal pending.
787 signal->flags = SIGNAL_GROUP_EXIT;
788 signal->group_exit_code = sig;
789 signal->group_stop_count = 0;
792 sigaddset(&t->pending.signal, SIGKILL);
793 signal_wake_up(t, 1);
794 } while_each_thread(p, t);
800 * The signal is already in the shared-pending queue.
801 * Tell the chosen thread to wake up and dequeue it.
803 signal_wake_up(t, sig == SIGKILL);
807 static inline int legacy_queue(struct sigpending *signals, int sig)
809 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
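/*
 * Userspace-visible effect of legacy_queue() (a minimal sketch using
 * POSIX APIs, not kernel code): a second instance of a blocked non-RT
 * signal is coalesced, while RT signals really queue:
 *
 *	sigset_t s;
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	kill(getpid(), SIGUSR1);
 *	kill(getpid(), SIGUSR1);	// coalesced: one pending instance
 *	// kill(getpid(), SIGRTMIN) twice would stay queued twice
 */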
812 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
815 struct sigpending *pending;
818 trace_sched_signal_send(sig, t);
820 assert_spin_locked(&t->sighand->siglock);
821 if (!prepare_signal(sig, t))
824 pending = group ? &t->signal->shared_pending : &t->pending;
826 * Short-circuit ignored signals and support queuing
827 * exactly one non-rt signal, so that we can get more
828 * detailed information about the cause of the signal.
830 if (legacy_queue(pending, sig))
833 * fast-pathed signals for kernel-internal things like SIGSTOP
836 if (info == SEND_SIG_FORCED)
839 /* Real-time signals must be queued if sent by sigqueue, or
840 some other real-time mechanism. It is implementation
841 defined whether kill() does so. We attempt to do so, on
842 the principle of least surprise, but since kill is not
843 allowed to fail with EAGAIN when low on memory we just
844 make sure at least one signal gets delivered and don't
845 pass on the info struct. */
847 q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
848 (is_si_special(info) ||
849 info->si_code >= 0)));
851 list_add_tail(&q->list, &pending->list);
852 switch ((unsigned long) info) {
853 case (unsigned long) SEND_SIG_NOINFO:
854 q->info.si_signo = sig;
855 q->info.si_errno = 0;
856 q->info.si_code = SI_USER;
857 q->info.si_pid = task_pid_vnr(current);
858 q->info.si_uid = current_uid();
860 case (unsigned long) SEND_SIG_PRIV:
861 q->info.si_signo = sig;
862 q->info.si_errno = 0;
863 q->info.si_code = SI_KERNEL;
868 copy_siginfo(&q->info, info);
871 } else if (!is_si_special(info)) {
872 if (sig >= SIGRTMIN && info->si_code != SI_USER)
874 * Queue overflow, abort. We may abort if the signal was rt
875 * and sent by user using something other than kill().
881 signalfd_notify(t, sig);
882 sigaddset(&pending->signal, sig);
883 complete_signal(sig, t, group);
887 int print_fatal_signals;
889 static void print_fatal_signal(struct pt_regs *regs, int signr)
891 printk("%s/%d: potentially unexpected fatal signal %d.\n",
892 current->comm, task_pid_nr(current), signr);
894 #if defined(__i386__) && !defined(__arch_um__)
895 printk("code at %08lx: ", regs->ip);
898 for (i = 0; i < 16; i++) {
901 __get_user(insn, (unsigned char *)(regs->ip + i));
902 printk("%02x ", insn);
910 static int __init setup_print_fatal_signals(char *str)
912 get_option (&str, &print_fatal_signals);
917 __setup("print-fatal-signals=", setup_print_fatal_signals);
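/*
 * Usage note: print_fatal_signals is normally off; booting with
 * "print-fatal-signals=1" on the kernel command line enables the
 * diagnostic above via the __setup() hook.
 */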
920 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
922 return send_signal(sig, info, p, 1);
926 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
928 return send_signal(sig, info, t, 0);
932 * Force a signal that the process can't ignore: if necessary
933 * we unblock the signal and change any SIG_IGN to SIG_DFL.
935 * Note: If we unblock the signal, we always reset it to SIG_DFL,
936 * since we do not want to have a signal handler that was blocked
937 * be invoked when user space had explicitly blocked it.
939 * We don't want to have recursive SIGSEGV's etc, for example,
940 * that is why we also clear SIGNAL_UNKILLABLE.
943 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
945 unsigned long int flags;
946 int ret, blocked, ignored;
947 struct k_sigaction *action;
949 spin_lock_irqsave(&t->sighand->siglock, flags);
950 action = &t->sighand->action[sig-1];
951 ignored = action->sa.sa_handler == SIG_IGN;
952 blocked = sigismember(&t->blocked, sig);
953 if (blocked || ignored) {
954 action->sa.sa_handler = SIG_DFL;
956 sigdelset(&t->blocked, sig);
957 recalc_sigpending_and_wake(t);
960 if (action->sa.sa_handler == SIG_DFL)
961 t->signal->flags &= ~SIGNAL_UNKILLABLE;
962 ret = specific_send_sig_info(sig, info, t);
963 spin_unlock_irqrestore(&t->sighand->siglock, flags);
969 force_sig_specific(int sig, struct task_struct *t)
971 force_sig_info(sig, SEND_SIG_FORCED, t);
975 * Nuke all other threads in the group.
977 void zap_other_threads(struct task_struct *p)
979 struct task_struct *t;
981 p->signal->group_stop_count = 0;
983 for (t = next_thread(p); t != p; t = next_thread(t)) {
985 * Don't bother with already dead threads
990 /* SIGKILL will be handled before any pending SIGSTOP */
991 sigaddset(&t->pending.signal, SIGKILL);
992 signal_wake_up(t, 1);
996 int __fatal_signal_pending(struct task_struct *tsk)
998 return sigismember(&tsk->pending.signal, SIGKILL);
1000 EXPORT_SYMBOL(__fatal_signal_pending);
1002 struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
1004 struct sighand_struct *sighand;
1008 sighand = rcu_dereference(tsk->sighand);
1009 if (unlikely(sighand == NULL))
1012 spin_lock_irqsave(&sighand->siglock, *flags);
1013 if (likely(sighand == tsk->sighand))
1015 spin_unlock_irqrestore(&sighand->siglock, *flags);
1023 * send signal info to all the members of a group
1024 * - the caller must hold the RCU read lock at least
1026 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1028 unsigned long flags;
1031 ret = check_kill_permission(sig, info, p);
1035 if (lock_task_sighand(p, &flags)) {
1036 ret = __group_send_sig_info(sig, info, p);
1037 unlock_task_sighand(p, &flags);
1045 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1046 * control characters do (^C, ^Z etc)
1047 * - the caller must hold at least a readlock on tasklist_lock
1049 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1051 struct task_struct *p = NULL;
1052 int retval, success;
1056 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1057 int err = group_send_sig_info(sig, info, p);
1060 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1061 return success ? 0 : retval;
1064 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1067 struct task_struct *p;
1071 p = pid_task(pid, PIDTYPE_PID);
1073 error = group_send_sig_info(sig, info, p);
1074 if (unlikely(error == -ESRCH))
1076 * The task was unhashed in between, try again.
1077 * If it is dead, pid_task() will return NULL,
1078 * if we race with de_thread() it will find the new leader.
1089 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1093 error = kill_pid_info(sig, info, find_vpid(pid));
1098 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1099 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1100 uid_t uid, uid_t euid, u32 secid)
1103 struct task_struct *p;
1104 const struct cred *pcred;
1106 if (!valid_signal(sig))
1109 read_lock(&tasklist_lock);
1110 p = pid_task(pid, PIDTYPE_PID);
1115 pcred = __task_cred(p);
1116 if ((info == SEND_SIG_NOINFO ||
1117 (!is_si_special(info) && SI_FROMUSER(info))) &&
1118 euid != pcred->suid && euid != pcred->uid &&
1119 uid != pcred->suid && uid != pcred->uid) {
1123 ret = security_task_kill(p, info, sig, secid);
1126 if (sig && p->sighand) {
1127 unsigned long flags;
1128 spin_lock_irqsave(&p->sighand->siglock, flags);
1129 ret = __group_send_sig_info(sig, info, p);
1130 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1133 read_unlock(&tasklist_lock);
1136 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1139 * kill_something_info() interprets pid in interesting ways just like kill(2).
1141 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1142 * is probably wrong. Should make it like BSD or SYSV.
1145 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1151 ret = kill_pid_info(sig, info, find_vpid(pid));
1156 read_lock(&tasklist_lock);
1158 ret = __kill_pgrp_info(sig, info,
1159 pid ? find_vpid(-pid) : task_pgrp(current));
1161 int retval = 0, count = 0;
1162 struct task_struct * p;
1164 for_each_process(p) {
1165 if (task_pid_vnr(p) > 1 &&
1166 !same_thread_group(p, current)) {
1167 int err = group_send_sig_info(sig, info, p);
1173 ret = count ? retval : -ESRCH;
1175 read_unlock(&tasklist_lock);
1181 * These are for backward compatibility with the rest of the kernel source.
1185 * The caller must ensure the task can't exit.
1188 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1191 unsigned long flags;
1194 * Make sure legacy kernel users don't send in bad values
1195 * (normal paths check this in check_kill_permission).
1197 if (!valid_signal(sig))
1200 spin_lock_irqsave(&p->sighand->siglock, flags);
1201 ret = specific_send_sig_info(sig, info, p);
1202 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1206 #define __si_special(priv) \
1207 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1210 send_sig(int sig, struct task_struct *p, int priv)
1212 return send_sig_info(sig, __si_special(priv), p);
1216 force_sig(int sig, struct task_struct *p)
1218 force_sig_info(sig, SEND_SIG_PRIV, p);
1222 * When things go south during signal handling, we
1223 * will force a SIGSEGV. And if the signal that caused
1224 * the problem was already a SIGSEGV, we'll want to
1225 * make sure we don't even try to deliver the signal..
1228 force_sigsegv(int sig, struct task_struct *p)
1230 if (sig == SIGSEGV) {
1231 unsigned long flags;
1232 spin_lock_irqsave(&p->sighand->siglock, flags);
1233 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1234 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1236 force_sig(SIGSEGV, p);
1240 int kill_pgrp(struct pid *pid, int sig, int priv)
1244 read_lock(&tasklist_lock);
1245 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1246 read_unlock(&tasklist_lock);
1250 EXPORT_SYMBOL(kill_pgrp);
1252 int kill_pid(struct pid *pid, int sig, int priv)
1254 return kill_pid_info(sig, __si_special(priv), pid);
1256 EXPORT_SYMBOL(kill_pid);
1259 * These functions support sending signals using preallocated sigqueue
1260 * structures. This is needed "because realtime applications cannot
1261 * afford to lose notifications of asynchronous events, like timer
1262 * expirations or I/O completions". In the case of Posix Timers
1263 * we allocate the sigqueue structure from timer_create(). If this
1264 * allocation fails we are able to report the failure to the application
1265 * with an EAGAIN error.
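/*
 * A userspace sketch of that contract (illustrative, standard POSIX
 * timer API):
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGRTMIN,
 *	};
 *	timer_t tid;
 *	if (timer_create(CLOCK_MONOTONIC, &sev, &tid) < 0 && errno == EAGAIN)
 *		;	// preallocation failed at create time, not at expiry
 */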
1268 struct sigqueue *sigqueue_alloc(void)
1272 if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
1273 q->flags |= SIGQUEUE_PREALLOC;
1277 void sigqueue_free(struct sigqueue *q)
1279 unsigned long flags;
1280 spinlock_t *lock = &current->sighand->siglock;
1282 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1284 * We must hold ->siglock while testing q->list
1285 * to serialize with collect_signal() or with
1286 * __exit_signal()->flush_sigqueue().
1288 spin_lock_irqsave(lock, flags);
1289 q->flags &= ~SIGQUEUE_PREALLOC;
1291 * If it is queued it will be freed when dequeued,
1292 * like the "regular" sigqueue.
1294 if (!list_empty(&q->list))
1296 spin_unlock_irqrestore(lock, flags);
1302 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1304 int sig = q->info.si_signo;
1305 struct sigpending *pending;
1306 unsigned long flags;
1309 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1312 if (!likely(lock_task_sighand(t, &flags)))
1315 ret = 1; /* the signal is ignored */
1316 if (!prepare_signal(sig, t))
1320 if (unlikely(!list_empty(&q->list))) {
1322 * If an SI_TIMER entry is already queued, just increment
1323 * the overrun count.
1325 BUG_ON(q->info.si_code != SI_TIMER);
1326 q->info.si_overrun++;
1329 q->info.si_overrun = 0;
1331 signalfd_notify(t, sig);
1332 pending = group ? &t->signal->shared_pending : &t->pending;
1333 list_add_tail(&q->list, &pending->list);
1334 sigaddset(&pending->signal, sig);
1335 complete_signal(sig, t, group);
1337 unlock_task_sighand(t, &flags);
1343 * Wake up any threads in the parent blocked in wait* syscalls.
1345 static inline void __wake_up_parent(struct task_struct *p,
1346 struct task_struct *parent)
1348 wake_up_interruptible_sync(&parent->signal->wait_chldexit);
1352 * Let a parent know about the death of a child.
1353 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1355 * Returns -1 if our parent ignored us and so we've switched to
1356 * self-reaping, or else @sig.
1358 int do_notify_parent(struct task_struct *tsk, int sig)
1360 struct siginfo info;
1361 unsigned long flags;
1362 struct sighand_struct *psig;
1363 struct task_cputime cputime;
1368 /* do_notify_parent_cldstop should have been called instead. */
1369 BUG_ON(task_is_stopped_or_traced(tsk));
1371 BUG_ON(!tsk->ptrace &&
1372 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1374 info.si_signo = sig;
1377 * we are under tasklist_lock here so our parent is tied to
1378 * us and cannot exit and release its namespace.
1380 * the only thing it can do is switch its nsproxy with sys_unshare,
1381 * but unsharing pid namespaces is not allowed, so we'll always
1382 * see the relevant namespace
1384 * write_lock() currently calls preempt_disable() which is the
1385 * same as rcu_read_lock(), but according to Oleg, it is not
1386 * correct to rely on this
1389 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1390 info.si_uid = __task_cred(tsk)->uid;
1393 thread_group_cputime(tsk, &cputime);
1394 info.si_utime = cputime_to_jiffies(cputime.utime);
1395 info.si_stime = cputime_to_jiffies(cputime.stime);
1397 info.si_status = tsk->exit_code & 0x7f;
1398 if (tsk->exit_code & 0x80)
1399 info.si_code = CLD_DUMPED;
1400 else if (tsk->exit_code & 0x7f)
1401 info.si_code = CLD_KILLED;
1403 info.si_code = CLD_EXITED;
1404 info.si_status = tsk->exit_code >> 8;
1407 psig = tsk->parent->sighand;
1408 spin_lock_irqsave(&psig->siglock, flags);
1409 if (!tsk->ptrace && sig == SIGCHLD &&
1410 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1411 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1413 * We are exiting and our parent doesn't care. POSIX.1
1414 * defines special semantics for setting SIGCHLD to SIG_IGN
1415 * or setting the SA_NOCLDWAIT flag: we should be reaped
1416 * automatically and not left for our parent's wait4 call.
1417 * Rather than having the parent do it as a magic kind of
1418 * signal handler, we just set this to tell do_exit that we
1419 * can be cleaned up without becoming a zombie. Note that
1420 * we still call __wake_up_parent in this case, because a
1421 * blocked sys_wait4 might now return -ECHILD.
1423 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1424 * is implementation-defined: we do (if you don't want
1425 * it, just use SIG_IGN instead).
1427 ret = tsk->exit_signal = -1;
1428 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1431 if (valid_signal(sig) && sig > 0)
1432 __group_send_sig_info(sig, &info, tsk->parent);
1433 __wake_up_parent(tsk, tsk->parent);
1434 spin_unlock_irqrestore(&psig->siglock, flags);
1439 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1441 struct siginfo info;
1442 unsigned long flags;
1443 struct task_struct *parent;
1444 struct sighand_struct *sighand;
1446 if (tsk->ptrace & PT_PTRACED)
1447 parent = tsk->parent;
1449 tsk = tsk->group_leader;
1450 parent = tsk->real_parent;
1453 info.si_signo = SIGCHLD;
1456 * see comment in do_notify_parent() about the following 3 lines
1459 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1460 info.si_uid = __task_cred(tsk)->uid;
1463 info.si_utime = cputime_to_clock_t(tsk->utime);
1464 info.si_stime = cputime_to_clock_t(tsk->stime);
1469 info.si_status = SIGCONT;
1472 info.si_status = tsk->signal->group_exit_code & 0x7f;
1475 info.si_status = tsk->exit_code & 0x7f;
1481 sighand = parent->sighand;
1482 spin_lock_irqsave(&sighand->siglock, flags);
1483 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1484 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1485 __group_send_sig_info(SIGCHLD, &info, parent);
1487 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1489 __wake_up_parent(tsk, parent);
1490 spin_unlock_irqrestore(&sighand->siglock, flags);
1493 static inline int may_ptrace_stop(void)
1495 if (!likely(current->ptrace & PT_PTRACED))
1498 * Are we in the middle of do_coredump?
1499 * If so, and our tracer is also part of the coredump, stopping
1500 * is a deadlock situation, and pointless because our tracer
1501 * is dead, so don't allow us to stop.
1502 * If SIGKILL was already sent before the caller unlocked
1503 * ->siglock we must see ->core_state != NULL. Otherwise it
1504 * is safe to enter schedule().
1506 if (unlikely(current->mm->core_state) &&
1507 unlikely(current->mm == current->parent->mm))
1514 * Return nonzero if there is a SIGKILL that should be waking us up.
1515 * Called with the siglock held.
1517 static int sigkill_pending(struct task_struct *tsk)
1519 return sigismember(&tsk->pending.signal, SIGKILL) ||
1520 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1524 * This must be called with current->sighand->siglock held.
1526 * This should be the path for all ptrace stops.
1527 * We always set current->last_siginfo while stopped here.
1528 * That makes it a way to test a stopped process for
1529 * being ptrace-stopped vs being job-control-stopped.
1531 * If we actually decide not to stop at all because the tracer
1532 * is gone, we keep current->exit_code unless clear_code.
1534 static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1536 if (arch_ptrace_stop_needed(exit_code, info)) {
1538 * The arch code has something special to do before a
1539 * ptrace stop. This is allowed to block, e.g. for faults
1540 * on user stack pages. We can't keep the siglock while
1541 * calling arch_ptrace_stop, so we must release it now.
1542 * To preserve proper semantics, we must do this before
1543 * any signal bookkeeping like checking group_stop_count.
1544 * Meanwhile, a SIGKILL could come in before we retake the
1545 * siglock. That must prevent us from sleeping in TASK_TRACED.
1546 * So after regaining the lock, we must check for SIGKILL.
1548 spin_unlock_irq(&current->sighand->siglock);
1549 arch_ptrace_stop(exit_code, info);
1550 spin_lock_irq(&current->sighand->siglock);
1551 if (sigkill_pending(current))
1556 * If there is a group stop in progress,
1557 * we must participate in the bookkeeping.
1559 if (current->signal->group_stop_count > 0)
1560 --current->signal->group_stop_count;
1562 current->last_siginfo = info;
1563 current->exit_code = exit_code;
1565 /* Let the debugger run. */
1566 __set_current_state(TASK_TRACED);
1567 spin_unlock_irq(&current->sighand->siglock);
1568 read_lock(&tasklist_lock);
1569 if (may_ptrace_stop()) {
1570 do_notify_parent_cldstop(current, CLD_TRAPPED);
1571 read_unlock(&tasklist_lock);
1575 * By the time we got the lock, our tracer went away.
1576 * Don't drop the lock yet, another tracer may come.
1578 __set_current_state(TASK_RUNNING);
1580 current->exit_code = 0;
1581 read_unlock(&tasklist_lock);
1585 * While in TASK_TRACED, we were considered "frozen enough".
1586 * Now that we woke up, it's crucial if we're supposed to be
1587 * frozen that we freeze now before running anything substantial.
1592 * We are back. Now reacquire the siglock before touching
1593 * last_siginfo, so that we are sure to have synchronized with
1594 * any signal-sending on another CPU that wants to examine it.
1596 spin_lock_irq(&current->sighand->siglock);
1597 current->last_siginfo = NULL;
1600 * Queued signals ignored us while we were stopped for tracing.
1601 * So check for any that we should take before resuming user mode.
1602 * This sets TIF_SIGPENDING, but never clears it.
1604 recalc_sigpending_tsk(current);
1607 void ptrace_notify(int exit_code)
1611 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1613 memset(&info, 0, sizeof info);
1614 info.si_signo = SIGTRAP;
1615 info.si_code = exit_code;
1616 info.si_pid = task_pid_vnr(current);
1617 info.si_uid = current_uid();
1619 /* Let the debugger run. */
1620 spin_lock_irq(&current->sighand->siglock);
1621 ptrace_stop(exit_code, 1, &info);
1622 spin_unlock_irq(&current->sighand->siglock);
1626 finish_stop(int stop_count)
1629 * If there are no other threads in the group, or if there is
1630 * a group stop in progress and we are the last to stop,
1631 * report to the parent. When ptraced, every thread reports itself.
1633 if (tracehook_notify_jctl(stop_count == 0, CLD_STOPPED)) {
1634 read_lock(&tasklist_lock);
1635 do_notify_parent_cldstop(current, CLD_STOPPED);
1636 read_unlock(&tasklist_lock);
1641 } while (try_to_freeze());
1643 * Now we don't run again until continued.
1645 current->exit_code = 0;
1649 * This performs the stopping for SIGSTOP and other stop signals.
1650 * We have to stop all threads in the thread group.
1651 * Returns nonzero if we've actually stopped and released the siglock.
1652 * Returns zero if we didn't stop and still hold the siglock.
1654 static int do_signal_stop(int signr)
1656 struct signal_struct *sig = current->signal;
1659 if (sig->group_stop_count > 0) {
1661 * There is a group stop in progress. We don't need to
1662 * start another one.
1664 stop_count = --sig->group_stop_count;
1666 struct task_struct *t;
1668 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1669 unlikely(signal_group_exit(sig)))
1672 * There is no group stop already in progress.
1673 * We must initiate one now.
1675 sig->group_exit_code = signr;
1678 for (t = next_thread(current); t != current; t = next_thread(t))
1680 * Setting state to TASK_STOPPED for a group
1681 * stop is always done with the siglock held,
1682 * so this check has no races.
1684 if (!(t->flags & PF_EXITING) &&
1685 !task_is_stopped_or_traced(t)) {
1687 signal_wake_up(t, 0);
1689 sig->group_stop_count = stop_count;
1692 if (stop_count == 0)
1693 sig->flags = SIGNAL_STOP_STOPPED;
1694 current->exit_code = sig->group_exit_code;
1695 __set_current_state(TASK_STOPPED);
1697 spin_unlock_irq(&current->sighand->siglock);
1698 finish_stop(stop_count);
1702 static int ptrace_signal(int signr, siginfo_t *info,
1703 struct pt_regs *regs, void *cookie)
1705 if (!(current->ptrace & PT_PTRACED))
1708 ptrace_signal_deliver(regs, cookie);
1710 /* Let the debugger run. */
1711 ptrace_stop(signr, 0, info);
1713 /* We're back. Did the debugger cancel the sig? */
1714 signr = current->exit_code;
1718 current->exit_code = 0;
1720 /* Update the siginfo structure if the signal has
1721 changed. If the debugger wanted something
1722 specific in the siginfo structure then it should
1723 have updated *info via PTRACE_SETSIGINFO. */
1724 if (signr != info->si_signo) {
1725 info->si_signo = signr;
1727 info->si_code = SI_USER;
1728 info->si_pid = task_pid_vnr(current->parent);
1729 info->si_uid = task_uid(current->parent);
1732 /* If the (new) signal is now blocked, requeue it. */
1733 if (sigismember(&current->blocked, signr)) {
1734 specific_send_sig_info(signr, info, current);
1741 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1742 struct pt_regs *regs, void *cookie)
1744 struct sighand_struct *sighand = current->sighand;
1745 struct signal_struct *signal = current->signal;
1750 * We'll jump back here after any time we were stopped in TASK_STOPPED.
1751 * While in TASK_STOPPED, we were considered "frozen enough".
1752 * Now that we woke up, it's crucial if we're supposed to be
1753 * frozen that we freeze now before running anything substantial.
1757 spin_lock_irq(&sighand->siglock);
1759 * Every stopped thread goes here after wakeup. Check to see if
1760 * we should notify the parent, prepare_signal(SIGCONT) encodes
1761 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1763 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1764 int why = (signal->flags & SIGNAL_STOP_CONTINUED)
1765 ? CLD_CONTINUED : CLD_STOPPED;
1766 signal->flags &= ~SIGNAL_CLD_MASK;
1767 spin_unlock_irq(&sighand->siglock);
1769 if (unlikely(!tracehook_notify_jctl(1, why)))
1772 read_lock(&tasklist_lock);
1773 do_notify_parent_cldstop(current->group_leader, why);
1774 read_unlock(&tasklist_lock);
1779 struct k_sigaction *ka;
1781 if (unlikely(signal->group_stop_count > 0) &&
1786 * Tracing can induce an artificial signal and choose sigaction.
1787 * The return value in @signr determines the default action,
1788 * but @info->si_signo is the signal number we will report.
1790 signr = tracehook_get_signal(current, regs, info, return_ka);
1791 if (unlikely(signr < 0))
1793 if (unlikely(signr != 0))
1796 signr = dequeue_signal(current, &current->blocked,
1800 break; /* will return 0 */
1802 if (signr != SIGKILL) {
1803 signr = ptrace_signal(signr, info,
1809 ka = &sighand->action[signr-1];
1812 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1814 if (ka->sa.sa_handler != SIG_DFL) {
1815 /* Run the handler. */
1818 if (ka->sa.sa_flags & SA_ONESHOT)
1819 ka->sa.sa_handler = SIG_DFL;
1821 break; /* will return non-zero "signr" value */
1825 * Now we are doing the default action for this signal.
1827 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1831 * Global init gets no signals it doesn't want.
1833 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1834 !signal_group_exit(signal))
1837 if (sig_kernel_stop(signr)) {
1839 * The default action is to stop all threads in
1840 * the thread group. The job control signals
1841 * do nothing in an orphaned pgrp, but SIGSTOP
1842 * always works. Note that siglock needs to be
1843 * dropped during the call to is_orphaned_pgrp()
1844 * because of lock ordering with tasklist_lock.
1845 * This allows an intervening SIGCONT to be posted.
1846 * We need to check for that and bail out if necessary.
1848 if (signr != SIGSTOP) {
1849 spin_unlock_irq(&sighand->siglock);
1851 /* signals can be posted during this window */
1853 if (is_current_pgrp_orphaned())
1856 spin_lock_irq(&sighand->siglock);
1859 if (likely(do_signal_stop(info->si_signo))) {
1860 /* It released the siglock. */
1865 * We didn't actually stop, due to a race
1866 * with SIGCONT or something like that.
1871 spin_unlock_irq(&sighand->siglock);
1874 * Anything else is fatal, maybe with a core dump.
1876 current->flags |= PF_SIGNALED;
1878 if (sig_kernel_coredump(signr)) {
1879 if (print_fatal_signals)
1880 print_fatal_signal(regs, info->si_signo);
1882 * If it was able to dump core, this kills all
1883 * other threads in the group and synchronizes with
1884 * their demise. If we lost the race with another
1885 * thread getting here, it set group_exit_code
1886 * first and our do_group_exit call below will use
1887 * that value and ignore the one we pass it.
1889 do_coredump(info->si_signo, info->si_signo, regs);
1893 * Death signals, no core dump.
1895 do_group_exit(info->si_signo);
1898 spin_unlock_irq(&sighand->siglock);
1902 void exit_signals(struct task_struct *tsk)
1905 struct task_struct *t;
1907 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1908 tsk->flags |= PF_EXITING;
1912 spin_lock_irq(&tsk->sighand->siglock);
1914 * From now this task is not visible for group-wide signals,
1915 * see wants_signal(), do_signal_stop().
1917 tsk->flags |= PF_EXITING;
1918 if (!signal_pending(tsk))
1921 /* It could be that __group_complete_signal() chose us to
1922 * notify about group-wide signal. Another thread should be
1923 * woken now to take the signal since we will not.
1925 for (t = tsk; (t = next_thread(t)) != tsk; )
1926 if (!signal_pending(t) && !(t->flags & PF_EXITING))
1927 recalc_sigpending_and_wake(t);
1929 if (unlikely(tsk->signal->group_stop_count) &&
1930 !--tsk->signal->group_stop_count) {
1931 tsk->signal->flags = SIGNAL_STOP_STOPPED;
1935 spin_unlock_irq(&tsk->sighand->siglock);
1937 if (unlikely(group_stop) && tracehook_notify_jctl(1, CLD_STOPPED)) {
1938 read_lock(&tasklist_lock);
1939 do_notify_parent_cldstop(tsk, CLD_STOPPED);
1940 read_unlock(&tasklist_lock);
1944 EXPORT_SYMBOL(recalc_sigpending);
1945 EXPORT_SYMBOL_GPL(dequeue_signal);
1946 EXPORT_SYMBOL(flush_signals);
1947 EXPORT_SYMBOL(force_sig);
1948 EXPORT_SYMBOL(send_sig);
1949 EXPORT_SYMBOL(send_sig_info);
1950 EXPORT_SYMBOL(sigprocmask);
1951 EXPORT_SYMBOL(block_all_signals);
1952 EXPORT_SYMBOL(unblock_all_signals);
1956 * System call entry points.
1959 asmlinkage long sys_restart_syscall(void)
1961 struct restart_block *restart = &current_thread_info()->restart_block;
1962 return restart->fn(restart);
1965 long do_no_restart_syscall(struct restart_block *param)
1971 * We don't need to get the kernel lock - this is all local to this
1972 * particular thread.. (and that's good, because this is _heavily_
1973 * used by various programs)
1977 * This is also useful for kernel threads that want to temporarily
1978 * (or permanently) block certain signals.
1980 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
1981 * interface happily blocks "unblockable" signals like SIGKILL
1984 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
1988 spin_lock_irq(&current->sighand->siglock);
1990 *oldset = current->blocked;
1995 sigorsets(&current->blocked, &current->blocked, set);
1998 signandsets(&current->blocked, &current->blocked, set);
2001 current->blocked = *set;
2006 recalc_sigpending();
2007 spin_unlock_irq(&current->sighand->siglock);
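/*
 * Illustrative in-kernel use (a sketch, not from this file): a kernel
 * thread that wants to ignore everything can block the full set; as
 * the comment above notes, this kernel-side interface will happily
 * block even SIGKILL:
 *
 *	sigset_t all;
 *	sigfillset(&all);
 *	sigprocmask(SIG_BLOCK, &all, NULL);
 */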
2013 sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
2015 int error = -EINVAL;
2016 sigset_t old_set, new_set;
2018 /* XXX: Don't preclude handling different sized sigset_t's. */
2019 if (sigsetsize != sizeof(sigset_t))
2024 if (copy_from_user(&new_set, set, sizeof(*set)))
2026 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2028 error = sigprocmask(how, &new_set, &old_set);
2034 spin_lock_irq(&current->sighand->siglock);
2035 old_set = current->blocked;
2036 spin_unlock_irq(&current->sighand->siglock);
2040 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2048 long do_sigpending(void __user *set, unsigned long sigsetsize)
2050 long error = -EINVAL;
2053 if (sigsetsize > sizeof(sigset_t))
2056 spin_lock_irq(&current->sighand->siglock);
2057 sigorsets(&pending, &current->pending.signal,
2058 &current->signal->shared_pending.signal);
2059 spin_unlock_irq(&current->sighand->siglock);
2061 /* Outside the lock because only this thread touches it. */
2062 sigandsets(&pending, &current->blocked, &pending);
2065 if (!copy_to_user(set, &pending, sigsetsize))
2073 sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
2075 return do_sigpending(set, sigsetsize);
2078 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2080 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2084 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2086 if (from->si_code < 0)
2087 return __copy_to_user(to, from, sizeof(siginfo_t)) ? -EFAULT : 0;
2090 * If you change siginfo_t structure, please be sure
2091 * this code is fixed accordingly.
2092 * Please remember to update the signalfd_copyinfo() function
2093 * inside fs/signalfd.c too, in case siginfo_t changes.
2094 * It should never copy any pad contained in the structure
2095 * to avoid security leaks, but must copy the generic
2096 * 3 ints plus the relevant union member.
2098 err = __put_user(from->si_signo, &to->si_signo);
2099 err |= __put_user(from->si_errno, &to->si_errno);
2100 err |= __put_user((short)from->si_code, &to->si_code);
2101 switch (from->si_code & __SI_MASK) {
2103 err |= __put_user(from->si_pid, &to->si_pid);
2104 err |= __put_user(from->si_uid, &to->si_uid);
2107 err |= __put_user(from->si_tid, &to->si_tid);
2108 err |= __put_user(from->si_overrun, &to->si_overrun);
2109 err |= __put_user(from->si_ptr, &to->si_ptr);
2112 err |= __put_user(from->si_band, &to->si_band);
2113 err |= __put_user(from->si_fd, &to->si_fd);
2116 err |= __put_user(from->si_addr, &to->si_addr);
2117 #ifdef __ARCH_SI_TRAPNO
2118 err |= __put_user(from->si_trapno, &to->si_trapno);
2122 err |= __put_user(from->si_pid, &to->si_pid);
2123 err |= __put_user(from->si_uid, &to->si_uid);
2124 err |= __put_user(from->si_status, &to->si_status);
2125 err |= __put_user(from->si_utime, &to->si_utime);
2126 err |= __put_user(from->si_stime, &to->si_stime);
2128 case __SI_RT: /* This is not generated by the kernel as of now. */
2129 case __SI_MESGQ: /* But this is */
2130 err |= __put_user(from->si_pid, &to->si_pid);
2131 err |= __put_user(from->si_uid, &to->si_uid);
2132 err |= __put_user(from->si_ptr, &to->si_ptr);
2134 default: /* this is just in case for now ... */
2135 err |= __put_user(from->si_pid, &to->si_pid);
2136 err |= __put_user(from->si_uid, &to->si_uid);
2145 sys_rt_sigtimedwait(const sigset_t __user *uthese,
2146 siginfo_t __user *uinfo,
2147 const struct timespec __user *uts,
2156 /* XXX: Don't preclude handling different sized sigset_t's. */
2157 if (sigsetsize != sizeof(sigset_t))
2160 if (copy_from_user(&these, uthese, sizeof(these)))
2164 * Invert the set of allowed signals to get those we want to block.
2167 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2171 if (copy_from_user(&ts, uts, sizeof(ts)))
2173 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0 || ts.tv_sec < 0)
2178 spin_lock_irq(&current->sighand->siglock);
2179 sig = dequeue_signal(current, &these, &info);
2181 timeout = MAX_SCHEDULE_TIMEOUT;
2183 timeout = (timespec_to_jiffies(&ts)
2184 + (ts.tv_sec || ts.tv_nsec));
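/*
 * Note on the rounding above (illustrative): timespec_to_jiffies()
 * converts the request, and the "+ (ts.tv_sec || ts.tv_nsec)" adds one
 * extra jiffy for any nonzero timeout, so that a wait which starts
 * partway through the current tick still sleeps at least as long as
 * requested.
 */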
2187 /* None ready -- temporarily unblock those we're
2188 * interested in while we are sleeping, so that we'll
2189 * be awakened when they arrive. */
2190 current->real_blocked = current->blocked;
2191 sigandsets(&current->blocked, &current->blocked, &these);
2192 recalc_sigpending();
2193 spin_unlock_irq(&current->sighand->siglock);
2195 timeout = schedule_timeout_interruptible(timeout);
2197 spin_lock_irq(&current->sighand->siglock);
2198 sig = dequeue_signal(current, &these, &info);
2199 current->blocked = current->real_blocked;
2200 siginitset(&current->real_blocked, 0);
2201 recalc_sigpending();
2204 spin_unlock_irq(&current->sighand->siglock);
2209 if (copy_siginfo_to_user(uinfo, &info))
2222 sys_kill(pid_t pid, int sig)
2224 struct siginfo info;
2226 info.si_signo = sig;
2228 info.si_code = SI_USER;
2229 info.si_pid = task_tgid_vnr(current);
2230 info.si_uid = current_uid();
2232 return kill_something_info(sig, &info, pid);
2235 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2238 struct siginfo info;
2239 struct task_struct *p;
2240 unsigned long flags;
2243 info.si_signo = sig;
2245 info.si_code = SI_TKILL;
2246 info.si_pid = task_tgid_vnr(current);
2247 info.si_uid = current_uid();
2250 p = find_task_by_vpid(pid);
2251 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2252 error = check_kill_permission(sig, &info, p);
2254 * The null signal is a permissions and process existence
2255 * probe. No signal is actually delivered.
2257 * If lock_task_sighand() fails we pretend the task dies
2258 * after receiving the signal. The window is tiny, and the
2259 * signal is private anyway.
2261 if (!error && sig && lock_task_sighand(p, &flags)) {
2262 error = specific_send_sig_info(sig, &info, p);
2263 unlock_task_sighand(p, &flags);
2272 * sys_tgkill - send signal to one specific thread
2273 * @tgid: the thread group ID of the thread
2274 * @pid: the PID of the thread
2275 * @sig: signal to be sent
2277 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2278 * exists but no longer belongs to the target process. This
2279 * method solves the problem of threads exiting and PIDs getting reused.
2281 asmlinkage long sys_tgkill(pid_t tgid, pid_t pid, int sig)
2283 /* This is only valid for single tasks */
2284 if (pid <= 0 || tgid <= 0)
2287 return do_tkill(tgid, pid, sig);
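/*
 * Userspace sketch (illustrative; commonly invoked through syscall(2),
 * with "tid" obtained via gettid() in the target thread):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);
 */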
2291 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2294 sys_tkill(pid_t pid, int sig)
2296 /* This is only valid for single tasks */
2300 return do_tkill(0, pid, sig);
2304 sys_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t __user *uinfo)
2308 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2311 /* Not even root can pretend to send signals from the kernel.
2312 Nor can they impersonate a kill(), which adds source info. */
2313 if (info.si_code >= 0)
2315 info.si_signo = sig;
2317 /* POSIX.1b doesn't mention process groups. */
2318 return kill_proc_info(sig, &info, pid);
2321 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2323 struct task_struct *t = current;
2324 struct k_sigaction *k;
2327 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2330 k = &t->sighand->action[sig-1];
2332 spin_lock_irq(&current->sighand->siglock);
2337 sigdelsetmask(&act->sa.sa_mask,
2338 sigmask(SIGKILL) | sigmask(SIGSTOP));
2342 * "Setting a signal action to SIG_IGN for a signal that is
2343 * pending shall cause the pending signal to be discarded,
2344 * whether or not it is blocked."
2346 * "Setting a signal action to SIG_DFL for a signal that is
2347 * pending and whose default action is to ignore the signal
2348 * (for example, SIGCHLD), shall cause the pending signal to
2349 * be discarded, whether or not it is blocked"
2351 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2353 sigaddset(&mask, sig);
2354 rm_from_queue_full(&mask, &t->signal->shared_pending);
2356 rm_from_queue_full(&mask, &t->pending);
2358 } while (t != current);
2362 spin_unlock_irq(&current->sighand->siglock);
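/*
 * Userspace-visible consequence of the POSIX rule quoted above
 * (a sketch, standard signal API):
 *
 *	sigset_t s, p;
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	raise(SIGUSR1);			// now pending and blocked
 *	signal(SIGUSR1, SIG_IGN);	// pending instance is discarded
 *	sigpending(&p);			// sigismember(&p, SIGUSR1) == 0
 */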
2367 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2373 oss.ss_sp = (void __user *) current->sas_ss_sp;
2374 oss.ss_size = current->sas_ss_size;
2375 oss.ss_flags = sas_ss_flags(sp);
2384 if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
2385 || __get_user(ss_sp, &uss->ss_sp)
2386 || __get_user(ss_flags, &uss->ss_flags)
2387 || __get_user(ss_size, &uss->ss_size))
2391 if (on_sig_stack(sp))
2397 * Note - this code used to test ss_flags incorrectly:
2398 * old code may have been written using ss_flags==0
2399 * to mean ss_flags==SS_ONSTACK (as this was the only
2400 * way that worked) - this fix preserves that older
2403 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2406 if (ss_flags == SS_DISABLE) {
2411 if (ss_size < MINSIGSTKSZ)
2415 current->sas_ss_sp = (unsigned long) ss_sp;
2416 current->sas_ss_size = ss_size;
2421 if (copy_to_user(uoss, &oss, sizeof(oss)))
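/*
 * Typical userspace setup that satisfies the checks above
 * (illustrative):
 *
 *	stack_t ss = {
 *		.ss_sp    = malloc(SIGSTKSZ),
 *		.ss_size  = SIGSTKSZ,		// >= MINSIGSTKSZ
 *		.ss_flags = 0,			// or SS_DISABLE
 *	};
 *	sigaltstack(&ss, NULL);
 */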
2430 #ifdef __ARCH_WANT_SYS_SIGPENDING
2433 sys_sigpending(old_sigset_t __user *set)
2435 return do_sigpending(set, sizeof(*set));
2440 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2441 /* Some platforms have their own version with special arguments;
2442 others support only sys_rt_sigprocmask. */
2445 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2448 old_sigset_t old_set, new_set;
2452 if (copy_from_user(&new_set, set, sizeof(*set)))
2454 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2456 spin_lock_irq(&current->sighand->siglock);
2457 old_set = current->blocked.sig[0];
2465 sigaddsetmask(&current->blocked, new_set);
2468 sigdelsetmask(&current->blocked, new_set);
2471 current->blocked.sig[0] = new_set;
2475 recalc_sigpending();
2476 spin_unlock_irq(&current->sighand->siglock);
2482 old_set = current->blocked.sig[0];
2485 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2492 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2494 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2496 sys_rt_sigaction(int sig,
2497 const struct sigaction __user *act,
2498 struct sigaction __user *oact,
2501 struct k_sigaction new_sa, old_sa;
2504 /* XXX: Don't preclude handling different sized sigset_t's. */
2505 if (sigsetsize != sizeof(sigset_t))
2509 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2513 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2516 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2522 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2524 #ifdef __ARCH_WANT_SYS_SGETMASK
2527 * For backwards compatibility. Functionality superseded by sigprocmask.
2533 return current->blocked.sig[0];
2537 sys_ssetmask(int newmask)
2541 spin_lock_irq(&current->sighand->siglock);
2542 old = current->blocked.sig[0];
2544 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|sigmask(SIGSTOP)));
2546 recalc_sigpending();
2547 spin_unlock_irq(&current->sighand->siglock);
2551 #endif /* __ARCH_WANT_SGETMASK */
2553 #ifdef __ARCH_WANT_SYS_SIGNAL
2555 * For backwards compatibility. Functionality superseded by sigaction.
2557 asmlinkage unsigned long
2558 sys_signal(int sig, __sighandler_t handler)
2560 struct k_sigaction new_sa, old_sa;
2563 new_sa.sa.sa_handler = handler;
2564 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2565 sigemptyset(&new_sa.sa.sa_mask);
2567 ret = do_sigaction(sig, &new_sa, &old_sa);
2569 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2571 #endif /* __ARCH_WANT_SYS_SIGNAL */
2573 #ifdef __ARCH_WANT_SYS_PAUSE
2578 current->state = TASK_INTERRUPTIBLE;
2580 return -ERESTARTNOHAND;
2585 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2586 asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
2590 /* XXX: Don't preclude handling different sized sigset_t's. */
2591 if (sigsetsize != sizeof(sigset_t))
2594 if (copy_from_user(&newset, unewset, sizeof(newset)))
2596 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2598 spin_lock_irq(&current->sighand->siglock);
2599 current->saved_sigmask = current->blocked;
2600 current->blocked = newset;
2601 recalc_sigpending();
2602 spin_unlock_irq(&current->sighand->siglock);
2604 current->state = TASK_INTERRUPTIBLE;
2606 set_restore_sigmask();
2607 return -ERESTARTNOHAND;
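/*
 * Userspace counterpart (a sketch, standard API): atomically swap in
 * a wait mask and sleep until a handled signal arrives:
 *
 *	sigset_t waitmask;
 *	sigemptyset(&waitmask);		// allow everything while waiting
 *	sigsuspend(&waitmask);		// always returns -1 with errno EINTR
 */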
2609 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
2611 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2616 void __init signals_init(void)
2618 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);