/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */
/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;
static int sig_ignored(struct task_struct *t, int sig)
{
	void __user *handler;

	/*
	 * Tracers always want to know about signals..
	 */
	if (t->ptrace & PT_PTRACED)
		return 0;

	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig))
		return 0;

	/* Is it explicitly or implicitly ignored? */
	handler = t->sighand->action[sig-1].sa.sa_handler;
	return	 handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}
/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
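
/*
 * Recompute TIF_SIGPENDING for a task: the flag should be set whenever
 * a group stop is in progress or an unblocked signal is pending on
 * either the private or the shared queue.
 */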
fastcall void recalc_sigpending_tsk(struct task_struct *t)
{
	if (t->signal->group_stop_count > 0 ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked))
		set_tsk_thread_flag(t, TIF_SIGPENDING);
	else
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
}

void recalc_sigpending(void)
{
	recalc_sigpending_tsk(current);
}
/* Given the mask, find the first available signal that should be serviced. */

static int
next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;
	switch (_NSIG_WORDS) {
	default:
		for (i = 0; i < _NSIG_WORDS; ++i, ++s, ++m)
			if ((x = *s &~ *m) != 0) {
				sig = ffz(~x) + i*_NSIG_BPW + 1;
				break;
			}
		break;

	case 2: if ((x = s[0] &~ m[0]) != 0)
			sig = 1;
		else if ((x = s[1] &~ m[1]) != 0)
			sig = _NSIG_BPW + 1;
		else
			break;
		sig += ffz(~x);
		break;

	case 1: if ((x = *s &~ *m) != 0)
			sig = ffz(~x) + 1;
		break;
	}

	return sig;
}
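
/*
 * Allocate a sigqueue entry for @t, charging it to the task's user and
 * enforcing RLIMIT_SIGPENDING unless @override_rlimit is set.
 */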
static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
					 int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * In order to avoid problems with "switch_user()", we want to make
	 * sure that the compiler doesn't re-load "t->user"
	 */
	user = t->user;
	barrier();
	atomic_inc(&user->sigpending);
	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = get_uid(user);
	}
	return q;
}
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}
/*
 * Flush all pending signals for a task.
 */

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}
/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}
/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not.
 */
void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
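
/*
 * Pull the siginfo for @sig off @list.  Returns 0 if the signal was
 * not pending, 1 after filling in *@info (zeroed for fast-pathed
 * signals that carried no queued siginfo).
 */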
static int collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;
	int still_pending = 0;

	if (unlikely(!sigismember(&list->signal, sig)))
		return 0;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first) {
				still_pending = 1;
				break;
			}
			first = q;
		}
	}
	if (first) {
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
		if (!still_pending)
			sigdelset(&list->signal, sig);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info. */
		sigdelset(&list->signal, sig);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = 0;
		info->si_pid = 0;
		info->si_uid = 0;
	}
	return 1;
}
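
/*
 * Helper for dequeue_signal(): pick the next deliverable signal from
 * one pending set, giving a registered driver notifier the chance to
 * veto delivery first.
 */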
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		if (!collect_signal(sig, pending, info))
			sig = 0;
	}

	return sig;
}
/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}
	recalc_sigpending_tsk(tsk);
	if (signr && unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		if (!(tsk->signal->flags & SIGNAL_GROUP_EXIT))
			tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if (signr &&
	    ((info->si_code & __SI_MASK) == __SI_TIMER) &&
	    info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced case.
	 * We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped
	 * state.  By using wake_up_state, we ensure the process will
	 * wake up and handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_STOPPED | TASK_TRACED;
	if (!wake_up_state(t, mask))
		kick_process(t);
}
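
/*
 * If the task was already running, wake_up_state() returns 0 and
 * kick_process() forces a reschedule interrupt on the CPU the task is
 * on, so it notices TIF_SIGPENDING promptly.
 */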
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
/*
 * Bad permissions for sending the signal
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	int error = -EINVAL;
	if (!valid_signal(sig))
		return error;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;
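
	/*
	 * The "a ^ b" tests below are cheap inequality checks: the sender
	 * must share a real or effective uid with the target, unless it
	 * has CAP_KILL or is sending SIGCONT to a task in its own session.
	 */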
	error = -EPERM;
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && ((sig != SIGCONT) ||
		(process_session(current) != process_session(t)))
	    && (current->euid ^ t->suid) && (current->euid ^ t->uid)
	    && (current->uid ^ t->suid) && (current->uid ^ t->uid)
	    && !capable(CAP_KILL))
		return error;

	return security_task_kill(t, info, sig, 0);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why);
/*
 * Handle magic process-wide effects of stop/continue signals.
 * Unlike the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 */
static void handle_stop_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	if (p->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * The process is in the middle of dying already.
		 */
		return;

	if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &p->signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
			t = next_thread(t);
		} while (t != p);
	} else if (sig == SIGCONT) {
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		if (unlikely(p->signal->group_stop_count > 0)) {
			/*
			 * There was a group stop in progress.  We'll
			 * pretend it finished before we got here.  We are
			 * obliged to report it to the parent: if the
			 * SIGSTOP happened "after" this SIGCONT, then it
			 * would have cleared this pending SIGCONT.  If it
			 * happened "before" this SIGCONT, then the parent
			 * got the SIGCHLD about the stop finishing before
			 * the continue happened.  We do the notification
			 * now, and it's as if the stop had finished and
			 * the SIGCHLD was pending on entry to this kill.
			 */
			p->signal->group_stop_count = 0;
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_STOPPED);
			spin_lock(&p->sighand->siglock);
		}
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		t = p;
		do {
			unsigned int state;
			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);

			t = next_thread(t);
		} while (t != p);

		if (p->signal->flags & SIGNAL_STOP_STOPPED) {
			/*
			 * We were in fact stopped, and are now continued.
			 * Notify the parent with CLD_CONTINUED.
			 */
			p->signal->flags = SIGNAL_STOP_CONTINUED;
			p->signal->group_exit_code = 0;
			spin_unlock(&p->sighand->siglock);
			do_notify_parent_cldstop(p, CLD_CONTINUED);
			spin_lock(&p->sighand->siglock);
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			p->signal->flags = 0;
		}
	} else if (sig == SIGKILL) {
		/*
		 * Make sure that any pending stop signal already dequeued
		 * is undone by the wakeup for SIGKILL.
		 */
		p->signal->flags = 0;
	}
}
static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			struct sigpending *signals)
{
	struct sigqueue *q = NULL;
	int ret = 0;

	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	q = __sigqueue_alloc(t, GFP_ATOMIC, (sig < SIGRTMIN &&
					     (is_si_special(info) ||
					      info->si_code >= 0)));
	if (q) {
		list_add_tail(&q->list, &signals->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = current->pid;
			q->info.si_uid = current->uid;
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER)
		/*
		 * Queue overflow, abort.  We may abort if the signal was rt
		 * and sent by user using something other than kill().
		 */
			return -EAGAIN;
	}

out_set:
	sigaddset(&signals->signal, sig);
	return ret;
}
#define LEGACY_QUEUE(sigptr, sig) \
	(((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
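
/*
 * LEGACY_QUEUE() is true when a non-realtime signal is already pending:
 * classic signals are not queued more than once, so a second send of
 * the same signal before delivery is simply dropped.
 */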
static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	int ret = 0;

	BUG_ON(!irqs_disabled());
	assert_spin_locked(&t->sighand->siglock);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(t, sig))
		goto out;

	/* Support queueing exactly one non-rt signal, so that we
	   can get more detailed information about the cause of
	   the signal. */
	if (LEGACY_QUEUE(&t->pending, sig))
		goto out;

	ret = send_signal(sig, info, t, &t->pending);
	if (!ret && !sigismember(&t->blocked, sig))
		signal_wake_up(t, sig == SIGKILL);
out:
	return ret;
}
/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_tsk(t);
		}
	}
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
void
force_sig_specific(int sig, struct task_struct *t)
{
	force_sig_info(sig, SEND_SIG_FORCED, t);
}
/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (p->state & (TASK_STOPPED | TASK_TRACED))
		return 0;
	return task_curr(p) || !signal_pending(p);
}
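
/*
 * The final test above prefers a thread that is running right now
 * (task_curr()) or one with no signal work already pending, since
 * either can be expected to reach the dequeue path quickly.
 */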
static void
__group_complete_signal(int sig, struct task_struct *p)
{
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = p->signal->curr_target;
		if (t == NULL)
			/* restart balancing at this thread */
			t = p->signal->curr_target = p;

		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == p->signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		p->signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) && !(p->signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !(t->ptrace & PT_PTRACED))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			p->signal->flags = SIGNAL_GROUP_EXIT;
			p->signal->group_exit_code = sig;
			p->signal->group_stop_count = 0;
			t = p;
			do {
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
				t = next_thread(t);
			} while (t != p);
			return;
		}

		/*
		 * There will be a core dump.  We make all threads other
		 * than the chosen one go into a group stop so that nothing
		 * happens until it gets scheduled, takes the signal off
		 * the shared queue, and does the core dump.  This is a
		 * little more complicated than strictly necessary, but it
		 * keeps the signal state that winds up in the core dump
		 * unchanged from the death state, e.g. which thread had
		 * the core-dump signal unblocked.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
		rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
		p->signal->group_stop_count = 0;
		p->signal->group_exit_task = t;
		t = p;
		do {
			p->signal->group_stop_count++;
			signal_wake_up(t, 0);
			t = next_thread(t);
		} while (t != p);
		wake_up_process(p->signal->group_exit_task);
		return;
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
}
int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret = 0;

	assert_spin_locked(&p->sighand->siglock);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig))
		return ret;

	if (LEGACY_QUEUE(&p->signal->shared_pending, sig))
		/* This is a non-RT signal and we already have one queued.  */
		return ret;

	/*
	 * Put this signal on the shared-pending queue, or fail with EAGAIN.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	ret = send_signal(sig, info, p, &p->signal->shared_pending);
	if (unlikely(ret))
		return ret;

	__group_complete_signal(sig, p);
	return 0;
}
/*
 * Nuke all other threads in the group.
 */
void zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;

	p->signal->flags = SIGNAL_GROUP_EXIT;
	p->signal->group_stop_count = 0;

	if (thread_group_empty(p))
		return;

	for (t = next_thread(p); t != p; t = next_thread(t)) {
		/*
		 * Don't bother with already dead threads
		 */
		if (t->exit_state)
			continue;

		/* SIGKILL will be handled before any pending SIGSTOP */
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}
}
/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct sighand_struct *lock_task_sighand(struct task_struct *tsk, unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}

	return sighand;
}
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	unsigned long flags;
	int ret;

	ret = check_kill_permission(sig, info, p);

	if (!ret && sig) {
		ret = -ESRCH;
		if (lock_task_sighand(p, &flags)) {
			ret = __group_send_sig_info(sig, info, p);
			unlock_task_sighand(p, &flags);
		}
	}

	return ret;
}
/*
 * kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 */

int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}
int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = __kill_pgrp_info(sig, info, pgrp);
	read_unlock(&tasklist_lock);

	return retval;
}
int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
	if (unlikely(sig_needs_tasklist(sig)))
		read_lock(&tasklist_lock);

	p = pid_task(pid, PIDTYPE_PID);
	if (p)
		error = group_send_sig_info(sig, info, p);

	if (unlikely(sig_needs_tasklist(sig)))
		read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return error;
}
int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_pid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
			 uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;

	if (!valid_signal(sig))
		return ret;

	read_lock(&tasklist_lock);
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if ((info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info)))
	    && (euid != p->suid) && (euid != p->uid)
	    && (uid != p->suid) && (uid != p->uid)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;
	if (sig && p->sighand) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		ret = __group_send_sig_info(sig, info, p);
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
out_unlock:
	read_unlock(&tasklist_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, int pid)
{
	int ret;

	if (!pid) {
		ret = kill_pgrp_info(sig, info, task_pgrp(current));
	} else if (pid == -1) {
		int retval = 0, count = 0;
		struct task_struct * p;

		read_lock(&tasklist_lock);
		for_each_process(p) {
			if (p->pid > 1 && p->tgid != current->tgid) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		read_unlock(&tasklist_lock);
		ret = count ? retval : -ESRCH;
	} else if (pid < 0) {
		ret = kill_pgrp_info(sig, info, find_pid(-pid));
	} else {
		ret = kill_pid_info(sig, info, find_pid(pid));
	}

	return ret;
}
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

/*
 * These two are the most common entry points.  They send a signal
 * just to the specific thread.
 */
int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;
	unsigned long flags;

	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	/*
	 * We need the tasklist lock even for the specific
	 * thread case (when we don't need to follow the group
	 * lists) in order to avoid races with "p->sighand"
	 * going away or changing from under us.
	 */
	read_lock(&tasklist_lock);
	spin_lock_irqsave(&p->sighand->siglock, flags);
	ret = specific_send_sig_info(sig, info, p);
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
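
/*
 * SEND_SIG_PRIV and SEND_SIG_NOINFO are special siginfo pointer values:
 * PRIV marks a signal generated inside the kernel (si_code SI_KERNEL),
 * NOINFO one sent on behalf of the current user (si_code SI_USER);
 * see the switch in send_signal() above.
 */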
int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
/*
 * This is the entry point for "process-wide" signals.
 * They will go to an appropriate thread in the thread group.
 */
int
send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = group_send_sig_info(sig, info, p);
	read_unlock(&tasklist_lock);
	return ret;
}
void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}
int kill_pgrp(struct pid *pid, int sig, int priv)
{
	return kill_pgrp_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

int
kill_proc(pid_t pid, int sig, int priv)
{
	return kill_proc_info(sig, __si_special(priv), pid);
}
/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */

struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q;

	if ((q = __sigqueue_alloc(current, GFP_KERNEL, 0)))
		q->flags |= SIGQUEUE_PREALLOC;
	return q;
}
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * If the signal is still pending remove it from the
	 * pending queue.
	 */
	if (unlikely(!list_empty(&q->list))) {
		spinlock_t *lock = &current->sighand->siglock;
		read_lock(&tasklist_lock);
		spin_lock_irqsave(lock, flags);
		if (!list_empty(&q->list))
			list_del_init(&q->list);
		spin_unlock_irqrestore(lock, flags);
		read_unlock(&tasklist_lock);
	}
	q->flags &= ~SIGQUEUE_PREALLOC;
	__sigqueue_free(q);
}
int send_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	/*
	 * The rcu based delayed sighand destroy makes it possible to
	 * run this without tasklist lock held. The task struct itself
	 * cannot go away as create_timer did get_task_struct().
	 *
	 * We return -1, when the task is marked exiting, so
	 * posix_timer_event can redirect it to the group leader
	 */
	rcu_read_lock();

	if (!likely(lock_task_sighand(p, &flags))) {
		ret = -1;
		goto out_err;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	list_add_tail(&q->list, &p->pending.list);
	sigaddset(&p->pending.signal, sig);
	if (!sigismember(&p->blocked, sig))
		signal_wake_up(p, sig == SIGKILL);

out:
	unlock_task_sighand(p, &flags);
out_err:
	rcu_read_unlock();

	return ret;
}
int
send_group_sigqueue(int sig, struct sigqueue *q, struct task_struct *p)
{
	unsigned long flags;
	int ret = 0;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	read_lock(&tasklist_lock);
	/* Since it_lock is held, p->sighand cannot be NULL. */
	spin_lock_irqsave(&p->sighand->siglock, flags);
	handle_stop_signal(sig, p);

	/* Short-circuit ignored signals.  */
	if (sig_ignored(p, sig)) {
		ret = 1;
		goto out;
	}

	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.  Other uses should not try to
		 * send the signal multiple times.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}

	/*
	 * Put this signal on the shared-pending queue.
	 * We always use the shared queue for process-wide signals,
	 * to avoid several races.
	 */
	list_add_tail(&q->list, &p->signal->shared_pending.list);
	sigaddset(&p->signal->shared_pending.signal, sig);

	__group_complete_signal(sig, p);
out:
	spin_unlock_irqrestore(&p->sighand->siglock, flags);
	read_unlock(&tasklist_lock);
	return ret;
}
/*
 * Wake up any threads in the parent blocked in wait* syscalls.
 */
static inline void __wake_up_parent(struct task_struct *p,
				    struct task_struct *parent)
{
	wake_up_interruptible_sync(&parent->signal->wait_chldexit);
}
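
/*
 * Parents sleeping in wait4()/waitpid() block on signal->wait_chldexit;
 * the _sync variant hints to the scheduler that the waker is about to
 * sleep or exit, avoiding a pointless preemption of the current task.
 */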
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 */

void do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(tsk->state & (TASK_STOPPED|TASK_TRACED));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(cputime_add(tsk->utime,
						       tsk->signal->utime));
	info.si_stime = cputime_to_jiffies(cputime_add(tsk->stime,
						       tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);
}
static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (tsk->ptrace & PT_PTRACED)
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	info.si_pid = tsk->pid;
	info.si_uid = tsk->uid;

	/* FIXME: find out whether or not this is supposed to be c*time. */
	info.si_utime = cputime_to_jiffies(tsk->utime);
	info.si_stime = cputime_to_jiffies(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}
static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace & PT_PTRACED))
		return 0;

	if (unlikely(current->parent == current->real_parent &&
		    (current->ptrace & PT_ATTACHED)))
		return 0;

	if (unlikely(current->signal == current->parent->signal) &&
	    unlikely(current->signal->flags & SIGNAL_GROUP_EXIT))
		return 0;

	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_waiters != 0. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_waiters) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}
/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer is gone,
 * we leave nostop_code in current->exit_code.
 */
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->signal->group_stop_count > 0)
		--current->signal->group_stop_count;

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, CLD_TRAPPED);
		read_unlock(&tasklist_lock);
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't stop here.
		 */
		read_unlock(&tasklist_lock);
		set_current_state(TASK_RUNNING);
		current->exit_code = nostop_code;
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 */
	recalc_sigpending();
}
void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = current->pid;
	info.si_uid = current->uid;

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, 0, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
static void
finish_stop(int stop_count)
{
	/*
	 * If there are no other threads in the group, or if there is
	 * a group stop in progress and we are the last to stop,
	 * report to the parent.  When ptraced, every thread reports itself.
	 */
	if (stop_count == 0 || (current->ptrace & PT_PTRACED)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, CLD_STOPPED);
		read_unlock(&tasklist_lock);
	}

	do {
		schedule();
	} while (try_to_freeze());
	/*
	 * Now we don't run again until continued.
	 */
	current->exit_code = 0;
}
/*
 * This performs the stopping for SIGSTOP and other stop signals.
 * We have to stop all threads in the thread group.
 * Returns nonzero if we've actually stopped and released the siglock.
 * Returns zero if we didn't stop and still hold the siglock.
 */
static int do_signal_stop(int signr)
{
	struct signal_struct *sig = current->signal;
	int stop_count;

	if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
		return 0;

	if (sig->group_stop_count > 0) {
		/*
		 * There is a group stop in progress.  We don't need to
		 * start another one.
		 */
		stop_count = --sig->group_stop_count;
	} else {
		/*
		 * There is no group stop already in progress.
		 * We must initiate one now.
		 */
		struct task_struct *t;

		sig->group_exit_code = signr;

		stop_count = 0;
		for (t = next_thread(current); t != current; t = next_thread(t))
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!t->exit_state &&
			    !(t->state & (TASK_STOPPED|TASK_TRACED))) {
				stop_count++;
				signal_wake_up(t, 0);
			}
		sig->group_stop_count = stop_count;
	}

	if (stop_count == 0)
		sig->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = sig->group_exit_code;
	__set_current_state(TASK_STOPPED);

	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
/*
 * Do appropriate magic when group_stop_count > 0.
 * We return nonzero if we stopped, after releasing the siglock.
 * We return zero if we still hold the siglock and should look
 * for another signal without checking group_stop_count again.
 */
static int handle_group_stop(void)
{
	int stop_count;

	if (current->signal->group_exit_task == current) {
		/*
		 * Group stop is so we can do a core dump,
		 * We are the initiating thread, so get on with it.
		 */
		current->signal->group_exit_task = NULL;
		return 0;
	}

	if (current->signal->flags & SIGNAL_GROUP_EXIT)
		/*
		 * Group stop is so another thread can do a core dump,
		 * or else we are racing against a death signal.
		 * Just punt the stop so we can get the next signal.
		 */
		return 0;

	/*
	 * There is a group stop in progress.  We stop
	 * without any associated signal being in our queue.
	 */
	stop_count = --current->signal->group_stop_count;
	if (stop_count == 0)
		current->signal->flags = SIGNAL_STOP_STOPPED;
	current->exit_code = current->signal->group_exit_code;
	set_current_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);
	finish_stop(stop_count);
	return 1;
}
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
			  struct pt_regs *regs, void *cookie)
{
	sigset_t *mask = &current->blocked;
	int signr = 0;

	try_to_freeze();

relock:
	spin_lock_irq(&current->sighand->siglock);
	for (;;) {
		struct k_sigaction *ka;

		if (unlikely(current->signal->group_stop_count > 0) &&
		    handle_group_stop())
			goto relock;

		signr = dequeue_signal(current, mask, info);

		if (!signr)
			break; /* will return 0 */

		if ((current->ptrace & PT_PTRACED) && signr != SIGKILL) {
			ptrace_signal_deliver(regs, cookie);

			/* Let the debugger run.  */
			ptrace_stop(signr, signr, info);

			/* We're back.  Did the debugger cancel the sig?  */
			signr = current->exit_code;
			if (signr == 0)
				continue;

			current->exit_code = 0;

			/* Update the siginfo structure if the signal has
			   changed.  If the debugger wanted something
			   specific in the siginfo structure then it should
			   have updated *info via PTRACE_SETSIGINFO.  */
			if (signr != info->si_signo) {
				info->si_signo = signr;
				info->si_errno = 0;
				info->si_code = SI_USER;
				info->si_pid = current->parent->pid;
				info->si_uid = current->parent->uid;
			}

			/* If the (new) signal is now blocked, requeue it.  */
			if (sigismember(&current->blocked, signr)) {
				specific_send_sig_info(signr, info, current);
				continue;
			}
		}

		ka = &current->sighand->action[signr-1];
		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			*return_ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Init of a pid space gets no signals it doesn't want from
		 * within that pid space. It can of course get signals from
		 * its parent pid space.
		 */
		if (current == child_reaper(current))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&current->sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&current->sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;
		if (sig_kernel_coredump(signr)) {
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump((long)signr, signr, regs);
		}

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
EXPORT_SYMBOL(recalc_sigpending);
EXPORT_SYMBOL_GPL(dequeue_signal);
EXPORT_SYMBOL(flush_signals);
EXPORT_SYMBOL(force_sig);
EXPORT_SYMBOL(kill_proc);
EXPORT_SYMBOL(ptrace_notify);
EXPORT_SYMBOL(send_sig);
EXPORT_SYMBOL(send_sig_info);
EXPORT_SYMBOL(sigprocmask);
EXPORT_SYMBOL(block_all_signals);
EXPORT_SYMBOL(unblock_all_signals);
/*
 * System call entry points.
 */

asmlinkage long sys_restart_syscall(void)
{
	struct restart_block *restart = &current_thread_info()->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}
/*
 * We don't need to get the kernel lock - this is all local to this
 * particular thread.. (and that's good, because this is _heavily_
 * used by various programs)
 */

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	int error;

	spin_lock_irq(&current->sighand->siglock);
	if (oldset)
		*oldset = current->blocked;

	error = 0;
	switch (how) {
	case SIG_BLOCK:
		sigorsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_UNBLOCK:
		signandsets(&current->blocked, &current->blocked, set);
		break;
	case SIG_SETMASK:
		current->blocked = *set;
		break;
	default:
		error = -EINVAL;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return error;
}
asmlinkage long
sys_rt_sigprocmask(int how, sigset_t __user *set, sigset_t __user *oset, size_t sigsetsize)
{
	int error = -EINVAL;
	sigset_t old_set, new_set;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (set) {
		error = -EFAULT;
		if (copy_from_user(&new_set, set, sizeof(*set)))
			goto out;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, &old_set);
		if (error)
			goto out;
		if (oset)
			goto set_old;
	} else if (oset) {
		spin_lock_irq(&current->sighand->siglock);
		old_set = current->blocked;
		spin_unlock_irq(&current->sighand->siglock);

	set_old:
		error = -EFAULT;
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			goto out;
	}
	error = 0;
out:
	return error;
}
long do_sigpending(void __user *set, unsigned long sigsetsize)
{
	long error = -EINVAL;
	sigset_t pending;

	if (sigsetsize > sizeof(sigset_t))
		goto out;

	spin_lock_irq(&current->sighand->siglock);
	sigorsets(&pending, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(&pending, &current->blocked, &pending);

	error = -EFAULT;
	if (!copy_to_user(set, &pending, sigsetsize))
		error = 0;

out:
	return error;
}

asmlinkage long
sys_rt_sigpending(sigset_t __user *set, size_t sigsetsize)
{
	return do_sigpending(set, sigsetsize);
}
#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER

int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
		return -EFAULT;
	if (from->si_code < 0)
		return __copy_to_user(to, from, sizeof(siginfo_t))
			? -EFAULT : 0;
	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.
	 * It should never copy any pad contained in the structure
	 * to avoid security leaks, but must copy the generic
	 * 3 ints plus the relevant union member.
	 */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	switch (from->si_code & __SI_MASK) {
	case __SI_KILL:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	case __SI_TIMER:
		err |= __put_user(from->si_tid, &to->si_tid);
		err |= __put_user(from->si_overrun, &to->si_overrun);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	case __SI_POLL:
		err |= __put_user(from->si_band, &to->si_band);
		err |= __put_user(from->si_fd, &to->si_fd);
		break;
	case __SI_FAULT:
		err |= __put_user(from->si_addr, &to->si_addr);
#ifdef __ARCH_SI_TRAPNO
		err |= __put_user(from->si_trapno, &to->si_trapno);
#endif
		break;
	case __SI_CHLD:
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_status, &to->si_status);
		err |= __put_user(from->si_utime, &to->si_utime);
		err |= __put_user(from->si_stime, &to->si_stime);
		break;
	case __SI_RT: /* This is not generated by the kernel as of now. */
	case __SI_MESGQ: /* But this is */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		err |= __put_user(from->si_ptr, &to->si_ptr);
		break;
	default: /* this is just in case for now ... */
		err |= __put_user(from->si_pid, &to->si_pid);
		err |= __put_user(from->si_uid, &to->si_uid);
		break;
	}
	return err;
}

#endif
asmlinkage long
sys_rt_sigtimedwait(const sigset_t __user *uthese,
		    siginfo_t __user *uinfo,
		    const struct timespec __user *uts,
		    size_t sigsetsize)
{
	int ret, sig;
	sigset_t these;
	struct timespec ts;
	siginfo_t info;
	long timeout = 0;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	/*
	 * Invert the set of allowed signals to get those we
	 * want to block.
	 */
	sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
	signotset(&these);

	if (uts) {
		if (copy_from_user(&ts, uts, sizeof(ts)))
			return -EFAULT;
		if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
		    || ts.tv_sec < 0)
			return -EINVAL;
	}

	spin_lock_irq(&current->sighand->siglock);
	sig = dequeue_signal(current, &these, &info);
	if (!sig) {
		timeout = MAX_SCHEDULE_TIMEOUT;
		if (uts)
			timeout = (timespec_to_jiffies(&ts)
				   + (ts.tv_sec || ts.tv_nsec));
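		/*
		 * The "+ (ts.tv_sec || ts.tv_nsec)" adds one jiffy for any
		 * nonzero timespec, so a short timeout rounds up rather
		 * than degenerating into an immediate poll.
		 */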
		if (timeout) {
			/* None ready -- temporarily unblock those we're
			 * interested while we are sleeping in so that we'll
			 * be awakened when they arrive.  */
			current->real_blocked = current->blocked;
			sigandsets(&current->blocked, &current->blocked, &these);
			recalc_sigpending();
			spin_unlock_irq(&current->sighand->siglock);

			timeout = schedule_timeout_interruptible(timeout);

			spin_lock_irq(&current->sighand->siglock);
			sig = dequeue_signal(current, &these, &info);
			current->blocked = current->real_blocked;
			siginitset(&current->real_blocked, 0);
			recalc_sigpending();
		}
	}
	spin_unlock_irq(&current->sighand->siglock);

	if (sig) {
		ret = sig;
		if (uinfo) {
			if (copy_siginfo_to_user(uinfo, &info))
				ret = -EFAULT;
		}
	} else {
		ret = -EAGAIN;
		if (timeout)
			ret = -EINTR;
	}

	return ret;
}
asmlinkage long
sys_kill(int pid, int sig)
{
	struct siginfo info;

	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_USER;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	return kill_something_info(sig, &info, pid);
}
static int do_tkill(int tgid, int pid, int sig)
{
	int error;
	struct siginfo info;
	struct task_struct *p;

	error = -ESRCH;
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = current->tgid;
	info.si_uid = current->uid;

	read_lock(&tasklist_lock);
	p = find_task_by_pid(pid);
	if (p && (tgid <= 0 || p->tgid == tgid)) {
		error = check_kill_permission(sig, &info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe.  No signal is actually delivered.
		 */
		if (!error && sig && p->sighand) {
			spin_lock_irq(&p->sighand->siglock);
			handle_stop_signal(sig, p);
			error = specific_send_sig_info(sig, &info, p);
			spin_unlock_irq(&p->sighand->siglock);
		}
	}
	read_unlock(&tasklist_lock);

	return error;
}
/**
 *  sys_tgkill - send signal to one specific thread
 *  @tgid: the thread group ID of the thread
 *  @pid: the PID of the thread
 *  @sig: signal to be sent
 *
 *  This syscall also checks the @tgid and returns -ESRCH even if the PID
 *  exists but no longer belongs to the target process.  This solves
 *  the problem of threads exiting and PIDs getting reused.
 */
asmlinkage long sys_tgkill(int tgid, int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}
/*
 *  Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
asmlinkage long
sys_tkill(int pid, int sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
asmlinkage long
sys_rt_sigqueueinfo(int pid, int sig, siginfo_t __user *uinfo)
{
	siginfo_t info;

	if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
		return -EFAULT;

	/* Not even root can pretend to send signals from the kernel.
	   Nor can they impersonate a kill(), which adds source info.  */
	if (info.si_code >= 0)
		return -EPERM;
	info.si_signo = sig;

	/* POSIX.1b doesn't mention process groups.  */
	return kill_proc_info(sig, &info, pid);
}
int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &current->sighand->action[sig-1];

	spin_lock_irq(&current->sighand->siglock);
	if (signal_pending(current)) {
		/*
		 * If there might be a fatal signal pending on multiple
		 * threads, make sure we take it before changing the action.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		return -ERESTARTNOINTR;
	}

	if (oact)
		*oact = *k;

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (act->sa.sa_handler == SIG_IGN ||
		    (act->sa.sa_handler == SIG_DFL && sig_kernel_ignore(sig))) {
			struct task_struct *t = current;
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			rm_from_queue_full(&mask, &t->signal->shared_pending);
			do {
				rm_from_queue_full(&mask, &t->pending);
				recalc_sigpending_tsk(t);
				t = next_thread(t);
			} while (t != current);
		}
	}

	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}
int
do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
{
	stack_t oss;
	int error;

	if (uoss) {
		oss.ss_sp = (void __user *) current->sas_ss_sp;
		oss.ss_size = current->sas_ss_size;
		oss.ss_flags = sas_ss_flags(sp);
	}

	if (uss) {
		void __user *ss_sp;
		size_t ss_size;
		int ss_flags;

		error = -EFAULT;
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss))
		    || __get_user(ss_sp, &uss->ss_sp)
		    || __get_user(ss_flags, &uss->ss_flags)
		    || __get_user(ss_size, &uss->ss_size))
			goto out;

		error = -EPERM;
		if (on_sig_stack(sp))
			goto out;

		error = -EINVAL;
		/*
		 * Note - this code used to test ss_flags incorrectly:
		 * old code may have been written using ss_flags==0
		 * to mean ss_flags==SS_ONSTACK (as this was the only
		 * way that worked) - this fix preserves that older
		 * mechanism.
		 */
		if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
			goto out;

		if (ss_flags == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			error = -ENOMEM;
			if (ss_size < MINSIGSTKSZ)
				goto out;
		}

		current->sas_ss_sp = (unsigned long) ss_sp;
		current->sas_ss_size = ss_size;
	}

	if (uoss) {
		error = -EFAULT;
		if (copy_to_user(uoss, &oss, sizeof(oss)))
			goto out;
	}

	error = 0;
out:
	return error;
}
#ifdef __ARCH_WANT_SYS_SIGPENDING

asmlinkage long
sys_sigpending(old_sigset_t __user *set)
{
	return do_sigpending(set, sizeof(*set));
}

#endif
2343 /* Some platforms have their own version with special arguments others
2344 support only sys_rt_sigprocmask. */
2347 sys_sigprocmask(int how, old_sigset_t __user *set, old_sigset_t __user *oset)
2350 old_sigset_t old_set, new_set;
2354 if (copy_from_user(&new_set, set, sizeof(*set)))
2356 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2358 spin_lock_irq(¤t->sighand->siglock);
2359 old_set = current->blocked.sig[0];
2367 sigaddsetmask(¤t->blocked, new_set);
2370 sigdelsetmask(¤t->blocked, new_set);
2373 current->blocked.sig[0] = new_set;
2377 recalc_sigpending();
2378 spin_unlock_irq(¤t->sighand->siglock);
2384 old_set = current->blocked.sig[0];
2387 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2394 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
#ifdef __ARCH_WANT_SYS_RT_SIGACTION
asmlinkage long
sys_rt_sigaction(int sig,
		 const struct sigaction __user *act,
		 struct sigaction __user *oact,
		 size_t sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret = -EINVAL;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		goto out;

	if (act) {
		if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);

	if (!ret && oact) {
		if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
			return -EFAULT;
	}
out:
	return ret;
}
#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
#ifdef __ARCH_WANT_SYS_SGETMASK

/*
 * For backwards compatibility.  Functionality superseded by sigprocmask.
 */
asmlinkage long
sys_sgetmask(void)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

asmlinkage long
sys_ssetmask(int newmask)
{
	int old;

	spin_lock_irq(&current->sighand->siglock);
	old = current->blocked.sig[0];

	siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
						  sigmask(SIGSTOP)));
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	return old;
}
#endif /* __ARCH_WANT_SGETMASK */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility.  Functionality superseded by sigaction.
 */
asmlinkage unsigned long
sys_signal(int sig, __sighandler_t handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
#ifdef __ARCH_WANT_SYS_PAUSE

asmlinkage long
sys_pause(void)
{
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}

#endif
#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));

	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	current->blocked = newset;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	set_thread_flag(TIF_RESTORE_SIGMASK);
	return -ERESTARTNOHAND;
}
#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
void __init signals_init(void)
{
	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}