/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !force)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, force))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !t->ptrace;
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & JOBCTL_PENDING_MASK) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they should
	 * clear it do so.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

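/*
 * For example, if SIGHUP and SIGSEGV are both pending and unblocked,
 * next_signal() returns SIGSEGV: any pending signal in SYNCHRONOUS_MASK
 * in the first word is serviced before the asynchronous ones, so
 * fault-style signals are not starved by ordinary ones.
 */
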
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

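/*
 * Note the accounting pattern above: the would-be queue entry is charged
 * to the target user's ->sigpending counter before the RLIMIT_SIGPENDING
 * check and uncharged again on failure, so the counter never underflows
 * even when several senders race against the limit.
 */
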
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);
	}
#endif
	return signr;
}

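/*
 * A typical caller (sketch, modelled on get_signal() below) holds the
 * siglock across the call and receives the siginfo by copy:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */
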
/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static int kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	if (uid_eq(cred->euid, tcred->suid) ||
	    uid_eq(cred->euid, tcred->uid)  ||
	    uid_eq(cred->uid,  tcred->suid) ||
	    uid_eq(cred->uid,  tcred->uid))
		return 1;

	if (ns_capable(tcred->user_ns, CAP_KILL))
		return 1;

	return 0;
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !t->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

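/*
 * This check is what makes classic signals coalesce: a second SIGCHLD
 * sent while one is already pending is dropped in __send_signal(),
 * whereas rt signals (>= SIGRTMIN) queue once per send.
 */
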
#ifdef CONFIG_USER_NS
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	if (current_user_ns() == task_cred_xxx(t, user_ns))
		return;

	if (SI_FROMKERNEL(info))
		return;

	rcu_read_lock();
	info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
					make_kuid(current_user_ns(), info->si_uid));
	rcu_read_unlock();
}
#else
static inline void userns_fixup_signal_uid(struct siginfo *info, struct task_struct *t)
{
	return;
}
#endif

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_FORCED)))
		goto ret;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}

		userns_fixup_signal_uid(&q->info, t);

	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
ret:
	trace_signal_generate(sig, info, t, group, result);
	return ret;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

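/*
 * For example, forcing SIGSEGV at a task that has SIGSEGV blocked or
 * ignored first resets the disposition to SIG_DFL (and unblocks the
 * signal), so the target cannot survive the forced signal simply by
 * masking or ignoring it.
 */
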
/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	for (;;) {
		/*
		 * Disable interrupts early to avoid deadlocks.
		 * See rcu_read_unlock() comment header for details.
		 */
		local_irq_save(*flags);
		rcu_read_lock();
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL)) {
			rcu_read_unlock();
			local_irq_restore(*flags);
			break;
		}
		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock(&sighand->siglock);
		if (likely(sighand == tsk->sighand)) {
			rcu_read_unlock();
			break;
		}
		spin_unlock(&sighand->siglock);
		rcu_read_unlock();
		local_irq_restore(*flags);
	}

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static int kill_as_cred_perm(const struct cred *cred,
			     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);
	if (!uid_eq(cred->euid, pcred->suid) && !uid_eq(cred->euid, pcred->uid) &&
	    !uid_eq(cred->uid,  pcred->suid) && !uid_eq(cred->uid,  pcred->uid))
		return 0;
	return 1;
}

/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_cred(int sig, struct siginfo *info, struct pid *pid,
			 const struct cred *cred, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

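/*
 * Summary of the pid encoding handled above, mirroring kill(2):
 *
 *	pid > 0		signal the process with that pid
 *	pid == 0	signal every process in the caller's process group
 *	pid == -1	signal every process the caller may signal, except
 *			init and the caller's own thread group
 *	pid < -1	signal every process in the process group -pid
 */
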
/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, group, result);
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}

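/*
 * Lifecycle sketch for the preallocated path (the POSIX timer code is
 * the real user; the call sites shown are illustrative only):
 *
 *	q = sigqueue_alloc();			// at timer_create() time
 *	...
 *	send_sigqueue(q, target, group);	// at each timer expiry
 *	...
 *	sigqueue_free(q);			// at timer_delete() time
 *
 * Because the sigqueue is preallocated, delivery at expiry time cannot
 * fail with -EAGAIN; a re-fire while the entry is still queued just
 * increments si_overrun.
 */
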
/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != tsk->parent->self_exec_id)
			sig = SIGCHLD;
	}

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump, stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead, so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return non-zero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 */
	set_current_state(TASK_TRACED);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		freezable_schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	siginfo_t info;

	memset(&info, 0, sizeof info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}

/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(signal_group_exit(sig)))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;

		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		t = current;
		while_each_thread(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		__set_current_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach in between; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		freezable_schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
		current->exit_code = 0;
	}
}

static int ptrace_signal(int signr, siginfo_t *info)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr. This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop(). In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping. See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	signr = current->exit_code;
	if (signr == 0)
		return signr;

	current->exit_code = 0;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr)) {
		specific_send_sig_info(signr, info, current);
		signr = 0;
	}

	return signr;
}

2156 int get_signal(struct ksignal *ksig)
2158 struct sighand_struct *sighand = current->sighand;
2159 struct signal_struct *signal = current->signal;
2162 if (unlikely(current->task_works))
2165 if (unlikely(uprobe_deny_signal()))
2169 * Do this once, we can't return to user-mode if freezing() == T.
2170 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2171 * thus do not need another check after return.
2176 spin_lock_irq(&sighand->siglock);
2178 * Every stopped thread goes here after wakeup. Check to see if
2179 * we should notify the parent, prepare_signal(SIGCONT) encodes
2180 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2182 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2185 if (signal->flags & SIGNAL_CLD_CONTINUED)
2186 why = CLD_CONTINUED;
2190 signal->flags &= ~SIGNAL_CLD_MASK;
2192 spin_unlock_irq(&sighand->siglock);
2195 * Notify the parent that we're continuing. This event is
2196 * always per-process and doesn't make whole lot of sense
2197 * for ptracers, who shouldn't consume the state via
2198 * wait(2) either, but, for backward compatibility, notify
2199 * the ptracer of the group leader too unless it's gonna be
2202 read_lock(&tasklist_lock);
2203 do_notify_parent_cldstop(current, false, why);
2205 if (ptrace_reparented(current->group_leader))
2206 do_notify_parent_cldstop(current->group_leader,
2208 read_unlock(&tasklist_lock);
2214 struct k_sigaction *ka;
2216 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2220 if (unlikely(current->jobctl & JOBCTL_TRAP_MASK)) {
2222 spin_unlock_irq(&sighand->siglock);
2226 signr = dequeue_signal(current, ¤t->blocked, &ksig->info);
2229 break; /* will return 0 */
2231 if (unlikely(current->ptrace) && signr != SIGKILL) {
2232 signr = ptrace_signal(signr, &ksig->info);
2237 ka = &sighand->action[signr-1];
2239 /* Trace actually delivered signals. */
2240 trace_signal_deliver(signr, &ksig->info, ka);
2242 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2244 if (ka->sa.sa_handler != SIG_DFL) {
2245 /* Run the handler. */
2248 if (ka->sa.sa_flags & SA_ONESHOT)
2249 ka->sa.sa_handler = SIG_DFL;
2251 break; /* will return non-zero "signr" value */
2255 * Now we are doing the default action for this signal.
2257 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2261 * Global init gets no signals it doesn't want.
2262 * Container-init gets no signals it doesn't want from same
2265 * Note that if global/container-init sees a sig_kernel_only()
2266 * signal here, the signal must have been generated internally
2267 * or must have come from an ancestor namespace. In either
2268 * case, the signal cannot be dropped.
2270 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2271 !sig_kernel_only(signr))
2274 if (sig_kernel_stop(signr)) {
2276 * The default action is to stop all threads in
2277 * the thread group. The job control signals
2278 * do nothing in an orphaned pgrp, but SIGSTOP
2279 * always works. Note that siglock needs to be
2280 * dropped during the call to is_orphaned_pgrp()
2281 * because of lock ordering with tasklist_lock.
2282 * This allows an intervening SIGCONT to be posted.
2283 * We need to check for that and bail out if necessary.
2285 if (signr != SIGSTOP) {
2286 spin_unlock_irq(&sighand->siglock);
2288 /* signals can be posted during this window */
2290 if (is_current_pgrp_orphaned())
2291 goto relock;
2293 spin_lock_irq(&sighand->siglock);
2296 if (likely(do_signal_stop(ksig->info.si_signo))) {
2297 /* It released the siglock. */
2298 goto relock;
2299 }
2302 * We didn't actually stop, due to a race
2303 * with SIGCONT or something like that.
2305 continue;
2306 }
2308 spin_unlock_irq(&sighand->siglock);
2311 * Anything else is fatal, maybe with a core dump.
2313 current->flags |= PF_SIGNALED;
2315 if (sig_kernel_coredump(signr)) {
2316 if (print_fatal_signals)
2317 print_fatal_signal(ksig->info.si_signo);
2318 proc_coredump_connector(current);
2320 * If it was able to dump core, this kills all
2321 * other threads in the group and synchronizes with
2322 * their demise. If we lost the race with another
2323 * thread getting here, it set group_exit_code
2324 * first and our do_group_exit call below will use
2325 * that value and ignore the one we pass it.
2327 do_coredump(&ksig->info);
2328 }
2331 * Death signals, no core dump.
2333 do_group_exit(ksig->info.si_signo);
2334 /* NOTREACHED */
2335 }
2336 spin_unlock_irq(&sighand->siglock);
2338 ksig->sig = signr;
2339 return ksig->sig > 0;
2343 * signal_delivered - called after a signal has been delivered
2344 * @ksig: kernel signal struct
2345 * @stepping: nonzero if debugger single-step or block-step in use
2347 * This function should be called when a signal has successfully been
2348 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2349 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2350 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2352 static void signal_delivered(struct ksignal *ksig, int stepping)
2354 sigset_t blocked;
2356 /* A signal was successfully delivered, and the
2357 saved sigmask was stored on the signal frame,
2358 and will be restored by sigreturn. So we can
2359 simply clear the restore sigmask flag. */
2360 clear_restore_sigmask();
2362 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2363 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2364 sigaddset(&blocked, ksig->sig);
2365 set_current_blocked(&blocked);
2366 tracehook_signal_handler(stepping);
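/*
 * Illustrative userspace sketch (not part of this file): the mask
 * computed above mirrors sigaction(2) semantics - sa_mask is blocked
 * during the handler, plus the signal itself unless SA_NODEFER is set.
 * "handler" is a hypothetical signal handler.
 *
 *	#include <signal.h>
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = handler;		// hypothetical handler
 *	sigemptyset(&sa.sa_mask);
 *	sigaddset(&sa.sa_mask, SIGUSR2);	// also blocked in the handler
 *	sa.sa_flags = SA_NODEFER;		// SIGUSR1 itself stays unblocked
 *	sigaction(SIGUSR1, &sa, NULL);
 */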
2369 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2371 if (failed)
2372 force_sigsegv(ksig->sig, current);
2373 else
2374 signal_delivered(ksig, stepping);
2378 * It could be that complete_signal() picked us to notify about the
2379 * group-wide signal. Other threads should be notified now to take
2380 * the shared signals in @which since we will not.
2382 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2384 sigset_t retarget;
2385 struct task_struct *t;
2387 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2388 if (sigisemptyset(&retarget))
2389 return;
2391 t = tsk;
2392 while_each_thread(tsk, t) {
2393 if (t->flags & PF_EXITING)
2394 continue;
2396 if (!has_pending_signals(&retarget, &t->blocked))
2397 continue;
2398 /* Remove the signals this thread can handle. */
2399 sigandsets(&retarget, &retarget, &t->blocked);
2401 if (!signal_pending(t))
2402 signal_wake_up(t, 0);
2404 if (sigisemptyset(&retarget))
2405 break;
2409 void exit_signals(struct task_struct *tsk)
2411 int group_stop = 0;
2412 sigset_t unblocked;
2415 * @tsk is about to have PF_EXITING set - lock out users which
2416 * expect stable threadgroup.
2418 cgroup_threadgroup_change_begin(tsk);
2420 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2421 tsk->flags |= PF_EXITING;
2422 cgroup_threadgroup_change_end(tsk);
2423 return;
2424 }
2426 spin_lock_irq(&tsk->sighand->siglock);
2428 * From now this task is not visible for group-wide signals,
2429 * see wants_signal(), do_signal_stop().
2431 tsk->flags |= PF_EXITING;
2433 cgroup_threadgroup_change_end(tsk);
2435 if (!signal_pending(tsk))
2436 goto out;
2438 unblocked = tsk->blocked;
2439 signotset(&unblocked);
2440 retarget_shared_pending(tsk, &unblocked);
2442 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2443 task_participate_group_stop(tsk))
2444 group_stop = CLD_STOPPED;
2445 out:
2446 spin_unlock_irq(&tsk->sighand->siglock);
2449 * If group stop has completed, deliver the notification. This
2450 * should always go to the real parent of the group leader.
2452 if (unlikely(group_stop)) {
2453 read_lock(&tasklist_lock);
2454 do_notify_parent_cldstop(tsk, false, group_stop);
2455 read_unlock(&tasklist_lock);
2459 EXPORT_SYMBOL(recalc_sigpending);
2460 EXPORT_SYMBOL_GPL(dequeue_signal);
2461 EXPORT_SYMBOL(flush_signals);
2462 EXPORT_SYMBOL(force_sig);
2463 EXPORT_SYMBOL(send_sig);
2464 EXPORT_SYMBOL(send_sig_info);
2465 EXPORT_SYMBOL(sigprocmask);
2468 * System call entry points.
2472 * sys_restart_syscall - restart a system call
2474 SYSCALL_DEFINE0(restart_syscall)
2476 struct restart_block *restart = &current->restart_block;
2477 return restart->fn(restart);
2480 long do_no_restart_syscall(struct restart_block *param)
2482 return -EINTR;
2485 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2487 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2488 sigset_t newblocked;
2489 /* A set of now blocked but previously unblocked signals. */
2490 sigandnsets(&newblocked, newset, &current->blocked);
2491 retarget_shared_pending(tsk, &newblocked);
2493 tsk->blocked = *newset;
2494 recalc_sigpending();
2498 * set_current_blocked - change current->blocked mask
2499 * @newset: new mask
2501 * It is wrong to change ->blocked directly, this helper should be used
2502 * to ensure the process can't miss a shared signal we are going to block.
2504 void set_current_blocked(sigset_t *newset)
2506 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2507 __set_current_blocked(newset);
2510 void __set_current_blocked(const sigset_t *newset)
2512 struct task_struct *tsk = current;
2515 * In case the signal mask hasn't changed, there is nothing we need
2516 * to do. The current->blocked shouldn't be modified by other task.
2518 if (sigequalsets(&tsk->blocked, newset))
2519 return;
2521 spin_lock_irq(&tsk->sighand->siglock);
2522 __set_task_blocked(tsk, newset);
2523 spin_unlock_irq(&tsk->sighand->siglock);
2527 * This is also useful for kernel threads that want to temporarily
2528 * (or permanently) block certain signals.
2530 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2531 * interface happily blocks "unblockable" signals like SIGKILL
2532 * and friends.
2534 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2536 struct task_struct *tsk = current;
2537 sigset_t newset;
2539 /* Lockless, only current can change ->blocked, never from irq */
2540 if (oldset)
2541 *oldset = tsk->blocked;
2543 switch (how) {
2544 case SIG_BLOCK:
2545 sigorsets(&newset, &tsk->blocked, set);
2546 break;
2547 case SIG_UNBLOCK:
2548 sigandnsets(&newset, &tsk->blocked, set);
2549 break;
2550 case SIG_SETMASK:
2551 newset = *set;
2552 break;
2553 default:
2554 return -EINVAL;
2555 }
2557 __set_current_blocked(&newset);
2558 return 0;
2562 * sys_rt_sigprocmask - change the list of currently blocked signals
2563 * @how: whether to add, remove, or set signals
2564 * @nset: signals to add, remove, or set (if non-null)
2565 * @oset: previous value of signal mask if non-null
2566 * @sigsetsize: size of sigset_t type
2568 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2569 sigset_t __user *, oset, size_t, sigsetsize)
2571 sigset_t old_set, new_set;
2572 int error;
2574 /* XXX: Don't preclude handling different sized sigset_t's. */
2575 if (sigsetsize != sizeof(sigset_t))
2576 return -EINVAL;
2578 old_set = current->blocked;
2581 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2583 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2585 error = sigprocmask(how, &new_set, NULL);
2591 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
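/*
 * Illustrative userspace sketch (not part of this file): typical use
 * of the syscall above via the sigprocmask(2) wrapper - block SIGINT
 * around a critical section, then restore the old mask.
 *
 *	#include <signal.h>
 *
 *	sigset_t set, old;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	// ... critical section, SIGINT stays pending ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */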
2598 #ifdef CONFIG_COMPAT
2599 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2600 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2602 sigset_t old_set = current->blocked;
2604 /* XXX: Don't preclude handling different sized sigset_t's. */
2605 if (sigsetsize != sizeof(sigset_t))
2611 if (get_compat_sigset(&new_set, nset))
2613 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2615 error = sigprocmask(how, &new_set, NULL);
2619 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2623 static int do_sigpending(sigset_t *set)
2625 spin_lock_irq(&current->sighand->siglock);
2626 sigorsets(set, &current->pending.signal,
2627 &current->signal->shared_pending.signal);
2628 spin_unlock_irq(&current->sighand->siglock);
2630 /* Outside the lock because only this thread touches it. */
2631 sigandsets(set, &current->blocked, set);
2632 return 0;
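/*
 * Illustrative userspace sketch (not part of this file): the helper
 * above backs sigpending(2), which reports signals that were raised
 * while blocked.
 *
 *	#include <signal.h>
 *
 *	sigset_t pending;
 *	sigpending(&pending);
 *	if (sigismember(&pending, SIGTERM))
 *		;	// a SIGTERM is queued behind the current mask
 */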
2636 * sys_rt_sigpending - examine pending signals that have been raised
2637 * while blocked
2638 * @uset: stores pending signals
2639 * @sigsetsize: size of sigset_t type or larger
2641 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
2643 sigset_t set;
2644 int err;
2646 if (sigsetsize > sizeof(*uset))
2647 return -EINVAL;
2649 err = do_sigpending(&set);
2650 if (!err && copy_to_user(uset, &set, sigsetsize))
2651 err = -EFAULT;
2652 return err;
2655 #ifdef CONFIG_COMPAT
2656 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
2657 compat_size_t, sigsetsize)
2662 if (sigsetsize > sizeof(*uset))
2665 err = do_sigpending(&set);
2667 err = put_compat_sigset(uset, &set, sigsetsize);
2672 enum siginfo_layout siginfo_layout(int sig, int si_code)
2674 enum siginfo_layout layout = SIL_KILL;
2675 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
2676 static const struct {
2677 unsigned char limit, layout;
2678 } filter[] = {
2679 [SIGILL] = { NSIGILL, SIL_FAULT },
2680 [SIGFPE] = { NSIGFPE, SIL_FAULT },
2681 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
2682 [SIGBUS] = { NSIGBUS, SIL_FAULT },
2683 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
2684 #if defined(SIGEMT) && defined(NSIGEMT)
2685 [SIGEMT] = { NSIGEMT, SIL_FAULT },
2686 #endif
2687 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
2688 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
2689 #ifdef __ARCH_SIGSYS
2690 [SIGSYS] = { NSIGSYS, SIL_SYS },
2691 #endif
2692 };
2693 if ((sig < ARRAY_SIZE(filter)) && (si_code <= filter[sig].limit))
2694 layout = filter[sig].layout;
2695 else if (si_code <= NSIGPOLL)
2696 layout = SIL_POLL;
2697 } else {
2698 if (si_code == SI_TIMER)
2699 layout = SIL_TIMER;
2700 else if (si_code == SI_SIGIO)
2701 layout = SIL_POLL;
2702 else if (si_code < 0)
2703 layout = SIL_RT;
2704 /* Tests to support buggy kernel ABIs */
2705 #ifdef TRAP_FIXME
2706 if ((sig == SIGTRAP) && (si_code == TRAP_FIXME))
2707 layout = SIL_FAULT;
2708 #endif
2709 #ifdef FPE_FIXME
2710 if ((sig == SIGFPE) && (si_code == FPE_FIXME))
2711 layout = SIL_FAULT;
2712 #endif
2713 }
2714 return layout;
2717 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2719 int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2721 int err;
2723 if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t)))
2724 return -EFAULT;
2725 if (from->si_code < 0)
2726 return __copy_to_user(to, from, sizeof(siginfo_t))
2727 ? -EFAULT : 0;
2729 * If you change siginfo_t structure, please be sure
2730 * this code is fixed accordingly.
2731 * Please remember to update the signalfd_copyinfo() function
2732 * inside fs/signalfd.c too, in case siginfo_t changes.
2733 * It should never copy any pad contained in the structure
2734 * to avoid security leaks, but must copy the generic
2735 * 3 ints plus the relevant union member.
2737 err = __put_user(from->si_signo, &to->si_signo);
2738 err |= __put_user(from->si_errno, &to->si_errno);
2739 err |= __put_user(from->si_code, &to->si_code);
2740 switch (siginfo_layout(from->si_signo, from->si_code)) {
2741 case SIL_KILL:
2742 err |= __put_user(from->si_pid, &to->si_pid);
2743 err |= __put_user(from->si_uid, &to->si_uid);
2744 break;
2745 case SIL_TIMER:
2746 /* Unreached SI_TIMER is negative */
2747 break;
2748 case SIL_POLL:
2749 err |= __put_user(from->si_band, &to->si_band);
2750 err |= __put_user(from->si_fd, &to->si_fd);
2751 break;
2752 case SIL_FAULT:
2753 err |= __put_user(from->si_addr, &to->si_addr);
2754 #ifdef __ARCH_SI_TRAPNO
2755 err |= __put_user(from->si_trapno, &to->si_trapno);
2756 #endif
2757 #ifdef BUS_MCEERR_AO
2759 * Other callers might not initialize the si_lsb field,
2760 * so check explicitly for the right codes here.
2762 if (from->si_signo == SIGBUS &&
2763 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2764 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2765 #endif
2766 #ifdef SEGV_BNDERR
2767 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2768 err |= __put_user(from->si_lower, &to->si_lower);
2769 err |= __put_user(from->si_upper, &to->si_upper);
2770 }
2771 #endif
2772 #ifdef SEGV_PKUERR
2773 if (from->si_signo == SIGSEGV && from->si_code == SEGV_PKUERR)
2774 err |= __put_user(from->si_pkey, &to->si_pkey);
2775 #endif
2776 break;
2777 case SIL_CHLD:
2778 err |= __put_user(from->si_pid, &to->si_pid);
2779 err |= __put_user(from->si_uid, &to->si_uid);
2780 err |= __put_user(from->si_status, &to->si_status);
2781 err |= __put_user(from->si_utime, &to->si_utime);
2782 err |= __put_user(from->si_stime, &to->si_stime);
2783 break;
2784 case SIL_RT:
2785 err |= __put_user(from->si_pid, &to->si_pid);
2786 err |= __put_user(from->si_uid, &to->si_uid);
2787 err |= __put_user(from->si_ptr, &to->si_ptr);
2788 break;
2789 #ifdef __ARCH_SIGSYS
2790 case SIL_SYS:
2791 err |= __put_user(from->si_call_addr, &to->si_call_addr);
2792 err |= __put_user(from->si_syscall, &to->si_syscall);
2793 err |= __put_user(from->si_arch, &to->si_arch);
2794 break;
2795 #endif
2796 }
2797 return err;
2803 * do_sigtimedwait - wait for queued signals specified in @which
2804 * @which: queued signals to wait for
2805 * @info: if non-null, the signal's siginfo is returned here
2806 * @ts: upper bound on process time suspension
2808 static int do_sigtimedwait(const sigset_t *which, siginfo_t *info,
2809 const struct timespec *ts)
2811 ktime_t *to = NULL, timeout = KTIME_MAX;
2812 struct task_struct *tsk = current;
2813 sigset_t mask = *which;
2814 int sig, ret = 0;
2816 if (ts) {
2817 if (!timespec_valid(ts))
2818 return -EINVAL;
2819 timeout = timespec_to_ktime(*ts);
2820 to = &timeout;
2821 }
2824 * Invert the set of allowed signals to get those we want to block.
2826 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2827 signotset(&mask);
2829 spin_lock_irq(&tsk->sighand->siglock);
2830 sig = dequeue_signal(tsk, &mask, info);
2831 if (!sig && timeout) {
2833 * None ready, temporarily unblock those we're interested
2834 * in while we sleep, so that we'll be awakened when
2835 * they arrive. Unblocking is always fine, we can avoid
2836 * set_current_blocked().
2838 tsk->real_blocked = tsk->blocked;
2839 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
2840 recalc_sigpending();
2841 spin_unlock_irq(&tsk->sighand->siglock);
2843 __set_current_state(TASK_INTERRUPTIBLE);
2844 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
2845 HRTIMER_MODE_REL);
2846 spin_lock_irq(&tsk->sighand->siglock);
2847 __set_task_blocked(tsk, &tsk->real_blocked);
2848 sigemptyset(&tsk->real_blocked);
2849 sig = dequeue_signal(tsk, &mask, info);
2851 spin_unlock_irq(&tsk->sighand->siglock);
2853 if (sig)
2854 return sig;
2855 return ret ? -EINTR : -EAGAIN;
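/*
 * Illustrative userspace sketch (not part of this file): waiting for
 * a blocked SIGUSR1 with a timeout via sigtimedwait(2), the usual
 * entry point for the helper above.
 *
 *	#include <signal.h>
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);	// must be blocked first
 *	if (sigtimedwait(&set, &si, &ts) < 0)
 *		;	// errno == EAGAIN on timeout, EINTR on interruption
 */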
2859 * sys_rt_sigtimedwait - synchronously wait for queued signals
2860 * specified in @uthese
2861 * @uthese: queued signals to wait for
2862 * @uinfo: if non-null, the signal's siginfo is returned here
2863 * @uts: upper bound on process time suspension
2864 * @sigsetsize: size of sigset_t type
2866 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2867 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2868 size_t, sigsetsize)
2870 sigset_t these;
2871 struct timespec ts;
2872 siginfo_t info;
2873 int ret;
2875 /* XXX: Don't preclude handling different sized sigset_t's. */
2876 if (sigsetsize != sizeof(sigset_t))
2877 return -EINVAL;
2879 if (copy_from_user(&these, uthese, sizeof(these)))
2880 return -EFAULT;
2882 if (uts) {
2883 if (copy_from_user(&ts, uts, sizeof(ts)))
2884 return -EFAULT;
2885 }
2887 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
2889 if (ret > 0 && uinfo) {
2890 if (copy_siginfo_to_user(uinfo, &info))
2891 ret = -EFAULT;
2892 }
2894 return ret;
2897 #ifdef CONFIG_COMPAT
2898 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait, compat_sigset_t __user *, uthese,
2899 struct compat_siginfo __user *, uinfo,
2900 struct compat_timespec __user *, uts, compat_size_t, sigsetsize)
2907 if (sigsetsize != sizeof(sigset_t))
2910 if (get_compat_sigset(&s, uthese))
2914 if (compat_get_timespec(&t, uts))
2918 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
2920 if (ret > 0 && uinfo) {
2921 if (copy_siginfo_to_user32(uinfo, &info))
2930 * sys_kill - send a signal to a process
2931 * @pid: the PID of the process
2932 * @sig: signal to be sent
2934 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2936 struct siginfo info;
2938 info.si_signo = sig;
2939 info.si_errno = 0;
2940 info.si_code = SI_USER;
2941 info.si_pid = task_tgid_vnr(current);
2942 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2944 return kill_something_info(sig, &info, pid);
2947 static int
2948 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2950 struct task_struct *p;
2951 int error = -ESRCH;
2953 rcu_read_lock();
2954 p = find_task_by_vpid(pid);
2955 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2956 error = check_kill_permission(sig, info, p);
2958 * The null signal is a permissions and process existence
2959 * probe. No signal is actually delivered.
2961 if (!error && sig) {
2962 error = do_send_sig_info(sig, info, p, false);
2964 * If lock_task_sighand() failed we pretend the task
2965 * dies after receiving the signal. The window is tiny,
2966 * and the signal is private anyway.
2968 if (unlikely(error == -ESRCH))
2969 error = 0;
2977 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2979 struct siginfo info = {};
2981 info.si_signo = sig;
2982 info.si_errno = 0;
2983 info.si_code = SI_TKILL;
2984 info.si_pid = task_tgid_vnr(current);
2985 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2987 return do_send_specific(tgid, pid, sig, &info);
2991 * sys_tgkill - send signal to one specific thread
2992 * @tgid: the thread group ID of the thread
2993 * @pid: the PID of the thread
2994 * @sig: signal to be sent
2996 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2997 * exists but no longer belongs to the target process. This
2998 * method solves the problem of threads exiting and PIDs getting reused.
3000 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3002 /* This is only valid for single tasks */
3003 if (pid <= 0 || tgid <= 0)
3004 return -EINVAL;
3006 return do_tkill(tgid, pid, sig);
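/*
 * Illustrative userspace sketch (not part of this file): older glibc
 * exposes no tgkill() wrapper, so it is commonly invoked via
 * syscall(2); "tgid" and "tid" are assumed to come from the target's
 * getpid()/gettid().
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */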
3010 * sys_tkill - send signal to one specific task
3011 * @pid: the PID of the task
3012 * @sig: signal to be sent
3014 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3016 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3018 /* This is only valid for single tasks */
3019 if (pid <= 0)
3020 return -EINVAL;
3022 return do_tkill(0, pid, sig);
3025 static int do_rt_sigqueueinfo(pid_t pid, int sig, siginfo_t *info)
3027 /* Not even root can pretend to send signals from the kernel.
3028 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3030 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3031 (task_pid_vnr(current) != pid))
3032 return -EPERM;
3034 info->si_signo = sig;
3036 /* POSIX.1b doesn't mention process groups. */
3037 return kill_proc_info(sig, info, pid);
3041 * sys_rt_sigqueueinfo - send signal information to a process
3042 * @pid: the PID of the thread
3043 * @sig: signal to be sent
3044 * @uinfo: signal info to be sent
3046 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3047 siginfo_t __user *, uinfo)
3050 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3051 return -EFAULT;
3052 return do_rt_sigqueueinfo(pid, sig, &info);
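/*
 * Illustrative userspace sketch (not part of this file): sigqueue(3)
 * is the usual entry point for rt_sigqueueinfo; glibc fills in
 * si_code = SI_QUEUE (negative), so the restriction above is not hit.
 * "pid" is the target process.
 *
 *	#include <signal.h>
 *
 *	union sigval v;
 *	v.sival_int = 42;		// payload read via si_value
 *	sigqueue(pid, SIGRTMIN, v);
 */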
3055 #ifdef CONFIG_COMPAT
3056 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3059 struct compat_siginfo __user *, uinfo)
3061 siginfo_t info = {};
3062 int ret = copy_siginfo_from_user32(&info, uinfo);
3063 if (ret)
3064 return ret;
3065 return do_rt_sigqueueinfo(pid, sig, &info);
3069 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
3071 /* This is only valid for single tasks */
3072 if (pid <= 0 || tgid <= 0)
3073 return -EINVAL;
3075 /* Not even root can pretend to send signals from the kernel.
3076 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3078 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3079 (task_pid_vnr(current) != pid))
3080 return -EPERM;
3082 info->si_signo = sig;
3084 return do_send_specific(tgid, pid, sig, info);
3087 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3088 siginfo_t __user *, uinfo)
3092 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
3093 return -EFAULT;
3095 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3098 #ifdef CONFIG_COMPAT
3099 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3103 struct compat_siginfo __user *, uinfo)
3105 siginfo_t info = {};
3107 if (copy_siginfo_from_user32(&info, uinfo))
3108 return -EFAULT;
3109 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3114 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3116 void kernel_sigaction(int sig, __sighandler_t action)
3118 spin_lock_irq(&current->sighand->siglock);
3119 current->sighand->action[sig - 1].sa.sa_handler = action;
3120 if (action == SIG_IGN) {
3121 sigset_t mask;
3123 sigemptyset(&mask);
3124 sigaddset(&mask, sig);
3126 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3127 flush_sigqueue_mask(&mask, &current->pending);
3128 recalc_sigpending();
3129 }
3130 spin_unlock_irq(&current->sighand->siglock);
3132 EXPORT_SYMBOL(kernel_sigaction);
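/*
 * Illustrative in-kernel sketch: kthreads normally reach this helper
 * through allow_signal()/disallow_signal(); a minimal pattern, with
 * the thread body assumed to check pending signals itself.
 *
 *	allow_signal(SIGKILL);
 *	while (!kthread_should_stop()) {
 *		if (signal_pending(current))
 *			break;		// react to the allowed signal
 *		schedule_timeout_interruptible(HZ);
 *	}
 */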
3134 void __weak sigaction_compat_abi(struct k_sigaction *act,
3135 struct k_sigaction *oact)
3139 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3141 struct task_struct *p = current, *t;
3142 struct k_sigaction *k;
3143 sigset_t mask;
3145 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3146 return -EINVAL;
3148 k = &p->sighand->action[sig-1];
3150 spin_lock_irq(&p->sighand->siglock);
3151 if (oact)
3152 *oact = *k;
3154 sigaction_compat_abi(act, oact);
3156 if (act) {
3157 sigdelsetmask(&act->sa.sa_mask,
3158 sigmask(SIGKILL) | sigmask(SIGSTOP));
3159 *k = *act;
3162 * "Setting a signal action to SIG_IGN for a signal that is
3163 * pending shall cause the pending signal to be discarded,
3164 * whether or not it is blocked."
3166 * "Setting a signal action to SIG_DFL for a signal that is
3167 * pending and whose default action is to ignore the signal
3168 * (for example, SIGCHLD), shall cause the pending signal to
3169 * be discarded, whether or not it is blocked"
3171 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3172 sigemptyset(&mask);
3173 sigaddset(&mask, sig);
3174 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3175 for_each_thread(p, t)
3176 flush_sigqueue_mask(&mask, &t->pending);
3180 spin_unlock_irq(&p->sighand->siglock);
3181 return 0;
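/*
 * Illustrative userspace sketch (not part of this file): the POSIX
 * rule quoted above means a pending (even blocked) SIGCHLD vanishes
 * once its action becomes SIG_IGN.
 *
 *	#include <signal.h>
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = SIG_IGN;
 *	sigaction(SIGCHLD, &sa, NULL);	// pending SIGCHLD is discarded
 */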
3184 static int
3185 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp)
3187 struct task_struct *t = current;
3190 memset(oss, 0, sizeof(stack_t));
3191 oss->ss_sp = (void __user *) t->sas_ss_sp;
3192 oss->ss_size = t->sas_ss_size;
3193 oss->ss_flags = sas_ss_flags(sp) |
3194 (current->sas_ss_flags & SS_FLAG_BITS);
3198 void __user *ss_sp = ss->ss_sp;
3199 size_t ss_size = ss->ss_size;
3200 unsigned ss_flags = ss->ss_flags;
3203 if (unlikely(on_sig_stack(sp)))
3204 return -EPERM;
3206 ss_mode = ss_flags & ~SS_FLAG_BITS;
3207 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3208 ss_mode != 0))
3209 return -EINVAL;
3211 if (ss_mode == SS_DISABLE) {
3212 ss_size = 0;
3213 ss_sp = NULL;
3214 } else {
3215 if (unlikely(ss_size < MINSIGSTKSZ))
3216 return -ENOMEM;
3217 }
3219 t->sas_ss_sp = (unsigned long) ss_sp;
3220 t->sas_ss_size = ss_size;
3221 t->sas_ss_flags = ss_flags;
3222 }
3223 return 0;
3226 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3228 stack_t new, old;
3229 int err;
3230 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3231 return -EFAULT;
3232 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3233 current_user_stack_pointer());
3234 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3235 err = -EFAULT;
3236 return err;
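/*
 * Illustrative userspace sketch (not part of this file): pairing
 * sigaltstack(2) with an SA_ONSTACK handler, the usual way to survive
 * stack-overflow SIGSEGVs. "handler" is a hypothetical signal handler.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = { 0 };
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = handler;	// hypothetical handler
 *	sa.sa_flags = SA_ONSTACK;	// run it on the alternate stack
 *	sigaction(SIGSEGV, &sa, NULL);
 */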
3239 int restore_altstack(const stack_t __user *uss)
3241 stack_t new;
3242 if (copy_from_user(&new, uss, sizeof(stack_t)))
3243 return -EFAULT;
3244 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer());
3245 /* squash all but EFAULT for now */
3246 return 0;
3249 int __save_altstack(stack_t __user *uss, unsigned long sp)
3251 struct task_struct *t = current;
3252 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3253 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3254 __put_user(t->sas_ss_size, &uss->ss_size);
3255 if (err)
3256 return err;
3257 if (t->sas_ss_flags & SS_AUTODISARM)
3258 sas_ss_reset(t);
3259 return err;
3262 #ifdef CONFIG_COMPAT
3263 COMPAT_SYSCALL_DEFINE2(sigaltstack,
3264 const compat_stack_t __user *, uss_ptr,
3265 compat_stack_t __user *, uoss_ptr)
3271 compat_stack_t uss32;
3272 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
3274 uss.ss_sp = compat_ptr(uss32.ss_sp);
3275 uss.ss_flags = uss32.ss_flags;
3276 uss.ss_size = uss32.ss_size;
3278 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
3279 compat_user_stack_pointer());
3280 if (ret >= 0 && uoss_ptr) {
3282 memset(&old, 0, sizeof(old));
3283 old.ss_sp = ptr_to_compat(uoss.ss_sp);
3284 old.ss_flags = uoss.ss_flags;
3285 old.ss_size = uoss.ss_size;
3286 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
3287 return -EFAULT;
3288 }
3289 return ret;
3292 int compat_restore_altstack(const compat_stack_t __user *uss)
3294 int err = compat_sys_sigaltstack(uss, NULL);
3295 /* squash all but -EFAULT for now */
3296 return err == -EFAULT ? err : 0;
3299 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
3301 int err;
3302 struct task_struct *t = current;
3303 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
3304 &uss->ss_sp) |
3305 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3306 __put_user(t->sas_ss_size, &uss->ss_size);
3307 if (err)
3308 return err;
3309 if (t->sas_ss_flags & SS_AUTODISARM)
3310 sas_ss_reset(t);
3311 return err;
3315 #ifdef __ARCH_WANT_SYS_SIGPENDING
3318 * sys_sigpending - examine pending signals
3319 * @set: where mask of pending signal is returned
3321 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
3323 return sys_rt_sigpending((sigset_t __user *)set, sizeof(old_sigset_t));
3326 #ifdef CONFIG_COMPAT
3327 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
3329 sigset_t set;
3330 int err = do_sigpending(&set);
3331 if (!err)
3332 err = put_user(set.sig[0], set32);
3333 return err;
3339 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
3341 * sys_sigprocmask - examine and change blocked signals
3342 * @how: whether to add, remove, or set signals
3343 * @nset: signals to add or remove (if non-null)
3344 * @oset: previous value of signal mask if non-null
3346 * Some platforms have their own version with special arguments;
3347 * others support only sys_rt_sigprocmask.
3350 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3351 old_sigset_t __user *, oset)
3353 old_sigset_t old_set, new_set;
3354 sigset_t new_blocked;
3356 old_set = current->blocked.sig[0];
3358 if (nset) {
3359 if (copy_from_user(&new_set, nset, sizeof(*nset)))
3360 return -EFAULT;
3362 new_blocked = current->blocked;
3364 switch (how) {
3365 case SIG_BLOCK:
3366 sigaddsetmask(&new_blocked, new_set);
3367 break;
3368 case SIG_UNBLOCK:
3369 sigdelsetmask(&new_blocked, new_set);
3370 break;
3371 case SIG_SETMASK:
3372 new_blocked.sig[0] = new_set;
3373 break;
3374 default:
3375 return -EINVAL;
3376 }
3378 set_current_blocked(&new_blocked);
3379 }
3381 if (oset) {
3382 if (copy_to_user(oset, &old_set, sizeof(*oset)))
3383 return -EFAULT;
3384 }
3386 return 0;
3388 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
3390 #ifndef CONFIG_ODD_RT_SIGACTION
3392 * sys_rt_sigaction - alter an action taken by a process
3393 * @sig: signal to be sent
3394 * @act: new sigaction
3395 * @oact: used to save the previous sigaction
3396 * @sigsetsize: size of sigset_t type
3398 SYSCALL_DEFINE4(rt_sigaction, int, sig,
3399 const struct sigaction __user *, act,
3400 struct sigaction __user *, oact,
3403 struct k_sigaction new_sa, old_sa;
3404 int ret;
3406 /* XXX: Don't preclude handling different sized sigset_t's. */
3407 if (sigsetsize != sizeof(sigset_t))
3408 return -EINVAL;
3410 if (act) {
3411 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
3412 return -EFAULT;
3413 }
3415 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
3417 if (!ret && oact) {
3418 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
3419 return -EFAULT;
3420 }
3422 return ret;
3424 #ifdef CONFIG_COMPAT
3425 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
3426 const struct compat_sigaction __user *, act,
3427 struct compat_sigaction __user *, oact,
3428 compat_size_t, sigsetsize)
3430 struct k_sigaction new_ka, old_ka;
3431 #ifdef __ARCH_HAS_SA_RESTORER
3432 compat_uptr_t restorer;
3436 /* XXX: Don't preclude handling different sized sigset_t's. */
3437 if (sigsetsize != sizeof(compat_sigset_t))
3438 return -EINVAL;
3441 compat_uptr_t handler;
3442 ret = get_user(handler, &act->sa_handler);
3443 new_ka.sa.sa_handler = compat_ptr(handler);
3444 #ifdef __ARCH_HAS_SA_RESTORER
3445 ret |= get_user(restorer, &act->sa_restorer);
3446 new_ka.sa.sa_restorer = compat_ptr(restorer);
3448 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
3449 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
3454 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3455 if (!ret && oact) {
3456 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
3457 &oact->sa_handler);
3458 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
3459 sizeof(oact->sa_mask));
3460 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
3461 #ifdef __ARCH_HAS_SA_RESTORER
3462 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3463 &oact->sa_restorer);
3469 #endif /* !CONFIG_ODD_RT_SIGACTION */
3471 #ifdef CONFIG_OLD_SIGACTION
3472 SYSCALL_DEFINE3(sigaction, int, sig,
3473 const struct old_sigaction __user *, act,
3474 struct old_sigaction __user *, oact)
3476 struct k_sigaction new_ka, old_ka;
3481 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3482 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
3483 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
3484 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3485 __get_user(mask, &act->sa_mask))
3486 return -EFAULT;
3487 #ifdef __ARCH_HAS_KA_RESTORER
3488 new_ka.ka_restorer = NULL;
3490 siginitset(&new_ka.sa.sa_mask, mask);
3493 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3496 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3497 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
3498 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
3499 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3500 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3501 return -EFAULT;
3507 #ifdef CONFIG_COMPAT_OLD_SIGACTION
3508 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
3509 const struct compat_old_sigaction __user *, act,
3510 struct compat_old_sigaction __user *, oact)
3512 struct k_sigaction new_ka, old_ka;
3514 compat_old_sigset_t mask;
3515 compat_uptr_t handler, restorer;
3518 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
3519 __get_user(handler, &act->sa_handler) ||
3520 __get_user(restorer, &act->sa_restorer) ||
3521 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
3522 __get_user(mask, &act->sa_mask))
3523 return -EFAULT;
3525 #ifdef __ARCH_HAS_KA_RESTORER
3526 new_ka.ka_restorer = NULL;
3528 new_ka.sa.sa_handler = compat_ptr(handler);
3529 new_ka.sa.sa_restorer = compat_ptr(restorer);
3530 siginitset(&new_ka.sa.sa_mask, mask);
3533 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
3536 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
3537 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
3538 &oact->sa_handler) ||
3539 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
3540 &oact->sa_restorer) ||
3541 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
3542 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
3543 return -EFAULT;
3549 #ifdef CONFIG_SGETMASK_SYSCALL
3552 * For backwards compatibility. Functionality superseded by sigprocmask.
3554 SYSCALL_DEFINE0(sgetmask)
3557 return current->blocked.sig[0];
3560 SYSCALL_DEFINE1(ssetmask, int, newmask)
3562 int old = current->blocked.sig[0];
3563 sigset_t newset;
3565 siginitset(&newset, newmask);
3566 set_current_blocked(&newset);
3568 return old;
3570 #endif /* CONFIG_SGETMASK_SYSCALL */
3572 #ifdef __ARCH_WANT_SYS_SIGNAL
3574 * For backwards compatibility. Functionality superseded by sigaction.
3576 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
3578 struct k_sigaction new_sa, old_sa;
3581 new_sa.sa.sa_handler = handler;
3582 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
3583 sigemptyset(&new_sa.sa.sa_mask);
3585 ret = do_sigaction(sig, &new_sa, &old_sa);
3587 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
3589 #endif /* __ARCH_WANT_SYS_SIGNAL */
3591 #ifdef __ARCH_WANT_SYS_PAUSE
3593 SYSCALL_DEFINE0(pause)
3595 while (!signal_pending(current)) {
3596 __set_current_state(TASK_INTERRUPTIBLE);
3597 schedule();
3598 }
3599 return -ERESTARTNOHAND;
3604 static int sigsuspend(sigset_t *set)
3606 current->saved_sigmask = current->blocked;
3607 set_current_blocked(set);
3609 while (!signal_pending(current)) {
3610 __set_current_state(TASK_INTERRUPTIBLE);
3611 schedule();
3612 }
3613 set_restore_sigmask();
3614 return -ERESTARTNOHAND;
3618 * sys_rt_sigsuspend - replace the signal mask with the @unewset
3619 * value until a signal is received
3620 * @unewset: new signal mask value
3621 * @sigsetsize: size of sigset_t type
3623 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
3625 sigset_t newset;
3627 /* XXX: Don't preclude handling different sized sigset_t's. */
3628 if (sigsetsize != sizeof(sigset_t))
3629 return -EINVAL;
3631 if (copy_from_user(&newset, unewset, sizeof(newset)))
3632 return -EFAULT;
3633 return sigsuspend(&newset);
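/*
 * Illustrative userspace sketch (not part of this file): the classic
 * race-free wait that rt_sigsuspend exists for - test a flag with the
 * signal blocked, then atomically unblock and sleep. "flag" is a
 * hypothetical volatile sig_atomic_t set by the SIGUSR1 handler.
 *
 *	#include <signal.h>
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!flag)
 *		sigsuspend(&old);	// unblock + sleep, atomically
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */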
3636 #ifdef CONFIG_COMPAT
3637 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
3641 /* XXX: Don't preclude handling different sized sigset_t's. */
3642 if (sigsetsize != sizeof(sigset_t))
3645 if (get_compat_sigset(&newset, unewset))
3647 return sigsuspend(&newset);
3651 #ifdef CONFIG_OLD_SIGSUSPEND
3652 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
3654 sigset_t blocked;
3655 siginitset(&blocked, mask);
3656 return sigsuspend(&blocked);
3659 #ifdef CONFIG_OLD_SIGSUSPEND3
3660 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
3662 sigset_t blocked;
3663 siginitset(&blocked, mask);
3664 return sigsuspend(&blocked);
3668 __weak const char *arch_vma_name(struct vm_area_struct *vma)
3670 return NULL;
3673 void __init signals_init(void)
3675 /* If this check fails, the __ARCH_SI_PREAMBLE_SIZE value is wrong! */
3676 BUILD_BUG_ON(__ARCH_SI_PREAMBLE_SIZE
3677 != offsetof(struct siginfo, _sifields._pad));
3679 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
3682 #ifdef CONFIG_KGDB_KDB
3683 #include <linux/kdb.h>
3685 * kdb_send_sig_info - Allows kdb to send signals without exposing
3686 * signal internals. This function checks if the required locks are
3687 * available before calling the main signal code, to avoid kdb
3688 * deadlocks.
3690 void
3691 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
3693 static struct task_struct *kdb_prev_t;
3694 int sig, new_t;
3695 if (!spin_trylock(&t->sighand->siglock)) {
3696 kdb_printf("Can't do kill command now.\n"
3697 "The sigmask lock is held somewhere else in "
3698 "kernel, try again later\n");
3701 spin_unlock(&t->sighand->siglock);
3702 new_t = kdb_prev_t != t;
3703 kdb_prev_t = t;
3704 if (t->state != TASK_RUNNING && new_t) {
3705 kdb_printf("Process is not RUNNING, sending a signal from "
3706 "kdb risks deadlock\n"
3707 "on the run queue locks. "
3708 "The signal has _not_ been sent.\n"
3709 "Reissue the kill command if you want to risk "
3713 sig = info->si_signo;
3714 if (send_sig_info(sig, info, t))
3715 kdb_printf("Failed to deliver signal %d to process %d.\n",
3716 sig, t->pid);
3717 else
3718 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
3720 #endif /* CONFIG_KGDB_KDB */