2 * linux/kernel/signal.c
4 * Copyright (C) 1991, 1992 Linus Torvalds
6 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
9 * Changes to use preallocated sigqueue structures
10 * to allow signals to be sent reliably.
13 #include <linux/slab.h>
14 #include <linux/export.h>
15 #include <linux/init.h>
16 #include <linux/sched/mm.h>
17 #include <linux/sched/user.h>
18 #include <linux/sched/debug.h>
19 #include <linux/sched/task.h>
20 #include <linux/sched/task_stack.h>
21 #include <linux/sched/cputime.h>
22 #include <linux/file.h>
24 #include <linux/proc_fs.h>
25 #include <linux/tty.h>
26 #include <linux/binfmts.h>
27 #include <linux/coredump.h>
28 #include <linux/security.h>
29 #include <linux/syscalls.h>
30 #include <linux/ptrace.h>
31 #include <linux/signal.h>
32 #include <linux/signalfd.h>
33 #include <linux/ratelimit.h>
34 #include <linux/tracehook.h>
35 #include <linux/capability.h>
36 #include <linux/freezer.h>
37 #include <linux/pid_namespace.h>
38 #include <linux/nsproxy.h>
39 #include <linux/user_namespace.h>
40 #include <linux/uprobes.h>
41 #include <linux/compat.h>
42 #include <linux/cn_proc.h>
43 #include <linux/compiler.h>
44 #include <linux/posix-timers.h>
45 #include <linux/livepatch.h>
46 #include <linux/cgroup.h>
48 #define CREATE_TRACE_POINTS
49 #include <trace/events/signal.h>
51 #include <asm/param.h>
52 #include <linux/uaccess.h>
53 #include <asm/unistd.h>
54 #include <asm/siginfo.h>
55 #include <asm/cacheflush.h>
56 #include "audit.h" /* audit_signal_info() */
59 * SLAB caches for signal bits.
62 static struct kmem_cache *sigqueue_cachep;
64 int print_fatal_signals __read_mostly;
66 static void __user *sig_handler(struct task_struct *t, int sig)
68 return t->sighand->action[sig - 1].sa.sa_handler;
71 static inline bool sig_handler_ignored(void __user *handler, int sig)
73 /* Is it explicitly or implicitly ignored? */
74 return handler == SIG_IGN ||
75 (handler == SIG_DFL && sig_kernel_ignore(sig));
78 static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
82 handler = sig_handler(t, sig);
84 /* SIGKILL and SIGSTOP may not be sent to the global init */
85 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
88 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
89 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
92 return sig_handler_ignored(handler, sig);
95 static bool sig_ignored(struct task_struct *t, int sig, bool force)
98 * Blocked signals are never ignored, since the
99 * signal handler may change by the time it is
102 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
106 * Tracers may want to know about even ignored signal unless it
107 * is SIGKILL which can't be reported anyway but can be ignored
108 * by SIGNAL_UNKILLABLE task.
110 if (t->ptrace && sig != SIGKILL)
113 return sig_task_ignored(t, sig, force);
117 * Re-calculate pending state from the set of locally pending
118 * signals, globally pending signals, and blocked signals.
120 static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
125 switch (_NSIG_WORDS) {
127 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
128 ready |= signal->sig[i] &~ blocked->sig[i];
131 case 4: ready = signal->sig[3] &~ blocked->sig[3];
132 ready |= signal->sig[2] &~ blocked->sig[2];
133 ready |= signal->sig[1] &~ blocked->sig[1];
134 ready |= signal->sig[0] &~ blocked->sig[0];
137 case 2: ready = signal->sig[1] &~ blocked->sig[1];
138 ready |= signal->sig[0] &~ blocked->sig[0];
141 case 1: ready = signal->sig[0] &~ blocked->sig[0];
146 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
148 static bool recalc_sigpending_tsk(struct task_struct *t)
150 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
151 PENDING(&t->pending, &t->blocked) ||
152 PENDING(&t->signal->shared_pending, &t->blocked) ||
153 cgroup_task_frozen(t)) {
154 set_tsk_thread_flag(t, TIF_SIGPENDING);
159 * We must never clear the flag in another thread, or in current
160 * when it's possible the current syscall is returning -ERESTART*.
161 * So we don't clear it here, and only callers who know they should do.
167 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
168 * This is superfluous when called on current, the wakeup is a harmless no-op.
170 void recalc_sigpending_and_wake(struct task_struct *t)
172 if (recalc_sigpending_tsk(t))
173 signal_wake_up(t, 0);
176 void recalc_sigpending(void)
178 if (!recalc_sigpending_tsk(current) && !freezing(current) &&
179 !klp_patch_pending(current))
180 clear_thread_flag(TIF_SIGPENDING);
183 EXPORT_SYMBOL(recalc_sigpending);
185 void calculate_sigpending(void)
187 /* Have any signals or users of TIF_SIGPENDING been delayed
190 spin_lock_irq(¤t->sighand->siglock);
191 set_tsk_thread_flag(current, TIF_SIGPENDING);
193 spin_unlock_irq(¤t->sighand->siglock);
196 /* Given the mask, find the first available signal that should be serviced. */
198 #define SYNCHRONOUS_MASK \
199 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
200 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
202 int next_signal(struct sigpending *pending, sigset_t *mask)
204 unsigned long i, *s, *m, x;
207 s = pending->signal.sig;
211 * Handle the first word specially: it contains the
212 * synchronous signals that need to be dequeued first.
216 if (x & SYNCHRONOUS_MASK)
217 x &= SYNCHRONOUS_MASK;
222 switch (_NSIG_WORDS) {
224 for (i = 1; i < _NSIG_WORDS; ++i) {
228 sig = ffz(~x) + i*_NSIG_BPW + 1;
237 sig = ffz(~x) + _NSIG_BPW + 1;
248 static inline void print_dropped_signal(int sig)
250 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
252 if (!print_fatal_signals)
255 if (!__ratelimit(&ratelimit_state))
258 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
259 current->comm, current->pid, sig);
263 * task_set_jobctl_pending - set jobctl pending bits
265 * @mask: pending bits to set
267 * Clear @mask from @task->jobctl. @mask must be subset of
268 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
269 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
270 * cleared. If @task is already being killed or exiting, this function
274 * Must be called with @task->sighand->siglock held.
277 * %true if @mask is set, %false if made noop because @task was dying.
279 bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
281 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
282 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
283 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
285 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
288 if (mask & JOBCTL_STOP_SIGMASK)
289 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
291 task->jobctl |= mask;
296 * task_clear_jobctl_trapping - clear jobctl trapping bit
299 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
300 * Clear it and wake up the ptracer. Note that we don't need any further
301 * locking. @task->siglock guarantees that @task->parent points to the
305 * Must be called with @task->sighand->siglock held.
307 void task_clear_jobctl_trapping(struct task_struct *task)
309 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
310 task->jobctl &= ~JOBCTL_TRAPPING;
311 smp_mb(); /* advised by wake_up_bit() */
312 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
317 * task_clear_jobctl_pending - clear jobctl pending bits
319 * @mask: pending bits to clear
321 * Clear @mask from @task->jobctl. @mask must be subset of
322 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
323 * STOP bits are cleared together.
325 * If clearing of @mask leaves no stop or trap pending, this function calls
326 * task_clear_jobctl_trapping().
329 * Must be called with @task->sighand->siglock held.
331 void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
333 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
335 if (mask & JOBCTL_STOP_PENDING)
336 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
338 task->jobctl &= ~mask;
340 if (!(task->jobctl & JOBCTL_PENDING_MASK))
341 task_clear_jobctl_trapping(task);
345 * task_participate_group_stop - participate in a group stop
346 * @task: task participating in a group stop
348 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
349 * Group stop states are cleared and the group stop count is consumed if
350 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
351 * stop, the appropriate %SIGNAL_* flags are set.
354 * Must be called with @task->sighand->siglock held.
357 * %true if group stop completion should be notified to the parent, %false
360 static bool task_participate_group_stop(struct task_struct *task)
362 struct signal_struct *sig = task->signal;
363 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
365 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
367 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
372 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
373 sig->group_stop_count--;
376 * Tell the caller to notify completion iff we are entering into a
377 * fresh group stop. Read comment in do_signal_stop() for details.
379 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
380 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
386 void task_join_group_stop(struct task_struct *task)
388 /* Have the new thread join an on-going signal group stop */
389 unsigned long jobctl = current->jobctl;
390 if (jobctl & JOBCTL_STOP_PENDING) {
391 struct signal_struct *sig = current->signal;
392 unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
393 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
394 if (task_set_jobctl_pending(task, signr | gstop)) {
395 sig->group_stop_count++;
401 * allocate a new signal queue record
402 * - this may be called without locks if and only if t == current, otherwise an
403 * appropriate lock must be held to stop the target task from exiting
405 static struct sigqueue *
406 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
408 struct sigqueue *q = NULL;
409 struct user_struct *user;
412 * Protect access to @t credentials. This can go away when all
413 * callers hold rcu read lock.
416 user = get_uid(__task_cred(t)->user);
417 atomic_inc(&user->sigpending);
420 if (override_rlimit ||
421 atomic_read(&user->sigpending) <=
422 task_rlimit(t, RLIMIT_SIGPENDING)) {
423 q = kmem_cache_alloc(sigqueue_cachep, flags);
425 print_dropped_signal(sig);
428 if (unlikely(q == NULL)) {
429 atomic_dec(&user->sigpending);
432 INIT_LIST_HEAD(&q->list);
440 static void __sigqueue_free(struct sigqueue *q)
442 if (q->flags & SIGQUEUE_PREALLOC)
444 atomic_dec(&q->user->sigpending);
446 kmem_cache_free(sigqueue_cachep, q);
449 void flush_sigqueue(struct sigpending *queue)
453 sigemptyset(&queue->signal);
454 while (!list_empty(&queue->list)) {
455 q = list_entry(queue->list.next, struct sigqueue , list);
456 list_del_init(&q->list);
462 * Flush all pending signals for this kthread.
464 void flush_signals(struct task_struct *t)
468 spin_lock_irqsave(&t->sighand->siglock, flags);
469 clear_tsk_thread_flag(t, TIF_SIGPENDING);
470 flush_sigqueue(&t->pending);
471 flush_sigqueue(&t->signal->shared_pending);
472 spin_unlock_irqrestore(&t->sighand->siglock, flags);
474 EXPORT_SYMBOL(flush_signals);
476 #ifdef CONFIG_POSIX_TIMERS
477 static void __flush_itimer_signals(struct sigpending *pending)
479 sigset_t signal, retain;
480 struct sigqueue *q, *n;
482 signal = pending->signal;
483 sigemptyset(&retain);
485 list_for_each_entry_safe(q, n, &pending->list, list) {
486 int sig = q->info.si_signo;
488 if (likely(q->info.si_code != SI_TIMER)) {
489 sigaddset(&retain, sig);
491 sigdelset(&signal, sig);
492 list_del_init(&q->list);
497 sigorsets(&pending->signal, &signal, &retain);
500 void flush_itimer_signals(void)
502 struct task_struct *tsk = current;
505 spin_lock_irqsave(&tsk->sighand->siglock, flags);
506 __flush_itimer_signals(&tsk->pending);
507 __flush_itimer_signals(&tsk->signal->shared_pending);
508 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
512 void ignore_signals(struct task_struct *t)
516 for (i = 0; i < _NSIG; ++i)
517 t->sighand->action[i].sa.sa_handler = SIG_IGN;
523 * Flush all handlers for a task.
527 flush_signal_handlers(struct task_struct *t, int force_default)
530 struct k_sigaction *ka = &t->sighand->action[0];
531 for (i = _NSIG ; i != 0 ; i--) {
532 if (force_default || ka->sa.sa_handler != SIG_IGN)
533 ka->sa.sa_handler = SIG_DFL;
535 #ifdef __ARCH_HAS_SA_RESTORER
536 ka->sa.sa_restorer = NULL;
538 sigemptyset(&ka->sa.sa_mask);
543 bool unhandled_signal(struct task_struct *tsk, int sig)
545 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
546 if (is_global_init(tsk))
549 if (handler != SIG_IGN && handler != SIG_DFL)
552 /* if ptraced, let the tracer determine */
556 static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
559 struct sigqueue *q, *first = NULL;
562 * Collect the siginfo appropriate to this signal. Check if
563 * there is another siginfo for the same signal.
565 list_for_each_entry(q, &list->list, list) {
566 if (q->info.si_signo == sig) {
573 sigdelset(&list->signal, sig);
577 list_del_init(&first->list);
578 copy_siginfo(info, &first->info);
581 (first->flags & SIGQUEUE_PREALLOC) &&
582 (info->si_code == SI_TIMER) &&
583 (info->si_sys_private);
585 __sigqueue_free(first);
588 * Ok, it wasn't in the queue. This must be
589 * a fast-pathed signal or we must have been
590 * out of queue space. So zero out the info.
593 info->si_signo = sig;
595 info->si_code = SI_USER;
601 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
602 kernel_siginfo_t *info, bool *resched_timer)
604 int sig = next_signal(pending, mask);
607 collect_signal(sig, pending, info, resched_timer);
612 * Dequeue a signal and return the element to the caller, which is
613 * expected to free it.
615 * All callers have to hold the siglock.
617 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
619 bool resched_timer = false;
622 /* We only dequeue private signals from ourselves, we don't let
623 * signalfd steal them
625 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
627 signr = __dequeue_signal(&tsk->signal->shared_pending,
628 mask, info, &resched_timer);
629 #ifdef CONFIG_POSIX_TIMERS
633 * itimers are process shared and we restart periodic
634 * itimers in the signal delivery path to prevent DoS
635 * attacks in the high resolution timer case. This is
636 * compliant with the old way of self-restarting
637 * itimers, as the SIGALRM is a legacy signal and only
638 * queued once. Changing the restart behaviour to
639 * restart the timer in the signal dequeue path is
640 * reducing the timer noise on heavy loaded !highres
643 if (unlikely(signr == SIGALRM)) {
644 struct hrtimer *tmr = &tsk->signal->real_timer;
646 if (!hrtimer_is_queued(tmr) &&
647 tsk->signal->it_real_incr != 0) {
648 hrtimer_forward(tmr, tmr->base->get_time(),
649 tsk->signal->it_real_incr);
650 hrtimer_restart(tmr);
660 if (unlikely(sig_kernel_stop(signr))) {
662 * Set a marker that we have dequeued a stop signal. Our
663 * caller might release the siglock and then the pending
664 * stop signal it is about to process is no longer in the
665 * pending bitmasks, but must still be cleared by a SIGCONT
666 * (and overruled by a SIGKILL). So those cases clear this
667 * shared flag after we've set it. Note that this flag may
668 * remain set after the signal we return is ignored or
669 * handled. That doesn't matter because its only purpose
670 * is to alert stop-signal processing code when another
671 * processor has come along and cleared the flag.
673 current->jobctl |= JOBCTL_STOP_DEQUEUED;
675 #ifdef CONFIG_POSIX_TIMERS
678 * Release the siglock to ensure proper locking order
679 * of timer locks outside of siglocks. Note, we leave
680 * irqs disabled here, since the posix-timers code is
681 * about to disable them again anyway.
683 spin_unlock(&tsk->sighand->siglock);
684 posixtimer_rearm(info);
685 spin_lock(&tsk->sighand->siglock);
687 /* Don't expose the si_sys_private value to userspace */
688 info->si_sys_private = 0;
693 EXPORT_SYMBOL_GPL(dequeue_signal);
695 static int dequeue_synchronous_signal(kernel_siginfo_t *info)
697 struct task_struct *tsk = current;
698 struct sigpending *pending = &tsk->pending;
699 struct sigqueue *q, *sync = NULL;
702 * Might a synchronous signal be in the queue?
704 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
708 * Return the first synchronous signal in the queue.
710 list_for_each_entry(q, &pending->list, list) {
711 /* Synchronous signals have a postive si_code */
712 if ((q->info.si_code > SI_USER) &&
713 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
721 * Check if there is another siginfo for the same signal.
723 list_for_each_entry_continue(q, &pending->list, list) {
724 if (q->info.si_signo == sync->info.si_signo)
728 sigdelset(&pending->signal, sync->info.si_signo);
731 list_del_init(&sync->list);
732 copy_siginfo(info, &sync->info);
733 __sigqueue_free(sync);
734 return info->si_signo;
738 * Tell a process that it has a new active signal..
740 * NOTE! we rely on the previous spin_lock to
741 * lock interrupts for us! We can only be called with
742 * "siglock" held, and the local interrupt must
743 * have been disabled when that got acquired!
745 * No need to set need_resched since signal event passing
746 * goes through ->blocked
748 void signal_wake_up_state(struct task_struct *t, unsigned int state)
750 set_tsk_thread_flag(t, TIF_SIGPENDING);
752 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
753 * case. We don't check t->state here because there is a race with it
754 * executing another processor and just now entering stopped state.
755 * By using wake_up_state, we ensure the process will wake up and
756 * handle its death signal.
758 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
763 * Remove signals in mask from the pending set and queue.
764 * Returns 1 if any signals were found.
766 * All callers must be holding the siglock.
768 static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
770 struct sigqueue *q, *n;
773 sigandsets(&m, mask, &s->signal);
774 if (sigisemptyset(&m))
777 sigandnsets(&s->signal, &s->signal, mask);
778 list_for_each_entry_safe(q, n, &s->list, list) {
779 if (sigismember(mask, q->info.si_signo)) {
780 list_del_init(&q->list);
786 static inline int is_si_special(const struct kernel_siginfo *info)
788 return info <= SEND_SIG_PRIV;
791 static inline bool si_fromuser(const struct kernel_siginfo *info)
793 return info == SEND_SIG_NOINFO ||
794 (!is_si_special(info) && SI_FROMUSER(info));
798 * called with RCU read lock from check_kill_permission()
800 static bool kill_ok_by_cred(struct task_struct *t)
802 const struct cred *cred = current_cred();
803 const struct cred *tcred = __task_cred(t);
805 return uid_eq(cred->euid, tcred->suid) ||
806 uid_eq(cred->euid, tcred->uid) ||
807 uid_eq(cred->uid, tcred->suid) ||
808 uid_eq(cred->uid, tcred->uid) ||
809 ns_capable(tcred->user_ns, CAP_KILL);
813 * Bad permissions for sending the signal
814 * - the caller must hold the RCU read lock
816 static int check_kill_permission(int sig, struct kernel_siginfo *info,
817 struct task_struct *t)
822 if (!valid_signal(sig))
825 if (!si_fromuser(info))
828 error = audit_signal_info(sig, t); /* Let audit system see the signal */
832 if (!same_thread_group(current, t) &&
833 !kill_ok_by_cred(t)) {
836 sid = task_session(t);
838 * We don't return the error if sid == NULL. The
839 * task was unhashed, the caller must notice this.
841 if (!sid || sid == task_session(current))
848 return security_task_kill(t, info, sig, NULL);
852 * ptrace_trap_notify - schedule trap to notify ptracer
853 * @t: tracee wanting to notify tracer
855 * This function schedules sticky ptrace trap which is cleared on the next
856 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
859 * If @t is running, STOP trap will be taken. If trapped for STOP and
860 * ptracer is listening for events, tracee is woken up so that it can
861 * re-trap for the new event. If trapped otherwise, STOP trap will be
862 * eventually taken without returning to userland after the existing traps
863 * are finished by PTRACE_CONT.
866 * Must be called with @task->sighand->siglock held.
868 static void ptrace_trap_notify(struct task_struct *t)
870 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
871 assert_spin_locked(&t->sighand->siglock);
873 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
874 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
878 * Handle magic process-wide effects of stop/continue signals. Unlike
879 * the signal actions, these happen immediately at signal-generation
880 * time regardless of blocking, ignoring, or handling. This does the
881 * actual continuing for SIGCONT, but not the actual stopping for stop
882 * signals. The process stop is done as a signal action for SIG_DFL.
884 * Returns true if the signal should be actually delivered, otherwise
885 * it should be dropped.
887 static bool prepare_signal(int sig, struct task_struct *p, bool force)
889 struct signal_struct *signal = p->signal;
890 struct task_struct *t;
893 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
894 if (!(signal->flags & SIGNAL_GROUP_EXIT))
895 return sig == SIGKILL;
897 * The process is in the middle of dying, nothing to do.
899 } else if (sig_kernel_stop(sig)) {
901 * This is a stop signal. Remove SIGCONT from all queues.
903 siginitset(&flush, sigmask(SIGCONT));
904 flush_sigqueue_mask(&flush, &signal->shared_pending);
905 for_each_thread(p, t)
906 flush_sigqueue_mask(&flush, &t->pending);
907 } else if (sig == SIGCONT) {
910 * Remove all stop signals from all queues, wake all threads.
912 siginitset(&flush, SIG_KERNEL_STOP_MASK);
913 flush_sigqueue_mask(&flush, &signal->shared_pending);
914 for_each_thread(p, t) {
915 flush_sigqueue_mask(&flush, &t->pending);
916 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
917 if (likely(!(t->ptrace & PT_SEIZED)))
918 wake_up_state(t, __TASK_STOPPED);
920 ptrace_trap_notify(t);
924 * Notify the parent with CLD_CONTINUED if we were stopped.
926 * If we were in the middle of a group stop, we pretend it
927 * was already finished, and then continued. Since SIGCHLD
928 * doesn't queue we report only CLD_STOPPED, as if the next
929 * CLD_CONTINUED was dropped.
932 if (signal->flags & SIGNAL_STOP_STOPPED)
933 why |= SIGNAL_CLD_CONTINUED;
934 else if (signal->group_stop_count)
935 why |= SIGNAL_CLD_STOPPED;
939 * The first thread which returns from do_signal_stop()
940 * will take ->siglock, notice SIGNAL_CLD_MASK, and
941 * notify its parent. See get_signal().
943 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
944 signal->group_stop_count = 0;
945 signal->group_exit_code = 0;
949 return !sig_ignored(p, sig, force);
953 * Test if P wants to take SIG. After we've checked all threads with this,
954 * it's equivalent to finding no threads not blocking SIG. Any threads not
955 * blocking SIG were ruled out because they are not running and already
956 * have pending signals. Such threads will dequeue from the shared queue
957 * as soon as they're available, so putting the signal on the shared queue
958 * will be equivalent to sending it to one such thread.
960 static inline bool wants_signal(int sig, struct task_struct *p)
962 if (sigismember(&p->blocked, sig))
965 if (p->flags & PF_EXITING)
971 if (task_is_stopped_or_traced(p))
974 return task_curr(p) || !signal_pending(p);
977 static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
979 struct signal_struct *signal = p->signal;
980 struct task_struct *t;
983 * Now find a thread we can wake up to take the signal off the queue.
985 * If the main thread wants the signal, it gets first crack.
986 * Probably the least surprising to the average bear.
988 if (wants_signal(sig, p))
990 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
992 * There is just one thread and it does not need to be woken.
993 * It will dequeue unblocked signals before it runs again.
998 * Otherwise try to find a suitable thread.
1000 t = signal->curr_target;
1001 while (!wants_signal(sig, t)) {
1003 if (t == signal->curr_target)
1005 * No thread needs to be woken.
1006 * Any eligible threads will see
1007 * the signal in the queue soon.
1011 signal->curr_target = t;
1015 * Found a killable thread. If the signal will be fatal,
1016 * then start taking the whole group down immediately.
1018 if (sig_fatal(p, sig) &&
1019 !(signal->flags & SIGNAL_GROUP_EXIT) &&
1020 !sigismember(&t->real_blocked, sig) &&
1021 (sig == SIGKILL || !p->ptrace)) {
1023 * This signal will be fatal to the whole group.
1025 if (!sig_kernel_coredump(sig)) {
1027 * Start a group exit and wake everybody up.
1028 * This way we don't have other threads
1029 * running and doing things after a slower
1030 * thread has the fatal signal pending.
1032 signal->flags = SIGNAL_GROUP_EXIT;
1033 signal->group_exit_code = sig;
1034 signal->group_stop_count = 0;
1037 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1038 sigaddset(&t->pending.signal, SIGKILL);
1039 signal_wake_up(t, 1);
1040 } while_each_thread(p, t);
1046 * The signal is already in the shared-pending queue.
1047 * Tell the chosen thread to wake up and dequeue it.
1049 signal_wake_up(t, sig == SIGKILL);
1053 static inline bool legacy_queue(struct sigpending *signals, int sig)
1055 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1058 #ifdef CONFIG_USER_NS
1059 static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
1061 if (current_user_ns() == task_cred_xxx(t, user_ns))
1064 if (SI_FROMKERNEL(info))
1068 info->si_uid = from_kuid_munged(task_cred_xxx(t, user_ns),
1069 make_kuid(current_user_ns(), info->si_uid));
1073 static inline void userns_fixup_signal_uid(struct kernel_siginfo *info, struct task_struct *t)
1079 static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1080 enum pid_type type, int from_ancestor_ns)
1082 struct sigpending *pending;
1084 int override_rlimit;
1085 int ret = 0, result;
1087 assert_spin_locked(&t->sighand->siglock);
1089 result = TRACE_SIGNAL_IGNORED;
1090 if (!prepare_signal(sig, t,
1091 from_ancestor_ns || (info == SEND_SIG_PRIV)))
1094 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1096 * Short-circuit ignored signals and support queuing
1097 * exactly one non-rt signal, so that we can get more
1098 * detailed information about the cause of the signal.
1100 result = TRACE_SIGNAL_ALREADY_PENDING;
1101 if (legacy_queue(pending, sig))
1104 result = TRACE_SIGNAL_DELIVERED;
1106 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1108 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1112 * Real-time signals must be queued if sent by sigqueue, or
1113 * some other real-time mechanism. It is implementation
1114 * defined whether kill() does so. We attempt to do so, on
1115 * the principle of least surprise, but since kill is not
1116 * allowed to fail with EAGAIN when low on memory we just
1117 * make sure at least one signal gets delivered and don't
1118 * pass on the info struct.
1121 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1123 override_rlimit = 0;
1125 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
1127 list_add_tail(&q->list, &pending->list);
1128 switch ((unsigned long) info) {
1129 case (unsigned long) SEND_SIG_NOINFO:
1130 clear_siginfo(&q->info);
1131 q->info.si_signo = sig;
1132 q->info.si_errno = 0;
1133 q->info.si_code = SI_USER;
1134 q->info.si_pid = task_tgid_nr_ns(current,
1135 task_active_pid_ns(t));
1136 q->info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1138 case (unsigned long) SEND_SIG_PRIV:
1139 clear_siginfo(&q->info);
1140 q->info.si_signo = sig;
1141 q->info.si_errno = 0;
1142 q->info.si_code = SI_KERNEL;
1147 copy_siginfo(&q->info, info);
1148 if (from_ancestor_ns)
1153 userns_fixup_signal_uid(&q->info, t);
1155 } else if (!is_si_special(info)) {
1156 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1158 * Queue overflow, abort. We may abort if the
1159 * signal was rt and sent by user using something
1160 * other than kill().
1162 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1167 * This is a silent loss of information. We still
1168 * send the signal, but the *info bits are lost.
1170 result = TRACE_SIGNAL_LOSE_INFO;
1175 signalfd_notify(t, sig);
1176 sigaddset(&pending->signal, sig);
1178 /* Let multiprocess signals appear after on-going forks */
1179 if (type > PIDTYPE_TGID) {
1180 struct multiprocess_signals *delayed;
1181 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1182 sigset_t *signal = &delayed->signal;
1183 /* Can't queue both a stop and a continue signal */
1185 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1186 else if (sig_kernel_stop(sig))
1187 sigdelset(signal, SIGCONT);
1188 sigaddset(signal, sig);
1192 complete_signal(sig, t, type);
1194 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
1198 static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
1201 int from_ancestor_ns = 0;
1203 #ifdef CONFIG_PID_NS
1204 from_ancestor_ns = si_fromuser(info) &&
1205 !task_pid_nr_ns(current, task_active_pid_ns(t));
1208 return __send_signal(sig, info, t, type, from_ancestor_ns);
1211 static void print_fatal_signal(int signr)
1213 struct pt_regs *regs = signal_pt_regs();
1214 pr_info("potentially unexpected fatal signal %d.\n", signr);
1216 #if defined(__i386__) && !defined(__arch_um__)
1217 pr_info("code at %08lx: ", regs->ip);
1220 for (i = 0; i < 16; i++) {
1223 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1225 pr_cont("%02x ", insn);
1235 static int __init setup_print_fatal_signals(char *str)
1237 get_option (&str, &print_fatal_signals);
1242 __setup("print-fatal-signals=", setup_print_fatal_signals);
1245 __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1247 return send_signal(sig, info, p, PIDTYPE_TGID);
1250 int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
1253 unsigned long flags;
1256 if (lock_task_sighand(p, &flags)) {
1257 ret = send_signal(sig, info, p, type);
1258 unlock_task_sighand(p, &flags);
1265 * Force a signal that the process can't ignore: if necessary
1266 * we unblock the signal and change any SIG_IGN to SIG_DFL.
1268 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1269 * since we do not want to have a signal handler that was blocked
1270 * be invoked when user space had explicitly blocked it.
1272 * We don't want to have recursive SIGSEGV's etc, for example,
1273 * that is why we also clear SIGNAL_UNKILLABLE.
1276 force_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *t)
1278 unsigned long int flags;
1279 int ret, blocked, ignored;
1280 struct k_sigaction *action;
1282 spin_lock_irqsave(&t->sighand->siglock, flags);
1283 action = &t->sighand->action[sig-1];
1284 ignored = action->sa.sa_handler == SIG_IGN;
1285 blocked = sigismember(&t->blocked, sig);
1286 if (blocked || ignored) {
1287 action->sa.sa_handler = SIG_DFL;
1289 sigdelset(&t->blocked, sig);
1290 recalc_sigpending_and_wake(t);
1294 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1295 * debugging to leave init killable.
1297 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
1298 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1299 ret = send_signal(sig, info, t, PIDTYPE_PID);
1300 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1306 * Nuke all other threads in the group.
1308 int zap_other_threads(struct task_struct *p)
1310 struct task_struct *t = p;
1313 p->signal->group_stop_count = 0;
1315 while_each_thread(p, t) {
1316 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
1319 /* Don't bother with already dead threads */
1322 sigaddset(&t->pending.signal, SIGKILL);
1323 signal_wake_up(t, 1);
1329 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1330 unsigned long *flags)
1332 struct sighand_struct *sighand;
1336 sighand = rcu_dereference(tsk->sighand);
1337 if (unlikely(sighand == NULL))
1341 * This sighand can be already freed and even reused, but
1342 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
1343 * initializes ->siglock: this slab can't go away, it has
1344 * the same object type, ->siglock can't be reinitialized.
1346 * We need to ensure that tsk->sighand is still the same
1347 * after we take the lock, we can race with de_thread() or
1348 * __exit_signal(). In the latter case the next iteration
1349 * must see ->sighand == NULL.
1351 spin_lock_irqsave(&sighand->siglock, *flags);
1352 if (likely(sighand == tsk->sighand))
1354 spin_unlock_irqrestore(&sighand->siglock, *flags);
1362 * send signal info to all the members of a group
1364 int group_send_sig_info(int sig, struct kernel_siginfo *info,
1365 struct task_struct *p, enum pid_type type)
1370 ret = check_kill_permission(sig, info, p);
1374 ret = do_send_sig_info(sig, info, p, type);
1380 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1381 * control characters do (^C, ^Z etc)
1382 * - the caller must hold at least a readlock on tasklist_lock
1384 int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1386 struct task_struct *p = NULL;
1387 int retval, success;
1391 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1392 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1395 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1396 return success ? 0 : retval;
1399 int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1402 struct task_struct *p;
1406 p = pid_task(pid, PIDTYPE_PID);
1408 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
1410 if (likely(!p || error != -ESRCH))
1414 * The task was unhashed in between, try again. If it
1415 * is dead, pid_task() will return NULL, if we race with
1416 * de_thread() it will find the new leader.
1421 static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
1425 error = kill_pid_info(sig, info, find_vpid(pid));
1430 static inline bool kill_as_cred_perm(const struct cred *cred,
1431 struct task_struct *target)
1433 const struct cred *pcred = __task_cred(target);
1435 return uid_eq(cred->euid, pcred->suid) ||
1436 uid_eq(cred->euid, pcred->uid) ||
1437 uid_eq(cred->uid, pcred->suid) ||
1438 uid_eq(cred->uid, pcred->uid);
1441 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1442 int kill_pid_info_as_cred(int sig, struct kernel_siginfo *info, struct pid *pid,
1443 const struct cred *cred)
1446 struct task_struct *p;
1447 unsigned long flags;
1449 if (!valid_signal(sig))
1453 p = pid_task(pid, PIDTYPE_PID);
1458 if (si_fromuser(info) && !kill_as_cred_perm(cred, p)) {
1462 ret = security_task_kill(p, info, sig, cred);
1467 if (lock_task_sighand(p, &flags)) {
1468 ret = __send_signal(sig, info, p, PIDTYPE_TGID, 0);
1469 unlock_task_sighand(p, &flags);
1477 EXPORT_SYMBOL_GPL(kill_pid_info_as_cred);
1480 * kill_something_info() interprets pid in interesting ways just like kill(2).
1482 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1483 * is probably wrong. Should make it like BSD or SYSV.
1486 static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1492 ret = kill_pid_info(sig, info, find_vpid(pid));
1497 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1501 read_lock(&tasklist_lock);
1503 ret = __kill_pgrp_info(sig, info,
1504 pid ? find_vpid(-pid) : task_pgrp(current));
1506 int retval = 0, count = 0;
1507 struct task_struct * p;
1509 for_each_process(p) {
1510 if (task_pid_vnr(p) > 1 &&
1511 !same_thread_group(p, current)) {
1512 int err = group_send_sig_info(sig, info, p,
1519 ret = count ? retval : -ESRCH;
1521 read_unlock(&tasklist_lock);
1527 * These are for backward compatibility with the rest of the kernel source.
1530 int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1533 * Make sure legacy kernel users don't send in bad values
1534 * (normal paths check this in check_kill_permission).
1536 if (!valid_signal(sig))
1539 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1541 EXPORT_SYMBOL(send_sig_info);
1543 #define __si_special(priv) \
1544 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1547 send_sig(int sig, struct task_struct *p, int priv)
1549 return send_sig_info(sig, __si_special(priv), p);
1551 EXPORT_SYMBOL(send_sig);
1553 void force_sig(int sig, struct task_struct *p)
1555 force_sig_info(sig, SEND_SIG_PRIV, p);
1557 EXPORT_SYMBOL(force_sig);
1560 * When things go south during signal handling, we
1561 * will force a SIGSEGV. And if the signal that caused
1562 * the problem was already a SIGSEGV, we'll want to
1563 * make sure we don't even try to deliver the signal..
1565 void force_sigsegv(int sig, struct task_struct *p)
1567 if (sig == SIGSEGV) {
1568 unsigned long flags;
1569 spin_lock_irqsave(&p->sighand->siglock, flags);
1570 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1571 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1573 force_sig(SIGSEGV, p);
1576 int force_sig_fault(int sig, int code, void __user *addr
1577 ___ARCH_SI_TRAPNO(int trapno)
1578 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1579 , struct task_struct *t)
1581 struct kernel_siginfo info;
1583 clear_siginfo(&info);
1584 info.si_signo = sig;
1586 info.si_code = code;
1587 info.si_addr = addr;
1588 #ifdef __ARCH_SI_TRAPNO
1589 info.si_trapno = trapno;
1593 info.si_flags = flags;
1596 return force_sig_info(info.si_signo, &info, t);
1599 int send_sig_fault(int sig, int code, void __user *addr
1600 ___ARCH_SI_TRAPNO(int trapno)
1601 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1602 , struct task_struct *t)
1604 struct kernel_siginfo info;
1606 clear_siginfo(&info);
1607 info.si_signo = sig;
1609 info.si_code = code;
1610 info.si_addr = addr;
1611 #ifdef __ARCH_SI_TRAPNO
1612 info.si_trapno = trapno;
1616 info.si_flags = flags;
1619 return send_sig_info(info.si_signo, &info, t);
1622 int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1624 struct kernel_siginfo info;
1626 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1627 clear_siginfo(&info);
1628 info.si_signo = SIGBUS;
1630 info.si_code = code;
1631 info.si_addr = addr;
1632 info.si_addr_lsb = lsb;
1633 return force_sig_info(info.si_signo, &info, t);
1636 int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1638 struct kernel_siginfo info;
1640 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1641 clear_siginfo(&info);
1642 info.si_signo = SIGBUS;
1644 info.si_code = code;
1645 info.si_addr = addr;
1646 info.si_addr_lsb = lsb;
1647 return send_sig_info(info.si_signo, &info, t);
1649 EXPORT_SYMBOL(send_sig_mceerr);
1651 int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1653 struct kernel_siginfo info;
1655 clear_siginfo(&info);
1656 info.si_signo = SIGSEGV;
1658 info.si_code = SEGV_BNDERR;
1659 info.si_addr = addr;
1660 info.si_lower = lower;
1661 info.si_upper = upper;
1662 return force_sig_info(info.si_signo, &info, current);
1666 int force_sig_pkuerr(void __user *addr, u32 pkey)
1668 struct kernel_siginfo info;
1670 clear_siginfo(&info);
1671 info.si_signo = SIGSEGV;
1673 info.si_code = SEGV_PKUERR;
1674 info.si_addr = addr;
1675 info.si_pkey = pkey;
1676 return force_sig_info(info.si_signo, &info, current);
1680 /* For the crazy architectures that include trap information in
1681 * the errno field, instead of an actual errno value.
1683 int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1685 struct kernel_siginfo info;
1687 clear_siginfo(&info);
1688 info.si_signo = SIGTRAP;
1689 info.si_errno = errno;
1690 info.si_code = TRAP_HWBKPT;
1691 info.si_addr = addr;
1692 return force_sig_info(info.si_signo, &info, current);
1695 int kill_pgrp(struct pid *pid, int sig, int priv)
1699 read_lock(&tasklist_lock);
1700 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1701 read_unlock(&tasklist_lock);
1705 EXPORT_SYMBOL(kill_pgrp);
1707 int kill_pid(struct pid *pid, int sig, int priv)
1709 return kill_pid_info(sig, __si_special(priv), pid);
1711 EXPORT_SYMBOL(kill_pid);
1714 * These functions support sending signals using preallocated sigqueue
1715 * structures. This is needed "because realtime applications cannot
1716 * afford to lose notifications of asynchronous events, like timer
1717 * expirations or I/O completions". In the case of POSIX Timers
1718 * we allocate the sigqueue structure from the timer_create. If this
1719 * allocation fails we are able to report the failure to the application
1720 * with an EAGAIN error.
1722 struct sigqueue *sigqueue_alloc(void)
1724 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1727 q->flags |= SIGQUEUE_PREALLOC;
1732 void sigqueue_free(struct sigqueue *q)
1734 unsigned long flags;
1735 spinlock_t *lock = ¤t->sighand->siglock;
1737 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1739 * We must hold ->siglock while testing q->list
1740 * to serialize with collect_signal() or with
1741 * __exit_signal()->flush_sigqueue().
1743 spin_lock_irqsave(lock, flags);
1744 q->flags &= ~SIGQUEUE_PREALLOC;
1746 * If it is queued it will be freed when dequeued,
1747 * like the "regular" sigqueue.
1749 if (!list_empty(&q->list))
1751 spin_unlock_irqrestore(lock, flags);
1757 int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
1759 int sig = q->info.si_signo;
1760 struct sigpending *pending;
1761 struct task_struct *t;
1762 unsigned long flags;
1765 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1769 t = pid_task(pid, type);
1770 if (!t || !likely(lock_task_sighand(t, &flags)))
1773 ret = 1; /* the signal is ignored */
1774 result = TRACE_SIGNAL_IGNORED;
1775 if (!prepare_signal(sig, t, false))
1779 if (unlikely(!list_empty(&q->list))) {
1781 * If an SI_TIMER entry is already queue just increment
1782 * the overrun count.
1784 BUG_ON(q->info.si_code != SI_TIMER);
1785 q->info.si_overrun++;
1786 result = TRACE_SIGNAL_ALREADY_PENDING;
1789 q->info.si_overrun = 0;
1791 signalfd_notify(t, sig);
1792 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
1793 list_add_tail(&q->list, &pending->list);
1794 sigaddset(&pending->signal, sig);
1795 complete_signal(sig, t, type);
1796 result = TRACE_SIGNAL_DELIVERED;
1798 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
1799 unlock_task_sighand(t, &flags);
1806 * Let a parent know about the death of a child.
1807 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1809 * Returns true if our parent ignored us and so we've switched to
1812 bool do_notify_parent(struct task_struct *tsk, int sig)
1814 struct kernel_siginfo info;
1815 unsigned long flags;
1816 struct sighand_struct *psig;
1817 bool autoreap = false;
1822 /* do_notify_parent_cldstop should have been called instead. */
1823 BUG_ON(task_is_stopped_or_traced(tsk));
1825 BUG_ON(!tsk->ptrace &&
1826 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1828 if (sig != SIGCHLD) {
1830 * This is only possible if parent == real_parent.
1831 * Check if it has changed security domain.
1833 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1837 clear_siginfo(&info);
1838 info.si_signo = sig;
1841 * We are under tasklist_lock here so our parent is tied to
1842 * us and cannot change.
1844 * task_active_pid_ns will always return the same pid namespace
1845 * until a task passes through release_task.
1847 * write_lock() currently calls preempt_disable() which is the
1848 * same as rcu_read_lock(), but according to Oleg, this is not
1849 * correct to rely on this
1852 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1853 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1857 task_cputime(tsk, &utime, &stime);
1858 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1859 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1861 info.si_status = tsk->exit_code & 0x7f;
1862 if (tsk->exit_code & 0x80)
1863 info.si_code = CLD_DUMPED;
1864 else if (tsk->exit_code & 0x7f)
1865 info.si_code = CLD_KILLED;
1867 info.si_code = CLD_EXITED;
1868 info.si_status = tsk->exit_code >> 8;
1871 psig = tsk->parent->sighand;
1872 spin_lock_irqsave(&psig->siglock, flags);
1873 if (!tsk->ptrace && sig == SIGCHLD &&
1874 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1875 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1877 * We are exiting and our parent doesn't care. POSIX.1
1878 * defines special semantics for setting SIGCHLD to SIG_IGN
1879 * or setting the SA_NOCLDWAIT flag: we should be reaped
1880 * automatically and not left for our parent's wait4 call.
1881 * Rather than having the parent do it as a magic kind of
1882 * signal handler, we just set this to tell do_exit that we
1883 * can be cleaned up without becoming a zombie. Note that
1884 * we still call __wake_up_parent in this case, because a
1885 * blocked sys_wait4 might now return -ECHILD.
1887 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1888 * is implementation-defined: we do (if you don't want
1889 * it, just use SIG_IGN instead).
1892 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1895 if (valid_signal(sig) && sig)
1896 __group_send_sig_info(sig, &info, tsk->parent);
1897 __wake_up_parent(tsk, tsk->parent);
1898 spin_unlock_irqrestore(&psig->siglock, flags);
1904 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1905 * @tsk: task reporting the state change
1906 * @for_ptracer: the notification is for ptracer
1907 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1909 * Notify @tsk's parent that the stopped/continued state has changed. If
1910 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1911 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1914 * Must be called with tasklist_lock at least read locked.
1916 static void do_notify_parent_cldstop(struct task_struct *tsk,
1917 bool for_ptracer, int why)
1919 struct kernel_siginfo info;
1920 unsigned long flags;
1921 struct task_struct *parent;
1922 struct sighand_struct *sighand;
1926 parent = tsk->parent;
1928 tsk = tsk->group_leader;
1929 parent = tsk->real_parent;
1932 clear_siginfo(&info);
1933 info.si_signo = SIGCHLD;
1936 * see comment in do_notify_parent() about the following 4 lines
1939 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
1940 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
1943 task_cputime(tsk, &utime, &stime);
1944 info.si_utime = nsec_to_clock_t(utime);
1945 info.si_stime = nsec_to_clock_t(stime);
1950 info.si_status = SIGCONT;
1953 info.si_status = tsk->signal->group_exit_code & 0x7f;
1956 info.si_status = tsk->exit_code & 0x7f;
1962 sighand = parent->sighand;
1963 spin_lock_irqsave(&sighand->siglock, flags);
1964 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1965 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1966 __group_send_sig_info(SIGCHLD, &info, parent);
1968 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1970 __wake_up_parent(tsk, parent);
1971 spin_unlock_irqrestore(&sighand->siglock, flags);
1974 static inline bool may_ptrace_stop(void)
1976 if (!likely(current->ptrace))
1979 * Are we in the middle of do_coredump?
1980 * If so and our tracer is also part of the coredump stopping
1981 * is a deadlock situation, and pointless because our tracer
1982 * is dead so don't allow us to stop.
1983 * If SIGKILL was already sent before the caller unlocked
1984 * ->siglock we must see ->core_state != NULL. Otherwise it
1985 * is safe to enter schedule().
1987 * This is almost outdated, a task with the pending SIGKILL can't
1988 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
1989 * after SIGKILL was already dequeued.
1991 if (unlikely(current->mm->core_state) &&
1992 unlikely(current->mm == current->parent->mm))
1999 * Return non-zero if there is a SIGKILL that should be waking us up.
2000 * Called with the siglock held.
2002 static bool sigkill_pending(struct task_struct *tsk)
2004 return sigismember(&tsk->pending.signal, SIGKILL) ||
2005 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
2009 * This must be called with current->sighand->siglock held.
2011 * This should be the path for all ptrace stops.
2012 * We always set current->last_siginfo while stopped here.
2013 * That makes it a way to test a stopped process for
2014 * being ptrace-stopped vs being job-control-stopped.
2016 * If we actually decide not to stop at all because the tracer
2017 * is gone, we keep current->exit_code unless clear_code.
2019 static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
2020 __releases(¤t->sighand->siglock)
2021 __acquires(¤t->sighand->siglock)
2023 bool gstop_done = false;
2025 if (arch_ptrace_stop_needed(exit_code, info)) {
2027 * The arch code has something special to do before a
2028 * ptrace stop. This is allowed to block, e.g. for faults
2029 * on user stack pages. We can't keep the siglock while
2030 * calling arch_ptrace_stop, so we must release it now.
2031 * To preserve proper semantics, we must do this before
2032 * any signal bookkeeping like checking group_stop_count.
2033 * Meanwhile, a SIGKILL could come in before we retake the
2034 * siglock. That must prevent us from sleeping in TASK_TRACED.
2035 * So after regaining the lock, we must check for SIGKILL.
2037 spin_unlock_irq(¤t->sighand->siglock);
2038 arch_ptrace_stop(exit_code, info);
2039 spin_lock_irq(¤t->sighand->siglock);
2040 if (sigkill_pending(current))
2044 set_special_state(TASK_TRACED);
2047 * We're committing to trapping. TRACED should be visible before
2048 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2049 * Also, transition to TRACED and updates to ->jobctl should be
2050 * atomic with respect to siglock and should be done after the arch
2051 * hook as siglock is released and regrabbed across it.
2056 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2058 * set_current_state() smp_wmb();
2060 * wait_task_stopped()
2061 * task_stopped_code()
2062 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2066 current->last_siginfo = info;
2067 current->exit_code = exit_code;
2070 * If @why is CLD_STOPPED, we're trapping to participate in a group
2071 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
2072 * across siglock relocks since INTERRUPT was scheduled, PENDING
2073 * could be clear now. We act as if SIGCONT is received after
2074 * TASK_TRACED is entered - ignore it.
2076 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2077 gstop_done = task_participate_group_stop(current);
2079 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2080 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2081 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2082 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2084 /* entering a trap, clear TRAPPING */
2085 task_clear_jobctl_trapping(current);
2087 spin_unlock_irq(¤t->sighand->siglock);
2088 read_lock(&tasklist_lock);
2089 if (may_ptrace_stop()) {
2091 * Notify parents of the stop.
2093 * While ptraced, there are two parents - the ptracer and
2094 * the real_parent of the group_leader. The ptracer should
2095 * know about every stop while the real parent is only
2096 * interested in the completion of group stop. The states
2097 * for the two don't interact with each other. Notify
2098 * separately unless they're gonna be duplicates.
2100 do_notify_parent_cldstop(current, true, why);
2101 if (gstop_done && ptrace_reparented(current))
2102 do_notify_parent_cldstop(current, false, why);
2105 * Don't want to allow preemption here, because
2106 * sys_ptrace() needs this task to be inactive.
2108 * XXX: implement read_unlock_no_resched().
2111 read_unlock(&tasklist_lock);
2112 preempt_enable_no_resched();
2113 cgroup_enter_frozen();
2114 freezable_schedule();
2117 * By the time we got the lock, our tracer went away.
2118 * Don't drop the lock yet, another tracer may come.
2120 * If @gstop_done, the ptracer went away between group stop
2121 * completion and here. During detach, it would have set
2122 * JOBCTL_STOP_PENDING on us and we'll re-enter
2123 * TASK_STOPPED in do_signal_stop() on return, so notifying
2124 * the real parent of the group stop completion is enough.
2127 do_notify_parent_cldstop(current, false, why);
2129 /* tasklist protects us from ptrace_freeze_traced() */
2130 __set_current_state(TASK_RUNNING);
2132 current->exit_code = 0;
2133 read_unlock(&tasklist_lock);
2137 * We are back. Now reacquire the siglock before touching
2138 * last_siginfo, so that we are sure to have synchronized with
2139 * any signal-sending on another CPU that wants to examine it.
2141 spin_lock_irq(¤t->sighand->siglock);
2142 current->last_siginfo = NULL;
2144 /* LISTENING can be set only during STOP traps, clear it */
2145 current->jobctl &= ~JOBCTL_LISTENING;
2148 * Queued signals ignored us while we were stopped for tracing.
2149 * So check for any that we should take before resuming user mode.
2150 * This sets TIF_SIGPENDING, but never clears it.
2152 recalc_sigpending_tsk(current);
2155 static void ptrace_do_notify(int signr, int exit_code, int why)
2157 kernel_siginfo_t info;
2159 clear_siginfo(&info);
2160 info.si_signo = signr;
2161 info.si_code = exit_code;
2162 info.si_pid = task_pid_vnr(current);
2163 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2165 /* Let the debugger run. */
2166 ptrace_stop(exit_code, why, 1, &info);
2169 void ptrace_notify(int exit_code)
2171 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2172 if (unlikely(current->task_works))
2175 spin_lock_irq(¤t->sighand->siglock);
2176 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
2177 spin_unlock_irq(¤t->sighand->siglock);
2181 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2182 * @signr: signr causing group stop if initiating
2184 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2185 * and participate in it. If already set, participate in the existing
2186 * group stop. If participated in a group stop (and thus slept), %true is
2187 * returned with siglock released.
2189 * If ptraced, this function doesn't handle stop itself. Instead,
2190 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2191 * untouched. The caller must ensure that INTERRUPT trap handling takes
2192 * places afterwards.
2195 * Must be called with @current->sighand->siglock held, which is released
2199 * %false if group stop is already cancelled or ptrace trap is scheduled.
2200 * %true if participated in group stop.
2202 static bool do_signal_stop(int signr)
2203 __releases(¤t->sighand->siglock)
2205 struct signal_struct *sig = current->signal;
2207 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2208 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2209 struct task_struct *t;
2211 /* signr will be recorded in task->jobctl for retries */
2212 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2214 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2215 unlikely(signal_group_exit(sig)))
2218 * There is no group stop already in progress. We must
2221 * While ptraced, a task may be resumed while group stop is
2222 * still in effect and then receive a stop signal and
2223 * initiate another group stop. This deviates from the
2224 * usual behavior as two consecutive stop signals can't
2225 * cause two group stops when !ptraced. That is why we
2226 * also check !task_is_stopped(t) below.
2228 * The condition can be distinguished by testing whether
2229 * SIGNAL_STOP_STOPPED is already set. Don't generate
2230 * group_exit_code in such case.
2232 * This is not necessary for SIGNAL_STOP_CONTINUED because
2233 * an intervening stop signal is required to cause two
2234 * continued events regardless of ptrace.
2236 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2237 sig->group_exit_code = signr;
2239 sig->group_stop_count = 0;
2241 if (task_set_jobctl_pending(current, signr | gstop))
2242 sig->group_stop_count++;
2245 while_each_thread(current, t) {
2247 * Setting state to TASK_STOPPED for a group
2248 * stop is always done with the siglock held,
2249 * so this check has no races.
2251 if (!task_is_stopped(t) &&
2252 task_set_jobctl_pending(t, signr | gstop)) {
2253 sig->group_stop_count++;
2254 if (likely(!(t->ptrace & PT_SEIZED)))
2255 signal_wake_up(t, 0);
2257 ptrace_trap_notify(t);
2262 if (likely(!current->ptrace)) {
2266 * If there are no other threads in the group, or if there
2267 * is a group stop in progress and we are the last to stop,
2268 * report to the parent.
2270 if (task_participate_group_stop(current))
2271 notify = CLD_STOPPED;
2273 set_special_state(TASK_STOPPED);
2274 spin_unlock_irq(¤t->sighand->siglock);
2277 * Notify the parent of the group stop completion. Because
2278 * we're not holding either the siglock or tasklist_lock
2279 * here, ptracer may attach inbetween; however, this is for
2280 * group stop and should always be delivered to the real
2281 * parent of the group leader. The new ptracer will get
2282 * its notification when this task transitions into
2286 read_lock(&tasklist_lock);
2287 do_notify_parent_cldstop(current, false, notify);
2288 read_unlock(&tasklist_lock);
2291 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2292 cgroup_enter_frozen();
2293 freezable_schedule();
2297 * While ptraced, group stop is handled by STOP trap.
2298 * Schedule it and let the caller deal with it.
2300 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2306 * do_jobctl_trap - take care of ptrace jobctl traps
2308 * When PT_SEIZED, it's used for both group stop and explicit
2309 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2310 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2311 * the stop signal; otherwise, %SIGTRAP.
2313 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2314 * number as exit_code and no siginfo.
2317 * Must be called with @current->sighand->siglock held, which may be
2318 * released and re-acquired before returning with intervening sleep.
2320 static void do_jobctl_trap(void)
2322 struct signal_struct *signal = current->signal;
2323 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2325 if (current->ptrace & PT_SEIZED) {
2326 if (!signal->group_stop_count &&
2327 !(signal->flags & SIGNAL_STOP_STOPPED))
2329 WARN_ON_ONCE(!signr);
2330 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2333 WARN_ON_ONCE(!signr);
2334 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2335 current->exit_code = 0;
2340 * do_freezer_trap - handle the freezer jobctl trap
2342 * Puts the task into frozen state, if only the task is not about to quit.
2343 * In this case it drops JOBCTL_TRAP_FREEZE.
2346 * Must be called with @current->sighand->siglock held,
2347 * which is always released before returning.
2349 static void do_freezer_trap(void)
2350 __releases(¤t->sighand->siglock)
2353 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2354 * let's make another loop to give it a chance to be handled.
2355 * In any case, we'll return back.
2357 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2358 JOBCTL_TRAP_FREEZE) {
2359 spin_unlock_irq(¤t->sighand->siglock);
2364 * Now we're sure that there is no pending fatal signal and no
2365 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2366 * immediately (if there is a non-fatal signal pending), and
2367 * put the task into sleep.
2369 __set_current_state(TASK_INTERRUPTIBLE);
2370 clear_thread_flag(TIF_SIGPENDING);
2371 spin_unlock_irq(¤t->sighand->siglock);
2372 cgroup_enter_frozen();
2373 freezable_schedule();
2376 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2379 * We do not check sig_kernel_stop(signr) but set this marker
2380 * unconditionally because we do not know whether debugger will
2381 * change signr. This flag has no meaning unless we are going
2382 * to stop after return from ptrace_stop(). In this case it will
2383 * be checked in do_signal_stop(), we should only stop if it was
2384 * not cleared by SIGCONT while we were sleeping. See also the
2385 * comment in dequeue_signal().
2387 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2388 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2390 /* We're back. Did the debugger cancel the sig? */
2391 signr = current->exit_code;
2395 current->exit_code = 0;
2398 * Update the siginfo structure if the signal has
2399 * changed. If the debugger wanted something
2400 * specific in the siginfo structure then it should
2401 * have updated *info via PTRACE_SETSIGINFO.
2403 if (signr != info->si_signo) {
2404 clear_siginfo(info);
2405 info->si_signo = signr;
2407 info->si_code = SI_USER;
2409 info->si_pid = task_pid_vnr(current->parent);
2410 info->si_uid = from_kuid_munged(current_user_ns(),
2411 task_uid(current->parent));
2415 /* If the (new) signal is now blocked, requeue it. */
2416 if (sigismember(¤t->blocked, signr)) {
2417 send_signal(signr, info, current, PIDTYPE_PID);
2424 bool get_signal(struct ksignal *ksig)
2426 struct sighand_struct *sighand = current->sighand;
2427 struct signal_struct *signal = current->signal;
2430 if (unlikely(current->task_works))
2433 if (unlikely(uprobe_deny_signal()))
2437 * Do this once, we can't return to user-mode if freezing() == T.
2438 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2439 * thus do not need another check after return.
2444 spin_lock_irq(&sighand->siglock);
2446 * Every stopped thread goes here after wakeup. Check to see if
2447 * we should notify the parent, prepare_signal(SIGCONT) encodes
2448 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2450 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2453 if (signal->flags & SIGNAL_CLD_CONTINUED)
2454 why = CLD_CONTINUED;
2458 signal->flags &= ~SIGNAL_CLD_MASK;
2460 spin_unlock_irq(&sighand->siglock);
2463 * Notify the parent that we're continuing. This event is
2464 * always per-process and doesn't make whole lot of sense
2465 * for ptracers, who shouldn't consume the state via
2466 * wait(2) either, but, for backward compatibility, notify
2467 * the ptracer of the group leader too unless it's gonna be
2470 read_lock(&tasklist_lock);
2471 do_notify_parent_cldstop(current, false, why);
2473 if (ptrace_reparented(current->group_leader))
2474 do_notify_parent_cldstop(current->group_leader,
2476 read_unlock(&tasklist_lock);
2481 /* Has this task already been marked for death? */
2482 if (signal_group_exit(signal)) {
2483 ksig->info.si_signo = signr = SIGKILL;
2484 sigdelset(¤t->pending.signal, SIGKILL);
2485 recalc_sigpending();
2486 current->jobctl &= ~JOBCTL_TRAP_FREEZE;
2487 spin_unlock_irq(&sighand->siglock);
2488 if (unlikely(cgroup_task_frozen(current)))
2489 cgroup_leave_frozen(true);
2494 struct k_sigaction *ka;
2496 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2500 if (unlikely(current->jobctl &
2501 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2502 if (current->jobctl & JOBCTL_TRAP_MASK) {
2504 spin_unlock_irq(&sighand->siglock);
2505 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2512 * If the task is leaving the frozen state, let's update
2513 * cgroup counters and reset the frozen bit.
2515 if (unlikely(cgroup_task_frozen(current))) {
2516 spin_unlock_irq(&sighand->siglock);
2517 cgroup_leave_frozen(false);
2522 * Signals generated by the execution of an instruction
2523 * need to be delivered before any other pending signals
2524 * so that the instruction pointer in the signal stack
2525 * frame points to the faulting instruction.
2527 signr = dequeue_synchronous_signal(&ksig->info);
2529 signr = dequeue_signal(current, ¤t->blocked, &ksig->info);
2532 break; /* will return 0 */
2534 if (unlikely(current->ptrace) && signr != SIGKILL) {
2535 signr = ptrace_signal(signr, &ksig->info);
2540 ka = &sighand->action[signr-1];
2542 /* Trace actually delivered signals. */
2543 trace_signal_deliver(signr, &ksig->info, ka);
2545 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2547 if (ka->sa.sa_handler != SIG_DFL) {
2548 /* Run the handler. */
2551 if (ka->sa.sa_flags & SA_ONESHOT)
2552 ka->sa.sa_handler = SIG_DFL;
2554 break; /* will return non-zero "signr" value */
2558 * Now we are doing the default action for this signal.
2560 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2564 * Global init gets no signals it doesn't want.
2565 * Container-init gets no signals it doesn't want from same
2568 * Note that if global/container-init sees a sig_kernel_only()
2569 * signal here, the signal must have been generated internally
2570 * or must have come from an ancestor namespace. In either
2571 * case, the signal cannot be dropped.
2573 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2574 !sig_kernel_only(signr))
2577 if (sig_kernel_stop(signr)) {
2579 * The default action is to stop all threads in
2580 * the thread group. The job control signals
2581 * do nothing in an orphaned pgrp, but SIGSTOP
2582 * always works. Note that siglock needs to be
2583 * dropped during the call to is_orphaned_pgrp()
2584 * because of lock ordering with tasklist_lock.
2585 * This allows an intervening SIGCONT to be posted.
2586 * We need to check for that and bail out if necessary.
2588 if (signr != SIGSTOP) {
2589 spin_unlock_irq(&sighand->siglock);
2591 /* signals can be posted during this window */
2593 if (is_current_pgrp_orphaned())
2596 spin_lock_irq(&sighand->siglock);
2599 if (likely(do_signal_stop(ksig->info.si_signo))) {
2600 /* It released the siglock. */
2605 * We didn't actually stop, due to a race
2606 * with SIGCONT or something like that.
2611 spin_unlock_irq(&sighand->siglock);
2615 * Anything else is fatal, maybe with a core dump.
2617 current->flags |= PF_SIGNALED;
2619 if (sig_kernel_coredump(signr)) {
2620 if (print_fatal_signals)
2621 print_fatal_signal(ksig->info.si_signo);
2622 proc_coredump_connector(current);
2624 * If it was able to dump core, this kills all
2625 * other threads in the group and synchronizes with
2626 * their demise. If we lost the race with another
2627 * thread getting here, it set group_exit_code
2628 * first and our do_group_exit call below will use
2629 * that value and ignore the one we pass it.
2631 do_coredump(&ksig->info);
2635 * Death signals, no core dump.
2637 do_group_exit(ksig->info.si_signo);
2640 spin_unlock_irq(&sighand->siglock);
2643 return ksig->sig > 0;
2647 * signal_delivered -
2648 * @ksig: kernel signal struct
2649 * @stepping: nonzero if debugger single-step or block-step in use
2651 * This function should be called when a signal has successfully been
2652 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2653 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2654 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2656 static void signal_delivered(struct ksignal *ksig, int stepping)
2660 /* A signal was successfully delivered, and the
2661 saved sigmask was stored on the signal frame,
2662 and will be restored by sigreturn. So we can
2663 simply clear the restore sigmask flag. */
2664 clear_restore_sigmask();
2666 sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask);
2667 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2668 sigaddset(&blocked, ksig->sig);
2669 set_current_blocked(&blocked);
2670 tracehook_signal_handler(stepping);
2673 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2676 force_sigsegv(ksig->sig, current);
2678 signal_delivered(ksig, stepping);
2682 * It could be that complete_signal() picked us to notify about the
2683 * group-wide signal. Other threads should be notified now to take
2684 * the shared signals in @which since we will not.
2686 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2689 struct task_struct *t;
2691 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2692 if (sigisemptyset(&retarget))
2696 while_each_thread(tsk, t) {
2697 if (t->flags & PF_EXITING)
2700 if (!has_pending_signals(&retarget, &t->blocked))
2702 /* Remove the signals this thread can handle. */
2703 sigandsets(&retarget, &retarget, &t->blocked);
2705 if (!signal_pending(t))
2706 signal_wake_up(t, 0);
2708 if (sigisemptyset(&retarget))
2713 void exit_signals(struct task_struct *tsk)
2719 * @tsk is about to have PF_EXITING set - lock out users which
2720 * expect stable threadgroup.
2722 cgroup_threadgroup_change_begin(tsk);
2724 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2725 tsk->flags |= PF_EXITING;
2726 cgroup_threadgroup_change_end(tsk);
2730 spin_lock_irq(&tsk->sighand->siglock);
2732 * From now this task is not visible for group-wide signals,
2733 * see wants_signal(), do_signal_stop().
2735 tsk->flags |= PF_EXITING;
2737 cgroup_threadgroup_change_end(tsk);
2739 if (!signal_pending(tsk))
2742 unblocked = tsk->blocked;
2743 signotset(&unblocked);
2744 retarget_shared_pending(tsk, &unblocked);
2746 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2747 task_participate_group_stop(tsk))
2748 group_stop = CLD_STOPPED;
2750 spin_unlock_irq(&tsk->sighand->siglock);
2753 * If group stop has completed, deliver the notification. This
2754 * should always go to the real parent of the group leader.
2756 if (unlikely(group_stop)) {
2757 read_lock(&tasklist_lock);
2758 do_notify_parent_cldstop(tsk, false, group_stop);
2759 read_unlock(&tasklist_lock);
2764 * System call entry points.
2768 * sys_restart_syscall - restart a system call
2770 SYSCALL_DEFINE0(restart_syscall)
2772 struct restart_block *restart = ¤t->restart_block;
2773 return restart->fn(restart);
2776 long do_no_restart_syscall(struct restart_block *param)
2781 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2783 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2784 sigset_t newblocked;
2785 /* A set of now blocked but previously unblocked signals. */
2786 sigandnsets(&newblocked, newset, ¤t->blocked);
2787 retarget_shared_pending(tsk, &newblocked);
2789 tsk->blocked = *newset;
2790 recalc_sigpending();
2794 * set_current_blocked - change current->blocked mask
2797 * It is wrong to change ->blocked directly, this helper should be used
2798 * to ensure the process can't miss a shared signal we are going to block.
2800 void set_current_blocked(sigset_t *newset)
2802 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2803 __set_current_blocked(newset);
2806 void __set_current_blocked(const sigset_t *newset)
2808 struct task_struct *tsk = current;
2811 * In case the signal mask hasn't changed, there is nothing we need
2812 * to do. The current->blocked shouldn't be modified by other task.
2814 if (sigequalsets(&tsk->blocked, newset))
2817 spin_lock_irq(&tsk->sighand->siglock);
2818 __set_task_blocked(tsk, newset);
2819 spin_unlock_irq(&tsk->sighand->siglock);
2823 * This is also useful for kernel threads that want to temporarily
2824 * (or permanently) block certain signals.
2826 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2827 * interface happily blocks "unblockable" signals like SIGKILL
2830 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2832 struct task_struct *tsk = current;
2835 /* Lockless, only current can change ->blocked, never from irq */
2837 *oldset = tsk->blocked;
2841 sigorsets(&newset, &tsk->blocked, set);
2844 sigandnsets(&newset, &tsk->blocked, set);
2853 __set_current_blocked(&newset);
2856 EXPORT_SYMBOL(sigprocmask);
2859 * The api helps set app-provided sigmasks.
2861 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2862 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2864 int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
2865 sigset_t *oldset, size_t sigsetsize)
2870 if (sigsetsize != sizeof(sigset_t))
2872 if (copy_from_user(set, usigmask, sizeof(sigset_t)))
2875 *oldset = current->blocked;
2876 set_current_blocked(set);
2880 EXPORT_SYMBOL(set_user_sigmask);
2882 #ifdef CONFIG_COMPAT
2883 int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
2884 sigset_t *set, sigset_t *oldset,
2890 if (sigsetsize != sizeof(compat_sigset_t))
2892 if (get_compat_sigset(set, usigmask))
2895 *oldset = current->blocked;
2896 set_current_blocked(set);
2900 EXPORT_SYMBOL(set_compat_user_sigmask);
2904 * restore_user_sigmask:
2905 * usigmask: sigmask passed in from userland.
2906 * sigsaved: saved sigmask when the syscall started and changed the sigmask to
2909 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2910 * epoll_pwait where a new sigmask is passed in from userland for the syscalls.
2912 void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved)
2918 * When signals are pending, do not restore them here.
2919 * Restoring sigmask here can lead to delivering signals that the above
2920 * syscalls are intended to block because of the sigmask passed in.
2922 if (signal_pending(current)) {
2923 current->saved_sigmask = *sigsaved;
2924 set_restore_sigmask();
2929 * This is needed because the fast syscall return path does not restore
2930 * saved_sigmask when signals are not pending.
2932 set_current_blocked(sigsaved);
2934 EXPORT_SYMBOL(restore_user_sigmask);
2937 * sys_rt_sigprocmask - change the list of currently blocked signals
2938 * @how: whether to add, remove, or set signals
2939 * @nset: stores pending signals
2940 * @oset: previous value of signal mask if non-null
2941 * @sigsetsize: size of sigset_t type
2943 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
2944 sigset_t __user *, oset, size_t, sigsetsize)
2946 sigset_t old_set, new_set;
2949 /* XXX: Don't preclude handling different sized sigset_t's. */
2950 if (sigsetsize != sizeof(sigset_t))
2953 old_set = current->blocked;
2956 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
2958 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2960 error = sigprocmask(how, &new_set, NULL);
2966 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
2973 #ifdef CONFIG_COMPAT
2974 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
2975 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
2977 sigset_t old_set = current->blocked;
2979 /* XXX: Don't preclude handling different sized sigset_t's. */
2980 if (sigsetsize != sizeof(sigset_t))
2986 if (get_compat_sigset(&new_set, nset))
2988 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2990 error = sigprocmask(how, &new_set, NULL);
2994 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
2998 static void do_sigpending(sigset_t *set)
3000 spin_lock_irq(¤t->sighand->siglock);
3001 sigorsets(set, ¤t->pending.signal,
3002 ¤t->signal->shared_pending.signal);
3003 spin_unlock_irq(¤t->sighand->siglock);
3005 /* Outside the lock because only this thread touches it. */
3006 sigandsets(set, ¤t->blocked, set);
3010 * sys_rt_sigpending - examine a pending signal that has been raised
3012 * @uset: stores pending signals
3013 * @sigsetsize: size of sigset_t type or larger
3015 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3019 if (sigsetsize > sizeof(*uset))
3022 do_sigpending(&set);
3024 if (copy_to_user(uset, &set, sigsetsize))
3030 #ifdef CONFIG_COMPAT
3031 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3032 compat_size_t, sigsetsize)
3036 if (sigsetsize > sizeof(*uset))
3039 do_sigpending(&set);
3041 return put_compat_sigset(uset, &set, sigsetsize);
3045 static const struct {
3046 unsigned char limit, layout;
3048 [SIGILL] = { NSIGILL, SIL_FAULT },
3049 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3050 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3051 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3052 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3054 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3056 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3057 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3058 [SIGSYS] = { NSIGSYS, SIL_SYS },
3061 static bool known_siginfo_layout(unsigned sig, int si_code)
3063 if (si_code == SI_KERNEL)
3065 else if ((si_code > SI_USER)) {
3066 if (sig_specific_sicodes(sig)) {
3067 if (si_code <= sig_sicodes[sig].limit)
3070 else if (si_code <= NSIGPOLL)
3073 else if (si_code >= SI_DETHREAD)
3075 else if (si_code == SI_ASYNCNL)
3080 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3082 enum siginfo_layout layout = SIL_KILL;
3083 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3084 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3085 (si_code <= sig_sicodes[sig].limit)) {
3086 layout = sig_sicodes[sig].layout;
3087 /* Handle the exceptions */
3088 if ((sig == SIGBUS) &&
3089 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3090 layout = SIL_FAULT_MCEERR;
3091 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3092 layout = SIL_FAULT_BNDERR;
3094 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3095 layout = SIL_FAULT_PKUERR;
3098 else if (si_code <= NSIGPOLL)
3101 if (si_code == SI_TIMER)
3103 else if (si_code == SI_SIGIO)
3105 else if (si_code < 0)
3111 static inline char __user *si_expansion(const siginfo_t __user *info)
3113 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3116 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3118 char __user *expansion = si_expansion(to);
3119 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
3121 if (clear_user(expansion, SI_EXPANSION_SIZE))
3126 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3127 const siginfo_t __user *from)
3129 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3130 char __user *expansion = si_expansion(from);
3131 char buf[SI_EXPANSION_SIZE];
3134 * An unknown si_code might need more than
3135 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3136 * extra bytes are 0. This guarantees copy_siginfo_to_user
3137 * will return this data to userspace exactly.
3139 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3141 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3149 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3150 const siginfo_t __user *from)
3152 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3154 to->si_signo = signo;
3155 return post_copy_siginfo_from_user(to, from);
3158 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3160 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3162 return post_copy_siginfo_from_user(to, from);
3165 #ifdef CONFIG_COMPAT
3166 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3167 const struct kernel_siginfo *from)
3168 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3170 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3172 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3173 const struct kernel_siginfo *from, bool x32_ABI)
3176 struct compat_siginfo new;
3177 memset(&new, 0, sizeof(new));
3179 new.si_signo = from->si_signo;
3180 new.si_errno = from->si_errno;
3181 new.si_code = from->si_code;
3182 switch(siginfo_layout(from->si_signo, from->si_code)) {
3184 new.si_pid = from->si_pid;
3185 new.si_uid = from->si_uid;
3188 new.si_tid = from->si_tid;
3189 new.si_overrun = from->si_overrun;
3190 new.si_int = from->si_int;
3193 new.si_band = from->si_band;
3194 new.si_fd = from->si_fd;
3197 new.si_addr = ptr_to_compat(from->si_addr);
3198 #ifdef __ARCH_SI_TRAPNO
3199 new.si_trapno = from->si_trapno;
3202 case SIL_FAULT_MCEERR:
3203 new.si_addr = ptr_to_compat(from->si_addr);
3204 #ifdef __ARCH_SI_TRAPNO
3205 new.si_trapno = from->si_trapno;
3207 new.si_addr_lsb = from->si_addr_lsb;
3209 case SIL_FAULT_BNDERR:
3210 new.si_addr = ptr_to_compat(from->si_addr);
3211 #ifdef __ARCH_SI_TRAPNO
3212 new.si_trapno = from->si_trapno;
3214 new.si_lower = ptr_to_compat(from->si_lower);
3215 new.si_upper = ptr_to_compat(from->si_upper);
3217 case SIL_FAULT_PKUERR:
3218 new.si_addr = ptr_to_compat(from->si_addr);
3219 #ifdef __ARCH_SI_TRAPNO
3220 new.si_trapno = from->si_trapno;
3222 new.si_pkey = from->si_pkey;
3225 new.si_pid = from->si_pid;
3226 new.si_uid = from->si_uid;
3227 new.si_status = from->si_status;
3228 #ifdef CONFIG_X86_X32_ABI
3230 new._sifields._sigchld_x32._utime = from->si_utime;
3231 new._sifields._sigchld_x32._stime = from->si_stime;
3235 new.si_utime = from->si_utime;
3236 new.si_stime = from->si_stime;
3240 new.si_pid = from->si_pid;
3241 new.si_uid = from->si_uid;
3242 new.si_int = from->si_int;
3245 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3246 new.si_syscall = from->si_syscall;
3247 new.si_arch = from->si_arch;
3251 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3257 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3258 const struct compat_siginfo *from)
3261 to->si_signo = from->si_signo;
3262 to->si_errno = from->si_errno;
3263 to->si_code = from->si_code;
3264 switch(siginfo_layout(from->si_signo, from->si_code)) {
3266 to->si_pid = from->si_pid;
3267 to->si_uid = from->si_uid;
3270 to->si_tid = from->si_tid;
3271 to->si_overrun = from->si_overrun;
3272 to->si_int = from->si_int;
3275 to->si_band = from->si_band;
3276 to->si_fd = from->si_fd;
3279 to->si_addr = compat_ptr(from->si_addr);
3280 #ifdef __ARCH_SI_TRAPNO
3281 to->si_trapno = from->si_trapno;
3284 case SIL_FAULT_MCEERR:
3285 to->si_addr = compat_ptr(from->si_addr);
3286 #ifdef __ARCH_SI_TRAPNO
3287 to->si_trapno = from->si_trapno;
3289 to->si_addr_lsb = from->si_addr_lsb;
3291 case SIL_FAULT_BNDERR:
3292 to->si_addr = compat_ptr(from->si_addr);
3293 #ifdef __ARCH_SI_TRAPNO
3294 to->si_trapno = from->si_trapno;
3296 to->si_lower = compat_ptr(from->si_lower);
3297 to->si_upper = compat_ptr(from->si_upper);
3299 case SIL_FAULT_PKUERR:
3300 to->si_addr = compat_ptr(from->si_addr);
3301 #ifdef __ARCH_SI_TRAPNO
3302 to->si_trapno = from->si_trapno;
3304 to->si_pkey = from->si_pkey;
3307 to->si_pid = from->si_pid;
3308 to->si_uid = from->si_uid;
3309 to->si_status = from->si_status;
3310 #ifdef CONFIG_X86_X32_ABI
3311 if (in_x32_syscall()) {
3312 to->si_utime = from->_sifields._sigchld_x32._utime;
3313 to->si_stime = from->_sifields._sigchld_x32._stime;
3317 to->si_utime = from->si_utime;
3318 to->si_stime = from->si_stime;
3322 to->si_pid = from->si_pid;
3323 to->si_uid = from->si_uid;
3324 to->si_int = from->si_int;
3327 to->si_call_addr = compat_ptr(from->si_call_addr);
3328 to->si_syscall = from->si_syscall;
3329 to->si_arch = from->si_arch;
3335 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3336 const struct compat_siginfo __user *ufrom)
3338 struct compat_siginfo from;
3340 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3343 from.si_signo = signo;
3344 return post_copy_siginfo_from_user32(to, &from);
3347 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3348 const struct compat_siginfo __user *ufrom)
3350 struct compat_siginfo from;
3352 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3355 return post_copy_siginfo_from_user32(to, &from);
3357 #endif /* CONFIG_COMPAT */
3360 * do_sigtimedwait - wait for queued signals specified in @which
3361 * @which: queued signals to wait for
3362 * @info: if non-null, the signal's siginfo is returned here
3363 * @ts: upper bound on process time suspension
3365 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3366 const struct timespec64 *ts)
3368 ktime_t *to = NULL, timeout = KTIME_MAX;
3369 struct task_struct *tsk = current;
3370 sigset_t mask = *which;
3374 if (!timespec64_valid(ts))
3376 timeout = timespec64_to_ktime(*ts);
3381 * Invert the set of allowed signals to get those we want to block.
3383 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3386 spin_lock_irq(&tsk->sighand->siglock);
3387 sig = dequeue_signal(tsk, &mask, info);
3388 if (!sig && timeout) {
3390 * None ready, temporarily unblock those we're interested
3391 * while we are sleeping in so that we'll be awakened when
3392 * they arrive. Unblocking is always fine, we can avoid
3393 * set_current_blocked().
3395 tsk->real_blocked = tsk->blocked;
3396 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3397 recalc_sigpending();
3398 spin_unlock_irq(&tsk->sighand->siglock);
3400 __set_current_state(TASK_INTERRUPTIBLE);
3401 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3403 spin_lock_irq(&tsk->sighand->siglock);
3404 __set_task_blocked(tsk, &tsk->real_blocked);
3405 sigemptyset(&tsk->real_blocked);
3406 sig = dequeue_signal(tsk, &mask, info);
3408 spin_unlock_irq(&tsk->sighand->siglock);
3412 return ret ? -EINTR : -EAGAIN;
3416 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3418 * @uthese: queued signals to wait for
3419 * @uinfo: if non-null, the signal's siginfo is returned here
3420 * @uts: upper bound on process time suspension
3421 * @sigsetsize: size of sigset_t type
3423 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3424 siginfo_t __user *, uinfo,
3425 const struct __kernel_timespec __user *, uts,
3429 struct timespec64 ts;
3430 kernel_siginfo_t info;
3433 /* XXX: Don't preclude handling different sized sigset_t's. */
3434 if (sigsetsize != sizeof(sigset_t))
3437 if (copy_from_user(&these, uthese, sizeof(these)))
3441 if (get_timespec64(&ts, uts))
3445 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3447 if (ret > 0 && uinfo) {
3448 if (copy_siginfo_to_user(uinfo, &info))
3455 #ifdef CONFIG_COMPAT_32BIT_TIME
3456 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3457 siginfo_t __user *, uinfo,
3458 const struct old_timespec32 __user *, uts,
3462 struct timespec64 ts;
3463 kernel_siginfo_t info;
3466 if (sigsetsize != sizeof(sigset_t))
3469 if (copy_from_user(&these, uthese, sizeof(these)))
3473 if (get_old_timespec32(&ts, uts))
3477 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3479 if (ret > 0 && uinfo) {
3480 if (copy_siginfo_to_user(uinfo, &info))
3488 #ifdef CONFIG_COMPAT
3489 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3490 struct compat_siginfo __user *, uinfo,
3491 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3494 struct timespec64 t;
3495 kernel_siginfo_t info;
3498 if (sigsetsize != sizeof(sigset_t))
3501 if (get_compat_sigset(&s, uthese))
3505 if (get_timespec64(&t, uts))
3509 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3511 if (ret > 0 && uinfo) {
3512 if (copy_siginfo_to_user32(uinfo, &info))
3519 #ifdef CONFIG_COMPAT_32BIT_TIME
3520 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3521 struct compat_siginfo __user *, uinfo,
3522 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3525 struct timespec64 t;
3526 kernel_siginfo_t info;
3529 if (sigsetsize != sizeof(sigset_t))
3532 if (get_compat_sigset(&s, uthese))
3536 if (get_old_timespec32(&t, uts))
3540 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3542 if (ret > 0 && uinfo) {
3543 if (copy_siginfo_to_user32(uinfo, &info))
3552 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3554 clear_siginfo(info);
3555 info->si_signo = sig;
3557 info->si_code = SI_USER;
3558 info->si_pid = task_tgid_vnr(current);
3559 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3563 * sys_kill - send a signal to a process
3564 * @pid: the PID of the process
3565 * @sig: signal to be sent
3567 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3569 struct kernel_siginfo info;
3571 prepare_kill_siginfo(sig, &info);
3573 return kill_something_info(sig, &info, pid);
3576 #ifdef CONFIG_PROC_FS
3578 * Verify that the signaler and signalee either are in the same pid namespace
3579 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3582 static bool access_pidfd_pidns(struct pid *pid)
3584 struct pid_namespace *active = task_active_pid_ns(current);
3585 struct pid_namespace *p = ns_of_pid(pid);
3598 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3600 #ifdef CONFIG_COMPAT
3602 * Avoid hooking up compat syscalls and instead handle necessary
3603 * conversions here. Note, this is a stop-gap measure and should not be
3604 * considered a generic solution.
3606 if (in_compat_syscall())
3607 return copy_siginfo_from_user32(
3608 kinfo, (struct compat_siginfo __user *)info);
3610 return copy_siginfo_from_user(kinfo, info);
3614 * sys_pidfd_send_signal - send a signal to a process through a task file
3616 * @pidfd: the file descriptor of the process
3617 * @sig: signal to be sent
3618 * @info: the signal info
3619 * @flags: future flags to be passed
3621 * The syscall currently only signals via PIDTYPE_PID which covers
3622 * kill(<positive-pid>, <signal>. It does not signal threads or process
3624 * In order to extend the syscall to threads and process groups the @flags
3625 * argument should be used. In essence, the @flags argument will determine
3626 * what is signaled and not the file descriptor itself. Put in other words,
3627 * grouping is a property of the flags argument not a property of the file
3630 * Return: 0 on success, negative errno on failure
3632 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3633 siginfo_t __user *, info, unsigned int, flags)
3638 kernel_siginfo_t kinfo;
3640 /* Enforce flags be set to 0 until we add an extension. */
3644 f = fdget_raw(pidfd);
3648 /* Is this a pidfd? */
3649 pid = tgid_pidfd_to_pid(f.file);
3656 if (!access_pidfd_pidns(pid))
3660 ret = copy_siginfo_from_user_any(&kinfo, info);
3665 if (unlikely(sig != kinfo.si_signo))
3668 /* Only allow sending arbitrary signals to yourself. */
3670 if ((task_pid(current) != pid) &&
3671 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3674 prepare_kill_siginfo(sig, &kinfo);
3677 ret = kill_pid_info(sig, &kinfo, pid);
3683 #endif /* CONFIG_PROC_FS */
3686 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3688 struct task_struct *p;
3692 p = find_task_by_vpid(pid);
3693 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3694 error = check_kill_permission(sig, info, p);
3696 * The null signal is a permissions and process existence
3697 * probe. No signal is actually delivered.
3699 if (!error && sig) {
3700 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3702 * If lock_task_sighand() failed we pretend the task
3703 * dies after receiving the signal. The window is tiny,
3704 * and the signal is private anyway.
3706 if (unlikely(error == -ESRCH))
3715 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3717 struct kernel_siginfo info;
3719 clear_siginfo(&info);
3720 info.si_signo = sig;
3722 info.si_code = SI_TKILL;
3723 info.si_pid = task_tgid_vnr(current);
3724 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3726 return do_send_specific(tgid, pid, sig, &info);
3730 * sys_tgkill - send signal to one specific thread
3731 * @tgid: the thread group ID of the thread
3732 * @pid: the PID of the thread
3733 * @sig: signal to be sent
3735 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3736 * exists but it's not belonging to the target process anymore. This
3737 * method solves the problem of threads exiting and PIDs getting reused.
3739 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3741 /* This is only valid for single tasks */
3742 if (pid <= 0 || tgid <= 0)
3745 return do_tkill(tgid, pid, sig);
3749 * sys_tkill - send signal to one specific task
3750 * @pid: the PID of the task
3751 * @sig: signal to be sent
3753 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3755 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3757 /* This is only valid for single tasks */
3761 return do_tkill(0, pid, sig);
3764 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3766 /* Not even root can pretend to send signals from the kernel.
3767 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3769 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3770 (task_pid_vnr(current) != pid))
3773 /* POSIX.1b doesn't mention process groups. */
3774 return kill_proc_info(sig, info, pid);
3778 * sys_rt_sigqueueinfo - send signal information to a signal
3779 * @pid: the PID of the thread
3780 * @sig: signal to be sent
3781 * @uinfo: signal info to be sent
3783 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3784 siginfo_t __user *, uinfo)
3786 kernel_siginfo_t info;
3787 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3790 return do_rt_sigqueueinfo(pid, sig, &info);
3793 #ifdef CONFIG_COMPAT
3794 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3797 struct compat_siginfo __user *, uinfo)
3799 kernel_siginfo_t info;
3800 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3803 return do_rt_sigqueueinfo(pid, sig, &info);
3807 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3809 /* This is only valid for single tasks */
3810 if (pid <= 0 || tgid <= 0)
3813 /* Not even root can pretend to send signals from the kernel.
3814 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3816 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3817 (task_pid_vnr(current) != pid))
3820 return do_send_specific(tgid, pid, sig, info);
3823 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3824 siginfo_t __user *, uinfo)
3826 kernel_siginfo_t info;
3827 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3830 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3833 #ifdef CONFIG_COMPAT
3834 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3838 struct compat_siginfo __user *, uinfo)
3840 kernel_siginfo_t info;
3841 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3844 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3849 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3851 void kernel_sigaction(int sig, __sighandler_t action)
3853 spin_lock_irq(¤t->sighand->siglock);
3854 current->sighand->action[sig - 1].sa.sa_handler = action;
3855 if (action == SIG_IGN) {
3859 sigaddset(&mask, sig);
3861 flush_sigqueue_mask(&mask, ¤t->signal->shared_pending);
3862 flush_sigqueue_mask(&mask, ¤t->pending);
3863 recalc_sigpending();
3865 spin_unlock_irq(¤t->sighand->siglock);
3867 EXPORT_SYMBOL(kernel_sigaction);
3869 void __weak sigaction_compat_abi(struct k_sigaction *act,
3870 struct k_sigaction *oact)
3874 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3876 struct task_struct *p = current, *t;
3877 struct k_sigaction *k;
3880 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3883 k = &p->sighand->action[sig-1];
3885 spin_lock_irq(&p->sighand->siglock);
3889 sigaction_compat_abi(act, oact);
3892 sigdelsetmask(&act->sa.sa_mask,
3893 sigmask(SIGKILL) | sigmask(SIGSTOP));
3897 * "Setting a signal action to SIG_IGN for a signal that is
3898 * pending shall cause the pending signal to be discarded,
3899 * whether or not it is blocked."
3901 * "Setting a signal action to SIG_DFL for a signal that is
3902 * pending and whose default action is to ignore the signal
3903 * (for example, SIGCHLD), shall cause the pending signal to
3904 * be discarded, whether or not it is blocked"
3906 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3908 sigaddset(&mask, sig);
3909 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3910 for_each_thread(p, t)
3911 flush_sigqueue_mask(&mask, &t->pending);
3915 spin_unlock_irq(&p->sighand->siglock);
3920 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
3923 struct task_struct *t = current;
3926 memset(oss, 0, sizeof(stack_t));
3927 oss->ss_sp = (void __user *) t->sas_ss_sp;
3928 oss->ss_size = t->sas_ss_size;
3929 oss->ss_flags = sas_ss_flags(sp) |
3930 (current->sas_ss_flags & SS_FLAG_BITS);
3934 void __user *ss_sp = ss->ss_sp;
3935 size_t ss_size = ss->ss_size;
3936 unsigned ss_flags = ss->ss_flags;
3939 if (unlikely(on_sig_stack(sp)))
3942 ss_mode = ss_flags & ~SS_FLAG_BITS;
3943 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
3947 if (ss_mode == SS_DISABLE) {
3951 if (unlikely(ss_size < min_ss_size))
3955 t->sas_ss_sp = (unsigned long) ss_sp;
3956 t->sas_ss_size = ss_size;
3957 t->sas_ss_flags = ss_flags;
3962 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
3966 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
3968 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
3969 current_user_stack_pointer(),
3971 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
3976 int restore_altstack(const stack_t __user *uss)
3979 if (copy_from_user(&new, uss, sizeof(stack_t)))
3981 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
3983 /* squash all but EFAULT for now */
3987 int __save_altstack(stack_t __user *uss, unsigned long sp)
3989 struct task_struct *t = current;
3990 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
3991 __put_user(t->sas_ss_flags, &uss->ss_flags) |
3992 __put_user(t->sas_ss_size, &uss->ss_size);
3995 if (t->sas_ss_flags & SS_AUTODISARM)
4000 #ifdef CONFIG_COMPAT
4001 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4002 compat_stack_t __user *uoss_ptr)
4008 compat_stack_t uss32;
4009 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4011 uss.ss_sp = compat_ptr(uss32.ss_sp);
4012 uss.ss_flags = uss32.ss_flags;
4013 uss.ss_size = uss32.ss_size;
4015 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4016 compat_user_stack_pointer(),
4017 COMPAT_MINSIGSTKSZ);
4018 if (ret >= 0 && uoss_ptr) {
4020 memset(&old, 0, sizeof(old));
4021 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4022 old.ss_flags = uoss.ss_flags;
4023 old.ss_size = uoss.ss_size;
4024 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4030 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4031 const compat_stack_t __user *, uss_ptr,
4032 compat_stack_t __user *, uoss_ptr)
4034 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4037 int compat_restore_altstack(const compat_stack_t __user *uss)
4039 int err = do_compat_sigaltstack(uss, NULL);
4040 /* squash all but -EFAULT for now */
4041 return err == -EFAULT ? err : 0;
4044 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4047 struct task_struct *t = current;
4048 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4050 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4051 __put_user(t->sas_ss_size, &uss->ss_size);
4054 if (t->sas_ss_flags & SS_AUTODISARM)
4060 #ifdef __ARCH_WANT_SYS_SIGPENDING
4063 * sys_sigpending - examine pending signals
4064 * @uset: where mask of pending signal is returned
4066 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4070 if (sizeof(old_sigset_t) > sizeof(*uset))
4073 do_sigpending(&set);
4075 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4081 #ifdef CONFIG_COMPAT
4082 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4086 do_sigpending(&set);
4088 return put_user(set.sig[0], set32);
4094 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4096 * sys_sigprocmask - examine and change blocked signals
4097 * @how: whether to add, remove, or set signals
4098 * @nset: signals to add or remove (if non-null)
4099 * @oset: previous value of signal mask if non-null
4101 * Some platforms have their own version with special arguments;
4102 * others support only sys_rt_sigprocmask.
4105 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4106 old_sigset_t __user *, oset)
4108 old_sigset_t old_set, new_set;
4109 sigset_t new_blocked;
4111 old_set = current->blocked.sig[0];
4114 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4117 new_blocked = current->blocked;
4121 sigaddsetmask(&new_blocked, new_set);
4124 sigdelsetmask(&new_blocked, new_set);
4127 new_blocked.sig[0] = new_set;
4133 set_current_blocked(&new_blocked);
4137 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4143 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4145 #ifndef CONFIG_ODD_RT_SIGACTION
4147 * sys_rt_sigaction - alter an action taken by a process
4148 * @sig: signal to be sent
4149 * @act: new sigaction
4150 * @oact: used to save the previous sigaction
4151 * @sigsetsize: size of sigset_t type
4153 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4154 const struct sigaction __user *, act,
4155 struct sigaction __user *, oact,
4158 struct k_sigaction new_sa, old_sa;
4161 /* XXX: Don't preclude handling different sized sigset_t's. */
4162 if (sigsetsize != sizeof(sigset_t))
4165 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4168 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4172 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4177 #ifdef CONFIG_COMPAT
4178 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4179 const struct compat_sigaction __user *, act,
4180 struct compat_sigaction __user *, oact,
4181 compat_size_t, sigsetsize)
4183 struct k_sigaction new_ka, old_ka;
4184 #ifdef __ARCH_HAS_SA_RESTORER
4185 compat_uptr_t restorer;
4189 /* XXX: Don't preclude handling different sized sigset_t's. */
4190 if (sigsetsize != sizeof(compat_sigset_t))
4194 compat_uptr_t handler;
4195 ret = get_user(handler, &act->sa_handler);
4196 new_ka.sa.sa_handler = compat_ptr(handler);
4197 #ifdef __ARCH_HAS_SA_RESTORER
4198 ret |= get_user(restorer, &act->sa_restorer);
4199 new_ka.sa.sa_restorer = compat_ptr(restorer);
4201 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4202 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4207 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4209 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4211 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4212 sizeof(oact->sa_mask));
4213 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4214 #ifdef __ARCH_HAS_SA_RESTORER
4215 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4216 &oact->sa_restorer);
4222 #endif /* !CONFIG_ODD_RT_SIGACTION */
4224 #ifdef CONFIG_OLD_SIGACTION
4225 SYSCALL_DEFINE3(sigaction, int, sig,
4226 const struct old_sigaction __user *, act,
4227 struct old_sigaction __user *, oact)
4229 struct k_sigaction new_ka, old_ka;
4234 if (!access_ok(act, sizeof(*act)) ||
4235 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4236 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4237 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4238 __get_user(mask, &act->sa_mask))
4240 #ifdef __ARCH_HAS_KA_RESTORER
4241 new_ka.ka_restorer = NULL;
4243 siginitset(&new_ka.sa.sa_mask, mask);
4246 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4249 if (!access_ok(oact, sizeof(*oact)) ||
4250 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4251 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4252 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4253 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4260 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4261 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4262 const struct compat_old_sigaction __user *, act,
4263 struct compat_old_sigaction __user *, oact)
4265 struct k_sigaction new_ka, old_ka;
4267 compat_old_sigset_t mask;
4268 compat_uptr_t handler, restorer;
4271 if (!access_ok(act, sizeof(*act)) ||
4272 __get_user(handler, &act->sa_handler) ||
4273 __get_user(restorer, &act->sa_restorer) ||
4274 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4275 __get_user(mask, &act->sa_mask))
4278 #ifdef __ARCH_HAS_KA_RESTORER
4279 new_ka.ka_restorer = NULL;
4281 new_ka.sa.sa_handler = compat_ptr(handler);
4282 new_ka.sa.sa_restorer = compat_ptr(restorer);
4283 siginitset(&new_ka.sa.sa_mask, mask);
4286 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4289 if (!access_ok(oact, sizeof(*oact)) ||
4290 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4291 &oact->sa_handler) ||
4292 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4293 &oact->sa_restorer) ||
4294 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4295 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4302 #ifdef CONFIG_SGETMASK_SYSCALL
4305 * For backwards compatibility. Functionality superseded by sigprocmask.
4307 SYSCALL_DEFINE0(sgetmask)
4310 return current->blocked.sig[0];
4313 SYSCALL_DEFINE1(ssetmask, int, newmask)
4315 int old = current->blocked.sig[0];
4318 siginitset(&newset, newmask);
4319 set_current_blocked(&newset);
4323 #endif /* CONFIG_SGETMASK_SYSCALL */
4325 #ifdef __ARCH_WANT_SYS_SIGNAL
4327 * For backwards compatibility. Functionality superseded by sigaction.
4329 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4331 struct k_sigaction new_sa, old_sa;
4334 new_sa.sa.sa_handler = handler;
4335 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4336 sigemptyset(&new_sa.sa.sa_mask);
4338 ret = do_sigaction(sig, &new_sa, &old_sa);
4340 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4342 #endif /* __ARCH_WANT_SYS_SIGNAL */
4344 #ifdef __ARCH_WANT_SYS_PAUSE
4346 SYSCALL_DEFINE0(pause)
4348 while (!signal_pending(current)) {
4349 __set_current_state(TASK_INTERRUPTIBLE);
4352 return -ERESTARTNOHAND;
4357 static int sigsuspend(sigset_t *set)
4359 current->saved_sigmask = current->blocked;
4360 set_current_blocked(set);
4362 while (!signal_pending(current)) {
4363 __set_current_state(TASK_INTERRUPTIBLE);
4366 set_restore_sigmask();
4367 return -ERESTARTNOHAND;
4371 * sys_rt_sigsuspend - replace the signal mask for a value with the
4372 * @unewset value until a signal is received
4373 * @unewset: new signal mask value
4374 * @sigsetsize: size of sigset_t type
4376 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4380 /* XXX: Don't preclude handling different sized sigset_t's. */
4381 if (sigsetsize != sizeof(sigset_t))
4384 if (copy_from_user(&newset, unewset, sizeof(newset)))
4386 return sigsuspend(&newset);
4389 #ifdef CONFIG_COMPAT
4390 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4394 /* XXX: Don't preclude handling different sized sigset_t's. */
4395 if (sigsetsize != sizeof(sigset_t))
4398 if (get_compat_sigset(&newset, unewset))
4400 return sigsuspend(&newset);
4404 #ifdef CONFIG_OLD_SIGSUSPEND
4405 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4408 siginitset(&blocked, mask);
4409 return sigsuspend(&blocked);
4412 #ifdef CONFIG_OLD_SIGSUSPEND3
4413 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4416 siginitset(&blocked, mask);
4417 return sigsuspend(&blocked);
4421 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4426 static inline void siginfo_buildtime_checks(void)
4428 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4430 /* Verify the offsets in the two siginfos match */
4431 #define CHECK_OFFSET(field) \
4432 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4435 CHECK_OFFSET(si_pid);
4436 CHECK_OFFSET(si_uid);
4439 CHECK_OFFSET(si_tid);
4440 CHECK_OFFSET(si_overrun);
4441 CHECK_OFFSET(si_value);
4444 CHECK_OFFSET(si_pid);
4445 CHECK_OFFSET(si_uid);
4446 CHECK_OFFSET(si_value);
4449 CHECK_OFFSET(si_pid);
4450 CHECK_OFFSET(si_uid);
4451 CHECK_OFFSET(si_status);
4452 CHECK_OFFSET(si_utime);
4453 CHECK_OFFSET(si_stime);
4456 CHECK_OFFSET(si_addr);
4457 CHECK_OFFSET(si_addr_lsb);
4458 CHECK_OFFSET(si_lower);
4459 CHECK_OFFSET(si_upper);
4460 CHECK_OFFSET(si_pkey);
4463 CHECK_OFFSET(si_band);
4464 CHECK_OFFSET(si_fd);
4467 CHECK_OFFSET(si_call_addr);
4468 CHECK_OFFSET(si_syscall);
4469 CHECK_OFFSET(si_arch);
4473 void __init signals_init(void)
4475 siginfo_buildtime_checks();
4477 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4480 #ifdef CONFIG_KGDB_KDB
4481 #include <linux/kdb.h>
4483 * kdb_send_sig - Allows kdb to send signals without exposing
4484 * signal internals. This function checks if the required locks are
4485 * available before calling the main signal code, to avoid kdb
4488 void kdb_send_sig(struct task_struct *t, int sig)
4490 static struct task_struct *kdb_prev_t;
4492 if (!spin_trylock(&t->sighand->siglock)) {
4493 kdb_printf("Can't do kill command now.\n"
4494 "The sigmask lock is held somewhere else in "
4495 "kernel, try again later\n");
4498 new_t = kdb_prev_t != t;
4500 if (t->state != TASK_RUNNING && new_t) {
4501 spin_unlock(&t->sighand->siglock);
4502 kdb_printf("Process is not RUNNING, sending a signal from "
4503 "kdb risks deadlock\n"
4504 "on the run queue locks. "
4505 "The signal has _not_ been sent.\n"
4506 "Reissue the kill command if you want to risk "
4510 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4511 spin_unlock(&t->sighand->siglock);
4513 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4516 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4518 #endif /* CONFIG_KGDB_KDB */