// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>
#include <linux/sysctl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * clear it themselves.
	 */
	return false;
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

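/*
 * Worked example of the synchronous-first policy above (illustrative
 * only): with both SIGUSR1 and SIGSEGV pending and unblocked in word 0,
 * x has both bits set; x & SYNCHRONOUS_MASK is non-zero, so x is reduced
 * to just the SIGSEGV bit and the synchronous fault signal is reported
 * ahead of the asynchronous one.
 */
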
static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

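/*
 * The dropped-signal case is visible to userspace as sigqueue(3)
 * failing with EAGAIN once the sender's RLIMIT_SIGPENDING quota of
 * queued signals is exhausted, e.g. (illustrative userspace sketch):
 *
 *	union sigval v = { .sival_int = 0 };
 *	while (sigqueue(getpid(), SIGRTMIN, v) == 0)
 *		;	(eventually fails with errno == EAGAIN)
 */
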
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

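/*
 * Accounting note: every sigqueue allocated above pins one
 * UCOUNT_RLIMIT_SIGPENDING reference against the target user's ucounts,
 * and __sigqueue_free() below drops it again, so the get/put pair bounds
 * the per-user backlog of queued signals.
 */
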
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG; i != 0; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* If dying, we handle all new signals by ignoring them */
	if (fatal_signal_pending(tsk))
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
		   kernel_siginfo_t *info, enum pid_type *type)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	*type = PIDTYPE_PID;
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

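/*
 * Sketch of the expected calling pattern (illustrative; the main
 * in-kernel consumer is get_signal()):
 *
 *	enum pid_type type;
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	signr = dequeue_signal(tsk, &tsk->blocked, &info, &type);
 *	spin_unlock_irq(&tsk->sighand->siglock);
 */
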
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	lockdep_assert_held(&t->sighand->siglock);

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * The function is a no-op if no signals in @mask are pending.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

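/*
 * Note on the special values: SEND_SIG_NOINFO (0) and SEND_SIG_PRIV (1)
 * order below any real siginfo pointer, which is what is_si_special()
 * relies on.  So si_fromuser() is true for SEND_SIG_NOINFO (kill(2)-style
 * sends with no siginfo) and for any real siginfo whose si_code passes
 * SI_FROMUSER(), but false for SEND_SIG_PRIV, which is reserved for
 * kernel-generated signals.
 */
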
/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

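/*
 * Example of the SIGCONT carve-out above: an unprivileged shell may
 * deliver SIGCONT to a stopped job that changed credentials (e.g. via a
 * setuid binary) as long as the target is still in the same session -
 * the credential check is skipped for that case, matching POSIX
 * job-control semantics.
 */
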
/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	lockdep_assert_held(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
		return false;
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent.  See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * Try the suggested task first (may or may not be the main thread).
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			__for_each_thread(signal, t) {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			}
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

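/*
 * Example of legacy coalescing: two kill(pid, SIGUSR1) calls before the
 * target runs result in a single delivery, because the second send finds
 * the bit already set in the pending mask.  Real-time signals
 * (sig >= SIGRTMIN) are not coalesced and queue once per send, subject
 * to RLIMIT_SIGPENDING.
 */
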
static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;

	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct file *exe_file;

	exe_file = get_task_exe_file(current);
	if (exe_file) {
		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
			exe_file, current->comm, signr);
		fput(exe_file);
	} else {
		pr_info("%s: potentially unexpected fatal signal %d.\n",
			current->comm, signr);
	}

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
	enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked)
			sigdelset(&t->blocked, sig);
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	/* This can happen if the signal was already pending and blocked */
	if (!task_sigpending(t))
		signal_wake_up(t, 0);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;
	int count = 0;

	p->signal->group_stop_count = 0;

	for_other_threads(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		/* Don't require de_thread to wait for the vhost_worker */
		if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
			count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

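/*
 * Typical usage is via the lock_task_sighand()/unlock_task_sighand()
 * wrappers, as in do_send_sig_info() above: the returned sighand is only
 * a validity token, and a NULL return means the task is already dead and
 * no lock is held.
 */
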
#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int ret = -ESRCH;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		/*
		 * If group_send_sig_info() succeeds at least once ret
		 * becomes 0 and after that the code below has no effect.
		 * Otherwise we return the last err or -ESRCH if this
		 * process group is empty.
		 */
		if (ret)
			ret = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return ret;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;

	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than would appear in a 32bit pointer, so userspace
 * will not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		goto ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
ret:
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

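/*
 * Sketch of the caller-side convention described above (hypothetical
 * caller code, not taken from the USB drivers): when completing to a
 * 32bit process on a 64bit big endian kernel, the caller stores the user
 * pointer as an int rather than a pointer:
 *
 *	sigval_t addr;
 *	if (compat_target)
 *		addr.sival_int = (int)(unsigned long)user_ptr;
 *	else
 *		addr.sival_ptr = user_ptr;
 *	kill_pid_usb_asyncio(signr, errno, addr, pid, cred);
 */
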
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
}

void force_exit_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, HANDLER_EXIT);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr,
			    struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr)
{
	return force_sig_fault_to_task(sig, code, addr, current);
}

int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo     = SIGTRAP;
	info.si_errno     = 0;
	info.si_code      = TRAP_PERF;
	info.si_addr      = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked, however, delivering the signal
	 * asynchronously is better than not delivering at all. But tell user
	 * space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
		force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure in timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

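/*
 * For example, timer_create(2) with sigev_notify == SIGEV_SIGNAL
 * preallocates the sigqueue here; if the allocation fails (e.g. the
 * RLIMIT_SIGPENDING quota is already exhausted) the syscall fails up
 * front with EAGAIN instead of silently losing later expirations.
 */
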
void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();

	/*
	 * This function is used by POSIX timers to deliver a timer signal.
	 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
	 * set), the signal must be delivered to the specific thread (queues
	 * into t->pending).
	 *
	 * Where type is not PIDTYPE_PID, signals must be delivered to the
	 * process. In this case, prefer to deliver to current if it is in
	 * the same thread group as the target process, which avoids
	 * unnecessarily waking up a potentially idle task.
	 */
	t = pid_task(pid, type);
	if (!t)
		goto ret;
	if (type != PIDTYPE_PID && same_thread_group(t, current))
		t = current;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	WARN_ON_ONCE(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));

	WARN_ON_ONCE(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

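/*
 * Example of the autoreap path above: a parent that installs SIG_IGN
 * for SIGCHLD (or sets SA_NOCLDWAIT) accumulates no zombie children -
 * each exiting child returns true here and is released in the exit path
 * without waiting for a wait4() from the parent.
 */
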
/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

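/*
 * Example: a shell that sets SA_NOCLDSTOP still sees stopped children
 * via wait4(..., WUNTRACED) because of the unconditional
 * __wake_up_parent() above; only the SIGCHLD notification itself is
 * suppressed.
 */
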
2208 * This must be called with current->sighand->siglock held.
2210 * This should be the path for all ptrace stops.
2211 * We always set current->last_siginfo while stopped here.
2212 * That makes it a way to test a stopped process for
2213 * being ptrace-stopped vs being job-control-stopped.
2215 * Returns the signal the ptracer requested the code resume
2216 * with. If the code did not stop because the tracer is gone,
2217 * the stop signal remains unchanged unless clear_code.
2219 static int ptrace_stop(int exit_code, int why, unsigned long message,
2220 kernel_siginfo_t *info)
2221 __releases(¤t->sighand->siglock)
2222 __acquires(¤t->sighand->siglock)
2224 bool gstop_done = false;
2226 if (arch_ptrace_stop_needed()) {
2228 * The arch code has something special to do before a
2229 * ptrace stop. This is allowed to block, e.g. for faults
2230 * on user stack pages. We can't keep the siglock while
2231 * calling arch_ptrace_stop, so we must release it now.
2232 * To preserve proper semantics, we must do this before
2233 * any signal bookkeeping like checking group_stop_count.
2235 spin_unlock_irq(¤t->sighand->siglock);
2237 spin_lock_irq(¤t->sighand->siglock);
2241 * After this point ptrace_signal_wake_up or signal_wake_up
2242 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
2243 * signal comes in. Handle previous ptrace_unlinks and fatal
2244 * signals here to prevent ptrace_stop sleeping in schedule.
2246 if (!current->ptrace || __fatal_signal_pending(current))
2249 set_special_state(TASK_TRACED);
2250 current->jobctl |= JOBCTL_TRACED;
2253 * We're committing to trapping. TRACED should be visible before
2254 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2255 * Also, transition to TRACED and updates to ->jobctl should be
2256 * atomic with respect to siglock and should be done after the arch
2257 * hook as siglock is released and regrabbed across it.
2262 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2264 * set_current_state() smp_wmb();
2266 * wait_task_stopped()
2267 * task_stopped_code()
2268 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
2272 current->ptrace_message = message;
2273 current->last_siginfo = info;
2274 current->exit_code = exit_code;
2277 * If @why is CLD_STOPPED, we're trapping to participate in a group
2278 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
2279 * across siglock relocks since INTERRUPT was scheduled, PENDING
2280 * could be clear now. We act as if SIGCONT is received after
2281 * TASK_TRACED is entered - ignore it.
2283 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
2284 gstop_done = task_participate_group_stop(current);
2286 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
2287 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
2288 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2289 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
2291 /* entering a trap, clear TRAPPING */
2292 task_clear_jobctl_trapping(current);
2294 spin_unlock_irq(&current->sighand->siglock);
2295 read_lock(&tasklist_lock);
2297 * Notify parents of the stop.
2299 * While ptraced, there are two parents - the ptracer and
2300 * the real_parent of the group_leader. The ptracer should
2301 * know about every stop while the real parent is only
2302 * interested in the completion of group stop. The states
2303 * for the two don't interact with each other. Notify
2304 * separately unless they're going to be duplicates.
2306 if (current->ptrace)
2307 do_notify_parent_cldstop(current, true, why);
2308 if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
2309 do_notify_parent_cldstop(current, false, why);
2312 * The previous do_notify_parent_cldstop() invocation woke the ptracer.
2313 * On a PREEMPTION kernel this can result in a preemption requirement
2314 * which will be fulfilled after read_unlock(), and the ptracer will then
2315 * be scheduled in.
2316 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
2317 * this task to wait in schedule(). If this task gets preempted then it
2318 * remains enqueued on the runqueue. The ptracer will observe this and
2319 * then sleep for a delay of one HZ tick. In the meantime this task
2320 * gets scheduled, enters schedule() and will wait for the ptracer.
2322 * This preemption point is not bad from a correctness point of
2323 * view but extends the runtime by one HZ tick due to the
2324 * ptracer's sleep. The preempt-disable section ensures that there
2325 * will be no preemption between unlock and schedule(), which
2326 * improves performance since the ptracer will observe that
2327 * the tracee is scheduled out once it gets on the CPU.
2329 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
2330 * Therefore the task can be preempted after do_notify_parent_cldstop()
2331 * before unlocking tasklist_lock so there is no benefit in doing this.
2333 * In fact disabling preemption is harmful on PREEMPT_RT because
2334 * the spinlock_t in cgroup_enter_frozen() must not be acquired
2335 * with preemption disabled due to the 'sleeping' spinlock
2336 * substitution of RT.
2338 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2339 preempt_disable();
2340 read_unlock(&tasklist_lock);
2341 cgroup_enter_frozen();
2342 if (!IS_ENABLED(CONFIG_PREEMPT_RT))
2343 preempt_enable_no_resched();
2344 schedule();
2345 cgroup_leave_frozen(true);
2348 * We are back. Now reacquire the siglock before touching
2349 * last_siginfo, so that we are sure to have synchronized with
2350 * any signal-sending on another CPU that wants to examine it.
2352 spin_lock_irq(&current->sighand->siglock);
2353 exit_code = current->exit_code;
2354 current->last_siginfo = NULL;
2355 current->ptrace_message = 0;
2356 current->exit_code = 0;
2358 /* LISTENING can be set only during STOP traps, clear it */
2359 current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);
2362 * Queued signals ignored us while we were stopped for tracing.
2363 * So check for any that we should take before resuming user mode.
2364 * This sets TIF_SIGPENDING, but never clears it.
2366 recalc_sigpending_tsk(current);
2370 static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
2372 kernel_siginfo_t info;
2374 clear_siginfo(&info);
2375 info.si_signo = signr;
2376 info.si_code = exit_code;
2377 info.si_pid = task_pid_vnr(current);
2378 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
2380 /* Let the debugger run. */
2381 return ptrace_stop(exit_code, why, message, &info);
2384 int ptrace_notify(int exit_code, unsigned long message)
2388 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
2389 if (unlikely(task_work_pending(current)))
2390 task_work_run();
2392 spin_lock_irq(&current->sighand->siglock);
2393 signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
2394 spin_unlock_irq(&current->sighand->siglock);
2399 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2400 * @signr: signr causing group stop if initiating
2402 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2403 * and participate in it. If already set, participate in the existing
2404 * group stop. If participated in a group stop (and thus slept), %true is
2405 * returned with siglock released.
2407 * If ptraced, this function doesn't handle stop itself. Instead,
2408 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2409 * untouched. The caller must ensure that INTERRUPT trap handling takes
2410 * place afterwards.
2413 * Must be called with @current->sighand->siglock held, which is released
2414 * on %true return.
2416 * RETURNS:
2417 * %false if group stop is already cancelled or ptrace trap is scheduled.
2418 * %true if participated in group stop.
2420 static bool do_signal_stop(int signr)
2421 __releases(&current->sighand->siglock)
2423 struct signal_struct *sig = current->signal;
2425 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2426 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2427 struct task_struct *t;
2429 /* signr will be recorded in task->jobctl for retries */
2430 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2432 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2433 unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
2434 unlikely(sig->group_exec_task))
2437 * There is no group stop already in progress. We must
2438 * initiate one now.
2440 * While ptraced, a task may be resumed while group stop is
2441 * still in effect and then receive a stop signal and
2442 * initiate another group stop. This deviates from the
2443 * usual behavior as two consecutive stop signals can't
2444 * cause two group stops when !ptraced. That is why we
2445 * also check !task_is_stopped(t) below.
2447 * The condition can be distinguished by testing whether
2448 * SIGNAL_STOP_STOPPED is already set. Don't generate
2449 * group_exit_code in such a case.
2451 * This is not necessary for SIGNAL_STOP_CONTINUED because
2452 * an intervening stop signal is required to cause two
2453 * continued events regardless of ptrace.
2455 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2456 sig->group_exit_code = signr;
2458 sig->group_stop_count = 0;
2459 if (task_set_jobctl_pending(current, signr | gstop))
2460 sig->group_stop_count++;
2462 for_other_threads(current, t) {
2464 * Setting state to TASK_STOPPED for a group
2465 * stop is always done with the siglock held,
2466 * so this check has no races.
2468 if (!task_is_stopped(t) &&
2469 task_set_jobctl_pending(t, signr | gstop)) {
2470 sig->group_stop_count++;
2471 if (likely(!(t->ptrace & PT_SEIZED)))
2472 signal_wake_up(t, 0);
2474 ptrace_trap_notify(t);
2479 if (likely(!current->ptrace)) {
2483 * If there are no other threads in the group, or if there
2484 * is a group stop in progress and we are the last to stop,
2485 * report to the parent.
2487 if (task_participate_group_stop(current))
2488 notify = CLD_STOPPED;
2490 current->jobctl |= JOBCTL_STOPPED;
2491 set_special_state(TASK_STOPPED);
2492 spin_unlock_irq(&current->sighand->siglock);
2495 * Notify the parent of the group stop completion. Because
2496 * we're not holding either the siglock or tasklist_lock
2497 * here, a ptracer may attach in between; however, this is for
2498 * group stop and should always be delivered to the real
2499 * parent of the group leader. The new ptracer will get
2500 * its notification when this task transitions into TASK_TRACED.
2504 read_lock(&tasklist_lock);
2505 do_notify_parent_cldstop(current, false, notify);
2506 read_unlock(&tasklist_lock);
2509 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2510 cgroup_enter_frozen();
2515 * While ptraced, group stop is handled by STOP trap.
2516 * Schedule it and let the caller deal with it.
2518 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2524 * do_jobctl_trap - take care of ptrace jobctl traps
2526 * When PT_SEIZED, it's used for both group stop and explicit
2527 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2528 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2529 * the stop signal; otherwise, %SIGTRAP.
2531 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2532 * number as exit_code and no siginfo.
2535 * Must be called with @current->sighand->siglock held, which may be
2536 * released and re-acquired before returning with intervening sleep.
2538 static void do_jobctl_trap(void)
2540 struct signal_struct *signal = current->signal;
2541 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2543 if (current->ptrace & PT_SEIZED) {
2544 if (!signal->group_stop_count &&
2545 !(signal->flags & SIGNAL_STOP_STOPPED))
2546 signr = SIGTRAP;
2547 WARN_ON_ONCE(!signr);
2548 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2549 CLD_STOPPED, 0);
2550 } else {
2551 WARN_ON_ONCE(!signr);
2552 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2557 * do_freezer_trap - handle the freezer jobctl trap
2559 * Puts the task into the frozen state, unless the task is about to quit;
2560 * in that case it just drops JOBCTL_TRAP_FREEZE.
2563 * Must be called with @current->sighand->siglock held,
2564 * which is always released before returning.
2566 static void do_freezer_trap(void)
2567 __releases(&current->sighand->siglock)
2570 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2571 * let's make another loop to give it a chance to be handled.
2572 * In any case, we'll return.
2574 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2575 JOBCTL_TRAP_FREEZE) {
2576 spin_unlock_irq(&current->sighand->siglock);
2581 * Now we're sure that there is no pending fatal signal and no
2582 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2583 * immediately (if there is a non-fatal signal pending), and
2584 * put the task to sleep.
2586 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
2587 clear_thread_flag(TIF_SIGPENDING);
2588 spin_unlock_irq(&current->sighand->siglock);
2589 cgroup_enter_frozen();
2593 static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
2596 * We do not check sig_kernel_stop(signr) but set this marker
2597 * unconditionally because we do not know whether the debugger will
2598 * change signr. This flag has no meaning unless we are going
2599 * to stop after return from ptrace_stop(). In this case it will
2600 * be checked in do_signal_stop(); we should only stop if it was
2601 * not cleared by SIGCONT while we were sleeping. See also the
2602 * comment in dequeue_signal().
2604 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2605 signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);
2607 /* We're back. Did the debugger cancel the sig? */
2612 * Update the siginfo structure if the signal has
2613 * changed. If the debugger wanted something
2614 * specific in the siginfo structure then it should
2615 * have updated *info via PTRACE_SETSIGINFO.
2617 if (signr != info->si_signo) {
2618 clear_siginfo(info);
2619 info->si_signo = signr;
2621 info->si_code = SI_USER;
2623 info->si_pid = task_pid_vnr(current->parent);
2624 info->si_uid = from_kuid_munged(current_user_ns(),
2625 task_uid(current->parent));
2629 /* If the (new) signal is now blocked, requeue it. */
2630 if (sigismember(&current->blocked, signr) ||
2631 fatal_signal_pending(current)) {
2632 send_signal_locked(signr, info, current, type);
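/*
 * [Editor's illustration -- not part of this file] A rough userspace sketch
 * of the tracer side that ptrace_signal() above is talking to: the debugger
 * observes the signal-delivery stop via waitpid(), and the "data" argument
 * of PTRACE_CONT decides which signal (if any) the tracee resumes with.
 * Error handling is omitted for brevity.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {				/* tracee */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGUSR1);			/* traps to the tracer */
		pause();
		_exit(0);
	}

	int status;

	waitpid(pid, &status, 0);		/* signal-delivery stop */
	if (WIFSTOPPED(status))
		printf("tracee stopped by signal %d\n", WSTOPSIG(status));

	/* resume with signal 0: the pending SIGUSR1 is cancelled */
	ptrace(PTRACE_CONT, pid, NULL, (void *)0L);

	kill(pid, SIGKILL);
	waitpid(pid, &status, 0);
	return 0;
}
#endif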
2639 static void hide_si_addr_tag_bits(struct ksignal *ksig)
2641 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2642 case SIL_FAULT:
2643 case SIL_FAULT_TRAPNO:
2644 case SIL_FAULT_MCEERR:
2645 case SIL_FAULT_BNDERR:
2646 case SIL_FAULT_PKUERR:
2647 case SIL_FAULT_PERF_EVENT:
2648 ksig->info.si_addr = arch_untagged_si_addr(
2649 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2661 bool get_signal(struct ksignal *ksig)
2663 struct sighand_struct *sighand = current->sighand;
2664 struct signal_struct *signal = current->signal;
2667 clear_notify_signal();
2668 if (unlikely(task_work_pending(current)))
2669 task_work_run();
2671 if (!task_sigpending(current))
2672 return false;
2674 if (unlikely(uprobe_deny_signal()))
2675 return false;
2678 * Do this once; we can't return to user-mode if freezing() == T.
2679 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2680 * thus do not need another check after return.
2685 spin_lock_irq(&sighand->siglock);
2688 * Every stopped thread goes here after wakeup. Check to see if
2689 * we should notify the parent; prepare_signal(SIGCONT) encodes
2690 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2692 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2695 if (signal->flags & SIGNAL_CLD_CONTINUED)
2696 why = CLD_CONTINUED;
2697 else
2698 why = CLD_STOPPED;
2700 signal->flags &= ~SIGNAL_CLD_MASK;
2702 spin_unlock_irq(&sighand->siglock);
2705 * Notify the parent that we're continuing. This event is
2706 * always per-process and doesn't make a whole lot of sense
2707 * for ptracers, who shouldn't consume the state via
2708 * wait(2) either, but, for backward compatibility, notify
2709 * the ptracer of the group leader too unless it's going to be
2710 * a duplicate.
2712 read_lock(&tasklist_lock);
2713 do_notify_parent_cldstop(current, false, why);
2715 if (ptrace_reparented(current->group_leader))
2716 do_notify_parent_cldstop(current->group_leader,
2717 true, why);
2718 read_unlock(&tasklist_lock);
2724 struct k_sigaction *ka;
2727 /* Has this task already been marked for death? */
2728 if ((signal->flags & SIGNAL_GROUP_EXIT) ||
2729 signal->group_exec_task) {
2730 clear_siginfo(&ksig->info);
2731 ksig->info.si_signo = signr = SIGKILL;
2732 sigdelset(&current->pending.signal, SIGKILL);
2733 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2734 &sighand->action[SIGKILL - 1]);
2735 recalc_sigpending();
2739 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2743 if (unlikely(current->jobctl &
2744 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2745 if (current->jobctl & JOBCTL_TRAP_MASK) {
2746 do_jobctl_trap();
2747 spin_unlock_irq(&sighand->siglock);
2748 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2749 do_freezer_trap();
2755 * If the task is leaving the frozen state, let's update
2756 * cgroup counters and reset the frozen bit.
2758 if (unlikely(cgroup_task_frozen(current))) {
2759 spin_unlock_irq(&sighand->siglock);
2760 cgroup_leave_frozen(false);
2765 * Signals generated by the execution of an instruction
2766 * need to be delivered before any other pending signals
2767 * so that the instruction pointer in the signal stack
2768 * frame points to the faulting instruction.
2771 signr = dequeue_synchronous_signal(&ksig->info);
2772 if (!signr)
2773 signr = dequeue_signal(current, &current->blocked,
2774 &ksig->info, &type);
2777 break; /* will return 0 */
2779 if (unlikely(current->ptrace) && (signr != SIGKILL) &&
2780 !(sighand->action[signr-1].sa.sa_flags & SA_IMMUTABLE)) {
2781 signr = ptrace_signal(signr, &ksig->info, type);
2786 ka = &sighand->action[signr-1];
2788 /* Trace actually delivered signals. */
2789 trace_signal_deliver(signr, &ksig->info, ka);
2791 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2793 if (ka->sa.sa_handler != SIG_DFL) {
2794 /* Run the handler. */
2797 if (ka->sa.sa_flags & SA_ONESHOT)
2798 ka->sa.sa_handler = SIG_DFL;
2800 break; /* will return non-zero "signr" value */
2804 * Now we are doing the default action for this signal.
2806 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2810 * Global init gets no signals it doesn't want.
2811 * Container-init gets no signals it doesn't want from the same container.
2814 * Note that if global/container-init sees a sig_kernel_only()
2815 * signal here, the signal must have been generated internally
2816 * or must have come from an ancestor namespace. In either
2817 * case, the signal cannot be dropped.
2819 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2820 !sig_kernel_only(signr))
2823 if (sig_kernel_stop(signr)) {
2825 * The default action is to stop all threads in
2826 * the thread group. The job control signals
2827 * do nothing in an orphaned pgrp, but SIGSTOP
2828 * always works. Note that siglock needs to be
2829 * dropped during the call to is_orphaned_pgrp()
2830 * because of lock ordering with tasklist_lock.
2831 * This allows an intervening SIGCONT to be posted.
2832 * We need to check for that and bail out if necessary.
2834 if (signr != SIGSTOP) {
2835 spin_unlock_irq(&sighand->siglock);
2837 /* signals can be posted during this window */
2839 if (is_current_pgrp_orphaned())
2842 spin_lock_irq(&sighand->siglock);
2845 if (likely(do_signal_stop(ksig->info.si_signo))) {
2846 /* It released the siglock. */
2851 * We didn't actually stop, due to a race
2852 * with SIGCONT or something like that.
2858 spin_unlock_irq(&sighand->siglock);
2859 if (unlikely(cgroup_task_frozen(current)))
2860 cgroup_leave_frozen(true);
2863 * Anything else is fatal, maybe with a core dump.
2865 current->flags |= PF_SIGNALED;
2867 if (sig_kernel_coredump(signr)) {
2868 if (print_fatal_signals)
2869 print_fatal_signal(ksig->info.si_signo);
2870 proc_coredump_connector(current);
2872 * If it was able to dump core, this kills all
2873 * other threads in the group and synchronizes with
2874 * their demise. If we lost the race with another
2875 * thread getting here, it set group_exit_code
2876 * first and our do_group_exit call below will use
2877 * that value and ignore the one we pass it.
2879 do_coredump(&ksig->info);
2883 * PF_USER_WORKER threads will catch and exit on fatal signals
2884 * themselves. They have cleanup that must be performed, so
2885 * we cannot call do_exit() on their behalf.
2887 if (current->flags & PF_USER_WORKER)
2891 * Death signals, no core dump.
2893 do_group_exit(ksig->info.si_signo);
2896 spin_unlock_irq(&sighand->siglock);
2900 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2901 hide_si_addr_tag_bits(ksig);
2903 return ksig->sig > 0;
2907 * signal_delivered - called after signal delivery to update blocked signals
2908 * @ksig: kernel signal struct
2909 * @stepping: nonzero if debugger single-step or block-step in use
2911 * This function should be called when a signal has successfully been
2912 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2913 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
2914 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
2916 static void signal_delivered(struct ksignal *ksig, int stepping)
2920 /* A signal was successfully delivered, and the
2921 saved sigmask was stored on the signal frame,
2922 and will be restored by sigreturn. So we can
2923 simply clear the restore sigmask flag. */
2924 clear_restore_sigmask();
2926 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2927 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2928 sigaddset(&blocked, ksig->sig);
2929 set_current_blocked(&blocked);
2930 if (current->sas_ss_flags & SS_AUTODISARM)
2931 sas_ss_reset(current);
2932 if (stepping)
2933 ptrace_notify(SIGTRAP, 0);
2936 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2938 if (failed)
2939 force_sigsegv(ksig->sig);
2940 else
2941 signal_delivered(ksig, stepping);
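/*
 * [Editor's illustration -- not part of this file] What the blocking rules
 * implemented above look like from userspace: while the handler runs,
 * SIGUSR1 (the delivered signal, since SA_NODEFER is unset) and SIGUSR2
 * (added to sa_mask) are both blocked. printf() in a handler is not
 * async-signal-safe and is used here only for the demo.
 */
#if 0
#include <signal.h>
#include <stdio.h>

static void handler(int sig)
{
	sigset_t cur;

	(void)sig;
	sigprocmask(SIG_BLOCK, NULL, &cur);
	printf("in handler: USR1 blocked=%d, USR2 blocked=%d\n",
	       sigismember(&cur, SIGUSR1), sigismember(&cur, SIGUSR2));
}

int main(void)
{
	struct sigaction sa = { .sa_handler = handler };

	sigemptyset(&sa.sa_mask);
	sigaddset(&sa.sa_mask, SIGUSR2);	/* extra signal to block */
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}
#endif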
2945 * It could be that complete_signal() picked us to notify about the
2946 * group-wide signal. Other threads should be notified now to take
2947 * the shared signals in @which since we will not.
2949 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2952 struct task_struct *t;
2954 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2955 if (sigisemptyset(&retarget))
2958 for_other_threads(tsk, t) {
2959 if (t->flags & PF_EXITING)
2962 if (!has_pending_signals(&retarget, &t->blocked))
2964 /* Remove the signals this thread can handle. */
2965 sigandsets(&retarget, &retarget, &t->blocked);
2967 if (!task_sigpending(t))
2968 signal_wake_up(t, 0);
2970 if (sigisemptyset(&retarget))
2975 void exit_signals(struct task_struct *tsk)
2981 * @tsk is about to have PF_EXITING set - lock out users which
2982 * expect stable threadgroup.
2984 cgroup_threadgroup_change_begin(tsk);
2986 if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
2987 sched_mm_cid_exit_signals(tsk);
2988 tsk->flags |= PF_EXITING;
2989 cgroup_threadgroup_change_end(tsk);
2993 spin_lock_irq(&tsk->sighand->siglock);
2995 * From now this task is not visible for group-wide signals,
2996 * see wants_signal(), do_signal_stop().
2998 sched_mm_cid_exit_signals(tsk);
2999 tsk->flags |= PF_EXITING;
3001 cgroup_threadgroup_change_end(tsk);
3003 if (!task_sigpending(tsk))
3006 unblocked = tsk->blocked;
3007 signotset(&unblocked);
3008 retarget_shared_pending(tsk, &unblocked);
3010 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
3011 task_participate_group_stop(tsk))
3012 group_stop = CLD_STOPPED;
3014 spin_unlock_irq(&tsk->sighand->siglock);
3017 * If group stop has completed, deliver the notification. This
3018 * should always go to the real parent of the group leader.
3020 if (unlikely(group_stop)) {
3021 read_lock(&tasklist_lock);
3022 do_notify_parent_cldstop(tsk, false, group_stop);
3023 read_unlock(&tasklist_lock);
3028 * System call entry points.
3032 * sys_restart_syscall - restart a system call
3034 SYSCALL_DEFINE0(restart_syscall)
3036 struct restart_block *restart = &current->restart_block;
3037 return restart->fn(restart);
3040 long do_no_restart_syscall(struct restart_block *param)
3045 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3047 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
3048 sigset_t newblocked;
3049 /* A set of now blocked but previously unblocked signals. */
3050 sigandnsets(&newblocked, newset, &current->blocked);
3051 retarget_shared_pending(tsk, &newblocked);
3053 tsk->blocked = *newset;
3054 recalc_sigpending();
3058 * set_current_blocked - change current->blocked mask
3061 * It is wrong to change ->blocked directly; this helper should be used
3062 * to ensure the process can't miss a shared signal we are going to block.
3064 void set_current_blocked(sigset_t *newset)
3066 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
3067 __set_current_blocked(newset);
3070 void __set_current_blocked(const sigset_t *newset)
3072 struct task_struct *tsk = current;
3075 * In case the signal mask hasn't changed, there is nothing we need
3076 * to do. current->blocked shouldn't be modified by another task.
3078 if (sigequalsets(&tsk->blocked, newset))
3081 spin_lock_irq(&tsk->sighand->siglock);
3082 __set_task_blocked(tsk, newset);
3083 spin_unlock_irq(&tsk->sighand->siglock);
3087 * This is also useful for kernel threads that want to temporarily
3088 * (or permanently) block certain signals.
3090 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3091 * interface happily blocks "unblockable" signals like SIGKILL and friends.
3094 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3096 struct task_struct *tsk = current;
3099 /* Lockless, only current can change ->blocked, never from irq */
3101 *oldset = tsk->blocked;
3105 sigorsets(&newset, &tsk->blocked, set);
3108 sigandnsets(&newset, &tsk->blocked, set);
3117 __set_current_blocked(&newset);
3120 EXPORT_SYMBOL(sigprocmask);
3123 * This API helps set app-provided sigmasks.
3125 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3126 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
3128 * Note that it does set_restore_sigmask() in advance, so it must always be
3129 * paired with restore_saved_sigmask_unless() before return from syscall.
3131 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
3137 if (sigsetsize != sizeof(sigset_t))
3139 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
3142 set_restore_sigmask();
3143 current->saved_sigmask = current->blocked;
3144 set_current_blocked(&kmask);
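/*
 * [Editor's illustration -- not part of this file] The userspace pattern
 * this helper serves: ppoll() atomically installs the caller's sigmask for
 * the duration of the wait, so SIGINT can only be taken while sleeping in
 * the call. A no-op handler is installed so delivery doesn't kill the demo.
 */
#if 0
#define _GNU_SOURCE
#include <errno.h>
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_int(int sig) { (void)sig; }

int main(void)
{
	sigset_t block, during;
	struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
	struct sigaction sa = { .sa_handler = on_int };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGINT, &sa, NULL);

	/* keep SIGINT blocked during normal operation ... */
	sigemptyset(&block);
	sigaddset(&block, SIGINT);
	sigprocmask(SIG_BLOCK, &block, &during);

	/* ... and let it in only while sleeping inside ppoll() */
	sigdelset(&during, SIGINT);
	if (ppoll(&pfd, 1, NULL, &during) < 0 && errno == EINTR)
		printf("signal taken inside ppoll()\n");
	return 0;
}
#endif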
3149 #ifdef CONFIG_COMPAT
3150 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
3157 if (sigsetsize != sizeof(compat_sigset_t))
3159 if (get_compat_sigset(&kmask, umask))
3162 set_restore_sigmask();
3163 current->saved_sigmask = current->blocked;
3164 set_current_blocked(&kmask);
3171 * sys_rt_sigprocmask - change the list of currently blocked signals
3172 * @how: whether to add, remove, or set signals
3173 * @nset: new set of blocked signals, or NULL to leave the mask unchanged
3174 * @oset: previous value of signal mask if non-null
3175 * @sigsetsize: size of sigset_t type
3177 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3178 sigset_t __user *, oset, size_t, sigsetsize)
3180 sigset_t old_set, new_set;
3183 /* XXX: Don't preclude handling different sized sigset_t's. */
3184 if (sigsetsize != sizeof(sigset_t))
3187 old_set = current->blocked;
3190 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3192 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3194 error = sigprocmask(how, &new_set, NULL);
3200 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3207 #ifdef CONFIG_COMPAT
3208 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3209 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3211 sigset_t old_set = current->blocked;
3213 /* XXX: Don't preclude handling different sized sigset_t's. */
3214 if (sigsetsize != sizeof(sigset_t))
3220 if (get_compat_sigset(&new_set, nset))
3222 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3224 error = sigprocmask(how, &new_set, NULL);
3228 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3232 static void do_sigpending(sigset_t *set)
3234 spin_lock_irq(&current->sighand->siglock);
3235 sigorsets(set, &current->pending.signal,
3236 &current->signal->shared_pending.signal);
3237 spin_unlock_irq(&current->sighand->siglock);
3239 /* Outside the lock because only this thread touches it. */
3240 sigandsets(set, &current->blocked, set);
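/*
 * [Editor's illustration -- not part of this file] The userspace view of
 * the union computed above: a signal raised while blocked shows up in
 * sigpending() until it is unblocked or, per the POSIX rule quoted in
 * do_sigaction() below, discarded by switching the action to SIG_IGN.
 */
#if 0
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pend;

	sigemptyset(&block);
	sigaddset(&block, SIGTERM);
	sigprocmask(SIG_BLOCK, &block, NULL);

	raise(SIGTERM);			/* pends; delivery is blocked */

	sigpending(&pend);
	printf("SIGTERM pending: %d\n", sigismember(&pend, SIGTERM));

	signal(SIGTERM, SIG_IGN);	/* discards the pending signal */
	sigprocmask(SIG_UNBLOCK, &block, NULL);
	return 0;
}
#endif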
3244 * sys_rt_sigpending - examine pending signals that have been raised while blocked
3246 * @uset: stores pending signals
3247 * @sigsetsize: size of sigset_t type or larger
3249 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3253 if (sigsetsize > sizeof(*uset))
3256 do_sigpending(&set);
3258 if (copy_to_user(uset, &set, sigsetsize))
3264 #ifdef CONFIG_COMPAT
3265 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3266 compat_size_t, sigsetsize)
3270 if (sigsetsize > sizeof(*uset))
3273 do_sigpending(&set);
3275 return put_compat_sigset(uset, &set, sigsetsize);
3279 static const struct {
3280 unsigned char limit, layout;
3281 } sig_sicodes[] = {
3282 [SIGILL] = { NSIGILL, SIL_FAULT },
3283 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3284 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3285 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3286 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3288 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3290 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3291 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3292 [SIGSYS] = { NSIGSYS, SIL_SYS },
3295 static bool known_siginfo_layout(unsigned sig, int si_code)
3297 if (si_code == SI_KERNEL)
3299 else if (si_code > SI_USER) {
3300 if (sig_specific_sicodes(sig)) {
3301 if (si_code <= sig_sicodes[sig].limit)
3304 else if (si_code <= NSIGPOLL)
3307 else if (si_code >= SI_DETHREAD)
3309 else if (si_code == SI_ASYNCNL)
3314 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3316 enum siginfo_layout layout = SIL_KILL;
3317 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3318 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3319 (si_code <= sig_sicodes[sig].limit)) {
3320 layout = sig_sicodes[sig].layout;
3321 /* Handle the exceptions */
3322 if ((sig == SIGBUS) &&
3323 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3324 layout = SIL_FAULT_MCEERR;
3325 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3326 layout = SIL_FAULT_BNDERR;
3328 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3329 layout = SIL_FAULT_PKUERR;
3331 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3332 layout = SIL_FAULT_PERF_EVENT;
3333 else if (IS_ENABLED(CONFIG_SPARC) &&
3334 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3335 layout = SIL_FAULT_TRAPNO;
3336 else if (IS_ENABLED(CONFIG_ALPHA) &&
3337 ((sig == SIGFPE) ||
3338 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
3339 layout = SIL_FAULT_TRAPNO;
3341 else if (si_code <= NSIGPOLL)
3344 if (si_code == SI_TIMER)
3346 else if (si_code == SI_SIGIO)
3348 else if (si_code < 0)
3354 static inline char __user *si_expansion(const siginfo_t __user *info)
3356 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3359 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3361 char __user *expansion = si_expansion(to);
3362 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3364 if (clear_user(expansion, SI_EXPANSION_SIZE))
3369 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3370 const siginfo_t __user *from)
3372 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3373 char __user *expansion = si_expansion(from);
3374 char buf[SI_EXPANSION_SIZE];
3377 * An unknown si_code might need more than
3378 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3379 * extra bytes are 0. This guarantees copy_siginfo_to_user
3380 * will return this data to userspace exactly.
3382 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3384 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3392 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3393 const siginfo_t __user *from)
3395 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3397 to->si_signo = signo;
3398 return post_copy_siginfo_from_user(to, from);
3401 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3403 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3405 return post_copy_siginfo_from_user(to, from);
3408 #ifdef CONFIG_COMPAT
3410 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3411 * @to: compat siginfo destination
3412 * @from: kernel siginfo source
3414 * Note: This function does not work properly for SIGCHLD on x32, but
3415 * fortunately it doesn't have to. The only valid callers for this function
3416 * are copy_siginfo_to_user32, which is overridden for x32, and the coredump
3417 * code. The latter does not care because SIGCHLD will never cause a coredump.
3419 void copy_siginfo_to_external32(struct compat_siginfo *to,
3420 const struct kernel_siginfo *from)
3422 memset(to, 0, sizeof(*to));
3424 to->si_signo = from->si_signo;
3425 to->si_errno = from->si_errno;
3426 to->si_code = from->si_code;
3427 switch (siginfo_layout(from->si_signo, from->si_code)) {
3429 to->si_pid = from->si_pid;
3430 to->si_uid = from->si_uid;
3433 to->si_tid = from->si_tid;
3434 to->si_overrun = from->si_overrun;
3435 to->si_int = from->si_int;
3438 to->si_band = from->si_band;
3439 to->si_fd = from->si_fd;
3442 to->si_addr = ptr_to_compat(from->si_addr);
3444 case SIL_FAULT_TRAPNO:
3445 to->si_addr = ptr_to_compat(from->si_addr);
3446 to->si_trapno = from->si_trapno;
3448 case SIL_FAULT_MCEERR:
3449 to->si_addr = ptr_to_compat(from->si_addr);
3450 to->si_addr_lsb = from->si_addr_lsb;
3452 case SIL_FAULT_BNDERR:
3453 to->si_addr = ptr_to_compat(from->si_addr);
3454 to->si_lower = ptr_to_compat(from->si_lower);
3455 to->si_upper = ptr_to_compat(from->si_upper);
3457 case SIL_FAULT_PKUERR:
3458 to->si_addr = ptr_to_compat(from->si_addr);
3459 to->si_pkey = from->si_pkey;
3461 case SIL_FAULT_PERF_EVENT:
3462 to->si_addr = ptr_to_compat(from->si_addr);
3463 to->si_perf_data = from->si_perf_data;
3464 to->si_perf_type = from->si_perf_type;
3465 to->si_perf_flags = from->si_perf_flags;
3468 to->si_pid = from->si_pid;
3469 to->si_uid = from->si_uid;
3470 to->si_status = from->si_status;
3471 to->si_utime = from->si_utime;
3472 to->si_stime = from->si_stime;
3475 to->si_pid = from->si_pid;
3476 to->si_uid = from->si_uid;
3477 to->si_int = from->si_int;
3480 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3481 to->si_syscall = from->si_syscall;
3482 to->si_arch = from->si_arch;
3487 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3488 const struct kernel_siginfo *from)
3490 struct compat_siginfo new;
3492 copy_siginfo_to_external32(&new, from);
3493 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3498 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3499 const struct compat_siginfo *from)
3502 to->si_signo = from->si_signo;
3503 to->si_errno = from->si_errno;
3504 to->si_code = from->si_code;
3505 switch (siginfo_layout(from->si_signo, from->si_code)) {
3507 to->si_pid = from->si_pid;
3508 to->si_uid = from->si_uid;
3511 to->si_tid = from->si_tid;
3512 to->si_overrun = from->si_overrun;
3513 to->si_int = from->si_int;
3516 to->si_band = from->si_band;
3517 to->si_fd = from->si_fd;
3520 to->si_addr = compat_ptr(from->si_addr);
3522 case SIL_FAULT_TRAPNO:
3523 to->si_addr = compat_ptr(from->si_addr);
3524 to->si_trapno = from->si_trapno;
3526 case SIL_FAULT_MCEERR:
3527 to->si_addr = compat_ptr(from->si_addr);
3528 to->si_addr_lsb = from->si_addr_lsb;
3530 case SIL_FAULT_BNDERR:
3531 to->si_addr = compat_ptr(from->si_addr);
3532 to->si_lower = compat_ptr(from->si_lower);
3533 to->si_upper = compat_ptr(from->si_upper);
3535 case SIL_FAULT_PKUERR:
3536 to->si_addr = compat_ptr(from->si_addr);
3537 to->si_pkey = from->si_pkey;
3539 case SIL_FAULT_PERF_EVENT:
3540 to->si_addr = compat_ptr(from->si_addr);
3541 to->si_perf_data = from->si_perf_data;
3542 to->si_perf_type = from->si_perf_type;
3543 to->si_perf_flags = from->si_perf_flags;
3546 to->si_pid = from->si_pid;
3547 to->si_uid = from->si_uid;
3548 to->si_status = from->si_status;
3549 #ifdef CONFIG_X86_X32_ABI
3550 if (in_x32_syscall()) {
3551 to->si_utime = from->_sifields._sigchld_x32._utime;
3552 to->si_stime = from->_sifields._sigchld_x32._stime;
3556 to->si_utime = from->si_utime;
3557 to->si_stime = from->si_stime;
3561 to->si_pid = from->si_pid;
3562 to->si_uid = from->si_uid;
3563 to->si_int = from->si_int;
3566 to->si_call_addr = compat_ptr(from->si_call_addr);
3567 to->si_syscall = from->si_syscall;
3568 to->si_arch = from->si_arch;
3574 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3575 const struct compat_siginfo __user *ufrom)
3577 struct compat_siginfo from;
3579 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3582 from.si_signo = signo;
3583 return post_copy_siginfo_from_user32(to, &from);
3586 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3587 const struct compat_siginfo __user *ufrom)
3589 struct compat_siginfo from;
3591 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3594 return post_copy_siginfo_from_user32(to, &from);
3596 #endif /* CONFIG_COMPAT */
3599 * do_sigtimedwait - wait for queued signals specified in @which
3600 * @which: queued signals to wait for
3601 * @info: if non-null, the signal's siginfo is returned here
3602 * @ts: upper bound on process time suspension
3604 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3605 const struct timespec64 *ts)
3607 ktime_t *to = NULL, timeout = KTIME_MAX;
3608 struct task_struct *tsk = current;
3609 sigset_t mask = *which;
3614 if (!timespec64_valid(ts))
3616 timeout = timespec64_to_ktime(*ts);
3621 * Invert the set of allowed signals to get those we want to block.
3623 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3624 signotset(&mask);
3626 spin_lock_irq(&tsk->sighand->siglock);
3627 sig = dequeue_signal(tsk, &mask, info, &type);
3628 if (!sig && timeout) {
3630 * None ready, temporarily unblock those we're interested in
3631 * while we are sleeping so that we'll be awakened when
3632 * they arrive. Unblocking is always fine; we can avoid
3633 * set_current_blocked().
3635 tsk->real_blocked = tsk->blocked;
3636 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3637 recalc_sigpending();
3638 spin_unlock_irq(&tsk->sighand->siglock);
3640 __set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
3641 ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3642 HRTIMER_MODE_REL);
3643 spin_lock_irq(&tsk->sighand->siglock);
3644 __set_task_blocked(tsk, &tsk->real_blocked);
3645 sigemptyset(&tsk->real_blocked);
3646 sig = dequeue_signal(tsk, &mask, info, &type);
3648 spin_unlock_irq(&tsk->sighand->siglock);
3652 return ret ? -EINTR : -EAGAIN;
3656 * sys_rt_sigtimedwait - synchronously wait for queued signals specified in @uthese
3658 * @uthese: queued signals to wait for
3659 * @uinfo: if non-null, the signal's siginfo is returned here
3660 * @uts: upper bound on process time suspension
3661 * @sigsetsize: size of sigset_t type
3663 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3664 siginfo_t __user *, uinfo,
3665 const struct __kernel_timespec __user *, uts,
3669 struct timespec64 ts;
3670 kernel_siginfo_t info;
3673 /* XXX: Don't preclude handling different sized sigset_t's. */
3674 if (sigsetsize != sizeof(sigset_t))
3677 if (copy_from_user(&these, uthese, sizeof(these)))
3681 if (get_timespec64(&ts, uts))
3685 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3687 if (ret > 0 && uinfo) {
3688 if (copy_siginfo_to_user(uinfo, &info))
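/*
 * [Editor's illustration -- not part of this file] Waiting synchronously
 * for SIGUSR1 with a two-second timeout via the libc wrapper of this
 * syscall. The signal must be blocked first, or it would be delivered the
 * usual way instead of being dequeued here; EAGAIN indicates a timeout,
 * as in do_sigtimedwait() above.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t si;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);		/* pend one so the wait returns at once */

	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
		printf("got SIGUSR1 from pid %d\n", (int)si.si_pid);
	else
		perror("sigtimedwait");
	return 0;
}
#endif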
3695 #ifdef CONFIG_COMPAT_32BIT_TIME
3696 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3697 siginfo_t __user *, uinfo,
3698 const struct old_timespec32 __user *, uts,
3702 struct timespec64 ts;
3703 kernel_siginfo_t info;
3706 if (sigsetsize != sizeof(sigset_t))
3709 if (copy_from_user(&these, uthese, sizeof(these)))
3713 if (get_old_timespec32(&ts, uts))
3717 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3719 if (ret > 0 && uinfo) {
3720 if (copy_siginfo_to_user(uinfo, &info))
3728 #ifdef CONFIG_COMPAT
3729 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3730 struct compat_siginfo __user *, uinfo,
3731 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3734 struct timespec64 t;
3735 kernel_siginfo_t info;
3738 if (sigsetsize != sizeof(sigset_t))
3741 if (get_compat_sigset(&s, uthese))
3745 if (get_timespec64(&t, uts))
3749 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3751 if (ret > 0 && uinfo) {
3752 if (copy_siginfo_to_user32(uinfo, &info))
3759 #ifdef CONFIG_COMPAT_32BIT_TIME
3760 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3761 struct compat_siginfo __user *, uinfo,
3762 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3765 struct timespec64 t;
3766 kernel_siginfo_t info;
3769 if (sigsetsize != sizeof(sigset_t))
3772 if (get_compat_sigset(&s, uthese))
3776 if (get_old_timespec32(&t, uts))
3780 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3782 if (ret > 0 && uinfo) {
3783 if (copy_siginfo_to_user32(uinfo, &info))
3792 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3794 clear_siginfo(info);
3795 info->si_signo = sig;
3797 info->si_code = SI_USER;
3798 info->si_pid = task_tgid_vnr(current);
3799 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3803 * sys_kill - send a signal to a process
3804 * @pid: the PID of the process
3805 * @sig: signal to be sent
3807 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3809 struct kernel_siginfo info;
3811 prepare_kill_siginfo(sig, &info);
3813 return kill_something_info(sig, &info, pid);
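/*
 * [Editor's illustration -- not part of this file] Signal 0 runs the
 * permission and existence checks without delivering anything, matching
 * the "null signal" comment in do_send_specific() below.
 */
#if 0
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	pid_t pid = (argc > 1) ? (pid_t)atoi(argv[1]) : getpid();

	if (kill(pid, 0) == 0)
		printf("%d exists and is signalable\n", (int)pid);
	else if (errno == ESRCH)
		printf("%d does not exist\n", (int)pid);
	else if (errno == EPERM)
		printf("%d exists but we may not signal it\n", (int)pid);
	return 0;
}
#endif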
3817 * Verify that the signaler and signalee are either in the same pid namespace
3818 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3819 * namespace.
3821 static bool access_pidfd_pidns(struct pid *pid)
3823 struct pid_namespace *active = task_active_pid_ns(current);
3824 struct pid_namespace *p = ns_of_pid(pid);
3837 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3838 siginfo_t __user *info)
3840 #ifdef CONFIG_COMPAT
3842 * Avoid hooking up compat syscalls and instead handle necessary
3843 * conversions here. Note, this is a stop-gap measure and should not be
3844 * considered a generic solution.
3846 if (in_compat_syscall())
3847 return copy_siginfo_from_user32(
3848 kinfo, (struct compat_siginfo __user *)info);
3850 return copy_siginfo_from_user(kinfo, info);
3853 static struct pid *pidfd_to_pid(const struct file *file)
3857 pid = pidfd_pid(file);
3861 return tgid_pidfd_to_pid(file);
3865 * sys_pidfd_send_signal - Signal a process through a pidfd
3866 * @pidfd: file descriptor of the process
3867 * @sig: signal to send
3868 * @info: signal info
3869 * @flags: future flags
3871 * The syscall currently only signals via PIDTYPE_PID which covers
3872 * kill(<positive-pid>, <signal>). It does not signal threads or process
3873 * groups.
3874 * In order to extend the syscall to threads and process groups the @flags
3875 * argument should be used. In essence, the @flags argument will determine
3876 * what is signaled and not the file descriptor itself. Put another way,
3877 * grouping is a property of the flags argument, not a property of the file
3878 * descriptor.
3880 * Return: 0 on success, negative errno on failure
3882 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3883 siginfo_t __user *, info, unsigned int, flags)
3888 kernel_siginfo_t kinfo;
3890 /* Enforce flags be set to 0 until we add an extension. */
3898 /* Is this a pidfd? */
3899 pid = pidfd_to_pid(f.file);
3906 if (!access_pidfd_pidns(pid))
3910 ret = copy_siginfo_from_user_any(&kinfo, info);
3915 if (unlikely(sig != kinfo.si_signo))
3918 /* Only allow sending arbitrary signals to yourself. */
3920 if ((task_pid(current) != pid) &&
3921 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3924 prepare_kill_siginfo(sig, &kinfo);
3927 ret = kill_pid_info(sig, &kinfo, pid);
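/*
 * [Editor's illustration -- not part of this file] Signalling a child
 * through a pidfd. Glibc may not provide wrappers, so raw syscall(2) is
 * used; SYS_pidfd_open needs Linux >= 5.3 and SYS_pidfd_send_signal
 * Linux >= 5.1. Passing a NULL siginfo makes the kernel fill in SI_USER
 * data, just as kill(2) would.
 */
#if 0
#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/wait.h>
#include <signal.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {
		pause();
		_exit(0);
	}

	int pidfd = syscall(SYS_pidfd_open, pid, 0);

	syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
	waitpid(pid, NULL, 0);
	close(pidfd);
	return 0;
}
#endif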
3935 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3937 struct task_struct *p;
3941 p = find_task_by_vpid(pid);
3942 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3943 error = check_kill_permission(sig, info, p);
3945 * The null signal is a permissions and process existence
3946 * probe. No signal is actually delivered.
3948 if (!error && sig) {
3949 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3951 * If lock_task_sighand() failed we pretend the task
3952 * dies after receiving the signal. The window is tiny,
3953 * and the signal is private anyway.
3955 if (unlikely(error == -ESRCH))
3964 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3966 struct kernel_siginfo info;
3968 clear_siginfo(&info);
3969 info.si_signo = sig;
3971 info.si_code = SI_TKILL;
3972 info.si_pid = task_tgid_vnr(current);
3973 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3975 return do_send_specific(tgid, pid, sig, &info);
3979 * sys_tgkill - send signal to one specific thread
3980 * @tgid: the thread group ID of the thread
3981 * @pid: the PID of the thread
3982 * @sig: signal to be sent
3984 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3985 * exists but no longer belongs to the target process. This
3986 * solves the problem of threads exiting and PIDs getting reused.
3988 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3990 /* This is only valid for single tasks */
3991 if (pid <= 0 || tgid <= 0)
3994 return do_tkill(tgid, pid, sig);
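/*
 * [Editor's illustration -- not part of this file] Directing a signal at
 * one specific thread. gettid() needs glibc >= 2.30; older libcs can use
 * syscall(SYS_gettid) instead. Signal 0 is used as a pure existence probe.
 */
#if 0
#define _GNU_SOURCE
#include <signal.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* in a single-threaded process the thread ID equals the PID */
	pid_t tgid = getpid();
	pid_t tid = gettid();

	/* fails with ESRCH if @tid exists but belongs to another process */
	return syscall(SYS_tgkill, tgid, tid, 0) ? 1 : 0;
}
#endif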
3998 * sys_tkill - send signal to one specific task
3999 * @pid: the PID of the task
4000 * @sig: signal to be sent
4002 * Send a signal to only one task, even if it's a CLONE_THREAD task.
4004 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
4006 /* This is only valid for single tasks */
4010 return do_tkill(0, pid, sig);
4013 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
4015 /* Not even root can pretend to send signals from the kernel.
4016 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4018 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4019 (task_pid_vnr(current) != pid))
4022 /* POSIX.1b doesn't mention process groups. */
4023 return kill_proc_info(sig, info, pid);
4027 * sys_rt_sigqueueinfo - send signal information to a process
4028 * @pid: the PID of the thread
4029 * @sig: signal to be sent
4030 * @uinfo: signal info to be sent
4032 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4033 siginfo_t __user *, uinfo)
4035 kernel_siginfo_t info;
4036 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4039 return do_rt_sigqueueinfo(pid, sig, &info);
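/*
 * [Editor's illustration -- not part of this file] sigqueue(3) is the libc
 * route into this syscall: it sends an SI_QUEUE si_code plus a
 * caller-chosen value, which the restriction above permits because
 * SI_QUEUE is negative. printf() in a handler is for demo purposes only.
 */
#if 0
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *si, void *ctx)
{
	(void)sig; (void)ctx;
	printf("value %d from pid %d\n",
	       si->si_value.sival_int, (int)si->si_pid);
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler,
				.sa_flags = SA_SIGINFO };
	union sigval v = { .sival_int = 42 };

	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);
	sigqueue(getpid(), SIGUSR1, v);
	return 0;
}
#endif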
4042 #ifdef CONFIG_COMPAT
4043 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4046 struct compat_siginfo __user *, uinfo)
4048 kernel_siginfo_t info;
4049 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4052 return do_rt_sigqueueinfo(pid, sig, &info);
4056 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
4058 /* This is only valid for single tasks */
4059 if (pid <= 0 || tgid <= 0)
4062 /* Not even root can pretend to send signals from the kernel.
4063 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4065 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4066 (task_pid_vnr(current) != pid))
4069 return do_send_specific(tgid, pid, sig, info);
4072 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4073 siginfo_t __user *, uinfo)
4075 kernel_siginfo_t info;
4076 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4079 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4082 #ifdef CONFIG_COMPAT
4083 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4087 struct compat_siginfo __user *, uinfo)
4089 kernel_siginfo_t info;
4090 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4093 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4098 * For kthreads only; must not be used if cloned with CLONE_SIGHAND
4100 void kernel_sigaction(int sig, __sighandler_t action)
4102 spin_lock_irq(&current->sighand->siglock);
4103 current->sighand->action[sig - 1].sa.sa_handler = action;
4104 if (action == SIG_IGN) {
4105 sigset_t mask;
4107 sigemptyset(&mask);
4108 sigaddset(&mask, sig);
4110 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4111 flush_sigqueue_mask(&mask, &current->pending);
4112 recalc_sigpending();
4114 spin_unlock_irq(&current->sighand->siglock);
4116 EXPORT_SYMBOL(kernel_sigaction);
4118 void __weak sigaction_compat_abi(struct k_sigaction *act,
4119 struct k_sigaction *oact)
4123 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
4125 struct task_struct *p = current, *t;
4126 struct k_sigaction *k;
4129 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
4132 k = &p->sighand->action[sig-1];
4134 spin_lock_irq(&p->sighand->siglock);
4135 if (k->sa.sa_flags & SA_IMMUTABLE) {
4136 spin_unlock_irq(&p->sighand->siglock);
4143 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4144 * e.g. by having an architecture use the bit in their uapi.
4146 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4149 * Clear unknown flag bits in order to allow userspace to detect missing
4150 * support for flag bits and to allow the kernel to use non-uapi bits internally.
4154 act->sa.sa_flags &= UAPI_SA_FLAGS;
4156 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4158 sigaction_compat_abi(act, oact);
4161 sigdelsetmask(&act->sa.sa_mask,
4162 sigmask(SIGKILL) | sigmask(SIGSTOP));
4166 * "Setting a signal action to SIG_IGN for a signal that is
4167 * pending shall cause the pending signal to be discarded,
4168 * whether or not it is blocked."
4170 * "Setting a signal action to SIG_DFL for a signal that is
4171 * pending and whose default action is to ignore the signal
4172 * (for example, SIGCHLD), shall cause the pending signal to
4173 * be discarded, whether or not it is blocked"
4175 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
4177 sigaddset(&mask, sig);
4178 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4179 for_each_thread(p, t)
4180 flush_sigqueue_mask(&mask, &t->pending);
4184 spin_unlock_irq(&p->sighand->siglock);
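/*
 * [Editor's illustration -- not part of this file] The flag clearing above
 * is what lets userspace probe for flag support: register an action with
 * the candidate flag set, read it back, and check whether the kernel
 * preserved the bit. The fallback #defines mirror the generic uapi values
 * and are assumptions for libcs that do not expose these flags yet.
 */
#if 0
#include <signal.h>
#include <stdio.h>

#ifndef SA_UNSUPPORTED
#define SA_UNSUPPORTED		0x00000400
#endif
#ifndef SA_EXPOSE_TAGBITS
#define SA_EXPOSE_TAGBITS	0x00000800
#endif

int main(void)
{
	struct sigaction sa = { .sa_handler = SIG_DFL }, out;

	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_UNSUPPORTED | SA_EXPOSE_TAGBITS;
	sigaction(SIGUSR1, &sa, NULL);
	sigaction(SIGUSR1, NULL, &out);
	printf("SA_EXPOSE_TAGBITS %s\n",
	       (out.sa_flags & SA_EXPOSE_TAGBITS) ? "supported"
						  : "not supported");
	return 0;
}
#endif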
4188 #ifdef CONFIG_DYNAMIC_SIGFRAME
4189 static inline void sigaltstack_lock(void)
4190 __acquires(&current->sighand->siglock)
4192 spin_lock_irq(&current->sighand->siglock);
4195 static inline void sigaltstack_unlock(void)
4196 __releases(&current->sighand->siglock)
4198 spin_unlock_irq(&current->sighand->siglock);
4201 static inline void sigaltstack_lock(void) { }
4202 static inline void sigaltstack_unlock(void) { }
4205 static int
4206 do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
4207 size_t min_ss_size)
4209 struct task_struct *t = current;
4213 memset(oss, 0, sizeof(stack_t));
4214 oss->ss_sp = (void __user *) t->sas_ss_sp;
4215 oss->ss_size = t->sas_ss_size;
4216 oss->ss_flags = sas_ss_flags(sp) |
4217 (current->sas_ss_flags & SS_FLAG_BITS);
4221 void __user *ss_sp = ss->ss_sp;
4222 size_t ss_size = ss->ss_size;
4223 unsigned ss_flags = ss->ss_flags;
4226 if (unlikely(on_sig_stack(sp)))
4229 ss_mode = ss_flags & ~SS_FLAG_BITS;
4230 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4231 ss_mode != SS_AUTODISARM))
4232 return -EINVAL;
4235 * Return before taking any locks if no actual
4236 * sigaltstack changes were requested.
4238 if (t->sas_ss_sp == (unsigned long)ss_sp &&
4239 t->sas_ss_size == ss_size &&
4240 t->sas_ss_flags == ss_flags)
4244 if (ss_mode == SS_DISABLE) {
4248 if (unlikely(ss_size < min_ss_size))
4250 if (!sigaltstack_size_valid(ss_size))
4254 t->sas_ss_sp = (unsigned long) ss_sp;
4255 t->sas_ss_size = ss_size;
4256 t->sas_ss_flags = ss_flags;
4258 sigaltstack_unlock();
4263 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4267 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4269 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4270 current_user_stack_pointer(),
4272 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
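/*
 * [Editor's illustration -- not part of this file] The classic use of this
 * syscall: give SIGSEGV its own stack so its handler can still run after
 * the main stack is unusable. raise(SIGSEGV) stands in for a real fault.
 */
#if 0
#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
	(void)sig;
	write(STDOUT_FILENO, "caught SIGSEGV on alt stack\n", 28);
	_exit(1);
}

int main(void)
{
	stack_t ss = { .ss_sp = malloc(SIGSTKSZ),
		       .ss_size = SIGSTKSZ, .ss_flags = 0 };
	struct sigaction sa = { .sa_handler = on_segv,
				.sa_flags = SA_ONSTACK };

	sigaltstack(&ss, NULL);
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);

	raise(SIGSEGV);
	return 0;
}
#endif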
4277 int restore_altstack(const stack_t __user *uss)
4280 if (copy_from_user(&new, uss, sizeof(stack_t)))
4282 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4284 /* squash all but -EFAULT for now */
4288 int __save_altstack(stack_t __user *uss, unsigned long sp)
4290 struct task_struct *t = current;
4291 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4292 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4293 __put_user(t->sas_ss_size, &uss->ss_size);
4297 #ifdef CONFIG_COMPAT
4298 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4299 compat_stack_t __user *uoss_ptr)
4305 compat_stack_t uss32;
4306 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4308 uss.ss_sp = compat_ptr(uss32.ss_sp);
4309 uss.ss_flags = uss32.ss_flags;
4310 uss.ss_size = uss32.ss_size;
4312 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4313 compat_user_stack_pointer(),
4314 COMPAT_MINSIGSTKSZ);
4315 if (ret >= 0 && uoss_ptr) {
4317 memset(&old, 0, sizeof(old));
4318 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4319 old.ss_flags = uoss.ss_flags;
4320 old.ss_size = uoss.ss_size;
4321 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4327 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4328 const compat_stack_t __user *, uss_ptr,
4329 compat_stack_t __user *, uoss_ptr)
4331 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4334 int compat_restore_altstack(const compat_stack_t __user *uss)
4336 int err = do_compat_sigaltstack(uss, NULL);
4337 /* squash all but -EFAULT for now */
4338 return err == -EFAULT ? err : 0;
4341 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4344 struct task_struct *t = current;
4345 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4347 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4348 __put_user(t->sas_ss_size, &uss->ss_size);
4353 #ifdef __ARCH_WANT_SYS_SIGPENDING
4356 * sys_sigpending - examine pending signals
4357 * @uset: where mask of pending signal is returned
4359 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4363 if (sizeof(old_sigset_t) > sizeof(*uset))
4366 do_sigpending(&set);
4368 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4374 #ifdef CONFIG_COMPAT
4375 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4379 do_sigpending(&set);
4381 return put_user(set.sig[0], set32);
4387 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4389 * sys_sigprocmask - examine and change blocked signals
4390 * @how: whether to add, remove, or set signals
4391 * @nset: signals to add or remove (if non-null)
4392 * @oset: previous value of signal mask if non-null
4394 * Some platforms have their own version with special arguments;
4395 * others support only sys_rt_sigprocmask.
4398 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4399 old_sigset_t __user *, oset)
4401 old_sigset_t old_set, new_set;
4402 sigset_t new_blocked;
4404 old_set = current->blocked.sig[0];
4407 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4410 new_blocked = current->blocked;
4414 sigaddsetmask(&new_blocked, new_set);
4417 sigdelsetmask(&new_blocked, new_set);
4420 new_blocked.sig[0] = new_set;
4426 set_current_blocked(&new_blocked);
4430 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4436 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4438 #ifndef CONFIG_ODD_RT_SIGACTION
4440 * sys_rt_sigaction - alter an action taken by a process
4441 * @sig: signal to be sent
4442 * @act: new sigaction
4443 * @oact: used to save the previous sigaction
4444 * @sigsetsize: size of sigset_t type
4446 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4447 const struct sigaction __user *, act,
4448 struct sigaction __user *, oact,
4451 struct k_sigaction new_sa, old_sa;
4454 /* XXX: Don't preclude handling different sized sigset_t's. */
4455 if (sigsetsize != sizeof(sigset_t))
4458 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4461 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4465 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4470 #ifdef CONFIG_COMPAT
4471 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4472 const struct compat_sigaction __user *, act,
4473 struct compat_sigaction __user *, oact,
4474 compat_size_t, sigsetsize)
4476 struct k_sigaction new_ka, old_ka;
4477 #ifdef __ARCH_HAS_SA_RESTORER
4478 compat_uptr_t restorer;
4482 /* XXX: Don't preclude handling different sized sigset_t's. */
4483 if (sigsetsize != sizeof(compat_sigset_t))
4487 compat_uptr_t handler;
4488 ret = get_user(handler, &act->sa_handler);
4489 new_ka.sa.sa_handler = compat_ptr(handler);
4490 #ifdef __ARCH_HAS_SA_RESTORER
4491 ret |= get_user(restorer, &act->sa_restorer);
4492 new_ka.sa.sa_restorer = compat_ptr(restorer);
4494 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4495 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4500 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4502 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4504 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4505 sizeof(oact->sa_mask));
4506 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4507 #ifdef __ARCH_HAS_SA_RESTORER
4508 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4509 &oact->sa_restorer);
4515 #endif /* !CONFIG_ODD_RT_SIGACTION */
4517 #ifdef CONFIG_OLD_SIGACTION
4518 SYSCALL_DEFINE3(sigaction, int, sig,
4519 const struct old_sigaction __user *, act,
4520 struct old_sigaction __user *, oact)
4522 struct k_sigaction new_ka, old_ka;
4527 if (!access_ok(act, sizeof(*act)) ||
4528 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4529 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4530 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4531 __get_user(mask, &act->sa_mask))
4533 #ifdef __ARCH_HAS_KA_RESTORER
4534 new_ka.ka_restorer = NULL;
4536 siginitset(&new_ka.sa.sa_mask, mask);
4539 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4542 if (!access_ok(oact, sizeof(*oact)) ||
4543 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4544 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4545 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4546 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4553 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4554 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4555 const struct compat_old_sigaction __user *, act,
4556 struct compat_old_sigaction __user *, oact)
4558 struct k_sigaction new_ka, old_ka;
4560 compat_old_sigset_t mask;
4561 compat_uptr_t handler, restorer;
4564 if (!access_ok(act, sizeof(*act)) ||
4565 __get_user(handler, &act->sa_handler) ||
4566 __get_user(restorer, &act->sa_restorer) ||
4567 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4568 __get_user(mask, &act->sa_mask))
4571 #ifdef __ARCH_HAS_KA_RESTORER
4572 new_ka.ka_restorer = NULL;
4574 new_ka.sa.sa_handler = compat_ptr(handler);
4575 new_ka.sa.sa_restorer = compat_ptr(restorer);
4576 siginitset(&new_ka.sa.sa_mask, mask);
4579 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4582 if (!access_ok(oact, sizeof(*oact)) ||
4583 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4584 &oact->sa_handler) ||
4585 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4586 &oact->sa_restorer) ||
4587 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4588 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4595 #ifdef CONFIG_SGETMASK_SYSCALL
4598 * For backwards compatibility. Functionality superseded by sigprocmask.
4600 SYSCALL_DEFINE0(sgetmask)
4603 return current->blocked.sig[0];
4606 SYSCALL_DEFINE1(ssetmask, int, newmask)
4608 int old = current->blocked.sig[0];
4611 siginitset(&newset, newmask);
4612 set_current_blocked(&newset);
4616 #endif /* CONFIG_SGETMASK_SYSCALL */
#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */
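/*
 * Editor's note: SA_ONESHOT | SA_NOMASK reproduces the historical SysV
 * signal() semantics: the handler is reset to SIG_DFL on delivery and is
 * not blocked while it runs. The same behaviour requested explicitly from
 * userspace via sigaction() (illustrative; SA_RESETHAND and SA_NODEFER are
 * the portable spellings of the two flags):
 *
 *	struct sigaction sa = { 0 };
 *
 *	sa.sa_handler = handler;
 *	sa.sa_flags = SA_RESETHAND | SA_NODEFER;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */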
#ifdef __ARCH_WANT_SYS_PAUSE
SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}
#endif
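/*
 * Editor's note: a signal arriving between the signal_pending() check and
 * schedule() is not lost: delivery sets TIF_SIGPENDING and wakes the task,
 * so schedule() returns and the loop re-checks. -ERESTARTNOHAND means the
 * call is transparently restarted unless a handler actually ran, in which
 * case userspace sees -EINTR. Userspace view (illustrative):
 *
 *	signal(SIGINT, on_int);	// install any handler first
 *	pause();		// blocks; returns -1 with errno == EINTR
 */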
static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}
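/*
 * Editor's note: sigsuspend()'s value is that the mask swap and the sleep
 * are atomic with respect to delivery; set_restore_sigmask() defers
 * restoring the caller's original mask to the signal-delivery path, so
 * there is no window where the task sleeps with the wrong mask. The
 * classic userspace pattern this enables (illustrative; child_exited is
 * set by a SIGCHLD handler):
 *
 *	volatile sig_atomic_t child_exited;
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &block, &old);	// close the race window
 *	while (!child_exited)
 *		sigsuspend(&old);		// atomically unblock and wait
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */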
/**
 *  sys_rt_sigsuspend - replace the signal mask with the @unewset value and
 *	suspend until a signal is received
 *  @unewset: new signal mask value
 *  @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}
static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);
	CHECK_OFFSET(si_perf_flags);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}
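/*
 * Editor's note: CHECK_OFFSET() turns ABI drift into a build failure.
 * CHECK_OFFSET(si_pid), for instance, expands to:
 *
 *	BUILD_BUG_ON(offsetof(siginfo_t, si_pid) !=
 *		     offsetof(kernel_siginfo_t, si_pid));
 *
 * so if the userspace-visible siginfo_t and the trimmed kernel_siginfo_t
 * ever disagree about where a field lives, the kernel fails to compile
 * rather than corrupting siginfo at run time.
 */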
#if defined(CONFIG_SYSCTL)
static struct ctl_table signal_debug_table[] = {
#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
	{
		.procname	= "exception-trace",
		.data		= &show_unhandled_signals,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
#endif
	{ }
};

static int __init init_signal_sysctls(void)
{
	register_sysctl_init("debug", signal_debug_table);
	return 0;
}
early_initcall(init_signal_sysctls);
#endif /* CONFIG_SYSCTL */
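/*
 * Editor's note: with CONFIG_SYSCTL_EXCEPTION_TRACE this table exposes
 * show_unhandled_signals as /proc/sys/debug/exception-trace. Toggling it
 * from userspace (illustrative):
 *
 *	int fd = open("/proc/sys/debug/exception-trace", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		write(fd, "1", 1);	// log unhandled signals/faults
 *		close(fd);
 *	}
 */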
void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
}
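/*
 * Editor's note: KMEM_CACHE() is shorthand for kmem_cache_create() named
 * and sized after the struct; SLAB_PANIC makes boot fail loudly if the
 * cache cannot be created and SLAB_ACCOUNT charges allocations to the
 * caller's memory cgroup. Roughly equivalent open-coded form (sketch):
 *
 *	sigqueue_cachep = kmem_cache_create("sigqueue",
 *					    sizeof(struct sigqueue),
 *					    __alignof__(struct sigqueue),
 *					    SLAB_PANIC | SLAB_ACCOUNT, NULL);
 */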
#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;

	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (!task_is_running(t) && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */
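/*
 * Editor's note: this is the backend of kdb's "kill" command, e.g.
 * "kill -9 <pid>" typed at the kdb prompt. The spin_trylock() dance exists
 * because kdb may have stopped the kernel at an arbitrary point, possibly
 * with siglock already held, where blocking would deadlock the debugger.
 */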