// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here, and only callers who know they should do.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

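/*
 * Illustrative sketch (not kernel code): with pending and blocked being the
 * first words of the respective sigsets, the "synchronous signals first"
 * rule used by next_signal() below reduces to:
 *
 *	unsigned long x = pending & ~blocked;
 *	if (x & SYNCHRONOUS_MASK)
 *		x &= SYNCHRONOUS_MASK;	// a pending SIGSEGV beats a pending SIGUSR1
 *	int sig = ffz(~x) + 1;		// lowest set bit, as a 1-based signal number
 */
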
int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

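/*
 * Userspace sketch of how this message can be provoked (assumes
 * print_fatal_signals was enabled, e.g. "print-fatal-signals=1"): with
 * RLIMIT_SIGPENDING set to 1 and SIGRTMIN blocked, the second sigqueue()
 * exceeds the limit, fails with EAGAIN, and logs the line above:
 *
 *	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 1 };
 *	sigset_t set;
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGRTMIN);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	setrlimit(RLIMIT_SIGPENDING, &rl);
 *	sigqueue(getpid(), SIGRTMIN, (union sigval){ .sival_int = 0 }); // queued
 *	sigqueue(getpid(), SIGRTMIN, (union sigval){ .sival_int = 1 }); // EAGAIN
 */
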
/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;

	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;

		if (task_set_jobctl_pending(task, signr | gstop)) {
			sig->group_stop_count++;
		}
	}
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;
	int sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	user = __task_cred(t)->user;
	sigpending = atomic_inc_return(&user->sigpending);
	if (sigpending == 1)
		get_uid(user);
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (atomic_dec_and_test(&user->sigpending))
			free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (atomic_dec_and_test(&q->user->sigpending))
		free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

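/*
 * Userspace-visible consequence of the private-before-shared order above
 * (sketch): with SIGUSR1 blocked in a multithreaded process, a signal sent
 * with tgkill() to a specific thread sits in that thread's private queue
 * and is dequeued by that thread's sigwaitinfo() before any process-wide
 * SIGUSR1 sent with kill(), which sits in shared_pending.
 */
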
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal..
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

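/*
 * Example of the check above: a user with uid 1000 may signal a setuid-root
 * program they started, because the target's *real* uid is still 1000 even
 * though its effective and saved uids are 0.  CAP_KILL in the target's user
 * namespace overrides all four uid comparisons.
 */
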
/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

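/*
 * Userspace-visible effect of legacy_queue() (sketch): while SIGUSR1 is
 * blocked, sending it twice coalesces into a single delivery, whereas an
 * RT signal (>= SIGRTMIN) is queued once per send:
 *
 *	kill(pid, SIGUSR1);
 *	kill(pid, SIGUSR1);		// already pending: coalesced
 *	sigqueue(pid, SIGRTMIN, v1);
 *	sigqueue(pid, SIGRTMIN, v2);	// both queued, delivered in order
 */
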
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP is special or has ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a thread group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits will be stored at a
 * higher address than appears in a 32 bit pointer.  So userspace will
 * not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_ptr, of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

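/*
 * Sketch of the caller-side convention described above (hypothetical
 * variables, not a real call site): a 32bit task's completion address must
 * go in sival_int so a 32bit reader sees it:
 *
 *	sigval_t addr;
 *	if (target_is_32bit)			// assumption: caller knows
 *		addr.sival_int = (int)(long)uaddr;	// 32bit user pointer
 *	else
 *		addr.sival_ptr = uaddr;
 *	kill_pid_usb_asyncio(sig, err, addr, pid, cred);
 */
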
/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

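/*
 * Minimal in-kernel usage sketch (hypothetical driver code): priv != 0
 * selects SEND_SIG_PRIV, so the signal is treated as kernel-generated
 * (si_code SI_KERNEL) rather than as coming from the current user:
 *
 *	send_sig(SIGHUP, task, 1);	// kernel-generated
 *	send_sig(SIGHUP, task, 0);	// as if from current's pid/uid
 */
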
void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_TRAPNO(trapno)
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(&info);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code  = TRAP_HWBKPT;
	info.si_addr  = addr;
	return force_sig_info(&info);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();
	t = pid_task(pid, type);
	if (!t || !likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!tsk->ptrace &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, this is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	if (valid_signal(sig) && sig)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline bool may_ptrace_stop(void)
{
	if (!likely(current->ptrace))
		return false;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 *
	 * This is almost outdated, a task with the pending SIGKILL can't
	 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
	 * after SIGKILL was already dequeued.
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return false;

	return true;
}

/*
 * Return true if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static bool sigkill_pending(struct task_struct *tsk)
{
	return sigismember(&tsk->pending.signal, SIGKILL) ||
	       sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	set_special_state(TASK_TRACED);

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()                smp_wmb();
	 *       ptrace_do_wait()
	 *         wait_task_stopped()
	 *           task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		/*
		 * Notify parents of the stop.
		 *
		 * While ptraced, there are two parents - the ptracer and
		 * the real_parent of the group_leader.  The ptracer should
		 * know about every stop while the real parent is only
		 * interested in the completion of group stop.  The states
		 * for the two don't interact with each other.  Notify
		 * separately unless they're gonna be duplicates.
		 */
		do_notify_parent_cldstop(current, true, why);
		if (gstop_done && ptrace_reparented(current))
			do_notify_parent_cldstop(current, false, why);

		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		cgroup_enter_frozen();
		preempt_enable_no_resched();
		freezable_schedule();
		cgroup_leave_frozen(true);
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 *
		 * If @gstop_done, the ptracer went away between group stop
		 * completion and here.  During detach, it would have set
		 * JOBCTL_STOP_PENDING on us and we'll re-enter
		 * TASK_STOPPED in do_signal_stop() on return, so notifying
		 * the real parent of the group stop completion is enough.
		 */
		if (gstop_done)
			do_notify_parent_cldstop(current, false, why);

		/* tasklist protects us from ptrace_freeze_traced() */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~JOBCTL_LISTENING;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

static void ptrace_do_notify(int signr, int exit_code, int why)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	ptrace_stop(exit_code, why, 1, &info);
}

void ptrace_notify(int exit_code)
{
	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(current->task_works))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
	spin_unlock_irq(&current->sighand->siglock);
}

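/*
 * Callers encode the ptrace event in the upper byte of exit_code, e.g.
 * (sketch of the usual pattern):
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 */
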
2279 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2280 * @signr: signr causing group stop if initiating
2282 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2283 * and participate in it. If already set, participate in the existing
2284 * group stop. If participated in a group stop (and thus slept), %true is
2285 * returned with siglock released.
2287 * If ptraced, this function doesn't handle stop itself. Instead,
2288 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2289 * untouched. The caller must ensure that INTERRUPT trap handling takes
2290 * place afterwards.
2292 * CONTEXT:
2293 * Must be called with @current->sighand->siglock held, which is released
2294 * on %true return.
2296 * RETURNS:
2297 * %false if group stop is already cancelled or ptrace trap is scheduled.
2298 * %true if participated in group stop.
2300 static bool do_signal_stop(int signr)
2301 __releases(&current->sighand->siglock)
2303 struct signal_struct *sig = current->signal;
2305 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
2306 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
2307 struct task_struct *t;
2309 /* signr will be recorded in task->jobctl for retries */
2310 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
2312 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
2313 unlikely(signal_group_exit(sig)))
2314 return false;
2316 * There is no group stop already in progress. We must
2319 * While ptraced, a task may be resumed while group stop is
2320 * still in effect and then receive a stop signal and
2321 * initiate another group stop. This deviates from the
2322 * usual behavior as two consecutive stop signals can't
2323 * cause two group stops when !ptraced. That is why we
2324 * also check !task_is_stopped(t) below.
2326 * The condition can be distinguished by testing whether
2327 * SIGNAL_STOP_STOPPED is already set. Don't generate
2328 * group_exit_code in such case.
2330 * This is not necessary for SIGNAL_STOP_CONTINUED because
2331 * an intervening stop signal is required to cause two
2332 * continued events regardless of ptrace.
2334 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2335 sig->group_exit_code = signr;
2337 sig->group_stop_count = 0;
2339 if (task_set_jobctl_pending(current, signr | gstop))
2340 sig->group_stop_count++;
2342 t = current;
2343 while_each_thread(current, t) {
2345 * Setting state to TASK_STOPPED for a group
2346 * stop is always done with the siglock held,
2347 * so this check has no races.
2349 if (!task_is_stopped(t) &&
2350 task_set_jobctl_pending(t, signr | gstop)) {
2351 sig->group_stop_count++;
2352 if (likely(!(t->ptrace & PT_SEIZED)))
2353 signal_wake_up(t, 0);
2354 else
2355 ptrace_trap_notify(t);
2360 if (likely(!current->ptrace)) {
2361 int notify = 0;
2364 * If there are no other threads in the group, or if there
2365 * is a group stop in progress and we are the last to stop,
2366 * report to the parent.
2368 if (task_participate_group_stop(current))
2369 notify = CLD_STOPPED;
2371 set_special_state(TASK_STOPPED);
2372 spin_unlock_irq(&current->sighand->siglock);
2375 * Notify the parent of the group stop completion. Because
2376 * we're not holding either the siglock or tasklist_lock
2377 * here, ptracer may attach inbetween; however, this is for
2378 * group stop and should always be delivered to the real
2379 * parent of the group leader. The new ptracer will get
2380 * its notification when this task transitions into TASK_TRACED.
2383 if (notify) {
2384 read_lock(&tasklist_lock);
2385 do_notify_parent_cldstop(current, false, notify);
2386 read_unlock(&tasklist_lock);
2387 }
2389 /* Now we don't run again until woken by SIGCONT or SIGKILL */
2390 cgroup_enter_frozen();
2391 freezable_schedule();
2392 return true;
2395 * While ptraced, group stop is handled by STOP trap.
2396 * Schedule it and let the caller deal with it.
2398 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2399 return false;
2404 * do_jobctl_trap - take care of ptrace jobctl traps
2406 * When PT_SEIZED, it's used for both group stop and explicit
2407 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2408 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2409 * the stop signal; otherwise, %SIGTRAP.
2411 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2412 * number as exit_code and no siginfo.
2414 * CONTEXT:
2415 * Must be called with @current->sighand->siglock held, which may be
2416 * released and re-acquired before returning with intervening sleep.
2418 static void do_jobctl_trap(void)
2420 struct signal_struct *signal = current->signal;
2421 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
2423 if (current->ptrace & PT_SEIZED) {
2424 if (!signal->group_stop_count &&
2425 !(signal->flags & SIGNAL_STOP_STOPPED))
2426 signr = SIGTRAP;
2427 WARN_ON_ONCE(!signr);
2428 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2429 CLD_STOPPED);
2430 } else {
2431 WARN_ON_ONCE(!signr);
2432 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2433 current->exit_code = 0;
2438 * do_freezer_trap - handle the freezer jobctl trap
2440 * Puts the task into the frozen state, unless the task is about to quit,
2441 * in which case it drops JOBCTL_TRAP_FREEZE instead.
2443 * CONTEXT:
2444 * Must be called with @current->sighand->siglock held,
2445 * which is always released before returning.
2447 static void do_freezer_trap(void)
2448 __releases(&current->sighand->siglock)
2451 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2452 * let's make another loop to give it a chance to be handled.
2453 * In any case, we'll return here.
2455 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2456 JOBCTL_TRAP_FREEZE) {
2457 spin_unlock_irq(&current->sighand->siglock);
2458 return;
2459 }
2462 * Now we're sure that there is no pending fatal signal and no
2463 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2464 * immediately (if there is a non-fatal signal pending), and
2465 * put the task into sleep.
2467 __set_current_state(TASK_INTERRUPTIBLE);
2468 clear_thread_flag(TIF_SIGPENDING);
2469 spin_unlock_irq(&current->sighand->siglock);
2470 cgroup_enter_frozen();
2471 freezable_schedule();
2474 static int ptrace_signal(int signr, kernel_siginfo_t *info)
2477 * We do not check sig_kernel_stop(signr) but set this marker
2478 * unconditionally because we do not know whether debugger will
2479 * change signr. This flag has no meaning unless we are going
2480 * to stop after return from ptrace_stop(). In this case it will
2481 * be checked in do_signal_stop(), we should only stop if it was
2482 * not cleared by SIGCONT while we were sleeping. See also the
2483 * comment in dequeue_signal().
2485 current->jobctl |= JOBCTL_STOP_DEQUEUED;
2486 ptrace_stop(signr, CLD_TRAPPED, 0, info);
2488 /* We're back. Did the debugger cancel the sig? */
2489 signr = current->exit_code;
2493 current->exit_code = 0;
2496 * Update the siginfo structure if the signal has
2497 * changed. If the debugger wanted something
2498 * specific in the siginfo structure then it should
2499 * have updated *info via PTRACE_SETSIGINFO.
2501 if (signr != info->si_signo) {
2502 clear_siginfo(info);
2503 info->si_signo = signr;
2505 info->si_code = SI_USER;
2507 info->si_pid = task_pid_vnr(current->parent);
2508 info->si_uid = from_kuid_munged(current_user_ns(),
2509 task_uid(current->parent));
2513 /* If the (new) signal is now blocked, requeue it. */
2514 if (sigismember(&current->blocked, signr)) {
2515 send_signal(signr, info, current, PIDTYPE_PID);
2516 signr = 0;
2517 }
2519 return signr;
2522 bool get_signal(struct ksignal *ksig)
2524 struct sighand_struct *sighand = current->sighand;
2525 struct signal_struct *signal = current->signal;
2526 int signr;
2528 if (unlikely(current->task_works))
2529 task_work_run();
2531 if (unlikely(uprobe_deny_signal()))
2532 return false;
2535 * Do this once, we can't return to user-mode if freezing() == T.
2536 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2537 * thus do not need another check after return.
2539 try_to_freeze();
2541 relock:
2542 spin_lock_irq(&sighand->siglock);
2544 * Every stopped thread goes here after wakeup. Check to see if
2545 * we should notify the parent, prepare_signal(SIGCONT) encodes
2546 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2548 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
2549 int why;
2551 if (signal->flags & SIGNAL_CLD_CONTINUED)
2552 why = CLD_CONTINUED;
2553 else
2554 why = CLD_STOPPED;
2556 signal->flags &= ~SIGNAL_CLD_MASK;
2558 spin_unlock_irq(&sighand->siglock);
2561 * Notify the parent that we're continuing. This event is
2562 * always per-process and doesn't make a whole lot of sense
2563 * for ptracers, who shouldn't consume the state via
2564 * wait(2) either, but, for backward compatibility, notify
2565 * the ptracer of the group leader too unless it's gonna be a duplicate.
2568 read_lock(&tasklist_lock);
2569 do_notify_parent_cldstop(current, false, why);
2571 if (ptrace_reparented(current->group_leader))
2572 do_notify_parent_cldstop(current->group_leader,
2573 true, why);
2574 read_unlock(&tasklist_lock);
2576 goto relock;
2577 }
2579 /* Has this task already been marked for death? */
2580 if (signal_group_exit(signal)) {
2581 ksig->info.si_signo = signr = SIGKILL;
2582 sigdelset(&current->pending.signal, SIGKILL);
2583 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2584 &sighand->action[SIGKILL - 1]);
2585 recalc_sigpending();
2586 goto fatal;
2587 }
2589 for (;;) {
2590 struct k_sigaction *ka;
2592 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2593 do_signal_stop(0))
2594 goto relock;
2596 if (unlikely(current->jobctl &
2597 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2598 if (current->jobctl & JOBCTL_TRAP_MASK) {
2599 do_jobctl_trap();
2600 spin_unlock_irq(&sighand->siglock);
2601 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2602 do_freezer_trap();
2604 goto relock;
2605 }
2608 * If the task is leaving the frozen state, let's update
2609 * cgroup counters and reset the frozen bit.
2611 if (unlikely(cgroup_task_frozen(current))) {
2612 spin_unlock_irq(&sighand->siglock);
2613 cgroup_leave_frozen(false);
2614 goto relock;
2615 }
2618 * Signals generated by the execution of an instruction
2619 * need to be delivered before any other pending signals
2620 * so that the instruction pointer in the signal stack
2621 * frame points to the faulting instruction.
2623 signr = dequeue_synchronous_signal(&ksig->info);
2624 if (!signr)
2625 signr = dequeue_signal(current, &current->blocked, &ksig->info);
2627 if (!signr)
2628 break; /* will return 0 */
2630 if (unlikely(current->ptrace) && signr != SIGKILL) {
2631 signr = ptrace_signal(signr, &ksig->info);
2632 if (!signr)
2633 continue;
2634 }
2636 ka = &sighand->action[signr-1];
2638 /* Trace actually delivered signals. */
2639 trace_signal_deliver(signr, &ksig->info, ka);
2641 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2642 continue;
2643 if (ka->sa.sa_handler != SIG_DFL) {
2644 /* Run the handler. */
2645 ksig->ka = *ka;
2647 if (ka->sa.sa_flags & SA_ONESHOT)
2648 ka->sa.sa_handler = SIG_DFL;
2650 break; /* will return non-zero "signr" value */
2654 * Now we are doing the default action for this signal.
2656 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2657 continue;
2660 * Global init gets no signals it doesn't want.
2661 * Container-init gets no signals it doesn't want from the same container.
2664 * Note that if global/container-init sees a sig_kernel_only()
2665 * signal here, the signal must have been generated internally
2666 * or must have come from an ancestor namespace. In either
2667 * case, the signal cannot be dropped.
2669 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
2670 !sig_kernel_only(signr))
2671 continue;
2673 if (sig_kernel_stop(signr)) {
2675 * The default action is to stop all threads in
2676 * the thread group. The job control signals
2677 * do nothing in an orphaned pgrp, but SIGSTOP
2678 * always works. Note that siglock needs to be
2679 * dropped during the call to is_orphaned_pgrp()
2680 * because of lock ordering with tasklist_lock.
2681 * This allows an intervening SIGCONT to be posted.
2682 * We need to check for that and bail out if necessary.
2684 if (signr != SIGSTOP) {
2685 spin_unlock_irq(&sighand->siglock);
2687 /* signals can be posted during this window */
2689 if (is_current_pgrp_orphaned())
2690 goto relock;
2692 spin_lock_irq(&sighand->siglock);
2695 if (likely(do_signal_stop(ksig->info.si_signo))) {
2696 /* It released the siglock. */
2697 goto relock;
2698 }
2701 * We didn't actually stop, due to a race
2702 * with SIGCONT or something like that.
2704 continue;
2705 }
2707 fatal:
2708 spin_unlock_irq(&sighand->siglock);
2709 if (unlikely(cgroup_task_frozen(current)))
2710 cgroup_leave_frozen(true);
2713 * Anything else is fatal, maybe with a core dump.
2715 current->flags |= PF_SIGNALED;
2717 if (sig_kernel_coredump(signr)) {
2718 if (print_fatal_signals)
2719 print_fatal_signal(ksig->info.si_signo);
2720 proc_coredump_connector(current);
2722 * If it was able to dump core, this kills all
2723 * other threads in the group and synchronizes with
2724 * their demise. If we lost the race with another
2725 * thread getting here, it set group_exit_code
2726 * first and our do_group_exit call below will use
2727 * that value and ignore the one we pass it.
2729 do_coredump(&ksig->info);
2733 * Death signals, no core dump.
2735 do_group_exit(ksig->info.si_signo);
2738 spin_unlock_irq(&sighand->siglock);
2740 ksig->sig = signr;
2741 return ksig->sig > 0;
2745 * signal_delivered - update the blocked mask after a signal was delivered
2746 * @ksig: kernel signal struct
2747 * @stepping: nonzero if debugger single-step or block-step in use
2749 * This function should be called when a signal has successfully been
2750 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
2751 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2752 * is set in @ksig->ka.sa.sa_flags). Tracing is notified.
2754 static void signal_delivered(struct ksignal *ksig, int stepping)
2756 sigset_t blocked;
2758 /* A signal was successfully delivered, and the
2759 saved sigmask was stored on the signal frame,
2760 and will be restored by sigreturn. So we can
2761 simply clear the restore sigmask flag. */
2762 clear_restore_sigmask();
2764 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2765 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2766 sigaddset(&blocked, ksig->sig);
2767 set_current_blocked(&blocked);
2768 tracehook_signal_handler(stepping);
2771 void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2773 if (failed)
2774 force_sigsegv(ksig->sig);
2775 else
2776 signal_delivered(ksig, stepping);
2780 * It could be that complete_signal() picked us to notify about the
2781 * group-wide signal. Other threads should be notified now to take
2782 * the shared signals in @which since we will not.
2784 static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
2786 sigset_t retarget;
2787 struct task_struct *t;
2789 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2790 if (sigisemptyset(&retarget))
2791 return;
2793 t = tsk;
2794 while_each_thread(tsk, t) {
2795 if (t->flags & PF_EXITING)
2796 continue;
2798 if (!has_pending_signals(&retarget, &t->blocked))
2799 continue;
2800 /* Remove the signals this thread can handle. */
2801 sigandsets(&retarget, &retarget, &t->blocked);
2803 if (!signal_pending(t))
2804 signal_wake_up(t, 0);
2806 if (sigisemptyset(&retarget))
2807 break;
2811 void exit_signals(struct task_struct *tsk)
2813 int group_stop = 0;
2814 sigset_t unblocked;
2817 * @tsk is about to have PF_EXITING set - lock out users which
2818 * expect stable threadgroup.
2820 cgroup_threadgroup_change_begin(tsk);
2822 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2823 tsk->flags |= PF_EXITING;
2824 cgroup_threadgroup_change_end(tsk);
2825 return;
2826 }
2828 spin_lock_irq(&tsk->sighand->siglock);
2830 * From now this task is not visible for group-wide signals,
2831 * see wants_signal(), do_signal_stop().
2833 tsk->flags |= PF_EXITING;
2835 cgroup_threadgroup_change_end(tsk);
2837 if (!signal_pending(tsk))
2838 goto out;
2840 unblocked = tsk->blocked;
2841 signotset(&unblocked);
2842 retarget_shared_pending(tsk, &unblocked);
2844 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
2845 task_participate_group_stop(tsk))
2846 group_stop = CLD_STOPPED;
2847 out:
2848 spin_unlock_irq(&tsk->sighand->siglock);
2851 * If group stop has completed, deliver the notification. This
2852 * should always go to the real parent of the group leader.
2854 if (unlikely(group_stop)) {
2855 read_lock(&tasklist_lock);
2856 do_notify_parent_cldstop(tsk, false, group_stop);
2857 read_unlock(&tasklist_lock);
2862 * System call entry points.
2866 * sys_restart_syscall - restart a system call
2868 SYSCALL_DEFINE0(restart_syscall)
2870 struct restart_block *restart = &current->restart_block;
2871 return restart->fn(restart);
2874 long do_no_restart_syscall(struct restart_block *param)
2876 return -EINTR;
2879 static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2881 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2882 sigset_t newblocked;
2883 /* A set of now blocked but previously unblocked signals. */
2884 sigandnsets(&newblocked, newset, &current->blocked);
2885 retarget_shared_pending(tsk, &newblocked);
2887 tsk->blocked = *newset;
2888 recalc_sigpending();
2892 * set_current_blocked - change current->blocked mask
2893 * @newset: new mask
2895 * It is wrong to change ->blocked directly, this helper should be used
2896 * to ensure the process can't miss a shared signal we are going to block.
2898 void set_current_blocked(sigset_t *newset)
2900 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2901 __set_current_blocked(newset);
2904 void __set_current_blocked(const sigset_t *newset)
2906 struct task_struct *tsk = current;
2909 * In case the signal mask hasn't changed, there is nothing we need
2910 * to do. The current->blocked shouldn't be modified by other task.
2912 if (sigequalsets(&tsk->blocked, newset))
2915 spin_lock_irq(&tsk->sighand->siglock);
2916 __set_task_blocked(tsk, newset);
2917 spin_unlock_irq(&tsk->sighand->siglock);
2921 * This is also useful for kernel threads that want to temporarily
2922 * (or permanently) block certain signals.
2924 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2925 * interface happily blocks "unblockable" signals like SIGKILL
2926 * and SIGSTOP.
2928 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2930 struct task_struct *tsk = current;
2931 sigset_t newset;
2933 /* Lockless, only current can change ->blocked, never from irq */
2934 if (oldset)
2935 *oldset = tsk->blocked;
2937 switch (how) {
2938 case SIG_BLOCK:
2939 sigorsets(&newset, &tsk->blocked, set);
2940 break;
2941 case SIG_UNBLOCK:
2942 sigandnsets(&newset, &tsk->blocked, set);
2943 break;
2944 case SIG_SETMASK:
2945 newset = *set;
2946 break;
2947 default:
2948 return -EINVAL;
2949 }
2951 __set_current_blocked(&newset);
2952 return 0;
2954 EXPORT_SYMBOL(sigprocmask);
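/*
 * Example -- a minimal in-kernel sketch (not part of this file) of how a
 * kernel thread that opted in to SIGTERM via allow_signal() might use
 * sigprocmask() to block it around a critical region; the work itself is
 * a placeholder:
 *
 *	sigset_t set, old;
 *
 *	siginitset(&set, sigmask(SIGTERM));
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	// ... work that must not observe SIGTERM ...
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */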
2957 * This API helps to set app-provided sigmasks.
2959 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2960 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2962 * Note that it does set_restore_sigmask() in advance, so it must be always
2963 * paired with restore_saved_sigmask_unless() before return from syscall.
2965 int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
2967 sigset_t kmask;
2971 if (sigsetsize != sizeof(sigset_t))
2972 return -EINVAL;
2973 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
2974 return -EFAULT;
2976 set_restore_sigmask();
2977 current->saved_sigmask = current->blocked;
2978 set_current_blocked(&kmask);
2980 return 0;
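/*
 * Example -- a userspace sketch of the ppoll() path that ends up in
 * set_user_sigmask(); the mask swap is atomic with the wait, closing the
 * classic poll()/sigprocmask() race ("fd" is an assumed descriptor):
 *
 *	sigset_t mask;
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	sigemptyset(&mask);		// allow everything during the wait
 *	ppoll(&pfd, 1, NULL, &mask);	// old mask is restored on return
 */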
2983 #ifdef CONFIG_COMPAT
2984 int set_compat_user_sigmask(const compat_sigset_t __user *umask,
2985 size_t sigsetsize)
2987 sigset_t kmask;
2991 if (sigsetsize != sizeof(compat_sigset_t))
2992 return -EINVAL;
2993 if (get_compat_sigset(&kmask, umask))
2994 return -EFAULT;
2996 set_restore_sigmask();
2997 current->saved_sigmask = current->blocked;
2998 set_current_blocked(&kmask);
3000 return 0;
3005 * sys_rt_sigprocmask - change the list of currently blocked signals
3006 * @how: whether to add, remove, or set signals
3007 * @nset: new set of blocked signals, if non-null
3008 * @oset: previous value of signal mask if non-null
3009 * @sigsetsize: size of sigset_t type
3011 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
3012 sigset_t __user *, oset, size_t, sigsetsize)
3014 sigset_t old_set, new_set;
3015 int error;
3017 /* XXX: Don't preclude handling different sized sigset_t's. */
3018 if (sigsetsize != sizeof(sigset_t))
3019 return -EINVAL;
3021 old_set = current->blocked;
3023 if (nset) {
3024 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3025 return -EFAULT;
3026 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3028 error = sigprocmask(how, &new_set, NULL);
3029 if (error)
3030 return error;
3031 }
3033 if (oset) {
3034 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3035 return -EFAULT;
3036 }
3038 return 0;
3041 #ifdef CONFIG_COMPAT
3042 COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3043 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
3045 sigset_t old_set = current->blocked;
3047 /* XXX: Don't preclude handling different sized sigset_t's. */
3048 if (sigsetsize != sizeof(sigset_t))
3049 return -EINVAL;
3051 if (nset) {
3052 sigset_t new_set;
3053 int error;
3054 if (get_compat_sigset(&new_set, nset))
3055 return -EFAULT;
3056 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3058 error = sigprocmask(how, &new_set, NULL);
3059 if (error)
3060 return error;
3061 }
3062 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
3066 static void do_sigpending(sigset_t *set)
3068 spin_lock_irq(&current->sighand->siglock);
3069 sigorsets(set, &current->pending.signal,
3070 &current->signal->shared_pending.signal);
3071 spin_unlock_irq(&current->sighand->siglock);
3073 /* Outside the lock because only this thread touches it. */
3074 sigandsets(set, &current->blocked, set);
3078 * sys_rt_sigpending - examine a pending signal that has been raised
3080 * @uset: where the set of pending signals is returned
3081 * @sigsetsize: size of sigset_t type or larger
3083 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
3085 sigset_t set;
3087 if (sigsetsize > sizeof(*uset))
3088 return -EINVAL;
3090 do_sigpending(&set);
3092 if (copy_to_user(uset, &set, sigsetsize))
3093 return -EFAULT;
3094 return 0;
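/*
 * Example -- a userspace sketch; the libc sigpending() wrapper enters
 * through this syscall and reports signals raised while blocked:
 *
 *	sigset_t pending;
 *
 *	if (sigpending(&pending) == 0 && sigismember(&pending, SIGINT))
 *		;	// SIGINT arrived while we had it blocked
 */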
3098 #ifdef CONFIG_COMPAT
3099 COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3100 compat_size_t, sigsetsize)
3102 sigset_t set;
3104 if (sigsetsize > sizeof(*uset))
3105 return -EINVAL;
3107 do_sigpending(&set);
3109 return put_compat_sigset(uset, &set, sigsetsize);
3113 static const struct {
3114 unsigned char limit, layout;
3115 } sig_sicodes[] = {
3116 [SIGILL] = { NSIGILL, SIL_FAULT },
3117 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3118 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3119 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3120 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3122 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3124 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3125 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3126 [SIGSYS] = { NSIGSYS, SIL_SYS },
3129 static bool known_siginfo_layout(unsigned sig, int si_code)
3131 if (si_code == SI_KERNEL)
3132 return true;
3133 else if ((si_code > SI_USER)) {
3134 if (sig_specific_sicodes(sig)) {
3135 if (si_code <= sig_sicodes[sig].limit)
3136 return true;
3137 }
3138 else if (si_code <= NSIGPOLL)
3139 return true;
3140 }
3141 else if (si_code >= SI_DETHREAD)
3142 return true;
3143 else if (si_code == SI_ASYNCNL)
3144 return true;
3145 return false;
3148 enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
3150 enum siginfo_layout layout = SIL_KILL;
3151 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
3152 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3153 (si_code <= sig_sicodes[sig].limit)) {
3154 layout = sig_sicodes[sig].layout;
3155 /* Handle the exceptions */
3156 if ((sig == SIGBUS) &&
3157 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3158 layout = SIL_FAULT_MCEERR;
3159 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3160 layout = SIL_FAULT_BNDERR;
3162 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3163 layout = SIL_FAULT_PKUERR;
3166 else if (si_code <= NSIGPOLL)
3167 layout = SIL_POLL;
3168 } else {
3169 if (si_code == SI_TIMER)
3170 layout = SIL_TIMER;
3171 else if (si_code == SI_SIGIO)
3172 layout = SIL_POLL;
3173 else if (si_code < 0)
3174 layout = SIL_RT;
3175 }
3176 return layout;
3179 static inline char __user *si_expansion(const siginfo_t __user *info)
3181 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3184 int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
3186 char __user *expansion = si_expansion(to);
3187 if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
3188 return -EFAULT;
3189 if (clear_user(expansion, SI_EXPANSION_SIZE))
3190 return -EFAULT;
3191 return 0;
3194 static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3195 const siginfo_t __user *from)
3197 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
3198 char __user *expansion = si_expansion(from);
3199 char buf[SI_EXPANSION_SIZE];
3200 int i;
3202 * An unknown si_code might need more than
3203 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3204 * extra bytes are 0. This guarantees copy_siginfo_to_user
3205 * will return this data to userspace exactly.
3207 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3208 return -EFAULT;
3209 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3210 if (buf[i] != 0)
3211 return -E2BIG;
3212 }
3213 }
3214 return 0;
3217 static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3218 const siginfo_t __user *from)
3220 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3221 return -EFAULT;
3222 to->si_signo = signo;
3223 return post_copy_siginfo_from_user(to, from);
3226 int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3228 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3229 return -EFAULT;
3230 return post_copy_siginfo_from_user(to, from);
3233 #ifdef CONFIG_COMPAT
3234 int copy_siginfo_to_user32(struct compat_siginfo __user *to,
3235 const struct kernel_siginfo *from)
3236 #if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3238 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3240 int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3241 const struct kernel_siginfo *from, bool x32_ABI)
3244 struct compat_siginfo new;
3245 memset(&new, 0, sizeof(new));
3247 new.si_signo = from->si_signo;
3248 new.si_errno = from->si_errno;
3249 new.si_code = from->si_code;
3250 switch(siginfo_layout(from->si_signo, from->si_code)) {
3251 case SIL_KILL:
3252 new.si_pid = from->si_pid;
3253 new.si_uid = from->si_uid;
3254 break;
3255 case SIL_TIMER:
3256 new.si_tid = from->si_tid;
3257 new.si_overrun = from->si_overrun;
3258 new.si_int = from->si_int;
3259 break;
3260 case SIL_POLL:
3261 new.si_band = from->si_band;
3262 new.si_fd = from->si_fd;
3263 break;
3264 case SIL_FAULT:
3265 new.si_addr = ptr_to_compat(from->si_addr);
3266 #ifdef __ARCH_SI_TRAPNO
3267 new.si_trapno = from->si_trapno;
3268 #endif
3269 break;
3270 case SIL_FAULT_MCEERR:
3271 new.si_addr = ptr_to_compat(from->si_addr);
3272 #ifdef __ARCH_SI_TRAPNO
3273 new.si_trapno = from->si_trapno;
3274 #endif
3275 new.si_addr_lsb = from->si_addr_lsb;
3276 break;
3277 case SIL_FAULT_BNDERR:
3278 new.si_addr = ptr_to_compat(from->si_addr);
3279 #ifdef __ARCH_SI_TRAPNO
3280 new.si_trapno = from->si_trapno;
3281 #endif
3282 new.si_lower = ptr_to_compat(from->si_lower);
3283 new.si_upper = ptr_to_compat(from->si_upper);
3284 break;
3285 case SIL_FAULT_PKUERR:
3286 new.si_addr = ptr_to_compat(from->si_addr);
3287 #ifdef __ARCH_SI_TRAPNO
3288 new.si_trapno = from->si_trapno;
3289 #endif
3290 new.si_pkey = from->si_pkey;
3291 break;
3292 case SIL_CHLD:
3293 new.si_pid = from->si_pid;
3294 new.si_uid = from->si_uid;
3295 new.si_status = from->si_status;
3296 #ifdef CONFIG_X86_X32_ABI
3297 if (x32_ABI) {
3298 new._sifields._sigchld_x32._utime = from->si_utime;
3299 new._sifields._sigchld_x32._stime = from->si_stime;
3300 } else
3301 #endif
3302 {
3303 new.si_utime = from->si_utime;
3304 new.si_stime = from->si_stime;
3305 }
3306 break;
3307 case SIL_RT:
3308 new.si_pid = from->si_pid;
3309 new.si_uid = from->si_uid;
3310 new.si_int = from->si_int;
3311 break;
3312 case SIL_SYS:
3313 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3314 new.si_syscall = from->si_syscall;
3315 new.si_arch = from->si_arch;
3316 break;
3317 }
3319 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3320 return -EFAULT;
3322 return 0;
3325 static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3326 const struct compat_siginfo *from)
3329 to->si_signo = from->si_signo;
3330 to->si_errno = from->si_errno;
3331 to->si_code = from->si_code;
3332 switch(siginfo_layout(from->si_signo, from->si_code)) {
3333 case SIL_KILL:
3334 to->si_pid = from->si_pid;
3335 to->si_uid = from->si_uid;
3336 break;
3337 case SIL_TIMER:
3338 to->si_tid = from->si_tid;
3339 to->si_overrun = from->si_overrun;
3340 to->si_int = from->si_int;
3341 break;
3342 case SIL_POLL:
3343 to->si_band = from->si_band;
3344 to->si_fd = from->si_fd;
3345 break;
3346 case SIL_FAULT:
3347 to->si_addr = compat_ptr(from->si_addr);
3348 #ifdef __ARCH_SI_TRAPNO
3349 to->si_trapno = from->si_trapno;
3350 #endif
3351 break;
3352 case SIL_FAULT_MCEERR:
3353 to->si_addr = compat_ptr(from->si_addr);
3354 #ifdef __ARCH_SI_TRAPNO
3355 to->si_trapno = from->si_trapno;
3356 #endif
3357 to->si_addr_lsb = from->si_addr_lsb;
3358 break;
3359 case SIL_FAULT_BNDERR:
3360 to->si_addr = compat_ptr(from->si_addr);
3361 #ifdef __ARCH_SI_TRAPNO
3362 to->si_trapno = from->si_trapno;
3363 #endif
3364 to->si_lower = compat_ptr(from->si_lower);
3365 to->si_upper = compat_ptr(from->si_upper);
3366 break;
3367 case SIL_FAULT_PKUERR:
3368 to->si_addr = compat_ptr(from->si_addr);
3369 #ifdef __ARCH_SI_TRAPNO
3370 to->si_trapno = from->si_trapno;
3371 #endif
3372 to->si_pkey = from->si_pkey;
3373 break;
3374 case SIL_CHLD:
3375 to->si_pid = from->si_pid;
3376 to->si_uid = from->si_uid;
3377 to->si_status = from->si_status;
3378 #ifdef CONFIG_X86_X32_ABI
3379 if (in_x32_syscall()) {
3380 to->si_utime = from->_sifields._sigchld_x32._utime;
3381 to->si_stime = from->_sifields._sigchld_x32._stime;
3382 } else
3383 #endif
3384 {
3385 to->si_utime = from->si_utime;
3386 to->si_stime = from->si_stime;
3387 }
3388 break;
3389 case SIL_RT:
3390 to->si_pid = from->si_pid;
3391 to->si_uid = from->si_uid;
3392 to->si_int = from->si_int;
3393 break;
3394 case SIL_SYS:
3395 to->si_call_addr = compat_ptr(from->si_call_addr);
3396 to->si_syscall = from->si_syscall;
3397 to->si_arch = from->si_arch;
3398 break;
3399 }
3400 return 0;
3403 static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3404 const struct compat_siginfo __user *ufrom)
3406 struct compat_siginfo from;
3408 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3409 return -EFAULT;
3411 from.si_signo = signo;
3412 return post_copy_siginfo_from_user32(to, &from);
3415 int copy_siginfo_from_user32(struct kernel_siginfo *to,
3416 const struct compat_siginfo __user *ufrom)
3418 struct compat_siginfo from;
3420 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3421 return -EFAULT;
3423 return post_copy_siginfo_from_user32(to, &from);
3425 #endif /* CONFIG_COMPAT */
3428 * do_sigtimedwait - wait for queued signals specified in @which
3429 * @which: queued signals to wait for
3430 * @info: if non-null, the signal's siginfo is returned here
3431 * @ts: upper bound on process time suspension
3433 static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
3434 const struct timespec64 *ts)
3436 ktime_t *to = NULL, timeout = KTIME_MAX;
3437 struct task_struct *tsk = current;
3438 sigset_t mask = *which;
3439 int sig, ret = 0;
3441 if (ts) {
3442 if (!timespec64_valid(ts))
3443 return -EINVAL;
3444 timeout = timespec64_to_ktime(*ts);
3445 to = &timeout;
3446 }
3449 * Invert the set of allowed signals to get those we want to block.
3451 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3452 signotset(&mask);
3454 spin_lock_irq(&tsk->sighand->siglock);
3455 sig = dequeue_signal(tsk, &mask, info);
3456 if (!sig && timeout) {
3458 * None ready, temporarily unblock those we're interested
3459 * while we are sleeping in so that we'll be awakened when
3460 * they arrive. Unblocking is always fine, we can avoid
3461 * set_current_blocked().
3463 tsk->real_blocked = tsk->blocked;
3464 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3465 recalc_sigpending();
3466 spin_unlock_irq(&tsk->sighand->siglock);
3468 __set_current_state(TASK_INTERRUPTIBLE);
3469 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3470 HRTIMER_MODE_REL);
3471 spin_lock_irq(&tsk->sighand->siglock);
3472 __set_task_blocked(tsk, &tsk->real_blocked);
3473 sigemptyset(&tsk->real_blocked);
3474 sig = dequeue_signal(tsk, &mask, info);
3476 spin_unlock_irq(&tsk->sighand->siglock);
3478 if (sig)
3479 return sig;
3480 return ret ? -EINTR : -EAGAIN;
3484 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3486 * @uthese: queued signals to wait for
3487 * @uinfo: if non-null, the signal's siginfo is returned here
3488 * @uts: upper bound on process time suspension
3489 * @sigsetsize: size of sigset_t type
3491 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
3492 siginfo_t __user *, uinfo,
3493 const struct __kernel_timespec __user *, uts,
3496 sigset_t these;
3497 struct timespec64 ts;
3498 kernel_siginfo_t info;
3499 int ret;
3501 /* XXX: Don't preclude handling different sized sigset_t's. */
3502 if (sigsetsize != sizeof(sigset_t))
3503 return -EINVAL;
3505 if (copy_from_user(&these, uthese, sizeof(these)))
3506 return -EFAULT;
3508 if (uts) {
3509 if (get_timespec64(&ts, uts))
3510 return -EFAULT;
3511 }
3513 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3515 if (ret > 0 && uinfo) {
3516 if (copy_siginfo_to_user(uinfo, &info))
3517 ret = -EFAULT;
3518 }
3520 return ret;
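/*
 * Example -- a userspace sketch of synchronous waiting through this
 * syscall. The waited-for signals must be blocked first, or they may be
 * delivered to a handler instead of being dequeued here:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGCHLD);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGCHLD)
 *		;	// si.si_pid identifies the child
 */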
3523 #ifdef CONFIG_COMPAT_32BIT_TIME
3524 SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3525 siginfo_t __user *, uinfo,
3526 const struct old_timespec32 __user *, uts,
3530 struct timespec64 ts;
3531 kernel_siginfo_t info;
3534 if (sigsetsize != sizeof(sigset_t))
3537 if (copy_from_user(&these, uthese, sizeof(these)))
3541 if (get_old_timespec32(&ts, uts))
3545 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3547 if (ret > 0 && uinfo) {
3548 if (copy_siginfo_to_user(uinfo, &info))
3556 #ifdef CONFIG_COMPAT
3557 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3558 struct compat_siginfo __user *, uinfo,
3559 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3562 struct timespec64 t;
3563 kernel_siginfo_t info;
3566 if (sigsetsize != sizeof(sigset_t))
3569 if (get_compat_sigset(&s, uthese))
3573 if (get_timespec64(&t, uts))
3577 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3579 if (ret > 0 && uinfo) {
3580 if (copy_siginfo_to_user32(uinfo, &info))
3587 #ifdef CONFIG_COMPAT_32BIT_TIME
3588 COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
3589 struct compat_siginfo __user *, uinfo,
3590 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
3593 struct timespec64 t;
3594 kernel_siginfo_t info;
3597 if (sigsetsize != sizeof(sigset_t))
3600 if (get_compat_sigset(&s, uthese))
3604 if (get_old_timespec32(&t, uts))
3608 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3610 if (ret > 0 && uinfo) {
3611 if (copy_siginfo_to_user32(uinfo, &info))
3620 static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3622 clear_siginfo(info);
3623 info->si_signo = sig;
3625 info->si_code = SI_USER;
3626 info->si_pid = task_tgid_vnr(current);
3627 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3631 * sys_kill - send a signal to a process
3632 * @pid: the PID of the process
3633 * @sig: signal to be sent
3635 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
3637 struct kernel_siginfo info;
3639 prepare_kill_siginfo(sig, &info);
3641 return kill_something_info(sig, &info, pid);
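/*
 * Example -- a userspace sketch ("pid" is an assumed target): signal 0
 * performs only the existence and permission checks, so kill(pid, 0) is
 * a common liveness probe:
 *
 *	if (kill(pid, 0) == 0 || errno == EPERM)
 *		;	// the process exists
 *	else if (errno == ESRCH)
 *		;	// no such process
 */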
3645 * Verify that the signaler and signalee either are in the same pid namespace
3646 * or that the signaler's pid namespace is an ancestor of the signalee's
3647 * pid namespace.
3649 static bool access_pidfd_pidns(struct pid *pid)
3651 struct pid_namespace *active = task_active_pid_ns(current);
3652 struct pid_namespace *p = ns_of_pid(pid);
3654 for (;;) {
3655 if (!p)
3656 return false;
3657 if (p == active)
3658 break;
3659 p = p->parent;
3660 }
3662 return true;
3665 static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
3667 #ifdef CONFIG_COMPAT
3669 * Avoid hooking up compat syscalls and instead handle necessary
3670 * conversions here. Note, this is a stop-gap measure and should not be
3671 * considered a generic solution.
3673 if (in_compat_syscall())
3674 return copy_siginfo_from_user32(
3675 kinfo, (struct compat_siginfo __user *)info);
3677 return copy_siginfo_from_user(kinfo, info);
3680 static struct pid *pidfd_to_pid(const struct file *file)
3682 struct pid *pid;
3684 pid = pidfd_pid(file);
3685 if (!IS_ERR(pid))
3686 return pid;
3688 return tgid_pidfd_to_pid(file);
3692 * sys_pidfd_send_signal - Signal a process through a pidfd
3693 * @pidfd: file descriptor of the process
3694 * @sig: signal to send
3695 * @info: signal info
3696 * @flags: future flags
3698 * The syscall currently only signals via PIDTYPE_PID which covers
3699 * kill(<positive-pid>, <signal>). It does not signal threads or process
3700 * groups.
3701 * In order to extend the syscall to threads and process groups the @flags
3702 * argument should be used. In essence, the @flags argument will determine
3703 * what is signaled and not the file descriptor itself. Put in other words,
3704 * grouping is a property of the flags argument, not a property of the file
3705 * descriptor.
3707 * Return: 0 on success, negative errno on failure
3709 SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3710 siginfo_t __user *, info, unsigned int, flags)
3712 int ret;
3713 struct fd f;
3714 struct pid *pid;
3715 kernel_siginfo_t kinfo;
3717 /* Enforce that flags are 0 until we add an extension. */
3718 if (flags)
3719 return -EINVAL;
3721 f = fdget(pidfd);
3722 if (!f.file)
3723 return -EBADF;
3725 /* Is this a pidfd? */
3726 pid = pidfd_to_pid(f.file);
3727 if (IS_ERR(pid)) {
3728 ret = PTR_ERR(pid);
3729 goto err;
3730 }
3732 ret = -EINVAL;
3733 if (!access_pidfd_pidns(pid))
3734 goto err;
3736 if (info) {
3737 ret = copy_siginfo_from_user_any(&kinfo, info);
3738 if (unlikely(ret))
3739 goto err;
3741 ret = -EINVAL;
3742 if (unlikely(sig != kinfo.si_signo))
3743 goto err;
3745 /* Only allow sending arbitrary signals to yourself. */
3746 ret = -EPERM;
3747 if ((task_pid(current) != pid) &&
3748 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3749 goto err;
3750 } else {
3751 prepare_kill_siginfo(sig, &kinfo);
3752 }
3754 ret = kill_pid_info(sig, &kinfo, pid);
3756 err:
3757 fdput(f);
3758 return ret;
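/*
 * Example -- a userspace sketch, assuming a kernel that also provides
 * pidfd_open(2); signalling through a pidfd avoids PID-reuse races, and
 * raw syscall() is shown because libc wrappers appeared only later
 * ("pid" is an assumed target process id):
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *	if (pidfd >= 0)
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 */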
3761 static int
3762 do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
3764 struct task_struct *p;
3765 int error = -ESRCH;
3767 rcu_read_lock();
3768 p = find_task_by_vpid(pid);
3769 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
3770 error = check_kill_permission(sig, info, p);
3772 * The null signal is a permissions and process existence
3773 * probe. No signal is actually delivered.
3775 if (!error && sig) {
3776 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
3778 * If lock_task_sighand() failed we pretend the task
3779 * dies after receiving the signal. The window is tiny,
3780 * and the signal is private anyway.
3782 if (unlikely(error == -ESRCH))
3783 error = 0;
3784 }
3785 }
3787 rcu_read_unlock();
3788 return error;
3791 static int do_tkill(pid_t tgid, pid_t pid, int sig)
3793 struct kernel_siginfo info;
3795 clear_siginfo(&info);
3796 info.si_signo = sig;
3798 info.si_code = SI_TKILL;
3799 info.si_pid = task_tgid_vnr(current);
3800 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
3802 return do_send_specific(tgid, pid, sig, &info);
3806 * sys_tgkill - send signal to one specific thread
3807 * @tgid: the thread group ID of the thread
3808 * @pid: the PID of the thread
3809 * @sig: signal to be sent
3811 * This syscall also checks the @tgid and returns -ESRCH even if the PID
3812 * exists but no longer belongs to the target process. This
3813 * method solves the problem of threads exiting and PIDs getting reused.
3815 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
3817 /* This is only valid for single tasks */
3818 if (pid <= 0 || tgid <= 0)
3819 return -EINVAL;
3821 return do_tkill(tgid, pid, sig);
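/*
 * Example -- a userspace sketch directing a signal at one thread of the
 * current process; syscall() is used since older libcs lack wrappers:
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = syscall(SYS_gettid);
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */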
3825 * sys_tkill - send signal to one specific task
3826 * @pid: the PID of the task
3827 * @sig: signal to be sent
3829 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3831 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
3833 /* This is only valid for single tasks */
3834 if (pid <= 0)
3835 return -EINVAL;
3837 return do_tkill(0, pid, sig);
3840 static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
3842 /* Not even root can pretend to send signals from the kernel.
3843 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3845 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3846 (task_pid_vnr(current) != pid))
3847 return -EPERM;
3849 /* POSIX.1b doesn't mention process groups. */
3850 return kill_proc_info(sig, info, pid);
3854 * sys_rt_sigqueueinfo - send signal information to a signal
3855 * @pid: the PID of the thread
3856 * @sig: signal to be sent
3857 * @uinfo: signal info to be sent
3859 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3860 siginfo_t __user *, uinfo)
3862 kernel_siginfo_t info;
3863 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3864 if (unlikely(ret))
3865 return ret;
3866 return do_rt_sigqueueinfo(pid, sig, &info);
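/*
 * Example -- a userspace sketch ("pid" is an assumed target); the libc
 * sigqueue() wrapper builds the siginfo (si_code == SI_QUEUE) and enters
 * through this syscall, letting the sender attach one word of data:
 *
 *	union sigval val = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGRTMIN, val);	// handler sees info->si_value
 */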
3869 #ifdef CONFIG_COMPAT
3870 COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3873 struct compat_siginfo __user *, uinfo)
3875 kernel_siginfo_t info;
3876 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3877 if (unlikely(ret))
3878 return ret;
3879 return do_rt_sigqueueinfo(pid, sig, &info);
3883 static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
3885 /* This is only valid for single tasks */
3886 if (pid <= 0 || tgid <= 0)
3887 return -EINVAL;
3889 /* Not even root can pretend to send signals from the kernel.
3890 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3892 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3893 (task_pid_vnr(current) != pid))
3894 return -EPERM;
3896 return do_send_specific(tgid, pid, sig, info);
3899 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
3900 siginfo_t __user *, uinfo)
3902 kernel_siginfo_t info;
3903 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
3904 if (unlikely(ret))
3905 return ret;
3906 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3909 #ifdef CONFIG_COMPAT
3910 COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3914 struct compat_siginfo __user *, uinfo)
3916 kernel_siginfo_t info;
3917 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
3918 if (unlikely(ret))
3919 return ret;
3920 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
3925 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
3927 void kernel_sigaction(int sig, __sighandler_t action)
3929 spin_lock_irq(&current->sighand->siglock);
3930 current->sighand->action[sig - 1].sa.sa_handler = action;
3931 if (action == SIG_IGN) {
3932 sigset_t mask;
3934 sigemptyset(&mask);
3935 sigaddset(&mask, sig);
3937 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
3938 flush_sigqueue_mask(&mask, &current->pending);
3939 recalc_sigpending();
3941 spin_unlock_irq(&current->sighand->siglock);
3943 EXPORT_SYMBOL(kernel_sigaction);
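/*
 * Example -- an in-kernel sketch; allow_signal() is a thin wrapper
 * around kernel_sigaction(), and a kthread using it typically polls
 * signal_pending() in its main loop (the work is a placeholder):
 *
 *	allow_signal(SIGTERM);
 *	while (!kthread_should_stop()) {
 *		// ... do work ...
 *		if (signal_pending(current))
 *			break;
 *	}
 */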
3945 void __weak sigaction_compat_abi(struct k_sigaction *act,
3946 struct k_sigaction *oact)
3950 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
3952 struct task_struct *p = current, *t;
3953 struct k_sigaction *k;
3954 sigset_t mask;
3956 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
3957 return -EINVAL;
3959 k = &p->sighand->action[sig-1];
3961 spin_lock_irq(&p->sighand->siglock);
3962 if (oact)
3963 *oact = *k;
3965 sigaction_compat_abi(act, oact);
3967 if (act) {
3968 sigdelsetmask(&act->sa.sa_mask,
3969 sigmask(SIGKILL) | sigmask(SIGSTOP));
3970 *k = *act;
3973 * "Setting a signal action to SIG_IGN for a signal that is
3974 * pending shall cause the pending signal to be discarded,
3975 * whether or not it is blocked."
3977 * "Setting a signal action to SIG_DFL for a signal that is
3978 * pending and whose default action is to ignore the signal
3979 * (for example, SIGCHLD), shall cause the pending signal to
3980 * be discarded, whether or not it is blocked"
3982 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
3983 sigemptyset(&mask);
3984 sigaddset(&mask, sig);
3985 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
3986 for_each_thread(p, t)
3987 flush_sigqueue_mask(&mask, &t->pending);
3988 }
3989 }
3991 spin_unlock_irq(&p->sighand->siglock);
3992 return 0;
3995 static int
3996 do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
3997 size_t min_ss_size)
3999 struct task_struct *t = current;
4001 if (oss) {
4002 memset(oss, 0, sizeof(stack_t));
4003 oss->ss_sp = (void __user *) t->sas_ss_sp;
4004 oss->ss_size = t->sas_ss_size;
4005 oss->ss_flags = sas_ss_flags(sp) |
4006 (current->sas_ss_flags & SS_FLAG_BITS);
4007 }
4009 if (ss) {
4010 void __user *ss_sp = ss->ss_sp;
4011 size_t ss_size = ss->ss_size;
4012 unsigned ss_flags = ss->ss_flags;
4013 int ss_mode;
4015 if (unlikely(on_sig_stack(sp)))
4016 return -EPERM;
4018 ss_mode = ss_flags & ~SS_FLAG_BITS;
4019 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4020 ss_mode != 0))
4021 return -EINVAL;
4023 if (ss_mode == SS_DISABLE) {
4024 ss_size = 0;
4025 ss_sp = NULL;
4026 } else {
4027 if (unlikely(ss_size < min_ss_size))
4028 return -ENOMEM;
4029 }
4031 t->sas_ss_sp = (unsigned long) ss_sp;
4032 t->sas_ss_size = ss_size;
4033 t->sas_ss_flags = ss_flags;
4034 }
4035 return 0;
4038 SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4040 stack_t new, old;
4041 int err;
4042 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4043 return -EFAULT;
4044 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
4045 current_user_stack_pointer(),
4046 MINSIGSTKSZ);
4047 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4048 err = -EFAULT;
4049 return err;
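/*
 * Example -- a userspace sketch; an alternate stack is what lets a
 * SIGSEGV handler run after the normal stack has overflowed, and must
 * be paired with SA_ONSTACK when the handler is registered:
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *
 *	sigaltstack(&ss, NULL);
 *	// then register the handler with sa.sa_flags |= SA_ONSTACK
 */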
4052 int restore_altstack(const stack_t __user *uss)
4054 stack_t new;
4055 if (copy_from_user(&new, uss, sizeof(stack_t)))
4056 return -EFAULT;
4057 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4058 MINSIGSTKSZ);
4059 /* squash all but EFAULT for now */
4060 return 0;
4063 int __save_altstack(stack_t __user *uss, unsigned long sp)
4065 struct task_struct *t = current;
4066 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4067 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4068 __put_user(t->sas_ss_size, &uss->ss_size);
4069 if (err)
4070 return err;
4071 if (t->sas_ss_flags & SS_AUTODISARM)
4072 sas_ss_reset(t);
4073 return err;
4076 #ifdef CONFIG_COMPAT
4077 static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4078 compat_stack_t __user *uoss_ptr)
4080 int ret;
4081 stack_t uss, uoss;
4083 if (uss_ptr) {
4084 compat_stack_t uss32;
4085 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4086 return -EFAULT;
4087 uss.ss_sp = compat_ptr(uss32.ss_sp);
4088 uss.ss_flags = uss32.ss_flags;
4089 uss.ss_size = uss32.ss_size;
4090 }
4091 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
4092 compat_user_stack_pointer(),
4093 COMPAT_MINSIGSTKSZ);
4094 if (ret >= 0 && uoss_ptr) {
4095 compat_stack_t old;
4096 memset(&old, 0, sizeof(old));
4097 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4098 old.ss_flags = uoss.ss_flags;
4099 old.ss_size = uoss.ss_size;
4100 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
4101 ret = -EFAULT;
4102 }
4103 return ret;
4106 COMPAT_SYSCALL_DEFINE2(sigaltstack,
4107 const compat_stack_t __user *, uss_ptr,
4108 compat_stack_t __user *, uoss_ptr)
4110 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4113 int compat_restore_altstack(const compat_stack_t __user *uss)
4115 int err = do_compat_sigaltstack(uss, NULL);
4116 /* squash all but -EFAULT for now */
4117 return err == -EFAULT ? err : 0;
4120 int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4122 int err;
4123 struct task_struct *t = current;
4124 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4125 &uss->ss_sp) |
4126 __put_user(t->sas_ss_flags, &uss->ss_flags) |
4127 __put_user(t->sas_ss_size, &uss->ss_size);
4128 if (err)
4129 return err;
4130 if (t->sas_ss_flags & SS_AUTODISARM)
4131 sas_ss_reset(t);
4132 return err;
4136 #ifdef __ARCH_WANT_SYS_SIGPENDING
4139 * sys_sigpending - examine pending signals
4140 * @uset: where mask of pending signal is returned
4142 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
4144 sigset_t set;
4146 if (sizeof(old_sigset_t) > sizeof(*uset))
4147 return -EINVAL;
4149 do_sigpending(&set);
4151 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4152 return -EFAULT;
4153 return 0;
4157 #ifdef CONFIG_COMPAT
4158 COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4160 sigset_t set;
4162 do_sigpending(&set);
4164 return put_user(set.sig[0], set32);
4170 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
4172 * sys_sigprocmask - examine and change blocked signals
4173 * @how: whether to add, remove, or set signals
4174 * @nset: signals to add or remove (if non-null)
4175 * @oset: previous value of signal mask if non-null
4177 * Some platforms have their own version with special arguments;
4178 * others support only sys_rt_sigprocmask.
4181 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
4182 old_sigset_t __user *, oset)
4184 old_sigset_t old_set, new_set;
4185 sigset_t new_blocked;
4187 old_set = current->blocked.sig[0];
4189 if (nset) {
4190 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4191 return -EFAULT;
4192 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
4193 new_blocked = current->blocked;
4195 switch (how) {
4196 case SIG_BLOCK:
4197 sigaddsetmask(&new_blocked, new_set);
4198 break;
4199 case SIG_UNBLOCK:
4200 sigdelsetmask(&new_blocked, new_set);
4201 break;
4202 case SIG_SETMASK:
4203 new_blocked.sig[0] = new_set;
4204 break;
4205 default:
4206 return -EINVAL;
4207 }
4209 set_current_blocked(&new_blocked);
4210 }
4212 if (oset) {
4213 if (copy_to_user(oset, &old_set, sizeof(*oset)))
4214 return -EFAULT;
4215 }
4217 return 0;
4219 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4221 #ifndef CONFIG_ODD_RT_SIGACTION
4223 * sys_rt_sigaction - alter an action taken by a process
4224 * @sig: signal to be sent
4225 * @act: new sigaction
4226 * @oact: used to save the previous sigaction
4227 * @sigsetsize: size of sigset_t type
4229 SYSCALL_DEFINE4(rt_sigaction, int, sig,
4230 const struct sigaction __user *, act,
4231 struct sigaction __user *, oact,
4232 size_t, sigsetsize)
4234 struct k_sigaction new_sa, old_sa;
4235 int ret;
4237 /* XXX: Don't preclude handling different sized sigset_t's. */
4238 if (sigsetsize != sizeof(sigset_t))
4239 return -EINVAL;
4241 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4242 return -EFAULT;
4244 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
4245 if (ret)
4246 return ret;
4248 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4249 return -EFAULT;
4251 return 0;
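/*
 * Example -- a userspace sketch; libc's sigaction() marshals into this
 * syscall, and SA_SIGINFO selects the three-argument handler form:
 *
 *	static void handler(int sig, siginfo_t *si, void *uctx)
 *	{
 *		// si->si_pid is the sender for user-generated signals
 *	}
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */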
4253 #ifdef CONFIG_COMPAT
4254 COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4255 const struct compat_sigaction __user *, act,
4256 struct compat_sigaction __user *, oact,
4257 compat_size_t, sigsetsize)
4259 struct k_sigaction new_ka, old_ka;
4260 #ifdef __ARCH_HAS_SA_RESTORER
4261 compat_uptr_t restorer;
4262 #endif
4263 int ret;
4265 /* XXX: Don't preclude handling different sized sigset_t's. */
4266 if (sigsetsize != sizeof(compat_sigset_t))
4267 return -EINVAL;
4269 if (act) {
4270 compat_uptr_t handler;
4271 ret = get_user(handler, &act->sa_handler);
4272 new_ka.sa.sa_handler = compat_ptr(handler);
4273 #ifdef __ARCH_HAS_SA_RESTORER
4274 ret |= get_user(restorer, &act->sa_restorer);
4275 new_ka.sa.sa_restorer = compat_ptr(restorer);
4277 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
4278 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
4279 if (ret)
4280 return -EFAULT;
4281 }
4283 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4284 if (!ret && oact) {
4285 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4286 &oact->sa_handler);
4287 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4288 sizeof(oact->sa_mask));
4289 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
4290 #ifdef __ARCH_HAS_SA_RESTORER
4291 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4292 &oact->sa_restorer);
4293 #endif
4294 }
4295 return ret;
4298 #endif /* !CONFIG_ODD_RT_SIGACTION */
4300 #ifdef CONFIG_OLD_SIGACTION
4301 SYSCALL_DEFINE3(sigaction, int, sig,
4302 const struct old_sigaction __user *, act,
4303 struct old_sigaction __user *, oact)
4305 struct k_sigaction new_ka, old_ka;
4306 int ret;
4308 if (act) {
4309 old_sigset_t mask;
4310 if (!access_ok(act, sizeof(*act)) ||
4311 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4312 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4313 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4314 __get_user(mask, &act->sa_mask))
4315 return -EFAULT;
4316 #ifdef __ARCH_HAS_KA_RESTORER
4317 new_ka.ka_restorer = NULL;
4318 #endif
4319 siginitset(&new_ka.sa.sa_mask, mask);
4320 }
4322 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4324 if (!ret && oact) {
4325 if (!access_ok(oact, sizeof(*oact)) ||
4326 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4327 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4328 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4329 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4330 return -EFAULT;
4331 }
4333 return ret;
4336 #ifdef CONFIG_COMPAT_OLD_SIGACTION
4337 COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4338 const struct compat_old_sigaction __user *, act,
4339 struct compat_old_sigaction __user *, oact)
4341 struct k_sigaction new_ka, old_ka;
4342 int ret;
4343 compat_old_sigset_t mask;
4344 compat_uptr_t handler, restorer;
4346 if (act) {
4347 if (!access_ok(act, sizeof(*act)) ||
4348 __get_user(handler, &act->sa_handler) ||
4349 __get_user(restorer, &act->sa_restorer) ||
4350 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4351 __get_user(mask, &act->sa_mask))
4352 return -EFAULT;
4354 #ifdef __ARCH_HAS_KA_RESTORER
4355 new_ka.ka_restorer = NULL;
4356 #endif
4357 new_ka.sa.sa_handler = compat_ptr(handler);
4358 new_ka.sa.sa_restorer = compat_ptr(restorer);
4359 siginitset(&new_ka.sa.sa_mask, mask);
4360 }
4362 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4364 if (!ret && oact) {
4365 if (!access_ok(oact, sizeof(*oact)) ||
4366 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4367 &oact->sa_handler) ||
4368 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4369 &oact->sa_restorer) ||
4370 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4371 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4372 return -EFAULT;
4373 }
4375 return ret;
4378 #ifdef CONFIG_SGETMASK_SYSCALL
4381 * For backwards compatibility. Functionality superseded by sigprocmask.
4383 SYSCALL_DEFINE0(sgetmask)
4386 return current->blocked.sig[0];
4389 SYSCALL_DEFINE1(ssetmask, int, newmask)
4391 int old = current->blocked.sig[0];
4392 sigset_t newset;
4394 siginitset(&newset, newmask);
4395 set_current_blocked(&newset);
4397 return old;
4399 #endif /* CONFIG_SGETMASK_SYSCALL */
4401 #ifdef __ARCH_WANT_SYS_SIGNAL
4403 * For backwards compatibility. Functionality superseded by sigaction.
4405 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
4407 struct k_sigaction new_sa, old_sa;
4408 int ret;
4410 new_sa.sa.sa_handler = handler;
4411 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
4412 sigemptyset(&new_sa.sa.sa_mask);
4414 ret = do_sigaction(sig, &new_sa, &old_sa);
4416 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
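/*
 * Example -- a userspace sketch; because of SA_ONESHOT above, the
 * classic System V pattern re-installs the handler on every delivery,
 * and sigaction() is preferred precisely to avoid this re-arm window:
 *
 *	void on_int(int sig)
 *	{
 *		signal(SIGINT, on_int);	// re-arm, handler was reset
 *	}
 *
 *	... signal(SIGINT, on_int); ...
 */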
4418 #endif /* __ARCH_WANT_SYS_SIGNAL */
4420 #ifdef __ARCH_WANT_SYS_PAUSE
4422 SYSCALL_DEFINE0(pause)
4424 while (!signal_pending(current)) {
4425 __set_current_state(TASK_INTERRUPTIBLE);
4426 schedule();
4427 }
4428 return -ERESTARTNOHAND;
4433 static int sigsuspend(sigset_t *set)
4435 current->saved_sigmask = current->blocked;
4436 set_current_blocked(set);
4438 while (!signal_pending(current)) {
4439 __set_current_state(TASK_INTERRUPTIBLE);
4440 schedule();
4441 }
4442 set_restore_sigmask();
4443 return -ERESTARTNOHAND;
4447 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
4448 * until a signal is received
4449 * @unewset: new signal mask value
4450 * @sigsetsize: size of sigset_t type
4452 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
4454 sigset_t newset;
4456 /* XXX: Don't preclude handling different sized sigset_t's. */
4457 if (sigsetsize != sizeof(sigset_t))
4458 return -EINVAL;
4460 if (copy_from_user(&newset, unewset, sizeof(newset)))
4461 return -EFAULT;
4462 return sigsuspend(&newset);
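/*
 * Example -- a userspace sketch of the classic race-free wait: the
 * signal stays blocked while the predicate is tested, and sigsuspend()
 * unblocks it and sleeps in one atomic step ("flag" is set by the
 * handler):
 *
 *	volatile sig_atomic_t flag;
 *	sigset_t block, old;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &old);
 *	while (!flag)
 *		sigsuspend(&old);	// returns -1 with errno == EINTR
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */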
4465 #ifdef CONFIG_COMPAT
4466 COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4468 sigset_t newset;
4470 /* XXX: Don't preclude handling different sized sigset_t's. */
4471 if (sigsetsize != sizeof(sigset_t))
4472 return -EINVAL;
4474 if (get_compat_sigset(&newset, unewset))
4475 return -EFAULT;
4476 return sigsuspend(&newset);
4480 #ifdef CONFIG_OLD_SIGSUSPEND
4481 SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4483 sigset_t blocked;
4484 siginitset(&blocked, mask);
4485 return sigsuspend(&blocked);
4488 #ifdef CONFIG_OLD_SIGSUSPEND3
4489 SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4491 sigset_t blocked;
4492 siginitset(&blocked, mask);
4493 return sigsuspend(&blocked);
4497 __weak const char *arch_vma_name(struct vm_area_struct *vma)
4499 return NULL;
4502 static inline void siginfo_buildtime_checks(void)
4504 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
4506 /* Verify the offsets in the two siginfos match */
4507 #define CHECK_OFFSET(field) \
4508 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4510 /* kill */
4511 CHECK_OFFSET(si_pid);
4512 CHECK_OFFSET(si_uid);
4514 /* timer */
4515 CHECK_OFFSET(si_tid);
4516 CHECK_OFFSET(si_overrun);
4517 CHECK_OFFSET(si_value);
4519 /* rt */
4520 CHECK_OFFSET(si_pid);
4521 CHECK_OFFSET(si_uid);
4522 CHECK_OFFSET(si_value);
4524 /* sigchld */
4525 CHECK_OFFSET(si_pid);
4526 CHECK_OFFSET(si_uid);
4527 CHECK_OFFSET(si_status);
4528 CHECK_OFFSET(si_utime);
4529 CHECK_OFFSET(si_stime);
4531 /* sigfault */
4532 CHECK_OFFSET(si_addr);
4533 CHECK_OFFSET(si_addr_lsb);
4534 CHECK_OFFSET(si_lower);
4535 CHECK_OFFSET(si_upper);
4536 CHECK_OFFSET(si_pkey);
4538 /* sigpoll */
4539 CHECK_OFFSET(si_band);
4540 CHECK_OFFSET(si_fd);
4542 /* sigsys */
4543 CHECK_OFFSET(si_call_addr);
4544 CHECK_OFFSET(si_syscall);
4545 CHECK_OFFSET(si_arch);
4546 #undef CHECK_OFFSET
4549 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4550 offsetof(struct siginfo, si_addr));
4551 if (sizeof(int) == sizeof(void __user *)) {
4552 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4553 sizeof(void __user *));
4555 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4556 sizeof_field(struct siginfo, si_uid)) !=
4557 sizeof(void __user *));
4558 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4559 offsetof(struct siginfo, si_uid));
4561 #ifdef CONFIG_COMPAT
4562 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4563 offsetof(struct compat_siginfo, si_addr));
4564 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4565 sizeof(compat_uptr_t));
4566 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4567 sizeof_field(struct siginfo, si_pid));
4571 void __init signals_init(void)
4573 siginfo_buildtime_checks();
4575 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
4578 #ifdef CONFIG_KGDB_KDB
4579 #include <linux/kdb.h>
4581 * kdb_send_sig - Allows kdb to send signals without exposing
4582 * signal internals. This function checks if the required locks are
4583 * available before calling the main signal code, to avoid kdb
4584 * deadlocks.
4586 void kdb_send_sig(struct task_struct *t, int sig)
4588 static struct task_struct *kdb_prev_t;
4589 int new_t, ret;
4590 if (!spin_trylock(&t->sighand->siglock)) {
4591 kdb_printf("Can't do kill command now.\n"
4592 "The sigmask lock is held somewhere else in "
4593 "kernel, try again later\n");
4594 return;
4595 }
4596 new_t = kdb_prev_t != t;
4597 kdb_prev_t = t;
4598 if (t->state != TASK_RUNNING && new_t) {
4599 spin_unlock(&t->sighand->siglock);
4600 kdb_printf("Process is not RUNNING, sending a signal from "
4601 "kdb risks deadlock\n"
4602 "on the run queue locks. "
4603 "The signal has _not_ been sent.\n"
4604 "Reissue the kill command if you want to risk "
4605 "the deadlock.\n");
4606 return;
4607 }
4608 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
4609 spin_unlock(&t->sighand->siglock);
4610 if (ret)
4611 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4612 sig, t->pid);
4613 else
4614 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4616 #endif /* CONFIG_KGDB_KDB */