signal: Implement force_fatal_sig
[linux-2.6-block.git] / kernel / signal.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel-generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even an ignored signal, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know it is safe
	 * clear the flag themselves.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current; the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
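
/*
 * Illustrative sketch (not part of the original file): the first-word
 * special case above means a pending synchronous signal is reported
 * before an asynchronous one, even when the latter has a lower signal
 * number. Assuming SIGINT and SIGSEGV are both pending and unblocked:
 *
 *	struct sigpending pending;
 *	sigset_t blocked;
 *
 *	init_sigpending(&pending);
 *	sigemptyset(&blocked);
 *	sigaddset(&pending.signal, SIGINT);
 *	sigaddset(&pending.signal, SIGSEGV);
 *	WARN_ON(next_signal(&pending, &blocked) != SIGSEGV);
 */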

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}
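
/*
 * Usage sketch (illustrative, not part of the original file): jobctl
 * bits are only manipulated under siglock, so a caller scheduling a
 * notification trap for a ptraced task, in the style of
 * ptrace_trap_notify() below, might do:
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		task_set_jobctl_pending(task, JOBCTL_TRAP_NOTIFY);
 *		unlock_task_sighand(task, &flags);
 *	}
 */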

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If %JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking; @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read the comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts = NULL;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold the rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
	switch (sigpending) {
	case 1:
		if (likely(get_ucounts(ucounts)))
			break;
		fallthrough;
	case LONG_MAX:
		/*
		 * we need to decrease the ucount in the userns tree on any
		 * failure to avoid counts leaking.
		 */
		dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1);
		rcu_read_unlock();
		return NULL;
	}
	rcu_read_unlock();

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1))
			put_ucounts(ucounts);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) {
		put_ucounts(q->ucounts);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);
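
/*
 * Usage sketch (illustrative, not part of the original file): a kthread
 * that opted in to a signal with allow_signal() typically flushes its
 * queues once the signal has been acted upon:
 *
 *	allow_signal(SIGINT);
 *	...
 *	if (signal_pending(current)) {
 *		handle_stop_request();	(hypothetical helper)
 *		flush_signals(current);
 *	}
 */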

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
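
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * draining its own unblocked pending signals does so under siglock, in
 * the style of sigtimedwait():
 *
 *	kernel_siginfo_t info;
 *	int signr;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	signr = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */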

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
	       (!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let the audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify the ptracer of an event.  @t must have been seized by
 * the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, the STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special, or the siginfo carries ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
		enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want recursive SIGSEGVs and the like; that is why we
 * also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool sigdfl)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || sigdfl) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, false);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock; we can race with de_thread() or
		 * __exit_signal().  In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}
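
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * normally go through the lock_task_sighand() wrapper, which pairs with
 * unlock_task_sighand() and fails when the task is already dead, as
 * do_send_sig_info() above does:
 *
 *	unsigned long flags;
 *
 *	if (!lock_task_sighand(p, &flags))
 *		return -ESRCH;
 *	(inspect or modify p's signal state here)
 *	unlock_task_sighand(p, &flags);
 */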

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL; if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer, those low 32bits will be stored at a
 * higher address than appears in a 32 bit pointer.  So userspace will
 * not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
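
/*
 * Illustrative sketch (not part of the original file): per the layout
 * comment above, a caller completing asyncio for a 32-bit process on a
 * 64-bit kernel is expected to put the user pointer in sival_int:
 *
 *	sigval_t addr;
 *
 *	if (is_32bit_task)			(hypothetical flag)
 *		addr.sival_int = lower_32_bits(user_ptr);
 *	else
 *		addr.sival_ptr = (void __user *)user_ptr;
 *	ret = kill_pid_usb_asyncio(SIGIO, 0, addr, pid, cred);
 */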

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
			    !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
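
/*
 * Usage sketch (illustrative, not part of the original file): the priv
 * argument selects __si_special() above, i.e. whether the signal is
 * presented as kernel-generated (SEND_SIG_PRIV) or as if sent from a
 * user process (SEND_SIG_NOINFO):
 *
 *	send_sig(SIGKILL, task, 1);	(kernel-generated; not ignorable
 *					 by a pid-namespace init)
 */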

void force_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info(&info);
}
EXPORT_SYMBOL(force_sig);
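
/*
 * Usage sketch (illustrative, not part of the original file): arch trap
 * handlers use force_sig() for synchronous delivery to current when no
 * more specific force_sig_*() helper applies:
 *
 *	if (insn_is_unsupported)	(hypothetical predicate)
 *		force_sig(SIGILL);
 */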

void force_fatal_sig(int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_KERNEL;
	info.si_pid = 0;
	info.si_uid = 0;
	force_sig_info_to_task(&info, current, true);
}
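
/*
 * Usage sketch (illustrative, not part of the original file): unlike
 * force_sig(), force_fatal_sig() passes sigdfl as true, so the handler
 * is reset to SIG_DFL and the process cannot catch the signal; the
 * default, fatal action always runs. A caller that must not return to
 * broken userspace might do:
 *
 *	if (setup_failed)		(hypothetical predicate)
 *		force_fatal_sig(SIGSEGV);
 */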

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV.  And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info_to_task(&info, t, false);
}

int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
{
	return force_sig_fault_to_task(sig, code, addr
				       ___ARCH_SI_IA64(imm, flags, isr), current);
}
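
/*
 * Usage sketch (illustrative, not part of the original file): an arch
 * page-fault handler reports an unresolvable user access on current as:
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)fault_addr);
 *
 * (the extra ___ARCH_SI_IA64() arguments exist only on ia64)
 */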
1717
1718int send_sig_fault(int sig, int code, void __user *addr
f8ec6601
EB
1719 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1720 , struct task_struct *t)
1721{
ae7795bc 1722 struct kernel_siginfo info;
f8ec6601
EB
1723
1724 clear_siginfo(&info);
1725 info.si_signo = sig;
1726 info.si_errno = 0;
1727 info.si_code = code;
1728 info.si_addr = addr;
f8ec6601
EB
1729#ifdef __ia64__
1730 info.si_imm = imm;
1731 info.si_flags = flags;
1732 info.si_isr = isr;
1733#endif
1734 return send_sig_info(info.si_signo, &info, t);
1735}
1736
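/*
 * Editor's sketch (not part of the original source): how an architecture's
 * page fault handler typically reports a bad user access with the helper
 * above. On everything except ia64 the ___ARCH_SI_IA64() arguments expand
 * to nothing, so the call takes just these three parameters. The function
 * and the way the faulting address arrives are illustrative assumptions.
 */
static void example_report_user_fault(unsigned long fault_addr)
{
	/* Delivers SIGSEGV with si_code = SEGV_MAPERR and si_addr filled in. */
	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)fault_addr);
}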
f8eac901 1737int force_sig_mceerr(int code, void __user *addr, short lsb)
38246735 1738{
ae7795bc 1739 struct kernel_siginfo info;
38246735
EB
1740
1741 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1742 clear_siginfo(&info);
1743 info.si_signo = SIGBUS;
1744 info.si_errno = 0;
1745 info.si_code = code;
1746 info.si_addr = addr;
1747 info.si_addr_lsb = lsb;
a89e9b8a 1748 return force_sig_info(&info);
38246735
EB
1749}
1750
1751int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1752{
ae7795bc 1753 struct kernel_siginfo info;
38246735
EB
1754
1755 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1756 clear_siginfo(&info);
1757 info.si_signo = SIGBUS;
1758 info.si_errno = 0;
1759 info.si_code = code;
1760 info.si_addr = addr;
1761 info.si_addr_lsb = lsb;
1762 return send_sig_info(info.si_signo, &info, t);
1763}
1764EXPORT_SYMBOL(send_sig_mceerr);
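/*
 * Editor's sketch (not part of the original source): how a memory-failure
 * path might use the two helpers above. force_sig_mceerr() acts on current
 * and suits the "action required" case where the task just consumed poisoned
 * memory; send_sig_mceerr() can notify another task that merely maps the
 * page ("action optional"). @lsb encodes the granularity of the corruption,
 * e.g. PAGE_SHIFT for a whole poisoned page. The wrapper is an illustrative
 * assumption.
 */
static int example_report_poison(struct task_struct *t, void __user *addr)
{
	if (t == current)
		return force_sig_mceerr(BUS_MCEERR_AR, addr, PAGE_SHIFT);
	return send_sig_mceerr(BUS_MCEERR_AO, addr, PAGE_SHIFT, t);
}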
38246735 1765
38246735
EB
1766int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1767{
ae7795bc 1768 struct kernel_siginfo info;
38246735
EB
1769
1770 clear_siginfo(&info);
1771 info.si_signo = SIGSEGV;
1772 info.si_errno = 0;
1773 info.si_code = SEGV_BNDERR;
1774 info.si_addr = addr;
1775 info.si_lower = lower;
1776 info.si_upper = upper;
a89e9b8a 1777 return force_sig_info(&info);
38246735 1778}
38246735
EB
1779
1780#ifdef SEGV_PKUERR
1781int force_sig_pkuerr(void __user *addr, u32 pkey)
1782{
ae7795bc 1783 struct kernel_siginfo info;
38246735
EB
1784
1785 clear_siginfo(&info);
1786 info.si_signo = SIGSEGV;
1787 info.si_errno = 0;
1788 info.si_code = SEGV_PKUERR;
1789 info.si_addr = addr;
1790 info.si_pkey = pkey;
a89e9b8a 1791 return force_sig_info(&info);
38246735
EB
1792}
1793#endif
f8ec6601 1794
af5eeab7
EB
1795int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
1796{
1797 struct kernel_siginfo info;
1798
1799 clear_siginfo(&info);
0683b531
EB
1800 info.si_signo = SIGTRAP;
1801 info.si_errno = 0;
1802 info.si_code = TRAP_PERF;
1803 info.si_addr = addr;
1804 info.si_perf_data = sig_data;
1805 info.si_perf_type = type;
1806
af5eeab7
EB
1807 return force_sig_info(&info);
1808}
1809
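/*
 * Editor's sketch (not part of the original source): the perf sigtrap path
 * that uses the helper above, heavily simplified. When a perf event is
 * opened with attr.sigtrap set, an overflow delivers SIGTRAP/TRAP_PERF to
 * the event's owner: @sig_data comes from attr.sig_data and surfaces in
 * si_perf_data, @type from attr.type in si_perf_type. The wrapper below is
 * an assumption, not the real call site in kernel/events/core.c.
 */
static void example_perf_sigtrap(void __user *sample_ip, u32 attr_type,
				 u64 attr_sig_data)
{
	force_sig_perf(sample_ip, attr_type, attr_sig_data);
}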
307d522f
EB
1810/**
1811 * force_sig_seccomp - signals the task to allow in-process syscall emulation
1812 * @syscall: syscall number to send to userland
 1813 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to force a fatal, core-dumping SIGSYS
 1814 *
1815 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
1816 */
1817int force_sig_seccomp(int syscall, int reason, bool force_coredump)
1818{
1819 struct kernel_siginfo info;
1820
1821 clear_siginfo(&info);
1822 info.si_signo = SIGSYS;
1823 info.si_code = SYS_SECCOMP;
1824 info.si_call_addr = (void __user *)KSTK_EIP(current);
1825 info.si_errno = reason;
1826 info.si_arch = syscall_get_arch(current);
1827 info.si_syscall = syscall;
1828 return force_sig_info_to_task(&info, current, force_coredump);
1829}
1830
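/*
 * Editor's sketch (not part of the original source): how seccomp uses the
 * helper above. For SECCOMP_RET_TRAP the filter's return data is passed as
 * @reason, and the resulting SIGSYS can be caught so the task emulates the
 * syscall in its handler; passing true as the last argument instead forces
 * a fatal, core-dumping SIGSYS. A simplified assumption of the call sites
 * in kernel/seccomp.c.
 */
static void example_seccomp_trap(int this_syscall, u32 filter_data)
{
	force_sig_seccomp(this_syscall, filter_data, false);
}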
f71dd7dc
EB
1831/* For the crazy architectures that include trap information in
1832 * the errno field, instead of an actual errno value.
1833 */
1834int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1835{
ae7795bc 1836 struct kernel_siginfo info;
f71dd7dc
EB
1837
1838 clear_siginfo(&info);
1839 info.si_signo = SIGTRAP;
1840 info.si_errno = errno;
1841 info.si_code = TRAP_HWBKPT;
1842 info.si_addr = addr;
a89e9b8a 1843 return force_sig_info(&info);
f71dd7dc
EB
1844}
1845
2c9f7eaf
EB
1846/* For the rare architectures that include trap information using
1847 * si_trapno.
1848 */
1849int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
1850{
1851 struct kernel_siginfo info;
1852
1853 clear_siginfo(&info);
1854 info.si_signo = sig;
1855 info.si_errno = 0;
1856 info.si_code = code;
1857 info.si_addr = addr;
1858 info.si_trapno = trapno;
1859 return force_sig_info(&info);
1860}
1861
7de5f68d
EB
1862/* For the rare architectures that include trap information using
1863 * si_trapno.
1864 */
1865int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
1866 struct task_struct *t)
1867{
1868 struct kernel_siginfo info;
1869
1870 clear_siginfo(&info);
1871 info.si_signo = sig;
1872 info.si_errno = 0;
1873 info.si_code = code;
1874 info.si_addr = addr;
1875 info.si_trapno = trapno;
1876 return send_sig_info(info.si_signo, &info, t);
1877}
1878
c4b92fc1
EB
1879int kill_pgrp(struct pid *pid, int sig, int priv)
1880{
146a505d
PE
1881 int ret;
1882
1883 read_lock(&tasklist_lock);
1884 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1885 read_unlock(&tasklist_lock);
1886
1887 return ret;
c4b92fc1
EB
1888}
1889EXPORT_SYMBOL(kill_pgrp);
1890
1891int kill_pid(struct pid *pid, int sig, int priv)
1892{
1893 return kill_pid_info(sig, __si_special(priv), pid);
1894}
1895EXPORT_SYMBOL(kill_pid);
1896
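/*
 * Editor's sketch (not part of the original source): how a driver might use
 * the exported helper above. A non-zero @priv sends the signal as
 * SEND_SIG_PRIV, i.e. from the kernel itself rather than on behalf of a
 * user. The pid lookup and reference handling shown are the caller's
 * responsibility.
 */
static int example_signal_pid(pid_t nr, int sig)
{
	struct pid *pid = find_get_pid(nr);	/* takes a reference */
	int ret;

	if (!pid)
		return -ESRCH;
	ret = kill_pid(pid, sig, 1);
	put_pid(pid);
	return ret;
}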
1da177e4
LT
1897/*
1898 * These functions support sending signals using preallocated sigqueue
1899 * structures. This is needed "because realtime applications cannot
1900 * afford to lose notifications of asynchronous events, like timer
5aba085e 1901 * expirations or I/O completions". In the case of POSIX Timers
1da177e4
LT
 1902 * we allocate the sigqueue structure from timer_create(). If this
1903 * allocation fails we are able to report the failure to the application
1904 * with an EAGAIN error.
1905 */
1da177e4
LT
1906struct sigqueue *sigqueue_alloc(void)
1907{
69995ebb 1908 return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1da177e4
LT
1909}
1910
1911void sigqueue_free(struct sigqueue *q)
1912{
1913 unsigned long flags;
60187d27
ON
1914 spinlock_t *lock = &current->sighand->siglock;
1915
1da177e4
LT
1916 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1917 /*
c8e85b4f
ON
1918 * We must hold ->siglock while testing q->list
1919 * to serialize with collect_signal() or with
da7978b0 1920 * __exit_signal()->flush_sigqueue().
1da177e4 1921 */
60187d27 1922 spin_lock_irqsave(lock, flags);
c8e85b4f
ON
1923 q->flags &= ~SIGQUEUE_PREALLOC;
1924 /*
1925 * If it is queued it will be freed when dequeued,
1926 * like the "regular" sigqueue.
1927 */
60187d27 1928 if (!list_empty(&q->list))
c8e85b4f 1929 q = NULL;
60187d27
ON
1930 spin_unlock_irqrestore(lock, flags);
1931
c8e85b4f
ON
1932 if (q)
1933 __sigqueue_free(q);
1da177e4
LT
1934}
1935
24122c7f 1936int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
9e3bd6c3 1937{
e62e6650 1938 int sig = q->info.si_signo;
2ca3515a 1939 struct sigpending *pending;
24122c7f 1940 struct task_struct *t;
e62e6650 1941 unsigned long flags;
163566f6 1942 int ret, result;
2ca3515a 1943
4cd4b6d4 1944 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
e62e6650
ON
1945
1946 ret = -1;
24122c7f
EB
1947 rcu_read_lock();
1948 t = pid_task(pid, type);
1949 if (!t || !likely(lock_task_sighand(t, &flags)))
e62e6650
ON
1950 goto ret;
1951
7e695a5e 1952 ret = 1; /* the signal is ignored */
163566f6 1953 result = TRACE_SIGNAL_IGNORED;
def8cf72 1954 if (!prepare_signal(sig, t, false))
e62e6650
ON
1955 goto out;
1956
1957 ret = 0;
9e3bd6c3
PE
1958 if (unlikely(!list_empty(&q->list))) {
1959 /*
 1960 * If an SI_TIMER entry is already queued, just increment
1961 * the overrun count.
1962 */
9e3bd6c3
PE
1963 BUG_ON(q->info.si_code != SI_TIMER);
1964 q->info.si_overrun++;
163566f6 1965 result = TRACE_SIGNAL_ALREADY_PENDING;
e62e6650 1966 goto out;
9e3bd6c3 1967 }
ba661292 1968 q->info.si_overrun = 0;
9e3bd6c3 1969
9e3bd6c3 1970 signalfd_notify(t, sig);
24122c7f 1971 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
9e3bd6c3
PE
1972 list_add_tail(&q->list, &pending->list);
1973 sigaddset(&pending->signal, sig);
07296149 1974 complete_signal(sig, t, type);
163566f6 1975 result = TRACE_SIGNAL_DELIVERED;
e62e6650 1976out:
24122c7f 1977 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
e62e6650
ON
1978 unlock_task_sighand(t, &flags);
1979ret:
24122c7f 1980 rcu_read_unlock();
e62e6650 1981 return ret;
9e3bd6c3
PE
1982}
1983
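/*
 * Editor's sketch (not part of the original source): the preallocated
 * sigqueue lifecycle, loosely modelled on the POSIX timer code. Allocation
 * happens once at setup so that later delivery cannot fail with -EAGAIN;
 * the same entry is then reused for every expiration. The signal number and
 * the separation into three steps are illustrative assumptions.
 */
static struct sigqueue *example_setup(void)
{
	struct sigqueue *q = sigqueue_alloc();	/* may return NULL */

	if (q) {
		q->info.si_code = SI_TIMER;	/* expected by send_sigqueue() */
		q->info.si_signo = SIGALRM;
	}
	return q;
}

static void example_fire(struct sigqueue *q, struct pid *target)
{
	/* Returns 0 if queued, 1 if ignored, -1 if the target is gone. */
	send_sigqueue(q, target, PIDTYPE_PID);
}

static void example_teardown(struct sigqueue *q)
{
	sigqueue_free(q);
}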
b53b0b9d
JFG
1984static void do_notify_pidfd(struct task_struct *task)
1985{
1986 struct pid *pid;
1987
1caf7d50 1988 WARN_ON(task->exit_state == 0);
b53b0b9d
JFG
1989 pid = task_pid(task);
1990 wake_up_all(&pid->wait_pidfd);
1991}
1992
1da177e4
LT
1993/*
1994 * Let a parent know about the death of a child.
1995 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2b2a1ff6 1996 *
53c8f9f1
ON
1997 * Returns true if our parent ignored us and so we've switched to
1998 * self-reaping.
1da177e4 1999 */
53c8f9f1 2000bool do_notify_parent(struct task_struct *tsk, int sig)
1da177e4 2001{
ae7795bc 2002 struct kernel_siginfo info;
1da177e4
LT
2003 unsigned long flags;
2004 struct sighand_struct *psig;
53c8f9f1 2005 bool autoreap = false;
bde8285e 2006 u64 utime, stime;
1da177e4
LT
2007
2008 BUG_ON(sig == -1);
2009
2010 /* do_notify_parent_cldstop should have been called instead. */
e1abb39c 2011 BUG_ON(task_is_stopped_or_traced(tsk));
1da177e4 2012
d21142ec 2013 BUG_ON(!tsk->ptrace &&
1da177e4
LT
2014 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
2015
b53b0b9d
JFG
2016 /* Wake up all pidfd waiters */
2017 do_notify_pidfd(tsk);
2018
b6e238dc
ON
2019 if (sig != SIGCHLD) {
2020 /*
2021 * This is only possible if parent == real_parent.
2022 * Check if it has changed security domain.
2023 */
d1e7fd64 2024 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
b6e238dc
ON
2025 sig = SIGCHLD;
2026 }
2027
faf1f22b 2028 clear_siginfo(&info);
1da177e4
LT
2029 info.si_signo = sig;
2030 info.si_errno = 0;
b488893a 2031 /*
32084504
EB
2032 * We are under tasklist_lock here so our parent is tied to
2033 * us and cannot change.
b488893a 2034 *
32084504
EB
2035 * task_active_pid_ns will always return the same pid namespace
2036 * until a task passes through release_task.
b488893a
PE
2037 *
2038 * write_lock() currently calls preempt_disable() which is the
 2039 * same as rcu_read_lock(), but according to Oleg it is not
 2040 * correct to rely on this.
2041 */
2042 rcu_read_lock();
32084504 2043 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
54ba47ed
EB
2044 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2045 task_uid(tsk));
b488893a
PE
2046 rcu_read_unlock();
2047
bde8285e
FW
2048 task_cputime(tsk, &utime, &stime);
2049 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2050 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1da177e4
LT
2051
2052 info.si_status = tsk->exit_code & 0x7f;
2053 if (tsk->exit_code & 0x80)
2054 info.si_code = CLD_DUMPED;
2055 else if (tsk->exit_code & 0x7f)
2056 info.si_code = CLD_KILLED;
2057 else {
2058 info.si_code = CLD_EXITED;
2059 info.si_status = tsk->exit_code >> 8;
2060 }
2061
2062 psig = tsk->parent->sighand;
2063 spin_lock_irqsave(&psig->siglock, flags);
d21142ec 2064 if (!tsk->ptrace && sig == SIGCHLD &&
1da177e4
LT
2065 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2066 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2067 /*
2068 * We are exiting and our parent doesn't care. POSIX.1
2069 * defines special semantics for setting SIGCHLD to SIG_IGN
2070 * or setting the SA_NOCLDWAIT flag: we should be reaped
2071 * automatically and not left for our parent's wait4 call.
2072 * Rather than having the parent do it as a magic kind of
2073 * signal handler, we just set this to tell do_exit that we
2074 * can be cleaned up without becoming a zombie. Note that
2075 * we still call __wake_up_parent in this case, because a
2076 * blocked sys_wait4 might now return -ECHILD.
2077 *
2078 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2079 * is implementation-defined: we do (if you don't want
2080 * it, just use SIG_IGN instead).
2081 */
53c8f9f1 2082 autoreap = true;
1da177e4 2083 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
53c8f9f1 2084 sig = 0;
1da177e4 2085 }
61e713bd
EB
2086 /*
2087 * Send with __send_signal as si_pid and si_uid are in the
2088 * parent's namespaces.
2089 */
53c8f9f1 2090 if (valid_signal(sig) && sig)
61e713bd 2091 __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
1da177e4
LT
2092 __wake_up_parent(tsk, tsk->parent);
2093 spin_unlock_irqrestore(&psig->siglock, flags);
2b2a1ff6 2094
53c8f9f1 2095 return autoreap;
1da177e4
LT
2096}
2097
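/*
 * Editor's note (not part of the original source): the SIG_IGN/SA_NOCLDWAIT
 * autoreap semantics implemented above, seen from userspace. A hedged,
 * illustrative user-program fragment, not kernel code:
 *
 *	#include <signal.h>
 *	#include <sys/wait.h>
 *
 *	signal(SIGCHLD, SIG_IGN);	// children will now self-reap
 *	if (fork() == 0)
 *		_exit(0);		// leaves no zombie behind
 *	wait(NULL);			// eventually fails with ECHILD
 */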
75b95953
TH
2098/**
2099 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2100 * @tsk: task reporting the state change
2101 * @for_ptracer: the notification is for ptracer
2102 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2103 *
2104 * Notify @tsk's parent that the stopped/continued state has changed. If
2105 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2106 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2107 *
2108 * CONTEXT:
2109 * Must be called with tasklist_lock at least read locked.
2110 */
2111static void do_notify_parent_cldstop(struct task_struct *tsk,
2112 bool for_ptracer, int why)
1da177e4 2113{
ae7795bc 2114 struct kernel_siginfo info;
1da177e4 2115 unsigned long flags;
bc505a47 2116 struct task_struct *parent;
1da177e4 2117 struct sighand_struct *sighand;
bde8285e 2118 u64 utime, stime;
1da177e4 2119
75b95953 2120 if (for_ptracer) {
bc505a47 2121 parent = tsk->parent;
75b95953 2122 } else {
bc505a47
ON
2123 tsk = tsk->group_leader;
2124 parent = tsk->real_parent;
2125 }
2126
faf1f22b 2127 clear_siginfo(&info);
1da177e4
LT
2128 info.si_signo = SIGCHLD;
2129 info.si_errno = 0;
b488893a 2130 /*
5aba085e 2131 * see comment in do_notify_parent() about the following 4 lines
b488893a
PE
2132 */
2133 rcu_read_lock();
17cf22c3 2134 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
54ba47ed 2135 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
b488893a
PE
2136 rcu_read_unlock();
2137
bde8285e
FW
2138 task_cputime(tsk, &utime, &stime);
2139 info.si_utime = nsec_to_clock_t(utime);
2140 info.si_stime = nsec_to_clock_t(stime);
1da177e4
LT
2141
2142 info.si_code = why;
2143 switch (why) {
2144 case CLD_CONTINUED:
2145 info.si_status = SIGCONT;
2146 break;
2147 case CLD_STOPPED:
2148 info.si_status = tsk->signal->group_exit_code & 0x7f;
2149 break;
2150 case CLD_TRAPPED:
2151 info.si_status = tsk->exit_code & 0x7f;
2152 break;
2153 default:
2154 BUG();
2155 }
2156
2157 sighand = parent->sighand;
2158 spin_lock_irqsave(&sighand->siglock, flags);
2159 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2160 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2161 __group_send_sig_info(SIGCHLD, &info, parent);
2162 /*
2163 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2164 */
2165 __wake_up_parent(tsk, parent);
2166 spin_unlock_irqrestore(&sighand->siglock, flags);
2167}
2168
6527de95 2169static inline bool may_ptrace_stop(void)
d5f70c00 2170{
d21142ec 2171 if (!likely(current->ptrace))
6527de95 2172 return false;
d5f70c00
ON
2173 /*
2174 * Are we in the middle of do_coredump?
 2175 * If so, and our tracer is also part of the coredump, stopping
 2176 * would deadlock, and is pointless anyway because our tracer
 2177 * is dead, so don't allow us to stop.
2178 * If SIGKILL was already sent before the caller unlocked
999d9fc1 2179 * ->siglock we must see ->core_state != NULL. Otherwise it
d5f70c00 2180 * is safe to enter schedule().
9899d11f
ON
2181 *
 2182 * This is almost outdated: a task with a pending SIGKILL can't
2183 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2184 * after SIGKILL was already dequeued.
d5f70c00 2185 */
999d9fc1 2186 if (unlikely(current->mm->core_state) &&
d5f70c00 2187 unlikely(current->mm == current->parent->mm))
6527de95 2188 return false;
d5f70c00 2189
6527de95 2190 return true;
d5f70c00
ON
2191}
2192
1a669c2f 2193/*
5aba085e 2194 * Return true if there is a SIGKILL that should be waking us up.
1a669c2f
RM
2195 * Called with the siglock held.
2196 */
f99e9d8c 2197static bool sigkill_pending(struct task_struct *tsk)
1a669c2f 2198{
f99e9d8c
CB
2199 return sigismember(&tsk->pending.signal, SIGKILL) ||
2200 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1a669c2f
RM
2201}
2202
1da177e4
LT
2203/*
2204 * This must be called with current->sighand->siglock held.
2205 *
2206 * This should be the path for all ptrace stops.
2207 * We always set current->last_siginfo while stopped here.
2208 * That makes it a way to test a stopped process for
2209 * being ptrace-stopped vs being job-control-stopped.
2210 *
20686a30
ON
2211 * If we actually decide not to stop at all because the tracer
2212 * is gone, we keep current->exit_code unless clear_code.
1da177e4 2213 */
ae7795bc 2214static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
b8401150
NK
2215 __releases(&current->sighand->siglock)
2216 __acquires(&current->sighand->siglock)
1da177e4 2217{
ceb6bd67
TH
2218 bool gstop_done = false;
2219
1a669c2f
RM
2220 if (arch_ptrace_stop_needed(exit_code, info)) {
2221 /*
2222 * The arch code has something special to do before a
2223 * ptrace stop. This is allowed to block, e.g. for faults
2224 * on user stack pages. We can't keep the siglock while
2225 * calling arch_ptrace_stop, so we must release it now.
2226 * To preserve proper semantics, we must do this before
2227 * any signal bookkeeping like checking group_stop_count.
2228 * Meanwhile, a SIGKILL could come in before we retake the
2229 * siglock. That must prevent us from sleeping in TASK_TRACED.
2230 * So after regaining the lock, we must check for SIGKILL.
2231 */
2232 spin_unlock_irq(&current->sighand->siglock);
2233 arch_ptrace_stop(exit_code, info);
2234 spin_lock_irq(&current->sighand->siglock);
3d749b9e
ON
2235 if (sigkill_pending(current))
2236 return;
1a669c2f
RM
2237 }
2238
b5bf9a90
PZ
2239 set_special_state(TASK_TRACED);
2240
1da177e4 2241 /*
81be24b8
TH
2242 * We're committing to trapping. TRACED should be visible before
2243 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2244 * Also, transition to TRACED and updates to ->jobctl should be
2245 * atomic with respect to siglock and should be done after the arch
2246 * hook as siglock is released and regrabbed across it.
b5bf9a90
PZ
2247 *
2248 * TRACER TRACEE
2249 *
2250 * ptrace_attach()
2251 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2252 * do_wait()
2253 * set_current_state() smp_wmb();
2254 * ptrace_do_wait()
2255 * wait_task_stopped()
2256 * task_stopped_code()
2257 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
1da177e4 2258 */
b5bf9a90 2259 smp_wmb();
1da177e4
LT
2260
2261 current->last_siginfo = info;
2262 current->exit_code = exit_code;
2263
d79fdd6d 2264 /*
0ae8ce1c
TH
2265 * If @why is CLD_STOPPED, we're trapping to participate in a group
 2266 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
73ddff2b
TH
2267 * across siglock relocks since INTERRUPT was scheduled, PENDING
2268 * could be clear now. We act as if SIGCONT is received after
2269 * TASK_TRACED is entered - ignore it.
d79fdd6d 2270 */
a8f072c1 2271 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
ceb6bd67 2272 gstop_done = task_participate_group_stop(current);
d79fdd6d 2273
fb1d910c 2274 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
73ddff2b 2275 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
fb1d910c
TH
2276 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2277 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
73ddff2b 2278
81be24b8 2279 /* entering a trap, clear TRAPPING */
a8f072c1 2280 task_clear_jobctl_trapping(current);
d79fdd6d 2281
1da177e4
LT
2282 spin_unlock_irq(&current->sighand->siglock);
2283 read_lock(&tasklist_lock);
3d749b9e 2284 if (may_ptrace_stop()) {
ceb6bd67
TH
2285 /*
2286 * Notify parents of the stop.
2287 *
2288 * While ptraced, there are two parents - the ptracer and
2289 * the real_parent of the group_leader. The ptracer should
2290 * know about every stop while the real parent is only
2291 * interested in the completion of group stop. The states
2292 * for the two don't interact with each other. Notify
2293 * separately unless they're gonna be duplicates.
2294 */
2295 do_notify_parent_cldstop(current, true, why);
bb3696da 2296 if (gstop_done && ptrace_reparented(current))
ceb6bd67
TH
2297 do_notify_parent_cldstop(current, false, why);
2298
53da1d94
MS
2299 /*
2300 * Don't want to allow preemption here, because
2301 * sys_ptrace() needs this task to be inactive.
2302 *
2303 * XXX: implement read_unlock_no_resched().
2304 */
2305 preempt_disable();
1da177e4 2306 read_unlock(&tasklist_lock);
76f969e8 2307 cgroup_enter_frozen();
937c6b27 2308 preempt_enable_no_resched();
5d8f72b5 2309 freezable_schedule();
05b28926 2310 cgroup_leave_frozen(true);
1da177e4
LT
2311 } else {
2312 /*
2313 * By the time we got the lock, our tracer went away.
6405f7f4 2314 * Don't drop the lock yet, another tracer may come.
ceb6bd67
TH
2315 *
2316 * If @gstop_done, the ptracer went away between group stop
2317 * completion and here. During detach, it would have set
a8f072c1
TH
2318 * JOBCTL_STOP_PENDING on us and we'll re-enter
2319 * TASK_STOPPED in do_signal_stop() on return, so notifying
2320 * the real parent of the group stop completion is enough.
1da177e4 2321 */
ceb6bd67
TH
2322 if (gstop_done)
2323 do_notify_parent_cldstop(current, false, why);
2324
9899d11f 2325 /* tasklist protects us from ptrace_freeze_traced() */
6405f7f4 2326 __set_current_state(TASK_RUNNING);
20686a30
ON
2327 if (clear_code)
2328 current->exit_code = 0;
6405f7f4 2329 read_unlock(&tasklist_lock);
1da177e4
LT
2330 }
2331
2332 /*
2333 * We are back. Now reacquire the siglock before touching
2334 * last_siginfo, so that we are sure to have synchronized with
2335 * any signal-sending on another CPU that wants to examine it.
2336 */
2337 spin_lock_irq(&current->sighand->siglock);
2338 current->last_siginfo = NULL;
2339
544b2c91
TH
2340 /* LISTENING can be set only during STOP traps, clear it */
2341 current->jobctl &= ~JOBCTL_LISTENING;
2342
1da177e4
LT
2343 /*
2344 * Queued signals ignored us while we were stopped for tracing.
2345 * So check for any that we should take before resuming user mode.
b74d0deb 2346 * This sets TIF_SIGPENDING, but never clears it.
1da177e4 2347 */
b74d0deb 2348 recalc_sigpending_tsk(current);
1da177e4
LT
2349}
2350
3544d72a 2351static void ptrace_do_notify(int signr, int exit_code, int why)
1da177e4 2352{
ae7795bc 2353 kernel_siginfo_t info;
1da177e4 2354
faf1f22b 2355 clear_siginfo(&info);
3544d72a 2356 info.si_signo = signr;
1da177e4 2357 info.si_code = exit_code;
b488893a 2358 info.si_pid = task_pid_vnr(current);
078de5f7 2359 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1da177e4
LT
2360
2361 /* Let the debugger run. */
3544d72a
TH
2362 ptrace_stop(exit_code, why, 1, &info);
2363}
2364
2365void ptrace_notify(int exit_code)
2366{
2367 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
f784e8a7
ON
2368 if (unlikely(current->task_works))
2369 task_work_run();
3544d72a 2370
1da177e4 2371 spin_lock_irq(&current->sighand->siglock);
3544d72a 2372 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1da177e4
LT
2373 spin_unlock_irq(&current->sighand->siglock);
2374}
2375
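/*
 * Editor's sketch (not part of the original source): the exit_code
 * convention enforced by the BUG_ON() above. Callers put a ptrace event in
 * the high byte and SIGTRAP in the low byte, e.g. reporting a fork to the
 * tracer looks roughly like this (simplified assumption):
 */
static void example_report_fork_event(void)
{
	ptrace_notify((PTRACE_EVENT_FORK << 8) | SIGTRAP);
}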
73ddff2b
TH
2376/**
2377 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2378 * @signr: signr causing group stop if initiating
2379 *
2380 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2381 * and participate in it. If already set, participate in the existing
2382 * group stop. If participated in a group stop (and thus slept), %true is
2383 * returned with siglock released.
2384 *
2385 * If ptraced, this function doesn't handle stop itself. Instead,
2386 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2387 * untouched. The caller must ensure that INTERRUPT trap handling takes
 2388 * place afterwards.
2389 *
2390 * CONTEXT:
2391 * Must be called with @current->sighand->siglock held, which is released
2392 * on %true return.
2393 *
2394 * RETURNS:
2395 * %false if group stop is already cancelled or ptrace trap is scheduled.
2396 * %true if participated in group stop.
1da177e4 2397 */
73ddff2b
TH
2398static bool do_signal_stop(int signr)
2399 __releases(&current->sighand->siglock)
1da177e4
LT
2400{
2401 struct signal_struct *sig = current->signal;
1da177e4 2402
a8f072c1 2403 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
b76808e6 2404 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
f558b7e4
ON
2405 struct task_struct *t;
2406
a8f072c1
TH
2407 /* signr will be recorded in task->jobctl for retries */
2408 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
d79fdd6d 2409
a8f072c1 2410 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
573cf9ad 2411 unlikely(signal_group_exit(sig)))
73ddff2b 2412 return false;
1da177e4 2413 /*
408a37de
TH
2414 * There is no group stop already in progress. We must
2415 * initiate one now.
2416 *
2417 * While ptraced, a task may be resumed while group stop is
2418 * still in effect and then receive a stop signal and
2419 * initiate another group stop. This deviates from the
2420 * usual behavior as two consecutive stop signals can't
780006ea
ON
2421 * cause two group stops when !ptraced. That is why we
2422 * also check !task_is_stopped(t) below.
408a37de
TH
2423 *
2424 * The condition can be distinguished by testing whether
2425 * SIGNAL_STOP_STOPPED is already set. Don't generate
2426 * group_exit_code in such case.
2427 *
2428 * This is not necessary for SIGNAL_STOP_CONTINUED because
2429 * an intervening stop signal is required to cause two
2430 * continued events regardless of ptrace.
1da177e4 2431 */
408a37de
TH
2432 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2433 sig->group_exit_code = signr;
1da177e4 2434
7dd3db54
TH
2435 sig->group_stop_count = 0;
2436
2437 if (task_set_jobctl_pending(current, signr | gstop))
2438 sig->group_stop_count++;
1da177e4 2439
8d38f203
ON
2440 t = current;
2441 while_each_thread(current, t) {
1da177e4 2442 /*
a122b341
ON
2443 * Setting state to TASK_STOPPED for a group
2444 * stop is always done with the siglock held,
2445 * so this check has no races.
1da177e4 2446 */
7dd3db54
TH
2447 if (!task_is_stopped(t) &&
2448 task_set_jobctl_pending(t, signr | gstop)) {
ae6d2ed7 2449 sig->group_stop_count++;
fb1d910c
TH
2450 if (likely(!(t->ptrace & PT_SEIZED)))
2451 signal_wake_up(t, 0);
2452 else
2453 ptrace_trap_notify(t);
a122b341 2454 }
d79fdd6d 2455 }
1da177e4 2456 }
73ddff2b 2457
d21142ec 2458 if (likely(!current->ptrace)) {
5224fa36 2459 int notify = 0;
1da177e4 2460
5224fa36
TH
2461 /*
2462 * If there are no other threads in the group, or if there
2463 * is a group stop in progress and we are the last to stop,
2464 * report to the parent.
2465 */
2466 if (task_participate_group_stop(current))
2467 notify = CLD_STOPPED;
2468
b5bf9a90 2469 set_special_state(TASK_STOPPED);
5224fa36
TH
2470 spin_unlock_irq(&current->sighand->siglock);
2471
62bcf9d9
TH
2472 /*
2473 * Notify the parent of the group stop completion. Because
2474 * we're not holding either the siglock or tasklist_lock
 2475 * here, a ptracer may attach in between; however, this is for
2476 * group stop and should always be delivered to the real
2477 * parent of the group leader. The new ptracer will get
2478 * its notification when this task transitions into
2479 * TASK_TRACED.
2480 */
5224fa36
TH
2481 if (notify) {
2482 read_lock(&tasklist_lock);
62bcf9d9 2483 do_notify_parent_cldstop(current, false, notify);
5224fa36
TH
2484 read_unlock(&tasklist_lock);
2485 }
2486
2487 /* Now we don't run again until woken by SIGCONT or SIGKILL */
76f969e8 2488 cgroup_enter_frozen();
5d8f72b5 2489 freezable_schedule();
73ddff2b 2490 return true;
d79fdd6d 2491 } else {
73ddff2b
TH
2492 /*
2493 * While ptraced, group stop is handled by STOP trap.
2494 * Schedule it and let the caller deal with it.
2495 */
2496 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2497 return false;
ae6d2ed7 2498 }
73ddff2b 2499}
1da177e4 2500
73ddff2b
TH
2501/**
2502 * do_jobctl_trap - take care of ptrace jobctl traps
2503 *
3544d72a
TH
2504 * When PT_SEIZED, it's used for both group stop and explicit
2505 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2506 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2507 * the stop signal; otherwise, %SIGTRAP.
2508 *
2509 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2510 * number as exit_code and no siginfo.
73ddff2b
TH
2511 *
2512 * CONTEXT:
2513 * Must be called with @current->sighand->siglock held, which may be
2514 * released and re-acquired before returning with intervening sleep.
2515 */
2516static void do_jobctl_trap(void)
2517{
3544d72a 2518 struct signal_struct *signal = current->signal;
73ddff2b 2519 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
ae6d2ed7 2520
3544d72a
TH
2521 if (current->ptrace & PT_SEIZED) {
2522 if (!signal->group_stop_count &&
2523 !(signal->flags & SIGNAL_STOP_STOPPED))
2524 signr = SIGTRAP;
2525 WARN_ON_ONCE(!signr);
2526 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2527 CLD_STOPPED);
2528 } else {
2529 WARN_ON_ONCE(!signr);
2530 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2531 current->exit_code = 0;
ae6d2ed7 2532 }
1da177e4
LT
2533}
2534
76f969e8
RG
2535/**
2536 * do_freezer_trap - handle the freezer jobctl trap
2537 *
 2538 * Puts the task into the frozen state, unless the task is about to quit;
 2539 * in that case it drops JOBCTL_TRAP_FREEZE.
2540 *
2541 * CONTEXT:
2542 * Must be called with @current->sighand->siglock held,
2543 * which is always released before returning.
2544 */
2545static void do_freezer_trap(void)
2546 __releases(&current->sighand->siglock)
2547{
2548 /*
2549 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2550 * let's make another loop to give it a chance to be handled.
2551 * In any case, we'll return back.
2552 */
2553 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2554 JOBCTL_TRAP_FREEZE) {
2555 spin_unlock_irq(&current->sighand->siglock);
2556 return;
2557 }
2558
2559 /*
2560 * Now we're sure that there is no pending fatal signal and no
2561 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2562 * immediately (if there is a non-fatal signal pending), and
 2563 * put the task to sleep.
2564 */
2565 __set_current_state(TASK_INTERRUPTIBLE);
2566 clear_thread_flag(TIF_SIGPENDING);
2567 spin_unlock_irq(&current->sighand->siglock);
2568 cgroup_enter_frozen();
2569 freezable_schedule();
2570}
2571
ae7795bc 2572static int ptrace_signal(int signr, kernel_siginfo_t *info)
18c98b65 2573{
8a352418
ON
2574 /*
2575 * We do not check sig_kernel_stop(signr) but set this marker
2576 * unconditionally because we do not know whether debugger will
2577 * change signr. This flag has no meaning unless we are going
2578 * to stop after return from ptrace_stop(). In this case it will
2579 * be checked in do_signal_stop(), we should only stop if it was
2580 * not cleared by SIGCONT while we were sleeping. See also the
2581 * comment in dequeue_signal().
2582 */
2583 current->jobctl |= JOBCTL_STOP_DEQUEUED;
fe1bc6a0 2584 ptrace_stop(signr, CLD_TRAPPED, 0, info);
18c98b65
RM
2585
2586 /* We're back. Did the debugger cancel the sig? */
2587 signr = current->exit_code;
2588 if (signr == 0)
2589 return signr;
2590
2591 current->exit_code = 0;
2592
5aba085e
RD
2593 /*
2594 * Update the siginfo structure if the signal has
2595 * changed. If the debugger wanted something
2596 * specific in the siginfo structure then it should
2597 * have updated *info via PTRACE_SETSIGINFO.
2598 */
18c98b65 2599 if (signr != info->si_signo) {
faf1f22b 2600 clear_siginfo(info);
18c98b65
RM
2601 info->si_signo = signr;
2602 info->si_errno = 0;
2603 info->si_code = SI_USER;
6b550f94 2604 rcu_read_lock();
18c98b65 2605 info->si_pid = task_pid_vnr(current->parent);
54ba47ed
EB
2606 info->si_uid = from_kuid_munged(current_user_ns(),
2607 task_uid(current->parent));
6b550f94 2608 rcu_read_unlock();
18c98b65
RM
2609 }
2610
2611 /* If the (new) signal is now blocked, requeue it. */
2612 if (sigismember(&current->blocked, signr)) {
b21c5bd5 2613 send_signal(signr, info, current, PIDTYPE_PID);
18c98b65
RM
2614 signr = 0;
2615 }
2616
2617 return signr;
2618}
2619
6ac05e83
PC
2620static void hide_si_addr_tag_bits(struct ksignal *ksig)
2621{
2622 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2623 case SIL_FAULT:
9abcabe3 2624 case SIL_FAULT_TRAPNO:
6ac05e83
PC
2625 case SIL_FAULT_MCEERR:
2626 case SIL_FAULT_BNDERR:
2627 case SIL_FAULT_PKUERR:
f4ac7302 2628 case SIL_FAULT_PERF_EVENT:
6ac05e83
PC
2629 ksig->info.si_addr = arch_untagged_si_addr(
2630 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2631 break;
2632 case SIL_KILL:
2633 case SIL_TIMER:
2634 case SIL_POLL:
2635 case SIL_CHLD:
2636 case SIL_RT:
2637 case SIL_SYS:
2638 break;
2639 }
2640}
2641
20ab7218 2642bool get_signal(struct ksignal *ksig)
1da177e4 2643{
f6b76d4f
ON
2644 struct sighand_struct *sighand = current->sighand;
2645 struct signal_struct *signal = current->signal;
2646 int signr;
1da177e4 2647
35d0b389
JA
2648 if (unlikely(current->task_works))
2649 task_work_run();
2650
12db8b69
JA
2651 /*
2652 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2653 * that the arch handlers don't all have to do it. If we get here
2654 * without TIF_SIGPENDING, just exit after running signal work.
2655 */
12db8b69
JA
2656 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2657 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2658 tracehook_notify_signal();
2659 if (!task_sigpending(current))
2660 return false;
2661 }
12db8b69 2662
0326f5a9 2663 if (unlikely(uprobe_deny_signal()))
20ab7218 2664 return false;
0326f5a9 2665
13b1c3d4 2666 /*
5d8f72b5
ON
2667 * Do this once, we can't return to user-mode if freezing() == T.
2668 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2669 * thus do not need another check after return.
13b1c3d4 2670 */
fc558a74
RW
2671 try_to_freeze();
2672
5d8f72b5 2673relock:
f6b76d4f 2674 spin_lock_irq(&sighand->siglock);
e91b4816 2675
021e1ae3
ON
2676 /*
2677 * Every stopped thread goes here after wakeup. Check to see if
 2678 * we should notify the parent; prepare_signal(SIGCONT) encodes
2679 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2680 */
f6b76d4f 2681 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
c672af35
TH
2682 int why;
2683
2684 if (signal->flags & SIGNAL_CLD_CONTINUED)
2685 why = CLD_CONTINUED;
2686 else
2687 why = CLD_STOPPED;
2688
f6b76d4f 2689 signal->flags &= ~SIGNAL_CLD_MASK;
e4420551 2690
ae6d2ed7 2691 spin_unlock_irq(&sighand->siglock);
fa00b80b 2692
ceb6bd67
TH
2693 /*
2694 * Notify the parent that we're continuing. This event is
 2695 * always per-process and doesn't make a whole lot of sense
2696 * for ptracers, who shouldn't consume the state via
2697 * wait(2) either, but, for backward compatibility, notify
2698 * the ptracer of the group leader too unless it's gonna be
2699 * a duplicate.
2700 */
edf2ed15 2701 read_lock(&tasklist_lock);
ceb6bd67
TH
2702 do_notify_parent_cldstop(current, false, why);
2703
bb3696da
ON
2704 if (ptrace_reparented(current->group_leader))
2705 do_notify_parent_cldstop(current->group_leader,
2706 true, why);
edf2ed15 2707 read_unlock(&tasklist_lock);
ceb6bd67 2708
e4420551
ON
2709 goto relock;
2710 }
2711
35634ffa 2712 /* Has this task already been marked for death? */
cf43a757
EB
2713 if (signal_group_exit(signal)) {
2714 ksig->info.si_signo = signr = SIGKILL;
2715 sigdelset(&current->pending.signal, SIGKILL);
98af37d6
ZW
2716 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2717 &sighand->action[SIGKILL - 1]);
cf43a757 2718 recalc_sigpending();
35634ffa 2719 goto fatal;
cf43a757 2720 }
35634ffa 2721
1da177e4
LT
2722 for (;;) {
2723 struct k_sigaction *ka;
1be53963 2724
dd1d6772
TH
2725 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2726 do_signal_stop(0))
7bcf6a2c 2727 goto relock;
1be53963 2728
76f969e8
RG
2729 if (unlikely(current->jobctl &
2730 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2731 if (current->jobctl & JOBCTL_TRAP_MASK) {
2732 do_jobctl_trap();
2733 spin_unlock_irq(&sighand->siglock);
2734 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2735 do_freezer_trap();
2736
2737 goto relock;
2738 }
2739
2740 /*
2741 * If the task is leaving the frozen state, let's update
2742 * cgroup counters and reset the frozen bit.
2743 */
2744 if (unlikely(cgroup_task_frozen(current))) {
73ddff2b 2745 spin_unlock_irq(&sighand->siglock);
cb2c4cd8 2746 cgroup_leave_frozen(false);
73ddff2b
TH
2747 goto relock;
2748 }
1da177e4 2749
7146db33
EB
2750 /*
2751 * Signals generated by the execution of an instruction
2752 * need to be delivered before any other pending signals
2753 * so that the instruction pointer in the signal stack
2754 * frame points to the faulting instruction.
2755 */
2756 signr = dequeue_synchronous_signal(&ksig->info);
2757 if (!signr)
2758 signr = dequeue_signal(current, &current->blocked, &ksig->info);
7bcf6a2c 2759
dd1d6772
TH
2760 if (!signr)
2761 break; /* will return 0 */
7bcf6a2c 2762
8a352418 2763 if (unlikely(current->ptrace) && signr != SIGKILL) {
828b1f65 2764 signr = ptrace_signal(signr, &ksig->info);
dd1d6772
TH
2765 if (!signr)
2766 continue;
1da177e4
LT
2767 }
2768
dd1d6772
TH
2769 ka = &sighand->action[signr-1];
2770
f9d4257e 2771 /* Trace actually delivered signals. */
828b1f65 2772 trace_signal_deliver(signr, &ksig->info, ka);
f9d4257e 2773
1da177e4
LT
2774 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2775 continue;
2776 if (ka->sa.sa_handler != SIG_DFL) {
2777 /* Run the handler. */
828b1f65 2778 ksig->ka = *ka;
1da177e4
LT
2779
2780 if (ka->sa.sa_flags & SA_ONESHOT)
2781 ka->sa.sa_handler = SIG_DFL;
2782
2783 break; /* will return non-zero "signr" value */
2784 }
2785
2786 /*
2787 * Now we are doing the default action for this signal.
2788 */
2789 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2790 continue;
2791
84d73786 2792 /*
0fbc26a6 2793 * Global init gets no signals it doesn't want.
b3bfa0cb
SB
2794 * Container-init gets no signals it doesn't want from same
2795 * container.
2796 *
2797 * Note that if global/container-init sees a sig_kernel_only()
2798 * signal here, the signal must have been generated internally
2799 * or must have come from an ancestor namespace. In either
2800 * case, the signal cannot be dropped.
84d73786 2801 */
fae5fa44 2802 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
b3bfa0cb 2803 !sig_kernel_only(signr))
1da177e4
LT
2804 continue;
2805
2806 if (sig_kernel_stop(signr)) {
2807 /*
2808 * The default action is to stop all threads in
2809 * the thread group. The job control signals
2810 * do nothing in an orphaned pgrp, but SIGSTOP
2811 * always works. Note that siglock needs to be
2812 * dropped during the call to is_orphaned_pgrp()
2813 * because of lock ordering with tasklist_lock.
2814 * This allows an intervening SIGCONT to be posted.
2815 * We need to check for that and bail out if necessary.
2816 */
2817 if (signr != SIGSTOP) {
f6b76d4f 2818 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2819
2820 /* signals can be posted during this window */
2821
3e7cd6c4 2822 if (is_current_pgrp_orphaned())
1da177e4
LT
2823 goto relock;
2824
f6b76d4f 2825 spin_lock_irq(&sighand->siglock);
1da177e4
LT
2826 }
2827
828b1f65 2828 if (likely(do_signal_stop(ksig->info.si_signo))) {
1da177e4
LT
2829 /* It released the siglock. */
2830 goto relock;
2831 }
2832
2833 /*
2834 * We didn't actually stop, due to a race
2835 * with SIGCONT or something like that.
2836 */
2837 continue;
2838 }
2839
35634ffa 2840 fatal:
f6b76d4f 2841 spin_unlock_irq(&sighand->siglock);
f2b31bb5
RG
2842 if (unlikely(cgroup_task_frozen(current)))
2843 cgroup_leave_frozen(true);
1da177e4
LT
2844
2845 /*
2846 * Anything else is fatal, maybe with a core dump.
2847 */
2848 current->flags |= PF_SIGNALED;
2dce81bf 2849
1da177e4 2850 if (sig_kernel_coredump(signr)) {
2dce81bf 2851 if (print_fatal_signals)
828b1f65 2852 print_fatal_signal(ksig->info.si_signo);
2b5faa4c 2853 proc_coredump_connector(current);
1da177e4
LT
2854 /*
2855 * If it was able to dump core, this kills all
2856 * other threads in the group and synchronizes with
2857 * their demise. If we lost the race with another
2858 * thread getting here, it set group_exit_code
2859 * first and our do_group_exit call below will use
2860 * that value and ignore the one we pass it.
2861 */
828b1f65 2862 do_coredump(&ksig->info);
1da177e4
LT
2863 }
2864
10442994
JA
2865 /*
2866 * PF_IO_WORKER threads will catch and exit on fatal signals
2867 * themselves. They have cleanup that must be performed, so
2868 * we cannot call do_exit() on their behalf.
2869 */
2870 if (current->flags & PF_IO_WORKER)
2871 goto out;
2872
1da177e4
LT
2873 /*
2874 * Death signals, no core dump.
2875 */
828b1f65 2876 do_group_exit(ksig->info.si_signo);
1da177e4
LT
2877 /* NOTREACHED */
2878 }
f6b76d4f 2879 spin_unlock_irq(&sighand->siglock);
10442994 2880out:
828b1f65 2881 ksig->sig = signr;
6ac05e83
PC
2882
2883 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2884 hide_si_addr_tag_bits(ksig);
2885
828b1f65 2886 return ksig->sig > 0;
1da177e4
LT
2887}
2888
5e6292c0 2889/**
efee984c 2890 * signal_delivered - report that a signal was successfully delivered
10b1c7ac 2891 * @ksig: kernel signal struct
efee984c 2892 * @stepping: nonzero if debugger single-step or block-step in use
5e6292c0 2893 *
e227867f 2894 * This function should be called when a signal has successfully been
10b1c7ac 2895 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
efee984c 2896 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
10b1c7ac 2897 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
5e6292c0 2898 */
10b1c7ac 2899static void signal_delivered(struct ksignal *ksig, int stepping)
5e6292c0
MF
2900{
2901 sigset_t blocked;
2902
a610d6e6
AV
2903 /* A signal was successfully delivered, and the
2904 saved sigmask was stored on the signal frame,
2905 and will be restored by sigreturn. So we can
2906 simply clear the restore sigmask flag. */
2907 clear_restore_sigmask();
2908
10b1c7ac
RW
2909 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2910 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2911 sigaddset(&blocked, ksig->sig);
5e6292c0 2912 set_current_blocked(&blocked);
97c885d5
AV
2913 if (current->sas_ss_flags & SS_AUTODISARM)
2914 sas_ss_reset(current);
df5601f9 2915 tracehook_signal_handler(stepping);
5e6292c0
MF
2916}
2917
2ce5da17
AV
2918void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2919{
2920 if (failed)
cb44c9a0 2921 force_sigsegv(ksig->sig);
2ce5da17 2922 else
10b1c7ac 2923 signal_delivered(ksig, stepping);
2ce5da17
AV
2924}
2925
0edceb7b
ON
2926/*
2927 * It could be that complete_signal() picked us to notify about the
fec9993d
ON
2928 * group-wide signal. Other threads should be notified now to take
2929 * the shared signals in @which since we will not.
0edceb7b 2930 */
f646e227 2931static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
0edceb7b 2932{
f646e227 2933 sigset_t retarget;
0edceb7b
ON
2934 struct task_struct *t;
2935
f646e227
ON
2936 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2937 if (sigisemptyset(&retarget))
2938 return;
2939
0edceb7b
ON
2940 t = tsk;
2941 while_each_thread(tsk, t) {
fec9993d
ON
2942 if (t->flags & PF_EXITING)
2943 continue;
2944
2945 if (!has_pending_signals(&retarget, &t->blocked))
2946 continue;
2947 /* Remove the signals this thread can handle. */
2948 sigandsets(&retarget, &retarget, &t->blocked);
2949
5c251e9d 2950 if (!task_sigpending(t))
fec9993d
ON
2951 signal_wake_up(t, 0);
2952
2953 if (sigisemptyset(&retarget))
2954 break;
0edceb7b
ON
2955 }
2956}
2957
d12619b5
ON
2958void exit_signals(struct task_struct *tsk)
2959{
2960 int group_stop = 0;
f646e227 2961 sigset_t unblocked;
d12619b5 2962
77e4ef99
TH
2963 /*
2964 * @tsk is about to have PF_EXITING set - lock out users which
2965 * expect stable threadgroup.
2966 */
780de9dd 2967 cgroup_threadgroup_change_begin(tsk);
77e4ef99 2968
5dee1707
ON
2969 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2970 tsk->flags |= PF_EXITING;
780de9dd 2971 cgroup_threadgroup_change_end(tsk);
5dee1707 2972 return;
d12619b5
ON
2973 }
2974
5dee1707 2975 spin_lock_irq(&tsk->sighand->siglock);
d12619b5
ON
2976 /*
2977 * From now this task is not visible for group-wide signals,
2978 * see wants_signal(), do_signal_stop().
2979 */
2980 tsk->flags |= PF_EXITING;
77e4ef99 2981
780de9dd 2982 cgroup_threadgroup_change_end(tsk);
77e4ef99 2983
5c251e9d 2984 if (!task_sigpending(tsk))
5dee1707
ON
2985 goto out;
2986
f646e227
ON
2987 unblocked = tsk->blocked;
2988 signotset(&unblocked);
2989 retarget_shared_pending(tsk, &unblocked);
5dee1707 2990
a8f072c1 2991 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
e5c1902e 2992 task_participate_group_stop(tsk))
edf2ed15 2993 group_stop = CLD_STOPPED;
5dee1707 2994out:
d12619b5
ON
2995 spin_unlock_irq(&tsk->sighand->siglock);
2996
62bcf9d9
TH
2997 /*
2998 * If group stop has completed, deliver the notification. This
2999 * should always go to the real parent of the group leader.
3000 */
ae6d2ed7 3001 if (unlikely(group_stop)) {
d12619b5 3002 read_lock(&tasklist_lock);
62bcf9d9 3003 do_notify_parent_cldstop(tsk, false, group_stop);
d12619b5
ON
3004 read_unlock(&tasklist_lock);
3005 }
3006}
3007
1da177e4
LT
3008/*
3009 * System call entry points.
3010 */
3011
41c57892
RD
3012/**
3013 * sys_restart_syscall - restart a system call
3014 */
754fe8d2 3015SYSCALL_DEFINE0(restart_syscall)
1da177e4 3016{
f56141e3 3017 struct restart_block *restart = &current->restart_block;
1da177e4
LT
3018 return restart->fn(restart);
3019}
3020
3021long do_no_restart_syscall(struct restart_block *param)
3022{
3023 return -EINTR;
3024}
3025
b182801a
ON
3026static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
3027{
5c251e9d 3028 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
b182801a
ON
3029 sigset_t newblocked;
3030 /* A set of now blocked but previously unblocked signals. */
702a5073 3031 sigandnsets(&newblocked, newset, &current->blocked);
b182801a
ON
3032 retarget_shared_pending(tsk, &newblocked);
3033 }
3034 tsk->blocked = *newset;
3035 recalc_sigpending();
3036}
3037
e6fa16ab
ON
3038/**
3039 * set_current_blocked - change current->blocked mask
3040 * @newset: new mask
3041 *
 3042 * It is wrong to change ->blocked directly; this helper should be used
3043 * to ensure the process can't miss a shared signal we are going to block.
1da177e4 3044 */
77097ae5
AV
3045void set_current_blocked(sigset_t *newset)
3046{
77097ae5 3047 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
0c4a8423 3048 __set_current_blocked(newset);
77097ae5
AV
3049}
3050
3051void __set_current_blocked(const sigset_t *newset)
e6fa16ab
ON
3052{
3053 struct task_struct *tsk = current;
3054
c7be96af
WL
3055 /*
3056 * In case the signal mask hasn't changed, there is nothing we need
3057 * to do. The current->blocked shouldn't be modified by other task.
3058 */
3059 if (sigequalsets(&tsk->blocked, newset))
3060 return;
3061
e6fa16ab 3062 spin_lock_irq(&tsk->sighand->siglock);
b182801a 3063 __set_task_blocked(tsk, newset);
e6fa16ab
ON
3064 spin_unlock_irq(&tsk->sighand->siglock);
3065}
1da177e4
LT
3066
3067/*
3068 * This is also useful for kernel threads that want to temporarily
3069 * (or permanently) block certain signals.
3070 *
3071 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3072 * interface happily blocks "unblockable" signals like SIGKILL
3073 * and friends.
3074 */
3075int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3076{
73ef4aeb
ON
3077 struct task_struct *tsk = current;
3078 sigset_t newset;
1da177e4 3079
73ef4aeb 3080 /* Lockless, only current can change ->blocked, never from irq */
a26fd335 3081 if (oldset)
73ef4aeb 3082 *oldset = tsk->blocked;
a26fd335 3083
1da177e4
LT
3084 switch (how) {
3085 case SIG_BLOCK:
73ef4aeb 3086 sigorsets(&newset, &tsk->blocked, set);
1da177e4
LT
3087 break;
3088 case SIG_UNBLOCK:
702a5073 3089 sigandnsets(&newset, &tsk->blocked, set);
1da177e4
LT
3090 break;
3091 case SIG_SETMASK:
73ef4aeb 3092 newset = *set;
1da177e4
LT
3093 break;
3094 default:
73ef4aeb 3095 return -EINVAL;
1da177e4 3096 }
a26fd335 3097
77097ae5 3098 __set_current_blocked(&newset);
73ef4aeb 3099 return 0;
1da177e4 3100}
fb50f5a4 3101EXPORT_SYMBOL(sigprocmask);
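/*
 * Editor's sketch (not part of the original source): the kernel-thread usage
 * mentioned in the comment above. The thread and its choice of signals are
 * illustrative assumptions; note that, unlike the userspace interface, this
 * path would even let SIGKILL be blocked.
 */
static void example_kthread_block_signals(void)
{
	sigset_t blocked;

	sigemptyset(&blocked);
	sigaddset(&blocked, SIGHUP);
	sigaddset(&blocked, SIGINT);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
}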
1da177e4 3102
ded653cc
DD
3103/*
 3104 * This API helps set application-provided sigmasks.
 3105 *
 3106 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 3107 * epoll_pwait, where a new sigmask is passed from userland for the syscall.
b772434b
ON
3108 *
 3109 * Note that it does set_restore_sigmask() in advance, so it must always
 3110 * be paired with restore_saved_sigmask_unless() before returning.
ded653cc 3111 */
b772434b 3112int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
ded653cc 3113{
b772434b 3114 sigset_t kmask;
ded653cc 3115
b772434b
ON
3116 if (!umask)
3117 return 0;
ded653cc
DD
3118 if (sigsetsize != sizeof(sigset_t))
3119 return -EINVAL;
b772434b 3120 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
ded653cc
DD
3121 return -EFAULT;
3122
b772434b
ON
3123 set_restore_sigmask();
3124 current->saved_sigmask = current->blocked;
3125 set_current_blocked(&kmask);
ded653cc
DD
3126
3127 return 0;
3128}
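/*
 * Editor's sketch (not part of the original source): the pairing required by
 * the comment above, loosely modelled on ppoll-style syscalls. The wait
 * itself is elided; only the mask handling is shown, and the syscall shape
 * is an illustrative assumption.
 */
static long example_pwait_syscall(const sigset_t __user *umask,
				  size_t sigsetsize)
{
	long ret = set_user_sigmask(umask, sigsetsize);

	if (ret)
		return ret;

	ret = -EINTR;	/* stand-in for the actual interruptible wait */

	/* Keep the temporary mask only if a signal must be delivered first. */
	restore_saved_sigmask_unless(ret == -EINTR);
	return ret;
}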
ded653cc
DD
3129
3130#ifdef CONFIG_COMPAT
b772434b 3131int set_compat_user_sigmask(const compat_sigset_t __user *umask,
ded653cc
DD
3132 size_t sigsetsize)
3133{
b772434b 3134 sigset_t kmask;
ded653cc 3135
b772434b
ON
3136 if (!umask)
3137 return 0;
ded653cc
DD
3138 if (sigsetsize != sizeof(compat_sigset_t))
3139 return -EINVAL;
b772434b 3140 if (get_compat_sigset(&kmask, umask))
ded653cc
DD
3141 return -EFAULT;
3142
b772434b
ON
3143 set_restore_sigmask();
3144 current->saved_sigmask = current->blocked;
3145 set_current_blocked(&kmask);
ded653cc
DD
3146
3147 return 0;
3148}
ded653cc
DD
3149#endif
3150
41c57892
RD
3151/**
3152 * sys_rt_sigprocmask - change the list of currently blocked signals
3153 * @how: whether to add, remove, or set signals
ada9c933 3154 * @nset: the new mask of blocked signals, if non-NULL
41c57892
RD
3155 * @oset: previous value of signal mask if non-null
3156 * @sigsetsize: size of sigset_t type
3157 */
bb7efee2 3158SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
17da2bd9 3159 sigset_t __user *, oset, size_t, sigsetsize)
1da177e4 3160{
1da177e4 3161 sigset_t old_set, new_set;
bb7efee2 3162 int error;
1da177e4
LT
3163
3164 /* XXX: Don't preclude handling different sized sigset_t's. */
3165 if (sigsetsize != sizeof(sigset_t))
bb7efee2 3166 return -EINVAL;
1da177e4 3167
bb7efee2
ON
3168 old_set = current->blocked;
3169
3170 if (nset) {
3171 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3172 return -EFAULT;
1da177e4
LT
3173 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3174
bb7efee2 3175 error = sigprocmask(how, &new_set, NULL);
1da177e4 3176 if (error)
bb7efee2
ON
3177 return error;
3178 }
1da177e4 3179
bb7efee2
ON
3180 if (oset) {
3181 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3182 return -EFAULT;
1da177e4 3183 }
bb7efee2
ON
3184
3185 return 0;
1da177e4
LT
3186}
3187
322a56cb 3188#ifdef CONFIG_COMPAT
322a56cb
AV
3189COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3190 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
1da177e4 3191{
322a56cb
AV
3192 sigset_t old_set = current->blocked;
3193
3194 /* XXX: Don't preclude handling different sized sigset_t's. */
3195 if (sigsetsize != sizeof(sigset_t))
3196 return -EINVAL;
3197
3198 if (nset) {
322a56cb
AV
3199 sigset_t new_set;
3200 int error;
3968cf62 3201 if (get_compat_sigset(&new_set, nset))
322a56cb 3202 return -EFAULT;
322a56cb
AV
3203 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3204
3205 error = sigprocmask(how, &new_set, NULL);
3206 if (error)
3207 return error;
3208 }
f454322e 3209 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
322a56cb
AV
3210}
3211#endif
1da177e4 3212
b1d294c8 3213static void do_sigpending(sigset_t *set)
1da177e4 3214{
1da177e4 3215 spin_lock_irq(&current->sighand->siglock);
fe9c1db2 3216 sigorsets(set, &current->pending.signal,
1da177e4
LT
3217 &current->signal->shared_pending.signal);
3218 spin_unlock_irq(&current->sighand->siglock);
3219
3220 /* Outside the lock because only this thread touches it. */
fe9c1db2 3221 sigandsets(set, &current->blocked, set);
5aba085e 3222}
1da177e4 3223
41c57892
RD
3224/**
3225 * sys_rt_sigpending - examine a pending signal that has been raised
3226 * while blocked
20f22ab4 3227 * @uset: stores pending signals
41c57892
RD
3228 * @sigsetsize: size of sigset_t type or larger
3229 */
fe9c1db2 3230SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
1da177e4 3231{
fe9c1db2 3232 sigset_t set;
176826af
DL
3233
3234 if (sigsetsize > sizeof(*uset))
3235 return -EINVAL;
3236
b1d294c8
CB
3237 do_sigpending(&set);
3238
3239 if (copy_to_user(uset, &set, sigsetsize))
3240 return -EFAULT;
3241
3242 return 0;
fe9c1db2
AV
3243}
3244
3245#ifdef CONFIG_COMPAT
fe9c1db2
AV
3246COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3247 compat_size_t, sigsetsize)
1da177e4 3248{
fe9c1db2 3249 sigset_t set;
176826af
DL
3250
3251 if (sigsetsize > sizeof(*uset))
3252 return -EINVAL;
3253
b1d294c8
CB
3254 do_sigpending(&set);
3255
3256 return put_compat_sigset(uset, &set, sigsetsize);
1da177e4 3257}
fe9c1db2 3258#endif
1da177e4 3259
4ce5f9c9
EB
3260static const struct {
3261 unsigned char limit, layout;
3262} sig_sicodes[] = {
3263 [SIGILL] = { NSIGILL, SIL_FAULT },
3264 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3265 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3266 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3267 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3268#if defined(SIGEMT)
3269 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3270#endif
3271 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3272 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3273 [SIGSYS] = { NSIGSYS, SIL_SYS },
3274};
3275
b2a2ab52 3276static bool known_siginfo_layout(unsigned sig, int si_code)
4ce5f9c9
EB
3277{
3278 if (si_code == SI_KERNEL)
3279 return true;
3280 else if ((si_code > SI_USER)) {
3281 if (sig_specific_sicodes(sig)) {
3282 if (si_code <= sig_sicodes[sig].limit)
3283 return true;
3284 }
3285 else if (si_code <= NSIGPOLL)
3286 return true;
3287 }
3288 else if (si_code >= SI_DETHREAD)
3289 return true;
3290 else if (si_code == SI_ASYNCNL)
3291 return true;
3292 return false;
3293}
3294
a3670058 3295enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
cc731525
EB
3296{
3297 enum siginfo_layout layout = SIL_KILL;
3298 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
4ce5f9c9
EB
3299 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3300 (si_code <= sig_sicodes[sig].limit)) {
3301 layout = sig_sicodes[sig].layout;
31931c93
EB
3302 /* Handle the exceptions */
3303 if ((sig == SIGBUS) &&
3304 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3305 layout = SIL_FAULT_MCEERR;
3306 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3307 layout = SIL_FAULT_BNDERR;
3308#ifdef SEGV_PKUERR
3309 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3310 layout = SIL_FAULT_PKUERR;
3311#endif
ed8e5080 3312 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
f4ac7302 3313 layout = SIL_FAULT_PERF_EVENT;
2c9f7eaf
EB
3314 else if (IS_ENABLED(CONFIG_SPARC) &&
3315 (sig == SIGILL) && (si_code == ILL_ILLTRP))
3316 layout = SIL_FAULT_TRAPNO;
7de5f68d
EB
3317 else if (IS_ENABLED(CONFIG_ALPHA) &&
3318 ((sig == SIGFPE) ||
3319 ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
9abcabe3 3320 layout = SIL_FAULT_TRAPNO;
31931c93 3321 }
cc731525
EB
3322 else if (si_code <= NSIGPOLL)
3323 layout = SIL_POLL;
3324 } else {
3325 if (si_code == SI_TIMER)
3326 layout = SIL_TIMER;
3327 else if (si_code == SI_SIGIO)
3328 layout = SIL_POLL;
3329 else if (si_code < 0)
3330 layout = SIL_RT;
cc731525
EB
3331 }
3332 return layout;
3333}
3334
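/*
 * Editor's note (not part of the original source): a few classifications
 * produced by siginfo_layout() above, as implied by sig_sicodes[] and the
 * special cases in the function; useful when reading the copy routines
 * below.
 *
 *	siginfo_layout(SIGSEGV, SEGV_MAPERR)	-> SIL_FAULT
 *	siginfo_layout(SIGSEGV, SEGV_BNDERR)	-> SIL_FAULT_BNDERR
 *	siginfo_layout(SIGBUS,  BUS_MCEERR_AR)	-> SIL_FAULT_MCEERR
 *	siginfo_layout(SIGCHLD, CLD_EXITED)	-> SIL_CHLD
 *	siginfo_layout(SIGUSR1, SI_USER)	-> SIL_KILL
 *	siginfo_layout(SIGRTMIN, SI_QUEUE)	-> SIL_RT
 */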
4ce5f9c9
EB
3335static inline char __user *si_expansion(const siginfo_t __user *info)
3336{
3337 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3338}
3339
ae7795bc 3340int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
1da177e4 3341{
4ce5f9c9 3342 char __user *expansion = si_expansion(to);
ae7795bc 3343 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
1da177e4 3344 return -EFAULT;
4ce5f9c9 3345 if (clear_user(expansion, SI_EXPANSION_SIZE))
1da177e4 3346 return -EFAULT;
c999b933 3347 return 0;
1da177e4
LT
3348}
3349
601d5abf
EB
3350static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3351 const siginfo_t __user *from)
4cd2e0e7 3352{
601d5abf 3353 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
4ce5f9c9
EB
3354 char __user *expansion = si_expansion(from);
3355 char buf[SI_EXPANSION_SIZE];
3356 int i;
3357 /*
3358 * An unknown si_code might need more than
3359 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3360 * extra bytes are 0. This guarantees copy_siginfo_to_user
3361 * will return this data to userspace exactly.
3362 */
3363 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3364 return -EFAULT;
3365 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3366 if (buf[i] != 0)
3367 return -E2BIG;
3368 }
3369 }
4cd2e0e7
EB
3370 return 0;
3371}
3372
601d5abf
EB
3373static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3374 const siginfo_t __user *from)
3375{
3376 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3377 return -EFAULT;
3378 to->si_signo = signo;
3379 return post_copy_siginfo_from_user(to, from);
3380}
3381
3382int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3383{
3384 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3385 return -EFAULT;
3386 return post_copy_siginfo_from_user(to, from);
3387}
3388
212a36a1 3389#ifdef CONFIG_COMPAT
c3b3f524
CH
3390/**
3391 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3392 * @to: compat siginfo destination
3393 * @from: kernel siginfo source
3394 *
3395 * Note: This function does not work properly for SIGCHLD on x32, but
3396 * fortunately it doesn't have to. The only valid callers for this function are
3397 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
3398 * The latter does not care because SIGCHLD will never cause a coredump.
3399 */
3400void copy_siginfo_to_external32(struct compat_siginfo *to,
3401 const struct kernel_siginfo *from)
ea64d5ac 3402{
c3b3f524 3403 memset(to, 0, sizeof(*to));
ea64d5ac 3404
c3b3f524
CH
3405 to->si_signo = from->si_signo;
3406 to->si_errno = from->si_errno;
3407 to->si_code = from->si_code;
ea64d5ac
EB
3408 switch(siginfo_layout(from->si_signo, from->si_code)) {
3409 case SIL_KILL:
c3b3f524
CH
3410 to->si_pid = from->si_pid;
3411 to->si_uid = from->si_uid;
ea64d5ac
EB
3412 break;
3413 case SIL_TIMER:
c3b3f524
CH
3414 to->si_tid = from->si_tid;
3415 to->si_overrun = from->si_overrun;
3416 to->si_int = from->si_int;
ea64d5ac
EB
3417 break;
3418 case SIL_POLL:
c3b3f524
CH
3419 to->si_band = from->si_band;
3420 to->si_fd = from->si_fd;
ea64d5ac
EB
3421 break;
3422 case SIL_FAULT:
c3b3f524 3423 to->si_addr = ptr_to_compat(from->si_addr);
9abcabe3
EB
3424 break;
3425 case SIL_FAULT_TRAPNO:
3426 to->si_addr = ptr_to_compat(from->si_addr);
c3b3f524 3427 to->si_trapno = from->si_trapno;
31931c93
EB
3428 break;
3429 case SIL_FAULT_MCEERR:
c3b3f524 3430 to->si_addr = ptr_to_compat(from->si_addr);
c3b3f524 3431 to->si_addr_lsb = from->si_addr_lsb;
31931c93
EB
3432 break;
3433 case SIL_FAULT_BNDERR:
c3b3f524 3434 to->si_addr = ptr_to_compat(from->si_addr);
c3b3f524
CH
3435 to->si_lower = ptr_to_compat(from->si_lower);
3436 to->si_upper = ptr_to_compat(from->si_upper);
31931c93
EB
3437 break;
3438 case SIL_FAULT_PKUERR:
c3b3f524 3439 to->si_addr = ptr_to_compat(from->si_addr);
c3b3f524 3440 to->si_pkey = from->si_pkey;
ea64d5ac 3441 break;
f4ac7302 3442 case SIL_FAULT_PERF_EVENT:
fb6cc127 3443 to->si_addr = ptr_to_compat(from->si_addr);
0683b531
EB
3444 to->si_perf_data = from->si_perf_data;
3445 to->si_perf_type = from->si_perf_type;
fb6cc127 3446 break;
ea64d5ac 3447 case SIL_CHLD:
c3b3f524
CH
3448 to->si_pid = from->si_pid;
3449 to->si_uid = from->si_uid;
3450 to->si_status = from->si_status;
3451 to->si_utime = from->si_utime;
3452 to->si_stime = from->si_stime;
ea64d5ac
EB
3453 break;
3454 case SIL_RT:
c3b3f524
CH
3455 to->si_pid = from->si_pid;
3456 to->si_uid = from->si_uid;
3457 to->si_int = from->si_int;
ea64d5ac
EB
3458 break;
3459 case SIL_SYS:
c3b3f524
CH
3460 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3461 to->si_syscall = from->si_syscall;
3462 to->si_arch = from->si_arch;
ea64d5ac
EB
3463 break;
3464 }
c3b3f524 3465}
ea64d5ac 3466
c3b3f524
CH
3467int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3468 const struct kernel_siginfo *from)
3469{
3470 struct compat_siginfo new;
3471
3472 copy_siginfo_to_external32(&new, from);
ea64d5ac
EB
3473 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3474 return -EFAULT;
ea64d5ac
EB
3475 return 0;
3476}
3477
601d5abf
EB
3478static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3479 const struct compat_siginfo *from)
212a36a1 3480{
212a36a1 3481 clear_siginfo(to);
601d5abf
EB
3482 to->si_signo = from->si_signo;
3483 to->si_errno = from->si_errno;
3484 to->si_code = from->si_code;
3485 switch(siginfo_layout(from->si_signo, from->si_code)) {
212a36a1 3486 case SIL_KILL:
601d5abf
EB
3487 to->si_pid = from->si_pid;
3488 to->si_uid = from->si_uid;
212a36a1
EB
3489 break;
3490 case SIL_TIMER:
601d5abf
EB
3491 to->si_tid = from->si_tid;
3492 to->si_overrun = from->si_overrun;
3493 to->si_int = from->si_int;
212a36a1
EB
3494 break;
3495 case SIL_POLL:
601d5abf
EB
3496 to->si_band = from->si_band;
3497 to->si_fd = from->si_fd;
212a36a1
EB
3498 break;
3499 case SIL_FAULT:
601d5abf 3500 to->si_addr = compat_ptr(from->si_addr);
9abcabe3
EB
3501 break;
3502 case SIL_FAULT_TRAPNO:
3503 to->si_addr = compat_ptr(from->si_addr);
601d5abf 3504 to->si_trapno = from->si_trapno;
31931c93
EB
3505 break;
3506 case SIL_FAULT_MCEERR:
601d5abf 3507 to->si_addr = compat_ptr(from->si_addr);
601d5abf 3508 to->si_addr_lsb = from->si_addr_lsb;
31931c93
EB
3509 break;
3510 case SIL_FAULT_BNDERR:
601d5abf 3511 to->si_addr = compat_ptr(from->si_addr);
601d5abf
EB
3512 to->si_lower = compat_ptr(from->si_lower);
3513 to->si_upper = compat_ptr(from->si_upper);
31931c93
EB
3514 break;
3515 case SIL_FAULT_PKUERR:
601d5abf 3516 to->si_addr = compat_ptr(from->si_addr);
601d5abf 3517 to->si_pkey = from->si_pkey;
212a36a1 3518 break;
f4ac7302 3519 case SIL_FAULT_PERF_EVENT:
fb6cc127 3520 to->si_addr = compat_ptr(from->si_addr);
0683b531
EB
3521 to->si_perf_data = from->si_perf_data;
3522 to->si_perf_type = from->si_perf_type;
fb6cc127 3523 break;
212a36a1 3524 case SIL_CHLD:
601d5abf
EB
3525 to->si_pid = from->si_pid;
3526 to->si_uid = from->si_uid;
3527 to->si_status = from->si_status;
212a36a1
EB
3528#ifdef CONFIG_X86_X32_ABI
3529 if (in_x32_syscall()) {
601d5abf
EB
3530 to->si_utime = from->_sifields._sigchld_x32._utime;
3531 to->si_stime = from->_sifields._sigchld_x32._stime;
212a36a1
EB
3532 } else
3533#endif
3534 {
601d5abf
EB
3535 to->si_utime = from->si_utime;
3536 to->si_stime = from->si_stime;
212a36a1
EB
3537 }
3538 break;
3539 case SIL_RT:
601d5abf
EB
3540 to->si_pid = from->si_pid;
3541 to->si_uid = from->si_uid;
3542 to->si_int = from->si_int;
212a36a1
EB
3543 break;
3544 case SIL_SYS:
601d5abf
EB
3545 to->si_call_addr = compat_ptr(from->si_call_addr);
3546 to->si_syscall = from->si_syscall;
3547 to->si_arch = from->si_arch;
212a36a1
EB
3548 break;
3549 }
3550 return 0;
3551}
601d5abf
EB
3552
3553static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3554 const struct compat_siginfo __user *ufrom)
3555{
3556 struct compat_siginfo from;
3557
3558 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3559 return -EFAULT;
3560
3561 from.si_signo = signo;
3562 return post_copy_siginfo_from_user32(to, &from);
3563}
3564
3565int copy_siginfo_from_user32(struct kernel_siginfo *to,
3566 const struct compat_siginfo __user *ufrom)
3567{
3568 struct compat_siginfo from;
3569
3570 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3571 return -EFAULT;
3572
3573 return post_copy_siginfo_from_user32(to, &from);
3574}
212a36a1
EB
3575#endif /* CONFIG_COMPAT */
3576
943df148
ON
3577/**
3578 * do_sigtimedwait - wait for queued signals specified in @which
3579 * @which: queued signals to wait for
3580 * @info: if non-null, the signal's siginfo is returned here
3581 * @ts: upper bound on process time suspension
3582 */
ae7795bc 3583static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
49c39f84 3584 const struct timespec64 *ts)
943df148 3585{
2456e855 3586 ktime_t *to = NULL, timeout = KTIME_MAX;
943df148 3587 struct task_struct *tsk = current;
943df148 3588 sigset_t mask = *which;
2b1ecc3d 3589 int sig, ret = 0;
943df148
ON
3590
3591 if (ts) {
49c39f84 3592 if (!timespec64_valid(ts))
943df148 3593 return -EINVAL;
49c39f84 3594 timeout = timespec64_to_ktime(*ts);
2b1ecc3d 3595 to = &timeout;
943df148
ON
3596 }
3597
3598 /*
3599 * Invert the set of allowed signals to get those we want to block.
3600 */
3601 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3602 signotset(&mask);
3603
3604 spin_lock_irq(&tsk->sighand->siglock);
3605 sig = dequeue_signal(tsk, &mask, info);
2456e855 3606 if (!sig && timeout) {
943df148
ON
3607 /*
3608 * None ready, temporarily unblock those we're interested in
3609 * while we are sleeping, so that we'll be awakened when
b182801a
ON
3610 * they arrive. Unblocking is always fine, we can avoid
3611 * set_current_blocked().
943df148
ON
3612 */
3613 tsk->real_blocked = tsk->blocked;
3614 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3615 recalc_sigpending();
3616 spin_unlock_irq(&tsk->sighand->siglock);
3617
2b1ecc3d
TG
3618 __set_current_state(TASK_INTERRUPTIBLE);
3619 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3620 HRTIMER_MODE_REL);
943df148 3621 spin_lock_irq(&tsk->sighand->siglock);
b182801a 3622 __set_task_blocked(tsk, &tsk->real_blocked);
6114041a 3623 sigemptyset(&tsk->real_blocked);
b182801a 3624 sig = dequeue_signal(tsk, &mask, info);
943df148
ON
3625 }
3626 spin_unlock_irq(&tsk->sighand->siglock);
3627
3628 if (sig)
3629 return sig;
2b1ecc3d 3630 return ret ? -EINTR : -EAGAIN;
943df148
ON
3631}
3632
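/*
 * Illustrative userspace sketch (not part of the kernel source): this
 * path is reached through sigtimedwait(2). The signal must already be
 * blocked, or it may be delivered to a handler instead of being picked
 * off the queue here. Error handling is trimmed:
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int wait_usr1_5s(void)
 *	{
 *		sigset_t set;
 *		siginfo_t si;
 *		struct timespec ts = { .tv_sec = 5 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		if (sigtimedwait(&set, &si, &ts) < 0)
 *			return -1;	// errno is EAGAIN on timeout
 *		printf("SIGUSR1 from pid %d\n", (int)si.si_pid);
 *		return 0;
 *	}
 */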
41c57892
RD
3633/**
3634 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3635 * in @uthese
3636 * @uthese: queued signals to wait for
3637 * @uinfo: if non-null, the signal's siginfo is returned here
3638 * @uts: upper bound on process time suspension
3639 * @sigsetsize: size of sigset_t type
3640 */
17da2bd9 3641SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
49c39f84
AB
3642 siginfo_t __user *, uinfo,
3643 const struct __kernel_timespec __user *, uts,
17da2bd9 3644 size_t, sigsetsize)
1da177e4 3645{
1da177e4 3646 sigset_t these;
49c39f84 3647 struct timespec64 ts;
ae7795bc 3648 kernel_siginfo_t info;
943df148 3649 int ret;
1da177e4
LT
3650
3651 /* XXX: Don't preclude handling different sized sigset_t's. */
3652 if (sigsetsize != sizeof(sigset_t))
3653 return -EINVAL;
3654
3655 if (copy_from_user(&these, uthese, sizeof(these)))
3656 return -EFAULT;
5aba085e 3657
1da177e4 3658 if (uts) {
49c39f84 3659 if (get_timespec64(&ts, uts))
1da177e4 3660 return -EFAULT;
1da177e4
LT
3661 }
3662
943df148 3663 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
1da177e4 3664
943df148
ON
3665 if (ret > 0 && uinfo) {
3666 if (copy_siginfo_to_user(uinfo, &info))
3667 ret = -EFAULT;
1da177e4
LT
3668 }
3669
3670 return ret;
3671}
3672
df8522a3
AB
3673#ifdef CONFIG_COMPAT_32BIT_TIME
3674SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3675 siginfo_t __user *, uinfo,
3676 const struct old_timespec32 __user *, uts,
3677 size_t, sigsetsize)
3678{
3679 sigset_t these;
3680 struct timespec64 ts;
3681 kernel_siginfo_t info;
3682 int ret;
3683
3684 if (sigsetsize != sizeof(sigset_t))
3685 return -EINVAL;
3686
3687 if (copy_from_user(&these, uthese, sizeof(these)))
3688 return -EFAULT;
3689
3690 if (uts) {
3691 if (get_old_timespec32(&ts, uts))
3692 return -EFAULT;
3693 }
3694
3695 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3696
3697 if (ret > 0 && uinfo) {
3698 if (copy_siginfo_to_user(uinfo, &info))
3699 ret = -EFAULT;
3700 }
3701
3702 return ret;
3703}
3704#endif
3705
1b3c872c 3706#ifdef CONFIG_COMPAT
2367c4b5
AB
3707COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3708 struct compat_siginfo __user *, uinfo,
3709 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3710{
3711 sigset_t s;
3712 struct timespec64 t;
3713 kernel_siginfo_t info;
3714 long ret;
3715
3716 if (sigsetsize != sizeof(sigset_t))
3717 return -EINVAL;
3718
3719 if (get_compat_sigset(&s, uthese))
3720 return -EFAULT;
3721
3722 if (uts) {
3723 if (get_timespec64(&t, uts))
3724 return -EFAULT;
3725 }
3726
3727 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3728
3729 if (ret > 0 && uinfo) {
3730 if (copy_siginfo_to_user32(uinfo, &info))
3731 ret = -EFAULT;
3732 }
3733
3734 return ret;
3735}
3736
3737#ifdef CONFIG_COMPAT_32BIT_TIME
8dabe724 3738COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
1b3c872c 3739 struct compat_siginfo __user *, uinfo,
9afc5eee 3740 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
1b3c872c 3741{
1b3c872c 3742 sigset_t s;
49c39f84 3743 struct timespec64 t;
ae7795bc 3744 kernel_siginfo_t info;
1b3c872c
AV
3745 long ret;
3746
3747 if (sigsetsize != sizeof(sigset_t))
3748 return -EINVAL;
3749
3968cf62 3750 if (get_compat_sigset(&s, uthese))
1b3c872c 3751 return -EFAULT;
1b3c872c
AV
3752
3753 if (uts) {
49c39f84 3754 if (get_old_timespec32(&t, uts))
1b3c872c
AV
3755 return -EFAULT;
3756 }
3757
3758 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3759
3760 if (ret > 0 && uinfo) {
3761 if (copy_siginfo_to_user32(uinfo, &info))
3762 ret = -EFAULT;
3763 }
3764
3765 return ret;
3766}
3767#endif
2367c4b5 3768#endif
1b3c872c 3769
3eb39f47
CB
3770static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3771{
3772 clear_siginfo(info);
3773 info->si_signo = sig;
3774 info->si_errno = 0;
3775 info->si_code = SI_USER;
3776 info->si_pid = task_tgid_vnr(current);
3777 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3778}
3779
41c57892
RD
3780/**
3781 * sys_kill - send a signal to a process
3782 * @pid: the PID of the process
3783 * @sig: signal to be sent
3784 */
17da2bd9 3785SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
1da177e4 3786{
ae7795bc 3787 struct kernel_siginfo info;
1da177e4 3788
3eb39f47 3789 prepare_kill_siginfo(sig, &info);
1da177e4
LT
3790
3791 return kill_something_info(sig, &info, pid);
3792}
3793
3eb39f47
CB
3794/*
3795 * Verify either that the signaler and signalee are in the same pid
3796 * namespace or that the signaler's pid namespace is an ancestor of the
3797 * signalee's pid namespace.
3798 */
3799static bool access_pidfd_pidns(struct pid *pid)
3800{
3801 struct pid_namespace *active = task_active_pid_ns(current);
3802 struct pid_namespace *p = ns_of_pid(pid);
3803
3804 for (;;) {
3805 if (!p)
3806 return false;
3807 if (p == active)
3808 break;
3809 p = p->parent;
3810 }
3811
3812 return true;
3813}
3814
adc5d875
JH
3815static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3816 siginfo_t __user *info)
3eb39f47
CB
3817{
3818#ifdef CONFIG_COMPAT
3819 /*
3820 * Avoid hooking up compat syscalls and instead handle necessary
3821 * conversions here. Note, this is a stop-gap measure and should not be
3822 * considered a generic solution.
3823 */
3824 if (in_compat_syscall())
3825 return copy_siginfo_from_user32(
3826 kinfo, (struct compat_siginfo __user *)info);
3827#endif
3828 return copy_siginfo_from_user(kinfo, info);
3829}
3830
2151ad1b
CB
3831static struct pid *pidfd_to_pid(const struct file *file)
3832{
3695eae5
CB
3833 struct pid *pid;
3834
3835 pid = pidfd_pid(file);
3836 if (!IS_ERR(pid))
3837 return pid;
2151ad1b
CB
3838
3839 return tgid_pidfd_to_pid(file);
3840}
3841
3eb39f47 3842/**
c732327f
CB
3843 * sys_pidfd_send_signal - Signal a process through a pidfd
3844 * @pidfd: file descriptor of the process
3845 * @sig: signal to send
3846 * @info: signal info
3847 * @flags: future flags
3eb39f47
CB
3848 *
3849 * The syscall currently only signals via PIDTYPE_PID which covers
3850 * kill(<positive-pid>, <signal>). It does not signal threads or process
3851 * groups.
3852 * In order to extend the syscall to threads and process groups the @flags
3853 * argument should be used. In essence, the @flags argument will determine
3854 * what is signaled, not the file descriptor itself. In other words,
3855 * grouping is a property of the flags argument not a property of the file
3856 * descriptor.
3857 *
3858 * Return: 0 on success, negative errno on failure
3859 */
3860SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3861 siginfo_t __user *, info, unsigned int, flags)
3862{
3863 int ret;
3864 struct fd f;
3865 struct pid *pid;
3866 kernel_siginfo_t kinfo;
3867
3868 /* Enforce flags be set to 0 until we add an extension. */
3869 if (flags)
3870 return -EINVAL;
3871
738a7832 3872 f = fdget(pidfd);
3eb39f47
CB
3873 if (!f.file)
3874 return -EBADF;
3875
3876 /* Is this a pidfd? */
2151ad1b 3877 pid = pidfd_to_pid(f.file);
3eb39f47
CB
3878 if (IS_ERR(pid)) {
3879 ret = PTR_ERR(pid);
3880 goto err;
3881 }
3882
3883 ret = -EINVAL;
3884 if (!access_pidfd_pidns(pid))
3885 goto err;
3886
3887 if (info) {
3888 ret = copy_siginfo_from_user_any(&kinfo, info);
3889 if (unlikely(ret))
3890 goto err;
3891
3892 ret = -EINVAL;
3893 if (unlikely(sig != kinfo.si_signo))
3894 goto err;
3895
556a888a
JH
3896 /* Only allow sending arbitrary signals to yourself. */
3897 ret = -EPERM;
3eb39f47 3898 if ((task_pid(current) != pid) &&
556a888a
JH
3899 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3900 goto err;
3eb39f47
CB
3901 } else {
3902 prepare_kill_siginfo(sig, &kinfo);
3903 }
3904
3905 ret = kill_pid_info(sig, &kinfo, pid);
3906
3907err:
3908 fdput(f);
3909 return ret;
3910}
3eb39f47 3911
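/*
 * Illustrative userspace sketch (not part of the kernel source): calling
 * this syscall directly. Older glibc lacks wrappers, so syscall(2) with
 * the SYS_* numbers from <sys/syscall.h> is used; error handling is
 * trimmed:
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *	#include <signal.h>
 *
 *	int term_via_pidfd(pid_t pid)
 *	{
 *		int ret, pidfd = syscall(SYS_pidfd_open, pid, 0);
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// info == NULL: the kernel builds SI_USER info, as kill(2) does
 *		ret = syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 */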
30b4ae8a 3912static int
ae7795bc 3913do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
1da177e4 3914{
1da177e4 3915 struct task_struct *p;
30b4ae8a 3916 int error = -ESRCH;
1da177e4 3917
3547ff3a 3918 rcu_read_lock();
228ebcbe 3919 p = find_task_by_vpid(pid);
b488893a 3920 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
30b4ae8a 3921 error = check_kill_permission(sig, info, p);
1da177e4
LT
3922 /*
3923 * The null signal is a permissions and process existence
3924 * probe. No signal is actually delivered.
3925 */
4a30debf 3926 if (!error && sig) {
40b3b025 3927 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
4a30debf
ON
3928 /*
3929 * If lock_task_sighand() failed we pretend the task
3930 * dies after receiving the signal. The window is tiny,
3931 * and the signal is private anyway.
3932 */
3933 if (unlikely(error == -ESRCH))
3934 error = 0;
1da177e4
LT
3935 }
3936 }
3547ff3a 3937 rcu_read_unlock();
6dd69f10 3938
1da177e4
LT
3939 return error;
3940}
3941
30b4ae8a
TG
3942static int do_tkill(pid_t tgid, pid_t pid, int sig)
3943{
ae7795bc 3944 struct kernel_siginfo info;
30b4ae8a 3945
5f74972c 3946 clear_siginfo(&info);
30b4ae8a
TG
3947 info.si_signo = sig;
3948 info.si_errno = 0;
3949 info.si_code = SI_TKILL;
3950 info.si_pid = task_tgid_vnr(current);
078de5f7 3951 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
30b4ae8a
TG
3952
3953 return do_send_specific(tgid, pid, sig, &info);
3954}
3955
6dd69f10
VL
3956/**
3957 * sys_tgkill - send signal to one specific thread
3958 * @tgid: the thread group ID of the thread
3959 * @pid: the PID of the thread
3960 * @sig: signal to be sent
3961 *
72fd4a35 3962 * This syscall also checks the @tgid and returns -ESRCH even if the PID
6dd69f10
VL
3963 * exists but no longer belongs to the target process. This
3964 * method solves the problem of threads exiting and PIDs getting reused.
3965 */
a5f8fa9e 3966SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
6dd69f10
VL
3967{
3968 /* This is only valid for single tasks */
3969 if (pid <= 0 || tgid <= 0)
3970 return -EINVAL;
3971
3972 return do_tkill(tgid, pid, sig);
3973}
3974
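/*
 * Illustrative userspace sketch (not part of the kernel source),
 * assuming the tgkill()/gettid() wrappers of glibc >= 2.30 (older
 * systems can use syscall(SYS_tgkill, ...) instead):
 *
 *	#define _GNU_SOURCE
 *	#include <signal.h>
 *	#include <unistd.h>
 *
 *	int signal_this_thread(int sig)
 *	{
 *		// Passing the tgid means a recycled tid in some other
 *		// process cannot be hit by accident.
 *		return tgkill(getpid(), gettid(), sig);
 *	}
 */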
41c57892
RD
3975/**
3976 * sys_tkill - send signal to one specific task
3977 * @pid: the PID of the task
3978 * @sig: signal to be sent
3979 *
1da177e4
LT
3980 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3981 */
a5f8fa9e 3982SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
1da177e4 3983{
1da177e4
LT
3984 /* This is only valid for single tasks */
3985 if (pid <= 0)
3986 return -EINVAL;
3987
6dd69f10 3988 return do_tkill(0, pid, sig);
1da177e4
LT
3989}
3990
ae7795bc 3991static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
75907d4d
AV
3992{
3993 /* Not even root can pretend to send signals from the kernel.
3994 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3995 */
66dd34ad 3996 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
69828dce 3997 (task_pid_vnr(current) != pid))
75907d4d 3998 return -EPERM;
69828dce 3999
75907d4d
AV
4000 /* POSIX.1b doesn't mention process groups. */
4001 return kill_proc_info(sig, info, pid);
4002}
4003
41c57892
RD
4004/**
4005 * sys_rt_sigqueueinfo - send signal information to a process
4006 * @pid: the PID of the thread
4007 * @sig: signal to be sent
4008 * @uinfo: signal info to be sent
4009 */
a5f8fa9e
HC
4010SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
4011 siginfo_t __user *, uinfo)
1da177e4 4012{
ae7795bc 4013 kernel_siginfo_t info;
601d5abf 4014 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4cd2e0e7
EB
4015 if (unlikely(ret))
4016 return ret;
75907d4d
AV
4017 return do_rt_sigqueueinfo(pid, sig, &info);
4018}
1da177e4 4019
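/*
 * Illustrative userspace sketch (not part of the kernel source):
 * userspace normally reaches this syscall through sigqueue(3), which
 * builds the SI_QUEUE siginfo itself; arbitrary si_codes are rejected
 * above unless a task is signaling itself:
 *
 *	#include <signal.h>
 *
 *	int notify(pid_t pid, int token)
 *	{
 *		union sigval v = { .sival_int = token };
 *
 *		return sigqueue(pid, SIGRTMIN, v);	// arrives with si_code == SI_QUEUE
 *	}
 */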
75907d4d 4020#ifdef CONFIG_COMPAT
75907d4d
AV
4021COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
4022 compat_pid_t, pid,
4023 int, sig,
4024 struct compat_siginfo __user *, uinfo)
4025{
ae7795bc 4026 kernel_siginfo_t info;
601d5abf 4027 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
75907d4d
AV
4028 if (unlikely(ret))
4029 return ret;
4030 return do_rt_sigqueueinfo(pid, sig, &info);
1da177e4 4031}
75907d4d 4032#endif
1da177e4 4033
ae7795bc 4034static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
62ab4505
TG
4035{
4036 /* This is only valid for single tasks */
4037 if (pid <= 0 || tgid <= 0)
4038 return -EINVAL;
4039
4040 /* Not even root can pretend to send signals from the kernel.
da48524e
JT
4041 * Nor can they impersonate a kill()/tgkill(), which adds source info.
4042 */
69828dce
VD
4043 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
4044 (task_pid_vnr(current) != pid))
62ab4505 4045 return -EPERM;
69828dce 4046
62ab4505
TG
4047 return do_send_specific(tgid, pid, sig, info);
4048}
4049
4050SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4051 siginfo_t __user *, uinfo)
4052{
ae7795bc 4053 kernel_siginfo_t info;
601d5abf 4054 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4cd2e0e7
EB
4055 if (unlikely(ret))
4056 return ret;
62ab4505
TG
4057 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4058}
4059
9aae8fc0
AV
4060#ifdef CONFIG_COMPAT
4061COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4062 compat_pid_t, tgid,
4063 compat_pid_t, pid,
4064 int, sig,
4065 struct compat_siginfo __user *, uinfo)
4066{
ae7795bc 4067 kernel_siginfo_t info;
601d5abf 4068 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4cd2e0e7
EB
4069 if (unlikely(ret))
4070 return ret;
9aae8fc0
AV
4071 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4072}
4073#endif
4074
0341729b 4075/*
b4e74264 4076 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
0341729b 4077 */
b4e74264 4078void kernel_sigaction(int sig, __sighandler_t action)
0341729b 4079{
ec5955b8 4080 spin_lock_irq(&current->sighand->siglock);
b4e74264
ON
4081 current->sighand->action[sig - 1].sa.sa_handler = action;
4082 if (action == SIG_IGN) {
4083 sigset_t mask;
0341729b 4084
b4e74264
ON
4085 sigemptyset(&mask);
4086 sigaddset(&mask, sig);
580d34e4 4087
b4e74264
ON
4088 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4089 flush_sigqueue_mask(&mask, &current->pending);
4090 recalc_sigpending();
4091 }
0341729b
ON
4092 spin_unlock_irq(&current->sighand->siglock);
4093}
b4e74264 4094EXPORT_SYMBOL(kernel_sigaction);
0341729b 4095
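/*
 * Sketch of typical in-kernel use (a hypothetical kthread body, shown
 * for illustration): the allow_signal()/disallow_signal() helpers in
 * <linux/signal.h> are thin wrappers around kernel_sigaction():
 *
 *	#include <linux/kthread.h>
 *	#include <linux/sched/signal.h>
 *	#include <linux/delay.h>
 *
 *	static int my_thread_fn(void *unused)
 *	{
 *		allow_signal(SIGTERM);	// opt this kthread in to SIGTERM
 *		while (!kthread_should_stop()) {
 *			if (signal_pending(current))
 *				break;	// woken early by SIGTERM
 *			msleep_interruptible(1000);
 *		}
 *		return 0;
 *	}
 */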
68463510
DS
4096void __weak sigaction_compat_abi(struct k_sigaction *act,
4097 struct k_sigaction *oact)
4098{
4099}
4100
88531f72 4101int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
1da177e4 4102{
afe2b038 4103 struct task_struct *p = current, *t;
1da177e4 4104 struct k_sigaction *k;
71fabd5e 4105 sigset_t mask;
1da177e4 4106
7ed20e1a 4107 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
1da177e4
LT
4108 return -EINVAL;
4109
afe2b038 4110 k = &p->sighand->action[sig-1];
1da177e4 4111
afe2b038 4112 spin_lock_irq(&p->sighand->siglock);
1da177e4
LT
4113 if (oact)
4114 *oact = *k;
4115
a54f0dfd
PC
4116 /*
4117 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4118 * e.g. by having an architecture use the bit in their uapi.
4119 */
4120 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4121
23acdc76
PC
4122 /*
4123 * Clear unknown flag bits in order to allow userspace to detect missing
4124 * support for flag bits and to allow the kernel to use non-uapi bits
4125 * internally.
4126 */
4127 if (act)
4128 act->sa.sa_flags &= UAPI_SA_FLAGS;
4129 if (oact)
4130 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4131
68463510
DS
4132 sigaction_compat_abi(act, oact);
4133
1da177e4 4134 if (act) {
9ac95f2f
ON
4135 sigdelsetmask(&act->sa.sa_mask,
4136 sigmask(SIGKILL) | sigmask(SIGSTOP));
88531f72 4137 *k = *act;
1da177e4
LT
4138 /*
4139 * POSIX 3.3.1.3:
4140 * "Setting a signal action to SIG_IGN for a signal that is
4141 * pending shall cause the pending signal to be discarded,
4142 * whether or not it is blocked."
4143 *
4144 * "Setting a signal action to SIG_DFL for a signal that is
4145 * pending and whose default action is to ignore the signal
4146 * (for example, SIGCHLD), shall cause the pending signal to
4147 * be discarded, whether or not it is blocked"
4148 */
afe2b038 4149 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
71fabd5e
GA
4150 sigemptyset(&mask);
4151 sigaddset(&mask, sig);
afe2b038
ON
4152 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4153 for_each_thread(p, t)
c09c1441 4154 flush_sigqueue_mask(&mask, &t->pending);
1da177e4 4155 }
1da177e4
LT
4156 }
4157
afe2b038 4158 spin_unlock_irq(&p->sighand->siglock);
1da177e4
LT
4159 return 0;
4160}
4161
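/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * view of do_sigaction() from sigaction(2). SA_SIGINFO selects the
 * three-argument handler form; SIGKILL/SIGSTOP are refused by the
 * sig_kernel_only() test above:
 *
 *	#include <signal.h>
 *	#include <string.h>
 *
 *	static void on_int(int sig, siginfo_t *si, void *uc) { (void)si; }
 *
 *	int install(void)
 *	{
 *		struct sigaction sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_sigaction = on_int;
 *		sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGINT, &sa, NULL);
 *	}
 */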
c09c1441 4162static int
22839869
WD
4163do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4164 size_t min_ss_size)
1da177e4 4165{
bcfe8ad8 4166 struct task_struct *t = current;
1da177e4 4167
bcfe8ad8
AV
4168 if (oss) {
4169 memset(oss, 0, sizeof(stack_t));
4170 oss->ss_sp = (void __user *) t->sas_ss_sp;
4171 oss->ss_size = t->sas_ss_size;
4172 oss->ss_flags = sas_ss_flags(sp) |
4173 (current->sas_ss_flags & SS_FLAG_BITS);
4174 }
1da177e4 4175
bcfe8ad8
AV
4176 if (ss) {
4177 void __user *ss_sp = ss->ss_sp;
4178 size_t ss_size = ss->ss_size;
4179 unsigned ss_flags = ss->ss_flags;
407bc16a 4180 int ss_mode;
1da177e4 4181
bcfe8ad8
AV
4182 if (unlikely(on_sig_stack(sp)))
4183 return -EPERM;
1da177e4 4184
407bc16a 4185 ss_mode = ss_flags & ~SS_FLAG_BITS;
bcfe8ad8
AV
4186 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4187 ss_mode != 0))
4188 return -EINVAL;
1da177e4 4189
407bc16a 4190 if (ss_mode == SS_DISABLE) {
1da177e4
LT
4191 ss_size = 0;
4192 ss_sp = NULL;
4193 } else {
22839869 4194 if (unlikely(ss_size < min_ss_size))
bcfe8ad8 4195 return -ENOMEM;
1da177e4
LT
4196 }
4197
bcfe8ad8
AV
4198 t->sas_ss_sp = (unsigned long) ss_sp;
4199 t->sas_ss_size = ss_size;
4200 t->sas_ss_flags = ss_flags;
1da177e4 4201 }
bcfe8ad8 4202 return 0;
1da177e4 4203}
bcfe8ad8 4204
6bf9adfc
AV
4205SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4206{
bcfe8ad8
AV
4207 stack_t new, old;
4208 int err;
4209 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4210 return -EFAULT;
4211 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
22839869
WD
4212 current_user_stack_pointer(),
4213 MINSIGSTKSZ);
bcfe8ad8
AV
4214 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4215 err = -EFAULT;
4216 return err;
6bf9adfc 4217}
1da177e4 4218
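/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * reason sigaltstack(2) exists -- a handler for stack-overflow SIGSEGV
 * must run somewhere other than the faulting stack. Error handling is
 * trimmed:
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	int use_alt_stack(void (*h)(int, siginfo_t *, void *))
 *	{
 *		stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
 *		struct sigaction sa = { .sa_sigaction = h,
 *					.sa_flags = SA_SIGINFO | SA_ONSTACK };
 *
 *		if (!ss.ss_sp || sigaltstack(&ss, NULL) < 0)
 *			return -1;
 *		sigemptyset(&sa.sa_mask);
 *		return sigaction(SIGSEGV, &sa, NULL);
 *	}
 */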
5c49574f
AV
4219int restore_altstack(const stack_t __user *uss)
4220{
bcfe8ad8
AV
4221 stack_t new;
4222 if (copy_from_user(&new, uss, sizeof(stack_t)))
4223 return -EFAULT;
22839869
WD
4224 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4225 MINSIGSTKSZ);
5c49574f 4226 /* squash all but EFAULT for now */
bcfe8ad8 4227 return 0;
5c49574f
AV
4228}
4229
c40702c4
AV
4230int __save_altstack(stack_t __user *uss, unsigned long sp)
4231{
4232 struct task_struct *t = current;
2a742138
SS
4233 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4234 __put_user(t->sas_ss_flags, &uss->ss_flags) |
c40702c4 4235 __put_user(t->sas_ss_size, &uss->ss_size);
97c885d5 4236 return err;
c40702c4
AV
4237}
4238
90268439 4239#ifdef CONFIG_COMPAT
6203deb0
DB
4240static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4241 compat_stack_t __user *uoss_ptr)
90268439
AV
4242{
4243 stack_t uss, uoss;
4244 int ret;
90268439
AV
4245
4246 if (uss_ptr) {
4247 compat_stack_t uss32;
90268439
AV
4248 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4249 return -EFAULT;
4250 uss.ss_sp = compat_ptr(uss32.ss_sp);
4251 uss.ss_flags = uss32.ss_flags;
4252 uss.ss_size = uss32.ss_size;
4253 }
bcfe8ad8 4254 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
22839869
WD
4255 compat_user_stack_pointer(),
4256 COMPAT_MINSIGSTKSZ);
90268439 4257 if (ret >= 0 && uoss_ptr) {
bcfe8ad8
AV
4258 compat_stack_t old;
4259 memset(&old, 0, sizeof(old));
4260 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4261 old.ss_flags = uoss.ss_flags;
4262 old.ss_size = uoss.ss_size;
4263 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
90268439
AV
4264 ret = -EFAULT;
4265 }
4266 return ret;
4267}
4268
6203deb0
DB
4269COMPAT_SYSCALL_DEFINE2(sigaltstack,
4270 const compat_stack_t __user *, uss_ptr,
4271 compat_stack_t __user *, uoss_ptr)
4272{
4273 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4274}
4275
90268439
AV
4276int compat_restore_altstack(const compat_stack_t __user *uss)
4277{
6203deb0 4278 int err = do_compat_sigaltstack(uss, NULL);
90268439
AV
4279 /* squash all but -EFAULT for now */
4280 return err == -EFAULT ? err : 0;
4281}
c40702c4
AV
4282
4283int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4284{
441398d3 4285 int err;
c40702c4 4286 struct task_struct *t = current;
441398d3
SS
4287 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4288 &uss->ss_sp) |
4289 __put_user(t->sas_ss_flags, &uss->ss_flags) |
c40702c4 4290 __put_user(t->sas_ss_size, &uss->ss_size);
97c885d5 4291 return err;
c40702c4 4292}
90268439 4293#endif
1da177e4
LT
4294
4295#ifdef __ARCH_WANT_SYS_SIGPENDING
4296
41c57892
RD
4297/**
4298 * sys_sigpending - examine pending signals
d53238cd 4299 * @uset: where mask of pending signal is returned
41c57892 4300 */
d53238cd 4301SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
1da177e4 4302{
d53238cd 4303 sigset_t set;
d53238cd
DB
4304
4305 if (sizeof(old_sigset_t) > sizeof(*uset))
4306 return -EINVAL;
4307
b1d294c8
CB
4308 do_sigpending(&set);
4309
4310 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4311 return -EFAULT;
4312
4313 return 0;
1da177e4
LT
4314}
4315
8f13621a
AV
4316#ifdef CONFIG_COMPAT
4317COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4318{
4319 sigset_t set;
b1d294c8
CB
4320
4321 do_sigpending(&set);
4322
4323 return put_user(set.sig[0], set32);
8f13621a
AV
4324}
4325#endif
4326
1da177e4
LT
4327#endif
4328
4329#ifdef __ARCH_WANT_SYS_SIGPROCMASK
41c57892
RD
4330/**
4331 * sys_sigprocmask - examine and change blocked signals
4332 * @how: whether to add, remove, or set signals
b013c399 4333 * @nset: signals to add or remove (if non-null)
41c57892
RD
4334 * @oset: previous value of signal mask if non-null
4335 *
5aba085e
RD
4336 * Some platforms have their own version with special arguments;
4337 * others support only sys_rt_sigprocmask.
4338 */
1da177e4 4339
b013c399 4340SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
b290ebe2 4341 old_sigset_t __user *, oset)
1da177e4 4342{
1da177e4 4343 old_sigset_t old_set, new_set;
2e4f7c77 4344 sigset_t new_blocked;
1da177e4 4345
b013c399 4346 old_set = current->blocked.sig[0];
1da177e4 4347
b013c399
ON
4348 if (nset) {
4349 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4350 return -EFAULT;
1da177e4 4351
2e4f7c77 4352 new_blocked = current->blocked;
1da177e4 4353
1da177e4 4354 switch (how) {
1da177e4 4355 case SIG_BLOCK:
2e4f7c77 4356 sigaddsetmask(&new_blocked, new_set);
1da177e4
LT
4357 break;
4358 case SIG_UNBLOCK:
2e4f7c77 4359 sigdelsetmask(&new_blocked, new_set);
1da177e4
LT
4360 break;
4361 case SIG_SETMASK:
2e4f7c77 4362 new_blocked.sig[0] = new_set;
1da177e4 4363 break;
2e4f7c77
ON
4364 default:
4365 return -EINVAL;
1da177e4
LT
4366 }
4367
0c4a8423 4368 set_current_blocked(&new_blocked);
b013c399
ON
4369 }
4370
4371 if (oset) {
1da177e4 4372 if (copy_to_user(oset, &old_set, sizeof(*oset)))
b013c399 4373 return -EFAULT;
1da177e4 4374 }
b013c399
ON
4375
4376 return 0;
1da177e4
LT
4377}
4378#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
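/*
 * The three @how modes above, restated as set operations on the blocked
 * mask (illustration only):
 *
 *	SIG_BLOCK:	blocked |= new_set;
 *	SIG_UNBLOCK:	blocked &= ~new_set;
 *	SIG_SETMASK:	blocked  = new_set;
 *
 * SIGKILL and SIGSTOP are silently kept unblockable further down the
 * set_current_blocked() path.
 */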
4379
eaca6eae 4380#ifndef CONFIG_ODD_RT_SIGACTION
41c57892
RD
4381/**
4382 * sys_rt_sigaction - alter an action taken by a process
4383 * @sig: signal to be sent
f9fa0bc1
RD
4384 * @act: new sigaction
4385 * @oact: used to save the previous sigaction
41c57892
RD
4386 * @sigsetsize: size of sigset_t type
4387 */
d4e82042
HC
4388SYSCALL_DEFINE4(rt_sigaction, int, sig,
4389 const struct sigaction __user *, act,
4390 struct sigaction __user *, oact,
4391 size_t, sigsetsize)
1da177e4
LT
4392{
4393 struct k_sigaction new_sa, old_sa;
d8f993b3 4394 int ret;
1da177e4
LT
4395
4396 /* XXX: Don't preclude handling different sized sigset_t's. */
4397 if (sigsetsize != sizeof(sigset_t))
d8f993b3 4398 return -EINVAL;
1da177e4 4399
d8f993b3
CB
4400 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4401 return -EFAULT;
1da177e4
LT
4402
4403 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
d8f993b3
CB
4404 if (ret)
4405 return ret;
1da177e4 4406
d8f993b3
CB
4407 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4408 return -EFAULT;
4409
4410 return 0;
1da177e4 4411}
08d32fe5 4412#ifdef CONFIG_COMPAT
08d32fe5
AV
4413COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4414 const struct compat_sigaction __user *, act,
4415 struct compat_sigaction __user *, oact,
4416 compat_size_t, sigsetsize)
4417{
4418 struct k_sigaction new_ka, old_ka;
08d32fe5
AV
4419#ifdef __ARCH_HAS_SA_RESTORER
4420 compat_uptr_t restorer;
4421#endif
4422 int ret;
4423
4424 /* XXX: Don't preclude handling different sized sigset_t's. */
4425 if (sigsetsize != sizeof(compat_sigset_t))
4426 return -EINVAL;
4427
4428 if (act) {
4429 compat_uptr_t handler;
4430 ret = get_user(handler, &act->sa_handler);
4431 new_ka.sa.sa_handler = compat_ptr(handler);
4432#ifdef __ARCH_HAS_SA_RESTORER
4433 ret |= get_user(restorer, &act->sa_restorer);
4434 new_ka.sa.sa_restorer = compat_ptr(restorer);
4435#endif
3968cf62 4436 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
3ddc5b46 4437 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
08d32fe5
AV
4438 if (ret)
4439 return -EFAULT;
08d32fe5
AV
4440 }
4441
4442 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4443 if (!ret && oact) {
08d32fe5
AV
4444 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4445 &oact->sa_handler);
f454322e
DL
4446 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4447 sizeof(oact->sa_mask));
3ddc5b46 4448 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
08d32fe5
AV
4449#ifdef __ARCH_HAS_SA_RESTORER
4450 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4451 &oact->sa_restorer);
4452#endif
4453 }
4454 return ret;
4455}
4456#endif
eaca6eae 4457#endif /* !CONFIG_ODD_RT_SIGACTION */
1da177e4 4458
495dfbf7
AV
4459#ifdef CONFIG_OLD_SIGACTION
4460SYSCALL_DEFINE3(sigaction, int, sig,
4461 const struct old_sigaction __user *, act,
4462 struct old_sigaction __user *, oact)
4463{
4464 struct k_sigaction new_ka, old_ka;
4465 int ret;
4466
4467 if (act) {
4468 old_sigset_t mask;
96d4f267 4469 if (!access_ok(act, sizeof(*act)) ||
495dfbf7
AV
4470 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4471 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4472 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4473 __get_user(mask, &act->sa_mask))
4474 return -EFAULT;
4475#ifdef __ARCH_HAS_KA_RESTORER
4476 new_ka.ka_restorer = NULL;
4477#endif
4478 siginitset(&new_ka.sa.sa_mask, mask);
4479 }
4480
4481 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4482
4483 if (!ret && oact) {
96d4f267 4484 if (!access_ok(oact, sizeof(*oact)) ||
495dfbf7
AV
4485 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4486 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4487 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4488 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4489 return -EFAULT;
4490 }
4491
4492 return ret;
4493}
4494#endif
4495#ifdef CONFIG_COMPAT_OLD_SIGACTION
4496COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4497 const struct compat_old_sigaction __user *, act,
4498 struct compat_old_sigaction __user *, oact)
4499{
4500 struct k_sigaction new_ka, old_ka;
4501 int ret;
4502 compat_old_sigset_t mask;
4503 compat_uptr_t handler, restorer;
4504
4505 if (act) {
96d4f267 4506 if (!access_ok(act, sizeof(*act)) ||
495dfbf7
AV
4507 __get_user(handler, &act->sa_handler) ||
4508 __get_user(restorer, &act->sa_restorer) ||
4509 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4510 __get_user(mask, &act->sa_mask))
4511 return -EFAULT;
4512
4513#ifdef __ARCH_HAS_KA_RESTORER
4514 new_ka.ka_restorer = NULL;
4515#endif
4516 new_ka.sa.sa_handler = compat_ptr(handler);
4517 new_ka.sa.sa_restorer = compat_ptr(restorer);
4518 siginitset(&new_ka.sa.sa_mask, mask);
4519 }
4520
4521 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4522
4523 if (!ret && oact) {
96d4f267 4524 if (!access_ok(oact, sizeof(*oact)) ||
495dfbf7
AV
4525 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4526 &oact->sa_handler) ||
4527 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4528 &oact->sa_restorer) ||
4529 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4530 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4531 return -EFAULT;
4532 }
4533 return ret;
4534}
4535#endif
1da177e4 4536
f6187769 4537#ifdef CONFIG_SGETMASK_SYSCALL
1da177e4
LT
4538
4539/*
4540 * For backwards compatibility. Functionality superseded by sigprocmask.
4541 */
a5f8fa9e 4542SYSCALL_DEFINE0(sgetmask)
1da177e4
LT
4543{
4544 /* SMP safe */
4545 return current->blocked.sig[0];
4546}
4547
a5f8fa9e 4548SYSCALL_DEFINE1(ssetmask, int, newmask)
1da177e4 4549{
c1095c6d
ON
4550 int old = current->blocked.sig[0];
4551 sigset_t newset;
1da177e4 4552
5ba53ff6 4553 siginitset(&newset, newmask);
c1095c6d 4554 set_current_blocked(&newset);
1da177e4
LT
4555
4556 return old;
4557}
f6187769 4558#endif /* CONFIG_SGETMASK_SYSCALL */
1da177e4
LT
4559
4560#ifdef __ARCH_WANT_SYS_SIGNAL
4561/*
4562 * For backwards compatibility. Functionality superseded by sigaction.
4563 */
a5f8fa9e 4564SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
1da177e4
LT
4565{
4566 struct k_sigaction new_sa, old_sa;
4567 int ret;
4568
4569 new_sa.sa.sa_handler = handler;
4570 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
c70d3d70 4571 sigemptyset(&new_sa.sa.sa_mask);
1da177e4
LT
4572
4573 ret = do_sigaction(sig, &new_sa, &old_sa);
4574
4575 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4576}
4577#endif /* __ARCH_WANT_SYS_SIGNAL */
4578
4579#ifdef __ARCH_WANT_SYS_PAUSE
4580
a5f8fa9e 4581SYSCALL_DEFINE0(pause)
1da177e4 4582{
d92fcf05 4583 while (!signal_pending(current)) {
1df01355 4584 __set_current_state(TASK_INTERRUPTIBLE);
d92fcf05
ON
4585 schedule();
4586 }
1da177e4
LT
4587 return -ERESTARTNOHAND;
4588}
4589
4590#endif
4591
9d8a7652 4592static int sigsuspend(sigset_t *set)
68f3f16d 4593{
68f3f16d
AV
4594 current->saved_sigmask = current->blocked;
4595 set_current_blocked(set);
4596
823dd322
SL
4597 while (!signal_pending(current)) {
4598 __set_current_state(TASK_INTERRUPTIBLE);
4599 schedule();
4600 }
68f3f16d
AV
4601 set_restore_sigmask();
4602 return -ERESTARTNOHAND;
4603}
68f3f16d 4604
41c57892
RD
4605/**
4606 * sys_rt_sigsuspend - replace the signal mask with the @unewset
4607 * value until a signal is received
4608 * @unewset: new signal mask value
4609 * @sigsetsize: size of sigset_t type
4610 */
d4e82042 4611SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
150256d8
DW
4612{
4613 sigset_t newset;
4614
4615 /* XXX: Don't preclude handling different sized sigset_t's. */
4616 if (sigsetsize != sizeof(sigset_t))
4617 return -EINVAL;
4618
4619 if (copy_from_user(&newset, unewset, sizeof(newset)))
4620 return -EFAULT;
68f3f16d 4621 return sigsuspend(&newset);
150256d8 4622}
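/*
 * Illustrative userspace sketch (not part of the kernel source): the
 * race-free wait pattern this syscall enables. Block the signal, test
 * the flag, then atomically restore the old mask and sleep; a SIGUSR1
 * handler that sets got_usr1 is assumed to be installed:
 *
 *	#include <signal.h>
 *
 *	static volatile sig_atomic_t got_usr1;
 *
 *	void wait_for_usr1(void)
 *	{
 *		sigset_t blk, old;
 *
 *		sigemptyset(&blk);
 *		sigaddset(&blk, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &blk, &old);
 *		while (!got_usr1)
 *			sigsuspend(&old);	// returns -1/EINTR after the handler
 *		sigprocmask(SIG_SETMASK, &old, NULL);
 *	}
 */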
ad4b65a4
AV
4623
4624#ifdef CONFIG_COMPAT
4625COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4626{
ad4b65a4 4627 sigset_t newset;
ad4b65a4
AV
4628
4629 /* XXX: Don't preclude handling different sized sigset_t's. */
4630 if (sigsetsize != sizeof(sigset_t))
4631 return -EINVAL;
4632
3968cf62 4633 if (get_compat_sigset(&newset, unewset))
ad4b65a4 4634 return -EFAULT;
ad4b65a4 4635 return sigsuspend(&newset);
ad4b65a4
AV
4636}
4637#endif
150256d8 4638
0a0e8cdf
AV
4639#ifdef CONFIG_OLD_SIGSUSPEND
4640SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4641{
4642 sigset_t blocked;
4643 siginitset(&blocked, mask);
4644 return sigsuspend(&blocked);
4645}
4646#endif
4647#ifdef CONFIG_OLD_SIGSUSPEND3
4648SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4649{
4650 sigset_t blocked;
4651 siginitset(&blocked, mask);
4652 return sigsuspend(&blocked);
4653}
4654#endif
150256d8 4655
52f5684c 4656__weak const char *arch_vma_name(struct vm_area_struct *vma)
f269fdd1
DH
4657{
4658 return NULL;
4659}
4660
ae7795bc 4661static inline void siginfo_buildtime_checks(void)
1da177e4 4662{
aba1be2f 4663 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
41b27154 4664
ae7795bc
EB
4665 /* Verify the offsets in the two siginfos match */
4666#define CHECK_OFFSET(field) \
4667 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4668
4669 /* kill */
4670 CHECK_OFFSET(si_pid);
4671 CHECK_OFFSET(si_uid);
4672
4673 /* timer */
4674 CHECK_OFFSET(si_tid);
4675 CHECK_OFFSET(si_overrun);
4676 CHECK_OFFSET(si_value);
4677
4678 /* rt */
4679 CHECK_OFFSET(si_pid);
4680 CHECK_OFFSET(si_uid);
4681 CHECK_OFFSET(si_value);
4682
4683 /* sigchld */
4684 CHECK_OFFSET(si_pid);
4685 CHECK_OFFSET(si_uid);
4686 CHECK_OFFSET(si_status);
4687 CHECK_OFFSET(si_utime);
4688 CHECK_OFFSET(si_stime);
4689
4690 /* sigfault */
4691 CHECK_OFFSET(si_addr);
add0b32e 4692 CHECK_OFFSET(si_trapno);
ae7795bc
EB
4693 CHECK_OFFSET(si_addr_lsb);
4694 CHECK_OFFSET(si_lower);
4695 CHECK_OFFSET(si_upper);
4696 CHECK_OFFSET(si_pkey);
0683b531
EB
4697 CHECK_OFFSET(si_perf_data);
4698 CHECK_OFFSET(si_perf_type);
ae7795bc
EB
4699
4700 /* sigpoll */
4701 CHECK_OFFSET(si_band);
4702 CHECK_OFFSET(si_fd);
4703
4704 /* sigsys */
4705 CHECK_OFFSET(si_call_addr);
4706 CHECK_OFFSET(si_syscall);
4707 CHECK_OFFSET(si_arch);
4708#undef CHECK_OFFSET
70f1b0d3
EB
4709
4710 /* usb asyncio */
4711 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4712 offsetof(struct siginfo, si_addr));
4713 if (sizeof(int) == sizeof(void __user *)) {
4714 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4715 sizeof(void __user *));
4716 } else {
4717 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4718 sizeof_field(struct siginfo, si_uid)) !=
4719 sizeof(void __user *));
4720 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4721 offsetof(struct siginfo, si_uid));
4722 }
4723#ifdef CONFIG_COMPAT
4724 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4725 offsetof(struct compat_siginfo, si_addr));
4726 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4727 sizeof(compat_uptr_t));
4728 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4729 sizeof_field(struct siginfo, si_pid));
4730#endif
ae7795bc
EB
4731}
4732
4733void __init signals_init(void)
4734{
4735 siginfo_buildtime_checks();
4736
5f58c398 4737 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
1da177e4 4738}
67fc4e0c
JW
4739
4740#ifdef CONFIG_KGDB_KDB
4741#include <linux/kdb.h>
4742/*
0b44bf9a 4743 * kdb_send_sig - Allows kdb to send signals without exposing
67fc4e0c
JW
4744 * signal internals. This function checks if the required locks are
4745 * available before calling the main signal code, to avoid kdb
4746 * deadlocks.
4747 */
0b44bf9a 4748void kdb_send_sig(struct task_struct *t, int sig)
67fc4e0c
JW
4749{
4750 static struct task_struct *kdb_prev_t;
0b44bf9a 4751 int new_t, ret;
67fc4e0c
JW
4752 if (!spin_trylock(&t->sighand->siglock)) {
4753 kdb_printf("Can't do kill command now.\n"
4754 "The sigmask lock is held somewhere else in "
4755 "kernel, try again later\n");
4756 return;
4757 }
67fc4e0c
JW
4758 new_t = kdb_prev_t != t;
4759 kdb_prev_t = t;
b03fbd4f 4760 if (!task_is_running(t) && new_t) {
0b44bf9a 4761 spin_unlock(&t->sighand->siglock);
67fc4e0c
JW
4762 kdb_printf("Process is not RUNNING, sending a signal from "
4763 "kdb risks deadlock\n"
4764 "on the run queue locks. "
4765 "The signal has _not_ been sent.\n"
4766 "Reissue the kill command if you want to risk "
4767 "the deadlock.\n");
4768 return;
4769 }
b213984b 4770 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
0b44bf9a
EB
4771 spin_unlock(&t->sighand->siglock);
4772 if (ret)
67fc4e0c
JW
4773 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4774 sig, t->pid);
4775 else
4776 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4777}
4778#endif /* CONFIG_KGDB_KDB */