get_signal: don't initialize ksig->info if SIGNAL_GROUP_EXIT/group_exec_task
[linux-2.6-block.git] / kernel / signal.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/task_work.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/cgroup.h>
#include <linux/audit.h>
#include <linux/sysctl.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include <asm/syscall.h>	/* for syscall_get_* */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	/* Only allow kernel generated signals to this kthread */
	if (unlikely((t->flags & PF_KTHREAD) &&
		     (handler == SIG_KTHREAD_KERNEL) && !force))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * may do so.
	 */
	return false;
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
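
/*
 * A worked example of the first-word special case above: with SIGUSR1
 * and SIGSEGV (10 and 11 on most architectures) both pending and
 * unblocked, a plain ffz() scan would pick SIGUSR1 as the lower
 * numbered bit.  Because SIGSEGV is in SYNCHRONOUS_MASK, x is first
 * narrowed to the synchronous bits, so next_signal() returns SIGSEGV:
 * fault signals jump ahead of ordinary asynchronous ones.
 */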

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask on @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If a stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a no-op.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made a no-op because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be a subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, the
 * other STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate `SIGNAL_*` flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
	struct signal_struct *sig = current->signal;

	if (sig->group_stop_count) {
		sig->group_stop_count++;
		mask |= JOBCTL_STOP_CONSUME;
	} else if (!(sig->flags & SIGNAL_STOP_STOPPED))
		return;

	/* Have the new thread join an on-going signal group stop */
	task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
		 int override_rlimit, const unsigned int sigqueue_flags)
{
	struct sigqueue *q = NULL;
	struct ucounts *ucounts;
	long sigpending;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 *
	 * NOTE! A pending signal will hold on to the user refcount,
	 * and we get/put the refcount only when the sigpending count
	 * changes from/to zero.
	 */
	rcu_read_lock();
	ucounts = task_ucounts(t);
	sigpending = inc_rlimit_get_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	rcu_read_unlock();
	if (!sigpending)
		return NULL;

	if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
		q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		dec_rlimit_put_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = sigqueue_flags;
		q->ucounts = ucounts;
	}
	return q;
}
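
/*
 * Note on the accounting above: the UCOUNT_RLIMIT_SIGPENDING charge
 * taken by inc_rlimit_get_ucounts() is dropped either immediately on
 * allocation failure or later in __sigqueue_free(), via q->ucounts.
 * Every queued sigqueue therefore holds exactly one pending-signal
 * charge against the target task's ucounts until it is freed.
 */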

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	if (q->ucounts) {
		dec_rlimit_put_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING);
		q->ucounts = NULL;
	}
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* If dying, we handle all new signals by ignoring them */
	if (fatal_signal_pending(tsk))
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
		   kernel_siginfo_t *info, enum pid_type *type)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	*type = PIDTYPE_PID;
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		*type = PIDTYPE_TGID;
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path also
		 * reduces the timer noise on heavily loaded !highres
		 * systems.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);
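
/*
 * A minimal sketch of the calling convention (hypothetical caller; the
 * real users are get_signal() and the sigtimedwait() family):
 *
 *	kernel_siginfo_t info;
 *	enum pid_type type;
 *	int signr;
 *
 *	spin_lock_irq(&tsk->sighand->siglock);
 *	signr = dequeue_signal(tsk, &tsk->blocked, &info, &type);
 *	spin_unlock_irq(&tsk->sighand->siglock);
 *
 * Note that the mask argument names the signals to *skip*: blocked
 * signals stay queued until they are unblocked.
 */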

static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	lockdep_assert_held(&t->sighand->siglock);

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}
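
/*
 * The signal_wake_up(t, fatal) and ptrace_signal_wake_up() calls used
 * throughout this file are thin wrappers around signal_wake_up_state()
 * defined in <linux/sched/signal.h>; roughly (details vary by kernel
 * version), a fatal wakeup passes TASK_WAKEKILL | __TASK_TRACED as the
 * extra state so that killable and ptrace-stopped sleeps are woken too:
 *
 *	signal_wake_up_state(t, fatal ? TASK_WAKEKILL | __TASK_TRACED : 0);
 */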

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}
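
/*
 * These helpers rely on SEND_SIG_NOINFO and SEND_SIG_PRIV being the
 * magic pointer values 0 and 1 (see <linux/sched/signal.h>), so
 * "info <= SEND_SIG_PRIV" is an ordered comparison against 1: any
 * info at or below it is a special constant rather than a real
 * kernel_siginfo, and must never be dereferenced.
 */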

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			fallthrough;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules a sticky ptrace trap which is cleared on the
 * next TRAP_STOP to notify the ptracer of an event.  @t must have been
 * seized by the ptracer.
 *
 * If @t is running, a STOP trap will be taken.  If trapped for STOP and
 * the ptracer is listening for events, the tracee is woken up so that it
 * can re-trap for the new event.  If trapped otherwise, the STOP trap
 * will eventually be taken without returning to userland after the
 * existing traps are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	lockdep_assert_held(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should actually be delivered; otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & SIGNAL_GROUP_EXIT) {
		if (signal->core_state)
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, drop the signal.
		 */
		return false;
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED))) {
				t->jobctl &= ~JOBCTL_STOPPED;
				wake_up_state(t, __TASK_STOPPED);
			} else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !task_sigpending(p);
}

static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * Try the suggested task first (may or may not be the main thread).
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    (signal->core_state || !(signal->flags & SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			__for_each_thread(signal, t) {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			}
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}
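
/*
 * legacy_queue() is what gives classic (non-realtime) signals their
 * "at most one pending instance" semantics: for example, two quick
 * kill(pid, SIGUSR1) calls before the target runs result in a single
 * SIGUSR1 delivery, whereas two sigqueue() sends of an RT signal such
 * as SIGRTMIN queue and deliver two separate instances.
 */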

static int __send_signal_locked(int sig, struct kernel_siginfo *info,
				struct task_struct *t, enum pid_type type, bool force)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	lockdep_assert_held(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, force))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);

	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info) &&
		   sig >= SIGRTMIN && info->si_code != SI_USER) {
		/*
		 * Queue overflow, abort.  We may abort if the
		 * signal was rt and sent by user using something
		 * other than kill().
		 */
		result = TRACE_SIGNAL_OVERFLOW_FAIL;
		ret = -EAGAIN;
		goto ret;
	} else {
		/*
		 * This is a silent loss of information.  We still
		 * send the signal, but the *info bits are lost.
		 */
		result = TRACE_SIGNAL_LOSE_INFO;
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}
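
/*
 * Every path through __send_signal_locked() above ends at the
 * signal:signal_generate tracepoint with one of the TRACE_SIGNAL_*
 * result codes, so signal generation (including silent drops,
 * coalesced legacy signals and queue overflows) can be observed from
 * userspace, e.g. with "perf record -e signal:signal_generate".
 */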

static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

int send_signal_locked(int sig, struct kernel_siginfo *info,
		       struct task_struct *t, enum pid_type type)
{
	/* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
	bool force = false;

	if (info == SEND_SIG_NOINFO) {
		/* Force if sent from an ancestor pid namespace */
		force = !task_pid_nr_ns(current, task_active_pid_ns(t));
	} else if (info == SEND_SIG_PRIV) {
		/* Don't ignore kernel generated signals */
		force = true;
	} else if (has_si_pid_and_uid(info)) {
		/* SIGKILL and SIGSTOP are special or have ids */
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		/* A kernel generated signal? */
		force = (info->si_code == SI_KERNEL);

		/* From an ancestor pid namespace? */
		if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
			info->si_pid = 0;
			force = true;
		}
	}
	return __send_signal_locked(sig, info, t, type, force);
}

static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = task_pt_regs(current);
	struct file *exe_file;

	exe_file = get_task_exe_file(current);
	if (exe_file) {
		pr_info("%pD: %s: potentially unexpected fatal signal %d.\n",
			exe_file, current->comm, signr);
		fput(exe_file);
	} else {
		pr_info("%s: potentially unexpected fatal signal %d.\n",
			current->comm, signr);
	}

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal_locked(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

enum sig_handler {
	HANDLER_CURRENT, /* If reachable use the current handler */
	HANDLER_SIG_DFL, /* Always use SIG_DFL handler semantics */
	HANDLER_EXIT,	 /* Only visible as the process exit code */
};

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want recursive SIGSEGVs and the like, for example;
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
static int
force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t,
		       enum sig_handler handler)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;
	int sig = info->si_signo;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored || (handler != HANDLER_CURRENT)) {
		action->sa.sa_handler = SIG_DFL;
		if (handler == HANDLER_EXIT)
			action->sa.sa_flags |= SA_IMMUTABLE;
		if (blocked)
			sigdelset(&t->blocked, sig);
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable. But HANDLER_EXIT is always fatal.
	 */
	if (action->sa.sa_handler == SIG_DFL &&
	    (!t->ptrace || (handler == HANDLER_EXIT)))
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal_locked(sig, info, t, PIDTYPE_PID);
	/* This can happen if the signal was already pending and blocked */
	if (!task_sigpending(t))
		signal_wake_up(t, 0);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

int force_sig_info(struct kernel_siginfo *info)
{
	return force_sig_info_to_task(info, current, HANDLER_CURRENT);
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t;
	int count = 0;

	p->signal->group_stop_count = 0;

	for_other_threads(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		/* Don't require de_thread to wait for the vhost_worker */
		if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
			count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == rcu_access_pointer(tsk->sighand)))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

#ifdef CONFIG_LOCKDEP
void lockdep_assert_task_sighand_held(struct task_struct *task)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	sighand = rcu_dereference(task->sighand);
	if (sighand)
		lockdep_assert_held(&sighand->siglock);
	else
		WARN_ON_ONCE(1);
	rcu_read_unlock();
}
#endif

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int ret = -ESRCH;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		/*
		 * If group_send_sig_info() succeeds at least once ret
		 * becomes 0 and after that the code below has no effect.
		 * Otherwise we return the last err or -ESRCH if this
		 * process group is empty.
		 */
		if (ret)
			ret = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return ret;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace, as the 32bit address will be encoded in the
 * low 32bits of the pointer.  Those low 32bits are stored at a higher
 * address than a 32bit pointer field would be, so userspace will not
 * see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	if (!valid_signal(sig))
		return ret;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
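
/*
 * Per the layout note above, a rough sketch of the expected caller
 * behaviour (the compat test shown is illustrative; the real check is
 * the caller's responsibility):
 *
 *	sigval_t addr;
 *
 *	if (target_is_32bit)	// hypothetical caller-side compat test
 *		addr.sival_int = lower_32_bits((unsigned long)uptr);
 *	else
 *		addr.sival_ptr = uptr;
 *	kill_pid_usb_asyncio(sig, errno, addr, pid, cred);
 */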

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */
static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0)
		return kill_proc_info(sig, info, pid);

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
			    !same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);
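
/*
 * Typical fire-and-forget use from kernel code, for illustration:
 *
 *	send_sig(SIGKILL, task, 1);
 *
 * priv=1 selects SEND_SIG_PRIV, so the signal is treated as kernel
 * generated and, per sig_task_ignored(), even a SIGNAL_UNKILLABLE
 * (init-like) target cannot ignore a sig_kernel_only() signal sent
 * this way.
 */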
1da177e4 1661
3cf5d076 1662void force_sig(int sig)
1da177e4 1663{
ffafd23b
EB
1664 struct kernel_siginfo info;
1665
1666 clear_siginfo(&info);
1667 info.si_signo = sig;
1668 info.si_errno = 0;
1669 info.si_code = SI_KERNEL;
1670 info.si_pid = 0;
1671 info.si_uid = 0;
a89e9b8a 1672 force_sig_info(&info);
1da177e4 1673}
fb50f5a4 1674EXPORT_SYMBOL(force_sig);
1da177e4 1675
26d5badb
EB
1676void force_fatal_sig(int sig)
1677{
1678 struct kernel_siginfo info;
1679
1680 clear_siginfo(&info);
1681 info.si_signo = sig;
1682 info.si_errno = 0;
1683 info.si_code = SI_KERNEL;
1684 info.si_pid = 0;
1685 info.si_uid = 0;
e349d945 1686 force_sig_info_to_task(&info, current, HANDLER_SIG_DFL);
26d5badb
EB
1687}
1688
fcb116bc
EB
1689void force_exit_sig(int sig)
1690{
1691 struct kernel_siginfo info;
1692
1693 clear_siginfo(&info);
1694 info.si_signo = sig;
1695 info.si_errno = 0;
1696 info.si_code = SI_KERNEL;
1697 info.si_pid = 0;
1698 info.si_uid = 0;
1699 force_sig_info_to_task(&info, current, HANDLER_EXIT);
1700}
1701
/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	if (sig == SIGSEGV)
		force_fatal_sig(SIGSEGV);
	else
		force_sig(SIGSEGV);
}

int force_sig_fault_to_task(int sig, int code, void __user *addr,
			    struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	return force_sig_info_to_task(&info, t, HANDLER_CURRENT);
}

int force_sig_fault(int sig, int code, void __user *addr)
{
	return force_sig_fault_to_task(sig, code, addr, current);
}

int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	return send_sig_info(info.si_signo, &info, t);
}
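/*
 * Usage sketch (editor's illustration of a typical arch page-fault
 * handler; "regs" and "fault_address" are assumed locals):
 *
 *	if (user_mode(regs))
 *		force_sig_fault(SIGSEGV, SEGV_MAPERR,
 *				(void __user *)fault_address);
 *
 * This is what gives a userspace SIGSEGV handler a meaningful si_code
 * and si_addr rather than just a signal number.
 */
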
int force_sig_mceerr(int code, void __user *addr, short lsb)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(&info);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_BNDERR;
	info.si_addr = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(&info);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code = SEGV_PKUERR;
	info.si_addr = addr;
	info.si_pkey = pkey;
	return force_sig_info(&info);
}
#endif

int send_sig_perf(void __user *addr, u32 type, u64 sig_data)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_PERF;
	info.si_addr = addr;
	info.si_perf_data = sig_data;
	info.si_perf_type = type;

	/*
	 * Signals generated by perf events should not terminate the whole
	 * process if SIGTRAP is blocked; however, delivering the signal
	 * asynchronously is better than not delivering at all.  But tell
	 * user space if the signal was asynchronous, so it can clearly be
	 * distinguished from normal synchronous ones.
	 */
	info.si_perf_flags = sigismember(&current->blocked, info.si_signo) ?
				     TRAP_PERF_FLAG_ASYNC :
				     0;

	return send_sig_info(info.si_signo, &info, current);
}

/**
 * force_sig_seccomp - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 * @force_coredump: true to trigger a coredump
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
int force_sig_seccomp(int syscall, int reason, bool force_coredump)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSYS;
	info.si_code = SYS_SECCOMP;
	info.si_call_addr = (void __user *)KSTK_EIP(current);
	info.si_errno = reason;
	info.si_arch = syscall_get_arch(current);
	info.si_syscall = syscall;
	return force_sig_info_to_task(&info, current,
				      force_coredump ? HANDLER_EXIT : HANDLER_CURRENT);
}

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return force_sig_info(&info);
}

/* For the rare architectures that include trap information using
 * si_trapno.
 */
int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno,
			  struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_trapno = trapno;
	return send_sig_info(info.si_signo, &info, t);
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
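
/*
 * Usage sketch for kill_pid() (editor's illustration; "nr" is an assumed
 * pid number and the lookup/teardown is the caller's responsibility):
 *
 *	struct pid *pid = find_get_pid(nr);	// takes a reference
 *
 *	if (pid) {
 *		kill_pid(pid, SIGTERM, 1);	// priv=1: sent by the kernel
 *		put_pid(pid);			// drop the reference
 *	}
 *
 * Holding a struct pid reference rather than a raw pid_t avoids racing
 * with pid reuse.
 */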

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of POSIX Timers
 * we allocate the sigqueue structure from timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	struct task_struct *t;
	unsigned long flags;
	int ret, result;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	rcu_read_lock();

	/*
	 * This function is used by POSIX timers to deliver a timer signal.
	 * Where type is PIDTYPE_PID (such as for timers with SIGEV_THREAD_ID
	 * set), the signal must be delivered to the specific thread (queues
	 * into t->pending).
	 *
	 * Where type is not PIDTYPE_PID, signals must be delivered to the
	 * process. In this case, prefer to deliver to current if it is in
	 * the same thread group as the target process, which avoids
	 * unnecessarily waking up a potentially idle task.
	 */
	t = pid_task(pid, type);
	if (!t)
		goto ret;
	if (type != PIDTYPE_PID && same_thread_group(t, current))
		t = current;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t, false))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		result = TRACE_SIGNAL_ALREADY_PENDING;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, type);
	result = TRACE_SIGNAL_DELIVERED;
out:
	trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
	unlock_task_sighand(t, &flags);
ret:
	rcu_read_unlock();
	return ret;
}
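
/*
 * Lifecycle sketch (editor's illustration, roughly what the POSIX timer
 * code does; error handling elided, "pid"/"type" come from the timer's
 * sigevent configuration):
 *
 *	struct sigqueue *q = sigqueue_alloc();	// at timer_create() time
 *	...
 *	send_sigqueue(q, pid, type);		// at each expiry; only bumps
 *						// si_overrun if still queued
 *	...
 *	sigqueue_free(q);			// at timer_delete() time
 */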

static void do_notify_pidfd(struct task_struct *task)
{
	struct pid *pid;

	WARN_ON(task->exit_state == 0);
	pid = task_pid(task);
	wake_up_all(&pid->wait_pidfd);
}

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns true if our parent ignored us and so we've switched to
 * self-reaping.
 */
bool do_notify_parent(struct task_struct *tsk, int sig)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	bool autoreap = false;
	u64 utime, stime;

	WARN_ON_ONCE(sig == -1);

	/* do_notify_parent_cldstop should have been called instead. */
	WARN_ON_ONCE(task_is_stopped_or_traced(tsk));

	WARN_ON_ONCE(!tsk->ptrace &&
		     (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	/* Wake up all pidfd waiters */
	do_notify_pidfd(tsk);

	if (sig != SIGCHLD) {
		/*
		 * This is only possible if parent == real_parent.
		 * Check if it has changed security domain.
		 */
		if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
			sig = SIGCHLD;
	}

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * We are under tasklist_lock here so our parent is tied to
	 * us and cannot change.
	 *
	 * task_active_pid_ns will always return the same pid namespace
	 * until a task passes through release_task.
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg it is not
	 * correct to rely on this.
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
				       task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
	info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!tsk->ptrace && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		autoreap = true;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = 0;
	}
	/*
	 * Send with __send_signal as si_pid and si_uid are in the
	 * parent's namespaces.
	 */
	if (valid_signal(sig) && sig)
		__send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return autoreap;
}
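
/*
 * Worked example of the exit_code decoding above (editor's note): a child
 * killed by SIGSEGV with a core dump has tsk->exit_code == (0x80 | SIGSEGV),
 * so the parent's SIGCHLD carries si_code == CLD_DUMPED and si_status ==
 * SIGSEGV; a child that called exit(3) has exit_code == (3 << 8), giving
 * si_code == CLD_EXITED and si_status == 3.
 */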

/**
 * do_notify_parent_cldstop - notify parent of stopped/continued state change
 * @tsk: task reporting the state change
 * @for_ptracer: the notification is for ptracer
 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
 *
 * Notify @tsk's parent that the stopped/continued state has changed.  If
 * @for_ptracer is %false, @tsk's group leader notifies its real parent.
 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
 *
 * CONTEXT:
 * Must be called with tasklist_lock at least read locked.
 */
static void do_notify_parent_cldstop(struct task_struct *tsk,
				     bool for_ptracer, int why)
{
	struct kernel_siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;
	u64 utime, stime;

	if (for_ptracer) {
		parent = tsk->parent;
	} else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	clear_siginfo(&info);
	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 4 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
	info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
	rcu_read_unlock();

	task_cputime(tsk, &utime, &stime);
	info.si_utime = nsec_to_clock_t(utime);
	info.si_stime = nsec_to_clock_t(stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * Returns the signal the ptracer requested the code resume
 * with.  If the code did not stop because the tracer is gone,
 * the stop signal remains unchanged unless clear_code.
 */
static int ptrace_stop(int exit_code, int why, unsigned long message,
		       kernel_siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	bool gstop_done = false;

	if (arch_ptrace_stop_needed()) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop();
		spin_lock_irq(&current->sighand->siglock);
	}

	/*
	 * After this point ptrace_signal_wake_up or signal_wake_up
	 * will clear TASK_TRACED if ptrace_unlink happens or a fatal
	 * signal comes in.  Handle previous ptrace_unlinks and fatal
	 * signals here to prevent ptrace_stop sleeping in schedule.
	 */
	if (!current->ptrace || __fatal_signal_pending(current))
		return exit_code;

	set_special_state(TASK_TRACED);
	current->jobctl |= JOBCTL_TRACED;

	/*
	 * We're committing to trapping.  TRACED should be visible before
	 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
	 * Also, transition to TRACED and updates to ->jobctl should be
	 * atomic with respect to siglock and should be done after the arch
	 * hook as siglock is released and regrabbed across it.
	 *
	 *     TRACER				    TRACEE
	 *
	 *     ptrace_attach()
	 * [L]   wait_on_bit(JOBCTL_TRAPPING)	[S] set_special_state(TRACED)
	 *     do_wait()
	 *       set_current_state()		    smp_wmb();
	 *     ptrace_do_wait()
	 *       wait_task_stopped()
	 *         task_stopped_code()
	 * [L]         task_is_traced()		[S] task_clear_jobctl_trapping();
	 */
	smp_wmb();

	current->ptrace_message = message;
	current->last_siginfo = info;
	current->exit_code = exit_code;

	/*
	 * If @why is CLD_STOPPED, we're trapping to participate in a group
	 * stop.  Do the bookkeeping.  Note that if SIGCONT was delivered
	 * across siglock relocks since INTERRUPT was scheduled, PENDING
	 * could be clear now.  We act as if SIGCONT is received after
	 * TASK_TRACED is entered - ignore it.
	 */
	if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
		gstop_done = task_participate_group_stop(current);

	/* any trap clears pending STOP trap, STOP trap clears NOTIFY */
	task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
	if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
		task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);

	/* entering a trap, clear TRAPPING */
	task_clear_jobctl_trapping(current);

	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	/*
	 * Notify parents of the stop.
	 *
	 * While ptraced, there are two parents - the ptracer and
	 * the real_parent of the group_leader.  The ptracer should
	 * know about every stop while the real parent is only
	 * interested in the completion of group stop.  The states
	 * for the two don't interact with each other.  Notify
	 * separately unless they're gonna be duplicates.
	 */
	if (current->ptrace)
		do_notify_parent_cldstop(current, true, why);
	if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
		do_notify_parent_cldstop(current, false, why);

	/*
	 * The previous do_notify_parent_cldstop() invocation woke ptracer.
	 * On a PREEMPTION kernel this can result in a preemption requirement
	 * which will be fulfilled after read_unlock() and the ptracer will be
	 * put on the CPU.
	 * The ptracer is in wait_task_inactive(, __TASK_TRACED) waiting for
	 * this task to wait in schedule().  If this task gets preempted then
	 * it remains enqueued on the runqueue.  The ptracer will observe this
	 * and then sleep for a delay of one HZ tick.  In the meantime this
	 * task gets scheduled, enters schedule() and will wait for the
	 * ptracer.
	 *
	 * This preemption point is not bad from a correctness point of
	 * view but extends the runtime by one HZ tick time due to the
	 * ptracer's sleep.  The preempt-disable section ensures that there
	 * will be no preemption between unlock and schedule() and so
	 * improves the performance, since the ptracer will observe that
	 * the tracee is scheduled out once it gets on the CPU.
	 *
	 * On PREEMPT_RT locking tasklist_lock does not disable preemption.
	 * Therefore the task can be preempted after do_notify_parent_cldstop()
	 * before unlocking tasklist_lock so there is no benefit in doing this.
	 *
	 * In fact disabling preemption is harmful on PREEMPT_RT because
	 * the spinlock_t in cgroup_enter_frozen() must not be acquired
	 * with preemption disabled due to the 'sleeping' spinlock
	 * substitution of RT.
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_disable();
	read_unlock(&tasklist_lock);
	cgroup_enter_frozen();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		preempt_enable_no_resched();
	schedule();
	cgroup_leave_frozen(true);

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	exit_code = current->exit_code;
	current->last_siginfo = NULL;
	current->ptrace_message = 0;
	current->exit_code = 0;

	/* LISTENING can be set only during STOP traps, clear it */
	current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN);

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
	return exit_code;
}

static int ptrace_do_notify(int signr, int exit_code, int why, unsigned long message)
{
	kernel_siginfo_t info;

	clear_siginfo(&info);
	info.si_signo = signr;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	/* Let the debugger run.  */
	return ptrace_stop(exit_code, why, message, &info);
}

int ptrace_notify(int exit_code, unsigned long message)
{
	int signr;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
	if (unlikely(task_work_pending(current)))
		task_work_run();

	spin_lock_irq(&current->sighand->siglock);
	signr = ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED, message);
	spin_unlock_irq(&current->sighand->siglock);
	return signr;
}
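
/*
 * Worked example of the exit_code encoding enforced by the BUG_ON above
 * (editor's note): a PTRACE_EVENT_FORK report passes
 *
 *	exit_code = SIGTRAP | (PTRACE_EVENT_FORK << 8);
 *
 * so the low 7 bits are SIGTRAP and the event number sits in the next
 * byte - the same value a tracer compares against (status >> 8) after
 * waitpid().
 */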

/**
 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
 * @signr: signr causing group stop if initiating
 *
 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
 * and participate in it.  If already set, participate in the existing
 * group stop.  If participated in a group stop (and thus slept), %true is
 * returned with siglock released.
 *
 * If ptraced, this function doesn't handle stop itself.  Instead,
 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
 * untouched.  The caller must ensure that INTERRUPT trap handling takes
 * place afterwards.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which is released
 * on %true return.
 *
 * RETURNS:
 * %false if group stop is already cancelled or ptrace trap is scheduled.
 * %true if participated in group stop.
 */
static bool do_signal_stop(int signr)
	__releases(&current->sighand->siglock)
{
	struct signal_struct *sig = current->signal;

	if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		struct task_struct *t;

		/* signr will be recorded in task->jobctl for retries */
		WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);

		if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
		    unlikely(sig->flags & SIGNAL_GROUP_EXIT) ||
		    unlikely(sig->group_exec_task))
			return false;
		/*
		 * There is no group stop already in progress.  We must
		 * initiate one now.
		 *
		 * While ptraced, a task may be resumed while group stop is
		 * still in effect and then receive a stop signal and
		 * initiate another group stop.  This deviates from the
		 * usual behavior as two consecutive stop signals can't
		 * cause two group stops when !ptraced.  That is why we
		 * also check !task_is_stopped(t) below.
		 *
		 * The condition can be distinguished by testing whether
		 * SIGNAL_STOP_STOPPED is already set.  Don't generate
		 * group_exit_code in such case.
		 *
		 * This is not necessary for SIGNAL_STOP_CONTINUED because
		 * an intervening stop signal is required to cause two
		 * continued events regardless of ptrace.
		 */
		if (!(sig->flags & SIGNAL_STOP_STOPPED))
			sig->group_exit_code = signr;

		sig->group_stop_count = 0;
		if (task_set_jobctl_pending(current, signr | gstop))
			sig->group_stop_count++;

		for_other_threads(current, t) {
			/*
			 * Setting state to TASK_STOPPED for a group
			 * stop is always done with the siglock held,
			 * so this check has no races.
			 */
			if (!task_is_stopped(t) &&
			    task_set_jobctl_pending(t, signr | gstop)) {
				sig->group_stop_count++;
				if (likely(!(t->ptrace & PT_SEIZED)))
					signal_wake_up(t, 0);
				else
					ptrace_trap_notify(t);
			}
		}
	}

	if (likely(!current->ptrace)) {
		int notify = 0;

		/*
		 * If there are no other threads in the group, or if there
		 * is a group stop in progress and we are the last to stop,
		 * report to the parent.
		 */
		if (task_participate_group_stop(current))
			notify = CLD_STOPPED;

		current->jobctl |= JOBCTL_STOPPED;
		set_special_state(TASK_STOPPED);
		spin_unlock_irq(&current->sighand->siglock);

		/*
		 * Notify the parent of the group stop completion.  Because
		 * we're not holding either the siglock or tasklist_lock
		 * here, ptracer may attach inbetween; however, this is for
		 * group stop and should always be delivered to the real
		 * parent of the group leader.  The new ptracer will get
		 * its notification when this task transitions into
		 * TASK_TRACED.
		 */
		if (notify) {
			read_lock(&tasklist_lock);
			do_notify_parent_cldstop(current, false, notify);
			read_unlock(&tasklist_lock);
		}

		/* Now we don't run again until woken by SIGCONT or SIGKILL */
		cgroup_enter_frozen();
		schedule();
		return true;
	} else {
		/*
		 * While ptraced, group stop is handled by STOP trap.
		 * Schedule it and let the caller deal with it.
		 */
		task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
		return false;
	}
}

/**
 * do_jobctl_trap - take care of ptrace jobctl traps
 *
 * When PT_SEIZED, it's used for both group stop and explicit
 * SEIZE/INTERRUPT traps.  Both generate PTRACE_EVENT_STOP trap with
 * accompanying siginfo.  If stopped, lower eight bits of exit_code contain
 * the stop signal; otherwise, %SIGTRAP.
 *
 * When !PT_SEIZED, it's used only for group stop trap with stop signal
 * number as exit_code and no siginfo.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held, which may be
 * released and re-acquired before returning with intervening sleep.
 */
static void do_jobctl_trap(void)
{
	struct signal_struct *signal = current->signal;
	int signr = current->jobctl & JOBCTL_STOP_SIGMASK;

	if (current->ptrace & PT_SEIZED) {
		if (!signal->group_stop_count &&
		    !(signal->flags & SIGNAL_STOP_STOPPED))
			signr = SIGTRAP;
		WARN_ON_ONCE(!signr);
		ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
				 CLD_STOPPED, 0);
	} else {
		WARN_ON_ONCE(!signr);
		ptrace_stop(signr, CLD_STOPPED, 0, NULL);
	}
}

/**
 * do_freezer_trap - handle the freezer jobctl trap
 *
 * Puts the task into the frozen state, unless the task is about to quit,
 * in which case JOBCTL_TRAP_FREEZE is dropped instead.
 *
 * CONTEXT:
 * Must be called with @current->sighand->siglock held,
 * which is always released before returning.
 */
static void do_freezer_trap(void)
	__releases(&current->sighand->siglock)
{
	/*
	 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
	 * let's make another loop to give it a chance to be handled.
	 * In any case, we'll return.
	 */
	if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
	     JOBCTL_TRAP_FREEZE) {
		spin_unlock_irq(&current->sighand->siglock);
		return;
	}

	/*
	 * Now we're sure that there is no pending fatal signal and no
	 * pending traps.  Clear TIF_SIGPENDING to not get out of schedule()
	 * immediately (if there is a non-fatal signal pending), and
	 * put the task into sleep.
	 */
	__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
	clear_thread_flag(TIF_SIGPENDING);
	spin_unlock_irq(&current->sighand->siglock);
	cgroup_enter_frozen();
	schedule();
}

static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
{
	/*
	 * We do not check sig_kernel_stop(signr) but set this marker
	 * unconditionally because we do not know whether debugger will
	 * change signr.  This flag has no meaning unless we are going
	 * to stop after return from ptrace_stop().  In this case it will
	 * be checked in do_signal_stop(), we should only stop if it was
	 * not cleared by SIGCONT while we were sleeping.  See also the
	 * comment in dequeue_signal().
	 */
	current->jobctl |= JOBCTL_STOP_DEQUEUED;
	signr = ptrace_stop(signr, CLD_TRAPPED, 0, info);

	/* We're back.  Did the debugger cancel the sig?  */
	if (signr == 0)
		return signr;

	/*
	 * Update the siginfo structure if the signal has
	 * changed.  If the debugger wanted something
	 * specific in the siginfo structure then it should
	 * have updated *info via PTRACE_SETSIGINFO.
	 */
	if (signr != info->si_signo) {
		clear_siginfo(info);
		info->si_signo = signr;
		info->si_errno = 0;
		info->si_code = SI_USER;
		rcu_read_lock();
		info->si_pid = task_pid_vnr(current->parent);
		info->si_uid = from_kuid_munged(current_user_ns(),
						task_uid(current->parent));
		rcu_read_unlock();
	}

	/* If the (new) signal is now blocked, requeue it.  */
	if (sigismember(&current->blocked, signr) ||
	    fatal_signal_pending(current)) {
		send_signal_locked(signr, info, current, type);
		signr = 0;
	}

	return signr;
}

static void hide_si_addr_tag_bits(struct ksignal *ksig)
{
	switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
	case SIL_FAULT:
	case SIL_FAULT_TRAPNO:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_FAULT_PERF_EVENT:
		ksig->info.si_addr = arch_untagged_si_addr(
			ksig->info.si_addr, ksig->sig, ksig->info.si_code);
		break;
	case SIL_KILL:
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_CHLD:
	case SIL_RT:
	case SIL_SYS:
		break;
	}
}

bool get_signal(struct ksignal *ksig)
{
	struct sighand_struct *sighand = current->sighand;
	struct signal_struct *signal = current->signal;
	int signr;

	clear_notify_signal();
	if (unlikely(task_work_pending(current)))
		task_work_run();

	if (!task_sigpending(current))
		return false;

	if (unlikely(uprobe_deny_signal()))
		return false;

	/*
	 * Do this once, we can't return to user-mode if freezing() == T.
	 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
	 * thus do not need another check after return.
	 */
	try_to_freeze();

relock:
	spin_lock_irq(&sighand->siglock);

	/*
	 * Every stopped thread goes here after wakeup.  Check to see if
	 * we should notify the parent, prepare_signal(SIGCONT) encodes
	 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
	 */
	if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
		int why;

		if (signal->flags & SIGNAL_CLD_CONTINUED)
			why = CLD_CONTINUED;
		else
			why = CLD_STOPPED;

		signal->flags &= ~SIGNAL_CLD_MASK;

		spin_unlock_irq(&sighand->siglock);

		/*
		 * Notify the parent that we're continuing.  This event is
		 * always per-process and doesn't make a whole lot of sense
		 * for ptracers, who shouldn't consume the state via
		 * wait(2) either, but, for backward compatibility, notify
		 * the ptracer of the group leader too unless it's gonna be
		 * a duplicate.
		 */
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(current, false, why);

		if (ptrace_reparented(current->group_leader))
			do_notify_parent_cldstop(current->group_leader,
						 true, why);
		read_unlock(&tasklist_lock);

		goto relock;
	}

	for (;;) {
		struct k_sigaction *ka;
		enum pid_type type;

		/* Has this task already been marked for death? */
		if ((signal->flags & SIGNAL_GROUP_EXIT) ||
		    signal->group_exec_task) {
			signr = SIGKILL;
			sigdelset(&current->pending.signal, SIGKILL);
			trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
					     &sighand->action[SIGKILL-1]);
			recalc_sigpending();
			/*
			 * implies do_group_exit() or return to PF_USER_WORKER,
			 * no need to initialize ksig->info/etc.
			 */
			goto fatal;
		}

		if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
		    do_signal_stop(0))
			goto relock;

		if (unlikely(current->jobctl &
			     (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
			if (current->jobctl & JOBCTL_TRAP_MASK) {
				do_jobctl_trap();
				spin_unlock_irq(&sighand->siglock);
			} else if (current->jobctl & JOBCTL_TRAP_FREEZE)
				do_freezer_trap();

			goto relock;
		}

		/*
		 * If the task is leaving the frozen state, let's update
		 * cgroup counters and reset the frozen bit.
		 */
		if (unlikely(cgroup_task_frozen(current))) {
			spin_unlock_irq(&sighand->siglock);
			cgroup_leave_frozen(false);
			goto relock;
		}

		/*
		 * Signals generated by the execution of an instruction
		 * need to be delivered before any other pending signals
		 * so that the instruction pointer in the signal stack
		 * frame points to the faulting instruction.
		 */
		type = PIDTYPE_PID;
		signr = dequeue_synchronous_signal(&ksig->info);
		if (!signr)
			signr = dequeue_signal(current, &current->blocked,
					       &ksig->info, &type);

		if (!signr)
			break; /* will return 0 */

		if (unlikely(current->ptrace) && (signr != SIGKILL) &&
		    !(sighand->action[signr-1].sa.sa_flags & SA_IMMUTABLE)) {
			signr = ptrace_signal(signr, &ksig->info, type);
			if (!signr)
				continue;
		}

		ka = &sighand->action[signr-1];

		/* Trace actually delivered signals. */
		trace_signal_deliver(signr, &ksig->info, ka);

		if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
			continue;
		if (ka->sa.sa_handler != SIG_DFL) {
			/* Run the handler.  */
			ksig->ka = *ka;

			if (ka->sa.sa_flags & SA_ONESHOT)
				ka->sa.sa_handler = SIG_DFL;

			break; /* will return non-zero "signr" value */
		}

		/*
		 * Now we are doing the default action for this signal.
		 */
		if (sig_kernel_ignore(signr)) /* Default is nothing. */
			continue;

		/*
		 * Global init gets no signals it doesn't want.
		 * Container-init gets no signals it doesn't want from same
		 * container.
		 *
		 * Note that if global/container-init sees a sig_kernel_only()
		 * signal here, the signal must have been generated internally
		 * or must have come from an ancestor namespace.  In either
		 * case, the signal cannot be dropped.
		 */
		if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
		    !sig_kernel_only(signr))
			continue;

		if (sig_kernel_stop(signr)) {
			/*
			 * The default action is to stop all threads in
			 * the thread group.  The job control signals
			 * do nothing in an orphaned pgrp, but SIGSTOP
			 * always works.  Note that siglock needs to be
			 * dropped during the call to is_orphaned_pgrp()
			 * because of lock ordering with tasklist_lock.
			 * This allows an intervening SIGCONT to be posted.
			 * We need to check for that and bail out if necessary.
			 */
			if (signr != SIGSTOP) {
				spin_unlock_irq(&sighand->siglock);

				/* signals can be posted during this window */

				if (is_current_pgrp_orphaned())
					goto relock;

				spin_lock_irq(&sighand->siglock);
			}

			if (likely(do_signal_stop(signr))) {
				/* It released the siglock.  */
				goto relock;
			}

			/*
			 * We didn't actually stop, due to a race
			 * with SIGCONT or something like that.
			 */
			continue;
		}

	fatal:
		spin_unlock_irq(&sighand->siglock);
		if (unlikely(cgroup_task_frozen(current)))
			cgroup_leave_frozen(true);

		/*
		 * Anything else is fatal, maybe with a core dump.
		 */
		current->flags |= PF_SIGNALED;

		if (sig_kernel_coredump(signr)) {
			if (print_fatal_signals)
				print_fatal_signal(signr);
			proc_coredump_connector(current);
			/*
			 * If it was able to dump core, this kills all
			 * other threads in the group and synchronizes with
			 * their demise.  If we lost the race with another
			 * thread getting here, it set group_exit_code
			 * first and our do_group_exit call below will use
			 * that value and ignore the one we pass it.
			 */
			do_coredump(&ksig->info);
		}

		/*
		 * PF_USER_WORKER threads will catch and exit on fatal signals
		 * themselves.  They have cleanup that must be performed, so we
		 * cannot call do_exit() on their behalf.  Note that ksig won't
		 * be properly initialized, so PF_USER_WORKER threads shouldn't
		 * use it.
		 */
		if (current->flags & PF_USER_WORKER)
			goto out;

		/*
		 * Death signals, no core dump.
		 */
		do_group_exit(signr);
		/* NOTREACHED */
	}
	spin_unlock_irq(&sighand->siglock);

	ksig->sig = signr;

	if (signr && !(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
		hide_si_addr_tag_bits(ksig);
out:
	return signr > 0;
}

/**
 * signal_delivered - called after signal delivery to update blocked signals
 * @ksig: kernel signal struct
 * @stepping: nonzero if debugger single-step or block-step in use
 *
 * This function should be called when a signal has successfully been
 * delivered.  It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
 * is always blocked), and the signal itself is blocked unless %SA_NODEFER
 * is set in @ksig->ka.sa.sa_flags.  Tracing is notified.
 */
static void signal_delivered(struct ksignal *ksig, int stepping)
{
	sigset_t blocked;

	/* A signal was successfully delivered, and the
	   saved sigmask was stored on the signal frame,
	   and will be restored by sigreturn.  So we can
	   simply clear the restore sigmask flag.  */
	clear_restore_sigmask();

	sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
	if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
		sigaddset(&blocked, ksig->sig);
	set_current_blocked(&blocked);
	if (current->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(current);
	if (stepping)
		ptrace_notify(SIGTRAP, 0);
}

void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
{
	if (failed)
		force_sigsegv(ksig->sig);
	else
		signal_delivered(ksig, stepping);
}
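
/*
 * How the pieces fit together (editor's sketch of a generic arch
 * do_signal() loop; arch_setup_rt_frame() stands in for the arch-specific
 * frame builder and is an assumption, as are "regs" and "stepping"):
 *
 *	struct ksignal ksig;
 *
 *	if (get_signal(&ksig)) {
 *		// a handler must run: build the user-space signal frame
 *		int failed = arch_setup_rt_frame(&ksig, regs);
 *
 *		signal_setup_done(failed, &ksig, stepping);
 *	}
 *
 * On frame-setup failure signal_setup_done() escalates via force_sigsegv();
 * on success it commits the new blocked mask computed above.
 */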

/*
 * It could be that complete_signal() picked us to notify about the
 * group-wide signal.  Other threads should be notified now to take
 * the shared signals in @which since we will not.
 */
static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
{
	sigset_t retarget;
	struct task_struct *t;

	sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
	if (sigisemptyset(&retarget))
		return;

	for_other_threads(tsk, t) {
		if (t->flags & PF_EXITING)
			continue;

		if (!has_pending_signals(&retarget, &t->blocked))
			continue;
		/* Remove the signals this thread can handle. */
		sigandsets(&retarget, &retarget, &t->blocked);

		if (!task_sigpending(t))
			signal_wake_up(t, 0);

		if (sigisemptyset(&retarget))
			break;
	}
}

void exit_signals(struct task_struct *tsk)
{
	int group_stop = 0;
	sigset_t unblocked;

	/*
	 * @tsk is about to have PF_EXITING set - lock out users which
	 * expect stable threadgroup.
	 */
	cgroup_threadgroup_change_begin(tsk);

	if (thread_group_empty(tsk) || (tsk->signal->flags & SIGNAL_GROUP_EXIT)) {
		sched_mm_cid_exit_signals(tsk);
		tsk->flags |= PF_EXITING;
		cgroup_threadgroup_change_end(tsk);
		return;
	}

	spin_lock_irq(&tsk->sighand->siglock);
	/*
	 * From now this task is not visible for group-wide signals,
	 * see wants_signal(), do_signal_stop().
	 */
	sched_mm_cid_exit_signals(tsk);
	tsk->flags |= PF_EXITING;

	cgroup_threadgroup_change_end(tsk);

	if (!task_sigpending(tsk))
		goto out;

	unblocked = tsk->blocked;
	signotset(&unblocked);
	retarget_shared_pending(tsk, &unblocked);

	if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
	    task_participate_group_stop(tsk))
		group_stop = CLD_STOPPED;
out:
	spin_unlock_irq(&tsk->sighand->siglock);

	/*
	 * If group stop has completed, deliver the notification.  This
	 * should always go to the real parent of the group leader.
	 */
	if (unlikely(group_stop)) {
		read_lock(&tasklist_lock);
		do_notify_parent_cldstop(tsk, false, group_stop);
		read_unlock(&tasklist_lock);
	}
}

/*
 * System call entry points.
 */

/**
 *  sys_restart_syscall - restart a system call
 */
SYSCALL_DEFINE0(restart_syscall)
{
	struct restart_block *restart = &current->restart_block;
	return restart->fn(restart);
}

long do_no_restart_syscall(struct restart_block *param)
{
	return -EINTR;
}

static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
{
	if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
		sigset_t newblocked;
		/* A set of now blocked but previously unblocked signals. */
		sigandnsets(&newblocked, newset, &current->blocked);
		retarget_shared_pending(tsk, &newblocked);
	}
	tsk->blocked = *newset;
	recalc_sigpending();
}

/**
 * set_current_blocked - change current->blocked mask
 * @newset: new mask
 *
 * It is wrong to change ->blocked directly, this helper should be used
 * to ensure the process can't miss a shared signal we are going to block.
 */
void set_current_blocked(sigset_t *newset)
{
	sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
	__set_current_blocked(newset);
}

void __set_current_blocked(const sigset_t *newset)
{
	struct task_struct *tsk = current;

	/*
	 * In case the signal mask hasn't changed, there is nothing we need
	 * to do.  The current->blocked shouldn't be modified by another task.
	 */
	if (sigequalsets(&tsk->blocked, newset))
		return;

	spin_lock_irq(&tsk->sighand->siglock);
	__set_task_blocked(tsk, newset);
	spin_unlock_irq(&tsk->sighand->siglock);
}

/*
 * This is also useful for kernel threads that want to temporarily
 * (or permanently) block certain signals.
 *
 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
 * interface happily blocks "unblockable" signals like SIGKILL
 * and friends.
 */
int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
{
	struct task_struct *tsk = current;
	sigset_t newset;

	/* Lockless, only current can change ->blocked, never from irq */
	if (oldset)
		*oldset = tsk->blocked;

	switch (how) {
	case SIG_BLOCK:
		sigorsets(&newset, &tsk->blocked, set);
		break;
	case SIG_UNBLOCK:
		sigandnsets(&newset, &tsk->blocked, set);
		break;
	case SIG_SETMASK:
		newset = *set;
		break;
	default:
		return -EINVAL;
	}

	__set_current_blocked(&newset);
	return 0;
}
EXPORT_SYMBOL(sigprocmask);
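/*
 * Kernel-side usage sketch (editor's illustration of a kthread that wants
 * to see only SIGKILL; an earlier allow_signal(SIGKILL) is assumed):
 *
 *	sigset_t mask;
 *
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGKILL);
 *	sigprocmask(SIG_SETMASK, &mask, NULL);
 *
 * As the NOTE above says, this interface would just as happily block
 * SIGKILL too - nothing strips it from the set here.
 */
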
/*
 * This API helps to set app-provided sigmasks.
 *
 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
 *
 * Note that it does set_restore_sigmask() in advance, so it must always
 * be paired with restore_saved_sigmask_unless() before return from syscall.
 */
int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;
	if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}

#ifdef CONFIG_COMPAT
int set_compat_user_sigmask(const compat_sigset_t __user *umask,
			    size_t sigsetsize)
{
	sigset_t kmask;

	if (!umask)
		return 0;
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;
	if (get_compat_sigset(&kmask, umask))
		return -EFAULT;

	set_restore_sigmask();
	current->saved_sigmask = current->blocked;
	set_current_blocked(&kmask);

	return 0;
}
#endif
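
/*
 * Pairing sketch (editor's illustration modeled on ppoll()-style syscalls;
 * "do_the_actual_wait" and "interrupted" are assumed placeholders):
 *
 *	ret = set_user_sigmask(umask, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_the_actual_wait(...);
 *
 *	restore_saved_sigmask_unless(interrupted);
 *
 * When interrupted by a signal, the temporary mask is deliberately kept
 * in place until signal delivery restores the saved one, so the unblocked
 * signal is seen by the handler exactly once.
 */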

/**
 * sys_rt_sigprocmask - change the list of currently blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: the new set of blocked signals, or NULL to leave the mask alone
 * @oset: previous value of signal mask if non-null
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
		sigset_t __user *, oset, size_t, sigsetsize)
{
	sigset_t old_set, new_set;
	int error;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	old_set = current->blocked;

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
			return -EFAULT;
	}

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
		compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
{
	sigset_t old_set = current->blocked;

	/* XXX: Don't preclude handling different sized sigset_t's.  */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (nset) {
		sigset_t new_set;
		int error;
		if (get_compat_sigset(&new_set, nset))
			return -EFAULT;
		sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));

		error = sigprocmask(how, &new_set, NULL);
		if (error)
			return error;
	}
	return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
}
#endif

static void do_sigpending(sigset_t *set)
{
	spin_lock_irq(&current->sighand->siglock);
	sigorsets(set, &current->pending.signal,
		  &current->signal->shared_pending.signal);
	spin_unlock_irq(&current->sighand->siglock);

	/* Outside the lock because only this thread touches it.  */
	sigandsets(set, &current->blocked, set);
}

/**
 * sys_rt_sigpending - examine a pending signal that has been raised
 *			while blocked
 * @uset: stores pending signals
 * @sigsetsize: size of sigset_t type or larger
 */
SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sigsetsize))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
		compat_size_t, sigsetsize)
{
	sigset_t set;

	if (sigsetsize > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	return put_compat_sigset(uset, &set, sigsetsize);
}
#endif

static const struct {
	unsigned char limit, layout;
} sig_sicodes[] = {
	[SIGILL]  = { NSIGILL,  SIL_FAULT },
	[SIGFPE]  = { NSIGFPE,  SIL_FAULT },
	[SIGSEGV] = { NSIGSEGV, SIL_FAULT },
	[SIGBUS]  = { NSIGBUS,  SIL_FAULT },
	[SIGTRAP] = { NSIGTRAP, SIL_FAULT },
#if defined(SIGEMT)
	[SIGEMT]  = { NSIGEMT,  SIL_FAULT },
#endif
	[SIGCHLD] = { NSIGCHLD, SIL_CHLD },
	[SIGPOLL] = { NSIGPOLL, SIL_POLL },
	[SIGSYS]  = { NSIGSYS,  SIL_SYS },
};

static bool known_siginfo_layout(unsigned sig, int si_code)
{
	if (si_code == SI_KERNEL)
		return true;
	else if ((si_code > SI_USER)) {
		if (sig_specific_sicodes(sig)) {
			if (si_code <= sig_sicodes[sig].limit)
				return true;
		}
		else if (si_code <= NSIGPOLL)
			return true;
	}
	else if (si_code >= SI_DETHREAD)
		return true;
	else if (si_code == SI_ASYNCNL)
		return true;
	return false;
}

enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
{
	enum siginfo_layout layout = SIL_KILL;
	if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
		if ((sig < ARRAY_SIZE(sig_sicodes)) &&
		    (si_code <= sig_sicodes[sig].limit)) {
			layout = sig_sicodes[sig].layout;
			/* Handle the exceptions */
			if ((sig == SIGBUS) &&
			    (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
				layout = SIL_FAULT_MCEERR;
			else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
				layout = SIL_FAULT_BNDERR;
#ifdef SEGV_PKUERR
			else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
				layout = SIL_FAULT_PKUERR;
#endif
			else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
				layout = SIL_FAULT_PERF_EVENT;
			else if (IS_ENABLED(CONFIG_SPARC) &&
				 (sig == SIGILL) && (si_code == ILL_ILLTRP))
				layout = SIL_FAULT_TRAPNO;
			else if (IS_ENABLED(CONFIG_ALPHA) &&
				 ((sig == SIGFPE) ||
				  ((sig == SIGTRAP) && (si_code == TRAP_UNK))))
				layout = SIL_FAULT_TRAPNO;
		} else if (si_code <= NSIGPOLL)
			layout = SIL_POLL;
	} else {
		if (si_code == SI_TIMER)
			layout = SIL_TIMER;
		else if (si_code == SI_SIGIO)
			layout = SIL_POLL;
		else if (si_code < 0)
			layout = SIL_RT;
	}
	return layout;
}
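
/*
 * A few informative examples of the mapping above (assuming the usual
 * asm-generic si_code values): siginfo_layout(SIGSEGV, SEGV_MAPERR)
 * yields SIL_FAULT, siginfo_layout(SIGCHLD, CLD_EXITED) yields SIL_CHLD,
 * and any negative si_code such as SI_QUEUE yields SIL_RT.
 */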

static inline char __user *si_expansion(const siginfo_t __user *info)
{
	return ((char __user *)info) + sizeof(struct kernel_siginfo);
}

int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
{
	char __user *expansion = si_expansion(to);
	if (copy_to_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	if (clear_user(expansion, SI_EXPANSION_SIZE))
		return -EFAULT;
	return 0;
}

static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
				       const siginfo_t __user *from)
{
	if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
		char __user *expansion = si_expansion(from);
		char buf[SI_EXPANSION_SIZE];
		int i;
		/*
		 * An unknown si_code might need more than
		 * sizeof(struct kernel_siginfo) bytes. Verify all of the
		 * extra bytes are 0. This guarantees copy_siginfo_to_user
		 * will return this data to userspace exactly.
		 */
		if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
			return -EFAULT;
		for (i = 0; i < SI_EXPANSION_SIZE; i++) {
			if (buf[i] != 0)
				return -E2BIG;
		}
	}
	return 0;
}

static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
				    const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	to->si_signo = signo;
	return post_copy_siginfo_from_user(to, from);
}

int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
{
	if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
		return -EFAULT;
	return post_copy_siginfo_from_user(to, from);
}

#ifdef CONFIG_COMPAT
/**
 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
 * @to: compat siginfo destination
 * @from: kernel siginfo source
 *
 * Note: This function does not work properly for the SIGCHLD on x32, but
 * fortunately it doesn't have to. The only valid callers for this function are
 * copy_siginfo_to_user32, which is overridden for x32, and the coredump code.
 * The latter does not care because SIGCHLD will never cause a coredump.
 */
void copy_siginfo_to_external32(struct compat_siginfo *to,
		const struct kernel_siginfo *from)
{
	memset(to, 0, sizeof(*to));

	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code  = from->si_code;
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = ptr_to_compat(from->si_addr);
		break;
	case SIL_FAULT_TRAPNO:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_trapno = from->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_lower = ptr_to_compat(from->si_lower);
		to->si_upper = ptr_to_compat(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_pkey = from->si_pkey;
		break;
	case SIL_FAULT_PERF_EVENT:
		to->si_addr = ptr_to_compat(from->si_addr);
		to->si_perf_data = from->si_perf_data;
		to->si_perf_type = from->si_perf_type;
		to->si_perf_flags = from->si_perf_flags;
		break;
	case SIL_CHLD:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_status = from->si_status;
		to->si_utime = from->si_utime;
		to->si_stime = from->si_stime;
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = ptr_to_compat(from->si_call_addr);
		to->si_syscall   = from->si_syscall;
		to->si_arch      = from->si_arch;
		break;
	}
}

int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
			     const struct kernel_siginfo *from)
{
	struct compat_siginfo new;

	copy_siginfo_to_external32(&new, from);
	if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
		return -EFAULT;
	return 0;
}

static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
					 const struct compat_siginfo *from)
{
	clear_siginfo(to);
	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code  = from->si_code;
	switch (siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = compat_ptr(from->si_addr);
		break;
	case SIL_FAULT_TRAPNO:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_trapno = from->si_trapno;
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_lower = compat_ptr(from->si_lower);
		to->si_upper = compat_ptr(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_pkey = from->si_pkey;
		break;
	case SIL_FAULT_PERF_EVENT:
		to->si_addr = compat_ptr(from->si_addr);
		to->si_perf_data = from->si_perf_data;
		to->si_perf_type = from->si_perf_type;
		to->si_perf_flags = from->si_perf_flags;
		break;
	case SIL_CHLD:
		to->si_pid    = from->si_pid;
		to->si_uid    = from->si_uid;
		to->si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
		if (in_x32_syscall()) {
			to->si_utime = from->_sifields._sigchld_x32._utime;
			to->si_stime = from->_sifields._sigchld_x32._stime;
		} else
#endif
		{
			to->si_utime = from->si_utime;
			to->si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = compat_ptr(from->si_call_addr);
		to->si_syscall   = from->si_syscall;
		to->si_arch      = from->si_arch;
		break;
	}
	return 0;
}

static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
				      const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	from.si_signo = signo;
	return post_copy_siginfo_from_user32(to, &from);
}

int copy_siginfo_from_user32(struct kernel_siginfo *to,
			     const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return post_copy_siginfo_from_user32(to, &from);
}
#endif /* CONFIG_COMPAT */

/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 */
static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
			   const struct timespec64 *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	enum pid_type type;
	int sig, ret = 0;

	if (ts) {
		if (!timespec64_valid(ts))
			return -EINVAL;
		timeout = timespec64_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info, &type);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested in
		 * while we are sleeping so that we'll be awakened when they
		 * arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
		ret = schedule_hrtimeout_range(to, tsk->timer_slack_ns,
					       HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info, &type);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}
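
/*
 * Illustrative userspace sketch (not part of this file): synchronous
 * signal handling with sigtimedwait(), which ends up here. The signal
 * must already be blocked, otherwise it may be delivered the usual way
 * instead of being picked up by the wait:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	int sig = sigtimedwait(&set, &si, &ts);
 *	if (sig == SIGUSR1)
 *		; // si.si_pid identifies the sender
 *	else if (sig < 0 && errno == EAGAIN)
 *		; // timed out
 */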

/**
 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 * @uthese: queued signals to wait for
 * @uinfo: if non-null, the signal's siginfo is returned here
 * @uts: upper bound on process time suspension
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct __kernel_timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct old_timespec32 __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
#endif

static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
{
	clear_siginfo(info);
	info->si_signo = sig;
	info->si_errno = 0;
	info->si_code = SI_USER;
	info->si_pid = task_tgid_vnr(current);
	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}

/**
 * sys_kill - send a signal to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct kernel_siginfo info;

	prepare_kill_siginfo(sig, &info);

	return kill_something_info(sig, &info, pid);
}
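
/*
 * Illustrative userspace sketch: sig == 0 performs only the permission
 * and existence checks (see the null-signal comment in
 * do_send_specific() below), which makes kill() a cheap liveness probe:
 *
 *	if (kill(pid, 0) == 0 || errno == EPERM)
 *		; // process exists
 *	else if (errno == ESRCH)
 *		; // no such process
 */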

/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */
static bool access_pidfd_pidns(struct pid *pid)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *p = ns_of_pid(pid);

	for (;;) {
		if (!p)
			return false;
		if (p == active)
			break;
		p = p->parent;
	}

	return true;
}

static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
		siginfo_t __user *info)
{
#ifdef CONFIG_COMPAT
	/*
	 * Avoid hooking up compat syscalls and instead handle necessary
	 * conversions here. Note, this is a stop-gap measure and should not be
	 * considered a generic solution.
	 */
	if (in_compat_syscall())
		return copy_siginfo_from_user32(
			kinfo, (struct compat_siginfo __user *)info);
#endif
	return copy_siginfo_from_user(kinfo, info);
}

static struct pid *pidfd_to_pid(const struct file *file)
{
	struct pid *pid;

	pid = pidfd_pid(file);
	if (!IS_ERR(pid))
		return pid;

	return tgid_pidfd_to_pid(file);
}

/**
 * sys_pidfd_send_signal - Signal a process through a pidfd
 * @pidfd:  file descriptor of the process
 * @sig:    signal to send
 * @info:   signal info
 * @flags:  future flags
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>). It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. Put in other words,
 * grouping is a property of the flags argument not a property of the file
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	kernel_siginfo_t kinfo;

	/* Enforce flags be set to 0 until we add an extension. */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		prepare_kill_siginfo(sig, &kinfo);
	}

	ret = kill_pid_info(sig, &kinfo, pid);

err:
	fdput(f);
	return ret;
}
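
/*
 * Illustrative userspace sketch (not part of this file; assumes a libc
 * that exposes SYS_pidfd_open and SYS_pidfd_send_signal): unlike
 * kill(), a pidfd cannot be recycled, so the signal can never hit an
 * unrelated process that reused the PID:
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	if (pidfd >= 0) {
 *		// info == NULL: the kernel fills in SI_USER via
 *		// prepare_kill_siginfo(), exactly as for kill()
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */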

static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target thread group. This method
 * solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

/**
 * sys_tkill - send signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}
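
/*
 * Illustrative userspace sketch: directing a signal at one thread.
 * Recent glibc exposes a tgkill() wrapper; older code uses the raw
 * syscall. The tgid cross-check above means a recycled TID that now
 * belongs to a different process draws -ESRCH instead of a stray
 * signal:
 *
 *	pid_t tgid = getpid();
 *	pid_t tid = worker_tid;		// hypothetical: saved from gettid()
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */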

static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}

/**
 * sys_rt_sigqueueinfo - send signal information to a process
 * @pid: the PID of the thread
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif
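
/*
 * Illustrative userspace sketch: this path is normally entered via
 * sigqueue(3), which queues a signal together with a payload. The libc
 * wrapper sets si_code to SI_QUEUE (negative, hence the SIL_RT layout):
 *
 *	union sigval val = { .sival_int = 42 };
 *	sigqueue(pid, SIGRTMIN, val);
 *
 * A receiver with an SA_SIGINFO handler reads the payload back as
 * info->si_value.sival_int.
 */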

/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);

void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (k->sa.sa_flags & SA_IMMUTABLE) {
		spin_unlock_irq(&p->sighand->siglock);
		return -EINVAL;
	}
	if (oact)
		*oact = *k;

	/*
	 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
	 * e.g. by having an architecture use the bit in their uapi.
	 */
	BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);

	/*
	 * Clear unknown flag bits in order to allow userspace to detect missing
	 * support for flag bits and to allow the kernel to use non-uapi bits
	 * internally.
	 */
	if (act)
		act->sa.sa_flags &= UAPI_SA_FLAGS;
	if (oact)
		oact->sa.sa_flags &= UAPI_SA_FLAGS;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}
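
/*
 * Illustrative userspace sketch (not part of this file): rt_sigaction
 * is normally reached through sigaction(2); SA_SIGINFO selects the
 * three-argument handler form:
 *
 *	static void handler(int sig, siginfo_t *si, void *ucontext)
 *	{
 *		// si->si_code distinguishes SI_USER from kernel faults
 *	}
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_sigaction = handler;
 *	sa.sa_flags = SA_SIGINFO | SA_RESTART;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGUSR1, &sa, NULL);
 */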

#ifdef CONFIG_DYNAMIC_SIGFRAME
static inline void sigaltstack_lock(void)
	__acquires(&current->sighand->siglock)
{
	spin_lock_irq(&current->sighand->siglock);
}

static inline void sigaltstack_unlock(void)
	__releases(&current->sighand->siglock)
{
	spin_unlock_irq(&current->sighand->siglock);
}
#else
static inline void sigaltstack_lock(void) { }
static inline void sigaltstack_unlock(void) { }
#endif

static int
do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
	       size_t min_ss_size)
{
	struct task_struct *t = current;
	int ret = 0;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		/*
		 * Return before taking any locks if no actual
		 * sigaltstack changes were requested.
		 */
		if (t->sas_ss_sp == (unsigned long)ss_sp &&
		    t->sas_ss_size == ss_size &&
		    t->sas_ss_flags == ss_flags)
			return 0;

		sigaltstack_lock();
		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				ret = -ENOMEM;
			if (!sigaltstack_size_valid(ss_size))
				ret = -ENOMEM;
		}
		if (!ret) {
			t->sas_ss_sp = (unsigned long) ss_sp;
			t->sas_ss_size = ss_size;
			t->sas_ss_flags = ss_flags;
		}
		sigaltstack_unlock();
	}
	return ret;
}
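
/*
 * Illustrative userspace sketch: a handler that must survive a stack
 * overflow needs an alternate stack, registered with sigaltstack() and
 * selected per-signal with SA_ONSTACK (segv_handler is hypothetical;
 * error checking omitted):
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_sigaction = segv_handler;
 *	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;
 *	sigaction(SIGSEGV, &sa, NULL);
 */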

SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}

int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr)  {
		compat_stack_t old;
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	return err;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @uset: where mask of pending signals is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

#ifndef CONFIG_ODD_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal whose action is to be changed
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
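
/*
 * Illustrative userspace sketch: sigsuspend() exists to close the race
 * between testing a flag and sleeping. The signal is kept blocked except
 * during the atomic window inside the call (flag is a hypothetical
 * volatile sig_atomic_t set by the handler):
 *
 *	sigset_t block, waitmask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	sigdelset(&waitmask, SIGUSR1);	// deliverable only while suspended
 *
 *	while (!flag)
 *		sigsuspend(&waitmask);	// returns -1 with errno == EINTR
 */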

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_trapno);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);
	CHECK_OFFSET(si_perf_data);
	CHECK_OFFSET(si_perf_type);
	CHECK_OFFSET(si_perf_flags);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}

#if defined(CONFIG_SYSCTL)
static struct ctl_table signal_debug_table[] = {
#ifdef CONFIG_SYSCTL_EXCEPTION_TRACE
	{
		.procname	= "exception-trace",
		.data		= &show_unhandled_signals,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
#endif
	{ }
};

static int __init init_signal_sysctls(void)
{
	register_sysctl_init("debug", signal_debug_table);
	return 0;
}
early_initcall(init_signal_sysctls);
#endif /* CONFIG_SYSCTL */

void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "kernel, try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (!task_is_running(t) && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
}
#endif	/* CONFIG_KGDB_KDB */