Merge tag 'locking-urgent-2021-05-23' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-block.git] / kernel / signal.c
CommitLineData
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4
LT
2/*
3 * linux/kernel/signal.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 *
7 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
8 *
9 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
10 * Changes to use preallocated sigqueue structures
11 * to allow signals to be sent reliably.
12 */
13
1da177e4 14#include <linux/slab.h>
9984de1a 15#include <linux/export.h>
1da177e4 16#include <linux/init.h>
589ee628 17#include <linux/sched/mm.h>
8703e8a4 18#include <linux/sched/user.h>
b17b0153 19#include <linux/sched/debug.h>
29930025 20#include <linux/sched/task.h>
68db0cf1 21#include <linux/sched/task_stack.h>
32ef5517 22#include <linux/sched/cputime.h>
3eb39f47 23#include <linux/file.h>
1da177e4 24#include <linux/fs.h>
3eb39f47 25#include <linux/proc_fs.h>
1da177e4
LT
26#include <linux/tty.h>
27#include <linux/binfmts.h>
179899fd 28#include <linux/coredump.h>
1da177e4
LT
29#include <linux/security.h>
30#include <linux/syscalls.h>
31#include <linux/ptrace.h>
7ed20e1a 32#include <linux/signal.h>
fba2afaa 33#include <linux/signalfd.h>
f84d49b2 34#include <linux/ratelimit.h>
35de254d 35#include <linux/tracehook.h>
c59ede7b 36#include <linux/capability.h>
7dfb7103 37#include <linux/freezer.h>
84d73786
SB
38#include <linux/pid_namespace.h>
39#include <linux/nsproxy.h>
6b550f94 40#include <linux/user_namespace.h>
0326f5a9 41#include <linux/uprobes.h>
90268439 42#include <linux/compat.h>
2b5faa4c 43#include <linux/cn_proc.h>
52f5684c 44#include <linux/compiler.h>
31ea70e0 45#include <linux/posix-timers.h>
76f969e8 46#include <linux/cgroup.h>
b48345aa 47#include <linux/audit.h>
52f5684c 48
d1eb650f
MH
49#define CREATE_TRACE_POINTS
50#include <trace/events/signal.h>
84d73786 51
1da177e4 52#include <asm/param.h>
7c0f6ba6 53#include <linux/uaccess.h>
1da177e4
LT
54#include <asm/unistd.h>
55#include <asm/siginfo.h>
d550bbd4 56#include <asm/cacheflush.h>
1da177e4
LT
57
58/*
59 * SLAB caches for signal bits.
60 */
61
e18b890b 62static struct kmem_cache *sigqueue_cachep;
1da177e4 63
f84d49b2
NO
64int print_fatal_signals __read_mostly;
65
35de254d 66static void __user *sig_handler(struct task_struct *t, int sig)
93585eea 67{
35de254d
RM
68 return t->sighand->action[sig - 1].sa.sa_handler;
69}
93585eea 70
e4a8b4ef 71static inline bool sig_handler_ignored(void __user *handler, int sig)
35de254d 72{
93585eea 73 /* Is it explicitly or implicitly ignored? */
93585eea 74 return handler == SIG_IGN ||
e4a8b4ef 75 (handler == SIG_DFL && sig_kernel_ignore(sig));
93585eea 76}
1da177e4 77
41aaa481 78static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
1da177e4 79{
35de254d 80 void __user *handler;
1da177e4 81
f008faff
ON
82 handler = sig_handler(t, sig);
83
86989c41
EB
84 /* SIGKILL and SIGSTOP may not be sent to the global init */
85 if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
86 return true;
87
f008faff 88 if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
ac253850 89 handler == SIG_DFL && !(force && sig_kernel_only(sig)))
41aaa481 90 return true;
f008faff 91
33da8e7c 92 /* Only allow kernel generated signals to this kthread */
e8b33b8c 93 if (unlikely((t->flags & PF_KTHREAD) &&
33da8e7c
EB
94 (handler == SIG_KTHREAD_KERNEL) && !force))
95 return true;
96
f008faff
ON
97 return sig_handler_ignored(handler, sig);
98}
99
6a0cdcd7 100static bool sig_ignored(struct task_struct *t, int sig, bool force)
f008faff 101{
1da177e4
LT
102 /*
103 * Blocked signals are never ignored, since the
104 * signal handler may change by the time it is
105 * unblocked.
106 */
325d22df 107 if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
6a0cdcd7 108 return false;
1da177e4 109
35de254d 110 /*
628c1bcb
ON
111 * Tracers may want to know about even ignored signal unless it
112 * is SIGKILL which can't be reported anyway but can be ignored
113 * by SIGNAL_UNKILLABLE task.
35de254d 114 */
628c1bcb 115 if (t->ptrace && sig != SIGKILL)
6a0cdcd7 116 return false;
628c1bcb
ON
117
118 return sig_task_ignored(t, sig, force);
1da177e4
LT
119}
120
121/*
122 * Re-calculate pending state from the set of locally pending
123 * signals, globally pending signals, and blocked signals.
124 */
938696a8 125static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
1da177e4
LT
126{
127 unsigned long ready;
128 long i;
129
130 switch (_NSIG_WORDS) {
131 default:
132 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
133 ready |= signal->sig[i] &~ blocked->sig[i];
134 break;
135
136 case 4: ready = signal->sig[3] &~ blocked->sig[3];
137 ready |= signal->sig[2] &~ blocked->sig[2];
138 ready |= signal->sig[1] &~ blocked->sig[1];
139 ready |= signal->sig[0] &~ blocked->sig[0];
140 break;
141
142 case 2: ready = signal->sig[1] &~ blocked->sig[1];
143 ready |= signal->sig[0] &~ blocked->sig[0];
144 break;
145
146 case 1: ready = signal->sig[0] &~ blocked->sig[0];
147 }
148 return ready != 0;
149}
150
151#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
152
09ae854e 153static bool recalc_sigpending_tsk(struct task_struct *t)
1da177e4 154{
76f969e8 155 if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
1da177e4 156 PENDING(&t->pending, &t->blocked) ||
76f969e8
RG
157 PENDING(&t->signal->shared_pending, &t->blocked) ||
158 cgroup_task_frozen(t)) {
1da177e4 159 set_tsk_thread_flag(t, TIF_SIGPENDING);
09ae854e 160 return true;
7bb44ade 161 }
09ae854e 162
b74d0deb
RM
163 /*
164 * We must never clear the flag in another thread, or in current
165 * when it's possible the current syscall is returning -ERESTART*.
166 * So we don't clear it here, and only callers who know they should do.
167 */
09ae854e 168 return false;
7bb44ade
RM
169}
170
171/*
172 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
173 * This is superfluous when called on current, the wakeup is a harmless no-op.
174 */
175void recalc_sigpending_and_wake(struct task_struct *t)
176{
177 if (recalc_sigpending_tsk(t))
178 signal_wake_up(t, 0);
1da177e4
LT
179}
180
181void recalc_sigpending(void)
182{
8df1947c 183 if (!recalc_sigpending_tsk(current) && !freezing(current))
b74d0deb
RM
184 clear_thread_flag(TIF_SIGPENDING);
185
1da177e4 186}
fb50f5a4 187EXPORT_SYMBOL(recalc_sigpending);
1da177e4 188
088fe47c
EB
189void calculate_sigpending(void)
190{
191 /* Have any signals or users of TIF_SIGPENDING been delayed
192 * until after fork?
193 */
194 spin_lock_irq(&current->sighand->siglock);
195 set_tsk_thread_flag(current, TIF_SIGPENDING);
196 recalc_sigpending();
197 spin_unlock_irq(&current->sighand->siglock);
198}
199
1da177e4
LT
200/* Given the mask, find the first available signal that should be serviced. */
201
a27341cd
LT
202#define SYNCHRONOUS_MASK \
203 (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
a0727e8c 204 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))
a27341cd 205
fba2afaa 206int next_signal(struct sigpending *pending, sigset_t *mask)
1da177e4
LT
207{
208 unsigned long i, *s, *m, x;
209 int sig = 0;
f84d49b2 210
1da177e4
LT
211 s = pending->signal.sig;
212 m = mask->sig;
a27341cd
LT
213
214 /*
215 * Handle the first word specially: it contains the
216 * synchronous signals that need to be dequeued first.
217 */
218 x = *s &~ *m;
219 if (x) {
220 if (x & SYNCHRONOUS_MASK)
221 x &= SYNCHRONOUS_MASK;
222 sig = ffz(~x) + 1;
223 return sig;
224 }
225
1da177e4
LT
226 switch (_NSIG_WORDS) {
227 default:
a27341cd
LT
228 for (i = 1; i < _NSIG_WORDS; ++i) {
229 x = *++s &~ *++m;
230 if (!x)
231 continue;
232 sig = ffz(~x) + i*_NSIG_BPW + 1;
233 break;
234 }
1da177e4
LT
235 break;
236
a27341cd
LT
237 case 2:
238 x = s[1] &~ m[1];
239 if (!x)
1da177e4 240 break;
a27341cd 241 sig = ffz(~x) + _NSIG_BPW + 1;
1da177e4
LT
242 break;
243
a27341cd
LT
244 case 1:
245 /* Nothing to do */
1da177e4
LT
246 break;
247 }
f84d49b2 248
1da177e4
LT
249 return sig;
250}
251
f84d49b2
NO
252static inline void print_dropped_signal(int sig)
253{
254 static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
255
256 if (!print_fatal_signals)
257 return;
258
259 if (!__ratelimit(&ratelimit_state))
260 return;
261
747800ef 262 pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
f84d49b2
NO
263 current->comm, current->pid, sig);
264}
265
d79fdd6d 266/**
7dd3db54 267 * task_set_jobctl_pending - set jobctl pending bits
d79fdd6d 268 * @task: target task
7dd3db54 269 * @mask: pending bits to set
d79fdd6d 270 *
7dd3db54
TH
271 * Clear @mask from @task->jobctl. @mask must be subset of
272 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
273 * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is
274 * cleared. If @task is already being killed or exiting, this function
275 * becomes noop.
276 *
277 * CONTEXT:
278 * Must be called with @task->sighand->siglock held.
279 *
280 * RETURNS:
281 * %true if @mask is set, %false if made noop because @task was dying.
282 */
b76808e6 283bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
7dd3db54
TH
284{
285 BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
286 JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
287 BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));
288
1e4cf0d3 289 if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
7dd3db54
TH
290 return false;
291
292 if (mask & JOBCTL_STOP_SIGMASK)
293 task->jobctl &= ~JOBCTL_STOP_SIGMASK;
294
295 task->jobctl |= mask;
296 return true;
297}
298
d79fdd6d 299/**
a8f072c1 300 * task_clear_jobctl_trapping - clear jobctl trapping bit
d79fdd6d
TH
301 * @task: target task
302 *
a8f072c1
TH
303 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
304 * Clear it and wake up the ptracer. Note that we don't need any further
305 * locking. @task->siglock guarantees that @task->parent points to the
306 * ptracer.
d79fdd6d
TH
307 *
308 * CONTEXT:
309 * Must be called with @task->sighand->siglock held.
310 */
73ddff2b 311void task_clear_jobctl_trapping(struct task_struct *task)
d79fdd6d 312{
a8f072c1
TH
313 if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
314 task->jobctl &= ~JOBCTL_TRAPPING;
650226bd 315 smp_mb(); /* advised by wake_up_bit() */
62c124ff 316 wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
d79fdd6d
TH
317 }
318}
319
e5c1902e 320/**
3759a0d9 321 * task_clear_jobctl_pending - clear jobctl pending bits
e5c1902e 322 * @task: target task
3759a0d9 323 * @mask: pending bits to clear
e5c1902e 324 *
3759a0d9
TH
325 * Clear @mask from @task->jobctl. @mask must be subset of
326 * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other
327 * STOP bits are cleared together.
e5c1902e 328 *
6dfca329
TH
329 * If clearing of @mask leaves no stop or trap pending, this function calls
330 * task_clear_jobctl_trapping().
e5c1902e
TH
331 *
332 * CONTEXT:
333 * Must be called with @task->sighand->siglock held.
334 */
b76808e6 335void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
e5c1902e 336{
3759a0d9
TH
337 BUG_ON(mask & ~JOBCTL_PENDING_MASK);
338
339 if (mask & JOBCTL_STOP_PENDING)
340 mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;
341
342 task->jobctl &= ~mask;
6dfca329
TH
343
344 if (!(task->jobctl & JOBCTL_PENDING_MASK))
345 task_clear_jobctl_trapping(task);
e5c1902e
TH
346}
347
348/**
349 * task_participate_group_stop - participate in a group stop
350 * @task: task participating in a group stop
351 *
a8f072c1 352 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
39efa3ef 353 * Group stop states are cleared and the group stop count is consumed if
a8f072c1 354 * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group
68d8681e 355 * stop, the appropriate `SIGNAL_*` flags are set.
e5c1902e
TH
356 *
357 * CONTEXT:
358 * Must be called with @task->sighand->siglock held.
244056f9
TH
359 *
360 * RETURNS:
361 * %true if group stop completion should be notified to the parent, %false
362 * otherwise.
e5c1902e
TH
363 */
364static bool task_participate_group_stop(struct task_struct *task)
365{
366 struct signal_struct *sig = task->signal;
a8f072c1 367 bool consume = task->jobctl & JOBCTL_STOP_CONSUME;
e5c1902e 368
a8f072c1 369 WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));
39efa3ef 370
3759a0d9 371 task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);
e5c1902e
TH
372
373 if (!consume)
374 return false;
375
376 if (!WARN_ON_ONCE(sig->group_stop_count == 0))
377 sig->group_stop_count--;
378
244056f9
TH
379 /*
380 * Tell the caller to notify completion iff we are entering into a
381 * fresh group stop. Read comment in do_signal_stop() for details.
382 */
383 if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
2d39b3cd 384 signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
e5c1902e
TH
385 return true;
386 }
387 return false;
388}
389
924de3b8
EB
390void task_join_group_stop(struct task_struct *task)
391{
7b3c36fc
ON
392 unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK;
393 struct signal_struct *sig = current->signal;
394
395 if (sig->group_stop_count) {
396 sig->group_stop_count++;
397 mask |= JOBCTL_STOP_CONSUME;
398 } else if (!(sig->flags & SIGNAL_STOP_STOPPED))
399 return;
400
924de3b8 401 /* Have the new thread join an on-going signal group stop */
7b3c36fc 402 task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING);
924de3b8
EB
403}
404
c69e8d9c
DH
405/*
406 * allocate a new signal queue record
407 * - this may be called without locks if and only if t == current, otherwise an
5aba085e 408 * appropriate lock must be held to stop the target task from exiting
c69e8d9c 409 */
f84d49b2 410static struct sigqueue *
69995ebb
TG
411__sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags,
412 int override_rlimit, const unsigned int sigqueue_flags)
1da177e4
LT
413{
414 struct sigqueue *q = NULL;
10b1fbdb 415 struct user_struct *user;
fda31c50 416 int sigpending;
1da177e4 417
10b1fbdb 418 /*
7cf7db8d
TG
419 * Protect access to @t credentials. This can go away when all
420 * callers hold rcu read lock.
fda31c50
LT
421 *
422 * NOTE! A pending signal will hold on to the user refcount,
423 * and we get/put the refcount only when the sigpending count
424 * changes from/to zero.
10b1fbdb 425 */
7cf7db8d 426 rcu_read_lock();
fda31c50
LT
427 user = __task_cred(t)->user;
428 sigpending = atomic_inc_return(&user->sigpending);
429 if (sigpending == 1)
430 get_uid(user);
7cf7db8d 431 rcu_read_unlock();
f84d49b2 432
fda31c50 433 if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
4bad58eb
TG
434 /*
435 * Preallocation does not hold sighand::siglock so it can't
436 * use the cache. The lockless caching requires that only
437 * one consumer and only one producer run at a time.
438 */
439 q = READ_ONCE(t->sigqueue_cache);
440 if (!q || sigqueue_flags)
441 q = kmem_cache_alloc(sigqueue_cachep, gfp_flags);
442 else
443 WRITE_ONCE(t->sigqueue_cache, NULL);
f84d49b2
NO
444 } else {
445 print_dropped_signal(sig);
446 }
447
1da177e4 448 if (unlikely(q == NULL)) {
fda31c50
LT
449 if (atomic_dec_and_test(&user->sigpending))
450 free_uid(user);
1da177e4
LT
451 } else {
452 INIT_LIST_HEAD(&q->list);
69995ebb 453 q->flags = sigqueue_flags;
d84f4f99 454 q->user = user;
1da177e4 455 }
d84f4f99
DH
456
457 return q;
1da177e4
LT
458}
459
4bad58eb
TG
460void exit_task_sigqueue_cache(struct task_struct *tsk)
461{
462 /* Race free because @tsk is mopped up */
463 struct sigqueue *q = tsk->sigqueue_cache;
464
465 if (q) {
466 tsk->sigqueue_cache = NULL;
467 /*
468 * Hand it back to the cache as the task might
469 * be self reaping which would leak the object.
470 */
471 kmem_cache_free(sigqueue_cachep, q);
472 }
473}
474
475static void sigqueue_cache_or_free(struct sigqueue *q)
476{
477 /*
478 * Cache one sigqueue per task. This pairs with the consumer side
479 * in __sigqueue_alloc() and needs READ/WRITE_ONCE() to prevent the
480 * compiler from store tearing and to tell KCSAN that the data race
481 * is intentional when run without holding current->sighand->siglock,
482 * which is fine as current obviously cannot run __sigqueue_free()
483 * concurrently.
484 */
485 if (!READ_ONCE(current->sigqueue_cache))
486 WRITE_ONCE(current->sigqueue_cache, q);
487 else
488 kmem_cache_free(sigqueue_cachep, q);
489}
490
514a01b8 491static void __sigqueue_free(struct sigqueue *q)
1da177e4
LT
492{
493 if (q->flags & SIGQUEUE_PREALLOC)
494 return;
fda31c50
LT
495 if (atomic_dec_and_test(&q->user->sigpending))
496 free_uid(q->user);
4bad58eb 497 sigqueue_cache_or_free(q);
1da177e4
LT
498}
499
6a14c5c9 500void flush_sigqueue(struct sigpending *queue)
1da177e4
LT
501{
502 struct sigqueue *q;
503
504 sigemptyset(&queue->signal);
505 while (!list_empty(&queue->list)) {
506 q = list_entry(queue->list.next, struct sigqueue , list);
507 list_del_init(&q->list);
508 __sigqueue_free(q);
509 }
510}
511
512/*
9e7c8f8c 513 * Flush all pending signals for this kthread.
1da177e4 514 */
c81addc9 515void flush_signals(struct task_struct *t)
1da177e4
LT
516{
517 unsigned long flags;
518
519 spin_lock_irqsave(&t->sighand->siglock, flags);
9e7c8f8c
ON
520 clear_tsk_thread_flag(t, TIF_SIGPENDING);
521 flush_sigqueue(&t->pending);
522 flush_sigqueue(&t->signal->shared_pending);
1da177e4
LT
523 spin_unlock_irqrestore(&t->sighand->siglock, flags);
524}
fb50f5a4 525EXPORT_SYMBOL(flush_signals);
1da177e4 526
baa73d9e 527#ifdef CONFIG_POSIX_TIMERS
cbaffba1
ON
528static void __flush_itimer_signals(struct sigpending *pending)
529{
530 sigset_t signal, retain;
531 struct sigqueue *q, *n;
532
533 signal = pending->signal;
534 sigemptyset(&retain);
535
536 list_for_each_entry_safe(q, n, &pending->list, list) {
537 int sig = q->info.si_signo;
538
539 if (likely(q->info.si_code != SI_TIMER)) {
540 sigaddset(&retain, sig);
541 } else {
542 sigdelset(&signal, sig);
543 list_del_init(&q->list);
544 __sigqueue_free(q);
545 }
546 }
547
548 sigorsets(&pending->signal, &signal, &retain);
549}
550
551void flush_itimer_signals(void)
552{
553 struct task_struct *tsk = current;
554 unsigned long flags;
555
556 spin_lock_irqsave(&tsk->sighand->siglock, flags);
557 __flush_itimer_signals(&tsk->pending);
558 __flush_itimer_signals(&tsk->signal->shared_pending);
559 spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
560}
baa73d9e 561#endif
cbaffba1 562
10ab825b
ON
563void ignore_signals(struct task_struct *t)
564{
565 int i;
566
567 for (i = 0; i < _NSIG; ++i)
568 t->sighand->action[i].sa.sa_handler = SIG_IGN;
569
570 flush_signals(t);
571}
572
1da177e4
LT
573/*
574 * Flush all handlers for a task.
575 */
576
577void
578flush_signal_handlers(struct task_struct *t, int force_default)
579{
580 int i;
581 struct k_sigaction *ka = &t->sighand->action[0];
582 for (i = _NSIG ; i != 0 ; i--) {
583 if (force_default || ka->sa.sa_handler != SIG_IGN)
584 ka->sa.sa_handler = SIG_DFL;
585 ka->sa.sa_flags = 0;
522cff14 586#ifdef __ARCH_HAS_SA_RESTORER
2ca39528
KC
587 ka->sa.sa_restorer = NULL;
588#endif
1da177e4
LT
589 sigemptyset(&ka->sa.sa_mask);
590 ka++;
591 }
592}
593
67a48a24 594bool unhandled_signal(struct task_struct *tsk, int sig)
abd4f750 595{
445a91d2 596 void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
b460cbc5 597 if (is_global_init(tsk))
67a48a24
CB
598 return true;
599
445a91d2 600 if (handler != SIG_IGN && handler != SIG_DFL)
67a48a24
CB
601 return false;
602
a288eecc
TH
603 /* if ptraced, let the tracer determine */
604 return !tsk->ptrace;
abd4f750
MAS
605}
606
ae7795bc 607static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
57db7e4a 608 bool *resched_timer)
1da177e4
LT
609{
610 struct sigqueue *q, *first = NULL;
1da177e4 611
1da177e4
LT
612 /*
613 * Collect the siginfo appropriate to this signal. Check if
614 * there is another siginfo for the same signal.
615 */
616 list_for_each_entry(q, &list->list, list) {
617 if (q->info.si_signo == sig) {
d4434207
ON
618 if (first)
619 goto still_pending;
1da177e4
LT
620 first = q;
621 }
622 }
d4434207
ON
623
624 sigdelset(&list->signal, sig);
625
1da177e4 626 if (first) {
d4434207 627still_pending:
1da177e4
LT
628 list_del_init(&first->list);
629 copy_siginfo(info, &first->info);
57db7e4a
EB
630
631 *resched_timer =
632 (first->flags & SIGQUEUE_PREALLOC) &&
633 (info->si_code == SI_TIMER) &&
634 (info->si_sys_private);
635
1da177e4 636 __sigqueue_free(first);
1da177e4 637 } else {
5aba085e
RD
638 /*
639 * Ok, it wasn't in the queue. This must be
640 * a fast-pathed signal or we must have been
641 * out of queue space. So zero out the info.
1da177e4 642 */
faf1f22b 643 clear_siginfo(info);
1da177e4
LT
644 info->si_signo = sig;
645 info->si_errno = 0;
7486e5d9 646 info->si_code = SI_USER;
1da177e4
LT
647 info->si_pid = 0;
648 info->si_uid = 0;
649 }
1da177e4
LT
650}
651
652static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
ae7795bc 653 kernel_siginfo_t *info, bool *resched_timer)
1da177e4 654{
27d91e07 655 int sig = next_signal(pending, mask);
1da177e4 656
2e01fabe 657 if (sig)
57db7e4a 658 collect_signal(sig, pending, info, resched_timer);
1da177e4
LT
659 return sig;
660}
661
662/*
5aba085e 663 * Dequeue a signal and return the element to the caller, which is
1da177e4
LT
664 * expected to free it.
665 *
666 * All callers have to hold the siglock.
667 */
ae7795bc 668int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
1da177e4 669{
57db7e4a 670 bool resched_timer = false;
c5363d03 671 int signr;
caec4e8d
BH
672
673 /* We only dequeue private signals from ourselves, we don't let
674 * signalfd steal them
675 */
57db7e4a 676 signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
8bfd9a7a 677 if (!signr) {
1da177e4 678 signr = __dequeue_signal(&tsk->signal->shared_pending,
57db7e4a 679 mask, info, &resched_timer);
baa73d9e 680#ifdef CONFIG_POSIX_TIMERS
8bfd9a7a
TG
681 /*
682 * itimer signal ?
683 *
684 * itimers are process shared and we restart periodic
685 * itimers in the signal delivery path to prevent DoS
686 * attacks in the high resolution timer case. This is
5aba085e 687 * compliant with the old way of self-restarting
8bfd9a7a
TG
688 * itimers, as the SIGALRM is a legacy signal and only
689 * queued once. Changing the restart behaviour to
690 * restart the timer in the signal dequeue path is
691 * reducing the timer noise on heavy loaded !highres
692 * systems too.
693 */
694 if (unlikely(signr == SIGALRM)) {
695 struct hrtimer *tmr = &tsk->signal->real_timer;
696
697 if (!hrtimer_is_queued(tmr) &&
2456e855 698 tsk->signal->it_real_incr != 0) {
8bfd9a7a
TG
699 hrtimer_forward(tmr, tmr->base->get_time(),
700 tsk->signal->it_real_incr);
701 hrtimer_restart(tmr);
702 }
703 }
baa73d9e 704#endif
8bfd9a7a 705 }
c5363d03 706
b8fceee1 707 recalc_sigpending();
c5363d03
PE
708 if (!signr)
709 return 0;
710
711 if (unlikely(sig_kernel_stop(signr))) {
8bfd9a7a
TG
712 /*
713 * Set a marker that we have dequeued a stop signal. Our
714 * caller might release the siglock and then the pending
715 * stop signal it is about to process is no longer in the
716 * pending bitmasks, but must still be cleared by a SIGCONT
717 * (and overruled by a SIGKILL). So those cases clear this
718 * shared flag after we've set it. Note that this flag may
719 * remain set after the signal we return is ignored or
720 * handled. That doesn't matter because its only purpose
721 * is to alert stop-signal processing code when another
722 * processor has come along and cleared the flag.
723 */
a8f072c1 724 current->jobctl |= JOBCTL_STOP_DEQUEUED;
8bfd9a7a 725 }
baa73d9e 726#ifdef CONFIG_POSIX_TIMERS
57db7e4a 727 if (resched_timer) {
1da177e4
LT
728 /*
729 * Release the siglock to ensure proper locking order
730 * of timer locks outside of siglocks. Note, we leave
731 * irqs disabled here, since the posix-timers code is
732 * about to disable them again anyway.
733 */
734 spin_unlock(&tsk->sighand->siglock);
96fe3b07 735 posixtimer_rearm(info);
1da177e4 736 spin_lock(&tsk->sighand->siglock);
9943d3ac
EB
737
738 /* Don't expose the si_sys_private value to userspace */
739 info->si_sys_private = 0;
1da177e4 740 }
baa73d9e 741#endif
1da177e4
LT
742 return signr;
743}
fb50f5a4 744EXPORT_SYMBOL_GPL(dequeue_signal);
1da177e4 745
7146db33
EB
746static int dequeue_synchronous_signal(kernel_siginfo_t *info)
747{
748 struct task_struct *tsk = current;
749 struct sigpending *pending = &tsk->pending;
750 struct sigqueue *q, *sync = NULL;
751
752 /*
753 * Might a synchronous signal be in the queue?
754 */
755 if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
756 return 0;
757
758 /*
759 * Return the first synchronous signal in the queue.
760 */
761 list_for_each_entry(q, &pending->list, list) {
7665a47f 762 /* Synchronous signals have a positive si_code */
7146db33
EB
763 if ((q->info.si_code > SI_USER) &&
764 (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
765 sync = q;
766 goto next;
767 }
768 }
769 return 0;
770next:
771 /*
772 * Check if there is another siginfo for the same signal.
773 */
774 list_for_each_entry_continue(q, &pending->list, list) {
775 if (q->info.si_signo == sync->info.si_signo)
776 goto still_pending;
777 }
778
779 sigdelset(&pending->signal, sync->info.si_signo);
780 recalc_sigpending();
781still_pending:
782 list_del_init(&sync->list);
783 copy_siginfo(info, &sync->info);
784 __sigqueue_free(sync);
785 return info->si_signo;
786}
787
1da177e4
LT
788/*
789 * Tell a process that it has a new active signal..
790 *
791 * NOTE! we rely on the previous spin_lock to
792 * lock interrupts for us! We can only be called with
793 * "siglock" held, and the local interrupt must
794 * have been disabled when that got acquired!
795 *
796 * No need to set need_resched since signal event passing
797 * goes through ->blocked
798 */
910ffdb1 799void signal_wake_up_state(struct task_struct *t, unsigned int state)
1da177e4 800{
1da177e4 801 set_tsk_thread_flag(t, TIF_SIGPENDING);
1da177e4 802 /*
910ffdb1 803 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
f021a3c2 804 * case. We don't check t->state here because there is a race with it
1da177e4
LT
805 * executing another processor and just now entering stopped state.
806 * By using wake_up_state, we ensure the process will wake up and
807 * handle its death signal.
808 */
910ffdb1 809 if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
1da177e4
LT
810 kick_process(t);
811}
812
71fabd5e
GA
813/*
814 * Remove signals in mask from the pending set and queue.
815 * Returns 1 if any signals were found.
816 *
817 * All callers must be holding the siglock.
71fabd5e 818 */
8f11351e 819static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
71fabd5e
GA
820{
821 struct sigqueue *q, *n;
822 sigset_t m;
823
824 sigandsets(&m, mask, &s->signal);
825 if (sigisemptyset(&m))
8f11351e 826 return;
71fabd5e 827
702a5073 828 sigandnsets(&s->signal, &s->signal, mask);
71fabd5e
GA
829 list_for_each_entry_safe(q, n, &s->list, list) {
830 if (sigismember(mask, q->info.si_signo)) {
831 list_del_init(&q->list);
832 __sigqueue_free(q);
833 }
834 }
71fabd5e 835}
1da177e4 836
ae7795bc 837static inline int is_si_special(const struct kernel_siginfo *info)
614c517d 838{
4ff4c31a 839 return info <= SEND_SIG_PRIV;
614c517d
ON
840}
841
ae7795bc 842static inline bool si_fromuser(const struct kernel_siginfo *info)
614c517d
ON
843{
844 return info == SEND_SIG_NOINFO ||
845 (!is_si_special(info) && SI_FROMUSER(info));
846}
847
39fd3393
SH
848/*
849 * called with RCU read lock from check_kill_permission()
850 */
2a9b9094 851static bool kill_ok_by_cred(struct task_struct *t)
39fd3393
SH
852{
853 const struct cred *cred = current_cred();
854 const struct cred *tcred = __task_cred(t);
855
2a9b9094
CB
856 return uid_eq(cred->euid, tcred->suid) ||
857 uid_eq(cred->euid, tcred->uid) ||
858 uid_eq(cred->uid, tcred->suid) ||
859 uid_eq(cred->uid, tcred->uid) ||
860 ns_capable(tcred->user_ns, CAP_KILL);
39fd3393
SH
861}
862
1da177e4
LT
863/*
864 * Bad permissions for sending the signal
694f690d 865 * - the caller must hold the RCU read lock
1da177e4 866 */
ae7795bc 867static int check_kill_permission(int sig, struct kernel_siginfo *info,
1da177e4
LT
868 struct task_struct *t)
869{
2e2ba22e 870 struct pid *sid;
3b5e9e53
ON
871 int error;
872
7ed20e1a 873 if (!valid_signal(sig))
3b5e9e53
ON
874 return -EINVAL;
875
614c517d 876 if (!si_fromuser(info))
3b5e9e53 877 return 0;
e54dc243 878
3b5e9e53
ON
879 error = audit_signal_info(sig, t); /* Let audit system see the signal */
880 if (error)
1da177e4 881 return error;
3b5e9e53 882
065add39 883 if (!same_thread_group(current, t) &&
39fd3393 884 !kill_ok_by_cred(t)) {
2e2ba22e
ON
885 switch (sig) {
886 case SIGCONT:
2e2ba22e 887 sid = task_session(t);
2e2ba22e
ON
888 /*
889 * We don't return the error if sid == NULL. The
890 * task was unhashed, the caller must notice this.
891 */
892 if (!sid || sid == task_session(current))
893 break;
df561f66 894 fallthrough;
2e2ba22e
ON
895 default:
896 return -EPERM;
897 }
898 }
c2f0c7c3 899
6b4f3d01 900 return security_task_kill(t, info, sig, NULL);
1da177e4
LT
901}
902
fb1d910c
TH
903/**
904 * ptrace_trap_notify - schedule trap to notify ptracer
905 * @t: tracee wanting to notify tracer
906 *
907 * This function schedules sticky ptrace trap which is cleared on the next
908 * TRAP_STOP to notify ptracer of an event. @t must have been seized by
909 * ptracer.
910 *
544b2c91
TH
911 * If @t is running, STOP trap will be taken. If trapped for STOP and
912 * ptracer is listening for events, tracee is woken up so that it can
913 * re-trap for the new event. If trapped otherwise, STOP trap will be
914 * eventually taken without returning to userland after the existing traps
915 * are finished by PTRACE_CONT.
fb1d910c
TH
916 *
917 * CONTEXT:
918 * Must be called with @task->sighand->siglock held.
919 */
920static void ptrace_trap_notify(struct task_struct *t)
921{
922 WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
923 assert_spin_locked(&t->sighand->siglock);
924
925 task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
910ffdb1 926 ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
fb1d910c
TH
927}
928
1da177e4 929/*
7e695a5e
ON
930 * Handle magic process-wide effects of stop/continue signals. Unlike
931 * the signal actions, these happen immediately at signal-generation
1da177e4
LT
932 * time regardless of blocking, ignoring, or handling. This does the
933 * actual continuing for SIGCONT, but not the actual stopping for stop
7e695a5e
ON
934 * signals. The process stop is done as a signal action for SIG_DFL.
935 *
936 * Returns true if the signal should be actually delivered, otherwise
937 * it should be dropped.
1da177e4 938 */
403bad72 939static bool prepare_signal(int sig, struct task_struct *p, bool force)
1da177e4 940{
ad16a460 941 struct signal_struct *signal = p->signal;
1da177e4 942 struct task_struct *t;
9490592f 943 sigset_t flush;
1da177e4 944
403bad72 945 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
5fa534c9 946 if (!(signal->flags & SIGNAL_GROUP_EXIT))
403bad72 947 return sig == SIGKILL;
1da177e4 948 /*
7e695a5e 949 * The process is in the middle of dying, nothing to do.
1da177e4 950 */
7e695a5e 951 } else if (sig_kernel_stop(sig)) {
1da177e4
LT
952 /*
953 * This is a stop signal. Remove SIGCONT from all queues.
954 */
9490592f 955 siginitset(&flush, sigmask(SIGCONT));
c09c1441 956 flush_sigqueue_mask(&flush, &signal->shared_pending);
9490592f 957 for_each_thread(p, t)
c09c1441 958 flush_sigqueue_mask(&flush, &t->pending);
1da177e4 959 } else if (sig == SIGCONT) {
fc321d2e 960 unsigned int why;
1da177e4 961 /*
1deac632 962 * Remove all stop signals from all queues, wake all threads.
1da177e4 963 */
9490592f 964 siginitset(&flush, SIG_KERNEL_STOP_MASK);
c09c1441 965 flush_sigqueue_mask(&flush, &signal->shared_pending);
9490592f 966 for_each_thread(p, t) {
c09c1441 967 flush_sigqueue_mask(&flush, &t->pending);
3759a0d9 968 task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
fb1d910c
TH
969 if (likely(!(t->ptrace & PT_SEIZED)))
970 wake_up_state(t, __TASK_STOPPED);
971 else
972 ptrace_trap_notify(t);
9490592f 973 }
1da177e4 974
fc321d2e
ON
975 /*
976 * Notify the parent with CLD_CONTINUED if we were stopped.
977 *
978 * If we were in the middle of a group stop, we pretend it
979 * was already finished, and then continued. Since SIGCHLD
980 * doesn't queue we report only CLD_STOPPED, as if the next
981 * CLD_CONTINUED was dropped.
982 */
983 why = 0;
ad16a460 984 if (signal->flags & SIGNAL_STOP_STOPPED)
fc321d2e 985 why |= SIGNAL_CLD_CONTINUED;
ad16a460 986 else if (signal->group_stop_count)
fc321d2e
ON
987 why |= SIGNAL_CLD_STOPPED;
988
989 if (why) {
021e1ae3 990 /*
ae6d2ed7 991 * The first thread which returns from do_signal_stop()
021e1ae3 992 * will take ->siglock, notice SIGNAL_CLD_MASK, and
2e58f57d 993 * notify its parent. See get_signal().
021e1ae3 994 */
2d39b3cd 995 signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
ad16a460
ON
996 signal->group_stop_count = 0;
997 signal->group_exit_code = 0;
1da177e4 998 }
1da177e4 999 }
7e695a5e 1000
def8cf72 1001 return !sig_ignored(p, sig, force);
1da177e4
LT
1002}
1003
71f11dc0
ON
1004/*
1005 * Test if P wants to take SIG. After we've checked all threads with this,
1006 * it's equivalent to finding no threads not blocking SIG. Any threads not
1007 * blocking SIG were ruled out because they are not running and already
1008 * have pending signals. Such threads will dequeue from the shared queue
1009 * as soon as they're available, so putting the signal on the shared queue
1010 * will be equivalent to sending it to one such thread.
1011 */
acd14e62 1012static inline bool wants_signal(int sig, struct task_struct *p)
71f11dc0
ON
1013{
1014 if (sigismember(&p->blocked, sig))
acd14e62
CB
1015 return false;
1016
71f11dc0 1017 if (p->flags & PF_EXITING)
acd14e62
CB
1018 return false;
1019
71f11dc0 1020 if (sig == SIGKILL)
acd14e62
CB
1021 return true;
1022
71f11dc0 1023 if (task_is_stopped_or_traced(p))
acd14e62
CB
1024 return false;
1025
5c251e9d 1026 return task_curr(p) || !task_sigpending(p);
71f11dc0
ON
1027}
1028
07296149 1029static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
71f11dc0
ON
1030{
1031 struct signal_struct *signal = p->signal;
1032 struct task_struct *t;
1033
1034 /*
1035 * Now find a thread we can wake up to take the signal off the queue.
1036 *
1037 * If the main thread wants the signal, it gets first crack.
1038 * Probably the least surprising to the average bear.
1039 */
1040 if (wants_signal(sig, p))
1041 t = p;
07296149 1042 else if ((type == PIDTYPE_PID) || thread_group_empty(p))
71f11dc0
ON
1043 /*
1044 * There is just one thread and it does not need to be woken.
1045 * It will dequeue unblocked signals before it runs again.
1046 */
1047 return;
1048 else {
1049 /*
1050 * Otherwise try to find a suitable thread.
1051 */
1052 t = signal->curr_target;
1053 while (!wants_signal(sig, t)) {
1054 t = next_thread(t);
1055 if (t == signal->curr_target)
1056 /*
1057 * No thread needs to be woken.
1058 * Any eligible threads will see
1059 * the signal in the queue soon.
1060 */
1061 return;
1062 }
1063 signal->curr_target = t;
1064 }
1065
1066 /*
1067 * Found a killable thread. If the signal will be fatal,
1068 * then start taking the whole group down immediately.
1069 */
fae5fa44 1070 if (sig_fatal(p, sig) &&
42691579 1071 !(signal->flags & SIGNAL_GROUP_EXIT) &&
71f11dc0 1072 !sigismember(&t->real_blocked, sig) &&
42691579 1073 (sig == SIGKILL || !p->ptrace)) {
71f11dc0
ON
1074 /*
1075 * This signal will be fatal to the whole group.
1076 */
1077 if (!sig_kernel_coredump(sig)) {
1078 /*
1079 * Start a group exit and wake everybody up.
1080 * This way we don't have other threads
1081 * running and doing things after a slower
1082 * thread has the fatal signal pending.
1083 */
1084 signal->flags = SIGNAL_GROUP_EXIT;
1085 signal->group_exit_code = sig;
1086 signal->group_stop_count = 0;
1087 t = p;
1088 do {
6dfca329 1089 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
71f11dc0
ON
1090 sigaddset(&t->pending.signal, SIGKILL);
1091 signal_wake_up(t, 1);
1092 } while_each_thread(p, t);
1093 return;
1094 }
1095 }
1096
1097 /*
1098 * The signal is already in the shared-pending queue.
1099 * Tell the chosen thread to wake up and dequeue it.
1100 */
1101 signal_wake_up(t, sig == SIGKILL);
1102 return;
1103}
1104
a19e2c01 1105static inline bool legacy_queue(struct sigpending *signals, int sig)
af7fff9c
PE
1106{
1107 return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
1108}
1109
ae7795bc 1110static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
8ad23dea 1111 enum pid_type type, bool force)
1da177e4 1112{
2ca3515a 1113 struct sigpending *pending;
6e65acba 1114 struct sigqueue *q;
7a0aeb14 1115 int override_rlimit;
6c303d3a 1116 int ret = 0, result;
0a16b607 1117
6e65acba 1118 assert_spin_locked(&t->sighand->siglock);
921cf9f6 1119
6c303d3a 1120 result = TRACE_SIGNAL_IGNORED;
8ad23dea 1121 if (!prepare_signal(sig, t, force))
6c303d3a 1122 goto ret;
2ca3515a 1123
5a883cee 1124 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
2acb024d
PE
1125 /*
1126 * Short-circuit ignored signals and support queuing
1127 * exactly one non-rt signal, so that we can get more
1128 * detailed information about the cause of the signal.
1129 */
6c303d3a 1130 result = TRACE_SIGNAL_ALREADY_PENDING;
7e695a5e 1131 if (legacy_queue(pending, sig))
6c303d3a
ON
1132 goto ret;
1133
1134 result = TRACE_SIGNAL_DELIVERED;
1da177e4 1135 /*
a692933a 1136 * Skip useless siginfo allocation for SIGKILL and kernel threads.
1da177e4 1137 */
e8b33b8c 1138 if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
1da177e4
LT
1139 goto out_set;
1140
5aba085e
RD
1141 /*
1142 * Real-time signals must be queued if sent by sigqueue, or
1143 * some other real-time mechanism. It is implementation
1144 * defined whether kill() does so. We attempt to do so, on
1145 * the principle of least surprise, but since kill is not
1146 * allowed to fail with EAGAIN when low on memory we just
1147 * make sure at least one signal gets delivered and don't
1148 * pass on the info struct.
1149 */
7a0aeb14
VN
1150 if (sig < SIGRTMIN)
1151 override_rlimit = (is_si_special(info) || info->si_code >= 0);
1152 else
1153 override_rlimit = 0;
1154
69995ebb
TG
1155 q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0);
1156
1da177e4 1157 if (q) {
2ca3515a 1158 list_add_tail(&q->list, &pending->list);
1da177e4 1159 switch ((unsigned long) info) {
b67a1b9e 1160 case (unsigned long) SEND_SIG_NOINFO:
faf1f22b 1161 clear_siginfo(&q->info);
1da177e4
LT
1162 q->info.si_signo = sig;
1163 q->info.si_errno = 0;
1164 q->info.si_code = SI_USER;
9cd4fd10 1165 q->info.si_pid = task_tgid_nr_ns(current,
09bca05c 1166 task_active_pid_ns(t));
7a0cf094
EB
1167 rcu_read_lock();
1168 q->info.si_uid =
1169 from_kuid_munged(task_cred_xxx(t, user_ns),
1170 current_uid());
1171 rcu_read_unlock();
1da177e4 1172 break;
b67a1b9e 1173 case (unsigned long) SEND_SIG_PRIV:
faf1f22b 1174 clear_siginfo(&q->info);
1da177e4
LT
1175 q->info.si_signo = sig;
1176 q->info.si_errno = 0;
1177 q->info.si_code = SI_KERNEL;
1178 q->info.si_pid = 0;
1179 q->info.si_uid = 0;
1180 break;
1181 default:
1182 copy_siginfo(&q->info, info);
1183 break;
1184 }
8917bef3
EB
1185 } else if (!is_si_special(info) &&
1186 sig >= SIGRTMIN && info->si_code != SI_USER) {
1187 /*
1188 * Queue overflow, abort. We may abort if the
1189 * signal was rt and sent by user using something
1190 * other than kill().
1191 */
1192 result = TRACE_SIGNAL_OVERFLOW_FAIL;
1193 ret = -EAGAIN;
1194 goto ret;
1195 } else {
1196 /*
1197 * This is a silent loss of information. We still
1198 * send the signal, but the *info bits are lost.
1199 */
1200 result = TRACE_SIGNAL_LOSE_INFO;
1da177e4
LT
1201 }
1202
1203out_set:
53c30337 1204 signalfd_notify(t, sig);
2ca3515a 1205 sigaddset(&pending->signal, sig);
c3ad2c3b
EB
1206
1207 /* Let multiprocess signals appear after on-going forks */
1208 if (type > PIDTYPE_TGID) {
1209 struct multiprocess_signals *delayed;
1210 hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
1211 sigset_t *signal = &delayed->signal;
1212 /* Can't queue both a stop and a continue signal */
1213 if (sig == SIGCONT)
1214 sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
1215 else if (sig_kernel_stop(sig))
1216 sigdelset(signal, SIGCONT);
1217 sigaddset(signal, sig);
1218 }
1219 }
1220
07296149 1221 complete_signal(sig, t, type);
6c303d3a 1222ret:
5a883cee 1223 trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
6c303d3a 1224 return ret;
1da177e4
LT
1225}
1226
7a0cf094
EB
1227static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
1228{
1229 bool ret = false;
1230 switch (siginfo_layout(info->si_signo, info->si_code)) {
1231 case SIL_KILL:
1232 case SIL_CHLD:
1233 case SIL_RT:
1234 ret = true;
1235 break;
1236 case SIL_TIMER:
1237 case SIL_POLL:
1238 case SIL_FAULT:
9abcabe3 1239 case SIL_FAULT_TRAPNO:
7a0cf094
EB
1240 case SIL_FAULT_MCEERR:
1241 case SIL_FAULT_BNDERR:
1242 case SIL_FAULT_PKUERR:
fb6cc127 1243 case SIL_PERF_EVENT:
7a0cf094
EB
1244 case SIL_SYS:
1245 ret = false;
1246 break;
1247 }
1248 return ret;
1249}
1250
ae7795bc 1251static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
b213984b 1252 enum pid_type type)
7978b567 1253{
8ad23dea
EB
1254 /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */
1255 bool force = false;
921cf9f6 1256
8ad23dea
EB
1257 if (info == SEND_SIG_NOINFO) {
1258 /* Force if sent from an ancestor pid namespace */
1259 force = !task_pid_nr_ns(current, task_active_pid_ns(t));
1260 } else if (info == SEND_SIG_PRIV) {
1261 /* Don't ignore kernel generated signals */
1262 force = true;
1263 } else if (has_si_pid_and_uid(info)) {
1264 /* SIGKILL and SIGSTOP is special or has ids */
7a0cf094
EB
1265 struct user_namespace *t_user_ns;
1266
1267 rcu_read_lock();
1268 t_user_ns = task_cred_xxx(t, user_ns);
1269 if (current_user_ns() != t_user_ns) {
1270 kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
1271 info->si_uid = from_kuid_munged(t_user_ns, uid);
1272 }
1273 rcu_read_unlock();
921cf9f6 1274
8ad23dea
EB
1275 /* A kernel generated signal? */
1276 force = (info->si_code == SI_KERNEL);
1277
1278 /* From an ancestor pid namespace? */
1279 if (!task_pid_nr_ns(current, task_active_pid_ns(t))) {
7a0cf094 1280 info->si_pid = 0;
8ad23dea
EB
1281 force = true;
1282 }
7a0cf094 1283 }
8ad23dea 1284 return __send_signal(sig, info, t, type, force);
7978b567
SB
1285}
1286
4aaefee5 1287static void print_fatal_signal(int signr)
45807a1d 1288{
4aaefee5 1289 struct pt_regs *regs = signal_pt_regs();
747800ef 1290 pr_info("potentially unexpected fatal signal %d.\n", signr);
45807a1d 1291
ca5cd877 1292#if defined(__i386__) && !defined(__arch_um__)
747800ef 1293 pr_info("code at %08lx: ", regs->ip);
45807a1d
IM
1294 {
1295 int i;
1296 for (i = 0; i < 16; i++) {
1297 unsigned char insn;
1298
b45c6e76
AK
1299 if (get_user(insn, (unsigned char *)(regs->ip + i)))
1300 break;
747800ef 1301 pr_cont("%02x ", insn);
45807a1d
IM
1302 }
1303 }
747800ef 1304 pr_cont("\n");
45807a1d 1305#endif
3a9f84d3 1306 preempt_disable();
45807a1d 1307 show_regs(regs);
3a9f84d3 1308 preempt_enable();
45807a1d
IM
1309}
1310
1311static int __init setup_print_fatal_signals(char *str)
1312{
1313 get_option (&str, &print_fatal_signals);
1314
1315 return 1;
1316}
1317
1318__setup("print-fatal-signals=", setup_print_fatal_signals);
1da177e4 1319
4cd4b6d4 1320int
ae7795bc 1321__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
4cd4b6d4 1322{
b213984b 1323 return send_signal(sig, info, p, PIDTYPE_TGID);
4cd4b6d4
PE
1324}
1325
ae7795bc 1326int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
40b3b025 1327 enum pid_type type)
4a30debf
ON
1328{
1329 unsigned long flags;
1330 int ret = -ESRCH;
1331
1332 if (lock_task_sighand(p, &flags)) {
b213984b 1333 ret = send_signal(sig, info, p, type);
4a30debf
ON
1334 unlock_task_sighand(p, &flags);
1335 }
1336
1337 return ret;
1338}
1339
1da177e4
LT
1340/*
1341 * Force a signal that the process can't ignore: if necessary
1342 * we unblock the signal and change any SIG_IGN to SIG_DFL.
ae74c3b6
LT
1343 *
1344 * Note: If we unblock the signal, we always reset it to SIG_DFL,
1345 * since we do not want to have a signal handler that was blocked
1346 * be invoked when user space had explicitly blocked it.
1347 *
80fe728d
ON
1348 * We don't want to have recursive SIGSEGV's etc, for example,
1349 * that is why we also clear SIGNAL_UNKILLABLE.
1da177e4 1350 */
59c0e696
EB
1351static int
1352force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t)
1da177e4
LT
1353{
1354 unsigned long int flags;
ae74c3b6
LT
1355 int ret, blocked, ignored;
1356 struct k_sigaction *action;
59c0e696 1357 int sig = info->si_signo;
1da177e4
LT
1358
1359 spin_lock_irqsave(&t->sighand->siglock, flags);
ae74c3b6
LT
1360 action = &t->sighand->action[sig-1];
1361 ignored = action->sa.sa_handler == SIG_IGN;
1362 blocked = sigismember(&t->blocked, sig);
1363 if (blocked || ignored) {
1364 action->sa.sa_handler = SIG_DFL;
1365 if (blocked) {
1366 sigdelset(&t->blocked, sig);
7bb44ade 1367 recalc_sigpending_and_wake(t);
ae74c3b6 1368 }
1da177e4 1369 }
eb61b591
JI
1370 /*
1371 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
1372 * debugging to leave init killable.
1373 */
1374 if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
80fe728d 1375 t->signal->flags &= ~SIGNAL_UNKILLABLE;
b21c5bd5 1376 ret = send_signal(sig, info, t, PIDTYPE_PID);
1da177e4
LT
1377 spin_unlock_irqrestore(&t->sighand->siglock, flags);
1378
1379 return ret;
1380}
1381
a89e9b8a 1382int force_sig_info(struct kernel_siginfo *info)
59c0e696 1383{
a89e9b8a 1384 return force_sig_info_to_task(info, current);
59c0e696
EB
1385}
1386
1da177e4
LT
1387/*
1388 * Nuke all other threads in the group.
1389 */
09faef11 1390int zap_other_threads(struct task_struct *p)
1da177e4 1391{
09faef11
ON
1392 struct task_struct *t = p;
1393 int count = 0;
1da177e4 1394
1da177e4
LT
1395 p->signal->group_stop_count = 0;
1396
09faef11 1397 while_each_thread(p, t) {
6dfca329 1398 task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
09faef11
ON
1399 count++;
1400
1401 /* Don't bother with already dead threads */
1da177e4
LT
1402 if (t->exit_state)
1403 continue;
1da177e4 1404 sigaddset(&t->pending.signal, SIGKILL);
1da177e4
LT
1405 signal_wake_up(t, 1);
1406 }
09faef11
ON
1407
1408 return count;
1da177e4
LT
1409}
1410
b8ed374e
NK
1411struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1412 unsigned long *flags)
f63ee72e
ON
1413{
1414 struct sighand_struct *sighand;
1415
59dc6f3c 1416 rcu_read_lock();
f63ee72e
ON
1417 for (;;) {
1418 sighand = rcu_dereference(tsk->sighand);
59dc6f3c 1419 if (unlikely(sighand == NULL))
f63ee72e 1420 break;
59dc6f3c 1421
392809b2
ON
1422 /*
1423 * This sighand can be already freed and even reused, but
5f0d5a3a 1424 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
392809b2
ON
1425 * initializes ->siglock: this slab can't go away, it has
1426 * the same object type, ->siglock can't be reinitialized.
1427 *
1428 * We need to ensure that tsk->sighand is still the same
1429 * after we take the lock, we can race with de_thread() or
1430 * __exit_signal(). In the latter case the next iteration
1431 * must see ->sighand == NULL.
1432 */
59dc6f3c 1433 spin_lock_irqsave(&sighand->siglock, *flags);
913292c9 1434 if (likely(sighand == rcu_access_pointer(tsk->sighand)))
f63ee72e 1435 break;
59dc6f3c 1436 spin_unlock_irqrestore(&sighand->siglock, *flags);
f63ee72e 1437 }
59dc6f3c 1438 rcu_read_unlock();
f63ee72e
ON
1439
1440 return sighand;
1441}
1442
c69e8d9c
DH
1443/*
1444 * send signal info to all the members of a group
c69e8d9c 1445 */
ae7795bc
EB
1446int group_send_sig_info(int sig, struct kernel_siginfo *info,
1447 struct task_struct *p, enum pid_type type)
1da177e4 1448{
694f690d
DH
1449 int ret;
1450
1451 rcu_read_lock();
1452 ret = check_kill_permission(sig, info, p);
1453 rcu_read_unlock();
f63ee72e 1454
4a30debf 1455 if (!ret && sig)
40b3b025 1456 ret = do_send_sig_info(sig, info, p, type);
1da177e4
LT
1457
1458 return ret;
1459}
1460
1461/*
146a505d 1462 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1da177e4 1463 * control characters do (^C, ^Z etc)
c69e8d9c 1464 * - the caller must hold at least a readlock on tasklist_lock
1da177e4 1465 */
ae7795bc 1466int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
1da177e4
LT
1467{
1468 struct task_struct *p = NULL;
1469 int retval, success;
1470
1da177e4
LT
1471 success = 0;
1472 retval = -ESRCH;
c4b92fc1 1473 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
01024980 1474 int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
1da177e4
LT
1475 success |= !err;
1476 retval = err;
c4b92fc1 1477 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1da177e4
LT
1478 return success ? 0 : retval;
1479}
1480
ae7795bc 1481int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
1da177e4 1482{
d36174bc 1483 int error = -ESRCH;
1da177e4
LT
1484 struct task_struct *p;
1485
eca1a089
PM
1486 for (;;) {
1487 rcu_read_lock();
1488 p = pid_task(pid, PIDTYPE_PID);
1489 if (p)
01024980 1490 error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
eca1a089
PM
1491 rcu_read_unlock();
1492 if (likely(!p || error != -ESRCH))
1493 return error;
6ca25b55 1494
eca1a089
PM
1495 /*
1496 * The task was unhashed in between, try again. If it
1497 * is dead, pid_task() will return NULL, if we race with
1498 * de_thread() it will find the new leader.
1499 */
1500 }
1da177e4
LT
1501}
1502
ae7795bc 1503static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
c4b92fc1
EB
1504{
1505 int error;
1506 rcu_read_lock();
b488893a 1507 error = kill_pid_info(sig, info, find_vpid(pid));
c4b92fc1
EB
1508 rcu_read_unlock();
1509 return error;
1510}
1511
bb17fcca
CB
1512static inline bool kill_as_cred_perm(const struct cred *cred,
1513 struct task_struct *target)
d178bc3a
SH
1514{
1515 const struct cred *pcred = __task_cred(target);
bb17fcca
CB
1516
1517 return uid_eq(cred->euid, pcred->suid) ||
1518 uid_eq(cred->euid, pcred->uid) ||
1519 uid_eq(cred->uid, pcred->suid) ||
1520 uid_eq(cred->uid, pcred->uid);
d178bc3a
SH
1521}
1522
70f1b0d3
EB
1523/*
1524 * The usb asyncio usage of siginfo is wrong. The glibc support
1525 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
1526 * AKA after the generic fields:
1527 * kernel_pid_t si_pid;
1528 * kernel_uid32_t si_uid;
1529 * sigval_t si_value;
1530 *
1531 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
1532 * after the generic fields is:
1533 * void __user *si_addr;
1534 *
1535 * This is a practical problem when there is a 64bit big endian kernel
1536 * and a 32bit userspace. As the 32bit address will encoded in the low
1537 * 32bits of the pointer. Those low 32bits will be stored at higher
1538 * address than appear in a 32 bit pointer. So userspace will not
1539 * see the address it was expecting for it's completions.
1540 *
1541 * There is nothing in the encoding that can allow
1542 * copy_siginfo_to_user32 to detect this confusion of formats, so
1543 * handle this by requiring the caller of kill_pid_usb_asyncio to
1544 * notice when this situration takes place and to store the 32bit
1545 * pointer in sival_int, instead of sival_addr of the sigval_t addr
1546 * parameter.
1547 */
1548int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
1549 struct pid *pid, const struct cred *cred)
46113830 1550{
70f1b0d3 1551 struct kernel_siginfo info;
46113830 1552 struct task_struct *p;
14d8c9f3 1553 unsigned long flags;
70f1b0d3
EB
1554 int ret = -EINVAL;
1555
eaec2b0b
ZL
1556 if (!valid_signal(sig))
1557 return ret;
1558
70f1b0d3
EB
1559 clear_siginfo(&info);
1560 info.si_signo = sig;
1561 info.si_errno = errno;
1562 info.si_code = SI_ASYNCIO;
1563 *((sigval_t *)&info.si_pid) = addr;
46113830 1564
14d8c9f3 1565 rcu_read_lock();
2425c08b 1566 p = pid_task(pid, PIDTYPE_PID);
46113830
HW
1567 if (!p) {
1568 ret = -ESRCH;
1569 goto out_unlock;
1570 }
70f1b0d3 1571 if (!kill_as_cred_perm(cred, p)) {
46113830
HW
1572 ret = -EPERM;
1573 goto out_unlock;
1574 }
70f1b0d3 1575 ret = security_task_kill(p, &info, sig, cred);
8f95dc58
DQ
1576 if (ret)
1577 goto out_unlock;
14d8c9f3
TG
1578
1579 if (sig) {
1580 if (lock_task_sighand(p, &flags)) {
8ad23dea 1581 ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false);
14d8c9f3
TG
1582 unlock_task_sighand(p, &flags);
1583 } else
1584 ret = -ESRCH;
46113830
HW
1585 }
1586out_unlock:
14d8c9f3 1587 rcu_read_unlock();
46113830
HW
1588 return ret;
1589}
70f1b0d3 1590EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);
1da177e4
LT
1591
1592/*
1593 * kill_something_info() interprets pid in interesting ways just like kill(2).
1594 *
1595 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1596 * is probably wrong. Should make it like BSD or SYSV.
1597 */
1598
ae7795bc 1599static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
1da177e4 1600{
8d42db18 1601 int ret;
d5df763b 1602
3075afdf
ZL
1603 if (pid > 0)
1604 return kill_proc_info(sig, info, pid);
d5df763b 1605
4ea77014 1606 /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
1607 if (pid == INT_MIN)
1608 return -ESRCH;
1609
d5df763b
PE
1610 read_lock(&tasklist_lock);
1611 if (pid != -1) {
1612 ret = __kill_pgrp_info(sig, info,
1613 pid ? find_vpid(-pid) : task_pgrp(current));
1614 } else {
1da177e4
LT
1615 int retval = 0, count = 0;
1616 struct task_struct * p;
1617
1da177e4 1618 for_each_process(p) {
d25141a8
SB
1619 if (task_pid_vnr(p) > 1 &&
1620 !same_thread_group(p, current)) {
01024980
EB
1621 int err = group_send_sig_info(sig, info, p,
1622 PIDTYPE_MAX);
1da177e4
LT
1623 ++count;
1624 if (err != -EPERM)
1625 retval = err;
1626 }
1627 }
8d42db18 1628 ret = count ? retval : -ESRCH;
1da177e4 1629 }
d5df763b
PE
1630 read_unlock(&tasklist_lock);
1631
8d42db18 1632 return ret;
1da177e4
LT
1633}
1634
1635/*
1636 * These are for backward compatibility with the rest of the kernel source.
1637 */
1638
ae7795bc 1639int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
1da177e4 1640{
1da177e4
LT
1641 /*
1642 * Make sure legacy kernel users don't send in bad values
1643 * (normal paths check this in check_kill_permission).
1644 */
7ed20e1a 1645 if (!valid_signal(sig))
1da177e4
LT
1646 return -EINVAL;
1647
40b3b025 1648 return do_send_sig_info(sig, info, p, PIDTYPE_PID);
1da177e4 1649}
fb50f5a4 1650EXPORT_SYMBOL(send_sig_info);
1da177e4 1651
b67a1b9e
ON
1652#define __si_special(priv) \
1653 ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1654
1da177e4
LT
1655int
1656send_sig(int sig, struct task_struct *p, int priv)
1657{
b67a1b9e 1658 return send_sig_info(sig, __si_special(priv), p);
1da177e4 1659}
fb50f5a4 1660EXPORT_SYMBOL(send_sig);
1da177e4 1661
3cf5d076 1662void force_sig(int sig)
1da177e4 1663{
ffafd23b
EB
1664 struct kernel_siginfo info;
1665
1666 clear_siginfo(&info);
1667 info.si_signo = sig;
1668 info.si_errno = 0;
1669 info.si_code = SI_KERNEL;
1670 info.si_pid = 0;
1671 info.si_uid = 0;
a89e9b8a 1672 force_sig_info(&info);
1da177e4 1673}
fb50f5a4 1674EXPORT_SYMBOL(force_sig);
1da177e4
LT
1675
1676/*
1677 * When things go south during signal handling, we
1678 * will force a SIGSEGV. And if the signal that caused
1679 * the problem was already a SIGSEGV, we'll want to
1680 * make sure we don't even try to deliver the signal..
1681 */
cb44c9a0 1682void force_sigsegv(int sig)
1da177e4 1683{
cb44c9a0
EB
1684 struct task_struct *p = current;
1685
1da177e4
LT
1686 if (sig == SIGSEGV) {
1687 unsigned long flags;
1688 spin_lock_irqsave(&p->sighand->siglock, flags);
1689 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1690 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1691 }
3cf5d076 1692 force_sig(SIGSEGV);
1da177e4
LT
1693}
1694
91ca180d 1695int force_sig_fault_to_task(int sig, int code, void __user *addr
f8ec6601
EB
1696 ___ARCH_SI_TRAPNO(int trapno)
1697 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1698 , struct task_struct *t)
1699{
ae7795bc 1700 struct kernel_siginfo info;
f8ec6601
EB
1701
1702 clear_siginfo(&info);
1703 info.si_signo = sig;
1704 info.si_errno = 0;
1705 info.si_code = code;
1706 info.si_addr = addr;
1707#ifdef __ARCH_SI_TRAPNO
1708 info.si_trapno = trapno;
1709#endif
1710#ifdef __ia64__
1711 info.si_imm = imm;
1712 info.si_flags = flags;
1713 info.si_isr = isr;
1714#endif
59c0e696 1715 return force_sig_info_to_task(&info, t);
f8ec6601
EB
1716}
1717
91ca180d
EB
1718int force_sig_fault(int sig, int code, void __user *addr
1719 ___ARCH_SI_TRAPNO(int trapno)
2e1661d2 1720 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr))
91ca180d
EB
1721{
1722 return force_sig_fault_to_task(sig, code, addr
1723 ___ARCH_SI_TRAPNO(trapno)
2e1661d2 1724 ___ARCH_SI_IA64(imm, flags, isr), current);
f8ec6601
EB
1725}
1726
1727int send_sig_fault(int sig, int code, void __user *addr
1728 ___ARCH_SI_TRAPNO(int trapno)
1729 ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
1730 , struct task_struct *t)
1731{
ae7795bc 1732 struct kernel_siginfo info;
f8ec6601
EB
1733
1734 clear_siginfo(&info);
1735 info.si_signo = sig;
1736 info.si_errno = 0;
1737 info.si_code = code;
1738 info.si_addr = addr;
1739#ifdef __ARCH_SI_TRAPNO
1740 info.si_trapno = trapno;
1741#endif
1742#ifdef __ia64__
1743 info.si_imm = imm;
1744 info.si_flags = flags;
1745 info.si_isr = isr;
1746#endif
1747 return send_sig_info(info.si_signo, &info, t);
1748}
1749
f8eac901 1750int force_sig_mceerr(int code, void __user *addr, short lsb)
38246735 1751{
ae7795bc 1752 struct kernel_siginfo info;
38246735
EB
1753
1754 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1755 clear_siginfo(&info);
1756 info.si_signo = SIGBUS;
1757 info.si_errno = 0;
1758 info.si_code = code;
1759 info.si_addr = addr;
1760 info.si_addr_lsb = lsb;
a89e9b8a 1761 return force_sig_info(&info);
38246735
EB
1762}
1763
1764int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
1765{
ae7795bc 1766 struct kernel_siginfo info;
38246735
EB
1767
1768 WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
1769 clear_siginfo(&info);
1770 info.si_signo = SIGBUS;
1771 info.si_errno = 0;
1772 info.si_code = code;
1773 info.si_addr = addr;
1774 info.si_addr_lsb = lsb;
1775 return send_sig_info(info.si_signo, &info, t);
1776}
1777EXPORT_SYMBOL(send_sig_mceerr);
38246735 1778
38246735
EB
1779int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
1780{
ae7795bc 1781 struct kernel_siginfo info;
38246735
EB
1782
1783 clear_siginfo(&info);
1784 info.si_signo = SIGSEGV;
1785 info.si_errno = 0;
1786 info.si_code = SEGV_BNDERR;
1787 info.si_addr = addr;
1788 info.si_lower = lower;
1789 info.si_upper = upper;
a89e9b8a 1790 return force_sig_info(&info);
38246735 1791}
38246735
EB
1792
1793#ifdef SEGV_PKUERR
1794int force_sig_pkuerr(void __user *addr, u32 pkey)
1795{
ae7795bc 1796 struct kernel_siginfo info;
38246735
EB
1797
1798 clear_siginfo(&info);
1799 info.si_signo = SIGSEGV;
1800 info.si_errno = 0;
1801 info.si_code = SEGV_PKUERR;
1802 info.si_addr = addr;
1803 info.si_pkey = pkey;
a89e9b8a 1804 return force_sig_info(&info);
38246735
EB
1805}
1806#endif
f8ec6601 1807
af5eeab7
EB
1808int force_sig_perf(void __user *addr, u32 type, u64 sig_data)
1809{
1810 struct kernel_siginfo info;
1811
1812 clear_siginfo(&info);
0683b531
EB
1813 info.si_signo = SIGTRAP;
1814 info.si_errno = 0;
1815 info.si_code = TRAP_PERF;
1816 info.si_addr = addr;
1817 info.si_perf_data = sig_data;
1818 info.si_perf_type = type;
1819
af5eeab7
EB
1820 return force_sig_info(&info);
1821}
1822
f71dd7dc
EB
1823/* For the crazy architectures that include trap information in
1824 * the errno field, instead of an actual errno value.
1825 */
1826int force_sig_ptrace_errno_trap(int errno, void __user *addr)
1827{
ae7795bc 1828 struct kernel_siginfo info;
f71dd7dc
EB
1829
1830 clear_siginfo(&info);
1831 info.si_signo = SIGTRAP;
1832 info.si_errno = errno;
1833 info.si_code = TRAP_HWBKPT;
1834 info.si_addr = addr;
a89e9b8a 1835 return force_sig_info(&info);
f71dd7dc
EB
1836}
1837
c4b92fc1
EB
1838int kill_pgrp(struct pid *pid, int sig, int priv)
1839{
146a505d
PE
1840 int ret;
1841
1842 read_lock(&tasklist_lock);
1843 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1844 read_unlock(&tasklist_lock);
1845
1846 return ret;
c4b92fc1
EB
1847}
1848EXPORT_SYMBOL(kill_pgrp);
1849
1850int kill_pid(struct pid *pid, int sig, int priv)
1851{
1852 return kill_pid_info(sig, __si_special(priv), pid);
1853}
1854EXPORT_SYMBOL(kill_pid);
1855
1da177e4
LT
1856/*
1857 * These functions support sending signals using preallocated sigqueue
1858 * structures. This is needed "because realtime applications cannot
1859 * afford to lose notifications of asynchronous events, like timer
5aba085e 1860 * expirations or I/O completions". In the case of POSIX Timers
1da177e4
LT
1861 * we allocate the sigqueue structure from the timer_create. If this
1862 * allocation fails we are able to report the failure to the application
1863 * with an EAGAIN error.
1864 */
1da177e4
LT
1865struct sigqueue *sigqueue_alloc(void)
1866{
69995ebb 1867 return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC);
1da177e4
LT
1868}
1869
1870void sigqueue_free(struct sigqueue *q)
1871{
1872 unsigned long flags;
60187d27
ON
1873 spinlock_t *lock = &current->sighand->siglock;
1874
1da177e4
LT
1875 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1876 /*
c8e85b4f
ON
1877 * We must hold ->siglock while testing q->list
1878 * to serialize with collect_signal() or with
da7978b0 1879 * __exit_signal()->flush_sigqueue().
1da177e4 1880 */
60187d27 1881 spin_lock_irqsave(lock, flags);
c8e85b4f
ON
1882 q->flags &= ~SIGQUEUE_PREALLOC;
1883 /*
1884 * If it is queued it will be freed when dequeued,
1885 * like the "regular" sigqueue.
1886 */
60187d27 1887 if (!list_empty(&q->list))
c8e85b4f 1888 q = NULL;
60187d27
ON
1889 spin_unlock_irqrestore(lock, flags);
1890
c8e85b4f
ON
1891 if (q)
1892 __sigqueue_free(q);
1da177e4
LT
1893}
1894
24122c7f 1895int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
9e3bd6c3 1896{
e62e6650 1897 int sig = q->info.si_signo;
2ca3515a 1898 struct sigpending *pending;
24122c7f 1899 struct task_struct *t;
e62e6650 1900 unsigned long flags;
163566f6 1901 int ret, result;
2ca3515a 1902
4cd4b6d4 1903 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
e62e6650
ON
1904
1905 ret = -1;
24122c7f
EB
1906 rcu_read_lock();
1907 t = pid_task(pid, type);
1908 if (!t || !likely(lock_task_sighand(t, &flags)))
e62e6650
ON
1909 goto ret;
1910
7e695a5e 1911 ret = 1; /* the signal is ignored */
163566f6 1912 result = TRACE_SIGNAL_IGNORED;
def8cf72 1913 if (!prepare_signal(sig, t, false))
e62e6650
ON
1914 goto out;
1915
1916 ret = 0;
9e3bd6c3
PE
1917 if (unlikely(!list_empty(&q->list))) {
1918 /*
1919 * If an SI_TIMER entry is already queue just increment
1920 * the overrun count.
1921 */
9e3bd6c3
PE
1922 BUG_ON(q->info.si_code != SI_TIMER);
1923 q->info.si_overrun++;
163566f6 1924 result = TRACE_SIGNAL_ALREADY_PENDING;
e62e6650 1925 goto out;
9e3bd6c3 1926 }
ba661292 1927 q->info.si_overrun = 0;
9e3bd6c3 1928
9e3bd6c3 1929 signalfd_notify(t, sig);
24122c7f 1930 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
9e3bd6c3
PE
1931 list_add_tail(&q->list, &pending->list);
1932 sigaddset(&pending->signal, sig);
07296149 1933 complete_signal(sig, t, type);
163566f6 1934 result = TRACE_SIGNAL_DELIVERED;
e62e6650 1935out:
24122c7f 1936 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
e62e6650
ON
1937 unlock_task_sighand(t, &flags);
1938ret:
24122c7f 1939 rcu_read_unlock();
e62e6650 1940 return ret;
9e3bd6c3
PE
1941}
1942
b53b0b9d
JFG
1943static void do_notify_pidfd(struct task_struct *task)
1944{
1945 struct pid *pid;
1946
1caf7d50 1947 WARN_ON(task->exit_state == 0);
b53b0b9d
JFG
1948 pid = task_pid(task);
1949 wake_up_all(&pid->wait_pidfd);
1950}
1951
1da177e4
LT
1952/*
1953 * Let a parent know about the death of a child.
1954 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2b2a1ff6 1955 *
53c8f9f1
ON
1956 * Returns true if our parent ignored us and so we've switched to
1957 * self-reaping.
1da177e4 1958 */
53c8f9f1 1959bool do_notify_parent(struct task_struct *tsk, int sig)
1da177e4 1960{
ae7795bc 1961 struct kernel_siginfo info;
1da177e4
LT
1962 unsigned long flags;
1963 struct sighand_struct *psig;
53c8f9f1 1964 bool autoreap = false;
bde8285e 1965 u64 utime, stime;
1da177e4
LT
1966
1967 BUG_ON(sig == -1);
1968
1969 /* do_notify_parent_cldstop should have been called instead. */
e1abb39c 1970 BUG_ON(task_is_stopped_or_traced(tsk));
1da177e4 1971
d21142ec 1972 BUG_ON(!tsk->ptrace &&
1da177e4
LT
1973 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1974
b53b0b9d
JFG
1975 /* Wake up all pidfd waiters */
1976 do_notify_pidfd(tsk);
1977
b6e238dc
ON
1978 if (sig != SIGCHLD) {
1979 /*
1980 * This is only possible if parent == real_parent.
1981 * Check if it has changed security domain.
1982 */
d1e7fd64 1983 if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id))
b6e238dc
ON
1984 sig = SIGCHLD;
1985 }
1986
faf1f22b 1987 clear_siginfo(&info);
1da177e4
LT
1988 info.si_signo = sig;
1989 info.si_errno = 0;
b488893a 1990 /*
32084504
EB
1991 * We are under tasklist_lock here so our parent is tied to
1992 * us and cannot change.
b488893a 1993 *
32084504
EB
1994 * task_active_pid_ns will always return the same pid namespace
1995 * until a task passes through release_task.
b488893a
PE
1996 *
1997 * write_lock() currently calls preempt_disable() which is the
1998 * same as rcu_read_lock(), but according to Oleg, this is not
1999 * correct to rely on this
2000 */
2001 rcu_read_lock();
32084504 2002 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
54ba47ed
EB
2003 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
2004 task_uid(tsk));
b488893a
PE
2005 rcu_read_unlock();
2006
bde8285e
FW
2007 task_cputime(tsk, &utime, &stime);
2008 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
2009 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1da177e4
LT
2010
2011 info.si_status = tsk->exit_code & 0x7f;
2012 if (tsk->exit_code & 0x80)
2013 info.si_code = CLD_DUMPED;
2014 else if (tsk->exit_code & 0x7f)
2015 info.si_code = CLD_KILLED;
2016 else {
2017 info.si_code = CLD_EXITED;
2018 info.si_status = tsk->exit_code >> 8;
2019 }
2020
2021 psig = tsk->parent->sighand;
2022 spin_lock_irqsave(&psig->siglock, flags);
d21142ec 2023 if (!tsk->ptrace && sig == SIGCHLD &&
1da177e4
LT
2024 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
2025 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
2026 /*
2027 * We are exiting and our parent doesn't care. POSIX.1
2028 * defines special semantics for setting SIGCHLD to SIG_IGN
2029 * or setting the SA_NOCLDWAIT flag: we should be reaped
2030 * automatically and not left for our parent's wait4 call.
2031 * Rather than having the parent do it as a magic kind of
2032 * signal handler, we just set this to tell do_exit that we
2033 * can be cleaned up without becoming a zombie. Note that
2034 * we still call __wake_up_parent in this case, because a
2035 * blocked sys_wait4 might now return -ECHILD.
2036 *
2037 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
2038 * is implementation-defined: we do (if you don't want
2039 * it, just use SIG_IGN instead).
2040 */
53c8f9f1 2041 autoreap = true;
1da177e4 2042 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
53c8f9f1 2043 sig = 0;
1da177e4 2044 }
61e713bd
EB
2045 /*
2046 * Send with __send_signal as si_pid and si_uid are in the
2047 * parent's namespaces.
2048 */
53c8f9f1 2049 if (valid_signal(sig) && sig)
61e713bd 2050 __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false);
1da177e4
LT
2051 __wake_up_parent(tsk, tsk->parent);
2052 spin_unlock_irqrestore(&psig->siglock, flags);
2b2a1ff6 2053
53c8f9f1 2054 return autoreap;
1da177e4
LT
2055}
2056
75b95953
TH
2057/**
2058 * do_notify_parent_cldstop - notify parent of stopped/continued state change
2059 * @tsk: task reporting the state change
2060 * @for_ptracer: the notification is for ptracer
2061 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
2062 *
2063 * Notify @tsk's parent that the stopped/continued state has changed. If
2064 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
2065 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
2066 *
2067 * CONTEXT:
2068 * Must be called with tasklist_lock at least read locked.
2069 */
2070static void do_notify_parent_cldstop(struct task_struct *tsk,
2071 bool for_ptracer, int why)
1da177e4 2072{
ae7795bc 2073 struct kernel_siginfo info;
1da177e4 2074 unsigned long flags;
bc505a47 2075 struct task_struct *parent;
1da177e4 2076 struct sighand_struct *sighand;
bde8285e 2077 u64 utime, stime;
1da177e4 2078
75b95953 2079 if (for_ptracer) {
bc505a47 2080 parent = tsk->parent;
75b95953 2081 } else {
bc505a47
ON
2082 tsk = tsk->group_leader;
2083 parent = tsk->real_parent;
2084 }
2085
faf1f22b 2086 clear_siginfo(&info);
1da177e4
LT
2087 info.si_signo = SIGCHLD;
2088 info.si_errno = 0;
b488893a 2089 /*
5aba085e 2090 * see comment in do_notify_parent() about the following 4 lines
b488893a
PE
2091 */
2092 rcu_read_lock();
17cf22c3 2093 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
54ba47ed 2094 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
b488893a
PE
2095 rcu_read_unlock();
2096
bde8285e
FW
2097 task_cputime(tsk, &utime, &stime);
2098 info.si_utime = nsec_to_clock_t(utime);
2099 info.si_stime = nsec_to_clock_t(stime);
1da177e4
LT
2100
2101 info.si_code = why;
2102 switch (why) {
2103 case CLD_CONTINUED:
2104 info.si_status = SIGCONT;
2105 break;
2106 case CLD_STOPPED:
2107 info.si_status = tsk->signal->group_exit_code & 0x7f;
2108 break;
2109 case CLD_TRAPPED:
2110 info.si_status = tsk->exit_code & 0x7f;
2111 break;
2112 default:
2113 BUG();
2114 }
2115
2116 sighand = parent->sighand;
2117 spin_lock_irqsave(&sighand->siglock, flags);
2118 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2119 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2120 __group_send_sig_info(SIGCHLD, &info, parent);
2121 /*
2122 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2123 */
2124 __wake_up_parent(tsk, parent);
2125 spin_unlock_irqrestore(&sighand->siglock, flags);
2126}
2127
6527de95 2128static inline bool may_ptrace_stop(void)
d5f70c00 2129{
d21142ec 2130 if (!likely(current->ptrace))
6527de95 2131 return false;
d5f70c00
ON
2132 /*
2133 * Are we in the middle of do_coredump?
2134 * If so and our tracer is also part of the coredump stopping
2135 * is a deadlock situation, and pointless because our tracer
2136 * is dead so don't allow us to stop.
2137 * If SIGKILL was already sent before the caller unlocked
999d9fc1 2138 * ->siglock we must see ->core_state != NULL. Otherwise it
d5f70c00 2139 * is safe to enter schedule().
9899d11f
ON
2140 *
2141 * This is almost outdated, a task with the pending SIGKILL can't
2142 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2143 * after SIGKILL was already dequeued.
d5f70c00 2144 */
999d9fc1 2145 if (unlikely(current->mm->core_state) &&
d5f70c00 2146 unlikely(current->mm == current->parent->mm))
6527de95 2147 return false;
d5f70c00 2148
6527de95 2149 return true;
d5f70c00
ON
2150}
2151
1a669c2f 2152/*
5aba085e 2153 * Return non-zero if there is a SIGKILL that should be waking us up.
1a669c2f
RM
2154 * Called with the siglock held.
2155 */
f99e9d8c 2156static bool sigkill_pending(struct task_struct *tsk)
1a669c2f 2157{
f99e9d8c
CB
2158 return sigismember(&tsk->pending.signal, SIGKILL) ||
2159 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1a669c2f
RM
2160}
2161
1da177e4
LT
2162/*
2163 * This must be called with current->sighand->siglock held.
2164 *
2165 * This should be the path for all ptrace stops.
2166 * We always set current->last_siginfo while stopped here.
2167 * That makes it a way to test a stopped process for
2168 * being ptrace-stopped vs being job-control-stopped.
2169 *
20686a30
ON
2170 * If we actually decide not to stop at all because the tracer
2171 * is gone, we keep current->exit_code unless clear_code.
1da177e4 2172 */
ae7795bc 2173static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
b8401150
NK
2174 __releases(&current->sighand->siglock)
2175 __acquires(&current->sighand->siglock)
1da177e4 2176{
ceb6bd67
TH
2177 bool gstop_done = false;
2178
1a669c2f
RM
2179 if (arch_ptrace_stop_needed(exit_code, info)) {
2180 /*
2181 * The arch code has something special to do before a
2182 * ptrace stop. This is allowed to block, e.g. for faults
2183 * on user stack pages. We can't keep the siglock while
2184 * calling arch_ptrace_stop, so we must release it now.
2185 * To preserve proper semantics, we must do this before
2186 * any signal bookkeeping like checking group_stop_count.
2187 * Meanwhile, a SIGKILL could come in before we retake the
2188 * siglock. That must prevent us from sleeping in TASK_TRACED.
2189 * So after regaining the lock, we must check for SIGKILL.
2190 */
2191 spin_unlock_irq(&current->sighand->siglock);
2192 arch_ptrace_stop(exit_code, info);
2193 spin_lock_irq(&current->sighand->siglock);
3d749b9e
ON
2194 if (sigkill_pending(current))
2195 return;
1a669c2f
RM
2196 }
2197
b5bf9a90
PZ
2198 set_special_state(TASK_TRACED);
2199
1da177e4 2200 /*
81be24b8
TH
2201 * We're committing to trapping. TRACED should be visible before
2202 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2203 * Also, transition to TRACED and updates to ->jobctl should be
2204 * atomic with respect to siglock and should be done after the arch
2205 * hook as siglock is released and regrabbed across it.
b5bf9a90
PZ
2206 *
2207 * TRACER TRACEE
2208 *
2209 * ptrace_attach()
2210 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2211 * do_wait()
2212 * set_current_state() smp_wmb();
2213 * ptrace_do_wait()
2214 * wait_task_stopped()
2215 * task_stopped_code()
2216 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
1da177e4 2217 */
b5bf9a90 2218 smp_wmb();
1da177e4
LT
2219
2220 current->last_siginfo = info;
2221 current->exit_code = exit_code;
2222
d79fdd6d 2223 /*
0ae8ce1c
TH
2224 * If @why is CLD_STOPPED, we're trapping to participate in a group
2225 * stop. Do the bookkeeping. Note that if SIGCONT was delievered
73ddff2b
TH
2226 * across siglock relocks since INTERRUPT was scheduled, PENDING
2227 * could be clear now. We act as if SIGCONT is received after
2228 * TASK_TRACED is entered - ignore it.
d79fdd6d 2229 */
a8f072c1 2230 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
ceb6bd67 2231 gstop_done = task_participate_group_stop(current);
d79fdd6d 2232
fb1d910c 2233 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
73ddff2b 2234 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
fb1d910c
TH
2235 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2236 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
73ddff2b 2237
81be24b8 2238 /* entering a trap, clear TRAPPING */
a8f072c1 2239 task_clear_jobctl_trapping(current);
d79fdd6d 2240
1da177e4
LT
2241 spin_unlock_irq(&current->sighand->siglock);
2242 read_lock(&tasklist_lock);
3d749b9e 2243 if (may_ptrace_stop()) {
ceb6bd67
TH
2244 /*
2245 * Notify parents of the stop.
2246 *
2247 * While ptraced, there are two parents - the ptracer and
2248 * the real_parent of the group_leader. The ptracer should
2249 * know about every stop while the real parent is only
2250 * interested in the completion of group stop. The states
2251 * for the two don't interact with each other. Notify
2252 * separately unless they're gonna be duplicates.
2253 */
2254 do_notify_parent_cldstop(current, true, why);
bb3696da 2255 if (gstop_done && ptrace_reparented(current))
ceb6bd67
TH
2256 do_notify_parent_cldstop(current, false, why);
2257
53da1d94
MS
2258 /*
2259 * Don't want to allow preemption here, because
2260 * sys_ptrace() needs this task to be inactive.
2261 *
2262 * XXX: implement read_unlock_no_resched().
2263 */
2264 preempt_disable();
1da177e4 2265 read_unlock(&tasklist_lock);
76f969e8 2266 cgroup_enter_frozen();
937c6b27 2267 preempt_enable_no_resched();
5d8f72b5 2268 freezable_schedule();
05b28926 2269 cgroup_leave_frozen(true);
1da177e4
LT
2270 } else {
2271 /*
2272 * By the time we got the lock, our tracer went away.
6405f7f4 2273 * Don't drop the lock yet, another tracer may come.
ceb6bd67
TH
2274 *
2275 * If @gstop_done, the ptracer went away between group stop
2276 * completion and here. During detach, it would have set
a8f072c1
TH
2277 * JOBCTL_STOP_PENDING on us and we'll re-enter
2278 * TASK_STOPPED in do_signal_stop() on return, so notifying
2279 * the real parent of the group stop completion is enough.
1da177e4 2280 */
ceb6bd67
TH
2281 if (gstop_done)
2282 do_notify_parent_cldstop(current, false, why);
2283
9899d11f 2284 /* tasklist protects us from ptrace_freeze_traced() */
6405f7f4 2285 __set_current_state(TASK_RUNNING);
20686a30
ON
2286 if (clear_code)
2287 current->exit_code = 0;
6405f7f4 2288 read_unlock(&tasklist_lock);
1da177e4
LT
2289 }
2290
2291 /*
2292 * We are back. Now reacquire the siglock before touching
2293 * last_siginfo, so that we are sure to have synchronized with
2294 * any signal-sending on another CPU that wants to examine it.
2295 */
2296 spin_lock_irq(&current->sighand->siglock);
2297 current->last_siginfo = NULL;
2298
544b2c91
TH
2299 /* LISTENING can be set only during STOP traps, clear it */
2300 current->jobctl &= ~JOBCTL_LISTENING;
2301
1da177e4
LT
2302 /*
2303 * Queued signals ignored us while we were stopped for tracing.
2304 * So check for any that we should take before resuming user mode.
b74d0deb 2305 * This sets TIF_SIGPENDING, but never clears it.
1da177e4 2306 */
b74d0deb 2307 recalc_sigpending_tsk(current);
1da177e4
LT
2308}
2309
3544d72a 2310static void ptrace_do_notify(int signr, int exit_code, int why)
1da177e4 2311{
ae7795bc 2312 kernel_siginfo_t info;
1da177e4 2313
faf1f22b 2314 clear_siginfo(&info);
3544d72a 2315 info.si_signo = signr;
1da177e4 2316 info.si_code = exit_code;
b488893a 2317 info.si_pid = task_pid_vnr(current);
078de5f7 2318 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1da177e4
LT
2319
2320 /* Let the debugger run. */
3544d72a
TH
2321 ptrace_stop(exit_code, why, 1, &info);
2322}
2323
2324void ptrace_notify(int exit_code)
2325{
2326 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
f784e8a7
ON
2327 if (unlikely(current->task_works))
2328 task_work_run();
3544d72a 2329
1da177e4 2330 spin_lock_irq(&current->sighand->siglock);
3544d72a 2331 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1da177e4
LT
2332 spin_unlock_irq(&current->sighand->siglock);
2333}
2334
73ddff2b
TH
2335/**
2336 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2337 * @signr: signr causing group stop if initiating
2338 *
2339 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2340 * and participate in it. If already set, participate in the existing
2341 * group stop. If participated in a group stop (and thus slept), %true is
2342 * returned with siglock released.
2343 *
2344 * If ptraced, this function doesn't handle stop itself. Instead,
2345 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2346 * untouched. The caller must ensure that INTERRUPT trap handling takes
2347 * places afterwards.
2348 *
2349 * CONTEXT:
2350 * Must be called with @current->sighand->siglock held, which is released
2351 * on %true return.
2352 *
2353 * RETURNS:
2354 * %false if group stop is already cancelled or ptrace trap is scheduled.
2355 * %true if participated in group stop.
1da177e4 2356 */
73ddff2b
TH
2357static bool do_signal_stop(int signr)
2358 __releases(&current->sighand->siglock)
1da177e4
LT
2359{
2360 struct signal_struct *sig = current->signal;
1da177e4 2361
a8f072c1 2362 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
b76808e6 2363 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
f558b7e4
ON
2364 struct task_struct *t;
2365
a8f072c1
TH
2366 /* signr will be recorded in task->jobctl for retries */
2367 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
d79fdd6d 2368
a8f072c1 2369 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
573cf9ad 2370 unlikely(signal_group_exit(sig)))
73ddff2b 2371 return false;
1da177e4 2372 /*
408a37de
TH
2373 * There is no group stop already in progress. We must
2374 * initiate one now.
2375 *
2376 * While ptraced, a task may be resumed while group stop is
2377 * still in effect and then receive a stop signal and
2378 * initiate another group stop. This deviates from the
2379 * usual behavior as two consecutive stop signals can't
780006ea
ON
2380 * cause two group stops when !ptraced. That is why we
2381 * also check !task_is_stopped(t) below.
408a37de
TH
2382 *
2383 * The condition can be distinguished by testing whether
2384 * SIGNAL_STOP_STOPPED is already set. Don't generate
2385 * group_exit_code in such case.
2386 *
2387 * This is not necessary for SIGNAL_STOP_CONTINUED because
2388 * an intervening stop signal is required to cause two
2389 * continued events regardless of ptrace.
1da177e4 2390 */
408a37de
TH
2391 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2392 sig->group_exit_code = signr;
1da177e4 2393
7dd3db54
TH
2394 sig->group_stop_count = 0;
2395
2396 if (task_set_jobctl_pending(current, signr | gstop))
2397 sig->group_stop_count++;
1da177e4 2398
8d38f203
ON
2399 t = current;
2400 while_each_thread(current, t) {
1da177e4 2401 /*
a122b341
ON
2402 * Setting state to TASK_STOPPED for a group
2403 * stop is always done with the siglock held,
2404 * so this check has no races.
1da177e4 2405 */
7dd3db54
TH
2406 if (!task_is_stopped(t) &&
2407 task_set_jobctl_pending(t, signr | gstop)) {
ae6d2ed7 2408 sig->group_stop_count++;
fb1d910c
TH
2409 if (likely(!(t->ptrace & PT_SEIZED)))
2410 signal_wake_up(t, 0);
2411 else
2412 ptrace_trap_notify(t);
a122b341 2413 }
d79fdd6d 2414 }
1da177e4 2415 }
73ddff2b 2416
d21142ec 2417 if (likely(!current->ptrace)) {
5224fa36 2418 int notify = 0;
1da177e4 2419
5224fa36
TH
2420 /*
2421 * If there are no other threads in the group, or if there
2422 * is a group stop in progress and we are the last to stop,
2423 * report to the parent.
2424 */
2425 if (task_participate_group_stop(current))
2426 notify = CLD_STOPPED;
2427
b5bf9a90 2428 set_special_state(TASK_STOPPED);
5224fa36
TH
2429 spin_unlock_irq(&current->sighand->siglock);
2430
62bcf9d9
TH
2431 /*
2432 * Notify the parent of the group stop completion. Because
2433 * we're not holding either the siglock or tasklist_lock
2434 * here, ptracer may attach inbetween; however, this is for
2435 * group stop and should always be delivered to the real
2436 * parent of the group leader. The new ptracer will get
2437 * its notification when this task transitions into
2438 * TASK_TRACED.
2439 */
5224fa36
TH
2440 if (notify) {
2441 read_lock(&tasklist_lock);
62bcf9d9 2442 do_notify_parent_cldstop(current, false, notify);
5224fa36
TH
2443 read_unlock(&tasklist_lock);
2444 }
2445
2446 /* Now we don't run again until woken by SIGCONT or SIGKILL */
76f969e8 2447 cgroup_enter_frozen();
5d8f72b5 2448 freezable_schedule();
73ddff2b 2449 return true;
d79fdd6d 2450 } else {
73ddff2b
TH
2451 /*
2452 * While ptraced, group stop is handled by STOP trap.
2453 * Schedule it and let the caller deal with it.
2454 */
2455 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2456 return false;
ae6d2ed7 2457 }
73ddff2b 2458}
1da177e4 2459
73ddff2b
TH
2460/**
2461 * do_jobctl_trap - take care of ptrace jobctl traps
2462 *
3544d72a
TH
2463 * When PT_SEIZED, it's used for both group stop and explicit
2464 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2465 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2466 * the stop signal; otherwise, %SIGTRAP.
2467 *
2468 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2469 * number as exit_code and no siginfo.
73ddff2b
TH
2470 *
2471 * CONTEXT:
2472 * Must be called with @current->sighand->siglock held, which may be
2473 * released and re-acquired before returning with intervening sleep.
2474 */
2475static void do_jobctl_trap(void)
2476{
3544d72a 2477 struct signal_struct *signal = current->signal;
73ddff2b 2478 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
ae6d2ed7 2479
3544d72a
TH
2480 if (current->ptrace & PT_SEIZED) {
2481 if (!signal->group_stop_count &&
2482 !(signal->flags & SIGNAL_STOP_STOPPED))
2483 signr = SIGTRAP;
2484 WARN_ON_ONCE(!signr);
2485 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2486 CLD_STOPPED);
2487 } else {
2488 WARN_ON_ONCE(!signr);
2489 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2490 current->exit_code = 0;
ae6d2ed7 2491 }
1da177e4
LT
2492}
2493
76f969e8
RG
2494/**
2495 * do_freezer_trap - handle the freezer jobctl trap
2496 *
2497 * Puts the task into frozen state, if only the task is not about to quit.
2498 * In this case it drops JOBCTL_TRAP_FREEZE.
2499 *
2500 * CONTEXT:
2501 * Must be called with @current->sighand->siglock held,
2502 * which is always released before returning.
2503 */
2504static void do_freezer_trap(void)
2505 __releases(&current->sighand->siglock)
2506{
2507 /*
2508 * If there are other trap bits pending except JOBCTL_TRAP_FREEZE,
2509 * let's make another loop to give it a chance to be handled.
2510 * In any case, we'll return back.
2511 */
2512 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2513 JOBCTL_TRAP_FREEZE) {
2514 spin_unlock_irq(&current->sighand->siglock);
2515 return;
2516 }
2517
2518 /*
2519 * Now we're sure that there is no pending fatal signal and no
2520 * pending traps. Clear TIF_SIGPENDING to not get out of schedule()
2521 * immediately (if there is a non-fatal signal pending), and
2522 * put the task into sleep.
2523 */
2524 __set_current_state(TASK_INTERRUPTIBLE);
2525 clear_thread_flag(TIF_SIGPENDING);
2526 spin_unlock_irq(&current->sighand->siglock);
2527 cgroup_enter_frozen();
2528 freezable_schedule();
2529}
2530
ae7795bc 2531static int ptrace_signal(int signr, kernel_siginfo_t *info)
18c98b65 2532{
8a352418
ON
2533 /*
2534 * We do not check sig_kernel_stop(signr) but set this marker
2535 * unconditionally because we do not know whether debugger will
2536 * change signr. This flag has no meaning unless we are going
2537 * to stop after return from ptrace_stop(). In this case it will
2538 * be checked in do_signal_stop(), we should only stop if it was
2539 * not cleared by SIGCONT while we were sleeping. See also the
2540 * comment in dequeue_signal().
2541 */
2542 current->jobctl |= JOBCTL_STOP_DEQUEUED;
fe1bc6a0 2543 ptrace_stop(signr, CLD_TRAPPED, 0, info);
18c98b65
RM
2544
2545 /* We're back. Did the debugger cancel the sig? */
2546 signr = current->exit_code;
2547 if (signr == 0)
2548 return signr;
2549
2550 current->exit_code = 0;
2551
5aba085e
RD
2552 /*
2553 * Update the siginfo structure if the signal has
2554 * changed. If the debugger wanted something
2555 * specific in the siginfo structure then it should
2556 * have updated *info via PTRACE_SETSIGINFO.
2557 */
18c98b65 2558 if (signr != info->si_signo) {
faf1f22b 2559 clear_siginfo(info);
18c98b65
RM
2560 info->si_signo = signr;
2561 info->si_errno = 0;
2562 info->si_code = SI_USER;
6b550f94 2563 rcu_read_lock();
18c98b65 2564 info->si_pid = task_pid_vnr(current->parent);
54ba47ed
EB
2565 info->si_uid = from_kuid_munged(current_user_ns(),
2566 task_uid(current->parent));
6b550f94 2567 rcu_read_unlock();
18c98b65
RM
2568 }
2569
2570 /* If the (new) signal is now blocked, requeue it. */
2571 if (sigismember(&current->blocked, signr)) {
b21c5bd5 2572 send_signal(signr, info, current, PIDTYPE_PID);
18c98b65
RM
2573 signr = 0;
2574 }
2575
2576 return signr;
2577}
2578
6ac05e83
PC
2579static void hide_si_addr_tag_bits(struct ksignal *ksig)
2580{
2581 switch (siginfo_layout(ksig->sig, ksig->info.si_code)) {
2582 case SIL_FAULT:
9abcabe3 2583 case SIL_FAULT_TRAPNO:
6ac05e83
PC
2584 case SIL_FAULT_MCEERR:
2585 case SIL_FAULT_BNDERR:
2586 case SIL_FAULT_PKUERR:
fb6cc127 2587 case SIL_PERF_EVENT:
6ac05e83
PC
2588 ksig->info.si_addr = arch_untagged_si_addr(
2589 ksig->info.si_addr, ksig->sig, ksig->info.si_code);
2590 break;
2591 case SIL_KILL:
2592 case SIL_TIMER:
2593 case SIL_POLL:
2594 case SIL_CHLD:
2595 case SIL_RT:
2596 case SIL_SYS:
2597 break;
2598 }
2599}
2600
20ab7218 2601bool get_signal(struct ksignal *ksig)
1da177e4 2602{
f6b76d4f
ON
2603 struct sighand_struct *sighand = current->sighand;
2604 struct signal_struct *signal = current->signal;
2605 int signr;
1da177e4 2606
35d0b389
JA
2607 if (unlikely(current->task_works))
2608 task_work_run();
2609
12db8b69
JA
2610 /*
2611 * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so
2612 * that the arch handlers don't all have to do it. If we get here
2613 * without TIF_SIGPENDING, just exit after running signal work.
2614 */
12db8b69
JA
2615 if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) {
2616 if (test_thread_flag(TIF_NOTIFY_SIGNAL))
2617 tracehook_notify_signal();
2618 if (!task_sigpending(current))
2619 return false;
2620 }
12db8b69 2621
0326f5a9 2622 if (unlikely(uprobe_deny_signal()))
20ab7218 2623 return false;
0326f5a9 2624
13b1c3d4 2625 /*
5d8f72b5
ON
2626 * Do this once, we can't return to user-mode if freezing() == T.
2627 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2628 * thus do not need another check after return.
13b1c3d4 2629 */
fc558a74
RW
2630 try_to_freeze();
2631
5d8f72b5 2632relock:
f6b76d4f 2633 spin_lock_irq(&sighand->siglock);
e91b4816 2634
021e1ae3
ON
2635 /*
2636 * Every stopped thread goes here after wakeup. Check to see if
2637 * we should notify the parent, prepare_signal(SIGCONT) encodes
2638 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2639 */
f6b76d4f 2640 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
c672af35
TH
2641 int why;
2642
2643 if (signal->flags & SIGNAL_CLD_CONTINUED)
2644 why = CLD_CONTINUED;
2645 else
2646 why = CLD_STOPPED;
2647
f6b76d4f 2648 signal->flags &= ~SIGNAL_CLD_MASK;
e4420551 2649
ae6d2ed7 2650 spin_unlock_irq(&sighand->siglock);
fa00b80b 2651
ceb6bd67
TH
2652 /*
2653 * Notify the parent that we're continuing. This event is
2654 * always per-process and doesn't make whole lot of sense
2655 * for ptracers, who shouldn't consume the state via
2656 * wait(2) either, but, for backward compatibility, notify
2657 * the ptracer of the group leader too unless it's gonna be
2658 * a duplicate.
2659 */
edf2ed15 2660 read_lock(&tasklist_lock);
ceb6bd67
TH
2661 do_notify_parent_cldstop(current, false, why);
2662
bb3696da
ON
2663 if (ptrace_reparented(current->group_leader))
2664 do_notify_parent_cldstop(current->group_leader,
2665 true, why);
edf2ed15 2666 read_unlock(&tasklist_lock);
ceb6bd67 2667
e4420551
ON
2668 goto relock;
2669 }
2670
35634ffa 2671 /* Has this task already been marked for death? */
cf43a757
EB
2672 if (signal_group_exit(signal)) {
2673 ksig->info.si_signo = signr = SIGKILL;
2674 sigdelset(&current->pending.signal, SIGKILL);
98af37d6
ZW
2675 trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
2676 &sighand->action[SIGKILL - 1]);
cf43a757 2677 recalc_sigpending();
35634ffa 2678 goto fatal;
cf43a757 2679 }
35634ffa 2680
1da177e4
LT
2681 for (;;) {
2682 struct k_sigaction *ka;
1be53963 2683
dd1d6772
TH
2684 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2685 do_signal_stop(0))
7bcf6a2c 2686 goto relock;
1be53963 2687
76f969e8
RG
2688 if (unlikely(current->jobctl &
2689 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2690 if (current->jobctl & JOBCTL_TRAP_MASK) {
2691 do_jobctl_trap();
2692 spin_unlock_irq(&sighand->siglock);
2693 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2694 do_freezer_trap();
2695
2696 goto relock;
2697 }
2698
2699 /*
2700 * If the task is leaving the frozen state, let's update
2701 * cgroup counters and reset the frozen bit.
2702 */
2703 if (unlikely(cgroup_task_frozen(current))) {
73ddff2b 2704 spin_unlock_irq(&sighand->siglock);
cb2c4cd8 2705 cgroup_leave_frozen(false);
73ddff2b
TH
2706 goto relock;
2707 }
1da177e4 2708
7146db33
EB
2709 /*
2710 * Signals generated by the execution of an instruction
2711 * need to be delivered before any other pending signals
2712 * so that the instruction pointer in the signal stack
2713 * frame points to the faulting instruction.
2714 */
2715 signr = dequeue_synchronous_signal(&ksig->info);
2716 if (!signr)
2717 signr = dequeue_signal(current, &current->blocked, &ksig->info);
7bcf6a2c 2718
dd1d6772
TH
2719 if (!signr)
2720 break; /* will return 0 */
7bcf6a2c 2721
8a352418 2722 if (unlikely(current->ptrace) && signr != SIGKILL) {
828b1f65 2723 signr = ptrace_signal(signr, &ksig->info);
dd1d6772
TH
2724 if (!signr)
2725 continue;
1da177e4
LT
2726 }
2727
dd1d6772
TH
2728 ka = &sighand->action[signr-1];
2729
f9d4257e 2730 /* Trace actually delivered signals. */
828b1f65 2731 trace_signal_deliver(signr, &ksig->info, ka);
f9d4257e 2732
1da177e4
LT
2733 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2734 continue;
2735 if (ka->sa.sa_handler != SIG_DFL) {
2736 /* Run the handler. */
828b1f65 2737 ksig->ka = *ka;
1da177e4
LT
2738
2739 if (ka->sa.sa_flags & SA_ONESHOT)
2740 ka->sa.sa_handler = SIG_DFL;
2741
2742 break; /* will return non-zero "signr" value */
2743 }
2744
2745 /*
2746 * Now we are doing the default action for this signal.
2747 */
2748 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2749 continue;
2750
84d73786 2751 /*
0fbc26a6 2752 * Global init gets no signals it doesn't want.
b3bfa0cb
SB
2753 * Container-init gets no signals it doesn't want from same
2754 * container.
2755 *
2756 * Note that if global/container-init sees a sig_kernel_only()
2757 * signal here, the signal must have been generated internally
2758 * or must have come from an ancestor namespace. In either
2759 * case, the signal cannot be dropped.
84d73786 2760 */
fae5fa44 2761 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
b3bfa0cb 2762 !sig_kernel_only(signr))
1da177e4
LT
2763 continue;
2764
2765 if (sig_kernel_stop(signr)) {
2766 /*
2767 * The default action is to stop all threads in
2768 * the thread group. The job control signals
2769 * do nothing in an orphaned pgrp, but SIGSTOP
2770 * always works. Note that siglock needs to be
2771 * dropped during the call to is_orphaned_pgrp()
2772 * because of lock ordering with tasklist_lock.
2773 * This allows an intervening SIGCONT to be posted.
2774 * We need to check for that and bail out if necessary.
2775 */
2776 if (signr != SIGSTOP) {
f6b76d4f 2777 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2778
2779 /* signals can be posted during this window */
2780
3e7cd6c4 2781 if (is_current_pgrp_orphaned())
1da177e4
LT
2782 goto relock;
2783
f6b76d4f 2784 spin_lock_irq(&sighand->siglock);
1da177e4
LT
2785 }
2786
828b1f65 2787 if (likely(do_signal_stop(ksig->info.si_signo))) {
1da177e4
LT
2788 /* It released the siglock. */
2789 goto relock;
2790 }
2791
2792 /*
2793 * We didn't actually stop, due to a race
2794 * with SIGCONT or something like that.
2795 */
2796 continue;
2797 }
2798
35634ffa 2799 fatal:
f6b76d4f 2800 spin_unlock_irq(&sighand->siglock);
f2b31bb5
RG
2801 if (unlikely(cgroup_task_frozen(current)))
2802 cgroup_leave_frozen(true);
1da177e4
LT
2803
2804 /*
2805 * Anything else is fatal, maybe with a core dump.
2806 */
2807 current->flags |= PF_SIGNALED;
2dce81bf 2808
1da177e4 2809 if (sig_kernel_coredump(signr)) {
2dce81bf 2810 if (print_fatal_signals)
828b1f65 2811 print_fatal_signal(ksig->info.si_signo);
2b5faa4c 2812 proc_coredump_connector(current);
1da177e4
LT
2813 /*
2814 * If it was able to dump core, this kills all
2815 * other threads in the group and synchronizes with
2816 * their demise. If we lost the race with another
2817 * thread getting here, it set group_exit_code
2818 * first and our do_group_exit call below will use
2819 * that value and ignore the one we pass it.
2820 */
828b1f65 2821 do_coredump(&ksig->info);
1da177e4
LT
2822 }
2823
10442994
JA
2824 /*
2825 * PF_IO_WORKER threads will catch and exit on fatal signals
2826 * themselves. They have cleanup that must be performed, so
2827 * we cannot call do_exit() on their behalf.
2828 */
2829 if (current->flags & PF_IO_WORKER)
2830 goto out;
2831
1da177e4
LT
2832 /*
2833 * Death signals, no core dump.
2834 */
828b1f65 2835 do_group_exit(ksig->info.si_signo);
1da177e4
LT
2836 /* NOTREACHED */
2837 }
f6b76d4f 2838 spin_unlock_irq(&sighand->siglock);
10442994 2839out:
828b1f65 2840 ksig->sig = signr;
6ac05e83
PC
2841
2842 if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS))
2843 hide_si_addr_tag_bits(ksig);
2844
828b1f65 2845 return ksig->sig > 0;
1da177e4
LT
2846}
2847
5e6292c0 2848/**
efee984c 2849 * signal_delivered -
10b1c7ac 2850 * @ksig: kernel signal struct
efee984c 2851 * @stepping: nonzero if debugger single-step or block-step in use
5e6292c0 2852 *
e227867f 2853 * This function should be called when a signal has successfully been
10b1c7ac 2854 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
efee984c 2855 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
10b1c7ac 2856 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
5e6292c0 2857 */
10b1c7ac 2858static void signal_delivered(struct ksignal *ksig, int stepping)
5e6292c0
MF
2859{
2860 sigset_t blocked;
2861
a610d6e6
AV
2862 /* A signal was successfully delivered, and the
2863 saved sigmask was stored on the signal frame,
2864 and will be restored by sigreturn. So we can
2865 simply clear the restore sigmask flag. */
2866 clear_restore_sigmask();
2867
10b1c7ac
RW
2868 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2869 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2870 sigaddset(&blocked, ksig->sig);
5e6292c0 2871 set_current_blocked(&blocked);
df5601f9 2872 tracehook_signal_handler(stepping);
5e6292c0
MF
2873}
2874
2ce5da17
AV
2875void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2876{
2877 if (failed)
cb44c9a0 2878 force_sigsegv(ksig->sig);
2ce5da17 2879 else
10b1c7ac 2880 signal_delivered(ksig, stepping);
2ce5da17
AV
2881}
2882
0edceb7b
ON
2883/*
2884 * It could be that complete_signal() picked us to notify about the
fec9993d
ON
2885 * group-wide signal. Other threads should be notified now to take
2886 * the shared signals in @which since we will not.
0edceb7b 2887 */
f646e227 2888static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
0edceb7b 2889{
f646e227 2890 sigset_t retarget;
0edceb7b
ON
2891 struct task_struct *t;
2892
f646e227
ON
2893 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2894 if (sigisemptyset(&retarget))
2895 return;
2896
0edceb7b
ON
2897 t = tsk;
2898 while_each_thread(tsk, t) {
fec9993d
ON
2899 if (t->flags & PF_EXITING)
2900 continue;
2901
2902 if (!has_pending_signals(&retarget, &t->blocked))
2903 continue;
2904 /* Remove the signals this thread can handle. */
2905 sigandsets(&retarget, &retarget, &t->blocked);
2906
5c251e9d 2907 if (!task_sigpending(t))
fec9993d
ON
2908 signal_wake_up(t, 0);
2909
2910 if (sigisemptyset(&retarget))
2911 break;
0edceb7b
ON
2912 }
2913}
2914
d12619b5
ON
2915void exit_signals(struct task_struct *tsk)
2916{
2917 int group_stop = 0;
f646e227 2918 sigset_t unblocked;
d12619b5 2919
77e4ef99
TH
2920 /*
2921 * @tsk is about to have PF_EXITING set - lock out users which
2922 * expect stable threadgroup.
2923 */
780de9dd 2924 cgroup_threadgroup_change_begin(tsk);
77e4ef99 2925
5dee1707
ON
2926 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2927 tsk->flags |= PF_EXITING;
780de9dd 2928 cgroup_threadgroup_change_end(tsk);
5dee1707 2929 return;
d12619b5
ON
2930 }
2931
5dee1707 2932 spin_lock_irq(&tsk->sighand->siglock);
d12619b5
ON
2933 /*
2934 * From now this task is not visible for group-wide signals,
2935 * see wants_signal(), do_signal_stop().
2936 */
2937 tsk->flags |= PF_EXITING;
77e4ef99 2938
780de9dd 2939 cgroup_threadgroup_change_end(tsk);
77e4ef99 2940
5c251e9d 2941 if (!task_sigpending(tsk))
5dee1707
ON
2942 goto out;
2943
f646e227
ON
2944 unblocked = tsk->blocked;
2945 signotset(&unblocked);
2946 retarget_shared_pending(tsk, &unblocked);
5dee1707 2947
a8f072c1 2948 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
e5c1902e 2949 task_participate_group_stop(tsk))
edf2ed15 2950 group_stop = CLD_STOPPED;
5dee1707 2951out:
d12619b5
ON
2952 spin_unlock_irq(&tsk->sighand->siglock);
2953
62bcf9d9
TH
2954 /*
2955 * If group stop has completed, deliver the notification. This
2956 * should always go to the real parent of the group leader.
2957 */
ae6d2ed7 2958 if (unlikely(group_stop)) {
d12619b5 2959 read_lock(&tasklist_lock);
62bcf9d9 2960 do_notify_parent_cldstop(tsk, false, group_stop);
d12619b5
ON
2961 read_unlock(&tasklist_lock);
2962 }
2963}
2964
1da177e4
LT
2965/*
2966 * System call entry points.
2967 */
2968
41c57892
RD
2969/**
2970 * sys_restart_syscall - restart a system call
2971 */
754fe8d2 2972SYSCALL_DEFINE0(restart_syscall)
1da177e4 2973{
f56141e3 2974 struct restart_block *restart = &current->restart_block;
1da177e4
LT
2975 return restart->fn(restart);
2976}
2977
2978long do_no_restart_syscall(struct restart_block *param)
2979{
2980 return -EINTR;
2981}
2982
b182801a
ON
2983static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2984{
5c251e9d 2985 if (task_sigpending(tsk) && !thread_group_empty(tsk)) {
b182801a
ON
2986 sigset_t newblocked;
2987 /* A set of now blocked but previously unblocked signals. */
702a5073 2988 sigandnsets(&newblocked, newset, &current->blocked);
b182801a
ON
2989 retarget_shared_pending(tsk, &newblocked);
2990 }
2991 tsk->blocked = *newset;
2992 recalc_sigpending();
2993}
2994
e6fa16ab
ON
2995/**
2996 * set_current_blocked - change current->blocked mask
2997 * @newset: new mask
2998 *
2999 * It is wrong to change ->blocked directly, this helper should be used
3000 * to ensure the process can't miss a shared signal we are going to block.
1da177e4 3001 */
77097ae5
AV
3002void set_current_blocked(sigset_t *newset)
3003{
77097ae5 3004 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
0c4a8423 3005 __set_current_blocked(newset);
77097ae5
AV
3006}
3007
3008void __set_current_blocked(const sigset_t *newset)
e6fa16ab
ON
3009{
3010 struct task_struct *tsk = current;
3011
c7be96af
WL
3012 /*
3013 * In case the signal mask hasn't changed, there is nothing we need
3014 * to do. The current->blocked shouldn't be modified by other task.
3015 */
3016 if (sigequalsets(&tsk->blocked, newset))
3017 return;
3018
e6fa16ab 3019 spin_lock_irq(&tsk->sighand->siglock);
b182801a 3020 __set_task_blocked(tsk, newset);
e6fa16ab
ON
3021 spin_unlock_irq(&tsk->sighand->siglock);
3022}
1da177e4
LT
3023
3024/*
3025 * This is also useful for kernel threads that want to temporarily
3026 * (or permanently) block certain signals.
3027 *
3028 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
3029 * interface happily blocks "unblockable" signals like SIGKILL
3030 * and friends.
3031 */
3032int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
3033{
73ef4aeb
ON
3034 struct task_struct *tsk = current;
3035 sigset_t newset;
1da177e4 3036
73ef4aeb 3037 /* Lockless, only current can change ->blocked, never from irq */
a26fd335 3038 if (oldset)
73ef4aeb 3039 *oldset = tsk->blocked;
a26fd335 3040
1da177e4
LT
3041 switch (how) {
3042 case SIG_BLOCK:
73ef4aeb 3043 sigorsets(&newset, &tsk->blocked, set);
1da177e4
LT
3044 break;
3045 case SIG_UNBLOCK:
702a5073 3046 sigandnsets(&newset, &tsk->blocked, set);
1da177e4
LT
3047 break;
3048 case SIG_SETMASK:
73ef4aeb 3049 newset = *set;
1da177e4
LT
3050 break;
3051 default:
73ef4aeb 3052 return -EINVAL;
1da177e4 3053 }
a26fd335 3054
77097ae5 3055 __set_current_blocked(&newset);
73ef4aeb 3056 return 0;
1da177e4 3057}
fb50f5a4 3058EXPORT_SYMBOL(sigprocmask);
1da177e4 3059
ded653cc
DD
3060/*
3061 * The api helps set app-provided sigmasks.
3062 *
3063 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
3064 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
b772434b
ON
3065 *
3066 * Note that it does set_restore_sigmask() in advance, so it must be always
3067 * paired with restore_saved_sigmask_unless() before return from syscall.
ded653cc 3068 */
b772434b 3069int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize)
ded653cc 3070{
b772434b 3071 sigset_t kmask;
ded653cc 3072
b772434b
ON
3073 if (!umask)
3074 return 0;
ded653cc
DD
3075 if (sigsetsize != sizeof(sigset_t))
3076 return -EINVAL;
b772434b 3077 if (copy_from_user(&kmask, umask, sizeof(sigset_t)))
ded653cc
DD
3078 return -EFAULT;
3079
b772434b
ON
3080 set_restore_sigmask();
3081 current->saved_sigmask = current->blocked;
3082 set_current_blocked(&kmask);
ded653cc
DD
3083
3084 return 0;
3085}
ded653cc
DD
3086
3087#ifdef CONFIG_COMPAT
b772434b 3088int set_compat_user_sigmask(const compat_sigset_t __user *umask,
ded653cc
DD
3089 size_t sigsetsize)
3090{
b772434b 3091 sigset_t kmask;
ded653cc 3092
b772434b
ON
3093 if (!umask)
3094 return 0;
ded653cc
DD
3095 if (sigsetsize != sizeof(compat_sigset_t))
3096 return -EINVAL;
b772434b 3097 if (get_compat_sigset(&kmask, umask))
ded653cc
DD
3098 return -EFAULT;
3099
b772434b
ON
3100 set_restore_sigmask();
3101 current->saved_sigmask = current->blocked;
3102 set_current_blocked(&kmask);
ded653cc
DD
3103
3104 return 0;
3105}
ded653cc
DD
3106#endif
3107
41c57892
RD
3108/**
3109 * sys_rt_sigprocmask - change the list of currently blocked signals
3110 * @how: whether to add, remove, or set signals
ada9c933 3111 * @nset: stores pending signals
41c57892
RD
3112 * @oset: previous value of signal mask if non-null
3113 * @sigsetsize: size of sigset_t type
3114 */
bb7efee2 3115SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
17da2bd9 3116 sigset_t __user *, oset, size_t, sigsetsize)
1da177e4 3117{
1da177e4 3118 sigset_t old_set, new_set;
bb7efee2 3119 int error;
1da177e4
LT
3120
3121 /* XXX: Don't preclude handling different sized sigset_t's. */
3122 if (sigsetsize != sizeof(sigset_t))
bb7efee2 3123 return -EINVAL;
1da177e4 3124
bb7efee2
ON
3125 old_set = current->blocked;
3126
3127 if (nset) {
3128 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3129 return -EFAULT;
1da177e4
LT
3130 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3131
bb7efee2 3132 error = sigprocmask(how, &new_set, NULL);
1da177e4 3133 if (error)
bb7efee2
ON
3134 return error;
3135 }
1da177e4 3136
bb7efee2
ON
3137 if (oset) {
3138 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3139 return -EFAULT;
1da177e4 3140 }
bb7efee2
ON
3141
3142 return 0;
1da177e4
LT
3143}
3144
322a56cb 3145#ifdef CONFIG_COMPAT
322a56cb
AV
3146COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3147 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
1da177e4 3148{
322a56cb
AV
3149 sigset_t old_set = current->blocked;
3150
3151 /* XXX: Don't preclude handling different sized sigset_t's. */
3152 if (sigsetsize != sizeof(sigset_t))
3153 return -EINVAL;
3154
3155 if (nset) {
322a56cb
AV
3156 sigset_t new_set;
3157 int error;
3968cf62 3158 if (get_compat_sigset(&new_set, nset))
322a56cb 3159 return -EFAULT;
322a56cb
AV
3160 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3161
3162 error = sigprocmask(how, &new_set, NULL);
3163 if (error)
3164 return error;
3165 }
f454322e 3166 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
322a56cb
AV
3167}
3168#endif
1da177e4 3169
b1d294c8 3170static void do_sigpending(sigset_t *set)
1da177e4 3171{
1da177e4 3172 spin_lock_irq(&current->sighand->siglock);
fe9c1db2 3173 sigorsets(set, &current->pending.signal,
1da177e4
LT
3174 &current->signal->shared_pending.signal);
3175 spin_unlock_irq(&current->sighand->siglock);
3176
3177 /* Outside the lock because only this thread touches it. */
fe9c1db2 3178 sigandsets(set, &current->blocked, set);
5aba085e 3179}
1da177e4 3180
41c57892
RD
3181/**
3182 * sys_rt_sigpending - examine a pending signal that has been raised
3183 * while blocked
20f22ab4 3184 * @uset: stores pending signals
41c57892
RD
3185 * @sigsetsize: size of sigset_t type or larger
3186 */
fe9c1db2 3187SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
1da177e4 3188{
fe9c1db2 3189 sigset_t set;
176826af
DL
3190
3191 if (sigsetsize > sizeof(*uset))
3192 return -EINVAL;
3193
b1d294c8
CB
3194 do_sigpending(&set);
3195
3196 if (copy_to_user(uset, &set, sigsetsize))
3197 return -EFAULT;
3198
3199 return 0;
fe9c1db2
AV
3200}
3201
3202#ifdef CONFIG_COMPAT
fe9c1db2
AV
3203COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3204 compat_size_t, sigsetsize)
1da177e4 3205{
fe9c1db2 3206 sigset_t set;
176826af
DL
3207
3208 if (sigsetsize > sizeof(*uset))
3209 return -EINVAL;
3210
b1d294c8
CB
3211 do_sigpending(&set);
3212
3213 return put_compat_sigset(uset, &set, sigsetsize);
1da177e4 3214}
fe9c1db2 3215#endif
1da177e4 3216
4ce5f9c9
EB
3217static const struct {
3218 unsigned char limit, layout;
3219} sig_sicodes[] = {
3220 [SIGILL] = { NSIGILL, SIL_FAULT },
3221 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3222 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3223 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3224 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3225#if defined(SIGEMT)
3226 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3227#endif
3228 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3229 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3230 [SIGSYS] = { NSIGSYS, SIL_SYS },
3231};
3232
b2a2ab52 3233static bool known_siginfo_layout(unsigned sig, int si_code)
4ce5f9c9
EB
3234{
3235 if (si_code == SI_KERNEL)
3236 return true;
3237 else if ((si_code > SI_USER)) {
3238 if (sig_specific_sicodes(sig)) {
3239 if (si_code <= sig_sicodes[sig].limit)
3240 return true;
3241 }
3242 else if (si_code <= NSIGPOLL)
3243 return true;
3244 }
3245 else if (si_code >= SI_DETHREAD)
3246 return true;
3247 else if (si_code == SI_ASYNCNL)
3248 return true;
3249 return false;
3250}
3251
a3670058 3252enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
cc731525
EB
3253{
3254 enum siginfo_layout layout = SIL_KILL;
3255 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
4ce5f9c9
EB
3256 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3257 (si_code <= sig_sicodes[sig].limit)) {
3258 layout = sig_sicodes[sig].layout;
31931c93
EB
3259 /* Handle the exceptions */
3260 if ((sig == SIGBUS) &&
3261 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3262 layout = SIL_FAULT_MCEERR;
3263 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3264 layout = SIL_FAULT_BNDERR;
3265#ifdef SEGV_PKUERR
3266 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3267 layout = SIL_FAULT_PKUERR;
3268#endif
ed8e5080
ME
3269 else if ((sig == SIGTRAP) && (si_code == TRAP_PERF))
3270 layout = SIL_PERF_EVENT;
9abcabe3
EB
3271#ifdef __ARCH_SI_TRAPNO
3272 else if (layout == SIL_FAULT)
3273 layout = SIL_FAULT_TRAPNO;
3274#endif
31931c93 3275 }
cc731525
EB
3276 else if (si_code <= NSIGPOLL)
3277 layout = SIL_POLL;
3278 } else {
3279 if (si_code == SI_TIMER)
3280 layout = SIL_TIMER;
3281 else if (si_code == SI_SIGIO)
3282 layout = SIL_POLL;
3283 else if (si_code < 0)
3284 layout = SIL_RT;
cc731525
EB
3285 }
3286 return layout;
3287}
3288
4ce5f9c9
EB
3289static inline char __user *si_expansion(const siginfo_t __user *info)
3290{
3291 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3292}
3293
ae7795bc 3294int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
1da177e4 3295{
4ce5f9c9 3296 char __user *expansion = si_expansion(to);
ae7795bc 3297 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
1da177e4 3298 return -EFAULT;
4ce5f9c9 3299 if (clear_user(expansion, SI_EXPANSION_SIZE))
1da177e4 3300 return -EFAULT;
c999b933 3301 return 0;
1da177e4
LT
3302}
3303
601d5abf
EB
3304static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3305 const siginfo_t __user *from)
4cd2e0e7 3306{
601d5abf 3307 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
4ce5f9c9
EB
3308 char __user *expansion = si_expansion(from);
3309 char buf[SI_EXPANSION_SIZE];
3310 int i;
3311 /*
3312 * An unknown si_code might need more than
3313 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3314 * extra bytes are 0. This guarantees copy_siginfo_to_user
3315 * will return this data to userspace exactly.
3316 */
3317 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3318 return -EFAULT;
3319 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3320 if (buf[i] != 0)
3321 return -E2BIG;
3322 }
3323 }
4cd2e0e7
EB
3324 return 0;
3325}
3326
601d5abf
EB
3327static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3328 const siginfo_t __user *from)
3329{
3330 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3331 return -EFAULT;
3332 to->si_signo = signo;
3333 return post_copy_siginfo_from_user(to, from);
3334}
3335
3336int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3337{
3338 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3339 return -EFAULT;
3340 return post_copy_siginfo_from_user(to, from);
3341}
3342
212a36a1 3343#ifdef CONFIG_COMPAT
c3b3f524
CH
3344/**
3345 * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo
3346 * @to: compat siginfo destination
3347 * @from: kernel siginfo source
3348 *
3349 * Note: This function does not work properly for the SIGCHLD on x32, but
3350 * fortunately it doesn't have to. The only valid callers for this function are
3351 * copy_siginfo_to_user32, which is overriden for x32 and the coredump code.
3352 * The latter does not care because SIGCHLD will never cause a coredump.
3353 */
3354void copy_siginfo_to_external32(struct compat_siginfo *to,
3355 const struct kernel_siginfo *from)
ea64d5ac 3356{
c3b3f524 3357 memset(to, 0, sizeof(*to));
ea64d5ac 3358
c3b3f524
CH
3359 to->si_signo = from->si_signo;
3360 to->si_errno = from->si_errno;
3361 to->si_code = from->si_code;
ea64d5ac
EB
3362 switch(siginfo_layout(from->si_signo, from->si_code)) {
3363 case SIL_KILL:
c3b3f524
CH
3364 to->si_pid = from->si_pid;
3365 to->si_uid = from->si_uid;
ea64d5ac
EB
3366 break;
3367 case SIL_TIMER:
c3b3f524
CH
3368 to->si_tid = from->si_tid;
3369 to->si_overrun = from->si_overrun;
3370 to->si_int = from->si_int;
ea64d5ac
EB
3371 break;
3372 case SIL_POLL:
c3b3f524
CH
3373 to->si_band = from->si_band;
3374 to->si_fd = from->si_fd;
ea64d5ac
EB
3375 break;
3376 case SIL_FAULT:
c3b3f524 3377 to->si_addr = ptr_to_compat(from->si_addr);
9abcabe3
EB
3378 break;
3379 case SIL_FAULT_TRAPNO:
3380 to->si_addr = ptr_to_compat(from->si_addr);
c3b3f524 3381 to->si_trapno = from->si_trapno;
31931c93
EB
3382 break;
3383 case SIL_FAULT_MCEERR:
c3b3f524 3384 to->si_addr = ptr_to_compat(from->si_addr);
c3b3f524 3385 to->si_addr_lsb = from->si_addr_lsb;
31931c93
EB
3386 break;
3387 case SIL_FAULT_BNDERR:
c3b3f524 3388 to->si_addr = ptr_to_compat(from->si_addr);
c3b3f524
CH
3389 to->si_lower = ptr_to_compat(from->si_lower);
3390 to->si_upper = ptr_to_compat(from->si_upper);
31931c93
EB
3391 break;
3392 case SIL_FAULT_PKUERR:
c3b3f524 3393 to->si_addr = ptr_to_compat(from->si_addr);
c3b3f524 3394 to->si_pkey = from->si_pkey;
ea64d5ac 3395 break;
fb6cc127
ME
3396 case SIL_PERF_EVENT:
3397 to->si_addr = ptr_to_compat(from->si_addr);
0683b531
EB
3398 to->si_perf_data = from->si_perf_data;
3399 to->si_perf_type = from->si_perf_type;
fb6cc127 3400 break;
ea64d5ac 3401 case SIL_CHLD:
c3b3f524
CH
3402 to->si_pid = from->si_pid;
3403 to->si_uid = from->si_uid;
3404 to->si_status = from->si_status;
3405 to->si_utime = from->si_utime;
3406 to->si_stime = from->si_stime;
ea64d5ac
EB
3407 break;
3408 case SIL_RT:
c3b3f524
CH
3409 to->si_pid = from->si_pid;
3410 to->si_uid = from->si_uid;
3411 to->si_int = from->si_int;
ea64d5ac
EB
3412 break;
3413 case SIL_SYS:
c3b3f524
CH
3414 to->si_call_addr = ptr_to_compat(from->si_call_addr);
3415 to->si_syscall = from->si_syscall;
3416 to->si_arch = from->si_arch;
ea64d5ac
EB
3417 break;
3418 }
c3b3f524 3419}
ea64d5ac 3420
c3b3f524
CH
3421int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
3422 const struct kernel_siginfo *from)
3423{
3424 struct compat_siginfo new;
3425
3426 copy_siginfo_to_external32(&new, from);
ea64d5ac
EB
3427 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3428 return -EFAULT;
ea64d5ac
EB
3429 return 0;
3430}
3431
601d5abf
EB
3432static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
3433 const struct compat_siginfo *from)
212a36a1 3434{
212a36a1 3435 clear_siginfo(to);
601d5abf
EB
3436 to->si_signo = from->si_signo;
3437 to->si_errno = from->si_errno;
3438 to->si_code = from->si_code;
3439 switch(siginfo_layout(from->si_signo, from->si_code)) {
212a36a1 3440 case SIL_KILL:
601d5abf
EB
3441 to->si_pid = from->si_pid;
3442 to->si_uid = from->si_uid;
212a36a1
EB
3443 break;
3444 case SIL_TIMER:
601d5abf
EB
3445 to->si_tid = from->si_tid;
3446 to->si_overrun = from->si_overrun;
3447 to->si_int = from->si_int;
212a36a1
EB
3448 break;
3449 case SIL_POLL:
601d5abf
EB
3450 to->si_band = from->si_band;
3451 to->si_fd = from->si_fd;
212a36a1
EB
3452 break;
3453 case SIL_FAULT:
601d5abf 3454 to->si_addr = compat_ptr(from->si_addr);
9abcabe3
EB
3455 break;
3456 case SIL_FAULT_TRAPNO:
3457 to->si_addr = compat_ptr(from->si_addr);
601d5abf 3458 to->si_trapno = from->si_trapno;
31931c93
EB
3459 break;
3460 case SIL_FAULT_MCEERR:
601d5abf 3461 to->si_addr = compat_ptr(from->si_addr);
601d5abf 3462 to->si_addr_lsb = from->si_addr_lsb;
31931c93
EB
3463 break;
3464 case SIL_FAULT_BNDERR:
601d5abf 3465 to->si_addr = compat_ptr(from->si_addr);
601d5abf
EB
3466 to->si_lower = compat_ptr(from->si_lower);
3467 to->si_upper = compat_ptr(from->si_upper);
31931c93
EB
3468 break;
3469 case SIL_FAULT_PKUERR:
601d5abf 3470 to->si_addr = compat_ptr(from->si_addr);
601d5abf 3471 to->si_pkey = from->si_pkey;
212a36a1 3472 break;
fb6cc127
ME
3473 case SIL_PERF_EVENT:
3474 to->si_addr = compat_ptr(from->si_addr);
0683b531
EB
3475 to->si_perf_data = from->si_perf_data;
3476 to->si_perf_type = from->si_perf_type;
fb6cc127 3477 break;
212a36a1 3478 case SIL_CHLD:
601d5abf
EB
3479 to->si_pid = from->si_pid;
3480 to->si_uid = from->si_uid;
3481 to->si_status = from->si_status;
212a36a1
EB
3482#ifdef CONFIG_X86_X32_ABI
3483 if (in_x32_syscall()) {
601d5abf
EB
3484 to->si_utime = from->_sifields._sigchld_x32._utime;
3485 to->si_stime = from->_sifields._sigchld_x32._stime;
212a36a1
EB
3486 } else
3487#endif
3488 {
601d5abf
EB
3489 to->si_utime = from->si_utime;
3490 to->si_stime = from->si_stime;
212a36a1
EB
3491 }
3492 break;
3493 case SIL_RT:
601d5abf
EB
3494 to->si_pid = from->si_pid;
3495 to->si_uid = from->si_uid;
3496 to->si_int = from->si_int;
212a36a1
EB
3497 break;
3498 case SIL_SYS:
601d5abf
EB
3499 to->si_call_addr = compat_ptr(from->si_call_addr);
3500 to->si_syscall = from->si_syscall;
3501 to->si_arch = from->si_arch;
212a36a1
EB
3502 break;
3503 }
3504 return 0;
3505}
601d5abf
EB
3506
3507static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
3508 const struct compat_siginfo __user *ufrom)
3509{
3510 struct compat_siginfo from;
3511
3512 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3513 return -EFAULT;
3514
3515 from.si_signo = signo;
3516 return post_copy_siginfo_from_user32(to, &from);
3517}
3518
3519int copy_siginfo_from_user32(struct kernel_siginfo *to,
3520 const struct compat_siginfo __user *ufrom)
3521{
3522 struct compat_siginfo from;
3523
3524 if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
3525 return -EFAULT;
3526
3527 return post_copy_siginfo_from_user32(to, &from);
3528}
212a36a1
EB
3529#endif /* CONFIG_COMPAT */
3530
943df148
ON
3531/**
3532 * do_sigtimedwait - wait for queued signals specified in @which
3533 * @which: queued signals to wait for
3534 * @info: if non-null, the signal's siginfo is returned here
3535 * @ts: upper bound on process time suspension
3536 */
ae7795bc 3537static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
49c39f84 3538 const struct timespec64 *ts)
943df148 3539{
2456e855 3540 ktime_t *to = NULL, timeout = KTIME_MAX;
943df148 3541 struct task_struct *tsk = current;
943df148 3542 sigset_t mask = *which;
2b1ecc3d 3543 int sig, ret = 0;
943df148
ON
3544
3545 if (ts) {
49c39f84 3546 if (!timespec64_valid(ts))
943df148 3547 return -EINVAL;
49c39f84 3548 timeout = timespec64_to_ktime(*ts);
2b1ecc3d 3549 to = &timeout;
943df148
ON
3550 }
3551
3552 /*
3553 * Invert the set of allowed signals to get those we want to block.
3554 */
3555 sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
3556 signotset(&mask);
3557
3558 spin_lock_irq(&tsk->sighand->siglock);
3559 sig = dequeue_signal(tsk, &mask, info);
2456e855 3560 if (!sig && timeout) {
943df148
ON
3561 /*
3562 * None ready, temporarily unblock those we're interested
3563 * while we are sleeping in so that we'll be awakened when
b182801a
ON
3564 * they arrive. Unblocking is always fine, we can avoid
3565 * set_current_blocked().
943df148
ON
3566 */
3567 tsk->real_blocked = tsk->blocked;
3568 sigandsets(&tsk->blocked, &tsk->blocked, &mask);
3569 recalc_sigpending();
3570 spin_unlock_irq(&tsk->sighand->siglock);
3571
2b1ecc3d
TG
3572 __set_current_state(TASK_INTERRUPTIBLE);
3573 ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
3574 HRTIMER_MODE_REL);
943df148 3575 spin_lock_irq(&tsk->sighand->siglock);
b182801a 3576 __set_task_blocked(tsk, &tsk->real_blocked);
6114041a 3577 sigemptyset(&tsk->real_blocked);
b182801a 3578 sig = dequeue_signal(tsk, &mask, info);
943df148
ON
3579 }
3580 spin_unlock_irq(&tsk->sighand->siglock);
3581
3582 if (sig)
3583 return sig;
2b1ecc3d 3584 return ret ? -EINTR : -EAGAIN;
943df148
ON
3585}
3586
41c57892
RD
3587/**
3588 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
3589 * in @uthese
3590 * @uthese: queued signals to wait for
3591 * @uinfo: if non-null, the signal's siginfo is returned here
3592 * @uts: upper bound on process time suspension
3593 * @sigsetsize: size of sigset_t type
3594 */
17da2bd9 3595SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
49c39f84
AB
3596 siginfo_t __user *, uinfo,
3597 const struct __kernel_timespec __user *, uts,
17da2bd9 3598 size_t, sigsetsize)
1da177e4 3599{
1da177e4 3600 sigset_t these;
49c39f84 3601 struct timespec64 ts;
ae7795bc 3602 kernel_siginfo_t info;
943df148 3603 int ret;
1da177e4
LT
3604
3605 /* XXX: Don't preclude handling different sized sigset_t's. */
3606 if (sigsetsize != sizeof(sigset_t))
3607 return -EINVAL;
3608
3609 if (copy_from_user(&these, uthese, sizeof(these)))
3610 return -EFAULT;
5aba085e 3611
1da177e4 3612 if (uts) {
49c39f84 3613 if (get_timespec64(&ts, uts))
1da177e4 3614 return -EFAULT;
1da177e4
LT
3615 }
3616
943df148 3617 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
1da177e4 3618
943df148
ON
3619 if (ret > 0 && uinfo) {
3620 if (copy_siginfo_to_user(uinfo, &info))
3621 ret = -EFAULT;
1da177e4
LT
3622 }
3623
3624 return ret;
3625}
3626
df8522a3
AB
3627#ifdef CONFIG_COMPAT_32BIT_TIME
3628SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
3629 siginfo_t __user *, uinfo,
3630 const struct old_timespec32 __user *, uts,
3631 size_t, sigsetsize)
3632{
3633 sigset_t these;
3634 struct timespec64 ts;
3635 kernel_siginfo_t info;
3636 int ret;
3637
3638 if (sigsetsize != sizeof(sigset_t))
3639 return -EINVAL;
3640
3641 if (copy_from_user(&these, uthese, sizeof(these)))
3642 return -EFAULT;
3643
3644 if (uts) {
3645 if (get_old_timespec32(&ts, uts))
3646 return -EFAULT;
3647 }
3648
3649 ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);
3650
3651 if (ret > 0 && uinfo) {
3652 if (copy_siginfo_to_user(uinfo, &info))
3653 ret = -EFAULT;
3654 }
3655
3656 return ret;
3657}
3658#endif
3659
1b3c872c 3660#ifdef CONFIG_COMPAT
2367c4b5
AB
3661COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
3662 struct compat_siginfo __user *, uinfo,
3663 struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
3664{
3665 sigset_t s;
3666 struct timespec64 t;
3667 kernel_siginfo_t info;
3668 long ret;
3669
3670 if (sigsetsize != sizeof(sigset_t))
3671 return -EINVAL;
3672
3673 if (get_compat_sigset(&s, uthese))
3674 return -EFAULT;
3675
3676 if (uts) {
3677 if (get_timespec64(&t, uts))
3678 return -EFAULT;
3679 }
3680
3681 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3682
3683 if (ret > 0 && uinfo) {
3684 if (copy_siginfo_to_user32(uinfo, &info))
3685 ret = -EFAULT;
3686 }
3687
3688 return ret;
3689}
3690
3691#ifdef CONFIG_COMPAT_32BIT_TIME
8dabe724 3692COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
1b3c872c 3693 struct compat_siginfo __user *, uinfo,
9afc5eee 3694 struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
1b3c872c 3695{
1b3c872c 3696 sigset_t s;
49c39f84 3697 struct timespec64 t;
ae7795bc 3698 kernel_siginfo_t info;
1b3c872c
AV
3699 long ret;
3700
3701 if (sigsetsize != sizeof(sigset_t))
3702 return -EINVAL;
3703
3968cf62 3704 if (get_compat_sigset(&s, uthese))
1b3c872c 3705 return -EFAULT;
1b3c872c
AV
3706
3707 if (uts) {
49c39f84 3708 if (get_old_timespec32(&t, uts))
1b3c872c
AV
3709 return -EFAULT;
3710 }
3711
3712 ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);
3713
3714 if (ret > 0 && uinfo) {
3715 if (copy_siginfo_to_user32(uinfo, &info))
3716 ret = -EFAULT;
3717 }
3718
3719 return ret;
3720}
3721#endif
2367c4b5 3722#endif
1b3c872c 3723
3eb39f47
CB
3724static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
3725{
3726 clear_siginfo(info);
3727 info->si_signo = sig;
3728 info->si_errno = 0;
3729 info->si_code = SI_USER;
3730 info->si_pid = task_tgid_vnr(current);
3731 info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
3732}
3733
41c57892
RD
3734/**
3735 * sys_kill - send a signal to a process
3736 * @pid: the PID of the process
3737 * @sig: signal to be sent
3738 */
17da2bd9 3739SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
1da177e4 3740{
ae7795bc 3741 struct kernel_siginfo info;
1da177e4 3742
3eb39f47 3743 prepare_kill_siginfo(sig, &info);
1da177e4
LT
3744
3745 return kill_something_info(sig, &info, pid);
3746}
3747
3eb39f47
CB
3748/*
3749 * Verify that the signaler and signalee either are in the same pid namespace
3750 * or that the signaler's pid namespace is an ancestor of the signalee's pid
3751 * namespace.
3752 */
3753static bool access_pidfd_pidns(struct pid *pid)
3754{
3755 struct pid_namespace *active = task_active_pid_ns(current);
3756 struct pid_namespace *p = ns_of_pid(pid);
3757
3758 for (;;) {
3759 if (!p)
3760 return false;
3761 if (p == active)
3762 break;
3763 p = p->parent;
3764 }
3765
3766 return true;
3767}
3768
adc5d875
JH
3769static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo,
3770 siginfo_t __user *info)
3eb39f47
CB
3771{
3772#ifdef CONFIG_COMPAT
3773 /*
3774 * Avoid hooking up compat syscalls and instead handle necessary
3775 * conversions here. Note, this is a stop-gap measure and should not be
3776 * considered a generic solution.
3777 */
3778 if (in_compat_syscall())
3779 return copy_siginfo_from_user32(
3780 kinfo, (struct compat_siginfo __user *)info);
3781#endif
3782 return copy_siginfo_from_user(kinfo, info);
3783}
3784
2151ad1b
CB
3785static struct pid *pidfd_to_pid(const struct file *file)
3786{
3695eae5
CB
3787 struct pid *pid;
3788
3789 pid = pidfd_pid(file);
3790 if (!IS_ERR(pid))
3791 return pid;
2151ad1b
CB
3792
3793 return tgid_pidfd_to_pid(file);
3794}
3795
3eb39f47 3796/**
c732327f
CB
3797 * sys_pidfd_send_signal - Signal a process through a pidfd
3798 * @pidfd: file descriptor of the process
3799 * @sig: signal to send
3800 * @info: signal info
3801 * @flags: future flags
3eb39f47
CB
3802 *
3803 * The syscall currently only signals via PIDTYPE_PID which covers
3804 * kill(<positive-pid>, <signal>. It does not signal threads or process
3805 * groups.
3806 * In order to extend the syscall to threads and process groups the @flags
3807 * argument should be used. In essence, the @flags argument will determine
3808 * what is signaled and not the file descriptor itself. Put in other words,
3809 * grouping is a property of the flags argument not a property of the file
3810 * descriptor.
3811 *
3812 * Return: 0 on success, negative errno on failure
3813 */
3814SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
3815 siginfo_t __user *, info, unsigned int, flags)
3816{
3817 int ret;
3818 struct fd f;
3819 struct pid *pid;
3820 kernel_siginfo_t kinfo;
3821
3822 /* Enforce flags be set to 0 until we add an extension. */
3823 if (flags)
3824 return -EINVAL;
3825
738a7832 3826 f = fdget(pidfd);
3eb39f47
CB
3827 if (!f.file)
3828 return -EBADF;
3829
3830 /* Is this a pidfd? */
2151ad1b 3831 pid = pidfd_to_pid(f.file);
3eb39f47
CB
3832 if (IS_ERR(pid)) {
3833 ret = PTR_ERR(pid);
3834 goto err;
3835 }
3836
3837 ret = -EINVAL;
3838 if (!access_pidfd_pidns(pid))
3839 goto err;
3840
3841 if (info) {
3842 ret = copy_siginfo_from_user_any(&kinfo, info);
3843 if (unlikely(ret))
3844 goto err;
3845
3846 ret = -EINVAL;
3847 if (unlikely(sig != kinfo.si_signo))
3848 goto err;
3849
556a888a
JH
3850 /* Only allow sending arbitrary signals to yourself. */
3851 ret = -EPERM;
3eb39f47 3852 if ((task_pid(current) != pid) &&
556a888a
JH
3853 (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
3854 goto err;
3eb39f47
CB
3855 } else {
3856 prepare_kill_siginfo(sig, &kinfo);
3857 }
3858
3859 ret = kill_pid_info(sig, &kinfo, pid);
3860
3861err:
3862 fdput(f);
3863 return ret;
3864}
3eb39f47 3865
30b4ae8a 3866static int
ae7795bc 3867do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
1da177e4 3868{
1da177e4 3869 struct task_struct *p;
30b4ae8a 3870 int error = -ESRCH;
1da177e4 3871
3547ff3a 3872 rcu_read_lock();
228ebcbe 3873 p = find_task_by_vpid(pid);
b488893a 3874 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
30b4ae8a 3875 error = check_kill_permission(sig, info, p);
1da177e4
LT
3876 /*
3877 * The null signal is a permissions and process existence
3878 * probe. No signal is actually delivered.
3879 */
4a30debf 3880 if (!error && sig) {
40b3b025 3881 error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
4a30debf
ON
3882 /*
3883 * If lock_task_sighand() failed we pretend the task
3884 * dies after receiving the signal. The window is tiny,
3885 * and the signal is private anyway.
3886 */
3887 if (unlikely(error == -ESRCH))
3888 error = 0;
1da177e4
LT
3889 }
3890 }
3547ff3a 3891 rcu_read_unlock();
6dd69f10 3892
1da177e4
LT
3893 return error;
3894}
3895
30b4ae8a
TG
3896static int do_tkill(pid_t tgid, pid_t pid, int sig)
3897{
ae7795bc 3898 struct kernel_siginfo info;
30b4ae8a 3899
5f74972c 3900 clear_siginfo(&info);
30b4ae8a
TG
3901 info.si_signo = sig;
3902 info.si_errno = 0;
3903 info.si_code = SI_TKILL;
3904 info.si_pid = task_tgid_vnr(current);
078de5f7 3905 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
30b4ae8a
TG
3906
3907 return do_send_specific(tgid, pid, sig, &info);
3908}
3909
6dd69f10
VL
3910/**
3911 * sys_tgkill - send signal to one specific thread
3912 * @tgid: the thread group ID of the thread
3913 * @pid: the PID of the thread
3914 * @sig: signal to be sent
3915 *
72fd4a35 3916 * This syscall also checks the @tgid and returns -ESRCH even if the PID
6dd69f10
VL
3917 * exists but it's not belonging to the target process anymore. This
3918 * method solves the problem of threads exiting and PIDs getting reused.
3919 */
a5f8fa9e 3920SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
6dd69f10
VL
3921{
3922 /* This is only valid for single tasks */
3923 if (pid <= 0 || tgid <= 0)
3924 return -EINVAL;
3925
3926 return do_tkill(tgid, pid, sig);
3927}
3928
41c57892
RD
3929/**
3930 * sys_tkill - send signal to one specific task
3931 * @pid: the PID of the task
3932 * @sig: signal to be sent
3933 *
1da177e4
LT
3934 * Send a signal to only one task, even if it's a CLONE_THREAD task.
3935 */
a5f8fa9e 3936SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
1da177e4 3937{
1da177e4
LT
3938 /* This is only valid for single tasks */
3939 if (pid <= 0)
3940 return -EINVAL;
3941
6dd69f10 3942 return do_tkill(0, pid, sig);
1da177e4
LT
3943}
3944
ae7795bc 3945static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
75907d4d
AV
3946{
3947 /* Not even root can pretend to send signals from the kernel.
3948 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3949 */
66dd34ad 3950 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
69828dce 3951 (task_pid_vnr(current) != pid))
75907d4d 3952 return -EPERM;
69828dce 3953
75907d4d
AV
3954 /* POSIX.1b doesn't mention process groups. */
3955 return kill_proc_info(sig, info, pid);
3956}
3957
41c57892
RD
3958/**
3959 * sys_rt_sigqueueinfo - send signal information to a signal
3960 * @pid: the PID of the thread
3961 * @sig: signal to be sent
3962 * @uinfo: signal info to be sent
3963 */
a5f8fa9e
HC
3964SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
3965 siginfo_t __user *, uinfo)
1da177e4 3966{
ae7795bc 3967 kernel_siginfo_t info;
601d5abf 3968 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4cd2e0e7
EB
3969 if (unlikely(ret))
3970 return ret;
75907d4d
AV
3971 return do_rt_sigqueueinfo(pid, sig, &info);
3972}
1da177e4 3973
75907d4d 3974#ifdef CONFIG_COMPAT
75907d4d
AV
3975COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3976 compat_pid_t, pid,
3977 int, sig,
3978 struct compat_siginfo __user *, uinfo)
3979{
ae7795bc 3980 kernel_siginfo_t info;
601d5abf 3981 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
75907d4d
AV
3982 if (unlikely(ret))
3983 return ret;
3984 return do_rt_sigqueueinfo(pid, sig, &info);
1da177e4 3985}
75907d4d 3986#endif
1da177e4 3987
ae7795bc 3988static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
62ab4505
TG
3989{
3990 /* This is only valid for single tasks */
3991 if (pid <= 0 || tgid <= 0)
3992 return -EINVAL;
3993
3994 /* Not even root can pretend to send signals from the kernel.
da48524e
JT
3995 * Nor can they impersonate a kill()/tgkill(), which adds source info.
3996 */
69828dce
VD
3997 if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
3998 (task_pid_vnr(current) != pid))
62ab4505 3999 return -EPERM;
69828dce 4000
62ab4505
TG
4001 return do_send_specific(tgid, pid, sig, info);
4002}
4003
4004SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
4005 siginfo_t __user *, uinfo)
4006{
ae7795bc 4007 kernel_siginfo_t info;
601d5abf 4008 int ret = __copy_siginfo_from_user(sig, &info, uinfo);
4cd2e0e7
EB
4009 if (unlikely(ret))
4010 return ret;
62ab4505
TG
4011 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4012}
4013
9aae8fc0
AV
4014#ifdef CONFIG_COMPAT
4015COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
4016 compat_pid_t, tgid,
4017 compat_pid_t, pid,
4018 int, sig,
4019 struct compat_siginfo __user *, uinfo)
4020{
ae7795bc 4021 kernel_siginfo_t info;
601d5abf 4022 int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
4cd2e0e7
EB
4023 if (unlikely(ret))
4024 return ret;
9aae8fc0
AV
4025 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
4026}
4027#endif
4028
0341729b 4029/*
b4e74264 4030 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
0341729b 4031 */
b4e74264 4032void kernel_sigaction(int sig, __sighandler_t action)
0341729b 4033{
ec5955b8 4034 spin_lock_irq(&current->sighand->siglock);
b4e74264
ON
4035 current->sighand->action[sig - 1].sa.sa_handler = action;
4036 if (action == SIG_IGN) {
4037 sigset_t mask;
0341729b 4038
b4e74264
ON
4039 sigemptyset(&mask);
4040 sigaddset(&mask, sig);
580d34e4 4041
b4e74264
ON
4042 flush_sigqueue_mask(&mask, &current->signal->shared_pending);
4043 flush_sigqueue_mask(&mask, &current->pending);
4044 recalc_sigpending();
4045 }
0341729b
ON
4046 spin_unlock_irq(&current->sighand->siglock);
4047}
b4e74264 4048EXPORT_SYMBOL(kernel_sigaction);
0341729b 4049
68463510
DS
4050void __weak sigaction_compat_abi(struct k_sigaction *act,
4051 struct k_sigaction *oact)
4052{
4053}
4054
88531f72 4055int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
1da177e4 4056{
afe2b038 4057 struct task_struct *p = current, *t;
1da177e4 4058 struct k_sigaction *k;
71fabd5e 4059 sigset_t mask;
1da177e4 4060
7ed20e1a 4061 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
1da177e4
LT
4062 return -EINVAL;
4063
afe2b038 4064 k = &p->sighand->action[sig-1];
1da177e4 4065
afe2b038 4066 spin_lock_irq(&p->sighand->siglock);
1da177e4
LT
4067 if (oact)
4068 *oact = *k;
4069
a54f0dfd
PC
4070 /*
4071 * Make sure that we never accidentally claim to support SA_UNSUPPORTED,
4072 * e.g. by having an architecture use the bit in their uapi.
4073 */
4074 BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED);
4075
23acdc76
PC
4076 /*
4077 * Clear unknown flag bits in order to allow userspace to detect missing
4078 * support for flag bits and to allow the kernel to use non-uapi bits
4079 * internally.
4080 */
4081 if (act)
4082 act->sa.sa_flags &= UAPI_SA_FLAGS;
4083 if (oact)
4084 oact->sa.sa_flags &= UAPI_SA_FLAGS;
4085
68463510
DS
4086 sigaction_compat_abi(act, oact);
4087
1da177e4 4088 if (act) {
9ac95f2f
ON
4089 sigdelsetmask(&act->sa.sa_mask,
4090 sigmask(SIGKILL) | sigmask(SIGSTOP));
88531f72 4091 *k = *act;
1da177e4
LT
4092 /*
4093 * POSIX 3.3.1.3:
4094 * "Setting a signal action to SIG_IGN for a signal that is
4095 * pending shall cause the pending signal to be discarded,
4096 * whether or not it is blocked."
4097 *
4098 * "Setting a signal action to SIG_DFL for a signal that is
4099 * pending and whose default action is to ignore the signal
4100 * (for example, SIGCHLD), shall cause the pending signal to
4101 * be discarded, whether or not it is blocked"
4102 */
afe2b038 4103 if (sig_handler_ignored(sig_handler(p, sig), sig)) {
71fabd5e
GA
4104 sigemptyset(&mask);
4105 sigaddset(&mask, sig);
afe2b038
ON
4106 flush_sigqueue_mask(&mask, &p->signal->shared_pending);
4107 for_each_thread(p, t)
c09c1441 4108 flush_sigqueue_mask(&mask, &t->pending);
1da177e4 4109 }
1da177e4
LT
4110 }
4111
afe2b038 4112 spin_unlock_irq(&p->sighand->siglock);
1da177e4
LT
4113 return 0;
4114}
4115
c09c1441 4116static int
22839869
WD
4117do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp,
4118 size_t min_ss_size)
1da177e4 4119{
bcfe8ad8 4120 struct task_struct *t = current;
1da177e4 4121
bcfe8ad8
AV
4122 if (oss) {
4123 memset(oss, 0, sizeof(stack_t));
4124 oss->ss_sp = (void __user *) t->sas_ss_sp;
4125 oss->ss_size = t->sas_ss_size;
4126 oss->ss_flags = sas_ss_flags(sp) |
4127 (current->sas_ss_flags & SS_FLAG_BITS);
4128 }
1da177e4 4129
bcfe8ad8
AV
4130 if (ss) {
4131 void __user *ss_sp = ss->ss_sp;
4132 size_t ss_size = ss->ss_size;
4133 unsigned ss_flags = ss->ss_flags;
407bc16a 4134 int ss_mode;
1da177e4 4135
bcfe8ad8
AV
4136 if (unlikely(on_sig_stack(sp)))
4137 return -EPERM;
1da177e4 4138
407bc16a 4139 ss_mode = ss_flags & ~SS_FLAG_BITS;
bcfe8ad8
AV
4140 if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
4141 ss_mode != 0))
4142 return -EINVAL;
1da177e4 4143
407bc16a 4144 if (ss_mode == SS_DISABLE) {
1da177e4
LT
4145 ss_size = 0;
4146 ss_sp = NULL;
4147 } else {
22839869 4148 if (unlikely(ss_size < min_ss_size))
bcfe8ad8 4149 return -ENOMEM;
1da177e4
LT
4150 }
4151
bcfe8ad8
AV
4152 t->sas_ss_sp = (unsigned long) ss_sp;
4153 t->sas_ss_size = ss_size;
4154 t->sas_ss_flags = ss_flags;
1da177e4 4155 }
bcfe8ad8 4156 return 0;
1da177e4 4157}
bcfe8ad8 4158
6bf9adfc
AV
4159SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss)
4160{
bcfe8ad8
AV
4161 stack_t new, old;
4162 int err;
4163 if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
4164 return -EFAULT;
4165 err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
22839869
WD
4166 current_user_stack_pointer(),
4167 MINSIGSTKSZ);
bcfe8ad8
AV
4168 if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
4169 err = -EFAULT;
4170 return err;
6bf9adfc 4171}
1da177e4 4172
5c49574f
AV
4173int restore_altstack(const stack_t __user *uss)
4174{
bcfe8ad8
AV
4175 stack_t new;
4176 if (copy_from_user(&new, uss, sizeof(stack_t)))
4177 return -EFAULT;
22839869
WD
4178 (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
4179 MINSIGSTKSZ);
5c49574f 4180 /* squash all but EFAULT for now */
bcfe8ad8 4181 return 0;
5c49574f
AV
4182}
4183
c40702c4
AV
4184int __save_altstack(stack_t __user *uss, unsigned long sp)
4185{
4186 struct task_struct *t = current;
2a742138
SS
4187 int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
4188 __put_user(t->sas_ss_flags, &uss->ss_flags) |
c40702c4 4189 __put_user(t->sas_ss_size, &uss->ss_size);
2a742138
SS
4190 if (err)
4191 return err;
4192 if (t->sas_ss_flags & SS_AUTODISARM)
4193 sas_ss_reset(t);
4194 return 0;
c40702c4
AV
4195}
4196
90268439 4197#ifdef CONFIG_COMPAT
6203deb0
DB
4198static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
4199 compat_stack_t __user *uoss_ptr)
90268439
AV
4200{
4201 stack_t uss, uoss;
4202 int ret;
90268439
AV
4203
4204 if (uss_ptr) {
4205 compat_stack_t uss32;
90268439
AV
4206 if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
4207 return -EFAULT;
4208 uss.ss_sp = compat_ptr(uss32.ss_sp);
4209 uss.ss_flags = uss32.ss_flags;
4210 uss.ss_size = uss32.ss_size;
4211 }
bcfe8ad8 4212 ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
22839869
WD
4213 compat_user_stack_pointer(),
4214 COMPAT_MINSIGSTKSZ);
90268439 4215 if (ret >= 0 && uoss_ptr) {
bcfe8ad8
AV
4216 compat_stack_t old;
4217 memset(&old, 0, sizeof(old));
4218 old.ss_sp = ptr_to_compat(uoss.ss_sp);
4219 old.ss_flags = uoss.ss_flags;
4220 old.ss_size = uoss.ss_size;
4221 if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
90268439
AV
4222 ret = -EFAULT;
4223 }
4224 return ret;
4225}
4226
6203deb0
DB
4227COMPAT_SYSCALL_DEFINE2(sigaltstack,
4228 const compat_stack_t __user *, uss_ptr,
4229 compat_stack_t __user *, uoss_ptr)
4230{
4231 return do_compat_sigaltstack(uss_ptr, uoss_ptr);
4232}
4233
90268439
AV
4234int compat_restore_altstack(const compat_stack_t __user *uss)
4235{
6203deb0 4236 int err = do_compat_sigaltstack(uss, NULL);
90268439
AV
4237 /* squash all but -EFAULT for now */
4238 return err == -EFAULT ? err : 0;
4239}
c40702c4
AV
4240
4241int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
4242{
441398d3 4243 int err;
c40702c4 4244 struct task_struct *t = current;
441398d3
SS
4245 err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
4246 &uss->ss_sp) |
4247 __put_user(t->sas_ss_flags, &uss->ss_flags) |
c40702c4 4248 __put_user(t->sas_ss_size, &uss->ss_size);
441398d3
SS
4249 if (err)
4250 return err;
4251 if (t->sas_ss_flags & SS_AUTODISARM)
4252 sas_ss_reset(t);
4253 return 0;
c40702c4 4254}
90268439 4255#endif
1da177e4
LT
4256
4257#ifdef __ARCH_WANT_SYS_SIGPENDING
4258
41c57892
RD
4259/**
4260 * sys_sigpending - examine pending signals
d53238cd 4261 * @uset: where mask of pending signal is returned
41c57892 4262 */
d53238cd 4263SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
1da177e4 4264{
d53238cd 4265 sigset_t set;
d53238cd
DB
4266
4267 if (sizeof(old_sigset_t) > sizeof(*uset))
4268 return -EINVAL;
4269
b1d294c8
CB
4270 do_sigpending(&set);
4271
4272 if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
4273 return -EFAULT;
4274
4275 return 0;
1da177e4
LT
4276}
4277
8f13621a
AV
4278#ifdef CONFIG_COMPAT
4279COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
4280{
4281 sigset_t set;
b1d294c8
CB
4282
4283 do_sigpending(&set);
4284
4285 return put_user(set.sig[0], set32);
8f13621a
AV
4286}
4287#endif
4288
1da177e4
LT
4289#endif
4290
4291#ifdef __ARCH_WANT_SYS_SIGPROCMASK
41c57892
RD
4292/**
4293 * sys_sigprocmask - examine and change blocked signals
4294 * @how: whether to add, remove, or set signals
b013c399 4295 * @nset: signals to add or remove (if non-null)
41c57892
RD
4296 * @oset: previous value of signal mask if non-null
4297 *
5aba085e
RD
4298 * Some platforms have their own version with special arguments;
4299 * others support only sys_rt_sigprocmask.
4300 */
1da177e4 4301
b013c399 4302SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
b290ebe2 4303 old_sigset_t __user *, oset)
1da177e4 4304{
1da177e4 4305 old_sigset_t old_set, new_set;
2e4f7c77 4306 sigset_t new_blocked;
1da177e4 4307
b013c399 4308 old_set = current->blocked.sig[0];
1da177e4 4309
b013c399
ON
4310 if (nset) {
4311 if (copy_from_user(&new_set, nset, sizeof(*nset)))
4312 return -EFAULT;
1da177e4 4313
2e4f7c77 4314 new_blocked = current->blocked;
1da177e4 4315
1da177e4 4316 switch (how) {
1da177e4 4317 case SIG_BLOCK:
2e4f7c77 4318 sigaddsetmask(&new_blocked, new_set);
1da177e4
LT
4319 break;
4320 case SIG_UNBLOCK:
2e4f7c77 4321 sigdelsetmask(&new_blocked, new_set);
1da177e4
LT
4322 break;
4323 case SIG_SETMASK:
2e4f7c77 4324 new_blocked.sig[0] = new_set;
1da177e4 4325 break;
2e4f7c77
ON
4326 default:
4327 return -EINVAL;
1da177e4
LT
4328 }
4329
0c4a8423 4330 set_current_blocked(&new_blocked);
b013c399
ON
4331 }
4332
4333 if (oset) {
1da177e4 4334 if (copy_to_user(oset, &old_set, sizeof(*oset)))
b013c399 4335 return -EFAULT;
1da177e4 4336 }
b013c399
ON
4337
4338 return 0;
1da177e4
LT
4339}
4340#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
4341
eaca6eae 4342#ifndef CONFIG_ODD_RT_SIGACTION
41c57892
RD
4343/**
4344 * sys_rt_sigaction - alter an action taken by a process
4345 * @sig: signal to be sent
f9fa0bc1
RD
4346 * @act: new sigaction
4347 * @oact: used to save the previous sigaction
41c57892
RD
4348 * @sigsetsize: size of sigset_t type
4349 */
d4e82042
HC
4350SYSCALL_DEFINE4(rt_sigaction, int, sig,
4351 const struct sigaction __user *, act,
4352 struct sigaction __user *, oact,
4353 size_t, sigsetsize)
1da177e4
LT
4354{
4355 struct k_sigaction new_sa, old_sa;
d8f993b3 4356 int ret;
1da177e4
LT
4357
4358 /* XXX: Don't preclude handling different sized sigset_t's. */
4359 if (sigsetsize != sizeof(sigset_t))
d8f993b3 4360 return -EINVAL;
1da177e4 4361
d8f993b3
CB
4362 if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
4363 return -EFAULT;
1da177e4
LT
4364
4365 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
d8f993b3
CB
4366 if (ret)
4367 return ret;
1da177e4 4368
d8f993b3
CB
4369 if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
4370 return -EFAULT;
4371
4372 return 0;
1da177e4 4373}
08d32fe5 4374#ifdef CONFIG_COMPAT
08d32fe5
AV
4375COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
4376 const struct compat_sigaction __user *, act,
4377 struct compat_sigaction __user *, oact,
4378 compat_size_t, sigsetsize)
4379{
4380 struct k_sigaction new_ka, old_ka;
08d32fe5
AV
4381#ifdef __ARCH_HAS_SA_RESTORER
4382 compat_uptr_t restorer;
4383#endif
4384 int ret;
4385
4386 /* XXX: Don't preclude handling different sized sigset_t's. */
4387 if (sigsetsize != sizeof(compat_sigset_t))
4388 return -EINVAL;
4389
4390 if (act) {
4391 compat_uptr_t handler;
4392 ret = get_user(handler, &act->sa_handler);
4393 new_ka.sa.sa_handler = compat_ptr(handler);
4394#ifdef __ARCH_HAS_SA_RESTORER
4395 ret |= get_user(restorer, &act->sa_restorer);
4396 new_ka.sa.sa_restorer = compat_ptr(restorer);
4397#endif
3968cf62 4398 ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
3ddc5b46 4399 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
08d32fe5
AV
4400 if (ret)
4401 return -EFAULT;
08d32fe5
AV
4402 }
4403
4404 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4405 if (!ret && oact) {
08d32fe5
AV
4406 ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
4407 &oact->sa_handler);
f454322e
DL
4408 ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
4409 sizeof(oact->sa_mask));
3ddc5b46 4410 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
08d32fe5
AV
4411#ifdef __ARCH_HAS_SA_RESTORER
4412 ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4413 &oact->sa_restorer);
4414#endif
4415 }
4416 return ret;
4417}
4418#endif
eaca6eae 4419#endif /* !CONFIG_ODD_RT_SIGACTION */
1da177e4 4420
495dfbf7
AV
4421#ifdef CONFIG_OLD_SIGACTION
4422SYSCALL_DEFINE3(sigaction, int, sig,
4423 const struct old_sigaction __user *, act,
4424 struct old_sigaction __user *, oact)
4425{
4426 struct k_sigaction new_ka, old_ka;
4427 int ret;
4428
4429 if (act) {
4430 old_sigset_t mask;
96d4f267 4431 if (!access_ok(act, sizeof(*act)) ||
495dfbf7
AV
4432 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
4433 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
4434 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4435 __get_user(mask, &act->sa_mask))
4436 return -EFAULT;
4437#ifdef __ARCH_HAS_KA_RESTORER
4438 new_ka.ka_restorer = NULL;
4439#endif
4440 siginitset(&new_ka.sa.sa_mask, mask);
4441 }
4442
4443 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4444
4445 if (!ret && oact) {
96d4f267 4446 if (!access_ok(oact, sizeof(*oact)) ||
495dfbf7
AV
4447 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
4448 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
4449 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4450 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4451 return -EFAULT;
4452 }
4453
4454 return ret;
4455}
4456#endif
4457#ifdef CONFIG_COMPAT_OLD_SIGACTION
4458COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
4459 const struct compat_old_sigaction __user *, act,
4460 struct compat_old_sigaction __user *, oact)
4461{
4462 struct k_sigaction new_ka, old_ka;
4463 int ret;
4464 compat_old_sigset_t mask;
4465 compat_uptr_t handler, restorer;
4466
4467 if (act) {
96d4f267 4468 if (!access_ok(act, sizeof(*act)) ||
495dfbf7
AV
4469 __get_user(handler, &act->sa_handler) ||
4470 __get_user(restorer, &act->sa_restorer) ||
4471 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
4472 __get_user(mask, &act->sa_mask))
4473 return -EFAULT;
4474
4475#ifdef __ARCH_HAS_KA_RESTORER
4476 new_ka.ka_restorer = NULL;
4477#endif
4478 new_ka.sa.sa_handler = compat_ptr(handler);
4479 new_ka.sa.sa_restorer = compat_ptr(restorer);
4480 siginitset(&new_ka.sa.sa_mask, mask);
4481 }
4482
4483 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
4484
4485 if (!ret && oact) {
96d4f267 4486 if (!access_ok(oact, sizeof(*oact)) ||
495dfbf7
AV
4487 __put_user(ptr_to_compat(old_ka.sa.sa_handler),
4488 &oact->sa_handler) ||
4489 __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
4490 &oact->sa_restorer) ||
4491 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
4492 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
4493 return -EFAULT;
4494 }
4495 return ret;
4496}
4497#endif
1da177e4 4498
f6187769 4499#ifdef CONFIG_SGETMASK_SYSCALL
1da177e4
LT
4500
4501/*
4502 * For backwards compatibility. Functionality superseded by sigprocmask.
4503 */
a5f8fa9e 4504SYSCALL_DEFINE0(sgetmask)
1da177e4
LT
4505{
4506 /* SMP safe */
4507 return current->blocked.sig[0];
4508}
4509
a5f8fa9e 4510SYSCALL_DEFINE1(ssetmask, int, newmask)
1da177e4 4511{
c1095c6d
ON
4512 int old = current->blocked.sig[0];
4513 sigset_t newset;
1da177e4 4514
5ba53ff6 4515 siginitset(&newset, newmask);
c1095c6d 4516 set_current_blocked(&newset);
1da177e4
LT
4517
4518 return old;
4519}
f6187769 4520#endif /* CONFIG_SGETMASK_SYSCALL */
1da177e4
LT
4521
4522#ifdef __ARCH_WANT_SYS_SIGNAL
4523/*
4524 * For backwards compatibility. Functionality superseded by sigaction.
4525 */
a5f8fa9e 4526SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
1da177e4
LT
4527{
4528 struct k_sigaction new_sa, old_sa;
4529 int ret;
4530
4531 new_sa.sa.sa_handler = handler;
4532 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
c70d3d70 4533 sigemptyset(&new_sa.sa.sa_mask);
1da177e4
LT
4534
4535 ret = do_sigaction(sig, &new_sa, &old_sa);
4536
4537 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
4538}
4539#endif /* __ARCH_WANT_SYS_SIGNAL */
4540
4541#ifdef __ARCH_WANT_SYS_PAUSE
4542
a5f8fa9e 4543SYSCALL_DEFINE0(pause)
1da177e4 4544{
d92fcf05 4545 while (!signal_pending(current)) {
1df01355 4546 __set_current_state(TASK_INTERRUPTIBLE);
d92fcf05
ON
4547 schedule();
4548 }
1da177e4
LT
4549 return -ERESTARTNOHAND;
4550}
4551
4552#endif
4553
9d8a7652 4554static int sigsuspend(sigset_t *set)
68f3f16d 4555{
68f3f16d
AV
4556 current->saved_sigmask = current->blocked;
4557 set_current_blocked(set);
4558
823dd322
SL
4559 while (!signal_pending(current)) {
4560 __set_current_state(TASK_INTERRUPTIBLE);
4561 schedule();
4562 }
68f3f16d
AV
4563 set_restore_sigmask();
4564 return -ERESTARTNOHAND;
4565}
68f3f16d 4566
41c57892
RD
4567/**
4568 * sys_rt_sigsuspend - replace the signal mask for a value with the
4569 * @unewset value until a signal is received
4570 * @unewset: new signal mask value
4571 * @sigsetsize: size of sigset_t type
4572 */
d4e82042 4573SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
150256d8
DW
4574{
4575 sigset_t newset;
4576
4577 /* XXX: Don't preclude handling different sized sigset_t's. */
4578 if (sigsetsize != sizeof(sigset_t))
4579 return -EINVAL;
4580
4581 if (copy_from_user(&newset, unewset, sizeof(newset)))
4582 return -EFAULT;
68f3f16d 4583 return sigsuspend(&newset);
150256d8 4584}
ad4b65a4
AV
4585
4586#ifdef CONFIG_COMPAT
4587COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
4588{
ad4b65a4 4589 sigset_t newset;
ad4b65a4
AV
4590
4591 /* XXX: Don't preclude handling different sized sigset_t's. */
4592 if (sigsetsize != sizeof(sigset_t))
4593 return -EINVAL;
4594
3968cf62 4595 if (get_compat_sigset(&newset, unewset))
ad4b65a4 4596 return -EFAULT;
ad4b65a4 4597 return sigsuspend(&newset);
ad4b65a4
AV
4598}
4599#endif
150256d8 4600
0a0e8cdf
AV
4601#ifdef CONFIG_OLD_SIGSUSPEND
4602SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
4603{
4604 sigset_t blocked;
4605 siginitset(&blocked, mask);
4606 return sigsuspend(&blocked);
4607}
4608#endif
4609#ifdef CONFIG_OLD_SIGSUSPEND3
4610SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
4611{
4612 sigset_t blocked;
4613 siginitset(&blocked, mask);
4614 return sigsuspend(&blocked);
4615}
4616#endif
150256d8 4617
52f5684c 4618__weak const char *arch_vma_name(struct vm_area_struct *vma)
f269fdd1
DH
4619{
4620 return NULL;
4621}
4622
ae7795bc 4623static inline void siginfo_buildtime_checks(void)
1da177e4 4624{
aba1be2f 4625 BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);
41b27154 4626
ae7795bc
EB
4627 /* Verify the offsets in the two siginfos match */
4628#define CHECK_OFFSET(field) \
4629 BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))
4630
4631 /* kill */
4632 CHECK_OFFSET(si_pid);
4633 CHECK_OFFSET(si_uid);
4634
4635 /* timer */
4636 CHECK_OFFSET(si_tid);
4637 CHECK_OFFSET(si_overrun);
4638 CHECK_OFFSET(si_value);
4639
4640 /* rt */
4641 CHECK_OFFSET(si_pid);
4642 CHECK_OFFSET(si_uid);
4643 CHECK_OFFSET(si_value);
4644
4645 /* sigchld */
4646 CHECK_OFFSET(si_pid);
4647 CHECK_OFFSET(si_uid);
4648 CHECK_OFFSET(si_status);
4649 CHECK_OFFSET(si_utime);
4650 CHECK_OFFSET(si_stime);
4651
4652 /* sigfault */
4653 CHECK_OFFSET(si_addr);
add0b32e 4654 CHECK_OFFSET(si_trapno);
ae7795bc
EB
4655 CHECK_OFFSET(si_addr_lsb);
4656 CHECK_OFFSET(si_lower);
4657 CHECK_OFFSET(si_upper);
4658 CHECK_OFFSET(si_pkey);
0683b531
EB
4659 CHECK_OFFSET(si_perf_data);
4660 CHECK_OFFSET(si_perf_type);
ae7795bc
EB
4661
4662 /* sigpoll */
4663 CHECK_OFFSET(si_band);
4664 CHECK_OFFSET(si_fd);
4665
4666 /* sigsys */
4667 CHECK_OFFSET(si_call_addr);
4668 CHECK_OFFSET(si_syscall);
4669 CHECK_OFFSET(si_arch);
4670#undef CHECK_OFFSET
70f1b0d3
EB
4671
4672 /* usb asyncio */
4673 BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
4674 offsetof(struct siginfo, si_addr));
4675 if (sizeof(int) == sizeof(void __user *)) {
4676 BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
4677 sizeof(void __user *));
4678 } else {
4679 BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
4680 sizeof_field(struct siginfo, si_uid)) !=
4681 sizeof(void __user *));
4682 BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
4683 offsetof(struct siginfo, si_uid));
4684 }
4685#ifdef CONFIG_COMPAT
4686 BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
4687 offsetof(struct compat_siginfo, si_addr));
4688 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4689 sizeof(compat_uptr_t));
4690 BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
4691 sizeof_field(struct siginfo, si_pid));
4692#endif
ae7795bc
EB
4693}
4694
4695void __init signals_init(void)
4696{
4697 siginfo_buildtime_checks();
4698
0a31bd5f 4699 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
1da177e4 4700}
67fc4e0c
JW
4701
4702#ifdef CONFIG_KGDB_KDB
4703#include <linux/kdb.h>
4704/*
0b44bf9a 4705 * kdb_send_sig - Allows kdb to send signals without exposing
67fc4e0c
JW
4706 * signal internals. This function checks if the required locks are
4707 * available before calling the main signal code, to avoid kdb
4708 * deadlocks.
4709 */
0b44bf9a 4710void kdb_send_sig(struct task_struct *t, int sig)
67fc4e0c
JW
4711{
4712 static struct task_struct *kdb_prev_t;
0b44bf9a 4713 int new_t, ret;
67fc4e0c
JW
4714 if (!spin_trylock(&t->sighand->siglock)) {
4715 kdb_printf("Can't do kill command now.\n"
4716 "The sigmask lock is held somewhere else in "
4717 "kernel, try again later\n");
4718 return;
4719 }
67fc4e0c
JW
4720 new_t = kdb_prev_t != t;
4721 kdb_prev_t = t;
4722 if (t->state != TASK_RUNNING && new_t) {
0b44bf9a 4723 spin_unlock(&t->sighand->siglock);
67fc4e0c
JW
4724 kdb_printf("Process is not RUNNING, sending a signal from "
4725 "kdb risks deadlock\n"
4726 "on the run queue locks. "
4727 "The signal has _not_ been sent.\n"
4728 "Reissue the kill command if you want to risk "
4729 "the deadlock.\n");
4730 return;
4731 }
b213984b 4732 ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
0b44bf9a
EB
4733 spin_unlock(&t->sighand->siglock);
4734 if (ret)
67fc4e0c
JW
4735 kdb_printf("Fail to deliver Signal %d to process %d.\n",
4736 sig, t->pid);
4737 else
4738 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
4739}
4740#endif /* CONFIG_KGDB_KDB */