signal: Remove task parameter from force_sigsegv
[linux-2.6-block.git] / kernel / signal.c

/*
 * linux/kernel/signal.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
 *
 * 2003-06-02 Jim Houston - Concurrent Computer Corp.
 * Changes to use preallocated sigqueue structures
 * to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/user.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/cn_proc.h>
#include <linux/compiler.h>
#include <linux/posix-timers.h>
#include <linux/livepatch.h>
#include <linux/cgroup.h>

#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include <asm/cacheflush.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

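/* Look up the handler currently installed for @sig in @t's sighand table. */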
static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static inline bool sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
	       (handler == SIG_DFL && sig_kernel_ignore(sig));
}

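/*
 * Is @sig effectively ignored by @t?  SIGKILL/SIGSTOP never reach the
 * global init, and other SIG_DFL signals are swallowed by
 * SIGNAL_UNKILLABLE tasks unless @force is set for a kernel-only signal.
 */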
static bool sig_task_ignored(struct task_struct *t, int sig, bool force)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	/* SIGKILL and SIGSTOP may not be sent to the global init */
	if (unlikely(is_global_init(t) && sig_kernel_only(sig)))
		return true;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !(force && sig_kernel_only(sig)))
		return true;

	return sig_handler_ignored(handler, sig);
}

static bool sig_ignored(struct task_struct *t, int sig, bool force)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return false;

	/*
	 * Tracers may want to know about even ignored signals, unless it
	 * is SIGKILL, which can't be reported anyway but can be ignored
	 * by a SIGNAL_UNKILLABLE task.
	 */
	if (t->ptrace && sig != SIGKILL)
		return false;

	return sig_task_ignored(t, sig, force);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

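/*
 * Recompute TIF_SIGPENDING for @t.  Only ever sets the flag; returns true
 * when it was set and the task may need waking (see the comment below on
 * why the flag is never cleared here).
 */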
static bool recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked) ||
	    cgroup_task_frozen(t)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return true;
	}

	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers who know they should
	 * do so clear it.
	 */
	return false;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (!recalc_sigpending_tsk(current) && !freezing(current) &&
	    !klp_patch_pending(current))
		clear_thread_flag(TIF_SIGPENDING);
}
EXPORT_SYMBOL(recalc_sigpending);

void calculate_sigpending(void)
{
	/* Have any signals or users of TIF_SIGPENDING been delayed
	 * until after fork?
	 */
	spin_lock_irq(&current->sighand->siglock);
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
		current->comm, current->pid, sig);
}

/**
 * task_set_jobctl_pending - set jobctl pending bits
 * @task: target task
 * @mask: pending bits to set
 *
 * Set @mask in @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK |
 * %JOBCTL_TRAPPING.  If stop signo is being set, the existing signo is
 * cleared.  If @task is already being killed or exiting, this function
 * becomes a noop.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if @mask is set, %false if made noop because @task was dying.
 */
bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
			JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
	BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK));

	if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING)))
		return false;

	if (mask & JOBCTL_STOP_SIGMASK)
		task->jobctl &= ~JOBCTL_STOP_SIGMASK;

	task->jobctl |= mask;
	return true;
}

/**
 * task_clear_jobctl_trapping - clear jobctl trapping bit
 * @task: target task
 *
 * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED.
 * Clear it and wake up the ptracer.  Note that we don't need any further
 * locking.  @task->siglock guarantees that @task->parent points to the
 * ptracer.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_trapping(struct task_struct *task)
{
	if (unlikely(task->jobctl & JOBCTL_TRAPPING)) {
		task->jobctl &= ~JOBCTL_TRAPPING;
		smp_mb();	/* advised by wake_up_bit() */
		wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT);
	}
}

/**
 * task_clear_jobctl_pending - clear jobctl pending bits
 * @task: target task
 * @mask: pending bits to clear
 *
 * Clear @mask from @task->jobctl.  @mask must be subset of
 * %JOBCTL_PENDING_MASK.  If %JOBCTL_STOP_PENDING is being cleared, other
 * STOP bits are cleared together.
 *
 * If clearing of @mask leaves no stop or trap pending, this function calls
 * task_clear_jobctl_trapping().
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
{
	BUG_ON(mask & ~JOBCTL_PENDING_MASK);

	if (mask & JOBCTL_STOP_PENDING)
		mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED;

	task->jobctl &= ~mask;

	if (!(task->jobctl & JOBCTL_PENDING_MASK))
		task_clear_jobctl_trapping(task);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %JOBCTL_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 *
 * RETURNS:
 * %true if group stop completion should be notified to the parent, %false
 * otherwise.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->jobctl & JOBCTL_STOP_CONSUME;

	WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING));

	task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	/*
	 * Tell the caller to notify completion iff we are entering into a
	 * fresh group stop.  Read comment in do_signal_stop() for details.
	 */
	if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
		signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
		return true;
	}
	return false;
}

void task_join_group_stop(struct task_struct *task)
{
	/* Have the new thread join an on-going signal group stop */
	unsigned long jobctl = current->jobctl;
	if (jobctl & JOBCTL_STOP_PENDING) {
		struct signal_struct *sig = current->signal;
		unsigned long signr = jobctl & JOBCTL_STOP_SIGMASK;
		unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
		if (task_set_jobctl_pending(task, signr | gstop)) {
			sig->group_stop_count++;
		}
	}
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

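/* Free a sigqueue entry and drop its rlimit charge, unless it is preallocated. */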
static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

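/* Drop every queued signal and clear the pending mask of @queue. */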
void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue , list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for this kthread.
 */
void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
EXPORT_SYMBOL(flush_signals);

#ifdef CONFIG_POSIX_TIMERS
static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}
#endif

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
#ifdef __ARCH_HAS_SA_RESTORER
		ka->sa.sa_restorer = NULL;
#endif
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

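/*
 * Will @sig go unhandled, i.e. is there neither a user handler installed
 * nor a ptracer that could intercept it?  (Always true for global init.)
 */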
bool unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return true;

	if (handler != SIG_IGN && handler != SIG_DFL)
		return false;

	/* if ptraced, let the tracer determine */
	return !tsk->ptrace;
}

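/*
 * Remove one instance of @sig from @list and copy its siginfo into @info.
 * *@resched_timer is set when a preallocated POSIX timer signal was
 * dequeued and the caller must rearm the timer.
 */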
static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info,
			   bool *resched_timer)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);

		*resched_timer =
			(first->flags & SIGQUEUE_PREALLOC) &&
			(info->si_code == SI_TIMER) &&
			(info->si_sys_private);

		__sigqueue_free(first);
	} else {
		/*
		 * Ok, it wasn't in the queue.  This must be
		 * a fast-pathed signal or we must have been
		 * out of queue space.  So zero out the info.
		 */
		clear_siginfo(info);
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			    kernel_siginfo_t *info, bool *resched_timer)
{
	int sig = next_signal(pending, mask);

	if (sig)
		collect_signal(sig, pending, info, resched_timer);
	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
{
	bool resched_timer = false;
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info, &resched_timer);
#ifdef CONFIG_POSIX_TIMERS
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self-restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavily loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
#endif
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		current->jobctl |= JOBCTL_STOP_DEQUEUED;
	}
#ifdef CONFIG_POSIX_TIMERS
	if (resched_timer) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		posixtimer_rearm(info);
		spin_lock(&tsk->sighand->siglock);

		/* Don't expose the si_sys_private value to userspace */
		info->si_sys_private = 0;
	}
#endif
	return signr;
}
EXPORT_SYMBOL_GPL(dequeue_signal);

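/*
 * Dequeue the first pending synchronous (fault-generated) signal of
 * current, if any, so it is serviced ahead of queued asynchronous ones.
 */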
static int dequeue_synchronous_signal(kernel_siginfo_t *info)
{
	struct task_struct *tsk = current;
	struct sigpending *pending = &tsk->pending;
	struct sigqueue *q, *sync = NULL;

	/*
	 * Might a synchronous signal be in the queue?
	 */
	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
		return 0;

	/*
	 * Return the first synchronous signal in the queue.
	 */
	list_for_each_entry(q, &pending->list, list) {
		/* Synchronous signals have a positive si_code */
		if ((q->info.si_code > SI_USER) &&
		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
			sync = q;
			goto next;
		}
	}
	return 0;
next:
	/*
	 * Check if there is another siginfo for the same signal.
	 */
	list_for_each_entry_continue(q, &pending->list, list) {
		if (q->info.si_signo == sync->info.si_signo)
			goto still_pending;
	}

	sigdelset(&pending->signal, sync->info.si_signo);
	recalc_sigpending();
still_pending:
	list_del_init(&sync->list);
	copy_siginfo(info, &sync->info);
	__sigqueue_free(sync);
	return info->si_signo;
}

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
	set_tsk_thread_flag(t, TIF_SIGPENDING);
	/*
	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 *
 * All callers must be holding the siglock.
 */
static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return;

	sigandnsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
}

static inline int is_si_special(const struct kernel_siginfo *info)
{
	return info <= SEND_SIG_PRIV;
}

static inline bool si_fromuser(const struct kernel_siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * called with RCU read lock from check_kill_permission()
 */
static bool kill_ok_by_cred(struct task_struct *t)
{
	const struct cred *cred = current_cred();
	const struct cred *tcred = __task_cred(t);

	return uid_eq(cred->euid, tcred->suid) ||
	       uid_eq(cred->euid, tcred->uid) ||
	       uid_eq(cred->uid, tcred->suid) ||
	       uid_eq(cred->uid, tcred->uid) ||
	       ns_capable(tcred->user_ns, CAP_KILL);
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct kernel_siginfo *info,
				 struct task_struct *t)
{
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	if (!same_thread_group(current, t) &&
	    !kill_ok_by_cred(t)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
			/* fall through */
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, NULL);
}

/**
 * ptrace_trap_notify - schedule trap to notify ptracer
 * @t: tracee wanting to notify tracer
 *
 * This function schedules sticky ptrace trap which is cleared on the next
 * TRAP_STOP to notify ptracer of an event.  @t must have been seized by
 * ptracer.
 *
 * If @t is running, STOP trap will be taken.  If trapped for STOP and
 * ptracer is listening for events, tracee is woken up so that it can
 * re-trap for the new event.  If trapped otherwise, STOP trap will be
 * eventually taken without returning to userland after the existing traps
 * are finished by PTRACE_CONT.
 *
 * CONTEXT:
 * Must be called with @t->sighand->siglock held.
 */
static void ptrace_trap_notify(struct task_struct *t)
{
	WARN_ON_ONCE(!(t->ptrace & PT_SEIZED));
	assert_spin_locked(&t->sighand->siglock);

	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals.  The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static bool prepare_signal(int sig, struct task_struct *p, bool force)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;
	sigset_t flush;

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		siginitset(&flush, sigmask(SIGCONT));
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t)
			flush_sigqueue_mask(&flush, &t->pending);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues, wake all threads.
		 */
		siginitset(&flush, SIG_KERNEL_STOP_MASK);
		flush_sigqueue_mask(&flush, &signal->shared_pending);
		for_each_thread(p, t) {
			flush_sigqueue_mask(&flush, &t->pending);
			task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING);
			if (likely(!(t->ptrace & PT_SEIZED)))
				wake_up_state(t, __TASK_STOPPED);
			else
				ptrace_trap_notify(t);
		}

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal().
			 */
			signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		}
	}

	return !sig_ignored(p, sig, force);
}

/*
 * Test if P wants to take SIG. After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG. Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals. Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline bool wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return false;

	if (p->flags & PF_EXITING)
		return false;

	if (sig == SIGKILL)
		return true;

	if (task_is_stopped_or_traced(p))
		return false;

	return task_curr(p) || !signal_pending(p);
}

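/*
 * A signal was just queued: pick a thread that wants it, wake it, and if
 * the signal is fatal start tearing down the whole thread group.
 */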
static void complete_signal(int sig, struct task_struct *p, enum pid_type type)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if ((type == PIDTYPE_PID) || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & SIGNAL_GROUP_EXIT) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL || !p->ptrace)) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

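/* Legacy (< SIGRTMIN) signals coalesce: at most one instance is kept pending. */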
static inline bool legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

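/*
 * Queue @sig on @t's private or shared pending list (selected by @type)
 * and choose a thread to service it.  Must be called with @t's siglock
 * held; returns 0, or -EAGAIN on rt-signal queue overflow.
 */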
static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;
	int ret = 0, result;

	assert_spin_locked(&t->sighand->siglock);

	result = TRACE_SIGNAL_IGNORED;
	if (!prepare_signal(sig, t,
			from_ancestor_ns || (info == SEND_SIG_PRIV)))
		goto ret;

	pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	result = TRACE_SIGNAL_ALREADY_PENDING;
	if (legacy_queue(pending, sig))
		goto ret;

	result = TRACE_SIGNAL_DELIVERED;
	/*
	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
	 */
	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
		goto out_set;

	/*
	 * Real-time signals must be queued if sent by sigqueue, or
	 * some other real-time mechanism.  It is implementation
	 * defined whether kill() does so.  We attempt to do so, on
	 * the principle of least surprise, but since kill is not
	 * allowed to fail with EAGAIN when low on memory we just
	 * make sure at least one signal gets delivered and don't
	 * pass on the info struct.
	 */
	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			rcu_read_lock();
			q->info.si_uid =
				from_kuid_munged(task_cred_xxx(t, user_ns),
						 current_uid());
			rcu_read_unlock();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			clear_siginfo(&q->info);
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			result = TRACE_SIGNAL_OVERFLOW_FAIL;
			ret = -EAGAIN;
			goto ret;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			result = TRACE_SIGNAL_LOSE_INFO;
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);

	/* Let multiprocess signals appear after on-going forks */
	if (type > PIDTYPE_TGID) {
		struct multiprocess_signals *delayed;
		hlist_for_each_entry(delayed, &t->signal->multiprocess, node) {
			sigset_t *signal = &delayed->signal;
			/* Can't queue both a stop and a continue signal */
			if (sig == SIGCONT)
				sigdelsetmask(signal, SIG_KERNEL_STOP_MASK);
			else if (sig_kernel_stop(sig))
				sigdelset(signal, SIGCONT);
			sigaddset(signal, sig);
		}
	}

	complete_signal(sig, t, type);
ret:
	trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result);
	return ret;
}

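/* Does @info's layout carry si_pid/si_uid fields that may need namespace translation? */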
static inline bool has_si_pid_and_uid(struct kernel_siginfo *info)
{
	bool ret = false;
	switch (siginfo_layout(info->si_signo, info->si_code)) {
	case SIL_KILL:
	case SIL_CHLD:
	case SIL_RT:
		ret = true;
		break;
	case SIL_TIMER:
	case SIL_POLL:
	case SIL_FAULT:
	case SIL_FAULT_MCEERR:
	case SIL_FAULT_BNDERR:
	case SIL_FAULT_PKUERR:
	case SIL_SYS:
		ret = false;
		break;
	}
	return ret;
}

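/*
 * Entry point for queueing a signal: translate si_pid/si_uid into @t's
 * pid/user namespaces where applicable, then hand off to __send_signal().
 */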
static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t,
			enum pid_type type)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif
	if (!is_si_special(info) && has_si_pid_and_uid(info)) {
		struct user_namespace *t_user_ns;

		rcu_read_lock();
		t_user_ns = task_cred_xxx(t, user_ns);
		if (current_user_ns() != t_user_ns) {
			kuid_t uid = make_kuid(current_user_ns(), info->si_uid);
			info->si_uid = from_kuid_munged(t_user_ns, uid);
		}
		rcu_read_unlock();

		if (!task_pid_nr_ns(current, task_active_pid_ns(t)))
			info->si_pid = 0;
	}
	return __send_signal(sig, info, t, type, from_ancestor_ns);
}

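/* Log a fatal signal; on i386 (outside UML) also dump the offending code bytes. */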
static void print_fatal_signal(int signr)
{
	struct pt_regs *regs = signal_pt_regs();
	pr_info("potentially unexpected fatal signal %d.\n", signr);

#if defined(__i386__) && !defined(__arch_um__)
	pr_info("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			pr_cont("%02x ", insn);
		}
	}
	pr_cont("\n");
#endif
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option(&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, PIDTYPE_TGID);
}

int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p,
			enum pid_type type)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, type);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	/*
	 * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect
	 * debugging to leave init killable.
	 */
	if (action->sa.sa_handler == SIG_DFL && !t->ptrace)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = send_signal(sig, info, t, PIDTYPE_PID);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		/*
		 * This sighand can be already freed and even reused, but
		 * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which
		 * initializes ->siglock: this slab can't go away, it has
		 * the same object type, ->siglock can't be reinitialized.
		 *
		 * We need to ensure that tsk->sighand is still the same
		 * after we take the lock, we can race with de_thread() or
		 * __exit_signal(). In the latter case the next iteration
		 * must see ->sighand == NULL.
		 */
		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct kernel_siginfo *info,
			struct task_struct *p, enum pid_type type)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, type);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	for (;;) {
		rcu_read_lock();
		p = pid_task(pid, PIDTYPE_PID);
		if (p)
			error = group_send_sig_info(sig, info, p, PIDTYPE_TGID);
		rcu_read_unlock();
		if (likely(!p || error != -ESRCH))
			return error;

		/*
		 * The task was unhashed in between, try again.  If it
		 * is dead, pid_task() will return NULL, if we race with
		 * de_thread() it will find the new leader.
		 */
	}
}

static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}

static inline bool kill_as_cred_perm(const struct cred *cred,
				     struct task_struct *target)
{
	const struct cred *pcred = __task_cred(target);

	return uid_eq(cred->euid, pcred->suid) ||
	       uid_eq(cred->euid, pcred->uid) ||
	       uid_eq(cred->uid, pcred->suid) ||
	       uid_eq(cred->uid, pcred->uid);
}

/*
 * The usb asyncio usage of siginfo is wrong.  The glibc support
 * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT.
 * AKA after the generic fields:
 *	kernel_pid_t	si_pid;
 *	kernel_uid32_t	si_uid;
 *	sigval_t	si_value;
 *
 * Unfortunately when usb generates SI_ASYNCIO it assumes the layout
 * after the generic fields is:
 *	void __user	*si_addr;
 *
 * This is a practical problem when there is a 64bit big endian kernel
 * and a 32bit userspace.  As the 32bit address will be encoded in the
 * low 32bits of the pointer, those low 32bits will be stored at a
 * higher address than appears in a 32 bit pointer.  So userspace will
 * not see the address it was expecting for its completions.
 *
 * There is nothing in the encoding that can allow
 * copy_siginfo_to_user32 to detect this confusion of formats, so
 * handle this by requiring the caller of kill_pid_usb_asyncio to
 * notice when this situation takes place and to store the 32bit
 * pointer in sival_int, instead of sival_addr of the sigval_t addr
 * parameter.
 */
int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr,
			 struct pid *pid, const struct cred *cred)
{
	struct kernel_siginfo info;
	struct task_struct *p;
	unsigned long flags;
	int ret = -EINVAL;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = errno;
	info.si_code = SI_ASYNCIO;
	*((sigval_t *)&info.si_pid) = addr;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	if (!kill_as_cred_perm(cred, p)) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, &info, sig, cred);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, &info, p, PIDTYPE_TGID, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	/* -INT_MIN is undefined.  Exclude this case to avoid a UBSAN warning */
	if (pid == INT_MIN)
		return -ESRCH;

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct *p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p,
							      PIDTYPE_MAX);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, PIDTYPE_PID);
}
EXPORT_SYMBOL(send_sig_info);

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}
EXPORT_SYMBOL(send_sig);

void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}
EXPORT_SYMBOL(force_sig);

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal.
 */
void force_sigsegv(int sig)
{
	struct task_struct *p = current;

	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
}

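/*
 * Illustrative call from an architecture fault handler (a sketch only; it
 * assumes neither ___ARCH_SI_TRAPNO nor ___ARCH_SI_IA64 adds extra
 * arguments on the architecture in question):
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)fault_address,
 *			current);
 */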
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = addr;
#ifdef __ARCH_SI_TRAPNO
	info.si_trapno = trapno;
#endif
#ifdef __ia64__
	info.si_imm = imm;
	info.si_flags = flags;
	info.si_isr = isr;
#endif
	return send_sig_info(info.si_signo, &info, t);
}

int force_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return force_sig_info(info.si_signo, &info, t);
}

int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t)
{
	struct kernel_siginfo info;

	WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR));
	clear_siginfo(&info);
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = code;
	info.si_addr = addr;
	info.si_addr_lsb = lsb;
	return send_sig_info(info.si_signo, &info, t);
}
EXPORT_SYMBOL(send_sig_mceerr);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_BNDERR;
	info.si_addr  = addr;
	info.si_lower = lower;
	info.si_upper = upper;
	return force_sig_info(info.si_signo, &info, current);
}

#ifdef SEGV_PKUERR
int force_sig_pkuerr(void __user *addr, u32 pkey)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_PKUERR;
	info.si_addr  = addr;
	info.si_pkey  = pkey;
	return force_sig_info(info.si_signo, &info, current);
}
#endif

/* For the crazy architectures that include trap information in
 * the errno field, instead of an actual errno value.
 */
int force_sig_ptrace_errno_trap(int errno, void __user *addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGTRAP;
	info.si_errno = errno;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = addr;
	return force_sig_info(info.si_signo, &info, current);
}

c4b92fc1
EB
1742int kill_pgrp(struct pid *pid, int sig, int priv)
1743{
146a505d
PE
1744 int ret;
1745
1746 read_lock(&tasklist_lock);
1747 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1748 read_unlock(&tasklist_lock);
1749
1750 return ret;
c4b92fc1
EB
1751}
1752EXPORT_SYMBOL(kill_pgrp);
1753
1754int kill_pid(struct pid *pid, int sig, int priv)
1755{
1756 return kill_pid_info(sig, __si_special(priv), pid);
1757}
1758EXPORT_SYMBOL(kill_pid);
1759
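/*
 * Example (illustrative sketch): a driver holding a struct pid
 * reference can deliver a signal without touching tasklist_lock
 * itself. The SIGIO choice is an assumption for illustration.
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *
 *	kill_pid(pid, SIGIO, 1);	// priv != 0: treated as sent by
 *					// the kernel (SEND_SIG_PRIV)
 *	put_pid(pid);
 *
 * kill_pgrp() is the process-group analogue and takes tasklist_lock
 * internally, as seen above.
 */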
1da177e4
LT
1760/*
1761 * These functions support sending signals using preallocated sigqueue
1762 * structures. This is needed "because realtime applications cannot
1763 * afford to lose notifications of asynchronous events, like timer
5aba085e 1764 * expirations or I/O completions". In the case of POSIX Timers
1da177e4
LT
1765 * we allocate the sigqueue structure at timer_create() time. If this
1766 * allocation fails we are able to report the failure to the application
1767 * with an EAGAIN error.
1768 */
1da177e4
LT
1769struct sigqueue *sigqueue_alloc(void)
1770{
f84d49b2 1771 struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1da177e4 1772
f84d49b2 1773 if (q)
1da177e4 1774 q->flags |= SIGQUEUE_PREALLOC;
f84d49b2
NO
1775
1776 return q;
1da177e4
LT
1777}
1778
1779void sigqueue_free(struct sigqueue *q)
1780{
1781 unsigned long flags;
60187d27
ON
1782 spinlock_t *lock = &current->sighand->siglock;
1783
1da177e4
LT
1784 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1785 /*
c8e85b4f
ON
1786 * We must hold ->siglock while testing q->list
1787 * to serialize with collect_signal() or with
da7978b0 1788 * __exit_signal()->flush_sigqueue().
1da177e4 1789 */
60187d27 1790 spin_lock_irqsave(lock, flags);
c8e85b4f
ON
1791 q->flags &= ~SIGQUEUE_PREALLOC;
1792 /*
1793 * If it is queued it will be freed when dequeued,
1794 * like the "regular" sigqueue.
1795 */
60187d27 1796 if (!list_empty(&q->list))
c8e85b4f 1797 q = NULL;
60187d27
ON
1798 spin_unlock_irqrestore(lock, flags);
1799
c8e85b4f
ON
1800 if (q)
1801 __sigqueue_free(q);
1da177e4
LT
1802}
1803
24122c7f 1804int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type)
9e3bd6c3 1805{
e62e6650 1806 int sig = q->info.si_signo;
2ca3515a 1807 struct sigpending *pending;
24122c7f 1808 struct task_struct *t;
e62e6650 1809 unsigned long flags;
163566f6 1810 int ret, result;
2ca3515a 1811
4cd4b6d4 1812 BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
e62e6650
ON
1813
1814 ret = -1;
24122c7f
EB
1815 rcu_read_lock();
1816 t = pid_task(pid, type);
1817 if (!t || !likely(lock_task_sighand(t, &flags)))
e62e6650
ON
1818 goto ret;
1819
7e695a5e 1820 ret = 1; /* the signal is ignored */
163566f6 1821 result = TRACE_SIGNAL_IGNORED;
def8cf72 1822 if (!prepare_signal(sig, t, false))
e62e6650
ON
1823 goto out;
1824
1825 ret = 0;
9e3bd6c3
PE
1826 if (unlikely(!list_empty(&q->list))) {
1827 /*
1828 * If an SI_TIMER entry is already queued, just increment
1829 * the overrun count.
1830 */
9e3bd6c3
PE
1831 BUG_ON(q->info.si_code != SI_TIMER);
1832 q->info.si_overrun++;
163566f6 1833 result = TRACE_SIGNAL_ALREADY_PENDING;
e62e6650 1834 goto out;
9e3bd6c3 1835 }
ba661292 1836 q->info.si_overrun = 0;
9e3bd6c3 1837
9e3bd6c3 1838 signalfd_notify(t, sig);
24122c7f 1839 pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending;
9e3bd6c3
PE
1840 list_add_tail(&q->list, &pending->list);
1841 sigaddset(&pending->signal, sig);
07296149 1842 complete_signal(sig, t, type);
163566f6 1843 result = TRACE_SIGNAL_DELIVERED;
e62e6650 1844out:
24122c7f 1845 trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result);
e62e6650
ON
1846 unlock_task_sighand(t, &flags);
1847ret:
24122c7f 1848 rcu_read_unlock();
e62e6650 1849 return ret;
9e3bd6c3
PE
1850}
1851
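/*
 * Example (illustrative sketch of the posix-timers usage described in
 * the comment above sigqueue_alloc(); error handling trimmed, and the
 * pid/type values are assumptions):
 *
 *	// timer_create(): preallocate so expiry can never hit ENOMEM
 *	struct sigqueue *q = sigqueue_alloc();
 *	if (!q)
 *		return -EAGAIN;
 *
 *	// timer expiry: q->info was set up with SI_TIMER data, so a
 *	// second expiry before dequeue just bumps si_overrun above
 *	ret = send_sigqueue(q, pid, PIDTYPE_TGID);
 *
 *	// timer_delete(): safe even if the entry is still queued
 *	sigqueue_free(q);
 */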
1da177e4
LT
1852/*
1853 * Let a parent know about the death of a child.
1854 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
2b2a1ff6 1855 *
53c8f9f1
ON
1856 * Returns true if our parent ignored us and so we've switched to
1857 * self-reaping.
1da177e4 1858 */
53c8f9f1 1859bool do_notify_parent(struct task_struct *tsk, int sig)
1da177e4 1860{
ae7795bc 1861 struct kernel_siginfo info;
1da177e4
LT
1862 unsigned long flags;
1863 struct sighand_struct *psig;
53c8f9f1 1864 bool autoreap = false;
bde8285e 1865 u64 utime, stime;
1da177e4
LT
1866
1867 BUG_ON(sig == -1);
1868
1869 /* do_notify_parent_cldstop should have been called instead. */
e1abb39c 1870 BUG_ON(task_is_stopped_or_traced(tsk));
1da177e4 1871
d21142ec 1872 BUG_ON(!tsk->ptrace &&
1da177e4
LT
1873 (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1874
b6e238dc
ON
1875 if (sig != SIGCHLD) {
1876 /*
1877 * This is only possible if parent == real_parent.
1878 * Check if it has changed security domain.
1879 */
1880 if (tsk->parent_exec_id != tsk->parent->self_exec_id)
1881 sig = SIGCHLD;
1882 }
1883
faf1f22b 1884 clear_siginfo(&info);
1da177e4
LT
1885 info.si_signo = sig;
1886 info.si_errno = 0;
b488893a 1887 /*
32084504
EB
1888 * We are under tasklist_lock here so our parent is tied to
1889 * us and cannot change.
b488893a 1890 *
32084504
EB
1891 * task_active_pid_ns will always return the same pid namespace
1892 * until a task passes through release_task.
b488893a
PE
1893 *
1894 * write_lock() currently calls preempt_disable() which is the
1895 * same as rcu_read_lock(), but according to Oleg, it is not
1896 * correct to rely on this.
1897 */
1898 rcu_read_lock();
32084504 1899 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
54ba47ed
EB
1900 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1901 task_uid(tsk));
b488893a
PE
1902 rcu_read_unlock();
1903
bde8285e
FW
1904 task_cputime(tsk, &utime, &stime);
1905 info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime);
1906 info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime);
1da177e4
LT
1907
1908 info.si_status = tsk->exit_code & 0x7f;
1909 if (tsk->exit_code & 0x80)
1910 info.si_code = CLD_DUMPED;
1911 else if (tsk->exit_code & 0x7f)
1912 info.si_code = CLD_KILLED;
1913 else {
1914 info.si_code = CLD_EXITED;
1915 info.si_status = tsk->exit_code >> 8;
1916 }
1917
1918 psig = tsk->parent->sighand;
1919 spin_lock_irqsave(&psig->siglock, flags);
d21142ec 1920 if (!tsk->ptrace && sig == SIGCHLD &&
1da177e4
LT
1921 (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1922 (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1923 /*
1924 * We are exiting and our parent doesn't care. POSIX.1
1925 * defines special semantics for setting SIGCHLD to SIG_IGN
1926 * or setting the SA_NOCLDWAIT flag: we should be reaped
1927 * automatically and not left for our parent's wait4 call.
1928 * Rather than having the parent do it as a magic kind of
1929 * signal handler, we just set this to tell do_exit that we
1930 * can be cleaned up without becoming a zombie. Note that
1931 * we still call __wake_up_parent in this case, because a
1932 * blocked sys_wait4 might now return -ECHILD.
1933 *
1934 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1935 * is implementation-defined: we do (if you don't want
1936 * it, just use SIG_IGN instead).
1937 */
53c8f9f1 1938 autoreap = true;
1da177e4 1939 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
53c8f9f1 1940 sig = 0;
1da177e4 1941 }
53c8f9f1 1942 if (valid_signal(sig) && sig)
1da177e4
LT
1943 __group_send_sig_info(sig, &info, tsk->parent);
1944 __wake_up_parent(tsk, tsk->parent);
1945 spin_unlock_irqrestore(&psig->siglock, flags);
2b2a1ff6 1946
53c8f9f1 1947 return autoreap;
1da177e4
LT
1948}
1949
75b95953
TH
1950/**
1951 * do_notify_parent_cldstop - notify parent of stopped/continued state change
1952 * @tsk: task reporting the state change
1953 * @for_ptracer: the notification is for ptracer
1954 * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report
1955 *
1956 * Notify @tsk's parent that the stopped/continued state has changed. If
1957 * @for_ptracer is %false, @tsk's group leader notifies to its real parent.
1958 * If %true, @tsk reports to @tsk->parent which should be the ptracer.
1959 *
1960 * CONTEXT:
1961 * Must be called with tasklist_lock at least read locked.
1962 */
1963static void do_notify_parent_cldstop(struct task_struct *tsk,
1964 bool for_ptracer, int why)
1da177e4 1965{
ae7795bc 1966 struct kernel_siginfo info;
1da177e4 1967 unsigned long flags;
bc505a47 1968 struct task_struct *parent;
1da177e4 1969 struct sighand_struct *sighand;
bde8285e 1970 u64 utime, stime;
1da177e4 1971
75b95953 1972 if (for_ptracer) {
bc505a47 1973 parent = tsk->parent;
75b95953 1974 } else {
bc505a47
ON
1975 tsk = tsk->group_leader;
1976 parent = tsk->real_parent;
1977 }
1978
faf1f22b 1979 clear_siginfo(&info);
1da177e4
LT
1980 info.si_signo = SIGCHLD;
1981 info.si_errno = 0;
b488893a 1982 /*
5aba085e 1983 * see comment in do_notify_parent() about the following 4 lines
b488893a
PE
1984 */
1985 rcu_read_lock();
17cf22c3 1986 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent));
54ba47ed 1987 info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk));
b488893a
PE
1988 rcu_read_unlock();
1989
bde8285e
FW
1990 task_cputime(tsk, &utime, &stime);
1991 info.si_utime = nsec_to_clock_t(utime);
1992 info.si_stime = nsec_to_clock_t(stime);
1da177e4
LT
1993
1994 info.si_code = why;
1995 switch (why) {
1996 case CLD_CONTINUED:
1997 info.si_status = SIGCONT;
1998 break;
1999 case CLD_STOPPED:
2000 info.si_status = tsk->signal->group_exit_code & 0x7f;
2001 break;
2002 case CLD_TRAPPED:
2003 info.si_status = tsk->exit_code & 0x7f;
2004 break;
2005 default:
2006 BUG();
2007 }
2008
2009 sighand = parent->sighand;
2010 spin_lock_irqsave(&sighand->siglock, flags);
2011 if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
2012 !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
2013 __group_send_sig_info(SIGCHLD, &info, parent);
2014 /*
2015 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
2016 */
2017 __wake_up_parent(tsk, parent);
2018 spin_unlock_irqrestore(&sighand->siglock, flags);
2019}
2020
6527de95 2021static inline bool may_ptrace_stop(void)
d5f70c00 2022{
d21142ec 2023 if (!likely(current->ptrace))
6527de95 2024 return false;
d5f70c00
ON
2025 /*
2026 * Are we in the middle of do_coredump?
2027 * If so, and our tracer is also part of the coredump, stopping
2028 * is a deadlock situation and pointless because our tracer
2029 * is dead, so don't allow us to stop.
2030 * If SIGKILL was already sent before the caller unlocked
999d9fc1 2031 * ->siglock we must see ->core_state != NULL. Otherwise it
d5f70c00 2032 * is safe to enter schedule().
9899d11f
ON
2033 *
2034 * This is almost outdated: a task with a pending SIGKILL can't
2035 * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported
2036 * after SIGKILL was already dequeued.
d5f70c00 2037 */
999d9fc1 2038 if (unlikely(current->mm->core_state) &&
d5f70c00 2039 unlikely(current->mm == current->parent->mm))
6527de95 2040 return false;
d5f70c00 2041
6527de95 2042 return true;
d5f70c00
ON
2043}
2044
1a669c2f 2045/*
5aba085e 2046 * Return true if there is a SIGKILL that should be waking us up.
1a669c2f
RM
2047 * Called with the siglock held.
2048 */
f99e9d8c 2049static bool sigkill_pending(struct task_struct *tsk)
1a669c2f 2050{
f99e9d8c
CB
2051 return sigismember(&tsk->pending.signal, SIGKILL) ||
2052 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1a669c2f
RM
2053}
2054
1da177e4
LT
2055/*
2056 * This must be called with current->sighand->siglock held.
2057 *
2058 * This should be the path for all ptrace stops.
2059 * We always set current->last_siginfo while stopped here.
2060 * That makes it a way to test a stopped process for
2061 * being ptrace-stopped vs being job-control-stopped.
2062 *
20686a30
ON
2063 * If we actually decide not to stop at all because the tracer
2064 * is gone, we keep current->exit_code unless clear_code.
1da177e4 2065 */
ae7795bc 2066static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info)
b8401150
NK
2067 __releases(&current->sighand->siglock)
2068 __acquires(&current->sighand->siglock)
1da177e4 2069{
ceb6bd67
TH
2070 bool gstop_done = false;
2071
1a669c2f
RM
2072 if (arch_ptrace_stop_needed(exit_code, info)) {
2073 /*
2074 * The arch code has something special to do before a
2075 * ptrace stop. This is allowed to block, e.g. for faults
2076 * on user stack pages. We can't keep the siglock while
2077 * calling arch_ptrace_stop, so we must release it now.
2078 * To preserve proper semantics, we must do this before
2079 * any signal bookkeeping like checking group_stop_count.
2080 * Meanwhile, a SIGKILL could come in before we retake the
2081 * siglock. That must prevent us from sleeping in TASK_TRACED.
2082 * So after regaining the lock, we must check for SIGKILL.
2083 */
2084 spin_unlock_irq(&current->sighand->siglock);
2085 arch_ptrace_stop(exit_code, info);
2086 spin_lock_irq(&current->sighand->siglock);
3d749b9e
ON
2087 if (sigkill_pending(current))
2088 return;
1a669c2f
RM
2089 }
2090
b5bf9a90
PZ
2091 set_special_state(TASK_TRACED);
2092
1da177e4 2093 /*
81be24b8
TH
2094 * We're committing to trapping. TRACED should be visible before
2095 * TRAPPING is cleared; otherwise, the tracer might fail do_wait().
2096 * Also, transition to TRACED and updates to ->jobctl should be
2097 * atomic with respect to siglock and should be done after the arch
2098 * hook as siglock is released and regrabbed across it.
b5bf9a90
PZ
2099 *
2100 * TRACER TRACEE
2101 *
2102 * ptrace_attach()
2103 * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED)
2104 * do_wait()
2105 * set_current_state() smp_wmb();
2106 * ptrace_do_wait()
2107 * wait_task_stopped()
2108 * task_stopped_code()
2109 * [L] task_is_traced() [S] task_clear_jobctl_trapping();
1da177e4 2110 */
b5bf9a90 2111 smp_wmb();
1da177e4
LT
2112
2113 current->last_siginfo = info;
2114 current->exit_code = exit_code;
2115
d79fdd6d 2116 /*
0ae8ce1c
TH
2117 * If @why is CLD_STOPPED, we're trapping to participate in a group
2118 * stop. Do the bookkeeping. Note that if SIGCONT was delivered
73ddff2b
TH
2119 * across siglock relocks since INTERRUPT was scheduled, PENDING
2120 * could be clear now. We act as if SIGCONT is received after
2121 * TASK_TRACED is entered - ignore it.
d79fdd6d 2122 */
a8f072c1 2123 if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING))
ceb6bd67 2124 gstop_done = task_participate_group_stop(current);
d79fdd6d 2125
fb1d910c 2126 /* any trap clears pending STOP trap, STOP trap clears NOTIFY */
73ddff2b 2127 task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP);
fb1d910c
TH
2128 if (info && info->si_code >> 8 == PTRACE_EVENT_STOP)
2129 task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY);
73ddff2b 2130
81be24b8 2131 /* entering a trap, clear TRAPPING */
a8f072c1 2132 task_clear_jobctl_trapping(current);
d79fdd6d 2133
1da177e4
LT
2134 spin_unlock_irq(&current->sighand->siglock);
2135 read_lock(&tasklist_lock);
3d749b9e 2136 if (may_ptrace_stop()) {
ceb6bd67
TH
2137 /*
2138 * Notify parents of the stop.
2139 *
2140 * While ptraced, there are two parents - the ptracer and
2141 * the real_parent of the group_leader. The ptracer should
2142 * know about every stop while the real parent is only
2143 * interested in the completion of group stop. The states
2144 * for the two don't interact with each other. Notify
2145 * separately unless they're gonna be duplicates.
2146 */
2147 do_notify_parent_cldstop(current, true, why);
bb3696da 2148 if (gstop_done && ptrace_reparented(current))
ceb6bd67
TH
2149 do_notify_parent_cldstop(current, false, why);
2150
53da1d94
MS
2151 /*
2152 * Don't want to allow preemption here, because
2153 * sys_ptrace() needs this task to be inactive.
2154 *
2155 * XXX: implement read_unlock_no_resched().
2156 */
2157 preempt_disable();
1da177e4 2158 read_unlock(&tasklist_lock);
53da1d94 2159 preempt_enable_no_resched();
76f969e8 2160 cgroup_enter_frozen();
5d8f72b5 2161 freezable_schedule();
05b28926 2162 cgroup_leave_frozen(true);
1da177e4
LT
2163 } else {
2164 /*
2165 * By the time we got the lock, our tracer went away.
6405f7f4 2166 * Don't drop the lock yet, another tracer may come.
ceb6bd67
TH
2167 *
2168 * If @gstop_done, the ptracer went away between group stop
2169 * completion and here. During detach, it would have set
a8f072c1
TH
2170 * JOBCTL_STOP_PENDING on us and we'll re-enter
2171 * TASK_STOPPED in do_signal_stop() on return, so notifying
2172 * the real parent of the group stop completion is enough.
1da177e4 2173 */
ceb6bd67
TH
2174 if (gstop_done)
2175 do_notify_parent_cldstop(current, false, why);
2176
9899d11f 2177 /* tasklist protects us from ptrace_freeze_traced() */
6405f7f4 2178 __set_current_state(TASK_RUNNING);
20686a30
ON
2179 if (clear_code)
2180 current->exit_code = 0;
6405f7f4 2181 read_unlock(&tasklist_lock);
1da177e4
LT
2182 }
2183
2184 /*
2185 * We are back. Now reacquire the siglock before touching
2186 * last_siginfo, so that we are sure to have synchronized with
2187 * any signal-sending on another CPU that wants to examine it.
2188 */
2189 spin_lock_irq(&current->sighand->siglock);
2190 current->last_siginfo = NULL;
2191
544b2c91
TH
2192 /* LISTENING can be set only during STOP traps, clear it */
2193 current->jobctl &= ~JOBCTL_LISTENING;
2194
1da177e4
LT
2195 /*
2196 * Queued signals ignored us while we were stopped for tracing.
2197 * So check for any that we should take before resuming user mode.
b74d0deb 2198 * This sets TIF_SIGPENDING, but never clears it.
1da177e4 2199 */
b74d0deb 2200 recalc_sigpending_tsk(current);
1da177e4
LT
2201}
2202
3544d72a 2203static void ptrace_do_notify(int signr, int exit_code, int why)
1da177e4 2204{
ae7795bc 2205 kernel_siginfo_t info;
1da177e4 2206
faf1f22b 2207 clear_siginfo(&info);
3544d72a 2208 info.si_signo = signr;
1da177e4 2209 info.si_code = exit_code;
b488893a 2210 info.si_pid = task_pid_vnr(current);
078de5f7 2211 info.si_uid = from_kuid_munged(current_user_ns(), current_uid());
1da177e4
LT
2212
2213 /* Let the debugger run. */
3544d72a
TH
2214 ptrace_stop(exit_code, why, 1, &info);
2215}
2216
2217void ptrace_notify(int exit_code)
2218{
2219 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
f784e8a7
ON
2220 if (unlikely(current->task_works))
2221 task_work_run();
3544d72a 2222
1da177e4 2223 spin_lock_irq(&current->sighand->siglock);
3544d72a 2224 ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED);
1da177e4
LT
2225 spin_unlock_irq(&current->sighand->siglock);
2226}
2227
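/*
 * Note: the BUG_ON() above encodes the calling convention: the low
 * seven bits must be SIGTRAP and the event code lives in the next
 * byte. An illustrative call (this is what the ptrace_event() helper
 * in <linux/ptrace.h> boils down to):
 *
 *	ptrace_notify((PTRACE_EVENT_EXEC << 8) | SIGTRAP);
 */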
73ddff2b
TH
2228/**
2229 * do_signal_stop - handle group stop for SIGSTOP and other stop signals
2230 * @signr: signr causing group stop if initiating
2231 *
2232 * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr
2233 * and participate in it. If already set, participate in the existing
2234 * group stop. If participated in a group stop (and thus slept), %true is
2235 * returned with siglock released.
2236 *
2237 * If ptraced, this function doesn't handle stop itself. Instead,
2238 * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock
2239 * untouched. The caller must ensure that INTERRUPT trap handling takes
2240 * place afterwards.
2241 *
2242 * CONTEXT:
2243 * Must be called with @current->sighand->siglock held, which is released
2244 * on %true return.
2245 *
2246 * RETURNS:
2247 * %false if group stop is already cancelled or ptrace trap is scheduled.
2248 * %true if participated in group stop.
1da177e4 2249 */
73ddff2b
TH
2250static bool do_signal_stop(int signr)
2251 __releases(&current->sighand->siglock)
1da177e4
LT
2252{
2253 struct signal_struct *sig = current->signal;
1da177e4 2254
a8f072c1 2255 if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
b76808e6 2256 unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
f558b7e4
ON
2257 struct task_struct *t;
2258
a8f072c1
TH
2259 /* signr will be recorded in task->jobctl for retries */
2260 WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK);
d79fdd6d 2261
a8f072c1 2262 if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) ||
573cf9ad 2263 unlikely(signal_group_exit(sig)))
73ddff2b 2264 return false;
1da177e4 2265 /*
408a37de
TH
2266 * There is no group stop already in progress. We must
2267 * initiate one now.
2268 *
2269 * While ptraced, a task may be resumed while group stop is
2270 * still in effect and then receive a stop signal and
2271 * initiate another group stop. This deviates from the
2272 * usual behavior as two consecutive stop signals can't
780006ea
ON
2273 * cause two group stops when !ptraced. That is why we
2274 * also check !task_is_stopped(t) below.
408a37de
TH
2275 *
2276 * The condition can be distinguished by testing whether
2277 * SIGNAL_STOP_STOPPED is already set. Don't generate
2278 * group_exit_code in such case.
2279 *
2280 * This is not necessary for SIGNAL_STOP_CONTINUED because
2281 * an intervening stop signal is required to cause two
2282 * continued events regardless of ptrace.
1da177e4 2283 */
408a37de
TH
2284 if (!(sig->flags & SIGNAL_STOP_STOPPED))
2285 sig->group_exit_code = signr;
1da177e4 2286
7dd3db54
TH
2287 sig->group_stop_count = 0;
2288
2289 if (task_set_jobctl_pending(current, signr | gstop))
2290 sig->group_stop_count++;
1da177e4 2291
8d38f203
ON
2292 t = current;
2293 while_each_thread(current, t) {
1da177e4 2294 /*
a122b341
ON
2295 * Setting state to TASK_STOPPED for a group
2296 * stop is always done with the siglock held,
2297 * so this check has no races.
1da177e4 2298 */
7dd3db54
TH
2299 if (!task_is_stopped(t) &&
2300 task_set_jobctl_pending(t, signr | gstop)) {
ae6d2ed7 2301 sig->group_stop_count++;
fb1d910c
TH
2302 if (likely(!(t->ptrace & PT_SEIZED)))
2303 signal_wake_up(t, 0);
2304 else
2305 ptrace_trap_notify(t);
a122b341 2306 }
d79fdd6d 2307 }
1da177e4 2308 }
73ddff2b 2309
d21142ec 2310 if (likely(!current->ptrace)) {
5224fa36 2311 int notify = 0;
1da177e4 2312
5224fa36
TH
2313 /*
2314 * If there are no other threads in the group, or if there
2315 * is a group stop in progress and we are the last to stop,
2316 * report to the parent.
2317 */
2318 if (task_participate_group_stop(current))
2319 notify = CLD_STOPPED;
2320
b5bf9a90 2321 set_special_state(TASK_STOPPED);
5224fa36
TH
2322 spin_unlock_irq(&current->sighand->siglock);
2323
62bcf9d9
TH
2324 /*
2325 * Notify the parent of the group stop completion. Because
2326 * we're not holding either the siglock or tasklist_lock
2327 * here, ptracer may attach in between; however, this is for
2328 * group stop and should always be delivered to the real
2329 * parent of the group leader. The new ptracer will get
2330 * its notification when this task transitions into
2331 * TASK_TRACED.
2332 */
5224fa36
TH
2333 if (notify) {
2334 read_lock(&tasklist_lock);
62bcf9d9 2335 do_notify_parent_cldstop(current, false, notify);
5224fa36
TH
2336 read_unlock(&tasklist_lock);
2337 }
2338
2339 /* Now we don't run again until woken by SIGCONT or SIGKILL */
76f969e8 2340 cgroup_enter_frozen();
5d8f72b5 2341 freezable_schedule();
73ddff2b 2342 return true;
d79fdd6d 2343 } else {
73ddff2b
TH
2344 /*
2345 * While ptraced, group stop is handled by STOP trap.
2346 * Schedule it and let the caller deal with it.
2347 */
2348 task_set_jobctl_pending(current, JOBCTL_TRAP_STOP);
2349 return false;
ae6d2ed7 2350 }
73ddff2b 2351}
1da177e4 2352
73ddff2b
TH
2353/**
2354 * do_jobctl_trap - take care of ptrace jobctl traps
2355 *
3544d72a
TH
2356 * When PT_SEIZED, it's used for both group stop and explicit
2357 * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with
2358 * accompanying siginfo. If stopped, lower eight bits of exit_code contain
2359 * the stop signal; otherwise, %SIGTRAP.
2360 *
2361 * When !PT_SEIZED, it's used only for group stop trap with stop signal
2362 * number as exit_code and no siginfo.
73ddff2b
TH
2363 *
2364 * CONTEXT:
2365 * Must be called with @current->sighand->siglock held, which may be
2366 * released and re-acquired before returning with intervening sleep.
2367 */
2368static void do_jobctl_trap(void)
2369{
3544d72a 2370 struct signal_struct *signal = current->signal;
73ddff2b 2371 int signr = current->jobctl & JOBCTL_STOP_SIGMASK;
ae6d2ed7 2372
3544d72a
TH
2373 if (current->ptrace & PT_SEIZED) {
2374 if (!signal->group_stop_count &&
2375 !(signal->flags & SIGNAL_STOP_STOPPED))
2376 signr = SIGTRAP;
2377 WARN_ON_ONCE(!signr);
2378 ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8),
2379 CLD_STOPPED);
2380 } else {
2381 WARN_ON_ONCE(!signr);
2382 ptrace_stop(signr, CLD_STOPPED, 0, NULL);
2383 current->exit_code = 0;
ae6d2ed7 2384 }
1da177e4
LT
2385}
2386
76f969e8
RG
2387/**
2388 * do_freezer_trap - handle the freezer jobctl trap
2389 *
2390 * Puts the task into the frozen state, unless the task is about to
2391 * quit; in that case JOBCTL_TRAP_FREEZE is dropped instead.
2392 *
2393 * CONTEXT:
2394 * Must be called with @current->sighand->siglock held,
2395 * which is always released before returning.
2396 */
2397static void do_freezer_trap(void)
2398 __releases(&current->sighand->siglock)
2399{
2400 /*
2401 * If there are other trap bits pending besides JOBCTL_TRAP_FREEZE,
2402 * loop once more to give them a chance to be handled.
2403 * In any case, we'll return back.
2404 */
2405 if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) !=
2406 JOBCTL_TRAP_FREEZE) {
2407 spin_unlock_irq(&current->sighand->siglock);
2408 return;
2409 }
2410
2411 /*
2412 * Now we're sure that there is no pending fatal signal and no
2413 * pending traps. Clear TIF_SIGPENDING so that we do not bail out of
2414 * schedule() immediately (if there is a non-fatal signal pending),
2415 * and put the task to sleep.
2416 */
2417 __set_current_state(TASK_INTERRUPTIBLE);
2418 clear_thread_flag(TIF_SIGPENDING);
2419 spin_unlock_irq(&current->sighand->siglock);
2420 cgroup_enter_frozen();
2421 freezable_schedule();
2422}
2423
ae7795bc 2424static int ptrace_signal(int signr, kernel_siginfo_t *info)
18c98b65 2425{
8a352418
ON
2426 /*
2427 * We do not check sig_kernel_stop(signr) but set this marker
2428 * unconditionally because we do not know whether debugger will
2429 * change signr. This flag has no meaning unless we are going
2430 * to stop after return from ptrace_stop(). In this case it will
2431 * be checked in do_signal_stop(); we should only stop if it was
2432 * not cleared by SIGCONT while we were sleeping. See also the
2433 * comment in dequeue_signal().
2434 */
2435 current->jobctl |= JOBCTL_STOP_DEQUEUED;
fe1bc6a0 2436 ptrace_stop(signr, CLD_TRAPPED, 0, info);
18c98b65
RM
2437
2438 /* We're back. Did the debugger cancel the sig? */
2439 signr = current->exit_code;
2440 if (signr == 0)
2441 return signr;
2442
2443 current->exit_code = 0;
2444
5aba085e
RD
2445 /*
2446 * Update the siginfo structure if the signal has
2447 * changed. If the debugger wanted something
2448 * specific in the siginfo structure then it should
2449 * have updated *info via PTRACE_SETSIGINFO.
2450 */
18c98b65 2451 if (signr != info->si_signo) {
faf1f22b 2452 clear_siginfo(info);
18c98b65
RM
2453 info->si_signo = signr;
2454 info->si_errno = 0;
2455 info->si_code = SI_USER;
6b550f94 2456 rcu_read_lock();
18c98b65 2457 info->si_pid = task_pid_vnr(current->parent);
54ba47ed
EB
2458 info->si_uid = from_kuid_munged(current_user_ns(),
2459 task_uid(current->parent));
6b550f94 2460 rcu_read_unlock();
18c98b65
RM
2461 }
2462
2463 /* If the (new) signal is now blocked, requeue it. */
2464 if (sigismember(&current->blocked, signr)) {
b21c5bd5 2465 send_signal(signr, info, current, PIDTYPE_PID);
18c98b65
RM
2466 signr = 0;
2467 }
2468
2469 return signr;
2470}
2471
20ab7218 2472bool get_signal(struct ksignal *ksig)
1da177e4 2473{
f6b76d4f
ON
2474 struct sighand_struct *sighand = current->sighand;
2475 struct signal_struct *signal = current->signal;
2476 int signr;
1da177e4 2477
f784e8a7
ON
2478 if (unlikely(current->task_works))
2479 task_work_run();
72667028 2480
0326f5a9 2481 if (unlikely(uprobe_deny_signal()))
20ab7218 2482 return false;
0326f5a9 2483
13b1c3d4 2484 /*
5d8f72b5
ON
2485 * Do this once, we can't return to user-mode if freezing() == T.
2486 * do_signal_stop() and ptrace_stop() do freezable_schedule() and
2487 * thus do not need another check after return.
13b1c3d4 2488 */
fc558a74
RW
2489 try_to_freeze();
2490
5d8f72b5 2491relock:
f6b76d4f 2492 spin_lock_irq(&sighand->siglock);
021e1ae3
ON
2493 /*
2494 * Every stopped thread goes here after wakeup. Check to see if
2495 * we should notify the parent; prepare_signal(SIGCONT) encodes
2496 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
2497 */
f6b76d4f 2498 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
c672af35
TH
2499 int why;
2500
2501 if (signal->flags & SIGNAL_CLD_CONTINUED)
2502 why = CLD_CONTINUED;
2503 else
2504 why = CLD_STOPPED;
2505
f6b76d4f 2506 signal->flags &= ~SIGNAL_CLD_MASK;
e4420551 2507
ae6d2ed7 2508 spin_unlock_irq(&sighand->siglock);
fa00b80b 2509
ceb6bd67
TH
2510 /*
2511 * Notify the parent that we're continuing. This event is
2512 * always per-process and doesn't make a whole lot of sense
2513 * for ptracers, who shouldn't consume the state via
2514 * wait(2) either, but, for backward compatibility, notify
2515 * the ptracer of the group leader too unless it's gonna be
2516 * a duplicate.
2517 */
edf2ed15 2518 read_lock(&tasklist_lock);
ceb6bd67
TH
2519 do_notify_parent_cldstop(current, false, why);
2520
bb3696da
ON
2521 if (ptrace_reparented(current->group_leader))
2522 do_notify_parent_cldstop(current->group_leader,
2523 true, why);
edf2ed15 2524 read_unlock(&tasklist_lock);
ceb6bd67 2525
e4420551
ON
2526 goto relock;
2527 }
2528
35634ffa 2529 /* Has this task already been marked for death? */
cf43a757
EB
2530 if (signal_group_exit(signal)) {
2531 ksig->info.si_signo = signr = SIGKILL;
2532 sigdelset(&current->pending.signal, SIGKILL);
2533 recalc_sigpending();
35634ffa 2534 goto fatal;
cf43a757 2535 }
35634ffa 2536
1da177e4
LT
2537 for (;;) {
2538 struct k_sigaction *ka;
1be53963 2539
dd1d6772
TH
2540 if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
2541 do_signal_stop(0))
7bcf6a2c 2542 goto relock;
1be53963 2543
76f969e8
RG
2544 if (unlikely(current->jobctl &
2545 (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) {
2546 if (current->jobctl & JOBCTL_TRAP_MASK) {
2547 do_jobctl_trap();
2548 spin_unlock_irq(&sighand->siglock);
2549 } else if (current->jobctl & JOBCTL_TRAP_FREEZE)
2550 do_freezer_trap();
2551
2552 goto relock;
2553 }
2554
2555 /*
2556 * If the task is leaving the frozen state, let's update
2557 * cgroup counters and reset the frozen bit.
2558 */
2559 if (unlikely(cgroup_task_frozen(current))) {
73ddff2b 2560 spin_unlock_irq(&sighand->siglock);
cb2c4cd8 2561 cgroup_leave_frozen(false);
73ddff2b
TH
2562 goto relock;
2563 }
1da177e4 2564
7146db33
EB
2565 /*
2566 * Signals generated by the execution of an instruction
2567 * need to be delivered before any other pending signals
2568 * so that the instruction pointer in the signal stack
2569 * frame points to the faulting instruction.
2570 */
2571 signr = dequeue_synchronous_signal(&ksig->info);
2572 if (!signr)
2573 signr = dequeue_signal(current, &current->blocked, &ksig->info);
7bcf6a2c 2574
dd1d6772
TH
2575 if (!signr)
2576 break; /* will return 0 */
7bcf6a2c 2577
8a352418 2578 if (unlikely(current->ptrace) && signr != SIGKILL) {
828b1f65 2579 signr = ptrace_signal(signr, &ksig->info);
dd1d6772
TH
2580 if (!signr)
2581 continue;
1da177e4
LT
2582 }
2583
dd1d6772
TH
2584 ka = &sighand->action[signr-1];
2585
f9d4257e 2586 /* Trace actually delivered signals. */
828b1f65 2587 trace_signal_deliver(signr, &ksig->info, ka);
f9d4257e 2588
1da177e4
LT
2589 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
2590 continue;
2591 if (ka->sa.sa_handler != SIG_DFL) {
2592 /* Run the handler. */
828b1f65 2593 ksig->ka = *ka;
1da177e4
LT
2594
2595 if (ka->sa.sa_flags & SA_ONESHOT)
2596 ka->sa.sa_handler = SIG_DFL;
2597
2598 break; /* will return non-zero "signr" value */
2599 }
2600
2601 /*
2602 * Now we are doing the default action for this signal.
2603 */
2604 if (sig_kernel_ignore(signr)) /* Default is nothing. */
2605 continue;
2606
84d73786 2607 /*
0fbc26a6 2608 * Global init gets no signals it doesn't want.
b3bfa0cb
SB
2609 * Container-init gets no signals it doesn't want from the same
2610 * container.
2611 *
2612 * Note that if global/container-init sees a sig_kernel_only()
2613 * signal here, the signal must have been generated internally
2614 * or must have come from an ancestor namespace. In either
2615 * case, the signal cannot be dropped.
84d73786 2616 */
fae5fa44 2617 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
b3bfa0cb 2618 !sig_kernel_only(signr))
1da177e4
LT
2619 continue;
2620
2621 if (sig_kernel_stop(signr)) {
2622 /*
2623 * The default action is to stop all threads in
2624 * the thread group. The job control signals
2625 * do nothing in an orphaned pgrp, but SIGSTOP
2626 * always works. Note that siglock needs to be
2627 * dropped during the call to is_orphaned_pgrp()
2628 * because of lock ordering with tasklist_lock.
2629 * This allows an intervening SIGCONT to be posted.
2630 * We need to check for that and bail out if necessary.
2631 */
2632 if (signr != SIGSTOP) {
f6b76d4f 2633 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2634
2635 /* signals can be posted during this window */
2636
3e7cd6c4 2637 if (is_current_pgrp_orphaned())
1da177e4
LT
2638 goto relock;
2639
f6b76d4f 2640 spin_lock_irq(&sighand->siglock);
1da177e4
LT
2641 }
2642
828b1f65 2643 if (likely(do_signal_stop(ksig->info.si_signo))) {
1da177e4
LT
2644 /* It released the siglock. */
2645 goto relock;
2646 }
2647
2648 /*
2649 * We didn't actually stop, due to a race
2650 * with SIGCONT or something like that.
2651 */
2652 continue;
2653 }
2654
35634ffa 2655 fatal:
f6b76d4f 2656 spin_unlock_irq(&sighand->siglock);
f2b31bb5
RG
2657 if (unlikely(cgroup_task_frozen(current)))
2658 cgroup_leave_frozen(true);
1da177e4
LT
2659
2660 /*
2661 * Anything else is fatal, maybe with a core dump.
2662 */
2663 current->flags |= PF_SIGNALED;
2dce81bf 2664
1da177e4 2665 if (sig_kernel_coredump(signr)) {
2dce81bf 2666 if (print_fatal_signals)
828b1f65 2667 print_fatal_signal(ksig->info.si_signo);
2b5faa4c 2668 proc_coredump_connector(current);
1da177e4
LT
2669 /*
2670 * If it was able to dump core, this kills all
2671 * other threads in the group and synchronizes with
2672 * their demise. If we lost the race with another
2673 * thread getting here, it set group_exit_code
2674 * first and our do_group_exit call below will use
2675 * that value and ignore the one we pass it.
2676 */
828b1f65 2677 do_coredump(&ksig->info);
1da177e4
LT
2678 }
2679
2680 /*
2681 * Death signals, no core dump.
2682 */
828b1f65 2683 do_group_exit(ksig->info.si_signo);
1da177e4
LT
2684 /* NOTREACHED */
2685 }
f6b76d4f 2686 spin_unlock_irq(&sighand->siglock);
828b1f65
RW
2687
2688 ksig->sig = signr;
2689 return ksig->sig > 0;
1da177e4
LT
2690}
2691
5e6292c0 2692/**
efee984c 2693 * signal_delivered - called after a signal has been successfully delivered
10b1c7ac 2694 * @ksig: kernel signal struct
efee984c 2695 * @stepping: nonzero if debugger single-step or block-step in use
5e6292c0 2696 *
e227867f 2697 * This function should be called when a signal has successfully been
10b1c7ac 2698 * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask
efee984c 2699 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
10b1c7ac 2700 * is set in @ksig->ka.sa.sa_flags. Tracing is notified.
5e6292c0 2701 */
10b1c7ac 2702static void signal_delivered(struct ksignal *ksig, int stepping)
5e6292c0
MF
2703{
2704 sigset_t blocked;
2705
a610d6e6
AV
2706 /* A signal was successfully delivered; the saved sigmask was
2707 stored on the signal frame and will be restored by sigreturn,
2708 so we can simply clear the restore-sigmask flag. */
2710 clear_restore_sigmask();
2711
10b1c7ac
RW
2712 sigorsets(&blocked, &current->blocked, &ksig->ka.sa.sa_mask);
2713 if (!(ksig->ka.sa.sa_flags & SA_NODEFER))
2714 sigaddset(&blocked, ksig->sig);
5e6292c0 2715 set_current_blocked(&blocked);
df5601f9 2716 tracehook_signal_handler(stepping);
5e6292c0
MF
2717}
2718
2ce5da17
AV
2719void signal_setup_done(int failed, struct ksignal *ksig, int stepping)
2720{
2721 if (failed)
cb44c9a0 2722 force_sigsegv(ksig->sig);
2ce5da17 2723 else
10b1c7ac 2724 signal_delivered(ksig, stepping);
2ce5da17
AV
2725}
2726
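/*
 * Example (illustrative sketch): how an architecture's signal code is
 * expected to tie get_signal() and signal_setup_done() together.
 * setup_rt_frame() and the single-step test are per-arch details and
 * only assumptions here.
 *
 *	static void example_arch_do_signal(struct pt_regs *regs)
 *	{
 *		struct ksignal ksig;
 *
 *		if (get_signal(&ksig)) {
 *			// build the user-space frame; on failure this
 *			// forces SIGSEGV via signal_setup_done()
 *			int failed = setup_rt_frame(&ksig, regs);
 *
 *			signal_setup_done(failed, &ksig,
 *					  test_thread_flag(TIF_SINGLESTEP));
 *			return;
 *		}
 *		// no signal: handle syscall restart, restore saved sigmask
 *	}
 */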
0edceb7b
ON
2727/*
2728 * It could be that complete_signal() picked us to notify about the
fec9993d
ON
2729 * group-wide signal. Other threads should be notified now to take
2730 * the shared signals in @which since we will not.
0edceb7b 2731 */
f646e227 2732static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
0edceb7b 2733{
f646e227 2734 sigset_t retarget;
0edceb7b
ON
2735 struct task_struct *t;
2736
f646e227
ON
2737 sigandsets(&retarget, &tsk->signal->shared_pending.signal, which);
2738 if (sigisemptyset(&retarget))
2739 return;
2740
0edceb7b
ON
2741 t = tsk;
2742 while_each_thread(tsk, t) {
fec9993d
ON
2743 if (t->flags & PF_EXITING)
2744 continue;
2745
2746 if (!has_pending_signals(&retarget, &t->blocked))
2747 continue;
2748 /* Remove the signals this thread can handle. */
2749 sigandsets(&retarget, &retarget, &t->blocked);
2750
2751 if (!signal_pending(t))
2752 signal_wake_up(t, 0);
2753
2754 if (sigisemptyset(&retarget))
2755 break;
0edceb7b
ON
2756 }
2757}
2758
d12619b5
ON
2759void exit_signals(struct task_struct *tsk)
2760{
2761 int group_stop = 0;
f646e227 2762 sigset_t unblocked;
d12619b5 2763
77e4ef99
TH
2764 /*
2765 * @tsk is about to have PF_EXITING set - lock out users which
2766 * expect stable threadgroup.
2767 */
780de9dd 2768 cgroup_threadgroup_change_begin(tsk);
77e4ef99 2769
5dee1707
ON
2770 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2771 tsk->flags |= PF_EXITING;
780de9dd 2772 cgroup_threadgroup_change_end(tsk);
5dee1707 2773 return;
d12619b5
ON
2774 }
2775
5dee1707 2776 spin_lock_irq(&tsk->sighand->siglock);
d12619b5
ON
2777 /*
2778 * From now this task is not visible for group-wide signals,
2779 * see wants_signal(), do_signal_stop().
2780 */
2781 tsk->flags |= PF_EXITING;
77e4ef99 2782
780de9dd 2783 cgroup_threadgroup_change_end(tsk);
77e4ef99 2784
5dee1707
ON
2785 if (!signal_pending(tsk))
2786 goto out;
2787
f646e227
ON
2788 unblocked = tsk->blocked;
2789 signotset(&unblocked);
2790 retarget_shared_pending(tsk, &unblocked);
5dee1707 2791
a8f072c1 2792 if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) &&
e5c1902e 2793 task_participate_group_stop(tsk))
edf2ed15 2794 group_stop = CLD_STOPPED;
5dee1707 2795out:
d12619b5
ON
2796 spin_unlock_irq(&tsk->sighand->siglock);
2797
62bcf9d9
TH
2798 /*
2799 * If group stop has completed, deliver the notification. This
2800 * should always go to the real parent of the group leader.
2801 */
ae6d2ed7 2802 if (unlikely(group_stop)) {
d12619b5 2803 read_lock(&tasklist_lock);
62bcf9d9 2804 do_notify_parent_cldstop(tsk, false, group_stop);
d12619b5
ON
2805 read_unlock(&tasklist_lock);
2806 }
2807}
2808
1da177e4
LT
2809/*
2810 * System call entry points.
2811 */
2812
41c57892
RD
2813/**
2814 * sys_restart_syscall - restart a system call
2815 */
754fe8d2 2816SYSCALL_DEFINE0(restart_syscall)
1da177e4 2817{
f56141e3 2818 struct restart_block *restart = &current->restart_block;
1da177e4
LT
2819 return restart->fn(restart);
2820}
2821
2822long do_no_restart_syscall(struct restart_block *param)
2823{
2824 return -EINTR;
2825}
2826
b182801a
ON
2827static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2828{
2829 if (signal_pending(tsk) && !thread_group_empty(tsk)) {
2830 sigset_t newblocked;
2831 /* A set of now blocked but previously unblocked signals. */
702a5073 2832 sigandnsets(&newblocked, newset, &current->blocked);
b182801a
ON
2833 retarget_shared_pending(tsk, &newblocked);
2834 }
2835 tsk->blocked = *newset;
2836 recalc_sigpending();
2837}
2838
e6fa16ab
ON
2839/**
2840 * set_current_blocked - change current->blocked mask
2841 * @newset: new mask
2842 *
2843 * It is wrong to change ->blocked directly; this helper should be used
2844 * to ensure the process can't miss a shared signal we are going to block.
1da177e4 2845 */
77097ae5
AV
2846void set_current_blocked(sigset_t *newset)
2847{
77097ae5 2848 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
0c4a8423 2849 __set_current_blocked(newset);
77097ae5
AV
2850}
2851
2852void __set_current_blocked(const sigset_t *newset)
e6fa16ab
ON
2853{
2854 struct task_struct *tsk = current;
2855
c7be96af
WL
2856 /*
2857 * In case the signal mask hasn't changed, there is nothing we need
2858 * to do. The current->blocked shouldn't be modified by other task.
2859 */
2860 if (sigequalsets(&tsk->blocked, newset))
2861 return;
2862
e6fa16ab 2863 spin_lock_irq(&tsk->sighand->siglock);
b182801a 2864 __set_task_blocked(tsk, newset);
e6fa16ab
ON
2865 spin_unlock_irq(&tsk->sighand->siglock);
2866}
1da177e4
LT
2867
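/*
 * Example (illustrative sketch) of the rule documented above: never
 * assign to ->blocked directly, or a shared pending signal may be
 * missed instead of being retargeted to another thread.
 *
 *	sigset_t newset;
 *
 *	siginitset(&newset, sigmask(SIGUSR1));
 *	current->blocked = newset;	// wrong: bypasses retargeting
 *	set_current_blocked(&newset);	// correct; also refuses to
 *					// block SIGKILL/SIGSTOP
 */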
2868/*
2869 * This is also useful for kernel threads that want to temporarily
2870 * (or permanently) block certain signals.
2871 *
2872 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2873 * interface happily blocks "unblockable" signals like SIGKILL
2874 * and friends.
2875 */
2876int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2877{
73ef4aeb
ON
2878 struct task_struct *tsk = current;
2879 sigset_t newset;
1da177e4 2880
73ef4aeb 2881 /* Lockless, only current can change ->blocked, never from irq */
a26fd335 2882 if (oldset)
73ef4aeb 2883 *oldset = tsk->blocked;
a26fd335 2884
1da177e4
LT
2885 switch (how) {
2886 case SIG_BLOCK:
73ef4aeb 2887 sigorsets(&newset, &tsk->blocked, set);
1da177e4
LT
2888 break;
2889 case SIG_UNBLOCK:
702a5073 2890 sigandnsets(&newset, &tsk->blocked, set);
1da177e4
LT
2891 break;
2892 case SIG_SETMASK:
73ef4aeb 2893 newset = *set;
1da177e4
LT
2894 break;
2895 default:
73ef4aeb 2896 return -EINVAL;
1da177e4 2897 }
a26fd335 2898
77097ae5 2899 __set_current_blocked(&newset);
73ef4aeb 2900 return 0;
1da177e4 2901}
fb50f5a4 2902EXPORT_SYMBOL(sigprocmask);
1da177e4 2903
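/*
 * Example (illustrative sketch): a kernel thread shielding itself
 * while doing critical work. Unlike the user-mode syscall, nothing
 * here prevents masking "unblockable" signals, as noted in the
 * comment above.
 *
 *	sigset_t blocked, oldset;
 *
 *	siginitset(&blocked, sigmask(SIGHUP) | sigmask(SIGTERM));
 *	sigprocmask(SIG_BLOCK, &blocked, &oldset);
 *	... critical section ...
 *	sigprocmask(SIG_SETMASK, &oldset, NULL);
 */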
ded653cc
DD
2904/*
2905 * This API helps set app-provided sigmasks.
2906 *
2907 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2908 * epoll_pwait where a new sigmask is passed from userland for the syscalls.
2909 */
2910int set_user_sigmask(const sigset_t __user *usigmask, sigset_t *set,
2911 sigset_t *oldset, size_t sigsetsize)
2912{
2913 if (!usigmask)
2914 return 0;
2915
2916 if (sigsetsize != sizeof(sigset_t))
2917 return -EINVAL;
2918 if (copy_from_user(set, usigmask, sizeof(sigset_t)))
2919 return -EFAULT;
2920
2921 *oldset = current->blocked;
2922 set_current_blocked(set);
2923
2924 return 0;
2925}
2926EXPORT_SYMBOL(set_user_sigmask);
2927
2928#ifdef CONFIG_COMPAT
2929int set_compat_user_sigmask(const compat_sigset_t __user *usigmask,
2930 sigset_t *set, sigset_t *oldset,
2931 size_t sigsetsize)
2932{
2933 if (!usigmask)
2934 return 0;
2935
2936 if (sigsetsize != sizeof(compat_sigset_t))
2937 return -EINVAL;
2938 if (get_compat_sigset(set, usigmask))
2939 return -EFAULT;
2940
2941 *oldset = current->blocked;
2942 set_current_blocked(set);
2943
2944 return 0;
2945}
2946EXPORT_SYMBOL(set_compat_user_sigmask);
2947#endif
2948
854a6ed5
DD
2949/*
2950 * restore_user_sigmask:
2951 * usigmask: sigmask passed in from userland.
2952 * sigsaved: the sigmask saved when the syscall started, before it was
2953 * changed to usigmask.
2954 *
2955 * This is useful for syscalls such as ppoll, pselect, io_pgetevents and
2956 * epoll_pwait where a new sigmask is passed in from userland for the syscalls.
2957 */
2958void restore_user_sigmask(const void __user *usigmask, sigset_t *sigsaved)
2959{
2960
2961 if (!usigmask)
2962 return;
2963 /*
2964 * When signals are pending, do not restore the sigmask here.
2965 * Restoring sigmask here can lead to delivering signals that the above
2966 * syscalls are intended to block because of the sigmask passed in.
2967 */
2968 if (signal_pending(current)) {
2969 current->saved_sigmask = *sigsaved;
2970 set_restore_sigmask();
2971 return;
2972 }
2973
2974 /*
2975 * This is needed because the fast syscall return path does not restore
2976 * saved_sigmask when signals are not pending.
2977 */
2978 set_current_blocked(sigsaved);
2979}
2980EXPORT_SYMBOL(restore_user_sigmask);
2981
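/*
 * Example (illustrative sketch of the ppoll/pselect pattern these two
 * helpers exist for; do_the_actual_work() is a placeholder):
 *
 *	sigset_t ksigmask, sigsaved;
 *	int ret;
 *
 *	ret = set_user_sigmask(usigmask, &ksigmask, &sigsaved, sigsetsize);
 *	if (ret)
 *		return ret;
 *
 *	ret = do_the_actual_work(...);	// may be interrupted by a signal
 *
 *	// defers the restore via saved_sigmask if a signal is pending
 *	restore_user_sigmask(usigmask, &sigsaved);
 *	return ret;
 */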
41c57892
RD
2982/**
2983 * sys_rt_sigprocmask - change the list of currently blocked signals
2984 * @how: whether to add, remove, or set signals
ada9c933 2985 * @nset: new set of blocked signals, or NULL
41c57892
RD
2986 * @oset: previous value of signal mask if non-null
2987 * @sigsetsize: size of sigset_t type
2988 */
bb7efee2 2989SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset,
17da2bd9 2990 sigset_t __user *, oset, size_t, sigsetsize)
1da177e4 2991{
1da177e4 2992 sigset_t old_set, new_set;
bb7efee2 2993 int error;
1da177e4
LT
2994
2995 /* XXX: Don't preclude handling different sized sigset_t's. */
2996 if (sigsetsize != sizeof(sigset_t))
bb7efee2 2997 return -EINVAL;
1da177e4 2998
bb7efee2
ON
2999 old_set = current->blocked;
3000
3001 if (nset) {
3002 if (copy_from_user(&new_set, nset, sizeof(sigset_t)))
3003 return -EFAULT;
1da177e4
LT
3004 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3005
bb7efee2 3006 error = sigprocmask(how, &new_set, NULL);
1da177e4 3007 if (error)
bb7efee2
ON
3008 return error;
3009 }
1da177e4 3010
bb7efee2
ON
3011 if (oset) {
3012 if (copy_to_user(oset, &old_set, sizeof(sigset_t)))
3013 return -EFAULT;
1da177e4 3014 }
bb7efee2
ON
3015
3016 return 0;
1da177e4
LT
3017}
3018
322a56cb 3019#ifdef CONFIG_COMPAT
322a56cb
AV
3020COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset,
3021 compat_sigset_t __user *, oset, compat_size_t, sigsetsize)
1da177e4 3022{
322a56cb
AV
3023 sigset_t old_set = current->blocked;
3024
3025 /* XXX: Don't preclude handling different sized sigset_t's. */
3026 if (sigsetsize != sizeof(sigset_t))
3027 return -EINVAL;
3028
3029 if (nset) {
322a56cb
AV
3030 sigset_t new_set;
3031 int error;
3968cf62 3032 if (get_compat_sigset(&new_set, nset))
322a56cb 3033 return -EFAULT;
322a56cb
AV
3034 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3035
3036 error = sigprocmask(how, &new_set, NULL);
3037 if (error)
3038 return error;
3039 }
f454322e 3040 return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0;
322a56cb
AV
3041}
3042#endif
1da177e4 3043
b1d294c8 3044static void do_sigpending(sigset_t *set)
1da177e4 3045{
1da177e4 3046 spin_lock_irq(&current->sighand->siglock);
fe9c1db2 3047 sigorsets(set, &current->pending.signal,
1da177e4
LT
3048 &current->signal->shared_pending.signal);
3049 spin_unlock_irq(&current->sighand->siglock);
3050
3051 /* Outside the lock because only this thread touches it. */
fe9c1db2 3052 sigandsets(set, &current->blocked, set);
5aba085e 3053}
1da177e4 3054
41c57892
RD
3055/**
3056 * sys_rt_sigpending - examine a pending signal that has been raised
3057 * while blocked
20f22ab4 3058 * @uset: stores pending signals
41c57892
RD
3059 * @sigsetsize: size of sigset_t type or smaller
3060 */
fe9c1db2 3061SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize)
1da177e4 3062{
fe9c1db2 3063 sigset_t set;
176826af
DL
3064
3065 if (sigsetsize > sizeof(*uset))
3066 return -EINVAL;
3067
b1d294c8
CB
3068 do_sigpending(&set);
3069
3070 if (copy_to_user(uset, &set, sigsetsize))
3071 return -EFAULT;
3072
3073 return 0;
fe9c1db2
AV
3074}
3075
3076#ifdef CONFIG_COMPAT
fe9c1db2
AV
3077COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset,
3078 compat_size_t, sigsetsize)
1da177e4 3079{
fe9c1db2 3080 sigset_t set;
176826af
DL
3081
3082 if (sigsetsize > sizeof(*uset))
3083 return -EINVAL;
3084
b1d294c8
CB
3085 do_sigpending(&set);
3086
3087 return put_compat_sigset(uset, &set, sigsetsize);
1da177e4 3088}
fe9c1db2 3089#endif
1da177e4 3090
4ce5f9c9
EB
3091static const struct {
3092 unsigned char limit, layout;
3093} sig_sicodes[] = {
3094 [SIGILL] = { NSIGILL, SIL_FAULT },
3095 [SIGFPE] = { NSIGFPE, SIL_FAULT },
3096 [SIGSEGV] = { NSIGSEGV, SIL_FAULT },
3097 [SIGBUS] = { NSIGBUS, SIL_FAULT },
3098 [SIGTRAP] = { NSIGTRAP, SIL_FAULT },
3099#if defined(SIGEMT)
3100 [SIGEMT] = { NSIGEMT, SIL_FAULT },
3101#endif
3102 [SIGCHLD] = { NSIGCHLD, SIL_CHLD },
3103 [SIGPOLL] = { NSIGPOLL, SIL_POLL },
3104 [SIGSYS] = { NSIGSYS, SIL_SYS },
3105};
3106
b2a2ab52 3107static bool known_siginfo_layout(unsigned sig, int si_code)
4ce5f9c9
EB
3108{
3109 if (si_code == SI_KERNEL)
3110 return true;
3111 else if ((si_code > SI_USER)) {
3112 if (sig_specific_sicodes(sig)) {
3113 if (si_code <= sig_sicodes[sig].limit)
3114 return true;
3115 }
3116 else if (si_code <= NSIGPOLL)
3117 return true;
3118 }
3119 else if (si_code >= SI_DETHREAD)
3120 return true;
3121 else if (si_code == SI_ASYNCNL)
3122 return true;
3123 return false;
3124}
3125
a3670058 3126enum siginfo_layout siginfo_layout(unsigned sig, int si_code)
cc731525
EB
3127{
3128 enum siginfo_layout layout = SIL_KILL;
3129 if ((si_code > SI_USER) && (si_code < SI_KERNEL)) {
4ce5f9c9
EB
3130 if ((sig < ARRAY_SIZE(sig_sicodes)) &&
3131 (si_code <= sig_sicodes[sig].limit)) {
3132 layout = sig_sicodes[sig].layout;
31931c93
EB
3133 /* Handle the exceptions */
3134 if ((sig == SIGBUS) &&
3135 (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO))
3136 layout = SIL_FAULT_MCEERR;
3137 else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR))
3138 layout = SIL_FAULT_BNDERR;
3139#ifdef SEGV_PKUERR
3140 else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR))
3141 layout = SIL_FAULT_PKUERR;
3142#endif
3143 }
cc731525
EB
3144 else if (si_code <= NSIGPOLL)
3145 layout = SIL_POLL;
3146 } else {
3147 if (si_code == SI_TIMER)
3148 layout = SIL_TIMER;
3149 else if (si_code == SI_SIGIO)
3150 layout = SIL_POLL;
3151 else if (si_code < 0)
3152 layout = SIL_RT;
cc731525
EB
3153 }
3154 return layout;
3155}
3156
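/*
 * A few classifications that follow from sig_sicodes[] and the special
 * cases above (illustrative, assuming the usual si_code values):
 *
 *	siginfo_layout(SIGSEGV, SEGV_MAPERR)   == SIL_FAULT
 *	siginfo_layout(SIGSEGV, SEGV_BNDERR)   == SIL_FAULT_BNDERR
 *	siginfo_layout(SIGBUS,  BUS_MCEERR_AR) == SIL_FAULT_MCEERR
 *	siginfo_layout(SIGCHLD, CLD_EXITED)    == SIL_CHLD
 *	siginfo_layout(SIGUSR1, SI_TIMER)      == SIL_TIMER
 *	siginfo_layout(SIGUSR1, SI_QUEUE)      == SIL_RT
 */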
4ce5f9c9
EB
3157static inline char __user *si_expansion(const siginfo_t __user *info)
3158{
3159 return ((char __user *)info) + sizeof(struct kernel_siginfo);
3160}
3161
ae7795bc 3162int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from)
1da177e4 3163{
4ce5f9c9 3164 char __user *expansion = si_expansion(to);
ae7795bc 3165 if (copy_to_user(to, from , sizeof(struct kernel_siginfo)))
1da177e4 3166 return -EFAULT;
4ce5f9c9 3167 if (clear_user(expansion, SI_EXPANSION_SIZE))
1da177e4 3168 return -EFAULT;
c999b933 3169 return 0;
1da177e4
LT
3170}
3171
601d5abf
EB
3172static int post_copy_siginfo_from_user(kernel_siginfo_t *info,
3173 const siginfo_t __user *from)
4cd2e0e7 3174{
601d5abf 3175 if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) {
4ce5f9c9
EB
3176 char __user *expansion = si_expansion(from);
3177 char buf[SI_EXPANSION_SIZE];
3178 int i;
3179 /*
3180 * An unknown si_code might need more than
3181 * sizeof(struct kernel_siginfo) bytes. Verify all of the
3182 * extra bytes are 0. This guarantees copy_siginfo_to_user
3183 * will return this data to userspace exactly.
3184 */
3185 if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE))
3186 return -EFAULT;
3187 for (i = 0; i < SI_EXPANSION_SIZE; i++) {
3188 if (buf[i] != 0)
3189 return -E2BIG;
3190 }
3191 }
4cd2e0e7
EB
3192 return 0;
3193}
3194
601d5abf
EB
3195static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to,
3196 const siginfo_t __user *from)
3197{
3198 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3199 return -EFAULT;
3200 to->si_signo = signo;
3201 return post_copy_siginfo_from_user(to, from);
3202}
3203
3204int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from)
3205{
3206 if (copy_from_user(to, from, sizeof(struct kernel_siginfo)))
3207 return -EFAULT;
3208 return post_copy_siginfo_from_user(to, from);
3209}
3210
212a36a1 3211#ifdef CONFIG_COMPAT
ea64d5ac 3212int copy_siginfo_to_user32(struct compat_siginfo __user *to,
ae7795bc 3213 const struct kernel_siginfo *from)
ea64d5ac
EB
3214#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
3215{
3216 return __copy_siginfo_to_user32(to, from, in_x32_syscall());
3217}
3218int __copy_siginfo_to_user32(struct compat_siginfo __user *to,
ae7795bc 3219 const struct kernel_siginfo *from, bool x32_ABI)
ea64d5ac
EB
3220#endif
3221{
3222 struct compat_siginfo new;
3223 memset(&new, 0, sizeof(new));
3224
3225 new.si_signo = from->si_signo;
3226 new.si_errno = from->si_errno;
3227 new.si_code = from->si_code;
3228 switch(siginfo_layout(from->si_signo, from->si_code)) {
3229 case SIL_KILL:
3230 new.si_pid = from->si_pid;
3231 new.si_uid = from->si_uid;
3232 break;
3233 case SIL_TIMER:
3234 new.si_tid = from->si_tid;
3235 new.si_overrun = from->si_overrun;
3236 new.si_int = from->si_int;
3237 break;
3238 case SIL_POLL:
3239 new.si_band = from->si_band;
3240 new.si_fd = from->si_fd;
3241 break;
3242 case SIL_FAULT:
3243 new.si_addr = ptr_to_compat(from->si_addr);
3244#ifdef __ARCH_SI_TRAPNO
3245 new.si_trapno = from->si_trapno;
3246#endif
31931c93
EB
3247 break;
3248 case SIL_FAULT_MCEERR:
3249 new.si_addr = ptr_to_compat(from->si_addr);
3250#ifdef __ARCH_SI_TRAPNO
3251 new.si_trapno = from->si_trapno;
ea64d5ac 3252#endif
31931c93
EB
3253 new.si_addr_lsb = from->si_addr_lsb;
3254 break;
3255 case SIL_FAULT_BNDERR:
3256 new.si_addr = ptr_to_compat(from->si_addr);
3257#ifdef __ARCH_SI_TRAPNO
3258 new.si_trapno = from->si_trapno;
ea64d5ac 3259#endif
31931c93
EB
3260 new.si_lower = ptr_to_compat(from->si_lower);
3261 new.si_upper = ptr_to_compat(from->si_upper);
3262 break;
3263 case SIL_FAULT_PKUERR:
3264 new.si_addr = ptr_to_compat(from->si_addr);
3265#ifdef __ARCH_SI_TRAPNO
3266 new.si_trapno = from->si_trapno;
ea64d5ac 3267#endif
31931c93 3268 new.si_pkey = from->si_pkey;
ea64d5ac
EB
3269 break;
3270 case SIL_CHLD:
3271 new.si_pid = from->si_pid;
3272 new.si_uid = from->si_uid;
3273 new.si_status = from->si_status;
3274#ifdef CONFIG_X86_X32_ABI
3275 if (x32_ABI) {
3276 new._sifields._sigchld_x32._utime = from->si_utime;
3277 new._sifields._sigchld_x32._stime = from->si_stime;
3278 } else
3279#endif
3280 {
3281 new.si_utime = from->si_utime;
3282 new.si_stime = from->si_stime;
3283 }
3284 break;
3285 case SIL_RT:
3286 new.si_pid = from->si_pid;
3287 new.si_uid = from->si_uid;
3288 new.si_int = from->si_int;
3289 break;
3290 case SIL_SYS:
3291 new.si_call_addr = ptr_to_compat(from->si_call_addr);
3292 new.si_syscall = from->si_syscall;
3293 new.si_arch = from->si_arch;
3294 break;
3295 }
3296
3297 if (copy_to_user(to, &new, sizeof(struct compat_siginfo)))
3298 return -EFAULT;
3299
3300 return 0;
3301}
3302
static int post_copy_siginfo_from_user32(kernel_siginfo_t *to,
					 const struct compat_siginfo *from)
{
	clear_siginfo(to);
	to->si_signo = from->si_signo;
	to->si_errno = from->si_errno;
	to->si_code = from->si_code;
	switch(siginfo_layout(from->si_signo, from->si_code)) {
	case SIL_KILL:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		break;
	case SIL_TIMER:
		to->si_tid = from->si_tid;
		to->si_overrun = from->si_overrun;
		to->si_int = from->si_int;
		break;
	case SIL_POLL:
		to->si_band = from->si_band;
		to->si_fd = from->si_fd;
		break;
	case SIL_FAULT:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		break;
	case SIL_FAULT_MCEERR:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		to->si_addr_lsb = from->si_addr_lsb;
		break;
	case SIL_FAULT_BNDERR:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		to->si_lower = compat_ptr(from->si_lower);
		to->si_upper = compat_ptr(from->si_upper);
		break;
	case SIL_FAULT_PKUERR:
		to->si_addr = compat_ptr(from->si_addr);
#ifdef __ARCH_SI_TRAPNO
		to->si_trapno = from->si_trapno;
#endif
		to->si_pkey = from->si_pkey;
		break;
	case SIL_CHLD:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_status = from->si_status;
#ifdef CONFIG_X86_X32_ABI
		if (in_x32_syscall()) {
			to->si_utime = from->_sifields._sigchld_x32._utime;
			to->si_stime = from->_sifields._sigchld_x32._stime;
		} else
#endif
		{
			to->si_utime = from->si_utime;
			to->si_stime = from->si_stime;
		}
		break;
	case SIL_RT:
		to->si_pid = from->si_pid;
		to->si_uid = from->si_uid;
		to->si_int = from->si_int;
		break;
	case SIL_SYS:
		to->si_call_addr = compat_ptr(from->si_call_addr);
		to->si_syscall = from->si_syscall;
		to->si_arch = from->si_arch;
		break;
	}
	return 0;
}

static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to,
				      const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	from.si_signo = signo;
	return post_copy_siginfo_from_user32(to, &from);
}

int copy_siginfo_from_user32(struct kernel_siginfo *to,
			     const struct compat_siginfo __user *ufrom)
{
	struct compat_siginfo from;

	if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo)))
		return -EFAULT;

	return post_copy_siginfo_from_user32(to, &from);
}
#endif /* CONFIG_COMPAT */

/**
 * do_sigtimedwait - wait for queued signals specified in @which
 * @which: queued signals to wait for
 * @info: if non-null, the signal's siginfo is returned here
 * @ts: upper bound on process time suspension
 */
static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
			   const struct timespec64 *ts)
{
	ktime_t *to = NULL, timeout = KTIME_MAX;
	struct task_struct *tsk = current;
	sigset_t mask = *which;
	int sig, ret = 0;

	if (ts) {
		if (!timespec64_valid(ts))
			return -EINVAL;
		timeout = timespec64_to_ktime(*ts);
		to = &timeout;
	}

	/*
	 * Invert the set of allowed signals to get those we want to block.
	 */
	sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	signotset(&mask);

	spin_lock_irq(&tsk->sighand->siglock);
	sig = dequeue_signal(tsk, &mask, info);
	if (!sig && timeout) {
		/*
		 * None ready, temporarily unblock those we're interested
		 * in while we are sleeping, so that we'll be awakened when
		 * they arrive. Unblocking is always fine, we can avoid
		 * set_current_blocked().
		 */
		tsk->real_blocked = tsk->blocked;
		sigandsets(&tsk->blocked, &tsk->blocked, &mask);
		recalc_sigpending();
		spin_unlock_irq(&tsk->sighand->siglock);

		__set_current_state(TASK_INTERRUPTIBLE);
		ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns,
							 HRTIMER_MODE_REL);
		spin_lock_irq(&tsk->sighand->siglock);
		__set_task_blocked(tsk, &tsk->real_blocked);
		sigemptyset(&tsk->real_blocked);
		sig = dequeue_signal(tsk, &mask, info);
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (sig)
		return sig;
	return ret ? -EINTR : -EAGAIN;
}

/**
 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
 *			in @uthese
 * @uthese: queued signals to wait for
 * @uinfo: if non-null, the signal's siginfo is returned here
 * @uts: upper bound on process time suspension
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct __kernel_timespec __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

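/*
 * Illustrative userspace sketch (not kernel code): waiting up to two
 * seconds for SIGUSR1 through sigtimedwait(2), the libc wrapper for this
 * syscall. The signal must be blocked first so it stays queued for the
 * waiter instead of being delivered to a handler.
 *
 *	#include <signal.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		sigset_t set;
 *		siginfo_t si;
 *		struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
 *
 *		sigemptyset(&set);
 *		sigaddset(&set, SIGUSR1);
 *		sigprocmask(SIG_BLOCK, &set, NULL);
 *		if (sigtimedwait(&set, &si, &ts) < 0)
 *			perror("sigtimedwait");	// EAGAIN on timeout
 *		else
 *			printf("sender pid: %d\n", (int)si.si_pid);
 *		return 0;
 *	}
 */
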
#ifdef CONFIG_COMPAT_32BIT_TIME
SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese,
		siginfo_t __user *, uinfo,
		const struct old_timespec32 __user *, uts,
		size_t, sigsetsize)
{
	sigset_t these;
	struct timespec64 ts;
	kernel_siginfo_t info;
	int ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&these, uthese, sizeof(these)))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&ts, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_timespec64(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}

#ifdef CONFIG_COMPAT_32BIT_TIME
COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese,
		struct compat_siginfo __user *, uinfo,
		struct old_timespec32 __user *, uts, compat_size_t, sigsetsize)
{
	sigset_t s;
	struct timespec64 t;
	kernel_siginfo_t info;
	long ret;

	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&s, uthese))
		return -EFAULT;

	if (uts) {
		if (get_old_timespec32(&t, uts))
			return -EFAULT;
	}

	ret = do_sigtimedwait(&s, &info, uts ? &t : NULL);

	if (ret > 0 && uinfo) {
		if (copy_siginfo_to_user32(uinfo, &info))
			ret = -EFAULT;
	}

	return ret;
}
#endif
#endif

static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info)
{
	clear_siginfo(info);
	info->si_signo = sig;
	info->si_errno = 0;
	info->si_code = SI_USER;
	info->si_pid = task_tgid_vnr(current);
	info->si_uid = from_kuid_munged(current_user_ns(), current_uid());
}

/**
 * sys_kill - send a signal to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 */
SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
{
	struct kernel_siginfo info;

	prepare_kill_siginfo(sig, &info);

	return kill_something_info(sig, &info, pid);
}

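/*
 * Illustrative userspace sketch (not kernel code): kill(2) dispatches on
 * the sign of @pid, which kill_something_info() decodes — a positive pid
 * names one process, 0 the caller's process group, -1 every process the
 * caller may signal, and any other negative value the process group -pid.
 *
 *	#include <signal.h>
 *
 *	kill(1234, SIGTERM);	// the single process 1234
 *	kill(0, SIGHUP);	// the caller's own process group
 *	kill(-5678, SIGTERM);	// every member of process group 5678
 */
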
/*
 * Verify that the signaler and signalee either are in the same pid namespace
 * or that the signaler's pid namespace is an ancestor of the signalee's pid
 * namespace.
 */
static bool access_pidfd_pidns(struct pid *pid)
{
	struct pid_namespace *active = task_active_pid_ns(current);
	struct pid_namespace *p = ns_of_pid(pid);

	for (;;) {
		if (!p)
			return false;
		if (p == active)
			break;
		p = p->parent;
	}

	return true;
}

static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, siginfo_t *info)
{
#ifdef CONFIG_COMPAT
	/*
	 * Avoid hooking up compat syscalls and instead handle necessary
	 * conversions here. Note, this is a stop-gap measure and should not be
	 * considered a generic solution.
	 */
	if (in_compat_syscall())
		return copy_siginfo_from_user32(
			kinfo, (struct compat_siginfo __user *)info);
#endif
	return copy_siginfo_from_user(kinfo, info);
}

static struct pid *pidfd_to_pid(const struct file *file)
{
	if (file->f_op == &pidfd_fops)
		return file->private_data;

	return tgid_pidfd_to_pid(file);
}

/**
 * sys_pidfd_send_signal - send a signal to a process through a task file
 *                         descriptor
 * @pidfd: the file descriptor of the process
 * @sig: signal to be sent
 * @info: the signal info
 * @flags: future flags to be passed
 *
 * The syscall currently only signals via PIDTYPE_PID which covers
 * kill(<positive-pid>, <signal>). It does not signal threads or process
 * groups.
 * In order to extend the syscall to threads and process groups the @flags
 * argument should be used. In essence, the @flags argument will determine
 * what is signaled and not the file descriptor itself. In other words,
 * grouping is a property of the flags argument, not a property of the file
 * descriptor.
 *
 * Return: 0 on success, negative errno on failure
 */
SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig,
		siginfo_t __user *, info, unsigned int, flags)
{
	int ret;
	struct fd f;
	struct pid *pid;
	kernel_siginfo_t kinfo;

	/* Enforce flags be set to 0 until we add an extension. */
	if (flags)
		return -EINVAL;

	f = fdget(pidfd);
	if (!f.file)
		return -EBADF;

	/* Is this a pidfd? */
	pid = pidfd_to_pid(f.file);
	if (IS_ERR(pid)) {
		ret = PTR_ERR(pid);
		goto err;
	}

	ret = -EINVAL;
	if (!access_pidfd_pidns(pid))
		goto err;

	if (info) {
		ret = copy_siginfo_from_user_any(&kinfo, info);
		if (unlikely(ret))
			goto err;

		ret = -EINVAL;
		if (unlikely(sig != kinfo.si_signo))
			goto err;

		/* Only allow sending arbitrary signals to yourself. */
		ret = -EPERM;
		if ((task_pid(current) != pid) &&
		    (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL))
			goto err;
	} else {
		prepare_kill_siginfo(sig, &kinfo);
	}

	ret = kill_pid_info(sig, &kinfo, pid);

err:
	fdput(f);
	return ret;
}

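/*
 * Illustrative userspace sketch (not kernel code), assuming a kernel and
 * libc where pidfd_open(2) is available (raw syscall(2) is used since
 * wrappers may be missing; child_pid is a hypothetical process the caller
 * manages). Passing a NULL info pointer makes the kernel fill in SI_USER
 * siginfo, exactly as kill(2) would.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int pidfd = syscall(SYS_pidfd_open, child_pid, 0);
 *	if (pidfd >= 0) {
 *		syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0);
 *		close(pidfd);
 *	}
 */
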
static int
do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info)
{
	struct task_struct *p;
	int error = -ESRCH;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
		error = check_kill_permission(sig, info, p);
		/*
		 * The null signal is a permissions and process existence
		 * probe. No signal is actually delivered.
		 */
		if (!error && sig) {
			error = do_send_sig_info(sig, info, p, PIDTYPE_PID);
			/*
			 * If lock_task_sighand() failed we pretend the task
			 * dies after receiving the signal. The window is tiny,
			 * and the signal is private anyway.
			 */
			if (unlikely(error == -ESRCH))
				error = 0;
		}
	}
	rcu_read_unlock();

	return error;
}

static int do_tkill(pid_t tgid, pid_t pid, int sig)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = sig;
	info.si_errno = 0;
	info.si_code = SI_TKILL;
	info.si_pid = task_tgid_vnr(current);
	info.si_uid = from_kuid_munged(current_user_ns(), current_uid());

	return do_send_specific(tgid, pid, sig, &info);
}

/**
 * sys_tgkill - send signal to one specific thread
 * @tgid: the thread group ID of the thread
 * @pid: the PID of the thread
 * @sig: signal to be sent
 *
 * This syscall also checks the @tgid and returns -ESRCH even if the PID
 * exists but no longer belongs to the target thread group. This method
 * solves the problem of threads exiting and PIDs getting reused.
 */
SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	return do_tkill(tgid, pid, sig);
}

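/*
 * Illustrative userspace sketch (not kernel code): a thread records its
 * kernel TID, and a peer later signals exactly that thread; the tgid
 * check above turns a recycled TID into -ESRCH instead of a stray signal.
 *
 *	#include <signal.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	pid_t tid = syscall(SYS_gettid);		// in the target thread
 *	...
 *	syscall(SYS_tgkill, getpid(), tid, SIGUSR1);	// from a peer thread
 */
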
/**
 * sys_tkill - send signal to one specific task
 * @pid: the PID of the task
 * @sig: signal to be sent
 *
 * Send a signal to only one task, even if it's a CLONE_THREAD task.
 */
SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
{
	/* This is only valid for single tasks */
	if (pid <= 0)
		return -EINVAL;

	return do_tkill(0, pid, sig);
}

static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	/* POSIX.1b doesn't mention process groups. */
	return kill_proc_info(sig, info, pid);
}

/**
 * sys_rt_sigqueueinfo - send signal information to a process
 * @pid: the PID of the process
 * @sig: signal to be sent
 * @uinfo: signal info to be sent
 */
SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}

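/*
 * Illustrative userspace sketch (not kernel code): sigqueue(3) is the
 * usual libc entry point to this syscall and attaches one word of data
 * that the receiver sees as si_value with si_code == SI_QUEUE.
 *
 *	#include <signal.h>
 *
 *	union sigval v = { .sival_int = 42 };
 *	sigqueue(target_pid, SIGRTMIN, v);	// target_pid: hypothetical
 */
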
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_sigqueueinfo(pid, sig, &info);
}
#endif

static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info)
{
	/* This is only valid for single tasks */
	if (pid <= 0 || tgid <= 0)
		return -EINVAL;

	/* Not even root can pretend to send signals from the kernel.
	 * Nor can they impersonate a kill()/tgkill(), which adds source info.
	 */
	if ((info->si_code >= 0 || info->si_code == SI_TKILL) &&
	    (task_pid_vnr(current) != pid))
		return -EPERM;

	return do_send_specific(tgid, pid, sig, info);
}

SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
		siginfo_t __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
			compat_pid_t, tgid,
			compat_pid_t, pid,
			int, sig,
			struct compat_siginfo __user *, uinfo)
{
	kernel_siginfo_t info;
	int ret = __copy_siginfo_from_user32(sig, &info, uinfo);
	if (unlikely(ret))
		return ret;
	return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
}
#endif

/*
 * For kthreads only, must not be used if cloned with CLONE_SIGHAND
 */
void kernel_sigaction(int sig, __sighandler_t action)
{
	spin_lock_irq(&current->sighand->siglock);
	current->sighand->action[sig - 1].sa.sa_handler = action;
	if (action == SIG_IGN) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, sig);

		flush_sigqueue_mask(&mask, &current->signal->shared_pending);
		flush_sigqueue_mask(&mask, &current->pending);
		recalc_sigpending();
	}
	spin_unlock_irq(&current->sighand->siglock);
}
EXPORT_SYMBOL(kernel_sigaction);

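/*
 * Illustrative in-kernel sketch: kernel threads start with every signal
 * ignored, and the allow_signal()/disallow_signal() helpers in
 * <linux/signal.h> wrap kernel_sigaction() to toggle that per signal.
 *
 *	allow_signal(SIGKILL);		// opt the kthread in to SIGKILL
 *	while (!kthread_should_stop()) {
 *		...
 *		if (signal_pending(current))
 *			break;		// a SIGKILL was delivered
 *	}
 */
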
void __weak sigaction_compat_abi(struct k_sigaction *act,
		struct k_sigaction *oact)
{
}

int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
{
	struct task_struct *p = current, *t;
	struct k_sigaction *k;
	sigset_t mask;

	if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
		return -EINVAL;

	k = &p->sighand->action[sig-1];

	spin_lock_irq(&p->sighand->siglock);
	if (oact)
		*oact = *k;

	sigaction_compat_abi(act, oact);

	if (act) {
		sigdelsetmask(&act->sa.sa_mask,
			      sigmask(SIGKILL) | sigmask(SIGSTOP));
		*k = *act;
		/*
		 * POSIX 3.3.1.3:
		 *  "Setting a signal action to SIG_IGN for a signal that is
		 *   pending shall cause the pending signal to be discarded,
		 *   whether or not it is blocked."
		 *
		 *  "Setting a signal action to SIG_DFL for a signal that is
		 *   pending and whose default action is to ignore the signal
		 *   (for example, SIGCHLD), shall cause the pending signal to
		 *   be discarded, whether or not it is blocked"
		 */
		if (sig_handler_ignored(sig_handler(p, sig), sig)) {
			sigemptyset(&mask);
			sigaddset(&mask, sig);
			flush_sigqueue_mask(&mask, &p->signal->shared_pending);
			for_each_thread(p, t)
				flush_sigqueue_mask(&mask, &t->pending);
		}
	}

	spin_unlock_irq(&p->sighand->siglock);
	return 0;
}

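/*
 * Illustrative userspace sketch (not kernel code) of the POSIX rule
 * quoted above: switching to SIG_IGN discards an already-pending signal
 * even while it is blocked.
 *
 *	#include <signal.h>
 *
 *	sigset_t s;
 *	sigemptyset(&s);
 *	sigaddset(&s, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &s, NULL);
 *	raise(SIGUSR1);			// pending and blocked
 *	signal(SIGUSR1, SIG_IGN);	// pending SIGUSR1 is discarded
 *	sigpending(&s);			// SIGUSR1 is no longer in the set
 */
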
static int
do_sigaltstack(const stack_t *ss, stack_t *oss, unsigned long sp,
		size_t min_ss_size)
{
	struct task_struct *t = current;

	if (oss) {
		memset(oss, 0, sizeof(stack_t));
		oss->ss_sp = (void __user *) t->sas_ss_sp;
		oss->ss_size = t->sas_ss_size;
		oss->ss_flags = sas_ss_flags(sp) |
			(current->sas_ss_flags & SS_FLAG_BITS);
	}

	if (ss) {
		void __user *ss_sp = ss->ss_sp;
		size_t ss_size = ss->ss_size;
		unsigned ss_flags = ss->ss_flags;
		int ss_mode;

		if (unlikely(on_sig_stack(sp)))
			return -EPERM;

		ss_mode = ss_flags & ~SS_FLAG_BITS;
		if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK &&
				ss_mode != 0))
			return -EINVAL;

		if (ss_mode == SS_DISABLE) {
			ss_size = 0;
			ss_sp = NULL;
		} else {
			if (unlikely(ss_size < min_ss_size))
				return -ENOMEM;
		}

		t->sas_ss_sp = (unsigned long) ss_sp;
		t->sas_ss_size = ss_size;
		t->sas_ss_flags = ss_flags;
	}
	return 0;
}

SYSCALL_DEFINE2(sigaltstack, const stack_t __user *, uss, stack_t __user *, uoss)
{
	stack_t new, old;
	int err;
	if (uss && copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL,
			     current_user_stack_pointer(),
			     MINSIGSTKSZ);
	if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t)))
		err = -EFAULT;
	return err;
}

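/*
 * Illustrative userspace sketch (not kernel code): installing an
 * alternate stack so a handler can still run after the main stack
 * overflows; the handler must be registered with SA_ONSTACK.
 * segv_handler is a hypothetical user-supplied function.
 *
 *	#include <signal.h>
 *	#include <stdlib.h>
 *
 *	stack_t ss = {
 *		.ss_sp = malloc(SIGSTKSZ),
 *		.ss_size = SIGSTKSZ,
 *		.ss_flags = 0,
 *	};
 *	sigaltstack(&ss, NULL);
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = segv_handler;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */
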
int restore_altstack(const stack_t __user *uss)
{
	stack_t new;
	if (copy_from_user(&new, uss, sizeof(stack_t)))
		return -EFAULT;
	(void)do_sigaltstack(&new, NULL, current_user_stack_pointer(),
			     MINSIGSTKSZ);
	/* squash all but EFAULT for now */
	return 0;
}

int __save_altstack(stack_t __user *uss, unsigned long sp)
{
	struct task_struct *t = current;
	int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}

#ifdef CONFIG_COMPAT
static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr,
				 compat_stack_t __user *uoss_ptr)
{
	stack_t uss, uoss;
	int ret;

	if (uss_ptr) {
		compat_stack_t uss32;
		if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t)))
			return -EFAULT;
		uss.ss_sp = compat_ptr(uss32.ss_sp);
		uss.ss_flags = uss32.ss_flags;
		uss.ss_size = uss32.ss_size;
	}
	ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss,
			     compat_user_stack_pointer(),
			     COMPAT_MINSIGSTKSZ);
	if (ret >= 0 && uoss_ptr) {
		compat_stack_t old;
		memset(&old, 0, sizeof(old));
		old.ss_sp = ptr_to_compat(uoss.ss_sp);
		old.ss_flags = uoss.ss_flags;
		old.ss_size = uoss.ss_size;
		if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t)))
			ret = -EFAULT;
	}
	return ret;
}

COMPAT_SYSCALL_DEFINE2(sigaltstack,
			const compat_stack_t __user *, uss_ptr,
			compat_stack_t __user *, uoss_ptr)
{
	return do_compat_sigaltstack(uss_ptr, uoss_ptr);
}

int compat_restore_altstack(const compat_stack_t __user *uss)
{
	int err = do_compat_sigaltstack(uss, NULL);
	/* squash all but -EFAULT for now */
	return err == -EFAULT ? err : 0;
}

int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp)
{
	int err;
	struct task_struct *t = current;
	err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp),
			 &uss->ss_sp) |
		__put_user(t->sas_ss_flags, &uss->ss_flags) |
		__put_user(t->sas_ss_size, &uss->ss_size);
	if (err)
		return err;
	if (t->sas_ss_flags & SS_AUTODISARM)
		sas_ss_reset(t);
	return 0;
}
#endif

#ifdef __ARCH_WANT_SYS_SIGPENDING

/**
 * sys_sigpending - examine pending signals
 * @uset: where mask of pending signal is returned
 */
SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset)
{
	sigset_t set;

	if (sizeof(old_sigset_t) > sizeof(*uset))
		return -EINVAL;

	do_sigpending(&set);

	if (copy_to_user(uset, &set, sizeof(old_sigset_t)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32)
{
	sigset_t set;

	do_sigpending(&set);

	return put_user(set.sig[0], set32);
}
#endif

#endif

#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/**
 * sys_sigprocmask - examine and change blocked signals
 * @how: whether to add, remove, or set signals
 * @nset: signals to add or remove (if non-null)
 * @oset: previous value of signal mask if non-null
 *
 * Some platforms have their own version with special arguments;
 * others support only sys_rt_sigprocmask.
 */

SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
		old_sigset_t __user *, oset)
{
	old_sigset_t old_set, new_set;
	sigset_t new_blocked;

	old_set = current->blocked.sig[0];

	if (nset) {
		if (copy_from_user(&new_set, nset, sizeof(*nset)))
			return -EFAULT;

		new_blocked = current->blocked;

		switch (how) {
		case SIG_BLOCK:
			sigaddsetmask(&new_blocked, new_set);
			break;
		case SIG_UNBLOCK:
			sigdelsetmask(&new_blocked, new_set);
			break;
		case SIG_SETMASK:
			new_blocked.sig[0] = new_set;
			break;
		default:
			return -EINVAL;
		}

		set_current_blocked(&new_blocked);
	}

	if (oset) {
		if (copy_to_user(oset, &old_set, sizeof(*oset)))
			return -EFAULT;
	}

	return 0;
}
#endif /* __ARCH_WANT_SYS_SIGPROCMASK */

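/*
 * Illustrative userspace sketch (not kernel code): the classic pattern of
 * holding a signal across a critical section and then restoring the old
 * mask, which works the same through this legacy entry point and the
 * modern rt_sigprocmask(2).
 *
 *	#include <signal.h>
 *
 *	sigset_t block, old;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGINT);
 *	sigprocmask(SIG_BLOCK, &block, &old);	// SIGINT now held pending
 *	...					// critical section
 *	sigprocmask(SIG_SETMASK, &old, NULL);	// any pending SIGINT fires
 */
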
#ifndef CONFIG_ODD_RT_SIGACTION
/**
 * sys_rt_sigaction - alter an action taken by a process
 * @sig: signal whose action is to be changed
 * @act: new sigaction
 * @oact: used to save the previous sigaction
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct sigaction __user *, act,
		struct sigaction __user *, oact,
		size_t, sigsetsize)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
		return -EFAULT;

	ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
	if (ret)
		return ret;

	if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
		return -EFAULT;

	return 0;
}
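
/*
 * Illustrative userspace sketch (not kernel code): registering a handler
 * through sigaction(2); SA_RESTART makes most interrupted syscalls resume
 * instead of failing with EINTR. on_child is a hypothetical handler.
 *
 *	#include <signal.h>
 *
 *	struct sigaction sa = { 0 };
 *	sa.sa_handler = on_child;
 *	sa.sa_flags = SA_RESTART | SA_NOCLDSTOP;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGCHLD, &sa, NULL);
 */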
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig,
		const struct compat_sigaction __user *, act,
		struct compat_sigaction __user *, oact,
		compat_size_t, sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
#ifdef __ARCH_HAS_SA_RESTORER
	compat_uptr_t restorer;
#endif
	int ret;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		compat_uptr_t handler;
		ret = get_user(handler, &act->sa_handler);
		new_ka.sa.sa_handler = compat_ptr(handler);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= get_user(restorer, &act->sa_restorer);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
#endif
		ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask);
		ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
		if (ret)
			return -EFAULT;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
	if (!ret && oact) {
		ret = put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler);
		ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask,
					 sizeof(oact->sa_mask));
		ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
#ifdef __ARCH_HAS_SA_RESTORER
		ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer),
				&oact->sa_restorer);
#endif
	}
	return ret;
}
#endif
#endif /* !CONFIG_ODD_RT_SIGACTION */

#ifdef CONFIG_OLD_SIGACTION
SYSCALL_DEFINE3(sigaction, int, sig,
		const struct old_sigaction __user *, act,
		struct old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
#endif
#ifdef CONFIG_COMPAT_OLD_SIGACTION
COMPAT_SYSCALL_DEFINE3(sigaction, int, sig,
		const struct compat_old_sigaction __user *, act,
		struct compat_old_sigaction __user *, oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;
	compat_old_sigset_t mask;
	compat_uptr_t handler, restorer;

	if (act) {
		if (!access_ok(act, sizeof(*act)) ||
		    __get_user(handler, &act->sa_handler) ||
		    __get_user(restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;

#ifdef __ARCH_HAS_KA_RESTORER
		new_ka.ka_restorer = NULL;
#endif
		new_ka.sa.sa_handler = compat_ptr(handler);
		new_ka.sa.sa_restorer = compat_ptr(restorer);
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(oact, sizeof(*oact)) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_handler),
			       &oact->sa_handler) ||
		    __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
			       &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}
	return ret;
}
#endif

#ifdef CONFIG_SGETMASK_SYSCALL

/*
 * For backwards compatibility. Functionality superseded by sigprocmask.
 */
SYSCALL_DEFINE0(sgetmask)
{
	/* SMP safe */
	return current->blocked.sig[0];
}

SYSCALL_DEFINE1(ssetmask, int, newmask)
{
	int old = current->blocked.sig[0];
	sigset_t newset;

	siginitset(&newset, newmask);
	set_current_blocked(&newset);

	return old;
}
#endif /* CONFIG_SGETMASK_SYSCALL */

#ifdef __ARCH_WANT_SYS_SIGNAL
/*
 * For backwards compatibility. Functionality superseded by sigaction.
 */
SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
{
	struct k_sigaction new_sa, old_sa;
	int ret;

	new_sa.sa.sa_handler = handler;
	new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
	sigemptyset(&new_sa.sa.sa_mask);

	ret = do_sigaction(sig, &new_sa, &old_sa);

	return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
}
#endif /* __ARCH_WANT_SYS_SIGNAL */

#ifdef __ARCH_WANT_SYS_PAUSE

SYSCALL_DEFINE0(pause)
{
	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return -ERESTARTNOHAND;
}

#endif

static int sigsuspend(sigset_t *set)
{
	current->saved_sigmask = current->blocked;
	set_current_blocked(set);

	while (!signal_pending(current)) {
		__set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	set_restore_sigmask();
	return -ERESTARTNOHAND;
}

/**
 * sys_rt_sigsuspend - replace the signal mask with the @unewset value
 *	until a signal is received
 * @unewset: new signal mask value
 * @sigsetsize: size of sigset_t type
 */
SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (copy_from_user(&newset, unewset, sizeof(newset)))
		return -EFAULT;
	return sigsuspend(&newset);
}
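
/*
 * Illustrative userspace sketch (not kernel code): the race-free wait
 * idiom this syscall exists for. The signal is blocked while the flag is
 * tested, and sigsuspend(2) atomically installs a mask with SIGUSR1
 * unblocked and sleeps, so an arrival between test and sleep is never
 * lost. got_usr1 is a hypothetical flag set by the handler.
 *
 *	#include <signal.h>
 *
 *	extern volatile sig_atomic_t got_usr1;
 *
 *	sigset_t block, waitmask;
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &waitmask);
 *	sigdelset(&waitmask, SIGUSR1);
 *	while (!got_usr1)
 *		sigsuspend(&waitmask);	// returns -1 with errno == EINTR
 */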

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize)
{
	sigset_t newset;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(sigset_t))
		return -EINVAL;

	if (get_compat_sigset(&newset, unewset))
		return -EFAULT;
	return sigsuspend(&newset);
}
#endif

#ifdef CONFIG_OLD_SIGSUSPEND
SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif
#ifdef CONFIG_OLD_SIGSUSPEND3
SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask)
{
	sigset_t blocked;
	siginitset(&blocked, mask);
	return sigsuspend(&blocked);
}
#endif

__weak const char *arch_vma_name(struct vm_area_struct *vma)
{
	return NULL;
}

static inline void siginfo_buildtime_checks(void)
{
	BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE);

	/* Verify the offsets in the two siginfos match */
#define CHECK_OFFSET(field) \
	BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field))

	/* kill */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);

	/* timer */
	CHECK_OFFSET(si_tid);
	CHECK_OFFSET(si_overrun);
	CHECK_OFFSET(si_value);

	/* rt */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_value);

	/* sigchld */
	CHECK_OFFSET(si_pid);
	CHECK_OFFSET(si_uid);
	CHECK_OFFSET(si_status);
	CHECK_OFFSET(si_utime);
	CHECK_OFFSET(si_stime);

	/* sigfault */
	CHECK_OFFSET(si_addr);
	CHECK_OFFSET(si_addr_lsb);
	CHECK_OFFSET(si_lower);
	CHECK_OFFSET(si_upper);
	CHECK_OFFSET(si_pkey);

	/* sigpoll */
	CHECK_OFFSET(si_band);
	CHECK_OFFSET(si_fd);

	/* sigsys */
	CHECK_OFFSET(si_call_addr);
	CHECK_OFFSET(si_syscall);
	CHECK_OFFSET(si_arch);
#undef CHECK_OFFSET

	/* usb asyncio */
	BUILD_BUG_ON(offsetof(struct siginfo, si_pid) !=
		     offsetof(struct siginfo, si_addr));
	if (sizeof(int) == sizeof(void __user *)) {
		BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) !=
			     sizeof(void __user *));
	} else {
		BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) +
			      sizeof_field(struct siginfo, si_uid)) !=
			     sizeof(void __user *));
		BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) !=
			     offsetof(struct siginfo, si_uid));
	}
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) !=
		     offsetof(struct compat_siginfo, si_addr));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof(compat_uptr_t));
	BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) !=
		     sizeof_field(struct siginfo, si_pid));
#endif
}

void __init signals_init(void)
{
	siginfo_buildtime_checks();

	sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
}

#ifdef CONFIG_KGDB_KDB
#include <linux/kdb.h>
/*
 * kdb_send_sig - Allows kdb to send signals without exposing
 * signal internals. This function checks if the required locks are
 * available before calling the main signal code, to avoid kdb
 * deadlocks.
 */
void kdb_send_sig(struct task_struct *t, int sig)
{
	static struct task_struct *kdb_prev_t;
	int new_t, ret;
	if (!spin_trylock(&t->sighand->siglock)) {
		kdb_printf("Can't do kill command now.\n"
			   "The sigmask lock is held somewhere else in "
			   "the kernel; try again later\n");
		return;
	}
	new_t = kdb_prev_t != t;
	kdb_prev_t = t;
	if (t->state != TASK_RUNNING && new_t) {
		spin_unlock(&t->sighand->siglock);
		kdb_printf("Process is not RUNNING, sending a signal from "
			   "kdb risks deadlock\n"
			   "on the run queue locks. "
			   "The signal has _not_ been sent.\n"
			   "Reissue the kill command if you want to risk "
			   "the deadlock.\n");
		return;
	}
	ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID);
	spin_unlock(&t->sighand->siglock);
	if (ret)
		kdb_printf("Failed to deliver signal %d to process %d.\n",
			   sig, t->pid);
	else
		kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
}
#endif /* CONFIG_KGDB_KDB */