signal: Use GROUP_STOP_PENDING to stop once for a single group stop
/*
 *  linux/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
 *
 *  2003-06-02  Jim Houston - Concurrent Computer Corp.
 *		Changes to use preallocated sigqueue structures
 *		to allow signals to be sent reliably.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
#include <linux/signalfd.h>
#include <linux/ratelimit.h>
#include <linux/tracehook.h>
#include <linux/capability.h>
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
#define CREATE_TRACE_POINTS
#include <trace/events/signal.h>

#include <asm/param.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/siginfo.h>
#include "audit.h"	/* audit_signal_info() */

/*
 * SLAB caches for signal bits.
 */

static struct kmem_cache *sigqueue_cachep;

int print_fatal_signals __read_mostly;

static void __user *sig_handler(struct task_struct *t, int sig)
{
	return t->sighand->action[sig - 1].sa.sa_handler;
}

static int sig_handler_ignored(void __user *handler, int sig)
{
	/* Is it explicitly or implicitly ignored? */
	return handler == SIG_IGN ||
		(handler == SIG_DFL && sig_kernel_ignore(sig));
}

static int sig_task_ignored(struct task_struct *t, int sig,
			    int from_ancestor_ns)
{
	void __user *handler;

	handler = sig_handler(t, sig);

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL && !from_ancestor_ns)
		return 1;

	return sig_handler_ignored(handler, sig);
}

static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
{
	/*
	 * Blocked signals are never ignored, since the
	 * signal handler may change by the time it is
	 * unblocked.
	 */
	if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
		return 0;

	if (!sig_task_ignored(t, sig, from_ancestor_ns))
		return 0;

	/*
	 * Tracers may want to know about even ignored signals.
	 */
	return !tracehook_consider_ignored_signal(t, sig);
}

/*
 * Re-calculate pending state from the set of locally pending
 * signals, globally pending signals, and blocked signals.
 */
static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
{
	unsigned long ready;
	long i;

	switch (_NSIG_WORDS) {
	default:
		for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
			ready |= signal->sig[i] &~ blocked->sig[i];
		break;

	case 4: ready  = signal->sig[3] &~ blocked->sig[3];
		ready |= signal->sig[2] &~ blocked->sig[2];
		ready |= signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 2: ready  = signal->sig[1] &~ blocked->sig[1];
		ready |= signal->sig[0] &~ blocked->sig[0];
		break;

	case 1: ready  = signal->sig[0] &~ blocked->sig[0];
	}
	return ready != 0;
}
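
/*
 * Illustrative sketch (not part of the original file): how the word-wise
 * scan above behaves for a hypothetical 2-word configuration.  A signal is
 * "ready" iff its bit is set in *signal and clear in *blocked; the values
 * below are made up for the example.  Kept under #if 0 so it is never built.
 */
#if 0
static int example_has_pending_two_words(void)
{
	unsigned long sig[2]     = { 0x00000100UL, 0x0UL }; /* one low signal */
	unsigned long blocked[2] = { 0x00000100UL, 0x0UL }; /* same bit blocked */

	/* word 1 first, then word 0 -- mirrors the "case 2:" branch above */
	unsigned long ready = sig[1] & ~blocked[1];
	ready |= sig[0] & ~blocked[0];

	return ready != 0;	/* 0 here: the only pending signal is blocked */
}
#endif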

#define PENDING(p,b) has_pending_signals(&(p)->signal, (b))

static int recalc_sigpending_tsk(struct task_struct *t)
{
	if ((t->group_stop & GROUP_STOP_PENDING) ||
	    PENDING(&t->pending, &t->blocked) ||
	    PENDING(&t->signal->shared_pending, &t->blocked)) {
		set_tsk_thread_flag(t, TIF_SIGPENDING);
		return 1;
	}
	/*
	 * We must never clear the flag in another thread, or in current
	 * when it's possible the current syscall is returning -ERESTART*.
	 * So we don't clear it here; only callers that know it is safe
	 * to clear it do so themselves.
	 */
	return 0;
}

/*
 * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
 * This is superfluous when called on current, the wakeup is a harmless no-op.
 */
void recalc_sigpending_and_wake(struct task_struct *t)
{
	if (recalc_sigpending_tsk(t))
		signal_wake_up(t, 0);
}

void recalc_sigpending(void)
{
	if (unlikely(tracehook_force_sigpending()))
		set_thread_flag(TIF_SIGPENDING);
	else if (!recalc_sigpending_tsk(current) && !freezing(current))
		clear_thread_flag(TIF_SIGPENDING);

}

/* Given the mask, find the first available signal that should be serviced. */

#define SYNCHRONOUS_MASK \
	(sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
	 sigmask(SIGTRAP) | sigmask(SIGFPE))

int next_signal(struct sigpending *pending, sigset_t *mask)
{
	unsigned long i, *s, *m, x;
	int sig = 0;

	s = pending->signal.sig;
	m = mask->sig;

	/*
	 * Handle the first word specially: it contains the
	 * synchronous signals that need to be dequeued first.
	 */
	x = *s &~ *m;
	if (x) {
		if (x & SYNCHRONOUS_MASK)
			x &= SYNCHRONOUS_MASK;
		sig = ffz(~x) + 1;
		return sig;
	}

	switch (_NSIG_WORDS) {
	default:
		for (i = 1; i < _NSIG_WORDS; ++i) {
			x = *++s &~ *++m;
			if (!x)
				continue;
			sig = ffz(~x) + i*_NSIG_BPW + 1;
			break;
		}
		break;

	case 2:
		x = s[1] &~ m[1];
		if (!x)
			break;
		sig = ffz(~x) + _NSIG_BPW + 1;
		break;

	case 1:
		/* Nothing to do */
		break;
	}

	return sig;
}
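
/*
 * Illustrative sketch (not part of the original file): the first word is
 * scanned through SYNCHRONOUS_MASK first, so a pending SIGSEGV is returned
 * ahead of a lower-numbered but asynchronous SIGHUP.  Hypothetical setup,
 * kept under #if 0 so it is never built.
 */
#if 0
static void example_next_signal_priority(struct sigpending *pending)
{
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&pending->signal, SIGHUP);	/* async, signal 1 */
	sigaddset(&pending->signal, SIGSEGV);	/* synchronous, signal 11 */

	/* returns SIGSEGV, not SIGHUP: "x &= SYNCHRONOUS_MASK" wins */
	WARN_ON(next_signal(pending, &mask) != SIGSEGV);
}
#endif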

static inline void print_dropped_signal(int sig)
{
	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

	if (!print_fatal_signals)
		return;

	if (!__ratelimit(&ratelimit_state))
		return;

	printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
				current->comm, current->pid, sig);
}

/**
 * task_clear_group_stop_pending - clear pending group stop
 * @task: target task
 *
 * Clear group stop states for @task.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
void task_clear_group_stop_pending(struct task_struct *task)
{
	task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME);
}

/**
 * task_participate_group_stop - participate in a group stop
 * @task: task participating in a group stop
 *
 * @task has GROUP_STOP_PENDING set and is participating in a group stop.
 * Group stop states are cleared and the group stop count is consumed if
 * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
 * stop, the appropriate %SIGNAL_* flags are set.
 *
 * CONTEXT:
 * Must be called with @task->sighand->siglock held.
 */
static bool task_participate_group_stop(struct task_struct *task)
{
	struct signal_struct *sig = task->signal;
	bool consume = task->group_stop & GROUP_STOP_CONSUME;

	WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));

	task_clear_group_stop_pending(task);

	if (!consume)
		return false;

	if (!WARN_ON_ONCE(sig->group_stop_count == 0))
		sig->group_stop_count--;

	if (!sig->group_stop_count) {
		sig->flags = SIGNAL_STOP_STOPPED;
		return true;
	}
	return false;
}

/*
 * allocate a new signal queue record
 * - this may be called without locks if and only if t == current, otherwise an
 *   appropriate lock must be held to stop the target task from exiting
 */
static struct sigqueue *
__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
{
	struct sigqueue *q = NULL;
	struct user_struct *user;

	/*
	 * Protect access to @t credentials. This can go away when all
	 * callers hold rcu read lock.
	 */
	rcu_read_lock();
	user = get_uid(__task_cred(t)->user);
	atomic_inc(&user->sigpending);
	rcu_read_unlock();

	if (override_rlimit ||
	    atomic_read(&user->sigpending) <=
			task_rlimit(t, RLIMIT_SIGPENDING)) {
		q = kmem_cache_alloc(sigqueue_cachep, flags);
	} else {
		print_dropped_signal(sig);
	}

	if (unlikely(q == NULL)) {
		atomic_dec(&user->sigpending);
		free_uid(user);
	} else {
		INIT_LIST_HEAD(&q->list);
		q->flags = 0;
		q->user = user;
	}

	return q;
}

static void __sigqueue_free(struct sigqueue *q)
{
	if (q->flags & SIGQUEUE_PREALLOC)
		return;
	atomic_dec(&q->user->sigpending);
	free_uid(q->user);
	kmem_cache_free(sigqueue_cachep, q);
}

void flush_sigqueue(struct sigpending *queue)
{
	struct sigqueue *q;

	sigemptyset(&queue->signal);
	while (!list_empty(&queue->list)) {
		q = list_entry(queue->list.next, struct sigqueue, list);
		list_del_init(&q->list);
		__sigqueue_free(q);
	}
}

/*
 * Flush all pending signals for a task.
 */
void __flush_signals(struct task_struct *t)
{
	clear_tsk_thread_flag(t, TIF_SIGPENDING);
	flush_sigqueue(&t->pending);
	flush_sigqueue(&t->signal->shared_pending);
}

void flush_signals(struct task_struct *t)
{
	unsigned long flags;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	__flush_signals(t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);
}

static void __flush_itimer_signals(struct sigpending *pending)
{
	sigset_t signal, retain;
	struct sigqueue *q, *n;

	signal = pending->signal;
	sigemptyset(&retain);

	list_for_each_entry_safe(q, n, &pending->list, list) {
		int sig = q->info.si_signo;

		if (likely(q->info.si_code != SI_TIMER)) {
			sigaddset(&retain, sig);
		} else {
			sigdelset(&signal, sig);
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}

	sigorsets(&pending->signal, &signal, &retain);
}

void flush_itimer_signals(void)
{
	struct task_struct *tsk = current;
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	__flush_itimer_signals(&tsk->pending);
	__flush_itimer_signals(&tsk->signal->shared_pending);
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
}

void ignore_signals(struct task_struct *t)
{
	int i;

	for (i = 0; i < _NSIG; ++i)
		t->sighand->action[i].sa.sa_handler = SIG_IGN;

	flush_signals(t);
}

/*
 * Flush all handlers for a task.
 */

void
flush_signal_handlers(struct task_struct *t, int force_default)
{
	int i;
	struct k_sigaction *ka = &t->sighand->action[0];
	for (i = _NSIG ; i != 0 ; i--) {
		if (force_default || ka->sa.sa_handler != SIG_IGN)
			ka->sa.sa_handler = SIG_DFL;
		ka->sa.sa_flags = 0;
		sigemptyset(&ka->sa.sa_mask);
		ka++;
	}
}

int unhandled_signal(struct task_struct *tsk, int sig)
{
	void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
	if (is_global_init(tsk))
		return 1;
	if (handler != SIG_IGN && handler != SIG_DFL)
		return 0;
	return !tracehook_consider_fatal_signal(tsk, sig);
}


/* Notify the system that a driver wants to block all signals for this
 * process, and wants to be notified if any signals at all were to be
 * sent/acted upon.  If the notifier routine returns non-zero, then the
 * signal will be acted upon after all.  If the notifier routine returns 0,
 * then the signal will be blocked.  Only one block per process is
 * allowed.  priv is a pointer to private data that the notifier routine
 * can use to determine if the signal should be blocked or not. */

void
block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier_mask = mask;
	current->notifier_data = priv;
	current->notifier = notifier;
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}

/* Notify the system that blocking has ended. */

void
unblock_all_signals(void)
{
	unsigned long flags;

	spin_lock_irqsave(&current->sighand->siglock, flags);
	current->notifier = NULL;
	current->notifier_data = NULL;
	recalc_sigpending();
	spin_unlock_irqrestore(&current->sighand->siglock, flags);
}
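
/*
 * Illustrative sketch (not part of the original file): the notifier pattern
 * a driver would use with the pair above.  All names here are hypothetical;
 * kept under #if 0 so it is never built.
 */
#if 0
static int example_notifier(void *priv)
{
	struct example_dev *dev = priv;	/* hypothetical device struct */

	/* returning 0 blocks the signal; non-zero lets it be acted upon */
	return dev->io_done;
}

static void example_wait_for_io(struct example_dev *dev, sigset_t *mask)
{
	block_all_signals(example_notifier, dev, mask);
	/* ... sleep until the hardware sets dev->io_done ... */
	unblock_all_signals();
}
#endif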

static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
{
	struct sigqueue *q, *first = NULL;

	/*
	 * Collect the siginfo appropriate to this signal.  Check if
	 * there is another siginfo for the same signal.
	 */
	list_for_each_entry(q, &list->list, list) {
		if (q->info.si_signo == sig) {
			if (first)
				goto still_pending;
			first = q;
		}
	}

	sigdelset(&list->signal, sig);

	if (first) {
still_pending:
		list_del_init(&first->list);
		copy_siginfo(info, &first->info);
		__sigqueue_free(first);
	} else {
		/* Ok, it wasn't in the queue.  This must be
		   a fast-pathed signal or we must have been
		   out of queue space.  So zero out the info.
		 */
		info->si_signo = sig;
		info->si_errno = 0;
		info->si_code = SI_USER;
		info->si_pid = 0;
		info->si_uid = 0;
	}
}

static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
			siginfo_t *info)
{
	int sig = next_signal(pending, mask);

	if (sig) {
		if (current->notifier) {
			if (sigismember(current->notifier_mask, sig)) {
				if (!(current->notifier)(current->notifier_data)) {
					clear_thread_flag(TIF_SIGPENDING);
					return 0;
				}
			}
		}

		collect_signal(sig, pending, info);
	}

	return sig;
}

/*
 * Dequeue a signal and return the element to the caller, which is
 * expected to free it.
 *
 * All callers have to hold the siglock.
 */
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
	int signr;

	/* We only dequeue private signals from ourselves, we don't let
	 * signalfd steal them
	 */
	signr = __dequeue_signal(&tsk->pending, mask, info);
	if (!signr) {
		signr = __dequeue_signal(&tsk->signal->shared_pending,
					 mask, info);
		/*
		 * itimer signal ?
		 *
		 * itimers are process shared and we restart periodic
		 * itimers in the signal delivery path to prevent DoS
		 * attacks in the high resolution timer case. This is
		 * compliant with the old way of self restarting
		 * itimers, as the SIGALRM is a legacy signal and only
		 * queued once. Changing the restart behaviour to
		 * restart the timer in the signal dequeue path is
		 * reducing the timer noise on heavy loaded !highres
		 * systems too.
		 */
		if (unlikely(signr == SIGALRM)) {
			struct hrtimer *tmr = &tsk->signal->real_timer;

			if (!hrtimer_is_queued(tmr) &&
			    tsk->signal->it_real_incr.tv64 != 0) {
				hrtimer_forward(tmr, tmr->base->get_time(),
						tsk->signal->it_real_incr);
				hrtimer_restart(tmr);
			}
		}
	}

	recalc_sigpending();
	if (!signr)
		return 0;

	if (unlikely(sig_kernel_stop(signr))) {
		/*
		 * Set a marker that we have dequeued a stop signal.  Our
		 * caller might release the siglock and then the pending
		 * stop signal it is about to process is no longer in the
		 * pending bitmasks, but must still be cleared by a SIGCONT
		 * (and overruled by a SIGKILL).  So those cases clear this
		 * shared flag after we've set it.  Note that this flag may
		 * remain set after the signal we return is ignored or
		 * handled.  That doesn't matter because its only purpose
		 * is to alert stop-signal processing code when another
		 * processor has come along and cleared the flag.
		 */
		tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
	}
	if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
		/*
		 * Release the siglock to ensure proper locking order
		 * of timer locks outside of siglocks.  Note, we leave
		 * irqs disabled here, since the posix-timers code is
		 * about to disable them again anyway.
		 */
		spin_unlock(&tsk->sighand->siglock);
		do_schedule_next_timer(info);
		spin_lock(&tsk->sighand->siglock);
	}
	return signr;
}
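
/*
 * Illustrative sketch (not part of the original file): the calling
 * convention for dequeue_signal() -- siglock held, mask normally the
 * caller's blocked set.  This mirrors what get_signal_to_deliver() does
 * further down; kept under #if 0 so it is never built.
 */
#if 0
static int example_dequeue_one(siginfo_t *info)
{
	int signr;

	spin_lock_irq(&current->sighand->siglock);
	/* deliver anything not blocked by the current mask */
	signr = dequeue_signal(current, &current->blocked, info);
	spin_unlock_irq(&current->sighand->siglock);

	return signr;	/* 0 if nothing was pending and deliverable */
}
#endif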

/*
 * Tell a process that it has a new active signal.
 *
 * NOTE! we rely on the previous spin_lock to
 * lock interrupts for us! We can only be called with
 * "siglock" held, and the local interrupt must
 * have been disabled when that got acquired!
 *
 * No need to set need_resched since signal event passing
 * goes through ->blocked
 */
void signal_wake_up(struct task_struct *t, int resume)
{
	unsigned int mask;

	set_tsk_thread_flag(t, TIF_SIGPENDING);

	/*
	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing on another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
	mask = TASK_INTERRUPTIBLE;
	if (resume)
		mask |= TASK_WAKEKILL;
	if (!wake_up_state(t, mask))
		kick_process(t);
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 *
 * This version takes a sigset mask and looks at all signals,
 * not just those in the first mask word.
 */
static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
{
	struct sigqueue *q, *n;
	sigset_t m;

	sigandsets(&m, mask, &s->signal);
	if (sigisemptyset(&m))
		return 0;

	signandsets(&s->signal, &s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (sigismember(mask, q->info.si_signo)) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}

/*
 * Remove signals in mask from the pending set and queue.
 * Returns 1 if any signals were found.
 *
 * All callers must be holding the siglock.
 */
static int rm_from_queue(unsigned long mask, struct sigpending *s)
{
	struct sigqueue *q, *n;

	if (!sigtestsetmask(&s->signal, mask))
		return 0;

	sigdelsetmask(&s->signal, mask);
	list_for_each_entry_safe(q, n, &s->list, list) {
		if (q->info.si_signo < SIGRTMIN &&
		    (mask & sigmask(q->info.si_signo))) {
			list_del_init(&q->list);
			__sigqueue_free(q);
		}
	}
	return 1;
}
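
/*
 * Illustrative sketch (not part of the original file): rm_from_queue()
 * takes a first-word bitmask, so callers hand it sigmask() values or
 * SIG_KERNEL_STOP_MASK, as prepare_signal() does below.  Kept under #if 0,
 * never built.
 */
#if 0
static void example_discard_stops(struct task_struct *t)
{
	assert_spin_locked(&t->sighand->siglock);

	/* a SIGCONT cancels any queued stop signals for this thread... */
	rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);

	/* ...while a stop signal cancels a queued SIGCONT */
	rm_from_queue(sigmask(SIGCONT), &t->pending);
}
#endif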

static inline int is_si_special(const struct siginfo *info)
{
	return info <= SEND_SIG_FORCED;
}

static inline bool si_fromuser(const struct siginfo *info)
{
	return info == SEND_SIG_NOINFO ||
		(!is_si_special(info) && SI_FROMUSER(info));
}

/*
 * Bad permissions for sending the signal
 * - the caller must hold the RCU read lock
 */
static int check_kill_permission(int sig, struct siginfo *info,
				 struct task_struct *t)
{
	const struct cred *cred, *tcred;
	struct pid *sid;
	int error;

	if (!valid_signal(sig))
		return -EINVAL;

	if (!si_fromuser(info))
		return 0;

	error = audit_signal_info(sig, t); /* Let audit system see the signal */
	if (error)
		return error;

	cred = current_cred();
	tcred = __task_cred(t);
	if (!same_thread_group(current, t) &&
	    (cred->euid ^ tcred->suid) &&
	    (cred->euid ^ tcred->uid) &&
	    (cred->uid  ^ tcred->suid) &&
	    (cred->uid  ^ tcred->uid) &&
	    !capable(CAP_KILL)) {
		switch (sig) {
		case SIGCONT:
			sid = task_session(t);
			/*
			 * We don't return the error if sid == NULL. The
			 * task was unhashed, the caller must notice this.
			 */
			if (!sid || sid == task_session(current))
				break;
		default:
			return -EPERM;
		}
	}

	return security_task_kill(t, info, sig, 0);
}

/*
 * Handle magic process-wide effects of stop/continue signals. Unlike
 * the signal actions, these happen immediately at signal-generation
 * time regardless of blocking, ignoring, or handling.  This does the
 * actual continuing for SIGCONT, but not the actual stopping for stop
 * signals. The process stop is done as a signal action for SIG_DFL.
 *
 * Returns true if the signal should be actually delivered, otherwise
 * it should be dropped.
 */
static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
		/*
		 * The process is in the middle of dying, nothing to do.
		 */
	} else if (sig_kernel_stop(sig)) {
		/*
		 * This is a stop signal.  Remove SIGCONT from all queues.
		 */
		rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
		t = p;
		do {
			rm_from_queue(sigmask(SIGCONT), &t->pending);
		} while_each_thread(p, t);
	} else if (sig == SIGCONT) {
		unsigned int why;
		/*
		 * Remove all stop signals from all queues,
		 * and wake all threads.
		 */
		rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
		t = p;
		do {
			unsigned int state;

			task_clear_group_stop_pending(t);

			rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
			/*
			 * If there is a handler for SIGCONT, we must make
			 * sure that no thread returns to user mode before
			 * we post the signal, in case it was the only
			 * thread eligible to run the signal handler--then
			 * it must not do anything between resuming and
			 * running the handler.  With the TIF_SIGPENDING
			 * flag set, the thread will pause and acquire the
			 * siglock that we hold now and until we've queued
			 * the pending signal.
			 *
			 * Wake up the stopped thread _after_ setting
			 * TIF_SIGPENDING
			 */
			state = __TASK_STOPPED;
			if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
				set_tsk_thread_flag(t, TIF_SIGPENDING);
				state |= TASK_INTERRUPTIBLE;
			}
			wake_up_state(t, state);
		} while_each_thread(p, t);

		/*
		 * Notify the parent with CLD_CONTINUED if we were stopped.
		 *
		 * If we were in the middle of a group stop, we pretend it
		 * was already finished, and then continued. Since SIGCHLD
		 * doesn't queue we report only CLD_STOPPED, as if the next
		 * CLD_CONTINUED was dropped.
		 */
		why = 0;
		if (signal->flags & SIGNAL_STOP_STOPPED)
			why |= SIGNAL_CLD_CONTINUED;
		else if (signal->group_stop_count)
			why |= SIGNAL_CLD_STOPPED;

		if (why) {
			/*
			 * The first thread which returns from do_signal_stop()
			 * will take ->siglock, notice SIGNAL_CLD_MASK, and
			 * notify its parent. See get_signal_to_deliver().
			 */
			signal->flags = why | SIGNAL_STOP_CONTINUED;
			signal->group_stop_count = 0;
			signal->group_exit_code = 0;
		} else {
			/*
			 * We are not stopped, but there could be a stop
			 * signal in the middle of being processed after
			 * being removed from the queue.  Clear that too.
			 */
			signal->flags &= ~SIGNAL_STOP_DEQUEUED;
		}
	}

	return !sig_ignored(p, sig, from_ancestor_ns);
}

/*
 * Test if P wants to take SIG.  After we've checked all threads with this,
 * it's equivalent to finding no threads not blocking SIG.  Any threads not
 * blocking SIG were ruled out because they are not running and already
 * have pending signals.  Such threads will dequeue from the shared queue
 * as soon as they're available, so putting the signal on the shared queue
 * will be equivalent to sending it to one such thread.
 */
static inline int wants_signal(int sig, struct task_struct *p)
{
	if (sigismember(&p->blocked, sig))
		return 0;
	if (p->flags & PF_EXITING)
		return 0;
	if (sig == SIGKILL)
		return 1;
	if (task_is_stopped_or_traced(p))
		return 0;
	return task_curr(p) || !signal_pending(p);
}

static void complete_signal(int sig, struct task_struct *p, int group)
{
	struct signal_struct *signal = p->signal;
	struct task_struct *t;

	/*
	 * Now find a thread we can wake up to take the signal off the queue.
	 *
	 * If the main thread wants the signal, it gets first crack.
	 * Probably the least surprising to the average bear.
	 */
	if (wants_signal(sig, p))
		t = p;
	else if (!group || thread_group_empty(p))
		/*
		 * There is just one thread and it does not need to be woken.
		 * It will dequeue unblocked signals before it runs again.
		 */
		return;
	else {
		/*
		 * Otherwise try to find a suitable thread.
		 */
		t = signal->curr_target;
		while (!wants_signal(sig, t)) {
			t = next_thread(t);
			if (t == signal->curr_target)
				/*
				 * No thread needs to be woken.
				 * Any eligible threads will see
				 * the signal in the queue soon.
				 */
				return;
		}
		signal->curr_target = t;
	}

	/*
	 * Found a killable thread.  If the signal will be fatal,
	 * then start taking the whole group down immediately.
	 */
	if (sig_fatal(p, sig) &&
	    !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
	    !sigismember(&t->real_blocked, sig) &&
	    (sig == SIGKILL ||
	     !tracehook_consider_fatal_signal(t, sig))) {
		/*
		 * This signal will be fatal to the whole group.
		 */
		if (!sig_kernel_coredump(sig)) {
			/*
			 * Start a group exit and wake everybody up.
			 * This way we don't have other threads
			 * running and doing things after a slower
			 * thread has the fatal signal pending.
			 */
			signal->flags = SIGNAL_GROUP_EXIT;
			signal->group_exit_code = sig;
			signal->group_stop_count = 0;
			t = p;
			do {
				task_clear_group_stop_pending(t);
				sigaddset(&t->pending.signal, SIGKILL);
				signal_wake_up(t, 1);
			} while_each_thread(p, t);
			return;
		}
	}

	/*
	 * The signal is already in the shared-pending queue.
	 * Tell the chosen thread to wake up and dequeue it.
	 */
	signal_wake_up(t, sig == SIGKILL);
	return;
}

static inline int legacy_queue(struct sigpending *signals, int sig)
{
	return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
}

static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group, int from_ancestor_ns)
{
	struct sigpending *pending;
	struct sigqueue *q;
	int override_rlimit;

	trace_signal_generate(sig, info, t);

	assert_spin_locked(&t->sighand->siglock);

	if (!prepare_signal(sig, t, from_ancestor_ns))
		return 0;

	pending = group ? &t->signal->shared_pending : &t->pending;
	/*
	 * Short-circuit ignored signals and support queuing
	 * exactly one non-rt signal, so that we can get more
	 * detailed information about the cause of the signal.
	 */
	if (legacy_queue(pending, sig))
		return 0;
	/*
	 * fast-pathed signals for kernel-internal things like SIGSTOP
	 * or SIGKILL.
	 */
	if (info == SEND_SIG_FORCED)
		goto out_set;

	/* Real-time signals must be queued if sent by sigqueue, or
	   some other real-time mechanism.  It is implementation
	   defined whether kill() does so.  We attempt to do so, on
	   the principle of least surprise, but since kill is not
	   allowed to fail with EAGAIN when low on memory we just
	   make sure at least one signal gets delivered and don't
	   pass on the info struct.  */

	if (sig < SIGRTMIN)
		override_rlimit = (is_si_special(info) || info->si_code >= 0);
	else
		override_rlimit = 0;

	q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
		override_rlimit);
	if (q) {
		list_add_tail(&q->list, &pending->list);
		switch ((unsigned long) info) {
		case (unsigned long) SEND_SIG_NOINFO:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_USER;
			q->info.si_pid = task_tgid_nr_ns(current,
							task_active_pid_ns(t));
			q->info.si_uid = current_uid();
			break;
		case (unsigned long) SEND_SIG_PRIV:
			q->info.si_signo = sig;
			q->info.si_errno = 0;
			q->info.si_code = SI_KERNEL;
			q->info.si_pid = 0;
			q->info.si_uid = 0;
			break;
		default:
			copy_siginfo(&q->info, info);
			if (from_ancestor_ns)
				q->info.si_pid = 0;
			break;
		}
	} else if (!is_si_special(info)) {
		if (sig >= SIGRTMIN && info->si_code != SI_USER) {
			/*
			 * Queue overflow, abort.  We may abort if the
			 * signal was rt and sent by user using something
			 * other than kill().
			 */
			trace_signal_overflow_fail(sig, group, info);
			return -EAGAIN;
		} else {
			/*
			 * This is a silent loss of information.  We still
			 * send the signal, but the *info bits are lost.
			 */
			trace_signal_lose_info(sig, group, info);
		}
	}

out_set:
	signalfd_notify(t, sig);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
	return 0;
}

static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
			int group)
{
	int from_ancestor_ns = 0;

#ifdef CONFIG_PID_NS
	from_ancestor_ns = si_fromuser(info) &&
			   !task_pid_nr_ns(current, task_active_pid_ns(t));
#endif

	return __send_signal(sig, info, t, group, from_ancestor_ns);
}

static void print_fatal_signal(struct pt_regs *regs, int signr)
{
	printk("%s/%d: potentially unexpected fatal signal %d.\n",
		current->comm, task_pid_nr(current), signr);

#if defined(__i386__) && !defined(__arch_um__)
	printk("code at %08lx: ", regs->ip);
	{
		int i;
		for (i = 0; i < 16; i++) {
			unsigned char insn;

			if (get_user(insn, (unsigned char *)(regs->ip + i)))
				break;
			printk("%02x ", insn);
		}
	}
#endif
	printk("\n");
	preempt_disable();
	show_regs(regs);
	preempt_enable();
}

static int __init setup_print_fatal_signals(char *str)
{
	get_option (&str, &print_fatal_signals);

	return 1;
}

__setup("print-fatal-signals=", setup_print_fatal_signals);

int
__group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	return send_signal(sig, info, p, 1);
}

static int
specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	return send_signal(sig, info, t, 0);
}

int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
			bool group)
{
	unsigned long flags;
	int ret = -ESRCH;

	if (lock_task_sighand(p, &flags)) {
		ret = send_signal(sig, info, p, group);
		unlock_task_sighand(p, &flags);
	}

	return ret;
}

/*
 * Force a signal that the process can't ignore: if necessary
 * we unblock the signal and change any SIG_IGN to SIG_DFL.
 *
 * Note: If we unblock the signal, we always reset it to SIG_DFL,
 * since we do not want to have a signal handler that was blocked
 * be invoked when user space had explicitly blocked it.
 *
 * We don't want to have recursive SIGSEGV's etc, for example,
 * that is why we also clear SIGNAL_UNKILLABLE.
 */
int
force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
{
	unsigned long int flags;
	int ret, blocked, ignored;
	struct k_sigaction *action;

	spin_lock_irqsave(&t->sighand->siglock, flags);
	action = &t->sighand->action[sig-1];
	ignored = action->sa.sa_handler == SIG_IGN;
	blocked = sigismember(&t->blocked, sig);
	if (blocked || ignored) {
		action->sa.sa_handler = SIG_DFL;
		if (blocked) {
			sigdelset(&t->blocked, sig);
			recalc_sigpending_and_wake(t);
		}
	}
	if (action->sa.sa_handler == SIG_DFL)
		t->signal->flags &= ~SIGNAL_UNKILLABLE;
	ret = specific_send_sig_info(sig, info, t);
	spin_unlock_irqrestore(&t->sighand->siglock, flags);

	return ret;
}
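
/*
 * Illustrative sketch (not part of the original file): the typical
 * force_sig_info() caller is an arch fault handler delivering a
 * synchronous SIGSEGV to current.  Field values are hypothetical;
 * kept under #if 0 so it is never built.
 */
#if 0
static void example_deliver_segv(unsigned long fault_addr)
{
	siginfo_t info;

	info.si_signo = SIGSEGV;
	info.si_errno = 0;
	info.si_code  = SEGV_MAPERR;
	info.si_addr  = (void __user *)fault_addr;

	/* unblocks/defaults SIGSEGV if needed, then sends it to current */
	force_sig_info(SIGSEGV, &info, current);
}
#endif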

/*
 * Nuke all other threads in the group.
 */
int zap_other_threads(struct task_struct *p)
{
	struct task_struct *t = p;
	int count = 0;

	p->signal->group_stop_count = 0;

	while_each_thread(p, t) {
		task_clear_group_stop_pending(t);
		count++;

		/* Don't bother with already dead threads */
		if (t->exit_state)
			continue;
		sigaddset(&t->pending.signal, SIGKILL);
		signal_wake_up(t, 1);
	}

	return count;
}

struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
					   unsigned long *flags)
{
	struct sighand_struct *sighand;

	rcu_read_lock();
	for (;;) {
		sighand = rcu_dereference(tsk->sighand);
		if (unlikely(sighand == NULL))
			break;

		spin_lock_irqsave(&sighand->siglock, *flags);
		if (likely(sighand == tsk->sighand))
			break;
		spin_unlock_irqrestore(&sighand->siglock, *flags);
	}
	rcu_read_unlock();

	return sighand;
}

/*
 * send signal info to all the members of a group
 */
int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	int ret;

	rcu_read_lock();
	ret = check_kill_permission(sig, info, p);
	rcu_read_unlock();

	if (!ret && sig)
		ret = do_send_sig_info(sig, info, p, true);

	return ret;
}

/*
 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
 * control characters do (^C, ^Z etc)
 * - the caller must hold at least a readlock on tasklist_lock
 */
int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
{
	struct task_struct *p = NULL;
	int retval, success;

	success = 0;
	retval = -ESRCH;
	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		int err = group_send_sig_info(sig, info, p);
		success |= !err;
		retval = err;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
	return success ? 0 : retval;
}

int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
{
	int error = -ESRCH;
	struct task_struct *p;

	rcu_read_lock();
retry:
	p = pid_task(pid, PIDTYPE_PID);
	if (p) {
		error = group_send_sig_info(sig, info, p);
		if (unlikely(error == -ESRCH))
			/*
			 * The task was unhashed in between, try again.
			 * If it is dead, pid_task() will return NULL,
			 * if we race with de_thread() it will find the
			 * new leader.
			 */
			goto retry;
	}
	rcu_read_unlock();

	return error;
}

int
kill_proc_info(int sig, struct siginfo *info, pid_t pid)
{
	int error;
	rcu_read_lock();
	error = kill_pid_info(sig, info, find_vpid(pid));
	rcu_read_unlock();
	return error;
}
/* like kill_pid_info(), but doesn't use uid/euid of "current" */
int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
		      uid_t uid, uid_t euid, u32 secid)
{
	int ret = -EINVAL;
	struct task_struct *p;
	const struct cred *pcred;
	unsigned long flags;

	if (!valid_signal(sig))
		return ret;

	rcu_read_lock();
	p = pid_task(pid, PIDTYPE_PID);
	if (!p) {
		ret = -ESRCH;
		goto out_unlock;
	}
	pcred = __task_cred(p);
	if (si_fromuser(info) &&
	    euid != pcred->suid && euid != pcred->uid &&
	    uid  != pcred->suid && uid  != pcred->uid) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = security_task_kill(p, info, sig, secid);
	if (ret)
		goto out_unlock;

	if (sig) {
		if (lock_task_sighand(p, &flags)) {
			ret = __send_signal(sig, info, p, 1, 0);
			unlock_task_sighand(p, &flags);
		} else
			ret = -ESRCH;
	}
out_unlock:
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);

/*
 * kill_something_info() interprets pid in interesting ways just like kill(2).
 *
 * POSIX specifies that kill(-1,sig) is unspecified, but what we have
 * is probably wrong.  Should make it like BSD or SYSV.
 */

static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
{
	int ret;

	if (pid > 0) {
		rcu_read_lock();
		ret = kill_pid_info(sig, info, find_vpid(pid));
		rcu_read_unlock();
		return ret;
	}

	read_lock(&tasklist_lock);
	if (pid != -1) {
		ret = __kill_pgrp_info(sig, info,
				pid ? find_vpid(-pid) : task_pgrp(current));
	} else {
		int retval = 0, count = 0;
		struct task_struct * p;

		for_each_process(p) {
			if (task_pid_vnr(p) > 1 &&
					!same_thread_group(p, current)) {
				int err = group_send_sig_info(sig, info, p);
				++count;
				if (err != -EPERM)
					retval = err;
			}
		}
		ret = count ? retval : -ESRCH;
	}
	read_unlock(&tasklist_lock);

	return ret;
}

/*
 * These are for backward compatibility with the rest of the kernel source.
 */

int
send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
{
	/*
	 * Make sure legacy kernel users don't send in bad values
	 * (normal paths check this in check_kill_permission).
	 */
	if (!valid_signal(sig))
		return -EINVAL;

	return do_send_sig_info(sig, info, p, false);
}

#define __si_special(priv) \
	((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)

int
send_sig(int sig, struct task_struct *p, int priv)
{
	return send_sig_info(sig, __si_special(priv), p);
}

void
force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

/*
 * When things go south during signal handling, we
 * will force a SIGSEGV. And if the signal that caused
 * the problem was already a SIGSEGV, we'll want to
 * make sure we don't even try to deliver the signal..
 */
int
force_sigsegv(int sig, struct task_struct *p)
{
	if (sig == SIGSEGV) {
		unsigned long flags;
		spin_lock_irqsave(&p->sighand->siglock, flags);
		p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
		spin_unlock_irqrestore(&p->sighand->siglock, flags);
	}
	force_sig(SIGSEGV, p);
	return 0;
}

int kill_pgrp(struct pid *pid, int sig, int priv)
{
	int ret;

	read_lock(&tasklist_lock);
	ret = __kill_pgrp_info(sig, __si_special(priv), pid);
	read_unlock(&tasklist_lock);

	return ret;
}
EXPORT_SYMBOL(kill_pgrp);

int kill_pid(struct pid *pid, int sig, int priv)
{
	return kill_pid_info(sig, __si_special(priv), pid);
}
EXPORT_SYMBOL(kill_pid);
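
/*
 * Illustrative sketch (not part of the original file): how a driver that
 * cached a struct pid reference at open() time would signal that process.
 * The get/put around the cached pid is the usual refcounting pattern;
 * names are hypothetical and the block is under #if 0, never built.
 */
#if 0
static void example_notify_owner(struct example_dev *dev)
{
	struct pid *owner = get_pid(dev->owner_pid);	/* cached at open() */

	kill_pid(owner, SIGIO, 1);	/* priv=1: SEND_SIG_PRIV, from kernel */
	put_pid(owner);
}
#endif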

/*
 * These functions support sending signals using preallocated sigqueue
 * structures.  This is needed "because realtime applications cannot
 * afford to lose notifications of asynchronous events, like timer
 * expirations or I/O completions".  In the case of Posix Timers
 * we allocate the sigqueue structure from the timer_create.  If this
 * allocation fails we are able to report the failure to the application
 * with an EAGAIN error.
 */
struct sigqueue *sigqueue_alloc(void)
{
	struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);

	if (q)
		q->flags |= SIGQUEUE_PREALLOC;

	return q;
}

void sigqueue_free(struct sigqueue *q)
{
	unsigned long flags;
	spinlock_t *lock = &current->sighand->siglock;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
	/*
	 * We must hold ->siglock while testing q->list
	 * to serialize with collect_signal() or with
	 * __exit_signal()->flush_sigqueue().
	 */
	spin_lock_irqsave(lock, flags);
	q->flags &= ~SIGQUEUE_PREALLOC;
	/*
	 * If it is queued it will be freed when dequeued,
	 * like the "regular" sigqueue.
	 */
	if (!list_empty(&q->list))
		q = NULL;
	spin_unlock_irqrestore(lock, flags);

	if (q)
		__sigqueue_free(q);
}

int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
{
	int sig = q->info.si_signo;
	struct sigpending *pending;
	unsigned long flags;
	int ret;

	BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));

	ret = -1;
	if (!likely(lock_task_sighand(t, &flags)))
		goto ret;

	ret = 1; /* the signal is ignored */
	if (!prepare_signal(sig, t, 0))
		goto out;

	ret = 0;
	if (unlikely(!list_empty(&q->list))) {
		/*
		 * If an SI_TIMER entry is already queued, just increment
		 * the overrun count.
		 */
		BUG_ON(q->info.si_code != SI_TIMER);
		q->info.si_overrun++;
		goto out;
	}
	q->info.si_overrun = 0;

	signalfd_notify(t, sig);
	pending = group ? &t->signal->shared_pending : &t->pending;
	list_add_tail(&q->list, &pending->list);
	sigaddset(&pending->signal, sig);
	complete_signal(sig, t, group);
out:
	unlock_task_sighand(t, &flags);
ret:
	return ret;
}
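
/*
 * Illustrative sketch (not part of the original file): the preallocated
 * sigqueue life cycle as the posix-timers code uses it -- allocate at
 * timer_create(), fire at expiry, release at timer_delete().  Condensed
 * and hypothetical; kept under #if 0, never built.
 */
#if 0
static int example_timer_lifecycle(struct task_struct *target)
{
	struct sigqueue *q = sigqueue_alloc();	/* at timer_create() */

	if (!q)
		return -EAGAIN;	/* reportable, unlike kill()'s no-fail rule */

	q->info.si_signo = SIGALRM;
	q->info.si_code  = SI_TIMER;

	send_sigqueue(q, target, 0);	/* at each expiry; requeue bumps
					 * si_overrun instead of failing */

	sigqueue_free(q);		/* at timer_delete() */
	return 0;
}
#endif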

/*
 * Let a parent know about the death of a child.
 * For a stopped/continued status change, use do_notify_parent_cldstop instead.
 *
 * Returns -1 if our parent ignored us and so we've switched to
 * self-reaping, or else @sig.
 */
int do_notify_parent(struct task_struct *tsk, int sig)
{
	struct siginfo info;
	unsigned long flags;
	struct sighand_struct *psig;
	int ret = sig;

	BUG_ON(sig == -1);

	/* do_notify_parent_cldstop should have been called instead.  */
	BUG_ON(task_is_stopped_or_traced(tsk));

	BUG_ON(!task_ptrace(tsk) &&
	       (tsk->group_leader != tsk || !thread_group_empty(tsk)));

	info.si_signo = sig;
	info.si_errno = 0;
	/*
	 * we are under tasklist_lock here so our parent is tied to
	 * us and cannot exit and release its namespace.
	 *
	 * the only thing it can do is switch its nsproxy with sys_unshare,
	 * but unsharing pid namespaces is not allowed, so we'll always
	 * see the relevant namespace
	 *
	 * write_lock() currently calls preempt_disable() which is the
	 * same as rcu_read_lock(), but according to Oleg, it is not
	 * correct to rely on this
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
				tsk->signal->utime));
	info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
				tsk->signal->stime));

	info.si_status = tsk->exit_code & 0x7f;
	if (tsk->exit_code & 0x80)
		info.si_code = CLD_DUMPED;
	else if (tsk->exit_code & 0x7f)
		info.si_code = CLD_KILLED;
	else {
		info.si_code = CLD_EXITED;
		info.si_status = tsk->exit_code >> 8;
	}

	psig = tsk->parent->sighand;
	spin_lock_irqsave(&psig->siglock, flags);
	if (!task_ptrace(tsk) && sig == SIGCHLD &&
	    (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
	     (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
		/*
		 * We are exiting and our parent doesn't care.  POSIX.1
		 * defines special semantics for setting SIGCHLD to SIG_IGN
		 * or setting the SA_NOCLDWAIT flag: we should be reaped
		 * automatically and not left for our parent's wait4 call.
		 * Rather than having the parent do it as a magic kind of
		 * signal handler, we just set this to tell do_exit that we
		 * can be cleaned up without becoming a zombie.  Note that
		 * we still call __wake_up_parent in this case, because a
		 * blocked sys_wait4 might now return -ECHILD.
		 *
		 * Whether we send SIGCHLD or not for SA_NOCLDWAIT
		 * is implementation-defined: we do (if you don't want
		 * it, just use SIG_IGN instead).
		 */
		ret = tsk->exit_signal = -1;
		if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
			sig = -1;
	}
	if (valid_signal(sig) && sig > 0)
		__group_send_sig_info(sig, &info, tsk->parent);
	__wake_up_parent(tsk, tsk->parent);
	spin_unlock_irqrestore(&psig->siglock, flags);

	return ret;
}

static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
{
	struct siginfo info;
	unsigned long flags;
	struct task_struct *parent;
	struct sighand_struct *sighand;

	if (task_ptrace(tsk))
		parent = tsk->parent;
	else {
		tsk = tsk->group_leader;
		parent = tsk->real_parent;
	}

	info.si_signo = SIGCHLD;
	info.si_errno = 0;
	/*
	 * see comment in do_notify_parent() about the following 3 lines
	 */
	rcu_read_lock();
	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
	info.si_uid = __task_cred(tsk)->uid;
	rcu_read_unlock();

	info.si_utime = cputime_to_clock_t(tsk->utime);
	info.si_stime = cputime_to_clock_t(tsk->stime);

	info.si_code = why;
	switch (why) {
	case CLD_CONTINUED:
		info.si_status = SIGCONT;
		break;
	case CLD_STOPPED:
		info.si_status = tsk->signal->group_exit_code & 0x7f;
		break;
	case CLD_TRAPPED:
		info.si_status = tsk->exit_code & 0x7f;
		break;
	default:
		BUG();
	}

	sighand = parent->sighand;
	spin_lock_irqsave(&sighand->siglock, flags);
	if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
	    !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
		__group_send_sig_info(SIGCHLD, &info, parent);
	/*
	 * Even if SIGCHLD is not generated, we must wake up wait4 calls.
	 */
	__wake_up_parent(tsk, parent);
	spin_unlock_irqrestore(&sighand->siglock, flags);
}

static inline int may_ptrace_stop(void)
{
	if (!likely(task_ptrace(current)))
		return 0;
	/*
	 * Are we in the middle of do_coredump?
	 * If so and our tracer is also part of the coredump stopping
	 * is a deadlock situation, and pointless because our tracer
	 * is dead so don't allow us to stop.
	 * If SIGKILL was already sent before the caller unlocked
	 * ->siglock we must see ->core_state != NULL. Otherwise it
	 * is safe to enter schedule().
	 */
	if (unlikely(current->mm->core_state) &&
	    unlikely(current->mm == current->parent->mm))
		return 0;

	return 1;
}

/*
 * Return nonzero if there is a SIGKILL that should be waking us up.
 * Called with the siglock held.
 */
static int sigkill_pending(struct task_struct *tsk)
{
	return	sigismember(&tsk->pending.signal, SIGKILL) ||
		sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
}

/*
 * This must be called with current->sighand->siglock held.
 *
 * This should be the path for all ptrace stops.
 * We always set current->last_siginfo while stopped here.
 * That makes it a way to test a stopped process for
 * being ptrace-stopped vs being job-control-stopped.
 *
 * If we actually decide not to stop at all because the tracer
 * is gone, we keep current->exit_code unless clear_code.
 */
static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
	__releases(&current->sighand->siglock)
	__acquires(&current->sighand->siglock)
{
	if (arch_ptrace_stop_needed(exit_code, info)) {
		/*
		 * The arch code has something special to do before a
		 * ptrace stop.  This is allowed to block, e.g. for faults
		 * on user stack pages.  We can't keep the siglock while
		 * calling arch_ptrace_stop, so we must release it now.
		 * To preserve proper semantics, we must do this before
		 * any signal bookkeeping like checking group_stop_count.
		 * Meanwhile, a SIGKILL could come in before we retake the
		 * siglock.  That must prevent us from sleeping in TASK_TRACED.
		 * So after regaining the lock, we must check for SIGKILL.
		 */
		spin_unlock_irq(&current->sighand->siglock);
		arch_ptrace_stop(exit_code, info);
		spin_lock_irq(&current->sighand->siglock);
		if (sigkill_pending(current))
			return;
	}

	/*
	 * If there is a group stop in progress,
	 * we must participate in the bookkeeping.
	 */
	if (current->group_stop & GROUP_STOP_PENDING)
		task_participate_group_stop(current);

	current->last_siginfo = info;
	current->exit_code = exit_code;

	/* Let the debugger run.  */
	__set_current_state(TASK_TRACED);
	spin_unlock_irq(&current->sighand->siglock);
	read_lock(&tasklist_lock);
	if (may_ptrace_stop()) {
		do_notify_parent_cldstop(current, why);
		/*
		 * Don't want to allow preemption here, because
		 * sys_ptrace() needs this task to be inactive.
		 *
		 * XXX: implement read_unlock_no_resched().
		 */
		preempt_disable();
		read_unlock(&tasklist_lock);
		preempt_enable_no_resched();
		schedule();
	} else {
		/*
		 * By the time we got the lock, our tracer went away.
		 * Don't drop the lock yet, another tracer may come.
		 */
		__set_current_state(TASK_RUNNING);
		if (clear_code)
			current->exit_code = 0;
		read_unlock(&tasklist_lock);
	}

	/*
	 * While in TASK_TRACED, we were considered "frozen enough".
	 * Now that we woke up, it's crucial if we're supposed to be
	 * frozen that we freeze now before running anything substantial.
	 */
	try_to_freeze();

	/*
	 * We are back.  Now reacquire the siglock before touching
	 * last_siginfo, so that we are sure to have synchronized with
	 * any signal-sending on another CPU that wants to examine it.
	 */
	spin_lock_irq(&current->sighand->siglock);
	current->last_siginfo = NULL;

	/*
	 * Queued signals ignored us while we were stopped for tracing.
	 * So check for any that we should take before resuming user mode.
	 * This sets TIF_SIGPENDING, but never clears it.
	 */
	recalc_sigpending_tsk(current);
}

void ptrace_notify(int exit_code)
{
	siginfo_t info;

	BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);

	memset(&info, 0, sizeof info);
	info.si_signo = SIGTRAP;
	info.si_code = exit_code;
	info.si_pid = task_pid_vnr(current);
	info.si_uid = current_uid();

	/* Let the debugger run.  */
	spin_lock_irq(&current->sighand->siglock);
	ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
	spin_unlock_irq(&current->sighand->siglock);
}
1773
1da177e4
LT
1774/*
1775 * This performs the stopping for SIGSTOP and other stop signals.
1776 * We have to stop all threads in the thread group.
1777 * Returns nonzero if we've actually stopped and released the siglock.
1778 * Returns zero if we didn't stop and still hold the siglock.
1779 */
a122b341 1780static int do_signal_stop(int signr)
1da177e4
LT
1781{
1782 struct signal_struct *sig = current->signal;
edf2ed15 1783 int notify = 0;
1da177e4 1784
39efa3ef
TH
1785 if (!(current->group_stop & GROUP_STOP_PENDING)) {
1786 unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
f558b7e4
ON
1787 struct task_struct *t;
1788
2b201a9e 1789 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
573cf9ad 1790 unlikely(signal_group_exit(sig)))
f558b7e4 1791 return 0;
1da177e4
LT
1792 /*
1793 * There is no group stop already in progress.
a122b341 1794 * We must initiate one now.
1da177e4 1795 */
a122b341 1796 sig->group_exit_code = signr;
1da177e4 1797
e5c1902e 1798 current->group_stop = gstop;
ae6d2ed7 1799 sig->group_stop_count = 1;
a122b341 1800 for (t = next_thread(current); t != current; t = next_thread(t))
1da177e4 1801 /*
a122b341
ON
1802 * Setting state to TASK_STOPPED for a group
1803 * stop is always done with the siglock held,
1804 * so this check has no races.
1da177e4 1805 */
39efa3ef 1806 if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
e5c1902e 1807 t->group_stop = gstop;
ae6d2ed7 1808 sig->group_stop_count++;
a122b341 1809 signal_wake_up(t, 0);
e5c1902e
TH
1810 } else
1811 task_clear_group_stop_pending(t);
1da177e4 1812 }
ae6d2ed7
RM
1813 /*
1814 * If there are no other threads in the group, or if there is
1815 * a group stop in progress and we are the last to stop, report
1816 * to the parent. When ptraced, every thread reports itself.
1817 */
e5c1902e 1818 if (task_participate_group_stop(current))
edf2ed15 1819 notify = CLD_STOPPED;
edf2ed15
TH
1820 if (task_ptrace(current))
1821 notify = CLD_STOPPED;
1822
1823 current->exit_code = sig->group_exit_code;
1824 __set_current_state(TASK_STOPPED);
1825
ae6d2ed7 1826 spin_unlock_irq(&current->sighand->siglock);
1da177e4 1827
ae6d2ed7
RM
1828 if (notify) {
1829 read_lock(&tasklist_lock);
1830 do_notify_parent_cldstop(current, notify);
1831 read_unlock(&tasklist_lock);
1832 }
1833
1834 /* Now we don't run again until woken by SIGCONT or SIGKILL */
71db5eb9 1835 schedule();
ae6d2ed7
RM
1836
1837 tracehook_finish_jctl();
1838 current->exit_code = 0;
dac27f4a 1839
1da177e4
LT
1840 return 1;
1841}
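/*
 * A userspace sketch of the group stop implemented above: one SIGSTOP
 * stops every thread in the target, and the parent gets a single
 * CLD_STOPPED notification via waitpid(..., WUNTRACED). Assumes 'pid'
 * is a child of the caller; error handling elided.
 */
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>
#include <stdio.h>

static void stop_and_report(pid_t pid)
{
	int status;

	kill(pid, SIGSTOP);			/* initiate the group stop */
	waitpid(pid, &status, WUNTRACED);	/* reported once per group */
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));
	kill(pid, SIGCONT);			/* wake the group up again */
}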
1842
18c98b65
RM
1843static int ptrace_signal(int signr, siginfo_t *info,
1844 struct pt_regs *regs, void *cookie)
1845{
5cb11446 1846 if (!task_ptrace(current))
18c98b65
RM
1847 return signr;
1848
1849 ptrace_signal_deliver(regs, cookie);
1850
1851 /* Let the debugger run. */
fe1bc6a0 1852 ptrace_stop(signr, CLD_TRAPPED, 0, info);
18c98b65
RM
1853
1854 /* We're back. Did the debugger cancel the sig? */
1855 signr = current->exit_code;
1856 if (signr == 0)
1857 return signr;
1858
1859 current->exit_code = 0;
1860
1861 /* Update the siginfo structure if the signal has
1862 changed. If the debugger wanted something
1863 specific in the siginfo structure then it should
1864 have updated *info via PTRACE_SETSIGINFO. */
1865 if (signr != info->si_signo) {
1866 info->si_signo = signr;
1867 info->si_errno = 0;
1868 info->si_code = SI_USER;
1869 info->si_pid = task_pid_vnr(current->parent);
c69e8d9c 1870 info->si_uid = task_uid(current->parent);
18c98b65
RM
1871 }
1872
1873 /* If the (new) signal is now blocked, requeue it. */
1874 if (sigismember(&current->blocked, signr)) {
1875 specific_send_sig_info(signr, info, current);
1876 signr = 0;
1877 }
1878
1879 return signr;
1880}
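/*
 * Sketch of the tracer-side choices that ptrace_signal() honours on a
 * signal-delivery stop: resume with 0 to cancel the signal, pass it
 * through unchanged, or substitute another number (siginfo can also be
 * rewritten first with PTRACE_SETSIGINFO). 'child' and 'status' are
 * assumed to come from waitpid() on an attached tracee.
 */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <signal.h>

static void filter_signal(pid_t child, int status)
{
	int sig = WSTOPSIG(status);

	if (sig == SIGUSR1)
		/* cancel: the tracee's exit_code stays 0 */
		ptrace(PTRACE_CONT, child, NULL, NULL);
	else
		/* deliver, possibly a different signal than was dequeued */
		ptrace(PTRACE_CONT, child, NULL, (void *)(long)sig);
}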
1881
1da177e4
LT
1882int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1883 struct pt_regs *regs, void *cookie)
1884{
f6b76d4f
ON
1885 struct sighand_struct *sighand = current->sighand;
1886 struct signal_struct *signal = current->signal;
1887 int signr;
1da177e4 1888
13b1c3d4
RM
1889relock:
1890 /*
1891 * We'll jump back here any time we are stopped in TASK_STOPPED.
1892 * While in TASK_STOPPED, we were considered "frozen enough".
1893 * Now that we woke up, it's crucial that, if we're supposed to
1894 * be frozen, we freeze now before running anything substantial.
1895 */
fc558a74
RW
1896 try_to_freeze();
1897
f6b76d4f 1898 spin_lock_irq(&sighand->siglock);
021e1ae3
ON
1899 /*
1900 * Every stopped thread goes here after wakeup. Check to see if
1901 * we should notify the parent, prepare_signal(SIGCONT) encodes
1902 * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1903 */
f6b76d4f 1904 if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
c672af35
TH
1905 int why;
1906
1907 if (signal->flags & SIGNAL_CLD_CONTINUED)
1908 why = CLD_CONTINUED;
1909 else
1910 why = CLD_STOPPED;
1911
f6b76d4f 1912 signal->flags &= ~SIGNAL_CLD_MASK;
e4420551 1913
ae6d2ed7 1914 spin_unlock_irq(&sighand->siglock);
fa00b80b 1915
edf2ed15
TH
1916 read_lock(&tasklist_lock);
1917 do_notify_parent_cldstop(current->group_leader, why);
1918 read_unlock(&tasklist_lock);
e4420551
ON
1919 goto relock;
1920 }
1921
1da177e4
LT
1922 for (;;) {
1923 struct k_sigaction *ka;
7bcf6a2c
RM
1924 /*
1925 * Tracing can induce an artificial signal and choose sigaction.
1926 * The return value in @signr determines the default action,
1927 * but @info->si_signo is the signal number we will report.
1928 */
1929 signr = tracehook_get_signal(current, regs, info, return_ka);
1930 if (unlikely(signr < 0))
1931 goto relock;
1932 if (unlikely(signr != 0))
1933 ka = return_ka;
1934 else {
39efa3ef
TH
1935 if (unlikely(current->group_stop &
1936 GROUP_STOP_PENDING) && do_signal_stop(0))
1be53963
ON
1937 goto relock;
1938
7bcf6a2c
RM
1939 signr = dequeue_signal(current, &current->blocked,
1940 info);
1da177e4 1941
18c98b65 1942 if (!signr)
7bcf6a2c
RM
1943 break; /* will return 0 */
1944
1945 if (signr != SIGKILL) {
1946 signr = ptrace_signal(signr, info,
1947 regs, cookie);
1948 if (!signr)
1949 continue;
1950 }
1951
1952 ka = &sighand->action[signr-1];
1da177e4
LT
1953 }
1954
f9d4257e
MH
1955 /* Trace actually delivered signals. */
1956 trace_signal_deliver(signr, info, ka);
1957
1da177e4
LT
1958 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */
1959 continue;
1960 if (ka->sa.sa_handler != SIG_DFL) {
1961 /* Run the handler. */
1962 *return_ka = *ka;
1963
1964 if (ka->sa.sa_flags & SA_ONESHOT)
1965 ka->sa.sa_handler = SIG_DFL;
1966
1967 break; /* will return non-zero "signr" value */
1968 }
1969
1970 /*
1971 * Now we are doing the default action for this signal.
1972 */
1973 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1974 continue;
1975
84d73786 1976 /*
0fbc26a6 1977 * Global init gets no signals it doesn't want.
b3bfa0cb
SB
1978 * Container-init gets no signals it doesn't want from same
1979 * container.
1980 *
1981 * Note that if global/container-init sees a sig_kernel_only()
1982 * signal here, the signal must have been generated internally
1983 * or must have come from an ancestor namespace. In either
1984 * case, the signal cannot be dropped.
84d73786 1985 */
fae5fa44 1986 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
b3bfa0cb 1987 !sig_kernel_only(signr))
1da177e4
LT
1988 continue;
1989
1990 if (sig_kernel_stop(signr)) {
1991 /*
1992 * The default action is to stop all threads in
1993 * the thread group. The job control signals
1994 * do nothing in an orphaned pgrp, but SIGSTOP
1995 * always works. Note that siglock needs to be
1996 * dropped during the call to is_orphaned_pgrp()
1997 * because of lock ordering with tasklist_lock.
1998 * This allows an intervening SIGCONT to be posted.
1999 * We need to check for that and bail out if necessary.
2000 */
2001 if (signr != SIGSTOP) {
f6b76d4f 2002 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2003
2004 /* signals can be posted during this window */
2005
3e7cd6c4 2006 if (is_current_pgrp_orphaned())
1da177e4
LT
2007 goto relock;
2008
f6b76d4f 2009 spin_lock_irq(&sighand->siglock);
1da177e4
LT
2010 }
2011
7bcf6a2c 2012 if (likely(do_signal_stop(info->si_signo))) {
1da177e4
LT
2013 /* It released the siglock. */
2014 goto relock;
2015 }
2016
2017 /*
2018 * We didn't actually stop, due to a race
2019 * with SIGCONT or something like that.
2020 */
2021 continue;
2022 }
2023
f6b76d4f 2024 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2025
2026 /*
2027 * Anything else is fatal, maybe with a core dump.
2028 */
2029 current->flags |= PF_SIGNALED;
2dce81bf 2030
1da177e4 2031 if (sig_kernel_coredump(signr)) {
2dce81bf 2032 if (print_fatal_signals)
7bcf6a2c 2033 print_fatal_signal(regs, info->si_signo);
1da177e4
LT
2034 /*
2035 * If it was able to dump core, this kills all
2036 * other threads in the group and synchronizes with
2037 * their demise. If we lost the race with another
2038 * thread getting here, it set group_exit_code
2039 * first and our do_group_exit call below will use
2040 * that value and ignore the one we pass it.
2041 */
7bcf6a2c 2042 do_coredump(info->si_signo, info->si_signo, regs);
1da177e4
LT
2043 }
2044
2045 /*
2046 * Death signals, no core dump.
2047 */
7bcf6a2c 2048 do_group_exit(info->si_signo);
1da177e4
LT
2049 /* NOTREACHED */
2050 }
f6b76d4f 2051 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
2052 return signr;
2053}
2054
d12619b5
ON
2055void exit_signals(struct task_struct *tsk)
2056{
2057 int group_stop = 0;
5dee1707 2058 struct task_struct *t;
d12619b5 2059
5dee1707
ON
2060 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2061 tsk->flags |= PF_EXITING;
2062 return;
d12619b5
ON
2063 }
2064
5dee1707 2065 spin_lock_irq(&tsk->sighand->siglock);
d12619b5
ON
2066 /*
2067 * From now this task is not visible for group-wide signals,
2068 * see wants_signal(), do_signal_stop().
2069 */
2070 tsk->flags |= PF_EXITING;
5dee1707
ON
2071 if (!signal_pending(tsk))
2072 goto out;
2073
2074 /* It could be that __group_complete_signal() chose us to
2075 * notify about group-wide signal. Another thread should be
2076 * woken now to take the signal since we will not.
2077 */
2078 for (t = tsk; (t = next_thread(t)) != tsk; )
2079 if (!signal_pending(t) && !(t->flags & PF_EXITING))
2080 recalc_sigpending_and_wake(t);
2081
39efa3ef 2082 if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
e5c1902e 2083 task_participate_group_stop(tsk))
edf2ed15 2084 group_stop = CLD_STOPPED;
5dee1707 2085out:
d12619b5
ON
2086 spin_unlock_irq(&tsk->sighand->siglock);
2087
ae6d2ed7 2088 if (unlikely(group_stop)) {
d12619b5 2089 read_lock(&tasklist_lock);
ae6d2ed7 2090 do_notify_parent_cldstop(tsk, group_stop);
d12619b5
ON
2091 read_unlock(&tasklist_lock);
2092 }
2093}
2094
1da177e4
LT
2095EXPORT_SYMBOL(recalc_sigpending);
2096EXPORT_SYMBOL_GPL(dequeue_signal);
2097EXPORT_SYMBOL(flush_signals);
2098EXPORT_SYMBOL(force_sig);
1da177e4
LT
2099EXPORT_SYMBOL(send_sig);
2100EXPORT_SYMBOL(send_sig_info);
2101EXPORT_SYMBOL(sigprocmask);
2102EXPORT_SYMBOL(block_all_signals);
2103EXPORT_SYMBOL(unblock_all_signals);
2104
2105
2106/*
2107 * System call entry points.
2108 */
2109
754fe8d2 2110SYSCALL_DEFINE0(restart_syscall)
1da177e4
LT
2111{
2112 struct restart_block *restart = &current_thread_info()->restart_block;
2113 return restart->fn(restart);
2114}
2115
2116long do_no_restart_syscall(struct restart_block *param)
2117{
2118 return -EINTR;
2119}
2120
2121/*
2122 * We don't need to get the kernel lock - this is all local to this
2123 * particular thread.. (and that's good, because this is _heavily_
2124 * used by various programs)
2125 */
2126
2127/*
2128 * This is also useful for kernel threads that want to temporarily
2129 * (or permanently) block certain signals.
2130 *
2131 * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2132 * interface happily blocks "unblockable" signals like SIGKILL
2133 * and friends.
2134 */
2135int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2136{
2137 int error;
1da177e4
LT
2138
2139 spin_lock_irq(&current->sighand->siglock);
a26fd335
ON
2140 if (oldset)
2141 *oldset = current->blocked;
2142
1da177e4
LT
2143 error = 0;
2144 switch (how) {
2145 case SIG_BLOCK:
2146 sigorsets(&current->blocked, &current->blocked, set);
2147 break;
2148 case SIG_UNBLOCK:
2149 signandsets(&current->blocked, &current->blocked, set);
2150 break;
2151 case SIG_SETMASK:
2152 current->blocked = *set;
2153 break;
2154 default:
2155 error = -EINVAL;
2156 }
2157 recalc_sigpending();
2158 spin_unlock_irq(&current->sighand->siglock);
a26fd335 2159
1da177e4
LT
2160 return error;
2161}
2162
17da2bd9
HC
2163SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2164 sigset_t __user *, oset, size_t, sigsetsize)
1da177e4
LT
2165{
2166 int error = -EINVAL;
2167 sigset_t old_set, new_set;
2168
2169 /* XXX: Don't preclude handling different sized sigset_t's. */
2170 if (sigsetsize != sizeof(sigset_t))
2171 goto out;
2172
2173 if (set) {
2174 error = -EFAULT;
2175 if (copy_from_user(&new_set, set, sizeof(*set)))
2176 goto out;
2177 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2178
2179 error = sigprocmask(how, &new_set, &old_set);
2180 if (error)
2181 goto out;
2182 if (oset)
2183 goto set_old;
2184 } else if (oset) {
2185 spin_lock_irq(&current->sighand->siglock);
2186 old_set = current->blocked;
2187 spin_unlock_irq(&current->sighand->siglock);
2188
2189 set_old:
2190 error = -EFAULT;
2191 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2192 goto out;
2193 }
2194 error = 0;
2195out:
2196 return error;
2197}
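/*
 * Userspace sketch of the rt_sigprocmask semantics above: SIGKILL and
 * SIGSTOP are silently deleted from the new set, so attempting to block
 * them has no effect. Error handling elided.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);
	sigaddset(&set, SIGKILL);		/* the kernel drops this one */
	sigprocmask(SIG_BLOCK, &set, NULL);

	sigprocmask(SIG_SETMASK, NULL, &set);	/* read the mask back */
	printf("SIGINT blocked: %d, SIGKILL blocked: %d\n",
	       sigismember(&set, SIGINT), sigismember(&set, SIGKILL));
	return 0;				/* prints 1 and 0 */
}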
2198
2199long do_sigpending(void __user *set, unsigned long sigsetsize)
2200{
2201 long error = -EINVAL;
2202 sigset_t pending;
2203
2204 if (sigsetsize > sizeof(sigset_t))
2205 goto out;
2206
2207 spin_lock_irq(&current->sighand->siglock);
2208 sigorsets(&pending, &current->pending.signal,
2209 &current->signal->shared_pending.signal);
2210 spin_unlock_irq(&current->sighand->siglock);
2211
2212 /* Outside the lock because only this thread touches it. */
2213 sigandsets(&pending, &current->blocked, &pending);
2214
2215 error = -EFAULT;
2216 if (!copy_to_user(set, &pending, sigsetsize))
2217 error = 0;
2218
2219out:
2220 return error;
2221}
2222
17da2bd9 2223SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
1da177e4
LT
2224{
2225 return do_sigpending(set, sigsetsize);
2226}
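/*
 * Sketch pairing with do_sigpending() above: a signal raised while
 * blocked shows up in sigpending() until it is unblocked and delivered.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);

	raise(SIGUSR1);			/* queued, not deliverable yet */

	sigpending(&pending);
	printf("SIGUSR1 pending: %d\n", sigismember(&pending, SIGUSR1));
	return 0;
}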
2227
2228#ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2229
2230int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2231{
2232 int err;
2233
2234 if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2235 return -EFAULT;
2236 if (from->si_code < 0)
2237 return __copy_to_user(to, from, sizeof(siginfo_t))
2238 ? -EFAULT : 0;
2239 /*
2240 * If you change siginfo_t structure, please be sure
2241 * this code is fixed accordingly.
fba2afaa
DL
2242 * Please remember to update the signalfd_copyinfo() function
2243 * inside fs/signalfd.c too, in case siginfo_t changes.
1da177e4
LT
2244 * It should never copy any pad contained in the structure
2245 * to avoid security leaks, but must copy the generic
2246 * 3 ints plus the relevant union member.
2247 */
2248 err = __put_user(from->si_signo, &to->si_signo);
2249 err |= __put_user(from->si_errno, &to->si_errno);
2250 err |= __put_user((short)from->si_code, &to->si_code);
2251 switch (from->si_code & __SI_MASK) {
2252 case __SI_KILL:
2253 err |= __put_user(from->si_pid, &to->si_pid);
2254 err |= __put_user(from->si_uid, &to->si_uid);
2255 break;
2256 case __SI_TIMER:
2257 err |= __put_user(from->si_tid, &to->si_tid);
2258 err |= __put_user(from->si_overrun, &to->si_overrun);
2259 err |= __put_user(from->si_ptr, &to->si_ptr);
2260 break;
2261 case __SI_POLL:
2262 err |= __put_user(from->si_band, &to->si_band);
2263 err |= __put_user(from->si_fd, &to->si_fd);
2264 break;
2265 case __SI_FAULT:
2266 err |= __put_user(from->si_addr, &to->si_addr);
2267#ifdef __ARCH_SI_TRAPNO
2268 err |= __put_user(from->si_trapno, &to->si_trapno);
a337fdac
AK
2269#endif
2270#ifdef BUS_MCEERR_AO
2271 /*
2272 * Other callers might not initialize the si_addr_lsb field,
2273 * so check explicitly for the right codes here.
2274 */
2275 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2276 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
1da177e4
LT
2277#endif
2278 break;
2279 case __SI_CHLD:
2280 err |= __put_user(from->si_pid, &to->si_pid);
2281 err |= __put_user(from->si_uid, &to->si_uid);
2282 err |= __put_user(from->si_status, &to->si_status);
2283 err |= __put_user(from->si_utime, &to->si_utime);
2284 err |= __put_user(from->si_stime, &to->si_stime);
2285 break;
2286 case __SI_RT: /* This is not generated by the kernel as of now. */
2287 case __SI_MESGQ: /* But this is */
2288 err |= __put_user(from->si_pid, &to->si_pid);
2289 err |= __put_user(from->si_uid, &to->si_uid);
2290 err |= __put_user(from->si_ptr, &to->si_ptr);
2291 break;
2292 default: /* this is just in case for now ... */
2293 err |= __put_user(from->si_pid, &to->si_pid);
2294 err |= __put_user(from->si_uid, &to->si_uid);
2295 break;
2296 }
2297 return err;
2298}
2299
2300#endif
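/*
 * Userspace counterpart of the union handling above: an SA_SIGINFO
 * handler must likewise pick siginfo_t members based on si_code (here
 * the __SI_CHLD fields of a SIGCHLD). A sketch only: printf() is not
 * async-signal-safe, and error handling is elided.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void chld_handler(int sig, siginfo_t *si, void *ucontext)
{
	/* __SI_CHLD layout: si_pid, si_uid, si_status, si_utime, si_stime */
	printf("child %d: si_code=%d si_status=%d\n",
	       (int)si->si_pid, si->si_code, si->si_status);
}

static void install_chld_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = chld_handler;
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGCHLD, &sa, NULL);
}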
2301
17da2bd9
HC
2302SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2303 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2304 size_t, sigsetsize)
1da177e4
LT
2305{
2306 int ret, sig;
2307 sigset_t these;
2308 struct timespec ts;
2309 siginfo_t info;
2310 long timeout = 0;
2311
2312 /* XXX: Don't preclude handling different sized sigset_t's. */
2313 if (sigsetsize != sizeof(sigset_t))
2314 return -EINVAL;
2315
2316 if (copy_from_user(&these, uthese, sizeof(these)))
2317 return -EFAULT;
2318
2319 /*
2320 * Invert the set of allowed signals to get those we
2321 * want to block.
2322 */
2323 sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2324 signotset(&these);
2325
2326 if (uts) {
2327 if (copy_from_user(&ts, uts, sizeof(ts)))
2328 return -EFAULT;
2329 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2330 || ts.tv_sec < 0)
2331 return -EINVAL;
2332 }
2333
2334 spin_lock_irq(&current->sighand->siglock);
2335 sig = dequeue_signal(current, &these, &info);
2336 if (!sig) {
2337 timeout = MAX_SCHEDULE_TIMEOUT;
2338 if (uts)
2339 timeout = (timespec_to_jiffies(&ts)
2340 + (ts.tv_sec || ts.tv_nsec));
2341
2342 if (timeout) {
2343 /* None ready -- temporarily unblock the signals we're
2344 * interested in while we sleep, so that we'll be
2345 * awakened when they arrive. */
2346 current->real_blocked = current->blocked;
2347 sigandsets(&current->blocked, &current->blocked, &these);
2348 recalc_sigpending();
2349 spin_unlock_irq(&current->sighand->siglock);
2350
75bcc8c5 2351 timeout = schedule_timeout_interruptible(timeout);
1da177e4 2352
1da177e4
LT
2353 spin_lock_irq(&current->sighand->siglock);
2354 sig = dequeue_signal(current, &these, &info);
2355 current->blocked = current->real_blocked;
2356 siginitset(&current->real_blocked, 0);
2357 recalc_sigpending();
2358 }
2359 }
2360 spin_unlock_irq(&current->sighand->siglock);
2361
2362 if (sig) {
2363 ret = sig;
2364 if (uinfo) {
2365 if (copy_siginfo_to_user(uinfo, &info))
2366 ret = -EFAULT;
2367 }
2368 } else {
2369 ret = -EAGAIN;
2370 if (timeout)
2371 ret = -EINTR;
2372 }
2373
2374 return ret;
2375}
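/*
 * The syscall above from userspace, via sigtimedwait(2): block SIGUSR1,
 * then wait for it synchronously with a timeout. A return of -1 with
 * errno == EAGAIN corresponds to the -EAGAIN timeout path.
 */
#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec ts = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* must be blocked first */

	sig = sigtimedwait(&set, &info, &ts);
	if (sig < 0 && errno == EAGAIN)
		printf("timed out\n");
	else if (sig > 0)
		printf("got signal %d from pid %d\n", sig, (int)info.si_pid);
	return 0;
}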
2376
17da2bd9 2377SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
1da177e4
LT
2378{
2379 struct siginfo info;
2380
2381 info.si_signo = sig;
2382 info.si_errno = 0;
2383 info.si_code = SI_USER;
b488893a 2384 info.si_pid = task_tgid_vnr(current);
76aac0e9 2385 info.si_uid = current_uid();
1da177e4
LT
2386
2387 return kill_something_info(sig, &info, pid);
2388}
2389
30b4ae8a
TG
2390static int
2391do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
1da177e4 2392{
1da177e4 2393 struct task_struct *p;
30b4ae8a 2394 int error = -ESRCH;
1da177e4 2395
3547ff3a 2396 rcu_read_lock();
228ebcbe 2397 p = find_task_by_vpid(pid);
b488893a 2398 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
30b4ae8a 2399 error = check_kill_permission(sig, info, p);
1da177e4
LT
2400 /*
2401 * The null signal is a permissions and process existence
2402 * probe. No signal is actually delivered.
2403 */
4a30debf
ON
2404 if (!error && sig) {
2405 error = do_send_sig_info(sig, info, p, false);
2406 /*
2407 * If lock_task_sighand() failed we pretend the task
2408 * dies after receiving the signal. The window is tiny,
2409 * and the signal is private anyway.
2410 */
2411 if (unlikely(error == -ESRCH))
2412 error = 0;
1da177e4
LT
2413 }
2414 }
3547ff3a 2415 rcu_read_unlock();
6dd69f10 2416
1da177e4
LT
2417 return error;
2418}
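/*
 * The "null signal" probe described above, from userspace: kill(pid, 0)
 * delivers nothing but still runs the existence and permission checks.
 */
#include <sys/types.h>
#include <errno.h>
#include <signal.h>
#include <stdio.h>

static void probe(pid_t pid)
{
	if (kill(pid, 0) == 0)
		printf("%d exists and is signalable\n", (int)pid);
	else if (errno == ESRCH)
		printf("%d does not exist\n", (int)pid);
	else if (errno == EPERM)
		printf("%d exists, but we lack permission\n", (int)pid);
}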
2419
30b4ae8a
TG
2420static int do_tkill(pid_t tgid, pid_t pid, int sig)
2421{
2422 struct siginfo info;
2423
2424 info.si_signo = sig;
2425 info.si_errno = 0;
2426 info.si_code = SI_TKILL;
2427 info.si_pid = task_tgid_vnr(current);
2428 info.si_uid = current_uid();
2429
2430 return do_send_specific(tgid, pid, sig, &info);
2431}
2432
6dd69f10
VL
2433/**
2434 * sys_tgkill - send signal to one specific thread
2435 * @tgid: the thread group ID of the thread
2436 * @pid: the PID of the thread
2437 * @sig: signal to be sent
2438 *
72fd4a35 2439 * This syscall also checks the @tgid and returns -ESRCH even if the PID
6dd69f10
VL
2440 * exists but no longer belongs to the target process. This
2441 * method solves the problem of threads exiting and PIDs getting reused.
2442 */
a5f8fa9e 2443SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
6dd69f10
VL
2444{
2445 /* This is only valid for single tasks */
2446 if (pid <= 0 || tgid <= 0)
2447 return -EINVAL;
2448
2449 return do_tkill(tgid, pid, sig);
2450}
2451
1da177e4
LT
2452/*
2453 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2454 */
a5f8fa9e 2455SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
1da177e4 2456{
1da177e4
LT
2457 /* This is only valid for single tasks */
2458 if (pid <= 0)
2459 return -EINVAL;
2460
6dd69f10 2461 return do_tkill(0, pid, sig);
1da177e4
LT
2462}
2463
a5f8fa9e
HC
2464SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2465 siginfo_t __user *, uinfo)
1da177e4
LT
2466{
2467 siginfo_t info;
2468
2469 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2470 return -EFAULT;
2471
2472 /* Not even root can pretend to send signals from the kernel.
da48524e
JT
2473 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2474 */
2475 if (info.si_code != SI_QUEUE) {
2476 /* We used to allow any < 0 si_code */
2477 WARN_ON_ONCE(info.si_code < 0);
1da177e4 2478 return -EPERM;
da48524e 2479 }
1da177e4
LT
2480 info.si_signo = sig;
2481
2482 /* POSIX.1b doesn't mention process groups. */
2483 return kill_proc_info(sig, &info, pid);
2484}
2485
62ab4505
TG
2486long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2487{
2488 /* This is only valid for single tasks */
2489 if (pid <= 0 || tgid <= 0)
2490 return -EINVAL;
2491
2492 /* Not even root can pretend to send signals from the kernel.
da48524e
JT
2493 * Nor can they impersonate a kill()/tgkill(), which adds source info.
2494 */
2495 if (info->si_code != SI_QUEUE) {
2496 /* We used to allow any < 0 si_code */
2497 WARN_ON_ONCE(info->si_code < 0);
62ab4505 2498 return -EPERM;
da48524e 2499 }
62ab4505
TG
2500 info->si_signo = sig;
2501
2502 return do_send_specific(tgid, pid, sig, info);
2503}
2504
2505SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2506 siginfo_t __user *, uinfo)
2507{
2508 siginfo_t info;
2509
2510 if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2511 return -EFAULT;
2512
2513 return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2514}
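/*
 * Userspace path into the SI_QUEUE case above: sigqueue(3) attaches a
 * value to the signal, which the receiver reads from si_value in an
 * SA_SIGINFO handler. Assumes 'pid' names a process we may signal.
 */
#include <sys/types.h>
#include <signal.h>

static int send_value(pid_t pid)
{
	union sigval val;

	val.sival_int = 42;		/* arrives as info->si_value */
	return sigqueue(pid, SIGUSR1, val);
}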
2515
88531f72 2516int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
1da177e4 2517{
93585eea 2518 struct task_struct *t = current;
1da177e4 2519 struct k_sigaction *k;
71fabd5e 2520 sigset_t mask;
1da177e4 2521
7ed20e1a 2522 if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
1da177e4
LT
2523 return -EINVAL;
2524
93585eea 2525 k = &t->sighand->action[sig-1];
1da177e4
LT
2526
2527 spin_lock_irq(&current->sighand->siglock);
1da177e4
LT
2528 if (oact)
2529 *oact = *k;
2530
2531 if (act) {
9ac95f2f
ON
2532 sigdelsetmask(&act->sa.sa_mask,
2533 sigmask(SIGKILL) | sigmask(SIGSTOP));
88531f72 2534 *k = *act;
1da177e4
LT
2535 /*
2536 * POSIX 3.3.1.3:
2537 * "Setting a signal action to SIG_IGN for a signal that is
2538 * pending shall cause the pending signal to be discarded,
2539 * whether or not it is blocked."
2540 *
2541 * "Setting a signal action to SIG_DFL for a signal that is
2542 * pending and whose default action is to ignore the signal
2543 * (for example, SIGCHLD), shall cause the pending signal to
2544 * be discarded, whether or not it is blocked"
2545 */
35de254d 2546 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
71fabd5e
GA
2547 sigemptyset(&mask);
2548 sigaddset(&mask, sig);
2549 rm_from_queue_full(&mask, &t->signal->shared_pending);
1da177e4 2550 do {
71fabd5e 2551 rm_from_queue_full(&mask, &t->pending);
1da177e4
LT
2552 t = next_thread(t);
2553 } while (t != current);
1da177e4 2554 }
1da177e4
LT
2555 }
2556
2557 spin_unlock_irq(&current->sighand->siglock);
2558 return 0;
2559}
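/*
 * Sketch of the POSIX 3.3.1.3 rule quoted above: a pending (blocked)
 * signal is discarded the moment its action becomes SIG_IGN.
 */
#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t set, pending;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);
	raise(SIGUSR1);				/* now pending */

	signal(SIGUSR1, SIG_IGN);		/* discards the pending one */

	sigpending(&pending);
	printf("still pending: %d\n", sigismember(&pending, SIGUSR1));
	return 0;				/* prints 0 */
}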
2560
2561int
2562do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2563{
2564 stack_t oss;
2565 int error;
2566
0083fc2c
LT
2567 oss.ss_sp = (void __user *) current->sas_ss_sp;
2568 oss.ss_size = current->sas_ss_size;
2569 oss.ss_flags = sas_ss_flags(sp);
1da177e4
LT
2570
2571 if (uss) {
2572 void __user *ss_sp;
2573 size_t ss_size;
2574 int ss_flags;
2575
2576 error = -EFAULT;
0dd8486b
LT
2577 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2578 goto out;
2579 error = __get_user(ss_sp, &uss->ss_sp) |
2580 __get_user(ss_flags, &uss->ss_flags) |
2581 __get_user(ss_size, &uss->ss_size);
2582 if (error)
1da177e4
LT
2583 goto out;
2584
2585 error = -EPERM;
2586 if (on_sig_stack(sp))
2587 goto out;
2588
2589 error = -EINVAL;
2590 /*
2591 *
2592 * Note - this code used to test ss_flags incorrectly;
2593 * old code may have been written using ss_flags==0
2594 * to mean ss_flags==SS_ONSTACK (as this was the only
2595 * way that worked), so this fix preserves that older
2596 * mechanism.
2597 */
2598 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2599 goto out;
2600
2601 if (ss_flags == SS_DISABLE) {
2602 ss_size = 0;
2603 ss_sp = NULL;
2604 } else {
2605 error = -ENOMEM;
2606 if (ss_size < MINSIGSTKSZ)
2607 goto out;
2608 }
2609
2610 current->sas_ss_sp = (unsigned long) ss_sp;
2611 current->sas_ss_size = ss_size;
2612 }
2613
0083fc2c 2614 error = 0;
1da177e4
LT
2615 if (uoss) {
2616 error = -EFAULT;
0083fc2c 2617 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
1da177e4 2618 goto out;
0083fc2c
LT
2619 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2620 __put_user(oss.ss_size, &uoss->ss_size) |
2621 __put_user(oss.ss_flags, &uoss->ss_flags);
1da177e4
LT
2622 }
2623
1da177e4
LT
2624out:
2625 return error;
2626}
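/*
 * Sketch of the user-facing side of do_sigaltstack(): install an
 * alternate stack, then an SA_ONSTACK handler, so that e.g. a
 * stack-overflow SIGSEGV can still run its handler. Error handling
 * elided.
 */
#include <signal.h>
#include <stdlib.h>
#include <string.h>

static void setup_altstack(void (*handler)(int))
{
	stack_t ss;
	struct sigaction sa;

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;		/* neither SS_DISABLE nor SS_ONSTACK */
	sigaltstack(&ss, NULL);

	memset(&sa, 0, sizeof(sa));
	sa.sa_handler = handler;
	sa.sa_flags = SA_ONSTACK;	/* run this handler on the alt stack */
	sigaction(SIGSEGV, &sa, NULL);
}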
2627
2628#ifdef __ARCH_WANT_SYS_SIGPENDING
2629
b290ebe2 2630SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
1da177e4
LT
2631{
2632 return do_sigpending(set, sizeof(*set));
2633}
2634
2635#endif
2636
2637#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2638/* Some platforms have their own version with special arguments;
2639 others support only sys_rt_sigprocmask. */
2640
b290ebe2
HC
2641SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2642 old_sigset_t __user *, oset)
1da177e4
LT
2643{
2644 int error;
2645 old_sigset_t old_set, new_set;
2646
2647 if (set) {
2648 error = -EFAULT;
2649 if (copy_from_user(&new_set, set, sizeof(*set)))
2650 goto out;
2651 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2652
2653 spin_lock_irq(&current->sighand->siglock);
2654 old_set = current->blocked.sig[0];
2655
2656 error = 0;
2657 switch (how) {
2658 default:
2659 error = -EINVAL;
2660 break;
2661 case SIG_BLOCK:
2662 sigaddsetmask(&current->blocked, new_set);
2663 break;
2664 case SIG_UNBLOCK:
2665 sigdelsetmask(&current->blocked, new_set);
2666 break;
2667 case SIG_SETMASK:
2668 current->blocked.sig[0] = new_set;
2669 break;
2670 }
2671
2672 recalc_sigpending();
2673 spin_unlock_irq(&current->sighand->siglock);
2674 if (error)
2675 goto out;
2676 if (oset)
2677 goto set_old;
2678 } else if (oset) {
2679 old_set = current->blocked.sig[0];
2680 set_old:
2681 error = -EFAULT;
2682 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2683 goto out;
2684 }
2685 error = 0;
2686out:
2687 return error;
2688}
2689#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2690
2691#ifdef __ARCH_WANT_SYS_RT_SIGACTION
d4e82042
HC
2692SYSCALL_DEFINE4(rt_sigaction, int, sig,
2693 const struct sigaction __user *, act,
2694 struct sigaction __user *, oact,
2695 size_t, sigsetsize)
1da177e4
LT
2696{
2697 struct k_sigaction new_sa, old_sa;
2698 int ret = -EINVAL;
2699
2700 /* XXX: Don't preclude handling different sized sigset_t's. */
2701 if (sigsetsize != sizeof(sigset_t))
2702 goto out;
2703
2704 if (act) {
2705 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2706 return -EFAULT;
2707 }
2708
2709 ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2710
2711 if (!ret && oact) {
2712 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2713 return -EFAULT;
2714 }
2715out:
2716 return ret;
2717}
2718#endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2719
2720#ifdef __ARCH_WANT_SYS_SGETMASK
2721
2722/*
2723 * For backwards compatibility. Functionality superseded by sigprocmask.
2724 */
a5f8fa9e 2725SYSCALL_DEFINE0(sgetmask)
1da177e4
LT
2726{
2727 /* SMP safe */
2728 return current->blocked.sig[0];
2729}
2730
a5f8fa9e 2731SYSCALL_DEFINE1(ssetmask, int, newmask)
1da177e4
LT
2732{
2733 int old;
2734
2735 spin_lock_irq(&current->sighand->siglock);
2736 old = current->blocked.sig[0];
2737
2738 siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2739 sigmask(SIGSTOP)));
2740 recalc_sigpending();
2741 spin_unlock_irq(&current->sighand->siglock);
2742
2743 return old;
2744}
2745#endif /* __ARCH_WANT_SYS_SGETMASK */
2746
2747#ifdef __ARCH_WANT_SYS_SIGNAL
2748/*
2749 * For backwards compatibility. Functionality superseded by sigaction.
2750 */
a5f8fa9e 2751SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
1da177e4
LT
2752{
2753 struct k_sigaction new_sa, old_sa;
2754 int ret;
2755
2756 new_sa.sa.sa_handler = handler;
2757 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
c70d3d70 2758 sigemptyset(&new_sa.sa.sa_mask);
1da177e4
LT
2759
2760 ret = do_sigaction(sig, &new_sa, &old_sa);
2761
2762 return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2763}
2764#endif /* __ARCH_WANT_SYS_SIGNAL */
2765
2766#ifdef __ARCH_WANT_SYS_PAUSE
2767
a5f8fa9e 2768SYSCALL_DEFINE0(pause)
1da177e4
LT
2769{
2770 current->state = TASK_INTERRUPTIBLE;
2771 schedule();
2772 return -ERESTARTNOHAND;
2773}
2774
2775#endif
2776
150256d8 2777#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
d4e82042 2778SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
150256d8
DW
2779{
2780 sigset_t newset;
2781
2782 /* XXX: Don't preclude handling different sized sigset_t's. */
2783 if (sigsetsize != sizeof(sigset_t))
2784 return -EINVAL;
2785
2786 if (copy_from_user(&newset, unewset, sizeof(newset)))
2787 return -EFAULT;
2788 sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2789
2790 spin_lock_irq(&current->sighand->siglock);
2791 current->saved_sigmask = current->blocked;
2792 current->blocked = newset;
2793 recalc_sigpending();
2794 spin_unlock_irq(&current->sighand->siglock);
2795
2796 current->state = TASK_INTERRUPTIBLE;
2797 schedule();
4e4c22c7 2798 set_restore_sigmask();
150256d8
DW
2799 return -ERESTARTNOHAND;
2800}
2801#endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
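/*
 * Userspace analogue of rt_sigsuspend() above, via sigsuspend(3): swap
 * the mask and sleep atomically, closing the race between testing a
 * flag and calling pause().
 */
#include <signal.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	got_usr1 = 1;
}

static void wait_for_usr1(void)
{
	sigset_t block, unblocked;

	signal(SIGUSR1, on_usr1);

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &unblocked);	/* close the race */

	while (!got_usr1)
		sigsuspend(&unblocked);	/* old mask while sleeping */

	sigprocmask(SIG_SETMASK, &unblocked, NULL);	/* restore */
}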
2802
f269fdd1
DH
2803__attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2804{
2805 return NULL;
2806}
2807
1da177e4
LT
2808void __init signals_init(void)
2809{
0a31bd5f 2810 sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
1da177e4 2811}
67fc4e0c
JW
2812
2813#ifdef CONFIG_KGDB_KDB
2814#include <linux/kdb.h>
2815/*
2816 * kdb_send_sig_info - Allows kdb to send signals without exposing
2817 * signal internals. This function checks if the required locks are
2818 * available before calling the main signal code, to avoid kdb
2819 * deadlocks.
2820 */
2821void
2822kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
2823{
2824 static struct task_struct *kdb_prev_t;
2825 int sig, new_t;
2826 if (!spin_trylock(&t->sighand->siglock)) {
2827 kdb_printf("Can't do the kill command now.\n"
2828 "The sigmask lock is held somewhere else in "
2829 "the kernel; try again later.\n");
2830 return;
2831 }
2832 spin_unlock(&t->sighand->siglock);
2833 new_t = kdb_prev_t != t;
2834 kdb_prev_t = t;
2835 if (t->state != TASK_RUNNING && new_t) {
2836 kdb_printf("Process is not RUNNING, sending a signal from "
2837 "kdb risks deadlock\n"
2838 "on the run queue locks. "
2839 "The signal has _not_ been sent.\n"
2840 "Reissue the kill command if you want to risk "
2841 "the deadlock.\n");
2842 return;
2843 }
2844 sig = info->si_signo;
2845 if (send_sig_info(sig, info, t))
2846 kdb_printf("Failed to deliver signal %d to process %d.\n",
2847 sig, t->pid);
2848 else
2849 kdb_printf("Signal %d sent to process %d.\n", sig, t->pid);
2850}
2851#endif /* CONFIG_KGDB_KDB */