signal: Use GROUP_STOP_PENDING to stop once for a single group stop
1 /*
2  *  linux/kernel/signal.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  *
6  *  1997-11-02  Modified for POSIX.1b signals by Richard Henderson
7  *
8  *  2003-06-02  Jim Houston - Concurrent Computer Corp.
9  *              Changes to use preallocated sigqueue structures
10  *              to allow signals to be sent reliably.
11  */
12
13 #include <linux/slab.h>
14 #include <linux/module.h>
15 #include <linux/init.h>
16 #include <linux/sched.h>
17 #include <linux/fs.h>
18 #include <linux/tty.h>
19 #include <linux/binfmts.h>
20 #include <linux/security.h>
21 #include <linux/syscalls.h>
22 #include <linux/ptrace.h>
23 #include <linux/signal.h>
24 #include <linux/signalfd.h>
25 #include <linux/ratelimit.h>
26 #include <linux/tracehook.h>
27 #include <linux/capability.h>
28 #include <linux/freezer.h>
29 #include <linux/pid_namespace.h>
30 #include <linux/nsproxy.h>
31 #define CREATE_TRACE_POINTS
32 #include <trace/events/signal.h>
33
34 #include <asm/param.h>
35 #include <asm/uaccess.h>
36 #include <asm/unistd.h>
37 #include <asm/siginfo.h>
38 #include "audit.h"      /* audit_signal_info() */
39
40 /*
41  * SLAB caches for signal bits.
42  */
43
44 static struct kmem_cache *sigqueue_cachep;
45
46 int print_fatal_signals __read_mostly;
47
48 static void __user *sig_handler(struct task_struct *t, int sig)
49 {
50         return t->sighand->action[sig - 1].sa.sa_handler;
51 }
52
53 static int sig_handler_ignored(void __user *handler, int sig)
54 {
55         /* Is it explicitly or implicitly ignored? */
56         return handler == SIG_IGN ||
57                 (handler == SIG_DFL && sig_kernel_ignore(sig));
58 }
59
60 static int sig_task_ignored(struct task_struct *t, int sig,
61                 int from_ancestor_ns)
62 {
63         void __user *handler;
64
65         handler = sig_handler(t, sig);
66
67         if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
68                         handler == SIG_DFL && !from_ancestor_ns)
69                 return 1;
70
71         return sig_handler_ignored(handler, sig);
72 }
73
74 static int sig_ignored(struct task_struct *t, int sig, int from_ancestor_ns)
75 {
76         /*
77          * Blocked signals are never ignored, since the
78          * signal handler may change by the time it is
79          * unblocked.
80          */
81         if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig))
82                 return 0;
83
84         if (!sig_task_ignored(t, sig, from_ancestor_ns))
85                 return 0;
86
87         /*
88          * Tracers may want to know about even ignored signals.
89          */
90         return !tracehook_consider_ignored_signal(t, sig);
91 }
92
93 /*
94  * Re-calculate pending state from the set of locally pending
95  * signals, globally pending signals, and blocked signals.
96  */
97 static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
98 {
99         unsigned long ready;
100         long i;
101
102         switch (_NSIG_WORDS) {
103         default:
104                 for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;)
105                         ready |= signal->sig[i] &~ blocked->sig[i];
106                 break;
107
108         case 4: ready  = signal->sig[3] &~ blocked->sig[3];
109                 ready |= signal->sig[2] &~ blocked->sig[2];
110                 ready |= signal->sig[1] &~ blocked->sig[1];
111                 ready |= signal->sig[0] &~ blocked->sig[0];
112                 break;
113
114         case 2: ready  = signal->sig[1] &~ blocked->sig[1];
115                 ready |= signal->sig[0] &~ blocked->sig[0];
116                 break;
117
118         case 1: ready  = signal->sig[0] &~ blocked->sig[0];
119         }
120         return ready != 0;
121 }
122
123 #define PENDING(p,b) has_pending_signals(&(p)->signal, (b))
124
125 static int recalc_sigpending_tsk(struct task_struct *t)
126 {
127         if ((t->group_stop & GROUP_STOP_PENDING) ||
128             PENDING(&t->pending, &t->blocked) ||
129             PENDING(&t->signal->shared_pending, &t->blocked)) {
130                 set_tsk_thread_flag(t, TIF_SIGPENDING);
131                 return 1;
132         }
133         /*
134          * We must never clear the flag in another thread, or in current
135          * when it's possible the current syscall is returning -ERESTART*.
136          * So we don't clear it here; only callers that know it is safe do so.
137          */
138         return 0;
139 }
140
141 /*
142  * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up.
143  * This is superfluous when called on current; the wakeup is a harmless no-op.
144  */
145 void recalc_sigpending_and_wake(struct task_struct *t)
146 {
147         if (recalc_sigpending_tsk(t))
148                 signal_wake_up(t, 0);
149 }
150
151 void recalc_sigpending(void)
152 {
153         if (unlikely(tracehook_force_sigpending()))
154                 set_thread_flag(TIF_SIGPENDING);
155         else if (!recalc_sigpending_tsk(current) && !freezing(current))
156                 clear_thread_flag(TIF_SIGPENDING);
157
158 }
159
160 /* Given the mask, find the first available signal that should be serviced. */
161
162 #define SYNCHRONOUS_MASK \
163         (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \
164          sigmask(SIGTRAP) | sigmask(SIGFPE))
165
166 int next_signal(struct sigpending *pending, sigset_t *mask)
167 {
168         unsigned long i, *s, *m, x;
169         int sig = 0;
170
171         s = pending->signal.sig;
172         m = mask->sig;
173
174         /*
175          * Handle the first word specially: it contains the
176          * synchronous signals that need to be dequeued first.
177          */
178         x = *s &~ *m;
179         if (x) {
180                 if (x & SYNCHRONOUS_MASK)
181                         x &= SYNCHRONOUS_MASK;
182                 sig = ffz(~x) + 1;
183                 return sig;
184         }
185
186         switch (_NSIG_WORDS) {
187         default:
188                 for (i = 1; i < _NSIG_WORDS; ++i) {
189                         x = *++s &~ *++m;
190                         if (!x)
191                                 continue;
192                         sig = ffz(~x) + i*_NSIG_BPW + 1;
193                         break;
194                 }
195                 break;
196
197         case 2:
198                 x = s[1] &~ m[1];
199                 if (!x)
200                         break;
201                 sig = ffz(~x) + _NSIG_BPW + 1;
202                 break;
203
204         case 1:
205                 /* Nothing to do */
206                 break;
207         }
208
209         return sig;
210 }
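/*
 * Illustrative example (not part of the original file): suppose SIGUSR1
 * (10 on x86) and SIGSEGV (11) are both pending in the first word and
 * neither is blocked.  x is non-zero and intersects SYNCHRONOUS_MASK, so
 * it is reduced to just the SIGSEGV bit and next_signal() returns 11,
 * even though SIGUSR1 has the lower number.  Without that reduction the
 * asynchronous SIGUSR1 would have been dequeued first.
 */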
211
212 static inline void print_dropped_signal(int sig)
213 {
214         static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
215
216         if (!print_fatal_signals)
217                 return;
218
219         if (!__ratelimit(&ratelimit_state))
220                 return;
221
222         printk(KERN_INFO "%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n",
223                                 current->comm, current->pid, sig);
224 }
225
226 /**
227  * task_clear_group_stop_pending - clear pending group stop
228  * @task: target task
229  *
230  * Clear group stop states for @task.
231  *
232  * CONTEXT:
233  * Must be called with @task->sighand->siglock held.
234  */
235 void task_clear_group_stop_pending(struct task_struct *task)
236 {
237         task->group_stop &= ~(GROUP_STOP_PENDING | GROUP_STOP_CONSUME);
238 }
239
240 /**
241  * task_participate_group_stop - participate in a group stop
242  * @task: task participating in a group stop
243  *
244  * @task has GROUP_STOP_PENDING set and is participating in a group stop.
245  * Group stop states are cleared and the group stop count is consumed if
246  * %GROUP_STOP_CONSUME was set.  If the consumption completes the group
247  * stop, the appropriate %SIGNAL_* flags are set.
248  *
249  * CONTEXT:
250  * Must be called with @task->sighand->siglock held.
251  */
252 static bool task_participate_group_stop(struct task_struct *task)
253 {
254         struct signal_struct *sig = task->signal;
255         bool consume = task->group_stop & GROUP_STOP_CONSUME;
256
257         WARN_ON_ONCE(!(task->group_stop & GROUP_STOP_PENDING));
258
259         task_clear_group_stop_pending(task);
260
261         if (!consume)
262                 return false;
263
264         if (!WARN_ON_ONCE(sig->group_stop_count == 0))
265                 sig->group_stop_count--;
266
267         if (!sig->group_stop_count) {
268                 sig->flags = SIGNAL_STOP_STOPPED;
269                 return true;
270         }
271         return false;
272 }
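/*
 * Illustrative sketch (not part of the original file): how a stopping path
 * is expected to consult GROUP_STOP_PENDING and participate in a group
 * stop exactly once, with siglock held.  This only mirrors what
 * do_signal_stop() and the ptrace stop path do; my_participate_if_pending()
 * is an invented name.
 */
#if 0	/* example only, not compiled */
static void my_participate_if_pending(struct task_struct *task)
{
	assert_spin_locked(&task->sighand->siglock);

	if (!(task->group_stop & GROUP_STOP_PENDING))
		return;

	if (task_participate_group_stop(task)) {
		/*
		 * This task was the last participant: the helper has already
		 * set SIGNAL_STOP_STOPPED, and a real caller would arrange to
		 * notify the parent with CLD_STOPPED after dropping siglock.
		 */
	}
}
#endif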
273
274 /*
275  * allocate a new signal queue record
276  * - this may be called without locks if and only if t == current, otherwise an
277  *   appropriate lock must be held to stop the target task from exiting
278  */
279 static struct sigqueue *
280 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
281 {
282         struct sigqueue *q = NULL;
283         struct user_struct *user;
284
285         /*
286          * Protect access to @t credentials. This can go away when all
287          * callers hold rcu read lock.
288          */
289         rcu_read_lock();
290         user = get_uid(__task_cred(t)->user);
291         atomic_inc(&user->sigpending);
292         rcu_read_unlock();
293
294         if (override_rlimit ||
295             atomic_read(&user->sigpending) <=
296                         task_rlimit(t, RLIMIT_SIGPENDING)) {
297                 q = kmem_cache_alloc(sigqueue_cachep, flags);
298         } else {
299                 print_dropped_signal(sig);
300         }
301
302         if (unlikely(q == NULL)) {
303                 atomic_dec(&user->sigpending);
304                 free_uid(user);
305         } else {
306                 INIT_LIST_HEAD(&q->list);
307                 q->flags = 0;
308                 q->user = user;
309         }
310
311         return q;
312 }
313
314 static void __sigqueue_free(struct sigqueue *q)
315 {
316         if (q->flags & SIGQUEUE_PREALLOC)
317                 return;
318         atomic_dec(&q->user->sigpending);
319         free_uid(q->user);
320         kmem_cache_free(sigqueue_cachep, q);
321 }
322
323 void flush_sigqueue(struct sigpending *queue)
324 {
325         struct sigqueue *q;
326
327         sigemptyset(&queue->signal);
328         while (!list_empty(&queue->list)) {
329                 q = list_entry(queue->list.next, struct sigqueue , list);
330                 list_del_init(&q->list);
331                 __sigqueue_free(q);
332         }
333 }
334
335 /*
336  * Flush all pending signals for a task.
337  */
338 void __flush_signals(struct task_struct *t)
339 {
340         clear_tsk_thread_flag(t, TIF_SIGPENDING);
341         flush_sigqueue(&t->pending);
342         flush_sigqueue(&t->signal->shared_pending);
343 }
344
345 void flush_signals(struct task_struct *t)
346 {
347         unsigned long flags;
348
349         spin_lock_irqsave(&t->sighand->siglock, flags);
350         __flush_signals(t);
351         spin_unlock_irqrestore(&t->sighand->siglock, flags);
352 }
353
354 static void __flush_itimer_signals(struct sigpending *pending)
355 {
356         sigset_t signal, retain;
357         struct sigqueue *q, *n;
358
359         signal = pending->signal;
360         sigemptyset(&retain);
361
362         list_for_each_entry_safe(q, n, &pending->list, list) {
363                 int sig = q->info.si_signo;
364
365                 if (likely(q->info.si_code != SI_TIMER)) {
366                         sigaddset(&retain, sig);
367                 } else {
368                         sigdelset(&signal, sig);
369                         list_del_init(&q->list);
370                         __sigqueue_free(q);
371                 }
372         }
373
374         sigorsets(&pending->signal, &signal, &retain);
375 }
376
377 void flush_itimer_signals(void)
378 {
379         struct task_struct *tsk = current;
380         unsigned long flags;
381
382         spin_lock_irqsave(&tsk->sighand->siglock, flags);
383         __flush_itimer_signals(&tsk->pending);
384         __flush_itimer_signals(&tsk->signal->shared_pending);
385         spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
386 }
387
388 void ignore_signals(struct task_struct *t)
389 {
390         int i;
391
392         for (i = 0; i < _NSIG; ++i)
393                 t->sighand->action[i].sa.sa_handler = SIG_IGN;
394
395         flush_signals(t);
396 }
397
398 /*
399  * Flush all handlers for a task.
400  */
401
402 void
403 flush_signal_handlers(struct task_struct *t, int force_default)
404 {
405         int i;
406         struct k_sigaction *ka = &t->sighand->action[0];
407         for (i = _NSIG ; i != 0 ; i--) {
408                 if (force_default || ka->sa.sa_handler != SIG_IGN)
409                         ka->sa.sa_handler = SIG_DFL;
410                 ka->sa.sa_flags = 0;
411                 sigemptyset(&ka->sa.sa_mask);
412                 ka++;
413         }
414 }
415
416 int unhandled_signal(struct task_struct *tsk, int sig)
417 {
418         void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
419         if (is_global_init(tsk))
420                 return 1;
421         if (handler != SIG_IGN && handler != SIG_DFL)
422                 return 0;
423         return !tracehook_consider_fatal_signal(tsk, sig);
424 }
425
426
427 /* Notify the system that a driver wants to block all signals for this
428  * process, and wants to be notified if any signals at all were to be
429  * sent/acted upon.  If the notifier routine returns non-zero, then the
430  * signal will be acted upon after all.  If the notifier routine returns 0,
431  * then the signal will be blocked.  Only one block per process is
432  * allowed.  priv is a pointer to private data that the notifier routine
433  * can use to determine if the signal should be blocked or not.  */
434
435 void
436 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
437 {
438         unsigned long flags;
439
440         spin_lock_irqsave(&current->sighand->siglock, flags);
441         current->notifier_mask = mask;
442         current->notifier_data = priv;
443         current->notifier = notifier;
444         spin_unlock_irqrestore(&current->sighand->siglock, flags);
445 }
446
447 /* Notify the system that blocking has ended. */
448
449 void
450 unblock_all_signals(void)
451 {
452         unsigned long flags;
453
454         spin_lock_irqsave(&current->sighand->siglock, flags);
455         current->notifier = NULL;
456         current->notifier_data = NULL;
457         recalc_sigpending();
458         spin_unlock_irqrestore(&current->sighand->siglock, flags);
459 }
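/*
 * Illustrative sketch (not part of the original file): how a driver might
 * pair block_all_signals() with unblock_all_signals() around a critical
 * window.  Per the comment above, the notifier returns 0 to keep a signal
 * blocked and non-zero to let it be acted upon.  struct my_dev,
 * my_sig_notifier() and my_critical_io() are invented for illustration.
 */
#if 0	/* example only, not compiled */
struct my_dev {
	int window_open;
};

static int my_sig_notifier(void *priv)
{
	struct my_dev *dev = priv;

	/* Keep every signal blocked while the hardware window is open. */
	return dev->window_open ? 0 : 1;
}

static void my_critical_io(struct my_dev *dev)
{
	sigset_t mask;

	sigfillset(&mask);		/* consult the notifier for all signals */
	dev->window_open = 1;
	block_all_signals(my_sig_notifier, dev, &mask);

	/* ... perform the uninterruptible device I/O here ... */

	dev->window_open = 0;
	unblock_all_signals();
}
#endif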
460
461 static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
462 {
463         struct sigqueue *q, *first = NULL;
464
465         /*
466          * Collect the siginfo appropriate to this signal.  Check if
467          * there is another siginfo for the same signal.
468         */
469         list_for_each_entry(q, &list->list, list) {
470                 if (q->info.si_signo == sig) {
471                         if (first)
472                                 goto still_pending;
473                         first = q;
474                 }
475         }
476
477         sigdelset(&list->signal, sig);
478
479         if (first) {
480 still_pending:
481                 list_del_init(&first->list);
482                 copy_siginfo(info, &first->info);
483                 __sigqueue_free(first);
484         } else {
485                 /* Ok, it wasn't in the queue.  This must be
486                    a fast-pathed signal or we must have been
487                    out of queue space.  So zero out the info.
488                  */
489                 info->si_signo = sig;
490                 info->si_errno = 0;
491                 info->si_code = SI_USER;
492                 info->si_pid = 0;
493                 info->si_uid = 0;
494         }
495 }
496
497 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
498                         siginfo_t *info)
499 {
500         int sig = next_signal(pending, mask);
501
502         if (sig) {
503                 if (current->notifier) {
504                         if (sigismember(current->notifier_mask, sig)) {
505                                 if (!(current->notifier)(current->notifier_data)) {
506                                         clear_thread_flag(TIF_SIGPENDING);
507                                         return 0;
508                                 }
509                         }
510                 }
511
512                 collect_signal(sig, pending, info);
513         }
514
515         return sig;
516 }
517
518 /*
519  * Dequeue a signal and return the element to the caller, which is 
520  * expected to free it.
521  *
522  * All callers have to hold the siglock.
523  */
524 int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
525 {
526         int signr;
527
528         /* We only dequeue private signals from ourselves, we don't let
529          * signalfd steal them
530          */
531         signr = __dequeue_signal(&tsk->pending, mask, info);
532         if (!signr) {
533                 signr = __dequeue_signal(&tsk->signal->shared_pending,
534                                          mask, info);
535                 /*
536                  * itimer signal ?
537                  *
538                  * itimers are process shared and we restart periodic
539                  * itimers in the signal delivery path to prevent DoS
540                  * attacks in the high resolution timer case. This is
541                  * compliant with the old way of self restarting
542                  * itimers, as the SIGALRM is a legacy signal and only
543                  * queued once. Changing the restart behaviour to
544                  * restart the timer in the signal dequeue path also
545                  * reduces the timer noise on heavily loaded !highres
546                  * systems.
547                  */
548                 if (unlikely(signr == SIGALRM)) {
549                         struct hrtimer *tmr = &tsk->signal->real_timer;
550
551                         if (!hrtimer_is_queued(tmr) &&
552                             tsk->signal->it_real_incr.tv64 != 0) {
553                                 hrtimer_forward(tmr, tmr->base->get_time(),
554                                                 tsk->signal->it_real_incr);
555                                 hrtimer_restart(tmr);
556                         }
557                 }
558         }
559
560         recalc_sigpending();
561         if (!signr)
562                 return 0;
563
564         if (unlikely(sig_kernel_stop(signr))) {
565                 /*
566                  * Set a marker that we have dequeued a stop signal.  Our
567                  * caller might release the siglock and then the pending
568                  * stop signal it is about to process is no longer in the
569                  * pending bitmasks, but must still be cleared by a SIGCONT
570                  * (and overruled by a SIGKILL).  So those cases clear this
571                  * shared flag after we've set it.  Note that this flag may
572                  * remain set after the signal we return is ignored or
573                  * handled.  That doesn't matter because its only purpose
574                  * is to alert stop-signal processing code when another
575                  * processor has come along and cleared the flag.
576                  */
577                 tsk->signal->flags |= SIGNAL_STOP_DEQUEUED;
578         }
579         if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
580                 /*
581                  * Release the siglock to ensure proper locking order
582                  * of timer locks outside of siglocks.  Note, we leave
583                  * irqs disabled here, since the posix-timers code is
584                  * about to disable them again anyway.
585                  */
586                 spin_unlock(&tsk->sighand->siglock);
587                 do_schedule_next_timer(info);
588                 spin_lock(&tsk->sighand->siglock);
589         }
590         return signr;
591 }
592
593 /*
594  * Tell a process that it has a new active signal..
595  *
596  * NOTE! we rely on the previous spin_lock to
597  * lock interrupts for us! We can only be called with
598  * "siglock" held, and the local interrupt must
599  * have been disabled when that got acquired!
600  *
601  * No need to set need_resched since signal event passing
602  * goes through ->blocked
603  */
604 void signal_wake_up(struct task_struct *t, int resume)
605 {
606         unsigned int mask;
607
608         set_tsk_thread_flag(t, TIF_SIGPENDING);
609
610         /*
611          * For SIGKILL, we want to wake it up in the stopped/traced/killable
612          * case. We don't check t->state here because there is a race with it
613          * executing on another processor and just now entering stopped state.
614          * By using wake_up_state, we ensure the process will wake up and
615          * handle its death signal.
616          */
617         mask = TASK_INTERRUPTIBLE;
618         if (resume)
619                 mask |= TASK_WAKEKILL;
620         if (!wake_up_state(t, mask))
621                 kick_process(t);
622 }
623
624 /*
625  * Remove signals in mask from the pending set and queue.
626  * Returns 1 if any signals were found.
627  *
628  * All callers must be holding the siglock.
629  *
630  * This version takes a sigset mask and looks at all signals,
631  * not just those in the first mask word.
632  */
633 static int rm_from_queue_full(sigset_t *mask, struct sigpending *s)
634 {
635         struct sigqueue *q, *n;
636         sigset_t m;
637
638         sigandsets(&m, mask, &s->signal);
639         if (sigisemptyset(&m))
640                 return 0;
641
642         signandsets(&s->signal, &s->signal, mask);
643         list_for_each_entry_safe(q, n, &s->list, list) {
644                 if (sigismember(mask, q->info.si_signo)) {
645                         list_del_init(&q->list);
646                         __sigqueue_free(q);
647                 }
648         }
649         return 1;
650 }
651 /*
652  * Remove signals in mask from the pending set and queue.
653  * Returns 1 if any signals were found.
654  *
655  * All callers must be holding the siglock.
656  */
657 static int rm_from_queue(unsigned long mask, struct sigpending *s)
658 {
659         struct sigqueue *q, *n;
660
661         if (!sigtestsetmask(&s->signal, mask))
662                 return 0;
663
664         sigdelsetmask(&s->signal, mask);
665         list_for_each_entry_safe(q, n, &s->list, list) {
666                 if (q->info.si_signo < SIGRTMIN &&
667                     (mask & sigmask(q->info.si_signo))) {
668                         list_del_init(&q->list);
669                         __sigqueue_free(q);
670                 }
671         }
672         return 1;
673 }
674
675 static inline int is_si_special(const struct siginfo *info)
676 {
677         return info <= SEND_SIG_FORCED;
678 }
679
680 static inline bool si_fromuser(const struct siginfo *info)
681 {
682         return info == SEND_SIG_NOINFO ||
683                 (!is_si_special(info) && SI_FROMUSER(info));
684 }
685
686 /*
687  * Bad permissions for sending the signal
688  * - the caller must hold the RCU read lock
689  */
690 static int check_kill_permission(int sig, struct siginfo *info,
691                                  struct task_struct *t)
692 {
693         const struct cred *cred, *tcred;
694         struct pid *sid;
695         int error;
696
697         if (!valid_signal(sig))
698                 return -EINVAL;
699
700         if (!si_fromuser(info))
701                 return 0;
702
703         error = audit_signal_info(sig, t); /* Let audit system see the signal */
704         if (error)
705                 return error;
706
707         cred = current_cred();
708         tcred = __task_cred(t);
709         if (!same_thread_group(current, t) &&
710             (cred->euid ^ tcred->suid) &&
711             (cred->euid ^ tcred->uid) &&
712             (cred->uid  ^ tcred->suid) &&
713             (cred->uid  ^ tcred->uid) &&
714             !capable(CAP_KILL)) {
715                 switch (sig) {
716                 case SIGCONT:
717                         sid = task_session(t);
718                         /*
719                          * We don't return the error if sid == NULL. The
720                          * task was unhashed, the caller must notice this.
721                          */
722                         if (!sid || sid == task_session(current))
723                                 break;
724                 default:
725                         return -EPERM;
726                 }
727         }
728
729         return security_task_kill(t, info, sig, 0);
730 }
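/*
 * Illustrative example (not part of the original file): the XOR chain
 * above just means "none of the four uid pairs match".  A sender with
 * uid = euid = 1000 may signal a target running with uid 1000 and suid 0,
 * because cred->uid == tcred->uid zeroes one term; the same sender gets
 * -EPERM for a target with uid = suid = 0 unless it has CAP_KILL, or the
 * signal is SIGCONT and the target is in the sender's session.
 */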
731
732 /*
733  * Handle magic process-wide effects of stop/continue signals. Unlike
734  * the signal actions, these happen immediately at signal-generation
735  * time regardless of blocking, ignoring, or handling.  This does the
736  * actual continuing for SIGCONT, but not the actual stopping for stop
737  * signals. The process stop is done as a signal action for SIG_DFL.
738  *
739  * Returns true if the signal should be actually delivered, otherwise
740  * it should be dropped.
741  */
742 static int prepare_signal(int sig, struct task_struct *p, int from_ancestor_ns)
743 {
744         struct signal_struct *signal = p->signal;
745         struct task_struct *t;
746
747         if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) {
748                 /*
749                  * The process is in the middle of dying, nothing to do.
750                  */
751         } else if (sig_kernel_stop(sig)) {
752                 /*
753                  * This is a stop signal.  Remove SIGCONT from all queues.
754                  */
755                 rm_from_queue(sigmask(SIGCONT), &signal->shared_pending);
756                 t = p;
757                 do {
758                         rm_from_queue(sigmask(SIGCONT), &t->pending);
759                 } while_each_thread(p, t);
760         } else if (sig == SIGCONT) {
761                 unsigned int why;
762                 /*
763                  * Remove all stop signals from all queues,
764                  * and wake all threads.
765                  */
766                 rm_from_queue(SIG_KERNEL_STOP_MASK, &signal->shared_pending);
767                 t = p;
768                 do {
769                         unsigned int state;
770
771                         task_clear_group_stop_pending(t);
772
773                         rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
774                         /*
775                          * If there is a handler for SIGCONT, we must make
776                          * sure that no thread returns to user mode before
777                          * we post the signal, in case it was the only
778                          * thread eligible to run the signal handler--then
779                          * it must not do anything between resuming and
780                          * running the handler.  With the TIF_SIGPENDING
781                          * flag set, the thread will pause and acquire the
782                          * siglock that we hold now and until we've queued
783                          * the pending signal.
784                          *
785                          * Wake up the stopped thread _after_ setting
786                          * TIF_SIGPENDING
787                          */
788                         state = __TASK_STOPPED;
789                         if (sig_user_defined(t, SIGCONT) && !sigismember(&t->blocked, SIGCONT)) {
790                                 set_tsk_thread_flag(t, TIF_SIGPENDING);
791                                 state |= TASK_INTERRUPTIBLE;
792                         }
793                         wake_up_state(t, state);
794                 } while_each_thread(p, t);
795
796                 /*
797                  * Notify the parent with CLD_CONTINUED if we were stopped.
798                  *
799                  * If we were in the middle of a group stop, we pretend it
800                  * was already finished, and then continued. Since SIGCHLD
801                  * doesn't queue we report only CLD_STOPPED, as if the next
802                  * CLD_CONTINUED was dropped.
803                  */
804                 why = 0;
805                 if (signal->flags & SIGNAL_STOP_STOPPED)
806                         why |= SIGNAL_CLD_CONTINUED;
807                 else if (signal->group_stop_count)
808                         why |= SIGNAL_CLD_STOPPED;
809
810                 if (why) {
811                         /*
812                          * The first thread which returns from do_signal_stop()
813                          * will take ->siglock, notice SIGNAL_CLD_MASK, and
814                          * notify its parent. See get_signal_to_deliver().
815                          */
816                         signal->flags = why | SIGNAL_STOP_CONTINUED;
817                         signal->group_stop_count = 0;
818                         signal->group_exit_code = 0;
819                 } else {
820                         /*
821                          * We are not stopped, but there could be a stop
822                          * signal in the middle of being processed after
823                          * being removed from the queue.  Clear that too.
824                          */
825                         signal->flags &= ~SIGNAL_STOP_DEQUEUED;
826                 }
827         }
828
829         return !sig_ignored(p, sig, from_ancestor_ns);
830 }
831
832 /*
833  * Test if P wants to take SIG.  After we've checked all threads with this,
834  * it's equivalent to finding no threads not blocking SIG.  Any threads not
835  * blocking SIG were ruled out because they are not running and already
836  * have pending signals.  Such threads will dequeue from the shared queue
837  * as soon as they're available, so putting the signal on the shared queue
838  * will be equivalent to sending it to one such thread.
839  */
840 static inline int wants_signal(int sig, struct task_struct *p)
841 {
842         if (sigismember(&p->blocked, sig))
843                 return 0;
844         if (p->flags & PF_EXITING)
845                 return 0;
846         if (sig == SIGKILL)
847                 return 1;
848         if (task_is_stopped_or_traced(p))
849                 return 0;
850         return task_curr(p) || !signal_pending(p);
851 }
852
853 static void complete_signal(int sig, struct task_struct *p, int group)
854 {
855         struct signal_struct *signal = p->signal;
856         struct task_struct *t;
857
858         /*
859          * Now find a thread we can wake up to take the signal off the queue.
860          *
861          * If the main thread wants the signal, it gets first crack.
862          * Probably the least surprising to the average bear.
863          */
864         if (wants_signal(sig, p))
865                 t = p;
866         else if (!group || thread_group_empty(p))
867                 /*
868                  * There is just one thread and it does not need to be woken.
869                  * It will dequeue unblocked signals before it runs again.
870                  */
871                 return;
872         else {
873                 /*
874                  * Otherwise try to find a suitable thread.
875                  */
876                 t = signal->curr_target;
877                 while (!wants_signal(sig, t)) {
878                         t = next_thread(t);
879                         if (t == signal->curr_target)
880                                 /*
881                                  * No thread needs to be woken.
882                                  * Any eligible threads will see
883                                  * the signal in the queue soon.
884                                  */
885                                 return;
886                 }
887                 signal->curr_target = t;
888         }
889
890         /*
891          * Found a killable thread.  If the signal will be fatal,
892          * then start taking the whole group down immediately.
893          */
894         if (sig_fatal(p, sig) &&
895             !(signal->flags & (SIGNAL_UNKILLABLE | SIGNAL_GROUP_EXIT)) &&
896             !sigismember(&t->real_blocked, sig) &&
897             (sig == SIGKILL ||
898              !tracehook_consider_fatal_signal(t, sig))) {
899                 /*
900                  * This signal will be fatal to the whole group.
901                  */
902                 if (!sig_kernel_coredump(sig)) {
903                         /*
904                          * Start a group exit and wake everybody up.
905                          * This way we don't have other threads
906                          * running and doing things after a slower
907                          * thread has the fatal signal pending.
908                          */
909                         signal->flags = SIGNAL_GROUP_EXIT;
910                         signal->group_exit_code = sig;
911                         signal->group_stop_count = 0;
912                         t = p;
913                         do {
914                                 task_clear_group_stop_pending(t);
915                                 sigaddset(&t->pending.signal, SIGKILL);
916                                 signal_wake_up(t, 1);
917                         } while_each_thread(p, t);
918                         return;
919                 }
920         }
921
922         /*
923          * The signal is already in the shared-pending queue.
924          * Tell the chosen thread to wake up and dequeue it.
925          */
926         signal_wake_up(t, sig == SIGKILL);
927         return;
928 }
929
930 static inline int legacy_queue(struct sigpending *signals, int sig)
931 {
932         return (sig < SIGRTMIN) && sigismember(&signals->signal, sig);
933 }
934
935 static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
936                         int group, int from_ancestor_ns)
937 {
938         struct sigpending *pending;
939         struct sigqueue *q;
940         int override_rlimit;
941
942         trace_signal_generate(sig, info, t);
943
944         assert_spin_locked(&t->sighand->siglock);
945
946         if (!prepare_signal(sig, t, from_ancestor_ns))
947                 return 0;
948
949         pending = group ? &t->signal->shared_pending : &t->pending;
950         /*
951          * Short-circuit ignored signals and support queuing
952          * exactly one non-rt signal, so that we can get more
953          * detailed information about the cause of the signal.
954          */
955         if (legacy_queue(pending, sig))
956                 return 0;
957         /*
958          * fast-pathed signals for kernel-internal things like SIGSTOP
959          * or SIGKILL.
960          */
961         if (info == SEND_SIG_FORCED)
962                 goto out_set;
963
964         /* Real-time signals must be queued if sent by sigqueue, or
965            some other real-time mechanism.  It is implementation
966            defined whether kill() does so.  We attempt to do so, on
967            the principle of least surprise, but since kill is not
968            allowed to fail with EAGAIN when low on memory we just
969            make sure at least one signal gets delivered and don't
970            pass on the info struct.  */
971
972         if (sig < SIGRTMIN)
973                 override_rlimit = (is_si_special(info) || info->si_code >= 0);
974         else
975                 override_rlimit = 0;
976
977         q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
978                 override_rlimit);
979         if (q) {
980                 list_add_tail(&q->list, &pending->list);
981                 switch ((unsigned long) info) {
982                 case (unsigned long) SEND_SIG_NOINFO:
983                         q->info.si_signo = sig;
984                         q->info.si_errno = 0;
985                         q->info.si_code = SI_USER;
986                         q->info.si_pid = task_tgid_nr_ns(current,
987                                                         task_active_pid_ns(t));
988                         q->info.si_uid = current_uid();
989                         break;
990                 case (unsigned long) SEND_SIG_PRIV:
991                         q->info.si_signo = sig;
992                         q->info.si_errno = 0;
993                         q->info.si_code = SI_KERNEL;
994                         q->info.si_pid = 0;
995                         q->info.si_uid = 0;
996                         break;
997                 default:
998                         copy_siginfo(&q->info, info);
999                         if (from_ancestor_ns)
1000                                 q->info.si_pid = 0;
1001                         break;
1002                 }
1003         } else if (!is_si_special(info)) {
1004                 if (sig >= SIGRTMIN && info->si_code != SI_USER) {
1005                         /*
1006                          * Queue overflow, abort.  We may abort if the
1007                          * signal was rt and sent by user using something
1008                          * other than kill().
1009                          */
1010                         trace_signal_overflow_fail(sig, group, info);
1011                         return -EAGAIN;
1012                 } else {
1013                         /*
1014                          * This is a silent loss of information.  We still
1015                          * send the signal, but the *info bits are lost.
1016                          */
1017                         trace_signal_lose_info(sig, group, info);
1018                 }
1019         }
1020
1021 out_set:
1022         signalfd_notify(t, sig);
1023         sigaddset(&pending->signal, sig);
1024         complete_signal(sig, t, group);
1025         return 0;
1026 }
1027
1028 static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1029                         int group)
1030 {
1031         int from_ancestor_ns = 0;
1032
1033 #ifdef CONFIG_PID_NS
1034         from_ancestor_ns = si_fromuser(info) &&
1035                            !task_pid_nr_ns(current, task_active_pid_ns(t));
1036 #endif
1037
1038         return __send_signal(sig, info, t, group, from_ancestor_ns);
1039 }
1040
1041 static void print_fatal_signal(struct pt_regs *regs, int signr)
1042 {
1043         printk("%s/%d: potentially unexpected fatal signal %d.\n",
1044                 current->comm, task_pid_nr(current), signr);
1045
1046 #if defined(__i386__) && !defined(__arch_um__)
1047         printk("code at %08lx: ", regs->ip);
1048         {
1049                 int i;
1050                 for (i = 0; i < 16; i++) {
1051                         unsigned char insn;
1052
1053                         if (get_user(insn, (unsigned char *)(regs->ip + i)))
1054                                 break;
1055                         printk("%02x ", insn);
1056                 }
1057         }
1058 #endif
1059         printk("\n");
1060         preempt_disable();
1061         show_regs(regs);
1062         preempt_enable();
1063 }
1064
1065 static int __init setup_print_fatal_signals(char *str)
1066 {
1067         get_option (&str, &print_fatal_signals);
1068
1069         return 1;
1070 }
1071
1072 __setup("print-fatal-signals=", setup_print_fatal_signals);
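/*
 * Illustrative usage (not part of the original file): booting with
 * "print-fatal-signals=1" on the kernel command line enables the
 * diagnostics above, so print_fatal_signal() dumps the faulting
 * instruction bytes and registers and print_dropped_signal() reports
 * (rate-limited) signals dropped at RLIMIT_SIGPENDING.  The same knob is
 * typically also reachable at run time via the kernel.print-fatal-signals
 * sysctl.
 */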
1073
1074 int
1075 __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1076 {
1077         return send_signal(sig, info, p, 1);
1078 }
1079
1080 static int
1081 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1082 {
1083         return send_signal(sig, info, t, 0);
1084 }
1085
1086 int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
1087                         bool group)
1088 {
1089         unsigned long flags;
1090         int ret = -ESRCH;
1091
1092         if (lock_task_sighand(p, &flags)) {
1093                 ret = send_signal(sig, info, p, group);
1094                 unlock_task_sighand(p, &flags);
1095         }
1096
1097         return ret;
1098 }
1099
1100 /*
1101  * Force a signal that the process can't ignore: if necessary
1102  * we unblock the signal and change any SIG_IGN to SIG_DFL.
1103  *
1104  * Note: If we unblock the signal, we always reset it to SIG_DFL,
1105  * since we do not want to have a signal handler that was blocked
1106  * be invoked when user space had explicitly blocked it.
1107  *
1108  * We don't want to have recursive SIGSEGV's etc, for example,
1109  * that is why we also clear SIGNAL_UNKILLABLE.
1110  */
1111 int
1112 force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
1113 {
1114         unsigned long int flags;
1115         int ret, blocked, ignored;
1116         struct k_sigaction *action;
1117
1118         spin_lock_irqsave(&t->sighand->siglock, flags);
1119         action = &t->sighand->action[sig-1];
1120         ignored = action->sa.sa_handler == SIG_IGN;
1121         blocked = sigismember(&t->blocked, sig);
1122         if (blocked || ignored) {
1123                 action->sa.sa_handler = SIG_DFL;
1124                 if (blocked) {
1125                         sigdelset(&t->blocked, sig);
1126                         recalc_sigpending_and_wake(t);
1127                 }
1128         }
1129         if (action->sa.sa_handler == SIG_DFL)
1130                 t->signal->flags &= ~SIGNAL_UNKILLABLE;
1131         ret = specific_send_sig_info(sig, info, t);
1132         spin_unlock_irqrestore(&t->sighand->siglock, flags);
1133
1134         return ret;
1135 }
1136
1137 /*
1138  * Nuke all other threads in the group.
1139  */
1140 int zap_other_threads(struct task_struct *p)
1141 {
1142         struct task_struct *t = p;
1143         int count = 0;
1144
1145         p->signal->group_stop_count = 0;
1146
1147         while_each_thread(p, t) {
1148                 task_clear_group_stop_pending(t);
1149                 count++;
1150
1151                 /* Don't bother with already dead threads */
1152                 if (t->exit_state)
1153                         continue;
1154                 sigaddset(&t->pending.signal, SIGKILL);
1155                 signal_wake_up(t, 1);
1156         }
1157
1158         return count;
1159 }
1160
1161 struct sighand_struct *__lock_task_sighand(struct task_struct *tsk,
1162                                            unsigned long *flags)
1163 {
1164         struct sighand_struct *sighand;
1165
1166         rcu_read_lock();
1167         for (;;) {
1168                 sighand = rcu_dereference(tsk->sighand);
1169                 if (unlikely(sighand == NULL))
1170                         break;
1171
1172                 spin_lock_irqsave(&sighand->siglock, *flags);
1173                 if (likely(sighand == tsk->sighand))
1174                         break;
1175                 spin_unlock_irqrestore(&sighand->siglock, *flags);
1176         }
1177         rcu_read_unlock();
1178
1179         return sighand;
1180 }
1181
1182 /*
1183  * send signal info to all the members of a group
1184  */
1185 int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1186 {
1187         int ret;
1188
1189         rcu_read_lock();
1190         ret = check_kill_permission(sig, info, p);
1191         rcu_read_unlock();
1192
1193         if (!ret && sig)
1194                 ret = do_send_sig_info(sig, info, p, true);
1195
1196         return ret;
1197 }
1198
1199 /*
1200  * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1201  * control characters do (^C, ^Z etc)
1202  * - the caller must hold at least a readlock on tasklist_lock
1203  */
1204 int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1205 {
1206         struct task_struct *p = NULL;
1207         int retval, success;
1208
1209         success = 0;
1210         retval = -ESRCH;
1211         do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
1212                 int err = group_send_sig_info(sig, info, p);
1213                 success |= !err;
1214                 retval = err;
1215         } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
1216         return success ? 0 : retval;
1217 }
1218
1219 int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1220 {
1221         int error = -ESRCH;
1222         struct task_struct *p;
1223
1224         rcu_read_lock();
1225 retry:
1226         p = pid_task(pid, PIDTYPE_PID);
1227         if (p) {
1228                 error = group_send_sig_info(sig, info, p);
1229                 if (unlikely(error == -ESRCH))
1230                         /*
1231                          * The task was unhashed in between, try again.
1232                          * If it is dead, pid_task() will return NULL,
1233                          * if we race with de_thread() it will find the
1234                          * new leader.
1235                          */
1236                         goto retry;
1237         }
1238         rcu_read_unlock();
1239
1240         return error;
1241 }
1242
1243 int
1244 kill_proc_info(int sig, struct siginfo *info, pid_t pid)
1245 {
1246         int error;
1247         rcu_read_lock();
1248         error = kill_pid_info(sig, info, find_vpid(pid));
1249         rcu_read_unlock();
1250         return error;
1251 }
1252
1253 /* like kill_pid_info(), but doesn't use uid/euid of "current" */
1254 int kill_pid_info_as_uid(int sig, struct siginfo *info, struct pid *pid,
1255                       uid_t uid, uid_t euid, u32 secid)
1256 {
1257         int ret = -EINVAL;
1258         struct task_struct *p;
1259         const struct cred *pcred;
1260         unsigned long flags;
1261
1262         if (!valid_signal(sig))
1263                 return ret;
1264
1265         rcu_read_lock();
1266         p = pid_task(pid, PIDTYPE_PID);
1267         if (!p) {
1268                 ret = -ESRCH;
1269                 goto out_unlock;
1270         }
1271         pcred = __task_cred(p);
1272         if (si_fromuser(info) &&
1273             euid != pcred->suid && euid != pcred->uid &&
1274             uid  != pcred->suid && uid  != pcred->uid) {
1275                 ret = -EPERM;
1276                 goto out_unlock;
1277         }
1278         ret = security_task_kill(p, info, sig, secid);
1279         if (ret)
1280                 goto out_unlock;
1281
1282         if (sig) {
1283                 if (lock_task_sighand(p, &flags)) {
1284                         ret = __send_signal(sig, info, p, 1, 0);
1285                         unlock_task_sighand(p, &flags);
1286                 } else
1287                         ret = -ESRCH;
1288         }
1289 out_unlock:
1290         rcu_read_unlock();
1291         return ret;
1292 }
1293 EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1294
1295 /*
1296  * kill_something_info() interprets pid in interesting ways just like kill(2).
1297  *
1298  * POSIX specifies that kill(-1,sig) is unspecified, but what we have
1299  * is probably wrong.  Should make it like BSD or SYSV.
1300  */
1301
1302 static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
1303 {
1304         int ret;
1305
1306         if (pid > 0) {
1307                 rcu_read_lock();
1308                 ret = kill_pid_info(sig, info, find_vpid(pid));
1309                 rcu_read_unlock();
1310                 return ret;
1311         }
1312
1313         read_lock(&tasklist_lock);
1314         if (pid != -1) {
1315                 ret = __kill_pgrp_info(sig, info,
1316                                 pid ? find_vpid(-pid) : task_pgrp(current));
1317         } else {
1318                 int retval = 0, count = 0;
1319                 struct task_struct * p;
1320
1321                 for_each_process(p) {
1322                         if (task_pid_vnr(p) > 1 &&
1323                                         !same_thread_group(p, current)) {
1324                                 int err = group_send_sig_info(sig, info, p);
1325                                 ++count;
1326                                 if (err != -EPERM)
1327                                         retval = err;
1328                         }
1329                 }
1330                 ret = count ? retval : -ESRCH;
1331         }
1332         read_unlock(&tasklist_lock);
1333
1334         return ret;
1335 }
1336
1337 /*
1338  * These are for backward compatibility with the rest of the kernel source.
1339  */
1340
1341 int
1342 send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1343 {
1344         /*
1345          * Make sure legacy kernel users don't send in bad values
1346          * (normal paths check this in check_kill_permission).
1347          */
1348         if (!valid_signal(sig))
1349                 return -EINVAL;
1350
1351         return do_send_sig_info(sig, info, p, false);
1352 }
1353
1354 #define __si_special(priv) \
1355         ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO)
1356
1357 int
1358 send_sig(int sig, struct task_struct *p, int priv)
1359 {
1360         return send_sig_info(sig, __si_special(priv), p);
1361 }
1362
1363 void
1364 force_sig(int sig, struct task_struct *p)
1365 {
1366         force_sig_info(sig, SEND_SIG_PRIV, p);
1367 }
1368
1369 /*
1370  * When things go south during signal handling, we
1371  * will force a SIGSEGV. And if the signal that caused
1372  * the problem was already a SIGSEGV, we'll want to
1373  * make sure we don't even try to deliver the signal..
1374  */
1375 int
1376 force_sigsegv(int sig, struct task_struct *p)
1377 {
1378         if (sig == SIGSEGV) {
1379                 unsigned long flags;
1380                 spin_lock_irqsave(&p->sighand->siglock, flags);
1381                 p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL;
1382                 spin_unlock_irqrestore(&p->sighand->siglock, flags);
1383         }
1384         force_sig(SIGSEGV, p);
1385         return 0;
1386 }
1387
1388 int kill_pgrp(struct pid *pid, int sig, int priv)
1389 {
1390         int ret;
1391
1392         read_lock(&tasklist_lock);
1393         ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1394         read_unlock(&tasklist_lock);
1395
1396         return ret;
1397 }
1398 EXPORT_SYMBOL(kill_pgrp);
1399
1400 int kill_pid(struct pid *pid, int sig, int priv)
1401 {
1402         return kill_pid_info(sig, __si_special(priv), pid);
1403 }
1404 EXPORT_SYMBOL(kill_pid);
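/*
 * Illustrative sketch (not part of the original file): a kernel user
 * delivering SIGIO to a process it holds a struct pid reference on, for
 * example one saved earlier with get_task_pid().  my_notify() and
 * owner_pid are invented names.
 */
#if 0	/* example only, not compiled */
static void my_notify(struct pid *owner_pid)
{
	/* priv != 0 selects SEND_SIG_PRIV, i.e. si_code = SI_KERNEL. */
	kill_pid(owner_pid, SIGIO, 1);
}
#endif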
1405
1406 /*
1407  * These functions support sending signals using preallocated sigqueue
1408  * structures.  This is needed "because realtime applications cannot
1409  * afford to lose notifications of asynchronous events, like timer
1410  * expirations or I/O completions".  In the case of Posix Timers
1411  * we allocate the sigqueue structure from the timer_create.  If this
1412  * allocation fails we are able to report the failure to the application
1413  * with an EAGAIN error.
1414  */
1415 struct sigqueue *sigqueue_alloc(void)
1416 {
1417         struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0);
1418
1419         if (q)
1420                 q->flags |= SIGQUEUE_PREALLOC;
1421
1422         return q;
1423 }
1424
1425 void sigqueue_free(struct sigqueue *q)
1426 {
1427         unsigned long flags;
1428         spinlock_t *lock = &current->sighand->siglock;
1429
1430         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1431         /*
1432          * We must hold ->siglock while testing q->list
1433          * to serialize with collect_signal() or with
1434          * __exit_signal()->flush_sigqueue().
1435          */
1436         spin_lock_irqsave(lock, flags);
1437         q->flags &= ~SIGQUEUE_PREALLOC;
1438         /*
1439          * If it is queued it will be freed when dequeued,
1440          * like the "regular" sigqueue.
1441          */
1442         if (!list_empty(&q->list))
1443                 q = NULL;
1444         spin_unlock_irqrestore(lock, flags);
1445
1446         if (q)
1447                 __sigqueue_free(q);
1448 }
1449
1450 int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
1451 {
1452         int sig = q->info.si_signo;
1453         struct sigpending *pending;
1454         unsigned long flags;
1455         int ret;
1456
1457         BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
1458
1459         ret = -1;
1460         if (!likely(lock_task_sighand(t, &flags)))
1461                 goto ret;
1462
1463         ret = 1; /* the signal is ignored */
1464         if (!prepare_signal(sig, t, 0))
1465                 goto out;
1466
1467         ret = 0;
1468         if (unlikely(!list_empty(&q->list))) {
1469                 /*
1470                  * If an SI_TIMER entry is already queued, just increment
1471                  * the overrun count.
1472                  */
1473                 BUG_ON(q->info.si_code != SI_TIMER);
1474                 q->info.si_overrun++;
1475                 goto out;
1476         }
1477         q->info.si_overrun = 0;
1478
1479         signalfd_notify(t, sig);
1480         pending = group ? &t->signal->shared_pending : &t->pending;
1481         list_add_tail(&q->list, &pending->list);
1482         sigaddset(&pending->signal, sig);
1483         complete_signal(sig, t, group);
1484 out:
1485         unlock_task_sighand(t, &flags);
1486 ret:
1487         return ret;
1488 }
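/*
 * Illustrative sketch (not part of the original file): the preallocated
 * sigqueue lifecycle as a POSIX-timer-like user would drive it.  The
 * entry is allocated at creation time so a later expiry cannot fail with
 * -EAGAIN, reused for every expiry (re-queuing while still pending only
 * bumps si_overrun), and released at deletion.  struct my_timer and the
 * my_timer_*() helpers are invented for illustration.
 */
#if 0	/* example only, not compiled */
struct my_timer {
	struct sigqueue		*sigq;
	struct task_struct	*target;
};

static int my_timer_create(struct my_timer *tmr, struct task_struct *t, int sig)
{
	tmr->sigq = sigqueue_alloc();	/* report -EAGAIN now, not at expiry */
	if (!tmr->sigq)
		return -EAGAIN;

	tmr->sigq->info.si_signo = sig;
	tmr->sigq->info.si_code = SI_TIMER;
	tmr->target = t;
	return 0;
}

static void my_timer_expire(struct my_timer *tmr)
{
	/* Queue to the whole thread group; overruns handled by send_sigqueue(). */
	send_sigqueue(tmr->sigq, tmr->target, 1);
}

static void my_timer_delete(struct my_timer *tmr)
{
	sigqueue_free(tmr->sigq);
}
#endif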
1489
1490 /*
1491  * Let a parent know about the death of a child.
1492  * For a stopped/continued status change, use do_notify_parent_cldstop instead.
1493  *
1494  * Returns -1 if our parent ignored us and so we've switched to
1495  * self-reaping, or else @sig.
1496  */
1497 int do_notify_parent(struct task_struct *tsk, int sig)
1498 {
1499         struct siginfo info;
1500         unsigned long flags;
1501         struct sighand_struct *psig;
1502         int ret = sig;
1503
1504         BUG_ON(sig == -1);
1505
1506         /* do_notify_parent_cldstop should have been called instead.  */
1507         BUG_ON(task_is_stopped_or_traced(tsk));
1508
1509         BUG_ON(!task_ptrace(tsk) &&
1510                (tsk->group_leader != tsk || !thread_group_empty(tsk)));
1511
1512         info.si_signo = sig;
1513         info.si_errno = 0;
1514         /*
1515          * we are under tasklist_lock here so our parent is tied to
1516          * us and cannot exit and release its namespace.
1517          *
1518          * the only it can is to switch its nsproxy with sys_unshare,
1519          * the only thing it can do is switch its nsproxy with sys_unshare,
1520          * but unsharing pid namespaces is not allowed, so we'll always
1521          * see the relevant namespace
1522          *
1523          * write_lock() currently calls preempt_disable() which is the
1524          * same as rcu_read_lock(), but according to Oleg it is not
1525          * correct to rely on this
1526         rcu_read_lock();
1527         info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns);
1528         info.si_uid = __task_cred(tsk)->uid;
1529         rcu_read_unlock();
1530
1531         info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime,
1532                                 tsk->signal->utime));
1533         info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime,
1534                                 tsk->signal->stime));
1535
1536         info.si_status = tsk->exit_code & 0x7f;
1537         if (tsk->exit_code & 0x80)
1538                 info.si_code = CLD_DUMPED;
1539         else if (tsk->exit_code & 0x7f)
1540                 info.si_code = CLD_KILLED;
1541         else {
1542                 info.si_code = CLD_EXITED;
1543                 info.si_status = tsk->exit_code >> 8;
1544         }
1545
1546         psig = tsk->parent->sighand;
1547         spin_lock_irqsave(&psig->siglock, flags);
1548         if (!task_ptrace(tsk) && sig == SIGCHLD &&
1549             (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
1550              (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
1551                 /*
1552                  * We are exiting and our parent doesn't care.  POSIX.1
1553                  * defines special semantics for setting SIGCHLD to SIG_IGN
1554                  * or setting the SA_NOCLDWAIT flag: we should be reaped
1555                  * automatically and not left for our parent's wait4 call.
1556                  * Rather than having the parent do it as a magic kind of
1557                  * signal handler, we just set this to tell do_exit that we
1558                  * can be cleaned up without becoming a zombie.  Note that
1559                  * we still call __wake_up_parent in this case, because a
1560                  * blocked sys_wait4 might now return -ECHILD.
1561                  *
1562                  * Whether we send SIGCHLD or not for SA_NOCLDWAIT
1563                  * is implementation-defined: we do (if you don't want
1564                  * it, just use SIG_IGN instead).
1565                  */
1566                 ret = tsk->exit_signal = -1;
1567                 if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN)
1568                         sig = -1;
1569         }
1570         if (valid_signal(sig) && sig > 0)
1571                 __group_send_sig_info(sig, &info, tsk->parent);
1572         __wake_up_parent(tsk, tsk->parent);
1573         spin_unlock_irqrestore(&psig->siglock, flags);
1574
1575         return ret;
1576 }
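
/*
 * Illustrative userspace sketch (not part of this file): the auto-reap
 * behaviour described in the comment above is what a parent requests by
 * setting SIGCHLD to SIG_IGN or by using SA_NOCLDWAIT; its children are
 * then reaped automatically and wait4() returns -ECHILD.  A minimal,
 * hedged example assuming standard libc:
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = SIG_IGN;
 *	sa.sa_flags = SA_NOCLDWAIT;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGCHLD, &sa, NULL);
 */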
1577
1578 static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
1579 {
1580         struct siginfo info;
1581         unsigned long flags;
1582         struct task_struct *parent;
1583         struct sighand_struct *sighand;
1584
1585         if (task_ptrace(tsk))
1586                 parent = tsk->parent;
1587         else {
1588                 tsk = tsk->group_leader;
1589                 parent = tsk->real_parent;
1590         }
1591
1592         info.si_signo = SIGCHLD;
1593         info.si_errno = 0;
1594         /*
1595          * See the comment in do_notify_parent() about the following 3 lines.
1596          */
1597         rcu_read_lock();
1598         info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
1599         info.si_uid = __task_cred(tsk)->uid;
1600         rcu_read_unlock();
1601
1602         info.si_utime = cputime_to_clock_t(tsk->utime);
1603         info.si_stime = cputime_to_clock_t(tsk->stime);
1604
1605         info.si_code = why;
1606         switch (why) {
1607         case CLD_CONTINUED:
1608                 info.si_status = SIGCONT;
1609                 break;
1610         case CLD_STOPPED:
1611                 info.si_status = tsk->signal->group_exit_code & 0x7f;
1612                 break;
1613         case CLD_TRAPPED:
1614                 info.si_status = tsk->exit_code & 0x7f;
1615                 break;
1616         default:
1617                 BUG();
1618         }
1619
1620         sighand = parent->sighand;
1621         spin_lock_irqsave(&sighand->siglock, flags);
1622         if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN &&
1623             !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP))
1624                 __group_send_sig_info(SIGCHLD, &info, parent);
1625         /*
1626          * Even if SIGCHLD is not generated, we must wake up wait4 calls.
1627          */
1628         __wake_up_parent(tsk, parent);
1629         spin_unlock_irqrestore(&sighand->siglock, flags);
1630 }
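
/*
 * Illustrative userspace sketch (not part of this file): the SA_NOCLDSTOP
 * test above means a parent can opt out of SIGCHLD for stop/continue
 * transitions while still being notified of exits.  A minimal example,
 * assuming standard libc and a hypothetical handler on_child_exit():
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = on_child_exit;
 *	sa.sa_flags = SA_NOCLDSTOP;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGCHLD, &sa, NULL);
 */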
1631
1632 static inline int may_ptrace_stop(void)
1633 {
1634         if (!likely(task_ptrace(current)))
1635                 return 0;
1636         /*
1637          * Are we in the middle of do_coredump?
1638          * If so, and our tracer is also part of the coredump, stopping
1639          * is a deadlock situation and pointless because our tracer
1640          * is dead, so don't allow us to stop.
1641          * If SIGKILL was already sent before the caller unlocked
1642          * ->siglock we must see ->core_state != NULL. Otherwise it
1643          * is safe to enter schedule().
1644          */
1645         if (unlikely(current->mm->core_state) &&
1646             unlikely(current->mm == current->parent->mm))
1647                 return 0;
1648
1649         return 1;
1650 }
1651
1652 /*
1653  * Return nonzero if there is a SIGKILL that should be waking us up.
1654  * Called with the siglock held.
1655  */
1656 static int sigkill_pending(struct task_struct *tsk)
1657 {
1658         return  sigismember(&tsk->pending.signal, SIGKILL) ||
1659                 sigismember(&tsk->signal->shared_pending.signal, SIGKILL);
1660 }
1661
1662 /*
1663  * This must be called with current->sighand->siglock held.
1664  *
1665  * This should be the path for all ptrace stops.
1666  * We always set current->last_siginfo while stopped here.
1667  * That makes it a way to test a stopped process for
1668  * being ptrace-stopped vs being job-control-stopped.
1669  *
1670  * If we actually decide not to stop at all because the tracer
1671  * is gone, we keep current->exit_code unless clear_code.
1672  */
1673 static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info)
1674         __releases(&current->sighand->siglock)
1675         __acquires(&current->sighand->siglock)
1676 {
1677         if (arch_ptrace_stop_needed(exit_code, info)) {
1678                 /*
1679                  * The arch code has something special to do before a
1680                  * ptrace stop.  This is allowed to block, e.g. for faults
1681                  * on user stack pages.  We can't keep the siglock while
1682                  * calling arch_ptrace_stop, so we must release it now.
1683                  * To preserve proper semantics, we must do this before
1684                  * any signal bookkeeping like checking group_stop_count.
1685                  * Meanwhile, a SIGKILL could come in before we retake the
1686                  * siglock.  That must prevent us from sleeping in TASK_TRACED.
1687                  * So after regaining the lock, we must check for SIGKILL.
1688                  */
1689                 spin_unlock_irq(&current->sighand->siglock);
1690                 arch_ptrace_stop(exit_code, info);
1691                 spin_lock_irq(&current->sighand->siglock);
1692                 if (sigkill_pending(current))
1693                         return;
1694         }
1695
1696         /*
1697          * If there is a group stop in progress,
1698          * we must participate in the bookkeeping.
1699          */
1700         if (current->group_stop & GROUP_STOP_PENDING)
1701                 task_participate_group_stop(current);
1702
1703         current->last_siginfo = info;
1704         current->exit_code = exit_code;
1705
1706         /* Let the debugger run.  */
1707         __set_current_state(TASK_TRACED);
1708         spin_unlock_irq(&current->sighand->siglock);
1709         read_lock(&tasklist_lock);
1710         if (may_ptrace_stop()) {
1711                 do_notify_parent_cldstop(current, why);
1712                 /*
1713                  * Don't want to allow preemption here, because
1714                  * sys_ptrace() needs this task to be inactive.
1715                  *
1716                  * XXX: implement read_unlock_no_resched().
1717                  */
1718                 preempt_disable();
1719                 read_unlock(&tasklist_lock);
1720                 preempt_enable_no_resched();
1721                 schedule();
1722         } else {
1723                 /*
1724                  * By the time we got the lock, our tracer went away.
1725                  * Don't drop the lock yet, another tracer may come.
1726                  */
1727                 __set_current_state(TASK_RUNNING);
1728                 if (clear_code)
1729                         current->exit_code = 0;
1730                 read_unlock(&tasklist_lock);
1731         }
1732
1733         /*
1734          * While in TASK_TRACED, we were considered "frozen enough".
1735          * Now that we woke up, it's crucial that, if we're supposed to be
1736          * frozen, we freeze now before running anything substantial.
1737          */
1738         try_to_freeze();
1739
1740         /*
1741          * We are back.  Now reacquire the siglock before touching
1742          * last_siginfo, so that we are sure to have synchronized with
1743          * any signal-sending on another CPU that wants to examine it.
1744          */
1745         spin_lock_irq(&current->sighand->siglock);
1746         current->last_siginfo = NULL;
1747
1748         /*
1749          * Queued signals ignored us while we were stopped for tracing.
1750          * So check for any that we should take before resuming user mode.
1751          * This sets TIF_SIGPENDING, but never clears it.
1752          */
1753         recalc_sigpending_tsk(current);
1754 }
1755
1756 void ptrace_notify(int exit_code)
1757 {
1758         siginfo_t info;
1759
1760         BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
1761
1762         memset(&info, 0, sizeof info);
1763         info.si_signo = SIGTRAP;
1764         info.si_code = exit_code;
1765         info.si_pid = task_pid_vnr(current);
1766         info.si_uid = current_uid();
1767
1768         /* Let the debugger run.  */
1769         spin_lock_irq(&current->sighand->siglock);
1770         ptrace_stop(exit_code, CLD_TRAPPED, 1, &info);
1771         spin_unlock_irq(&current->sighand->siglock);
1772 }
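
/*
 * Illustrative userspace sketch (not part of this file): a tracer observes
 * the TASK_TRACED stop entered above as a stopped waitpid() status on the
 * tracee and then resumes it with ptrace().  A minimal sketch, assuming the
 * tracee has already done PTRACE_TRACEME and "child" is its known pid:
 *
 *	int status;
 *
 *	waitpid(child, &status, 0);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGTRAP)
 *		ptrace(PTRACE_CONT, child, NULL, NULL);
 */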
1773
1774 /*
1775  * This performs the stopping for SIGSTOP and other stop signals.
1776  * We have to stop all threads in the thread group.
1777  * Returns nonzero if we've actually stopped and released the siglock.
1778  * Returns zero if we didn't stop and still hold the siglock.
1779  */
1780 static int do_signal_stop(int signr)
1781 {
1782         struct signal_struct *sig = current->signal;
1783         int notify = 0;
1784
1785         if (!(current->group_stop & GROUP_STOP_PENDING)) {
1786                 unsigned int gstop = GROUP_STOP_PENDING | GROUP_STOP_CONSUME;
1787                 struct task_struct *t;
1788
1789                 if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
1790                     unlikely(signal_group_exit(sig)))
1791                         return 0;
1792                 /*
1793                  * There is no group stop already in progress.
1794                  * We must initiate one now.
1795                  */
1796                 sig->group_exit_code = signr;
1797
1798                 current->group_stop = gstop;
1799                 sig->group_stop_count = 1;
1800                 for (t = next_thread(current); t != current; t = next_thread(t))
1801                         /*
1802                          * Setting state to TASK_STOPPED for a group
1803                          * stop is always done with the siglock held,
1804                          * so this check has no races.
1805                          */
1806                         if (!(t->flags & PF_EXITING) && !task_is_stopped(t)) {
1807                                 t->group_stop = gstop;
1808                                 sig->group_stop_count++;
1809                                 signal_wake_up(t, 0);
1810                         } else
1811                                 task_clear_group_stop_pending(t);
1812         }
1813         /*
1814          * If there are no other threads in the group, or if there is
1815          * a group stop in progress and we are the last to stop, report
1816          * to the parent.  When ptraced, every thread reports itself.
1817          */
1818         if (task_participate_group_stop(current))
1819                 notify = CLD_STOPPED;
1820         if (task_ptrace(current))
1821                 notify = CLD_STOPPED;
1822
1823         current->exit_code = sig->group_exit_code;
1824         __set_current_state(TASK_STOPPED);
1825
1826         spin_unlock_irq(&current->sighand->siglock);
1827
1828         if (notify) {
1829                 read_lock(&tasklist_lock);
1830                 do_notify_parent_cldstop(current, notify);
1831                 read_unlock(&tasklist_lock);
1832         }
1833
1834         /* Now we don't run again until woken by SIGCONT or SIGKILL */
1835         schedule();
1836
1837         tracehook_finish_jctl();
1838         current->exit_code = 0;
1839
1840         return 1;
1841 }
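
/*
 * Illustrative userspace sketch (not part of this file): the group stop
 * performed above is what a parent sees when it waits with WUNTRACED after
 * stopping a child.  A minimal sketch, assuming "child" is a known pid:
 *
 *	int status;
 *
 *	kill(child, SIGSTOP);
 *	waitpid(child, &status, WUNTRACED);
 *	if (WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP)
 *		kill(child, SIGCONT);
 */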
1842
1843 static int ptrace_signal(int signr, siginfo_t *info,
1844                          struct pt_regs *regs, void *cookie)
1845 {
1846         if (!task_ptrace(current))
1847                 return signr;
1848
1849         ptrace_signal_deliver(regs, cookie);
1850
1851         /* Let the debugger run.  */
1852         ptrace_stop(signr, CLD_TRAPPED, 0, info);
1853
1854         /* We're back.  Did the debugger cancel the sig?  */
1855         signr = current->exit_code;
1856         if (signr == 0)
1857                 return signr;
1858
1859         current->exit_code = 0;
1860
1861         /* Update the siginfo structure if the signal has
1862            changed.  If the debugger wanted something
1863            specific in the siginfo structure then it should
1864            have updated *info via PTRACE_SETSIGINFO.  */
1865         if (signr != info->si_signo) {
1866                 info->si_signo = signr;
1867                 info->si_errno = 0;
1868                 info->si_code = SI_USER;
1869                 info->si_pid = task_pid_vnr(current->parent);
1870                 info->si_uid = task_uid(current->parent);
1871         }
1872
1873         /* If the (new) signal is now blocked, requeue it.  */
1874         if (sigismember(&current->blocked, signr)) {
1875                 specific_send_sig_info(signr, info, current);
1876                 signr = 0;
1877         }
1878
1879         return signr;
1880 }
1881
1882 int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
1883                           struct pt_regs *regs, void *cookie)
1884 {
1885         struct sighand_struct *sighand = current->sighand;
1886         struct signal_struct *signal = current->signal;
1887         int signr;
1888
1889 relock:
1890         /*
1891          * We'll jump back here any time we were stopped in TASK_STOPPED.
1892          * While in TASK_STOPPED, we were considered "frozen enough".
1893          * Now that we woke up, it's crucial that, if we're supposed to be
1894          * frozen, we freeze now before running anything substantial.
1895          */
1896         try_to_freeze();
1897
1898         spin_lock_irq(&sighand->siglock);
1899         /*
1900          * Every stopped thread goes here after wakeup. Check to see if
1901          * we should notify the parent, prepare_signal(SIGCONT) encodes
1902          * the CLD_ si_code into SIGNAL_CLD_MASK bits.
1903          */
1904         if (unlikely(signal->flags & SIGNAL_CLD_MASK)) {
1905                 int why;
1906
1907                 if (signal->flags & SIGNAL_CLD_CONTINUED)
1908                         why = CLD_CONTINUED;
1909                 else
1910                         why = CLD_STOPPED;
1911
1912                 signal->flags &= ~SIGNAL_CLD_MASK;
1913
1914                 spin_unlock_irq(&sighand->siglock);
1915
1916                 read_lock(&tasklist_lock);
1917                 do_notify_parent_cldstop(current->group_leader, why);
1918                 read_unlock(&tasklist_lock);
1919                 goto relock;
1920         }
1921
1922         for (;;) {
1923                 struct k_sigaction *ka;
1924                 /*
1925                  * Tracing can induce an artificial signal and choose sigaction.
1926                  * The return value in @signr determines the default action,
1927                  * but @info->si_signo is the signal number we will report.
1928                  */
1929                 signr = tracehook_get_signal(current, regs, info, return_ka);
1930                 if (unlikely(signr < 0))
1931                         goto relock;
1932                 if (unlikely(signr != 0))
1933                         ka = return_ka;
1934                 else {
1935                         if (unlikely(current->group_stop &
1936                                      GROUP_STOP_PENDING) && do_signal_stop(0))
1937                                 goto relock;
1938
1939                         signr = dequeue_signal(current, &current->blocked,
1940                                                info);
1941
1942                         if (!signr)
1943                                 break; /* will return 0 */
1944
1945                         if (signr != SIGKILL) {
1946                                 signr = ptrace_signal(signr, info,
1947                                                       regs, cookie);
1948                                 if (!signr)
1949                                         continue;
1950                         }
1951
1952                         ka = &sighand->action[signr-1];
1953                 }
1954
1955                 /* Trace actually delivered signals. */
1956                 trace_signal_deliver(signr, info, ka);
1957
1958                 if (ka->sa.sa_handler == SIG_IGN) /* Do nothing.  */
1959                         continue;
1960                 if (ka->sa.sa_handler != SIG_DFL) {
1961                         /* Run the handler.  */
1962                         *return_ka = *ka;
1963
1964                         if (ka->sa.sa_flags & SA_ONESHOT)
1965                                 ka->sa.sa_handler = SIG_DFL;
1966
1967                         break; /* will return non-zero "signr" value */
1968                 }
1969
1970                 /*
1971                  * Now we are doing the default action for this signal.
1972                  */
1973                 if (sig_kernel_ignore(signr)) /* Default is nothing. */
1974                         continue;
1975
1976                 /*
1977                  * Global init gets no signals it doesn't want.
1978                  * Container-init gets no signals it doesn't want from same
1979                  * container.
1980                  *
1981                  * Note that if global/container-init sees a sig_kernel_only()
1982                  * signal here, the signal must have been generated internally
1983                  * or must have come from an ancestor namespace. In either
1984                  * case, the signal cannot be dropped.
1985                  */
1986                 if (unlikely(signal->flags & SIGNAL_UNKILLABLE) &&
1987                                 !sig_kernel_only(signr))
1988                         continue;
1989
1990                 if (sig_kernel_stop(signr)) {
1991                         /*
1992                          * The default action is to stop all threads in
1993                          * the thread group.  The job control signals
1994                          * do nothing in an orphaned pgrp, but SIGSTOP
1995                          * always works.  Note that siglock needs to be
1996                          * dropped during the call to is_orphaned_pgrp()
1997                          * because of lock ordering with tasklist_lock.
1998                          * This allows an intervening SIGCONT to be posted.
1999                          * We need to check for that and bail out if necessary.
2000                          */
2001                         if (signr != SIGSTOP) {
2002                                 spin_unlock_irq(&sighand->siglock);
2003
2004                                 /* signals can be posted during this window */
2005
2006                                 if (is_current_pgrp_orphaned())
2007                                         goto relock;
2008
2009                                 spin_lock_irq(&sighand->siglock);
2010                         }
2011
2012                         if (likely(do_signal_stop(info->si_signo))) {
2013                                 /* It released the siglock.  */
2014                                 goto relock;
2015                         }
2016
2017                         /*
2018                          * We didn't actually stop, due to a race
2019                          * with SIGCONT or something like that.
2020                          */
2021                         continue;
2022                 }
2023
2024                 spin_unlock_irq(&sighand->siglock);
2025
2026                 /*
2027                  * Anything else is fatal, maybe with a core dump.
2028                  */
2029                 current->flags |= PF_SIGNALED;
2030
2031                 if (sig_kernel_coredump(signr)) {
2032                         if (print_fatal_signals)
2033                                 print_fatal_signal(regs, info->si_signo);
2034                         /*
2035                          * If it was able to dump core, this kills all
2036                          * other threads in the group and synchronizes with
2037                          * their demise.  If we lost the race with another
2038                          * thread getting here, it set group_exit_code
2039                          * first and our do_group_exit call below will use
2040                          * that value and ignore the one we pass it.
2041                          */
2042                         do_coredump(info->si_signo, info->si_signo, regs);
2043                 }
2044
2045                 /*
2046                  * Death signals, no core dump.
2047                  */
2048                 do_group_exit(info->si_signo);
2049                 /* NOTREACHED */
2050         }
2051         spin_unlock_irq(&sighand->siglock);
2052         return signr;
2053 }
2054
2055 void exit_signals(struct task_struct *tsk)
2056 {
2057         int group_stop = 0;
2058         struct task_struct *t;
2059
2060         if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
2061                 tsk->flags |= PF_EXITING;
2062                 return;
2063         }
2064
2065         spin_lock_irq(&tsk->sighand->siglock);
2066         /*
2067          * From now this task is not visible for group-wide signals,
2068          * see wants_signal(), do_signal_stop().
2069          */
2070         tsk->flags |= PF_EXITING;
2071         if (!signal_pending(tsk))
2072                 goto out;
2073
2074         /* It could be that __group_complete_signal() chose us to
2075          * notify about a group-wide signal. Another thread should be
2076          * woken now to take the signal since we will not.
2077          */
2078         for (t = tsk; (t = next_thread(t)) != tsk; )
2079                 if (!signal_pending(t) && !(t->flags & PF_EXITING))
2080                         recalc_sigpending_and_wake(t);
2081
2082         if (unlikely(tsk->group_stop & GROUP_STOP_PENDING) &&
2083             task_participate_group_stop(tsk))
2084                 group_stop = CLD_STOPPED;
2085 out:
2086         spin_unlock_irq(&tsk->sighand->siglock);
2087
2088         if (unlikely(group_stop)) {
2089                 read_lock(&tasklist_lock);
2090                 do_notify_parent_cldstop(tsk, group_stop);
2091                 read_unlock(&tasklist_lock);
2092         }
2093 }
2094
2095 EXPORT_SYMBOL(recalc_sigpending);
2096 EXPORT_SYMBOL_GPL(dequeue_signal);
2097 EXPORT_SYMBOL(flush_signals);
2098 EXPORT_SYMBOL(force_sig);
2099 EXPORT_SYMBOL(send_sig);
2100 EXPORT_SYMBOL(send_sig_info);
2101 EXPORT_SYMBOL(sigprocmask);
2102 EXPORT_SYMBOL(block_all_signals);
2103 EXPORT_SYMBOL(unblock_all_signals);
2104
2105
2106 /*
2107  * System call entry points.
2108  */
2109
2110 SYSCALL_DEFINE0(restart_syscall)
2111 {
2112         struct restart_block *restart = &current_thread_info()->restart_block;
2113         return restart->fn(restart);
2114 }
2115
2116 long do_no_restart_syscall(struct restart_block *param)
2117 {
2118         return -EINTR;
2119 }
2120
2121 /*
2122  * We don't need to get the kernel lock - this is all local to this
2123  * particular thread.. (and that's good, because this is _heavily_
2124  * used by various programs)
2125  */
2126
2127 /*
2128  * This is also useful for kernel threads that want to temporarily
2129  * (or permanently) block certain signals.
2130  *
2131  * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel
2132  * interface happily blocks "unblockable" signals like SIGKILL
2133  * and friends.
2134  */
2135 int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2136 {
2137         int error;
2138
2139         spin_lock_irq(&current->sighand->siglock);
2140         if (oldset)
2141                 *oldset = current->blocked;
2142
2143         error = 0;
2144         switch (how) {
2145         case SIG_BLOCK:
2146                 sigorsets(&current->blocked, &current->blocked, set);
2147                 break;
2148         case SIG_UNBLOCK:
2149                 signandsets(&current->blocked, &current->blocked, set);
2150                 break;
2151         case SIG_SETMASK:
2152                 current->blocked = *set;
2153                 break;
2154         default:
2155                 error = -EINVAL;
2156         }
2157         recalc_sigpending();
2158         spin_unlock_irq(&current->sighand->siglock);
2159
2160         return error;
2161 }
2162
2163 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
2164                 sigset_t __user *, oset, size_t, sigsetsize)
2165 {
2166         int error = -EINVAL;
2167         sigset_t old_set, new_set;
2168
2169         /* XXX: Don't preclude handling different sized sigset_t's.  */
2170         if (sigsetsize != sizeof(sigset_t))
2171                 goto out;
2172
2173         if (set) {
2174                 error = -EFAULT;
2175                 if (copy_from_user(&new_set, set, sizeof(*set)))
2176                         goto out;
2177                 sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP));
2178
2179                 error = sigprocmask(how, &new_set, &old_set);
2180                 if (error)
2181                         goto out;
2182                 if (oset)
2183                         goto set_old;
2184         } else if (oset) {
2185                 spin_lock_irq(&current->sighand->siglock);
2186                 old_set = current->blocked;
2187                 spin_unlock_irq(&current->sighand->siglock);
2188
2189         set_old:
2190                 error = -EFAULT;
2191                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2192                         goto out;
2193         }
2194         error = 0;
2195 out:
2196         return error;
2197 }
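
/*
 * Illustrative userspace sketch (not part of this file): glibc's
 * sigprocmask() and pthread_sigmask() wrappers end up in the syscall
 * above.  A minimal example that blocks SIGINT around a critical section
 * and then restores the previous mask (do_critical_work() is a
 * hypothetical helper):
 *
 *	sigset_t set, old;
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGINT);
 *	sigprocmask(SIG_BLOCK, &set, &old);
 *	do_critical_work();
 *	sigprocmask(SIG_SETMASK, &old, NULL);
 */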
2198
2199 long do_sigpending(void __user *set, unsigned long sigsetsize)
2200 {
2201         long error = -EINVAL;
2202         sigset_t pending;
2203
2204         if (sigsetsize > sizeof(sigset_t))
2205                 goto out;
2206
2207         spin_lock_irq(&current->sighand->siglock);
2208         sigorsets(&pending, &current->pending.signal,
2209                   &current->signal->shared_pending.signal);
2210         spin_unlock_irq(&current->sighand->siglock);
2211
2212         /* Outside the lock because only this thread touches it.  */
2213         sigandsets(&pending, &current->blocked, &pending);
2214
2215         error = -EFAULT;
2216         if (!copy_to_user(set, &pending, sigsetsize))
2217                 error = 0;
2218
2219 out:
2220         return error;
2221 }
2222
2223 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
2224 {
2225         return do_sigpending(set, sigsetsize);
2226 }
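
/*
 * Illustrative userspace sketch (not part of this file): sigpending()
 * reports exactly the blocked-and-pending set computed above.  A minimal
 * example, assuming SIGINT is currently blocked by the caller:
 *
 *	sigset_t pending;
 *
 *	if (sigpending(&pending) == 0 && sigismember(&pending, SIGINT))
 *		puts("SIGINT is pending but blocked");
 */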
2227
2228 #ifndef HAVE_ARCH_COPY_SIGINFO_TO_USER
2229
2230 int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2231 {
2232         int err;
2233
2234         if (!access_ok (VERIFY_WRITE, to, sizeof(siginfo_t)))
2235                 return -EFAULT;
2236         if (from->si_code < 0)
2237                 return __copy_to_user(to, from, sizeof(siginfo_t))
2238                         ? -EFAULT : 0;
2239         /*
2240          * If you change siginfo_t structure, please be sure
2241          * this code is fixed accordingly.
2242          * Please remember to update the signalfd_copyinfo() function
2243          * inside fs/signalfd.c too, in case siginfo_t changes.
2244          * It should never copy any pad contained in the structure
2245          * to avoid security leaks, but must copy the generic
2246          * 3 ints plus the relevant union member.
2247          */
2248         err = __put_user(from->si_signo, &to->si_signo);
2249         err |= __put_user(from->si_errno, &to->si_errno);
2250         err |= __put_user((short)from->si_code, &to->si_code);
2251         switch (from->si_code & __SI_MASK) {
2252         case __SI_KILL:
2253                 err |= __put_user(from->si_pid, &to->si_pid);
2254                 err |= __put_user(from->si_uid, &to->si_uid);
2255                 break;
2256         case __SI_TIMER:
2257                 err |= __put_user(from->si_tid, &to->si_tid);
2258                 err |= __put_user(from->si_overrun, &to->si_overrun);
2259                 err |= __put_user(from->si_ptr, &to->si_ptr);
2260                 break;
2261         case __SI_POLL:
2262                 err |= __put_user(from->si_band, &to->si_band);
2263                 err |= __put_user(from->si_fd, &to->si_fd);
2264                 break;
2265         case __SI_FAULT:
2266                 err |= __put_user(from->si_addr, &to->si_addr);
2267 #ifdef __ARCH_SI_TRAPNO
2268                 err |= __put_user(from->si_trapno, &to->si_trapno);
2269 #endif
2270 #ifdef BUS_MCEERR_AO
2271                 /* 
2272                  * Other callers might not initialize the si_lsb field,
2273                  * so check explicitly for the right codes here.
2274                  */
2275                 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
2276                         err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2277 #endif
2278                 break;
2279         case __SI_CHLD:
2280                 err |= __put_user(from->si_pid, &to->si_pid);
2281                 err |= __put_user(from->si_uid, &to->si_uid);
2282                 err |= __put_user(from->si_status, &to->si_status);
2283                 err |= __put_user(from->si_utime, &to->si_utime);
2284                 err |= __put_user(from->si_stime, &to->si_stime);
2285                 break;
2286         case __SI_RT: /* This is not generated by the kernel as of now. */
2287         case __SI_MESGQ: /* But this is */
2288                 err |= __put_user(from->si_pid, &to->si_pid);
2289                 err |= __put_user(from->si_uid, &to->si_uid);
2290                 err |= __put_user(from->si_ptr, &to->si_ptr);
2291                 break;
2292         default: /* this is just in case for now ... */
2293                 err |= __put_user(from->si_pid, &to->si_pid);
2294                 err |= __put_user(from->si_uid, &to->si_uid);
2295                 break;
2296         }
2297         return err;
2298 }
2299
2300 #endif
2301
2302 SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2303                 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2304                 size_t, sigsetsize)
2305 {
2306         int ret, sig;
2307         sigset_t these;
2308         struct timespec ts;
2309         siginfo_t info;
2310         long timeout = 0;
2311
2312         /* XXX: Don't preclude handling different sized sigset_t's.  */
2313         if (sigsetsize != sizeof(sigset_t))
2314                 return -EINVAL;
2315
2316         if (copy_from_user(&these, uthese, sizeof(these)))
2317                 return -EFAULT;
2318                 
2319         /*
2320          * Invert the set of allowed signals to get those we
2321          * want to block.
2322          */
2323         sigdelsetmask(&these, sigmask(SIGKILL)|sigmask(SIGSTOP));
2324         signotset(&these);
2325
2326         if (uts) {
2327                 if (copy_from_user(&ts, uts, sizeof(ts)))
2328                         return -EFAULT;
2329                 if (ts.tv_nsec >= 1000000000L || ts.tv_nsec < 0
2330                     || ts.tv_sec < 0)
2331                         return -EINVAL;
2332         }
2333
2334         spin_lock_irq(&current->sighand->siglock);
2335         sig = dequeue_signal(current, &these, &info);
2336         if (!sig) {
2337                 timeout = MAX_SCHEDULE_TIMEOUT;
2338                 if (uts)
2339                         timeout = (timespec_to_jiffies(&ts)
2340                                    + (ts.tv_sec || ts.tv_nsec));
2341
2342                 if (timeout) {
2343                         /* None ready -- temporarily unblock those we're
2344                          * interested in while we are sleeping, so that we'll
2345                          * be awakened when they arrive.  */
2346                         current->real_blocked = current->blocked;
2347                         sigandsets(&current->blocked, &current->blocked, &these);
2348                         recalc_sigpending();
2349                         spin_unlock_irq(&current->sighand->siglock);
2350
2351                         timeout = schedule_timeout_interruptible(timeout);
2352
2353                         spin_lock_irq(&current->sighand->siglock);
2354                         sig = dequeue_signal(current, &these, &info);
2355                         current->blocked = current->real_blocked;
2356                         siginitset(&current->real_blocked, 0);
2357                         recalc_sigpending();
2358                 }
2359         }
2360         spin_unlock_irq(&current->sighand->siglock);
2361
2362         if (sig) {
2363                 ret = sig;
2364                 if (uinfo) {
2365                         if (copy_siginfo_to_user(uinfo, &info))
2366                                 ret = -EFAULT;
2367                 }
2368         } else {
2369                 ret = -EAGAIN;
2370                 if (timeout)
2371                         ret = -EINTR;
2372         }
2373
2374         return ret;
2375 }
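
/*
 * Illustrative userspace sketch (not part of this file): glibc's
 * sigtimedwait() maps onto the syscall above.  The signals being waited
 * for must be blocked first so they stay queued instead of being
 * delivered asynchronously:
 *
 *	sigset_t set;
 *	siginfo_t si;
 *	struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };
 *
 *	sigemptyset(&set);
 *	sigaddset(&set, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &set, NULL);
 *	if (sigtimedwait(&set, &si, &ts) == SIGUSR1)
 *		printf("SIGUSR1 from pid %d\n", (int)si.si_pid);
 */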
2376
2377 SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2378 {
2379         struct siginfo info;
2380
2381         info.si_signo = sig;
2382         info.si_errno = 0;
2383         info.si_code = SI_USER;
2384         info.si_pid = task_tgid_vnr(current);
2385         info.si_uid = current_uid();
2386
2387         return kill_something_info(sig, &info, pid);
2388 }
2389
2390 static int
2391 do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
2392 {
2393         struct task_struct *p;
2394         int error = -ESRCH;
2395
2396         rcu_read_lock();
2397         p = find_task_by_vpid(pid);
2398         if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
2399                 error = check_kill_permission(sig, info, p);
2400                 /*
2401                  * The null signal is a permissions and process existence
2402                  * probe.  No signal is actually delivered.
2403                  */
2404                 if (!error && sig) {
2405                         error = do_send_sig_info(sig, info, p, false);
2406                         /*
2407                          * If lock_task_sighand() failed we pretend the task
2408                          * dies after receiving the signal. The window is tiny,
2409                          * and the signal is private anyway.
2410                          */
2411                         if (unlikely(error == -ESRCH))
2412                                 error = 0;
2413                 }
2414         }
2415         rcu_read_unlock();
2416
2417         return error;
2418 }
2419
2420 static int do_tkill(pid_t tgid, pid_t pid, int sig)
2421 {
2422         struct siginfo info;
2423
2424         info.si_signo = sig;
2425         info.si_errno = 0;
2426         info.si_code = SI_TKILL;
2427         info.si_pid = task_tgid_vnr(current);
2428         info.si_uid = current_uid();
2429
2430         return do_send_specific(tgid, pid, sig, &info);
2431 }
2432
2433 /**
2434  *  sys_tgkill - send signal to one specific thread
2435  *  @tgid: the thread group ID of the thread
2436  *  @pid: the PID of the thread
2437  *  @sig: signal to be sent
2438  *
2439  *  This syscall also checks the @tgid and returns -ESRCH even if the PID
2440  *  exists but no longer belongs to the target process. This
2441  *  method solves the problem of threads exiting and PIDs getting reused.
2442  */
2443 SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2444 {
2445         /* This is only valid for single tasks */
2446         if (pid <= 0 || tgid <= 0)
2447                 return -EINVAL;
2448
2449         return do_tkill(tgid, pid, sig);
2450 }
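
/*
 * Illustrative userspace sketch (not part of this file): tgkill is usually
 * reached through syscall(2); "tgid" and "tid" are assumed to be already
 * known to the caller (e.g. from gettid() in the target thread):
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	syscall(SYS_tgkill, tgid, tid, SIGUSR1);
 */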
2451
2452 /*
2453  *  Send a signal to only one task, even if it's a CLONE_THREAD task.
2454  */
2455 SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2456 {
2457         /* This is only valid for single tasks */
2458         if (pid <= 0)
2459                 return -EINVAL;
2460
2461         return do_tkill(0, pid, sig);
2462 }
2463
2464 SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2465                 siginfo_t __user *, uinfo)
2466 {
2467         siginfo_t info;
2468
2469         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2470                 return -EFAULT;
2471
2472         /* Not even root can pretend to send signals from the kernel.
2473          * Nor can they impersonate a kill()/tgkill(), which adds source info.
2474          */
2475         if (info.si_code != SI_QUEUE) {
2476                 /* We used to allow any < 0 si_code */
2477                 WARN_ON_ONCE(info.si_code < 0);
2478                 return -EPERM;
2479         }
2480         info.si_signo = sig;
2481
2482         /* POSIX.1b doesn't mention process groups.  */
2483         return kill_proc_info(sig, &info, pid);
2484 }
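
/*
 * Illustrative userspace sketch (not part of this file): the SI_QUEUE
 * restriction above matches what glibc's sigqueue() sends, queueing a
 * signal together with a caller-supplied value; "pid" is assumed known:
 *
 *	union sigval value = { .sival_int = 42 };
 *
 *	sigqueue(pid, SIGUSR1, value);
 */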
2485
2486 long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, siginfo_t *info)
2487 {
2488         /* This is only valid for single tasks */
2489         if (pid <= 0 || tgid <= 0)
2490                 return -EINVAL;
2491
2492         /* Not even root can pretend to send signals from the kernel.
2493          * Nor can they impersonate a kill()/tgkill(), which adds source info.
2494          */
2495         if (info->si_code != SI_QUEUE) {
2496                 /* We used to allow any < 0 si_code */
2497                 WARN_ON_ONCE(info->si_code < 0);
2498                 return -EPERM;
2499         }
2500         info->si_signo = sig;
2501
2502         return do_send_specific(tgid, pid, sig, info);
2503 }
2504
2505 SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig,
2506                 siginfo_t __user *, uinfo)
2507 {
2508         siginfo_t info;
2509
2510         if (copy_from_user(&info, uinfo, sizeof(siginfo_t)))
2511                 return -EFAULT;
2512
2513         return do_rt_tgsigqueueinfo(tgid, pid, sig, &info);
2514 }
2515
2516 int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
2517 {
2518         struct task_struct *t = current;
2519         struct k_sigaction *k;
2520         sigset_t mask;
2521
2522         if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig)))
2523                 return -EINVAL;
2524
2525         k = &t->sighand->action[sig-1];
2526
2527         spin_lock_irq(&current->sighand->siglock);
2528         if (oact)
2529                 *oact = *k;
2530
2531         if (act) {
2532                 sigdelsetmask(&act->sa.sa_mask,
2533                               sigmask(SIGKILL) | sigmask(SIGSTOP));
2534                 *k = *act;
2535                 /*
2536                  * POSIX 3.3.1.3:
2537                  *  "Setting a signal action to SIG_IGN for a signal that is
2538                  *   pending shall cause the pending signal to be discarded,
2539                  *   whether or not it is blocked."
2540                  *
2541                  *  "Setting a signal action to SIG_DFL for a signal that is
2542                  *   pending and whose default action is to ignore the signal
2543                  *   (for example, SIGCHLD), shall cause the pending signal to
2544                  *   be discarded, whether or not it is blocked"
2545                  */
2546                 if (sig_handler_ignored(sig_handler(t, sig), sig)) {
2547                         sigemptyset(&mask);
2548                         sigaddset(&mask, sig);
2549                         rm_from_queue_full(&mask, &t->signal->shared_pending);
2550                         do {
2551                                 rm_from_queue_full(&mask, &t->pending);
2552                                 t = next_thread(t);
2553                         } while (t != current);
2554                 }
2555         }
2556
2557         spin_unlock_irq(&current->sighand->siglock);
2558         return 0;
2559 }
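
/*
 * Illustrative userspace sketch (not part of this file): the POSIX rule
 * quoted above means that installing SIG_IGN via sigaction() also discards
 * any already-pending instance of that signal in every thread.  A minimal
 * example, assuming standard libc:
 *
 *	struct sigaction sa;
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = SIG_IGN;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGCHLD, &sa, NULL);
 */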
2560
2561 int 
2562 do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long sp)
2563 {
2564         stack_t oss;
2565         int error;
2566
2567         oss.ss_sp = (void __user *) current->sas_ss_sp;
2568         oss.ss_size = current->sas_ss_size;
2569         oss.ss_flags = sas_ss_flags(sp);
2570
2571         if (uss) {
2572                 void __user *ss_sp;
2573                 size_t ss_size;
2574                 int ss_flags;
2575
2576                 error = -EFAULT;
2577                 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2578                         goto out;
2579                 error = __get_user(ss_sp, &uss->ss_sp) |
2580                         __get_user(ss_flags, &uss->ss_flags) |
2581                         __get_user(ss_size, &uss->ss_size);
2582                 if (error)
2583                         goto out;
2584
2585                 error = -EPERM;
2586                 if (on_sig_stack(sp))
2587                         goto out;
2588
2589                 error = -EINVAL;
2590                 /*
2591                  *
2592                  * Note - this code used to test ss_flags incorrectly;
2593                  *        old code may have been written using ss_flags==0
2594                  *        to mean ss_flags==SS_ONSTACK (as this was the only
2595                  *        way that worked), so this fix preserves that older
2596                  *        mechanism.
2597                  */
2598                 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2599                         goto out;
2600
2601                 if (ss_flags == SS_DISABLE) {
2602                         ss_size = 0;
2603                         ss_sp = NULL;
2604                 } else {
2605                         error = -ENOMEM;
2606                         if (ss_size < MINSIGSTKSZ)
2607                                 goto out;
2608                 }
2609
2610                 current->sas_ss_sp = (unsigned long) ss_sp;
2611                 current->sas_ss_size = ss_size;
2612         }
2613
2614         error = 0;
2615         if (uoss) {
2616                 error = -EFAULT;
2617                 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2618                         goto out;
2619                 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2620                         __put_user(oss.ss_size, &uoss->ss_size) |
2621                         __put_user(oss.ss_flags, &uoss->ss_flags);
2622         }
2623
2624 out:
2625         return error;
2626 }
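
/*
 * Illustrative userspace sketch (not part of this file): sigaltstack()
 * registers the alternate stack tracked above; a handler only runs on it
 * when installed with SA_ONSTACK.  A minimal example, assuming standard
 * libc and a hypothetical handler on_segv():
 *
 *	stack_t ss;
 *	struct sigaction sa;
 *
 *	ss.ss_sp = malloc(SIGSTKSZ);
 *	ss.ss_size = SIGSTKSZ;
 *	ss.ss_flags = 0;
 *	sigaltstack(&ss, NULL);
 *
 *	memset(&sa, 0, sizeof(sa));
 *	sa.sa_handler = on_segv;
 *	sa.sa_flags = SA_ONSTACK;
 *	sigemptyset(&sa.sa_mask);
 *	sigaction(SIGSEGV, &sa, NULL);
 */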
2627
2628 #ifdef __ARCH_WANT_SYS_SIGPENDING
2629
2630 SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2631 {
2632         return do_sigpending(set, sizeof(*set));
2633 }
2634
2635 #endif
2636
2637 #ifdef __ARCH_WANT_SYS_SIGPROCMASK
2638 /* Some platforms have their own version with special arguments; others
2639    support only sys_rt_sigprocmask.  */
2640
2641 SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2642                 old_sigset_t __user *, oset)
2643 {
2644         int error;
2645         old_sigset_t old_set, new_set;
2646
2647         if (set) {
2648                 error = -EFAULT;
2649                 if (copy_from_user(&new_set, set, sizeof(*set)))
2650                         goto out;
2651                 new_set &= ~(sigmask(SIGKILL) | sigmask(SIGSTOP));
2652
2653                 spin_lock_irq(&current->sighand->siglock);
2654                 old_set = current->blocked.sig[0];
2655
2656                 error = 0;
2657                 switch (how) {
2658                 default:
2659                         error = -EINVAL;
2660                         break;
2661                 case SIG_BLOCK:
2662                         sigaddsetmask(&current->blocked, new_set);
2663                         break;
2664                 case SIG_UNBLOCK:
2665                         sigdelsetmask(&current->blocked, new_set);
2666                         break;
2667                 case SIG_SETMASK:
2668                         current->blocked.sig[0] = new_set;
2669                         break;
2670                 }
2671
2672                 recalc_sigpending();
2673                 spin_unlock_irq(&current->sighand->siglock);
2674                 if (error)
2675                         goto out;
2676                 if (oset)
2677                         goto set_old;
2678         } else if (oset) {
2679                 old_set = current->blocked.sig[0];
2680         set_old:
2681                 error = -EFAULT;
2682                 if (copy_to_user(oset, &old_set, sizeof(*oset)))
2683                         goto out;
2684         }
2685         error = 0;
2686 out:
2687         return error;
2688 }
2689 #endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2690
2691 #ifdef __ARCH_WANT_SYS_RT_SIGACTION
2692 SYSCALL_DEFINE4(rt_sigaction, int, sig,
2693                 const struct sigaction __user *, act,
2694                 struct sigaction __user *, oact,
2695                 size_t, sigsetsize)
2696 {
2697         struct k_sigaction new_sa, old_sa;
2698         int ret = -EINVAL;
2699
2700         /* XXX: Don't preclude handling different sized sigset_t's.  */
2701         if (sigsetsize != sizeof(sigset_t))
2702                 goto out;
2703
2704         if (act) {
2705                 if (copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa)))
2706                         return -EFAULT;
2707         }
2708
2709         ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL);
2710
2711         if (!ret && oact) {
2712                 if (copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa)))
2713                         return -EFAULT;
2714         }
2715 out:
2716         return ret;
2717 }
2718 #endif /* __ARCH_WANT_SYS_RT_SIGACTION */
2719
2720 #ifdef __ARCH_WANT_SYS_SGETMASK
2721
2722 /*
2723  * For backwards compatibility.  Functionality superseded by sigprocmask.
2724  */
2725 SYSCALL_DEFINE0(sgetmask)
2726 {
2727         /* SMP safe */
2728         return current->blocked.sig[0];
2729 }
2730
2731 SYSCALL_DEFINE1(ssetmask, int, newmask)
2732 {
2733         int old;
2734
2735         spin_lock_irq(&current->sighand->siglock);
2736         old = current->blocked.sig[0];
2737
2738         siginitset(&current->blocked, newmask & ~(sigmask(SIGKILL)|
2739                                                   sigmask(SIGSTOP)));
2740         recalc_sigpending();
2741         spin_unlock_irq(&current->sighand->siglock);
2742
2743         return old;
2744 }
2745 #endif /* __ARCH_WANT_SYS_SGETMASK */
2746
2747 #ifdef __ARCH_WANT_SYS_SIGNAL
2748 /*
2749  * For backwards compatibility.  Functionality superseded by sigaction.
2750  */
2751 SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler)
2752 {
2753         struct k_sigaction new_sa, old_sa;
2754         int ret;
2755
2756         new_sa.sa.sa_handler = handler;
2757         new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
2758         sigemptyset(&new_sa.sa.sa_mask);
2759
2760         ret = do_sigaction(sig, &new_sa, &old_sa);
2761
2762         return ret ? ret : (unsigned long)old_sa.sa.sa_handler;
2763 }
2764 #endif /* __ARCH_WANT_SYS_SIGNAL */
2765
2766 #ifdef __ARCH_WANT_SYS_PAUSE
2767
2768 SYSCALL_DEFINE0(pause)
2769 {
2770         current->state = TASK_INTERRUPTIBLE;
2771         schedule();
2772         return -ERESTARTNOHAND;
2773 }
2774
2775 #endif
2776
2777 #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2778 SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2779 {
2780         sigset_t newset;
2781
2782         /* XXX: Don't preclude handling different sized sigset_t's.  */
2783         if (sigsetsize != sizeof(sigset_t))
2784                 return -EINVAL;
2785
2786         if (copy_from_user(&newset, unewset, sizeof(newset)))
2787                 return -EFAULT;
2788         sigdelsetmask(&newset, sigmask(SIGKILL)|sigmask(SIGSTOP));
2789
2790         spin_lock_irq(&current->sighand->siglock);
2791         current->saved_sigmask = current->blocked;
2792         current->blocked = newset;
2793         recalc_sigpending();
2794         spin_unlock_irq(&current->sighand->siglock);
2795
2796         current->state = TASK_INTERRUPTIBLE;
2797         schedule();
2798         set_restore_sigmask();
2799         return -ERESTARTNOHAND;
2800 }
2801 #endif /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
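
/*
 * Illustrative userspace sketch (not part of this file): sigsuspend()
 * atomically swaps in a temporary mask and sleeps, which is exactly what
 * the syscall above implements with saved_sigmask.  The classic race-free
 * wait for SIGUSR1, where "got_usr1" is a hypothetical volatile
 * sig_atomic_t flag set by the SIGUSR1 handler:
 *
 *	sigset_t block, wait_mask;
 *
 *	sigemptyset(&block);
 *	sigaddset(&block, SIGUSR1);
 *	sigprocmask(SIG_BLOCK, &block, &wait_mask);
 *	sigdelset(&wait_mask, SIGUSR1);
 *	while (!got_usr1)
 *		sigsuspend(&wait_mask);
 */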
2802
2803 __attribute__((weak)) const char *arch_vma_name(struct vm_area_struct *vma)
2804 {
2805         return NULL;
2806 }
2807
2808 void __init signals_init(void)
2809 {
2810         sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC);
2811 }
2812
2813 #ifdef CONFIG_KGDB_KDB
2814 #include <linux/kdb.h>
2815 /*
2816  * kdb_send_sig_info - Allows kdb to send signals without exposing
2817  * signal internals.  This function checks if the required locks are
2818  * available before calling the main signal code, to avoid kdb
2819  * deadlocks.
2820  */
2821 void
2822 kdb_send_sig_info(struct task_struct *t, struct siginfo *info)
2823 {
2824         static struct task_struct *kdb_prev_t;
2825         int sig, new_t;
2826         if (!spin_trylock(&t->sighand->siglock)) {
2827                 kdb_printf("Can't do kill command now.\n"
2828                            "The sigmask lock is held somewhere else in "
2829                            "kernel, try again later\n");
2830                 return;
2831         }
2832         spin_unlock(&t->sighand->siglock);
2833         new_t = kdb_prev_t != t;
2834         kdb_prev_t = t;
2835         if (t->state != TASK_RUNNING && new_t) {
2836                 kdb_printf("Process is not RUNNING, sending a signal from "
2837                            "kdb risks deadlock\n"
2838                            "on the run queue locks. "
2839                            "The signal has _not_ been sent.\n"
2840                            "Reissue the kill command if you want to risk "
2841                            "the deadlock.\n");
2842                 return;
2843         }
2844         sig = info->si_signo;
2845         if (send_sig_info(sig, info, t))
2846                 kdb_printf("Failed to deliver signal %d to process %d.\n",
2847                            sig, t->pid);
2848         else
2849                 kdb_printf("Signal %d is sent to process %d.\n", sig, t->pid);
2850 }
2851 #endif  /* CONFIG_KGDB_KDB */