Commit | Line | Data |
---|---|---|
457c8996 | 1 | // SPDX-License-Identifier: GPL-2.0-only |
1da177e4 LT |
2 | /* |
3 | * linux/kernel/signal.c | |
4 | * | |
5 | * Copyright (C) 1991, 1992 Linus Torvalds | |
6 | * | |
7 | * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson | |
8 | * | |
9 | * 2003-06-02 Jim Houston - Concurrent Computer Corp. | |
10 | * Changes to use preallocated sigqueue structures | |
11 | * to allow signals to be sent reliably. | |
12 | */ | |
13 | ||
1da177e4 | 14 | #include <linux/slab.h> |
9984de1a | 15 | #include <linux/export.h> |
1da177e4 | 16 | #include <linux/init.h> |
589ee628 | 17 | #include <linux/sched/mm.h> |
8703e8a4 | 18 | #include <linux/sched/user.h> |
b17b0153 | 19 | #include <linux/sched/debug.h> |
29930025 | 20 | #include <linux/sched/task.h> |
68db0cf1 | 21 | #include <linux/sched/task_stack.h> |
32ef5517 | 22 | #include <linux/sched/cputime.h> |
3eb39f47 | 23 | #include <linux/file.h> |
1da177e4 | 24 | #include <linux/fs.h> |
3eb39f47 | 25 | #include <linux/proc_fs.h> |
1da177e4 LT |
26 | #include <linux/tty.h> |
27 | #include <linux/binfmts.h> | |
179899fd | 28 | #include <linux/coredump.h> |
1da177e4 LT |
29 | #include <linux/security.h> |
30 | #include <linux/syscalls.h> | |
31 | #include <linux/ptrace.h> | |
7ed20e1a | 32 | #include <linux/signal.h> |
fba2afaa | 33 | #include <linux/signalfd.h> |
f84d49b2 | 34 | #include <linux/ratelimit.h> |
35de254d | 35 | #include <linux/tracehook.h> |
c59ede7b | 36 | #include <linux/capability.h> |
7dfb7103 | 37 | #include <linux/freezer.h> |
84d73786 SB |
38 | #include <linux/pid_namespace.h> |
39 | #include <linux/nsproxy.h> | |
6b550f94 | 40 | #include <linux/user_namespace.h> |
0326f5a9 | 41 | #include <linux/uprobes.h> |
90268439 | 42 | #include <linux/compat.h> |
2b5faa4c | 43 | #include <linux/cn_proc.h> |
52f5684c | 44 | #include <linux/compiler.h> |
31ea70e0 | 45 | #include <linux/posix-timers.h> |
76f969e8 | 46 | #include <linux/cgroup.h> |
b48345aa | 47 | #include <linux/audit.h> |
52f5684c | 48 | |
d1eb650f MH |
49 | #define CREATE_TRACE_POINTS |
50 | #include <trace/events/signal.h> | |
84d73786 | 51 | |
1da177e4 | 52 | #include <asm/param.h> |
7c0f6ba6 | 53 | #include <linux/uaccess.h> |
1da177e4 LT |
54 | #include <asm/unistd.h> |
55 | #include <asm/siginfo.h> | |
d550bbd4 | 56 | #include <asm/cacheflush.h> |
307d522f | 57 | #include <asm/syscall.h> /* for syscall_get_* */ |
1da177e4 LT |
58 | |
59 | /* | |
60 | * SLAB caches for signal bits. | |
61 | */ | |
62 | ||
e18b890b | 63 | static struct kmem_cache *sigqueue_cachep; |
1da177e4 | 64 | |
f84d49b2 NO |
65 | int print_fatal_signals __read_mostly; |
66 | ||
35de254d | 67 | static void __user *sig_handler(struct task_struct *t, int sig) |
93585eea | 68 | { |
35de254d RM |
69 | return t->sighand->action[sig - 1].sa.sa_handler; |
70 | } | |
93585eea | 71 | |
e4a8b4ef | 72 | static inline bool sig_handler_ignored(void __user *handler, int sig) |
35de254d | 73 | { |
93585eea | 74 | /* Is it explicitly or implicitly ignored? */ |
93585eea | 75 | return handler == SIG_IGN || |
e4a8b4ef | 76 | (handler == SIG_DFL && sig_kernel_ignore(sig)); |
93585eea | 77 | } |
1da177e4 | 78 | |
41aaa481 | 79 | static bool sig_task_ignored(struct task_struct *t, int sig, bool force) |
1da177e4 | 80 | { |
35de254d | 81 | void __user *handler; |
1da177e4 | 82 | |
f008faff ON |
83 | handler = sig_handler(t, sig); |
84 | ||
86989c41 EB |
85 | /* SIGKILL and SIGSTOP may not be sent to the global init */ |
86 | if (unlikely(is_global_init(t) && sig_kernel_only(sig))) | |
87 | return true; | |
88 | ||
f008faff | 89 | if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) && |
ac253850 | 90 | handler == SIG_DFL && !(force && sig_kernel_only(sig))) |
41aaa481 | 91 | return true; |
f008faff | 92 | |
33da8e7c | 93 | /* Only allow kernel generated signals to this kthread */ |
e8b33b8c | 94 | if (unlikely((t->flags & PF_KTHREAD) && |
33da8e7c EB |
95 | (handler == SIG_KTHREAD_KERNEL) && !force)) |
96 | return true; | |
97 | ||
f008faff ON |
98 | return sig_handler_ignored(handler, sig); |
99 | } | |
100 | ||
6a0cdcd7 | 101 | static bool sig_ignored(struct task_struct *t, int sig, bool force) |
f008faff | 102 | { |
1da177e4 LT |
103 | /* |
104 | * Blocked signals are never ignored, since the | |
105 | * signal handler may change by the time it is | |
106 | * unblocked. | |
107 | */ | |
325d22df | 108 | if (sigismember(&t->blocked, sig) || sigismember(&t->real_blocked, sig)) |
6a0cdcd7 | 109 | return false; |
1da177e4 | 110 | |
35de254d | 111 | /* |
628c1bcb ON |
112 | * Tracers may want to know about even an ignored signal unless it |
113 | * is SIGKILL, which can't be reported anyway but can be ignored | |
114 | * by a SIGNAL_UNKILLABLE task. | |
35de254d | 115 | */ |
628c1bcb | 116 | if (t->ptrace && sig != SIGKILL) |
6a0cdcd7 | 117 | return false; |
628c1bcb ON |
118 | |
119 | return sig_task_ignored(t, sig, force); | |
1da177e4 LT |
120 | } |
121 | ||
122 | /* | |
123 | * Re-calculate pending state from the set of locally pending | |
124 | * signals, globally pending signals, and blocked signals. | |
125 | */ | |
938696a8 | 126 | static inline bool has_pending_signals(sigset_t *signal, sigset_t *blocked) |
1da177e4 LT |
127 | { |
128 | unsigned long ready; | |
129 | long i; | |
130 | ||
131 | switch (_NSIG_WORDS) { | |
132 | default: | |
133 | for (i = _NSIG_WORDS, ready = 0; --i >= 0 ;) | |
134 | ready |= signal->sig[i] &~ blocked->sig[i]; | |
135 | break; | |
136 | ||
137 | case 4: ready = signal->sig[3] &~ blocked->sig[3]; | |
138 | ready |= signal->sig[2] &~ blocked->sig[2]; | |
139 | ready |= signal->sig[1] &~ blocked->sig[1]; | |
140 | ready |= signal->sig[0] &~ blocked->sig[0]; | |
141 | break; | |
142 | ||
143 | case 2: ready = signal->sig[1] &~ blocked->sig[1]; | |
144 | ready |= signal->sig[0] &~ blocked->sig[0]; | |
145 | break; | |
146 | ||
147 | case 1: ready = signal->sig[0] &~ blocked->sig[0]; | |
148 | } | |
149 | return ready != 0; | |
150 | } | |
151 | ||
152 | #define PENDING(p,b) has_pending_signals(&(p)->signal, (b)) | |
153 | ||
09ae854e | 154 | static bool recalc_sigpending_tsk(struct task_struct *t) |
1da177e4 | 155 | { |
76f969e8 | 156 | if ((t->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) || |
1da177e4 | 157 | PENDING(&t->pending, &t->blocked) || |
76f969e8 RG |
158 | PENDING(&t->signal->shared_pending, &t->blocked) || |
159 | cgroup_task_frozen(t)) { | |
1da177e4 | 160 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
09ae854e | 161 | return true; |
7bb44ade | 162 | } |
09ae854e | 163 | |
b74d0deb RM |
164 | /* |
165 | * We must never clear the flag in another thread, or in current | |
166 | * when it's possible the current syscall is returning -ERESTART*. | |
167 | * So we don't clear it here; only callers who know they should clear it do so. | |
168 | */ | |
09ae854e | 169 | return false; |
7bb44ade RM |
170 | } |
171 | ||
172 | /* | |
173 | * After recalculating TIF_SIGPENDING, we need to make sure the task wakes up. | |
174 | * This is superfluous when called on current, the wakeup is a harmless no-op. | |
175 | */ | |
176 | void recalc_sigpending_and_wake(struct task_struct *t) | |
177 | { | |
178 | if (recalc_sigpending_tsk(t)) | |
179 | signal_wake_up(t, 0); | |
1da177e4 LT |
180 | } |
181 | ||
182 | void recalc_sigpending(void) | |
183 | { | |
8df1947c | 184 | if (!recalc_sigpending_tsk(current) && !freezing(current)) |
b74d0deb RM |
185 | clear_thread_flag(TIF_SIGPENDING); |
186 | ||
1da177e4 | 187 | } |
fb50f5a4 | 188 | EXPORT_SYMBOL(recalc_sigpending); |
1da177e4 | 189 | |
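/*
 * Illustrative sketch (not part of the original source): code that changes
 * the blocked mask re-evaluates TIF_SIGPENDING under siglock, roughly:
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	current->blocked = newset;
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 */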
088fe47c EB |
190 | void calculate_sigpending(void) |
191 | { | |
192 | /* Have any signals or users of TIF_SIGPENDING been delayed | |
193 | * until after fork? | |
194 | */ | |
195 | spin_lock_irq(¤t->sighand->siglock); | |
196 | set_tsk_thread_flag(current, TIF_SIGPENDING); | |
197 | recalc_sigpending(); | |
198 | spin_unlock_irq(¤t->sighand->siglock); | |
199 | } | |
200 | ||
1da177e4 LT |
201 | /* Given the mask, find the first available signal that should be serviced. */ |
202 | ||
a27341cd LT |
203 | #define SYNCHRONOUS_MASK \ |
204 | (sigmask(SIGSEGV) | sigmask(SIGBUS) | sigmask(SIGILL) | \ | |
a0727e8c | 205 | sigmask(SIGTRAP) | sigmask(SIGFPE) | sigmask(SIGSYS)) |
a27341cd | 206 | |
fba2afaa | 207 | int next_signal(struct sigpending *pending, sigset_t *mask) |
1da177e4 LT |
208 | { |
209 | unsigned long i, *s, *m, x; | |
210 | int sig = 0; | |
f84d49b2 | 211 | |
1da177e4 LT |
212 | s = pending->signal.sig; |
213 | m = mask->sig; | |
a27341cd LT |
214 | |
215 | /* | |
216 | * Handle the first word specially: it contains the | |
217 | * synchronous signals that need to be dequeued first. | |
218 | */ | |
219 | x = *s &~ *m; | |
220 | if (x) { | |
221 | if (x & SYNCHRONOUS_MASK) | |
222 | x &= SYNCHRONOUS_MASK; | |
223 | sig = ffz(~x) + 1; | |
224 | return sig; | |
225 | } | |
226 | ||
1da177e4 LT |
227 | switch (_NSIG_WORDS) { |
228 | default: | |
a27341cd LT |
229 | for (i = 1; i < _NSIG_WORDS; ++i) { |
230 | x = *++s &~ *++m; | |
231 | if (!x) | |
232 | continue; | |
233 | sig = ffz(~x) + i*_NSIG_BPW + 1; | |
234 | break; | |
235 | } | |
1da177e4 LT |
236 | break; |
237 | ||
a27341cd LT |
238 | case 2: |
239 | x = s[1] &~ m[1]; | |
240 | if (!x) | |
1da177e4 | 241 | break; |
a27341cd | 242 | sig = ffz(~x) + _NSIG_BPW + 1; |
1da177e4 LT |
243 | break; |
244 | ||
a27341cd LT |
245 | case 1: |
246 | /* Nothing to do */ | |
1da177e4 LT |
247 | break; |
248 | } | |
f84d49b2 | 249 | |
1da177e4 LT |
250 | return sig; |
251 | } | |
252 | ||
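/*
 * Worked example (illustrative, not part of the original source): with
 * SIGSEGV and SIGUSR1 both pending and unblocked, next_signal() picks
 * SIGSEGV, because the first word is masked with SYNCHRONOUS_MASK before
 * the lowest set bit is chosen:
 *
 *	x = pending & ~blocked;		// both bits set
 *	x &= SYNCHRONOUS_MASK;		// only the SIGSEGV bit survives
 *	sig = ffz(~x) + 1;		// == SIGSEGV
 */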
f84d49b2 NO |
253 | static inline void print_dropped_signal(int sig) |
254 | { | |
255 | static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); | |
256 | ||
257 | if (!print_fatal_signals) | |
258 | return; | |
259 | ||
260 | if (!__ratelimit(&ratelimit_state)) | |
261 | return; | |
262 | ||
747800ef | 263 | pr_info("%s/%d: reached RLIMIT_SIGPENDING, dropped signal %d\n", |
f84d49b2 NO |
264 | current->comm, current->pid, sig); |
265 | } | |
266 | ||
d79fdd6d | 267 | /** |
7dd3db54 | 268 | * task_set_jobctl_pending - set jobctl pending bits |
d79fdd6d | 269 | * @task: target task |
7dd3db54 | 270 | * @mask: pending bits to set |
d79fdd6d | 271 | * |
7dd3db54 TH |
272 | * Set @mask in @task->jobctl. @mask must be a subset of |
273 | * %JOBCTL_PENDING_MASK | %JOBCTL_STOP_CONSUME | %JOBCTL_STOP_SIGMASK | | |
274 | * %JOBCTL_TRAPPING. If stop signo is being set, the existing signo is | |
275 | * cleared. If @task is already being killed or exiting, this function | |
276 | * becomes a no-op. | |
277 | * | |
278 | * CONTEXT: | |
279 | * Must be called with @task->sighand->siglock held. | |
280 | * | |
281 | * RETURNS: | |
282 | * %true if @mask is set, %false if made noop because @task was dying. | |
283 | */ | |
b76808e6 | 284 | bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask) |
7dd3db54 TH |
285 | { |
286 | BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME | | |
287 | JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING)); | |
288 | BUG_ON((mask & JOBCTL_TRAPPING) && !(mask & JOBCTL_PENDING_MASK)); | |
289 | ||
1e4cf0d3 | 290 | if (unlikely(fatal_signal_pending(task) || (task->flags & PF_EXITING))) |
7dd3db54 TH |
291 | return false; |
292 | ||
293 | if (mask & JOBCTL_STOP_SIGMASK) | |
294 | task->jobctl &= ~JOBCTL_STOP_SIGMASK; | |
295 | ||
296 | task->jobctl |= mask; | |
297 | return true; | |
298 | } | |
299 | ||
d79fdd6d | 300 | /** |
a8f072c1 | 301 | * task_clear_jobctl_trapping - clear jobctl trapping bit |
d79fdd6d TH |
302 | * @task: target task |
303 | * | |
a8f072c1 TH |
304 | * If JOBCTL_TRAPPING is set, a ptracer is waiting for us to enter TRACED. |
305 | * Clear it and wake up the ptracer. Note that we don't need any further | |
306 | * locking. @task->siglock guarantees that @task->parent points to the | |
307 | * ptracer. | |
d79fdd6d TH |
308 | * |
309 | * CONTEXT: | |
310 | * Must be called with @task->sighand->siglock held. | |
311 | */ | |
73ddff2b | 312 | void task_clear_jobctl_trapping(struct task_struct *task) |
d79fdd6d | 313 | { |
a8f072c1 TH |
314 | if (unlikely(task->jobctl & JOBCTL_TRAPPING)) { |
315 | task->jobctl &= ~JOBCTL_TRAPPING; | |
650226bd | 316 | smp_mb(); /* advised by wake_up_bit() */ |
62c124ff | 317 | wake_up_bit(&task->jobctl, JOBCTL_TRAPPING_BIT); |
d79fdd6d TH |
318 | } |
319 | } | |
320 | ||
e5c1902e | 321 | /** |
3759a0d9 | 322 | * task_clear_jobctl_pending - clear jobctl pending bits |
e5c1902e | 323 | * @task: target task |
3759a0d9 | 324 | * @mask: pending bits to clear |
e5c1902e | 325 | * |
3759a0d9 TH |
326 | * Clear @mask from @task->jobctl. @mask must be subset of |
327 | * %JOBCTL_PENDING_MASK. If %JOBCTL_STOP_PENDING is being cleared, other | |
328 | * STOP bits are cleared together. | |
e5c1902e | 329 | * |
6dfca329 TH |
330 | * If clearing of @mask leaves no stop or trap pending, this function calls |
331 | * task_clear_jobctl_trapping(). | |
e5c1902e TH |
332 | * |
333 | * CONTEXT: | |
334 | * Must be called with @task->sighand->siglock held. | |
335 | */ | |
b76808e6 | 336 | void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask) |
e5c1902e | 337 | { |
3759a0d9 TH |
338 | BUG_ON(mask & ~JOBCTL_PENDING_MASK); |
339 | ||
340 | if (mask & JOBCTL_STOP_PENDING) | |
341 | mask |= JOBCTL_STOP_CONSUME | JOBCTL_STOP_DEQUEUED; | |
342 | ||
343 | task->jobctl &= ~mask; | |
6dfca329 TH |
344 | |
345 | if (!(task->jobctl & JOBCTL_PENDING_MASK)) | |
346 | task_clear_jobctl_trapping(task); | |
e5c1902e TH |
347 | } |
348 | ||
349 | /** | |
350 | * task_participate_group_stop - participate in a group stop | |
351 | * @task: task participating in a group stop | |
352 | * | |
a8f072c1 | 353 | * @task has %JOBCTL_STOP_PENDING set and is participating in a group stop. |
39efa3ef | 354 | * Group stop states are cleared and the group stop count is consumed if |
a8f072c1 | 355 | * %JOBCTL_STOP_CONSUME was set. If the consumption completes the group |
68d8681e | 356 | * stop, the appropriate `SIGNAL_*` flags are set. |
e5c1902e TH |
357 | * |
358 | * CONTEXT: | |
359 | * Must be called with @task->sighand->siglock held. | |
244056f9 TH |
360 | * |
361 | * RETURNS: | |
362 | * %true if group stop completion should be notified to the parent, %false | |
363 | * otherwise. | |
e5c1902e TH |
364 | */ |
365 | static bool task_participate_group_stop(struct task_struct *task) | |
366 | { | |
367 | struct signal_struct *sig = task->signal; | |
a8f072c1 | 368 | bool consume = task->jobctl & JOBCTL_STOP_CONSUME; |
e5c1902e | 369 | |
a8f072c1 | 370 | WARN_ON_ONCE(!(task->jobctl & JOBCTL_STOP_PENDING)); |
39efa3ef | 371 | |
3759a0d9 | 372 | task_clear_jobctl_pending(task, JOBCTL_STOP_PENDING); |
e5c1902e TH |
373 | |
374 | if (!consume) | |
375 | return false; | |
376 | ||
377 | if (!WARN_ON_ONCE(sig->group_stop_count == 0)) | |
378 | sig->group_stop_count--; | |
379 | ||
244056f9 TH |
380 | /* |
381 | * Tell the caller to notify completion iff we are entering into a | |
382 | * fresh group stop. Read comment in do_signal_stop() for details. | |
383 | */ | |
384 | if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) { | |
2d39b3cd | 385 | signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED); |
e5c1902e TH |
386 | return true; |
387 | } | |
388 | return false; | |
389 | } | |
390 | ||
924de3b8 EB |
391 | void task_join_group_stop(struct task_struct *task) |
392 | { | |
7b3c36fc ON |
393 | unsigned long mask = current->jobctl & JOBCTL_STOP_SIGMASK; |
394 | struct signal_struct *sig = current->signal; | |
395 | ||
396 | if (sig->group_stop_count) { | |
397 | sig->group_stop_count++; | |
398 | mask |= JOBCTL_STOP_CONSUME; | |
399 | } else if (!(sig->flags & SIGNAL_STOP_STOPPED)) | |
400 | return; | |
401 | ||
924de3b8 | 402 | /* Have the new thread join an on-going signal group stop */ |
7b3c36fc | 403 | task_set_jobctl_pending(task, mask | JOBCTL_STOP_PENDING); |
924de3b8 EB |
404 | } |
405 | ||
c69e8d9c DH |
406 | /* |
407 | * allocate a new signal queue record | |
408 | * - this may be called without locks if and only if t == current, otherwise an | |
5aba085e | 409 | * appropriate lock must be held to stop the target task from exiting |
c69e8d9c | 410 | */ |
f84d49b2 | 411 | static struct sigqueue * |
69995ebb TG |
412 | __sigqueue_alloc(int sig, struct task_struct *t, gfp_t gfp_flags, |
413 | int override_rlimit, const unsigned int sigqueue_flags) | |
1da177e4 LT |
414 | { |
415 | struct sigqueue *q = NULL; | |
d6469690 AG |
416 | struct ucounts *ucounts = NULL; |
417 | long sigpending; | |
1da177e4 | 418 | |
10b1fbdb | 419 | /* |
7cf7db8d TG |
420 | * Protect access to @t credentials. This can go away when all |
421 | * callers hold rcu read lock. | |
fda31c50 LT |
422 | * |
423 | * NOTE! A pending signal will hold on to the user refcount, | |
424 | * and we get/put the refcount only when the sigpending count | |
425 | * changes from/to zero. | |
10b1fbdb | 426 | */ |
7cf7db8d | 427 | rcu_read_lock(); |
d6469690 AG |
428 | ucounts = task_ucounts(t); |
429 | sigpending = inc_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1); | |
f3791f4d AG |
430 | switch (sigpending) { |
431 | case 1: | |
432 | if (likely(get_ucounts(ucounts))) | |
433 | break; | |
434 | fallthrough; | |
435 | case LONG_MAX: | |
436 | /* | |
437 | * we need to decrease the ucount in the userns tree on any | |
438 | * failure to avoid counts leaking. | |
439 | */ | |
440 | dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1); | |
441 | rcu_read_unlock(); | |
442 | return NULL; | |
443 | } | |
7cf7db8d | 444 | rcu_read_unlock(); |
f84d49b2 | 445 | |
f3791f4d | 446 | if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) { |
b4b27b9e | 447 | q = kmem_cache_alloc(sigqueue_cachep, gfp_flags); |
f84d49b2 NO |
448 | } else { |
449 | print_dropped_signal(sig); | |
450 | } | |
451 | ||
1da177e4 | 452 | if (unlikely(q == NULL)) { |
f3791f4d | 453 | if (dec_rlimit_ucounts(ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) |
d6469690 | 454 | put_ucounts(ucounts); |
1da177e4 LT |
455 | } else { |
456 | INIT_LIST_HEAD(&q->list); | |
69995ebb | 457 | q->flags = sigqueue_flags; |
d6469690 | 458 | q->ucounts = ucounts; |
1da177e4 | 459 | } |
d84f4f99 | 460 | return q; |
1da177e4 LT |
461 | } |
462 | ||
514a01b8 | 463 | static void __sigqueue_free(struct sigqueue *q) |
1da177e4 LT |
464 | { |
465 | if (q->flags & SIGQUEUE_PREALLOC) | |
466 | return; | |
d6469690 AG |
467 | if (q->ucounts && dec_rlimit_ucounts(q->ucounts, UCOUNT_RLIMIT_SIGPENDING, 1)) { |
468 | put_ucounts(q->ucounts); | |
469 | q->ucounts = NULL; | |
470 | } | |
b4b27b9e | 471 | kmem_cache_free(sigqueue_cachep, q); |
1da177e4 LT |
472 | } |
473 | ||
6a14c5c9 | 474 | void flush_sigqueue(struct sigpending *queue) |
1da177e4 LT |
475 | { |
476 | struct sigqueue *q; | |
477 | ||
478 | sigemptyset(&queue->signal); | |
479 | while (!list_empty(&queue->list)) { | |
480 | q = list_entry(queue->list.next, struct sigqueue , list); | |
481 | list_del_init(&q->list); | |
482 | __sigqueue_free(q); | |
483 | } | |
484 | } | |
485 | ||
486 | /* | |
9e7c8f8c | 487 | * Flush all pending signals for this kthread. |
1da177e4 | 488 | */ |
c81addc9 | 489 | void flush_signals(struct task_struct *t) |
1da177e4 LT |
490 | { |
491 | unsigned long flags; | |
492 | ||
493 | spin_lock_irqsave(&t->sighand->siglock, flags); | |
9e7c8f8c ON |
494 | clear_tsk_thread_flag(t, TIF_SIGPENDING); |
495 | flush_sigqueue(&t->pending); | |
496 | flush_sigqueue(&t->signal->shared_pending); | |
1da177e4 LT |
497 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
498 | } | |
fb50f5a4 | 499 | EXPORT_SYMBOL(flush_signals); |
1da177e4 | 500 | |
baa73d9e | 501 | #ifdef CONFIG_POSIX_TIMERS |
cbaffba1 ON |
502 | static void __flush_itimer_signals(struct sigpending *pending) |
503 | { | |
504 | sigset_t signal, retain; | |
505 | struct sigqueue *q, *n; | |
506 | ||
507 | signal = pending->signal; | |
508 | sigemptyset(&retain); | |
509 | ||
510 | list_for_each_entry_safe(q, n, &pending->list, list) { | |
511 | int sig = q->info.si_signo; | |
512 | ||
513 | if (likely(q->info.si_code != SI_TIMER)) { | |
514 | sigaddset(&retain, sig); | |
515 | } else { | |
516 | sigdelset(&signal, sig); | |
517 | list_del_init(&q->list); | |
518 | __sigqueue_free(q); | |
519 | } | |
520 | } | |
521 | ||
522 | sigorsets(&pending->signal, &signal, &retain); | |
523 | } | |
524 | ||
525 | void flush_itimer_signals(void) | |
526 | { | |
527 | struct task_struct *tsk = current; | |
528 | unsigned long flags; | |
529 | ||
530 | spin_lock_irqsave(&tsk->sighand->siglock, flags); | |
531 | __flush_itimer_signals(&tsk->pending); | |
532 | __flush_itimer_signals(&tsk->signal->shared_pending); | |
533 | spin_unlock_irqrestore(&tsk->sighand->siglock, flags); | |
534 | } | |
baa73d9e | 535 | #endif |
cbaffba1 | 536 | |
10ab825b ON |
537 | void ignore_signals(struct task_struct *t) |
538 | { | |
539 | int i; | |
540 | ||
541 | for (i = 0; i < _NSIG; ++i) | |
542 | t->sighand->action[i].sa.sa_handler = SIG_IGN; | |
543 | ||
544 | flush_signals(t); | |
545 | } | |
546 | ||
1da177e4 LT |
547 | /* |
548 | * Flush all handlers for a task. | |
549 | */ | |
550 | ||
551 | void | |
552 | flush_signal_handlers(struct task_struct *t, int force_default) | |
553 | { | |
554 | int i; | |
555 | struct k_sigaction *ka = &t->sighand->action[0]; | |
556 | for (i = _NSIG ; i != 0 ; i--) { | |
557 | if (force_default || ka->sa.sa_handler != SIG_IGN) | |
558 | ka->sa.sa_handler = SIG_DFL; | |
559 | ka->sa.sa_flags = 0; | |
522cff14 | 560 | #ifdef __ARCH_HAS_SA_RESTORER |
2ca39528 KC |
561 | ka->sa.sa_restorer = NULL; |
562 | #endif | |
1da177e4 LT |
563 | sigemptyset(&ka->sa.sa_mask); |
564 | ka++; | |
565 | } | |
566 | } | |
567 | ||
67a48a24 | 568 | bool unhandled_signal(struct task_struct *tsk, int sig) |
abd4f750 | 569 | { |
445a91d2 | 570 | void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; |
b460cbc5 | 571 | if (is_global_init(tsk)) |
67a48a24 CB |
572 | return true; |
573 | ||
445a91d2 | 574 | if (handler != SIG_IGN && handler != SIG_DFL) |
67a48a24 CB |
575 | return false; |
576 | ||
a288eecc TH |
577 | /* if ptraced, let the tracer determine */ |
578 | return !tsk->ptrace; | |
abd4f750 MAS |
579 | } |
580 | ||
ae7795bc | 581 | static void collect_signal(int sig, struct sigpending *list, kernel_siginfo_t *info, |
57db7e4a | 582 | bool *resched_timer) |
1da177e4 LT |
583 | { |
584 | struct sigqueue *q, *first = NULL; | |
1da177e4 | 585 | |
1da177e4 LT |
586 | /* |
587 | * Collect the siginfo appropriate to this signal. Check if | |
588 | * there is another siginfo for the same signal. | |
589 | */ | |
590 | list_for_each_entry(q, &list->list, list) { | |
591 | if (q->info.si_signo == sig) { | |
d4434207 ON |
592 | if (first) |
593 | goto still_pending; | |
1da177e4 LT |
594 | first = q; |
595 | } | |
596 | } | |
d4434207 ON |
597 | |
598 | sigdelset(&list->signal, sig); | |
599 | ||
1da177e4 | 600 | if (first) { |
d4434207 | 601 | still_pending: |
1da177e4 LT |
602 | list_del_init(&first->list); |
603 | copy_siginfo(info, &first->info); | |
57db7e4a EB |
604 | |
605 | *resched_timer = | |
606 | (first->flags & SIGQUEUE_PREALLOC) && | |
607 | (info->si_code == SI_TIMER) && | |
608 | (info->si_sys_private); | |
609 | ||
1da177e4 | 610 | __sigqueue_free(first); |
1da177e4 | 611 | } else { |
5aba085e RD |
612 | /* |
613 | * Ok, it wasn't in the queue. This must be | |
614 | * a fast-pathed signal or we must have been | |
615 | * out of queue space. So zero out the info. | |
1da177e4 | 616 | */ |
faf1f22b | 617 | clear_siginfo(info); |
1da177e4 LT |
618 | info->si_signo = sig; |
619 | info->si_errno = 0; | |
7486e5d9 | 620 | info->si_code = SI_USER; |
1da177e4 LT |
621 | info->si_pid = 0; |
622 | info->si_uid = 0; | |
623 | } | |
1da177e4 LT |
624 | } |
625 | ||
626 | static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, | |
ae7795bc | 627 | kernel_siginfo_t *info, bool *resched_timer) |
1da177e4 | 628 | { |
27d91e07 | 629 | int sig = next_signal(pending, mask); |
1da177e4 | 630 | |
2e01fabe | 631 | if (sig) |
57db7e4a | 632 | collect_signal(sig, pending, info, resched_timer); |
1da177e4 LT |
633 | return sig; |
634 | } | |
635 | ||
636 | /* | |
5aba085e | 637 | * Dequeue a signal and return the element to the caller, which is |
1da177e4 LT |
638 | * expected to free it. |
639 | * | |
640 | * All callers have to hold the siglock. | |
641 | */ | |
ae7795bc | 642 | int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info) |
1da177e4 | 643 | { |
57db7e4a | 644 | bool resched_timer = false; |
c5363d03 | 645 | int signr; |
caec4e8d BH |
646 | |
647 | /* We only dequeue private signals from ourselves, we don't let | |
648 | * signalfd steal them | |
649 | */ | |
57db7e4a | 650 | signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer); |
8bfd9a7a | 651 | if (!signr) { |
1da177e4 | 652 | signr = __dequeue_signal(&tsk->signal->shared_pending, |
57db7e4a | 653 | mask, info, &resched_timer); |
baa73d9e | 654 | #ifdef CONFIG_POSIX_TIMERS |
8bfd9a7a TG |
655 | /* |
656 | * itimer signal ? | |
657 | * | |
658 | * itimers are process shared and we restart periodic | |
659 | * itimers in the signal delivery path to prevent DoS | |
660 | * attacks in the high resolution timer case. This is | |
5aba085e | 661 | * compliant with the old way of self-restarting |
8bfd9a7a TG |
662 | * itimers, as the SIGALRM is a legacy signal and only |
663 | * queued once. Changing the restart behaviour to | |
664 | * restart the timer in the signal dequeue path is | |
665 | * reducing the timer noise on heavy loaded !highres | |
666 | * systems too. | |
667 | */ | |
668 | if (unlikely(signr == SIGALRM)) { | |
669 | struct hrtimer *tmr = &tsk->signal->real_timer; | |
670 | ||
671 | if (!hrtimer_is_queued(tmr) && | |
2456e855 | 672 | tsk->signal->it_real_incr != 0) { |
8bfd9a7a TG |
673 | hrtimer_forward(tmr, tmr->base->get_time(), |
674 | tsk->signal->it_real_incr); | |
675 | hrtimer_restart(tmr); | |
676 | } | |
677 | } | |
baa73d9e | 678 | #endif |
8bfd9a7a | 679 | } |
c5363d03 | 680 | |
b8fceee1 | 681 | recalc_sigpending(); |
c5363d03 PE |
682 | if (!signr) |
683 | return 0; | |
684 | ||
685 | if (unlikely(sig_kernel_stop(signr))) { | |
8bfd9a7a TG |
686 | /* |
687 | * Set a marker that we have dequeued a stop signal. Our | |
688 | * caller might release the siglock and then the pending | |
689 | * stop signal it is about to process is no longer in the | |
690 | * pending bitmasks, but must still be cleared by a SIGCONT | |
691 | * (and overruled by a SIGKILL). So those cases clear this | |
692 | * shared flag after we've set it. Note that this flag may | |
693 | * remain set after the signal we return is ignored or | |
694 | * handled. That doesn't matter because its only purpose | |
695 | * is to alert stop-signal processing code when another | |
696 | * processor has come along and cleared the flag. | |
697 | */ | |
a8f072c1 | 698 | current->jobctl |= JOBCTL_STOP_DEQUEUED; |
8bfd9a7a | 699 | } |
baa73d9e | 700 | #ifdef CONFIG_POSIX_TIMERS |
57db7e4a | 701 | if (resched_timer) { |
1da177e4 LT |
702 | /* |
703 | * Release the siglock to ensure proper locking order | |
704 | * of timer locks outside of siglocks. Note, we leave | |
705 | * irqs disabled here, since the posix-timers code is | |
706 | * about to disable them again anyway. | |
707 | */ | |
708 | spin_unlock(&tsk->sighand->siglock); | |
96fe3b07 | 709 | posixtimer_rearm(info); |
1da177e4 | 710 | spin_lock(&tsk->sighand->siglock); |
9943d3ac EB |
711 | |
712 | /* Don't expose the si_sys_private value to userspace */ | |
713 | info->si_sys_private = 0; | |
1da177e4 | 714 | } |
baa73d9e | 715 | #endif |
1da177e4 LT |
716 | return signr; |
717 | } | |
fb50f5a4 | 718 | EXPORT_SYMBOL_GPL(dequeue_signal); |
1da177e4 | 719 | |
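/*
 * Illustrative caller sketch (not part of the original source): a kernel
 * thread that has allowed a signal can drain one pending entry under the
 * required lock, roughly what kernel_dequeue_signal() in
 * <linux/sched/signal.h> does:
 *
 *	kernel_siginfo_t info;
 *	int sig;
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	sig = dequeue_signal(current, &current->blocked, &info);
 *	spin_unlock_irq(&current->sighand->siglock);
 */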
7146db33 EB |
720 | static int dequeue_synchronous_signal(kernel_siginfo_t *info) |
721 | { | |
722 | struct task_struct *tsk = current; | |
723 | struct sigpending *pending = &tsk->pending; | |
724 | struct sigqueue *q, *sync = NULL; | |
725 | ||
726 | /* | |
727 | * Might a synchronous signal be in the queue? | |
728 | */ | |
729 | if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK)) | |
730 | return 0; | |
731 | ||
732 | /* | |
733 | * Return the first synchronous signal in the queue. | |
734 | */ | |
735 | list_for_each_entry(q, &pending->list, list) { | |
7665a47f | 736 | /* Synchronous signals have a positive si_code */ |
7146db33 EB |
737 | if ((q->info.si_code > SI_USER) && |
738 | (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) { | |
739 | sync = q; | |
740 | goto next; | |
741 | } | |
742 | } | |
743 | return 0; | |
744 | next: | |
745 | /* | |
746 | * Check if there is another siginfo for the same signal. | |
747 | */ | |
748 | list_for_each_entry_continue(q, &pending->list, list) { | |
749 | if (q->info.si_signo == sync->info.si_signo) | |
750 | goto still_pending; | |
751 | } | |
752 | ||
753 | sigdelset(&pending->signal, sync->info.si_signo); | |
754 | recalc_sigpending(); | |
755 | still_pending: | |
756 | list_del_init(&sync->list); | |
757 | copy_siginfo(info, &sync->info); | |
758 | __sigqueue_free(sync); | |
759 | return info->si_signo; | |
760 | } | |
761 | ||
1da177e4 LT |
762 | /* |
763 | * Tell a process that it has a new active signal. |
764 | * | |
765 | * NOTE! we rely on the previous spin_lock to | |
766 | * lock interrupts for us! We can only be called with | |
767 | * "siglock" held, and the local interrupt must | |
768 | * have been disabled when that got acquired! | |
769 | * | |
770 | * No need to set need_resched since signal event passing | |
771 | * goes through ->blocked | |
772 | */ | |
910ffdb1 | 773 | void signal_wake_up_state(struct task_struct *t, unsigned int state) |
1da177e4 | 774 | { |
1da177e4 | 775 | set_tsk_thread_flag(t, TIF_SIGPENDING); |
1da177e4 | 776 | /* |
910ffdb1 | 777 | * TASK_WAKEKILL also means wake it up in the stopped/traced/killable |
f021a3c2 | 778 | * case. We don't check t->state here because there is a race with it |
1da177e4 LT |
779 | * executing on another processor and just now entering stopped state. |
780 | * By using wake_up_state, we ensure the process will wake up and | |
781 | * handle its death signal. | |
782 | */ | |
910ffdb1 | 783 | if (!wake_up_state(t, state | TASK_INTERRUPTIBLE)) |
1da177e4 LT |
784 | kick_process(t); |
785 | } | |
786 | ||
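/*
 * Usage note (illustrative, not part of the original source): the
 * signal_wake_up() and ptrace_signal_wake_up() wrappers in
 * <linux/sched/signal.h> call this roughly as
 *
 *	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
 *	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
 *
 * so a wake-up with resume set can also lift a stopped or traced task.
 */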
71fabd5e GA |
787 | /* |
788 | * Remove signals in mask from the pending set and free any matching | |
789 | * queued sigqueue entries. | |
790 | * | |
791 | * All callers must be holding the siglock. | |
71fabd5e | 792 | */ |
8f11351e | 793 | static void flush_sigqueue_mask(sigset_t *mask, struct sigpending *s) |
71fabd5e GA |
794 | { |
795 | struct sigqueue *q, *n; | |
796 | sigset_t m; | |
797 | ||
798 | sigandsets(&m, mask, &s->signal); | |
799 | if (sigisemptyset(&m)) | |
8f11351e | 800 | return; |
71fabd5e | 801 | |
702a5073 | 802 | sigandnsets(&s->signal, &s->signal, mask); |
71fabd5e GA |
803 | list_for_each_entry_safe(q, n, &s->list, list) { |
804 | if (sigismember(mask, q->info.si_signo)) { | |
805 | list_del_init(&q->list); | |
806 | __sigqueue_free(q); | |
807 | } | |
808 | } | |
71fabd5e | 809 | } |
1da177e4 | 810 | |
ae7795bc | 811 | static inline int is_si_special(const struct kernel_siginfo *info) |
614c517d | 812 | { |
4ff4c31a | 813 | return info <= SEND_SIG_PRIV; |
614c517d ON |
814 | } |
815 | ||
ae7795bc | 816 | static inline bool si_fromuser(const struct kernel_siginfo *info) |
614c517d ON |
817 | { |
818 | return info == SEND_SIG_NOINFO || | |
819 | (!is_si_special(info) && SI_FROMUSER(info)); | |
820 | } | |
821 | ||
39fd3393 SH |
822 | /* |
823 | * called with RCU read lock from check_kill_permission() | |
824 | */ | |
2a9b9094 | 825 | static bool kill_ok_by_cred(struct task_struct *t) |
39fd3393 SH |
826 | { |
827 | const struct cred *cred = current_cred(); | |
828 | const struct cred *tcred = __task_cred(t); | |
829 | ||
2a9b9094 CB |
830 | return uid_eq(cred->euid, tcred->suid) || |
831 | uid_eq(cred->euid, tcred->uid) || | |
832 | uid_eq(cred->uid, tcred->suid) || | |
833 | uid_eq(cred->uid, tcred->uid) || | |
834 | ns_capable(tcred->user_ns, CAP_KILL); | |
39fd3393 SH |
835 | } |
836 | ||
1da177e4 LT |
837 | /* |
838 | * Bad permissions for sending the signal | |
694f690d | 839 | * - the caller must hold the RCU read lock |
1da177e4 | 840 | */ |
ae7795bc | 841 | static int check_kill_permission(int sig, struct kernel_siginfo *info, |
1da177e4 LT |
842 | struct task_struct *t) |
843 | { | |
2e2ba22e | 844 | struct pid *sid; |
3b5e9e53 ON |
845 | int error; |
846 | ||
7ed20e1a | 847 | if (!valid_signal(sig)) |
3b5e9e53 ON |
848 | return -EINVAL; |
849 | ||
614c517d | 850 | if (!si_fromuser(info)) |
3b5e9e53 | 851 | return 0; |
e54dc243 | 852 | |
3b5e9e53 ON |
853 | error = audit_signal_info(sig, t); /* Let audit system see the signal */ |
854 | if (error) | |
1da177e4 | 855 | return error; |
3b5e9e53 | 856 | |
065add39 | 857 | if (!same_thread_group(current, t) && |
39fd3393 | 858 | !kill_ok_by_cred(t)) { |
2e2ba22e ON |
859 | switch (sig) { |
860 | case SIGCONT: | |
2e2ba22e | 861 | sid = task_session(t); |
2e2ba22e ON |
862 | /* |
863 | * We don't return the error if sid == NULL. The | |
864 | * task was unhashed, the caller must notice this. | |
865 | */ | |
866 | if (!sid || sid == task_session(current)) | |
867 | break; | |
df561f66 | 868 | fallthrough; |
2e2ba22e ON |
869 | default: |
870 | return -EPERM; | |
871 | } | |
872 | } | |
c2f0c7c3 | 873 | |
6b4f3d01 | 874 | return security_task_kill(t, info, sig, NULL); |
1da177e4 LT |
875 | } |
876 | ||
fb1d910c TH |
877 | /** |
878 | * ptrace_trap_notify - schedule trap to notify ptracer | |
879 | * @t: tracee wanting to notify tracer | |
880 | * | |
881 | * This function schedules sticky ptrace trap which is cleared on the next | |
882 | * TRAP_STOP to notify ptracer of an event. @t must have been seized by | |
883 | * ptracer. | |
884 | * | |
544b2c91 TH |
885 | * If @t is running, STOP trap will be taken. If trapped for STOP and |
886 | * ptracer is listening for events, tracee is woken up so that it can | |
887 | * re-trap for the new event. If trapped otherwise, STOP trap will be | |
888 | * eventually taken without returning to userland after the existing traps | |
889 | * are finished by PTRACE_CONT. | |
fb1d910c TH |
890 | * |
891 | * CONTEXT: | |
892 | * Must be called with @task->sighand->siglock held. | |
893 | */ | |
894 | static void ptrace_trap_notify(struct task_struct *t) | |
895 | { | |
896 | WARN_ON_ONCE(!(t->ptrace & PT_SEIZED)); | |
897 | assert_spin_locked(&t->sighand->siglock); | |
898 | ||
899 | task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); | |
910ffdb1 | 900 | ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); |
fb1d910c TH |
901 | } |
902 | ||
1da177e4 | 903 | /* |
7e695a5e ON |
904 | * Handle magic process-wide effects of stop/continue signals. Unlike |
905 | * the signal actions, these happen immediately at signal-generation | |
1da177e4 LT |
906 | * time regardless of blocking, ignoring, or handling. This does the |
907 | * actual continuing for SIGCONT, but not the actual stopping for stop | |
7e695a5e ON |
908 | * signals. The process stop is done as a signal action for SIG_DFL. |
909 | * | |
910 | * Returns true if the signal should be actually delivered, otherwise | |
911 | * it should be dropped. | |
1da177e4 | 912 | */ |
403bad72 | 913 | static bool prepare_signal(int sig, struct task_struct *p, bool force) |
1da177e4 | 914 | { |
ad16a460 | 915 | struct signal_struct *signal = p->signal; |
1da177e4 | 916 | struct task_struct *t; |
9490592f | 917 | sigset_t flush; |
1da177e4 | 918 | |
403bad72 | 919 | if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) { |
5fa534c9 | 920 | if (!(signal->flags & SIGNAL_GROUP_EXIT)) |
403bad72 | 921 | return sig == SIGKILL; |
1da177e4 | 922 | /* |
7e695a5e | 923 | * The process is in the middle of dying, nothing to do. |
1da177e4 | 924 | */ |
7e695a5e | 925 | } else if (sig_kernel_stop(sig)) { |
1da177e4 LT |
926 | /* |
927 | * This is a stop signal. Remove SIGCONT from all queues. | |
928 | */ | |
9490592f | 929 | siginitset(&flush, sigmask(SIGCONT)); |
c09c1441 | 930 | flush_sigqueue_mask(&flush, &signal->shared_pending); |
9490592f | 931 | for_each_thread(p, t) |
c09c1441 | 932 | flush_sigqueue_mask(&flush, &t->pending); |
1da177e4 | 933 | } else if (sig == SIGCONT) { |
fc321d2e | 934 | unsigned int why; |
1da177e4 | 935 | /* |
1deac632 | 936 | * Remove all stop signals from all queues, wake all threads. |
1da177e4 | 937 | */ |
9490592f | 938 | siginitset(&flush, SIG_KERNEL_STOP_MASK); |
c09c1441 | 939 | flush_sigqueue_mask(&flush, &signal->shared_pending); |
9490592f | 940 | for_each_thread(p, t) { |
c09c1441 | 941 | flush_sigqueue_mask(&flush, &t->pending); |
3759a0d9 | 942 | task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); |
fb1d910c TH |
943 | if (likely(!(t->ptrace & PT_SEIZED))) |
944 | wake_up_state(t, __TASK_STOPPED); | |
945 | else | |
946 | ptrace_trap_notify(t); | |
9490592f | 947 | } |
1da177e4 | 948 | |
fc321d2e ON |
949 | /* |
950 | * Notify the parent with CLD_CONTINUED if we were stopped. | |
951 | * | |
952 | * If we were in the middle of a group stop, we pretend it | |
953 | * was already finished, and then continued. Since SIGCHLD | |
954 | * doesn't queue we report only CLD_STOPPED, as if the next | |
955 | * CLD_CONTINUED was dropped. | |
956 | */ | |
957 | why = 0; | |
ad16a460 | 958 | if (signal->flags & SIGNAL_STOP_STOPPED) |
fc321d2e | 959 | why |= SIGNAL_CLD_CONTINUED; |
ad16a460 | 960 | else if (signal->group_stop_count) |
fc321d2e ON |
961 | why |= SIGNAL_CLD_STOPPED; |
962 | ||
963 | if (why) { | |
021e1ae3 | 964 | /* |
ae6d2ed7 | 965 | * The first thread which returns from do_signal_stop() |
021e1ae3 | 966 | * will take ->siglock, notice SIGNAL_CLD_MASK, and |
2e58f57d | 967 | * notify its parent. See get_signal(). |
021e1ae3 | 968 | */ |
2d39b3cd | 969 | signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED); |
ad16a460 ON |
970 | signal->group_stop_count = 0; |
971 | signal->group_exit_code = 0; | |
1da177e4 | 972 | } |
1da177e4 | 973 | } |
7e695a5e | 974 | |
def8cf72 | 975 | return !sig_ignored(p, sig, force); |
1da177e4 LT |
976 | } |
977 | ||
71f11dc0 ON |
978 | /* |
979 | * Test if P wants to take SIG. After we've checked all threads with this, | |
980 | * it's equivalent to finding no threads not blocking SIG. Any threads not | |
981 | * blocking SIG were ruled out because they are not running and already | |
982 | * have pending signals. Such threads will dequeue from the shared queue | |
983 | * as soon as they're available, so putting the signal on the shared queue | |
984 | * will be equivalent to sending it to one such thread. | |
985 | */ | |
acd14e62 | 986 | static inline bool wants_signal(int sig, struct task_struct *p) |
71f11dc0 ON |
987 | { |
988 | if (sigismember(&p->blocked, sig)) | |
acd14e62 CB |
989 | return false; |
990 | ||
71f11dc0 | 991 | if (p->flags & PF_EXITING) |
acd14e62 CB |
992 | return false; |
993 | ||
71f11dc0 | 994 | if (sig == SIGKILL) |
acd14e62 CB |
995 | return true; |
996 | ||
71f11dc0 | 997 | if (task_is_stopped_or_traced(p)) |
acd14e62 CB |
998 | return false; |
999 | ||
5c251e9d | 1000 | return task_curr(p) || !task_sigpending(p); |
71f11dc0 ON |
1001 | } |
1002 | ||
07296149 | 1003 | static void complete_signal(int sig, struct task_struct *p, enum pid_type type) |
71f11dc0 ON |
1004 | { |
1005 | struct signal_struct *signal = p->signal; | |
1006 | struct task_struct *t; | |
1007 | ||
1008 | /* | |
1009 | * Now find a thread we can wake up to take the signal off the queue. | |
1010 | * | |
1011 | * If the main thread wants the signal, it gets first crack. | |
1012 | * Probably the least surprising to the average bear. | |
1013 | */ | |
1014 | if (wants_signal(sig, p)) | |
1015 | t = p; | |
07296149 | 1016 | else if ((type == PIDTYPE_PID) || thread_group_empty(p)) |
71f11dc0 ON |
1017 | /* |
1018 | * There is just one thread and it does not need to be woken. | |
1019 | * It will dequeue unblocked signals before it runs again. | |
1020 | */ | |
1021 | return; | |
1022 | else { | |
1023 | /* | |
1024 | * Otherwise try to find a suitable thread. | |
1025 | */ | |
1026 | t = signal->curr_target; | |
1027 | while (!wants_signal(sig, t)) { | |
1028 | t = next_thread(t); | |
1029 | if (t == signal->curr_target) | |
1030 | /* | |
1031 | * No thread needs to be woken. | |
1032 | * Any eligible threads will see | |
1033 | * the signal in the queue soon. | |
1034 | */ | |
1035 | return; | |
1036 | } | |
1037 | signal->curr_target = t; | |
1038 | } | |
1039 | ||
1040 | /* | |
1041 | * Found a killable thread. If the signal will be fatal, | |
1042 | * then start taking the whole group down immediately. | |
1043 | */ | |
fae5fa44 | 1044 | if (sig_fatal(p, sig) && |
42691579 | 1045 | !(signal->flags & SIGNAL_GROUP_EXIT) && |
71f11dc0 | 1046 | !sigismember(&t->real_blocked, sig) && |
42691579 | 1047 | (sig == SIGKILL || !p->ptrace)) { |
71f11dc0 ON |
1048 | /* |
1049 | * This signal will be fatal to the whole group. | |
1050 | */ | |
1051 | if (!sig_kernel_coredump(sig)) { | |
1052 | /* | |
1053 | * Start a group exit and wake everybody up. | |
1054 | * This way we don't have other threads | |
1055 | * running and doing things after a slower | |
1056 | * thread has the fatal signal pending. | |
1057 | */ | |
1058 | signal->flags = SIGNAL_GROUP_EXIT; | |
1059 | signal->group_exit_code = sig; | |
1060 | signal->group_stop_count = 0; | |
1061 | t = p; | |
1062 | do { | |
6dfca329 | 1063 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); |
71f11dc0 ON |
1064 | sigaddset(&t->pending.signal, SIGKILL); |
1065 | signal_wake_up(t, 1); | |
1066 | } while_each_thread(p, t); | |
1067 | return; | |
1068 | } | |
1069 | } | |
1070 | ||
1071 | /* | |
1072 | * The signal is already in the shared-pending queue. | |
1073 | * Tell the chosen thread to wake up and dequeue it. | |
1074 | */ | |
1075 | signal_wake_up(t, sig == SIGKILL); | |
1076 | return; | |
1077 | } | |
1078 | ||
a19e2c01 | 1079 | static inline bool legacy_queue(struct sigpending *signals, int sig) |
af7fff9c PE |
1080 | { |
1081 | return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); | |
1082 | } | |
1083 | ||
ae7795bc | 1084 | static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t, |
8ad23dea | 1085 | enum pid_type type, bool force) |
1da177e4 | 1086 | { |
2ca3515a | 1087 | struct sigpending *pending; |
6e65acba | 1088 | struct sigqueue *q; |
7a0aeb14 | 1089 | int override_rlimit; |
6c303d3a | 1090 | int ret = 0, result; |
0a16b607 | 1091 | |
6e65acba | 1092 | assert_spin_locked(&t->sighand->siglock); |
921cf9f6 | 1093 | |
6c303d3a | 1094 | result = TRACE_SIGNAL_IGNORED; |
8ad23dea | 1095 | if (!prepare_signal(sig, t, force)) |
6c303d3a | 1096 | goto ret; |
2ca3515a | 1097 | |
5a883cee | 1098 | pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending; |
2acb024d PE |
1099 | /* |
1100 | * Short-circuit ignored signals and support queuing | |
1101 | * exactly one non-rt signal, so that we can get more | |
1102 | * detailed information about the cause of the signal. | |
1103 | */ | |
6c303d3a | 1104 | result = TRACE_SIGNAL_ALREADY_PENDING; |
7e695a5e | 1105 | if (legacy_queue(pending, sig)) |
6c303d3a ON |
1106 | goto ret; |
1107 | ||
1108 | result = TRACE_SIGNAL_DELIVERED; | |
1da177e4 | 1109 | /* |
a692933a | 1110 | * Skip useless siginfo allocation for SIGKILL and kernel threads. |
1da177e4 | 1111 | */ |
e8b33b8c | 1112 | if ((sig == SIGKILL) || (t->flags & PF_KTHREAD)) |
1da177e4 LT |
1113 | goto out_set; |
1114 | ||
5aba085e RD |
1115 | /* |
1116 | * Real-time signals must be queued if sent by sigqueue, or | |
1117 | * some other real-time mechanism. It is implementation | |
1118 | * defined whether kill() does so. We attempt to do so, on | |
1119 | * the principle of least surprise, but since kill is not | |
1120 | * allowed to fail with EAGAIN when low on memory we just | |
1121 | * make sure at least one signal gets delivered and don't | |
1122 | * pass on the info struct. | |
1123 | */ | |
7a0aeb14 VN |
1124 | if (sig < SIGRTMIN) |
1125 | override_rlimit = (is_si_special(info) || info->si_code >= 0); | |
1126 | else | |
1127 | override_rlimit = 0; | |
1128 | ||
69995ebb TG |
1129 | q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit, 0); |
1130 | ||
1da177e4 | 1131 | if (q) { |
2ca3515a | 1132 | list_add_tail(&q->list, &pending->list); |
1da177e4 | 1133 | switch ((unsigned long) info) { |
b67a1b9e | 1134 | case (unsigned long) SEND_SIG_NOINFO: |
faf1f22b | 1135 | clear_siginfo(&q->info); |
1da177e4 LT |
1136 | q->info.si_signo = sig; |
1137 | q->info.si_errno = 0; | |
1138 | q->info.si_code = SI_USER; | |
9cd4fd10 | 1139 | q->info.si_pid = task_tgid_nr_ns(current, |
09bca05c | 1140 | task_active_pid_ns(t)); |
7a0cf094 EB |
1141 | rcu_read_lock(); |
1142 | q->info.si_uid = | |
1143 | from_kuid_munged(task_cred_xxx(t, user_ns), | |
1144 | current_uid()); | |
1145 | rcu_read_unlock(); | |
1da177e4 | 1146 | break; |
b67a1b9e | 1147 | case (unsigned long) SEND_SIG_PRIV: |
faf1f22b | 1148 | clear_siginfo(&q->info); |
1da177e4 LT |
1149 | q->info.si_signo = sig; |
1150 | q->info.si_errno = 0; | |
1151 | q->info.si_code = SI_KERNEL; | |
1152 | q->info.si_pid = 0; | |
1153 | q->info.si_uid = 0; | |
1154 | break; | |
1155 | default: | |
1156 | copy_siginfo(&q->info, info); | |
1157 | break; | |
1158 | } | |
8917bef3 EB |
1159 | } else if (!is_si_special(info) && |
1160 | sig >= SIGRTMIN && info->si_code != SI_USER) { | |
1161 | /* | |
1162 | * Queue overflow, abort. We may abort if the | |
1163 | * signal was rt and sent by user using something | |
1164 | * other than kill(). | |
1165 | */ | |
1166 | result = TRACE_SIGNAL_OVERFLOW_FAIL; | |
1167 | ret = -EAGAIN; | |
1168 | goto ret; | |
1169 | } else { | |
1170 | /* | |
1171 | * This is a silent loss of information. We still | |
1172 | * send the signal, but the *info bits are lost. | |
1173 | */ | |
1174 | result = TRACE_SIGNAL_LOSE_INFO; | |
1da177e4 LT |
1175 | } |
1176 | ||
1177 | out_set: | |
53c30337 | 1178 | signalfd_notify(t, sig); |
2ca3515a | 1179 | sigaddset(&pending->signal, sig); |
c3ad2c3b EB |
1180 | |
1181 | /* Let multiprocess signals appear after on-going forks */ | |
1182 | if (type > PIDTYPE_TGID) { | |
1183 | struct multiprocess_signals *delayed; | |
1184 | hlist_for_each_entry(delayed, &t->signal->multiprocess, node) { | |
1185 | sigset_t *signal = &delayed->signal; | |
1186 | /* Can't queue both a stop and a continue signal */ | |
1187 | if (sig == SIGCONT) | |
1188 | sigdelsetmask(signal, SIG_KERNEL_STOP_MASK); | |
1189 | else if (sig_kernel_stop(sig)) | |
1190 | sigdelset(signal, SIGCONT); | |
1191 | sigaddset(signal, sig); | |
1192 | } | |
1193 | } | |
1194 | ||
07296149 | 1195 | complete_signal(sig, t, type); |
6c303d3a | 1196 | ret: |
5a883cee | 1197 | trace_signal_generate(sig, info, t, type != PIDTYPE_PID, result); |
6c303d3a | 1198 | return ret; |
1da177e4 LT |
1199 | } |
1200 | ||
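/*
 * Worked example (illustrative, not part of the original source) of the
 * legacy_queue() check above: sending SIGUSR1 twice to a task that has it
 * blocked leaves one pending instance, while a real-time signal queues
 * once per send, because only signals below SIGRTMIN are collapsed:
 *
 *	kill(pid, SIGUSR1);			// pending
 *	kill(pid, SIGUSR1);			// collapsed, still one entry
 *	sigqueue(pid, SIGRTMIN + 1, val);	// queued
 *	sigqueue(pid, SIGRTMIN + 1, val);	// queued again
 */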
7a0cf094 EB |
1201 | static inline bool has_si_pid_and_uid(struct kernel_siginfo *info) |
1202 | { | |
1203 | bool ret = false; | |
1204 | switch (siginfo_layout(info->si_signo, info->si_code)) { | |
1205 | case SIL_KILL: | |
1206 | case SIL_CHLD: | |
1207 | case SIL_RT: | |
1208 | ret = true; | |
1209 | break; | |
1210 | case SIL_TIMER: | |
1211 | case SIL_POLL: | |
1212 | case SIL_FAULT: | |
9abcabe3 | 1213 | case SIL_FAULT_TRAPNO: |
7a0cf094 EB |
1214 | case SIL_FAULT_MCEERR: |
1215 | case SIL_FAULT_BNDERR: | |
1216 | case SIL_FAULT_PKUERR: | |
f4ac7302 | 1217 | case SIL_FAULT_PERF_EVENT: |
7a0cf094 EB |
1218 | case SIL_SYS: |
1219 | ret = false; | |
1220 | break; | |
1221 | } | |
1222 | return ret; | |
1223 | } | |
1224 | ||
ae7795bc | 1225 | static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t, |
b213984b | 1226 | enum pid_type type) |
7978b567 | 1227 | { |
8ad23dea EB |
1228 | /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */ |
1229 | bool force = false; | |
921cf9f6 | 1230 | |
8ad23dea EB |
1231 | if (info == SEND_SIG_NOINFO) { |
1232 | /* Force if sent from an ancestor pid namespace */ | |
1233 | force = !task_pid_nr_ns(current, task_active_pid_ns(t)); | |
1234 | } else if (info == SEND_SIG_PRIV) { | |
1235 | /* Don't ignore kernel generated signals */ | |
1236 | force = true; | |
1237 | } else if (has_si_pid_and_uid(info)) { | |
1238 | /* SIGKILL and SIGSTOP are special or have ids */ |
7a0cf094 EB |
1239 | struct user_namespace *t_user_ns; |
1240 | ||
1241 | rcu_read_lock(); | |
1242 | t_user_ns = task_cred_xxx(t, user_ns); | |
1243 | if (current_user_ns() != t_user_ns) { | |
1244 | kuid_t uid = make_kuid(current_user_ns(), info->si_uid); | |
1245 | info->si_uid = from_kuid_munged(t_user_ns, uid); | |
1246 | } | |
1247 | rcu_read_unlock(); | |
921cf9f6 | 1248 | |
8ad23dea EB |
1249 | /* A kernel generated signal? */ |
1250 | force = (info->si_code == SI_KERNEL); | |
1251 | ||
1252 | /* From an ancestor pid namespace? */ | |
1253 | if (!task_pid_nr_ns(current, task_active_pid_ns(t))) { | |
7a0cf094 | 1254 | info->si_pid = 0; |
8ad23dea EB |
1255 | force = true; |
1256 | } | |
7a0cf094 | 1257 | } |
8ad23dea | 1258 | return __send_signal(sig, info, t, type, force); |
7978b567 SB |
1259 | } |
1260 | ||
4aaefee5 | 1261 | static void print_fatal_signal(int signr) |
45807a1d | 1262 | { |
4aaefee5 | 1263 | struct pt_regs *regs = signal_pt_regs(); |
747800ef | 1264 | pr_info("potentially unexpected fatal signal %d.\n", signr); |
45807a1d | 1265 | |
ca5cd877 | 1266 | #if defined(__i386__) && !defined(__arch_um__) |
747800ef | 1267 | pr_info("code at %08lx: ", regs->ip); |
45807a1d IM |
1268 | { |
1269 | int i; | |
1270 | for (i = 0; i < 16; i++) { | |
1271 | unsigned char insn; | |
1272 | ||
b45c6e76 AK |
1273 | if (get_user(insn, (unsigned char *)(regs->ip + i))) |
1274 | break; | |
747800ef | 1275 | pr_cont("%02x ", insn); |
45807a1d IM |
1276 | } |
1277 | } | |
747800ef | 1278 | pr_cont("\n"); |
45807a1d | 1279 | #endif |
3a9f84d3 | 1280 | preempt_disable(); |
45807a1d | 1281 | show_regs(regs); |
3a9f84d3 | 1282 | preempt_enable(); |
45807a1d IM |
1283 | } |
1284 | ||
1285 | static int __init setup_print_fatal_signals(char *str) | |
1286 | { | |
1287 | get_option (&str, &print_fatal_signals); | |
1288 | ||
1289 | return 1; | |
1290 | } | |
1291 | ||
1292 | __setup("print-fatal-signals=", setup_print_fatal_signals); | |
1da177e4 | 1293 | |
4cd4b6d4 | 1294 | int |
ae7795bc | 1295 | __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p) |
4cd4b6d4 | 1296 | { |
b213984b | 1297 | return send_signal(sig, info, p, PIDTYPE_TGID); |
4cd4b6d4 PE |
1298 | } |
1299 | ||
ae7795bc | 1300 | int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, |
40b3b025 | 1301 | enum pid_type type) |
4a30debf ON |
1302 | { |
1303 | unsigned long flags; | |
1304 | int ret = -ESRCH; | |
1305 | ||
1306 | if (lock_task_sighand(p, &flags)) { | |
b213984b | 1307 | ret = send_signal(sig, info, p, type); |
4a30debf ON |
1308 | unlock_task_sighand(p, &flags); |
1309 | } | |
1310 | ||
1311 | return ret; | |
1312 | } | |
1313 | ||
1da177e4 LT |
1314 | /* |
1315 | * Force a signal that the process can't ignore: if necessary | |
1316 | * we unblock the signal and change any SIG_IGN to SIG_DFL. | |
ae74c3b6 LT |
1317 | * |
1318 | * Note: If we unblock the signal, we always reset it to SIG_DFL, | |
1319 | * since we do not want to have a signal handler that was blocked | |
1320 | * be invoked when user space had explicitly blocked it. | |
1321 | * | |
80fe728d ON |
1322 | * We don't want to have recursive SIGSEGV's etc, for example, |
1323 | * that is why we also clear SIGNAL_UNKILLABLE. | |
1da177e4 | 1324 | */ |
59c0e696 | 1325 | static int |
307d522f | 1326 | force_sig_info_to_task(struct kernel_siginfo *info, struct task_struct *t, bool sigdfl) |
1da177e4 LT |
1327 | { |
1328 | unsigned long int flags; | |
ae74c3b6 LT |
1329 | int ret, blocked, ignored; |
1330 | struct k_sigaction *action; | |
59c0e696 | 1331 | int sig = info->si_signo; |
1da177e4 LT |
1332 | |
1333 | spin_lock_irqsave(&t->sighand->siglock, flags); | |
ae74c3b6 LT |
1334 | action = &t->sighand->action[sig-1]; |
1335 | ignored = action->sa.sa_handler == SIG_IGN; | |
1336 | blocked = sigismember(&t->blocked, sig); | |
307d522f | 1337 | if (blocked || ignored || sigdfl) { |
ae74c3b6 LT |
1338 | action->sa.sa_handler = SIG_DFL; |
1339 | if (blocked) { | |
1340 | sigdelset(&t->blocked, sig); | |
7bb44ade | 1341 | recalc_sigpending_and_wake(t); |
ae74c3b6 | 1342 | } |
1da177e4 | 1343 | } |
eb61b591 JI |
1344 | /* |
1345 | * Don't clear SIGNAL_UNKILLABLE for traced tasks, users won't expect | |
1346 | * debugging to leave init killable. | |
1347 | */ | |
1348 | if (action->sa.sa_handler == SIG_DFL && !t->ptrace) | |
80fe728d | 1349 | t->signal->flags &= ~SIGNAL_UNKILLABLE; |
b21c5bd5 | 1350 | ret = send_signal(sig, info, t, PIDTYPE_PID); |
1da177e4 LT |
1351 | spin_unlock_irqrestore(&t->sighand->siglock, flags); |
1352 | ||
1353 | return ret; | |
1354 | } | |
1355 | ||
a89e9b8a | 1356 | int force_sig_info(struct kernel_siginfo *info) |
59c0e696 | 1357 | { |
307d522f | 1358 | return force_sig_info_to_task(info, current, false); |
59c0e696 EB |
1359 | } |
1360 | ||
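/*
 * Illustrative sketch (not part of the original source): callers typically
 * fill a kernel_siginfo_t and hand it to force_sig_info(), roughly what
 * force_sig() does for a bare signal number:
 *
 *	struct kernel_siginfo info;
 *
 *	clear_siginfo(&info);
 *	info.si_signo = SIGSEGV;
 *	info.si_code = SI_KERNEL;
 *	force_sig_info(&info);
 */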
1da177e4 LT |
1361 | /* |
1362 | * Nuke all other threads in the group. | |
1363 | */ | |
09faef11 | 1364 | int zap_other_threads(struct task_struct *p) |
1da177e4 | 1365 | { |
09faef11 ON |
1366 | struct task_struct *t = p; |
1367 | int count = 0; | |
1da177e4 | 1368 | |
1da177e4 LT |
1369 | p->signal->group_stop_count = 0; |
1370 | ||
09faef11 | 1371 | while_each_thread(p, t) { |
6dfca329 | 1372 | task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK); |
09faef11 ON |
1373 | count++; |
1374 | ||
1375 | /* Don't bother with already dead threads */ | |
1da177e4 LT |
1376 | if (t->exit_state) |
1377 | continue; | |
1da177e4 | 1378 | sigaddset(&t->pending.signal, SIGKILL); |
1da177e4 LT |
1379 | signal_wake_up(t, 1); |
1380 | } | |
09faef11 ON |
1381 | |
1382 | return count; | |
1da177e4 LT |
1383 | } |
1384 | ||
b8ed374e NK |
1385 | struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, |
1386 | unsigned long *flags) | |
f63ee72e ON |
1387 | { |
1388 | struct sighand_struct *sighand; | |
1389 | ||
59dc6f3c | 1390 | rcu_read_lock(); |
f63ee72e ON |
1391 | for (;;) { |
1392 | sighand = rcu_dereference(tsk->sighand); | |
59dc6f3c | 1393 | if (unlikely(sighand == NULL)) |
f63ee72e | 1394 | break; |
59dc6f3c | 1395 | |
392809b2 ON |
1396 | /* |
1397 | * This sighand can be already freed and even reused, but | |
5f0d5a3a | 1398 | * we rely on SLAB_TYPESAFE_BY_RCU and sighand_ctor() which |
392809b2 ON |
1399 | * initializes ->siglock: this slab can't go away, it has |
1400 | * the same object type, ->siglock can't be reinitialized. | |
1401 | * | |
1402 | * We need to ensure that tsk->sighand is still the same | |
1403 | * after we take the lock, we can race with de_thread() or | |
1404 | * __exit_signal(). In the latter case the next iteration | |
1405 | * must see ->sighand == NULL. | |
1406 | */ | |
59dc6f3c | 1407 | spin_lock_irqsave(&sighand->siglock, *flags); |
913292c9 | 1408 | if (likely(sighand == rcu_access_pointer(tsk->sighand))) |
f63ee72e | 1409 | break; |
59dc6f3c | 1410 | spin_unlock_irqrestore(&sighand->siglock, *flags); |
f63ee72e | 1411 | } |
59dc6f3c | 1412 | rcu_read_unlock(); |
f63ee72e ON |
1413 | |
1414 | return sighand; | |
1415 | } | |
1416 | ||
a5dec9f8 FW |
1417 | #ifdef CONFIG_LOCKDEP |
1418 | void lockdep_assert_task_sighand_held(struct task_struct *task) | |
1419 | { | |
1420 | struct sighand_struct *sighand; | |
1421 | ||
1422 | rcu_read_lock(); | |
1423 | sighand = rcu_dereference(task->sighand); | |
1424 | if (sighand) | |
1425 | lockdep_assert_held(&sighand->siglock); | |
1426 | else | |
1427 | WARN_ON_ONCE(1); | |
1428 | rcu_read_unlock(); | |
1429 | } | |
1430 | #endif | |
1431 | ||
c69e8d9c DH |
1432 | /* |
1433 | * send signal info to all the members of a group | |
c69e8d9c | 1434 | */ |
ae7795bc EB |
1435 | int group_send_sig_info(int sig, struct kernel_siginfo *info, |
1436 | struct task_struct *p, enum pid_type type) | |
1da177e4 | 1437 | { |
694f690d DH |
1438 | int ret; |
1439 | ||
1440 | rcu_read_lock(); | |
1441 | ret = check_kill_permission(sig, info, p); | |
1442 | rcu_read_unlock(); | |
f63ee72e | 1443 | |
4a30debf | 1444 | if (!ret && sig) |
40b3b025 | 1445 | ret = do_send_sig_info(sig, info, p, type); |
1da177e4 LT |
1446 | |
1447 | return ret; | |
1448 | } | |
1449 | ||
1450 | /* | |
146a505d | 1451 | * __kill_pgrp_info() sends a signal to a process group: this is what the tty |
1da177e4 | 1452 | * control characters do (^C, ^Z etc) |
c69e8d9c | 1453 | * - the caller must hold at least a readlock on tasklist_lock |
1da177e4 | 1454 | */ |
ae7795bc | 1455 | int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp) |
1da177e4 LT |
1456 | { |
1457 | struct task_struct *p = NULL; | |
1458 | int retval, success; | |
1459 | ||
1da177e4 LT |
1460 | success = 0; |
1461 | retval = -ESRCH; | |
c4b92fc1 | 1462 | do_each_pid_task(pgrp, PIDTYPE_PGID, p) { |
01024980 | 1463 | int err = group_send_sig_info(sig, info, p, PIDTYPE_PGID); |
1da177e4 LT |
1464 | success |= !err; |
1465 | retval = err; | |
c4b92fc1 | 1466 | } while_each_pid_task(pgrp, PIDTYPE_PGID, p); |
1da177e4 LT |
1467 | return success ? 0 : retval; |
1468 | } | |
1469 | ||
ae7795bc | 1470 | int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid) |
1da177e4 | 1471 | { |
d36174bc | 1472 | int error = -ESRCH; |
1da177e4 LT |
1473 | struct task_struct *p; |
1474 | ||
eca1a089 PM |
1475 | for (;;) { |
1476 | rcu_read_lock(); | |
1477 | p = pid_task(pid, PIDTYPE_PID); | |
1478 | if (p) | |
01024980 | 1479 | error = group_send_sig_info(sig, info, p, PIDTYPE_TGID); |
eca1a089 PM |
1480 | rcu_read_unlock(); |
1481 | if (likely(!p || error != -ESRCH)) | |
1482 | return error; | |
6ca25b55 | 1483 | |
eca1a089 PM |
1484 | /* |
1485 | * The task was unhashed in between, try again. If it | |
1486 | * is dead, pid_task() will return NULL; if we race with | |
1487 | * de_thread() it will find the new leader. | |
1488 | */ | |
1489 | } | |
1da177e4 LT |
1490 | } |
1491 | ||
ae7795bc | 1492 | static int kill_proc_info(int sig, struct kernel_siginfo *info, pid_t pid) |
c4b92fc1 EB |
1493 | { |
1494 | int error; | |
1495 | rcu_read_lock(); | |
b488893a | 1496 | error = kill_pid_info(sig, info, find_vpid(pid)); |
c4b92fc1 EB |
1497 | rcu_read_unlock(); |
1498 | return error; | |
1499 | } | |
1500 | ||
bb17fcca CB |
1501 | static inline bool kill_as_cred_perm(const struct cred *cred, |
1502 | struct task_struct *target) | |
d178bc3a SH |
1503 | { |
1504 | const struct cred *pcred = __task_cred(target); | |
bb17fcca CB |
1505 | |
1506 | return uid_eq(cred->euid, pcred->suid) || | |
1507 | uid_eq(cred->euid, pcred->uid) || | |
1508 | uid_eq(cred->uid, pcred->suid) || | |
1509 | uid_eq(cred->uid, pcred->uid); | |
d178bc3a SH |
1510 | } |
1511 | ||
70f1b0d3 EB |
1512 | /* |
1513 | * The usb asyncio usage of siginfo is wrong. The glibc support | |
1514 | * for asyncio which uses SI_ASYNCIO assumes the layout is SIL_RT. | |
1515 | * AKA after the generic fields: | |
1516 | * kernel_pid_t si_pid; | |
1517 | * kernel_uid32_t si_uid; | |
1518 | * sigval_t si_value; | |
1519 | * | |
1520 | * Unfortunately when usb generates SI_ASYNCIO it assumes the layout | |
1521 | * after the generic fields is: | |
1522 | * void __user *si_addr; | |
1523 | * | |
1524 | * This is a practical problem when there is a 64bit big endian kernel | |
1525 | * and a 32bit userspace. The 32bit address will be encoded in the low | |
1526 | * 32bits of the pointer, and those low 32bits will be stored at a | |
1527 | * higher address than they would appear in a 32 bit pointer. So | |
1528 | * userspace will not see the address it was expecting for its completions. | |
1529 | * | |
1530 | * There is nothing in the encoding that can allow | |
1531 | * copy_siginfo_to_user32 to detect this confusion of formats, so | |
1532 | * handle this by requiring the caller of kill_pid_usb_asyncio to | |
1533 | * notice when this situation takes place and to store the 32bit | |
1534 | * pointer in sival_int, instead of sival_ptr, of the sigval_t addr | |
1535 | * parameter. | |
1536 | */ | |
1537 | int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, | |
1538 | struct pid *pid, const struct cred *cred) | |
46113830 | 1539 | { |
70f1b0d3 | 1540 | struct kernel_siginfo info; |
46113830 | 1541 | struct task_struct *p; |
14d8c9f3 | 1542 | unsigned long flags; |
70f1b0d3 EB |
1543 | int ret = -EINVAL; |
1544 | ||
eaec2b0b ZL |
1545 | if (!valid_signal(sig)) |
1546 | return ret; | |
1547 | ||
70f1b0d3 EB |
1548 | clear_siginfo(&info); |
1549 | info.si_signo = sig; | |
1550 | info.si_errno = errno; | |
1551 | info.si_code = SI_ASYNCIO; | |
1552 | *((sigval_t *)&info.si_pid) = addr; | |
46113830 | 1553 | |
14d8c9f3 | 1554 | rcu_read_lock(); |
2425c08b | 1555 | p = pid_task(pid, PIDTYPE_PID); |
46113830 HW |
1556 | if (!p) { |
1557 | ret = -ESRCH; | |
1558 | goto out_unlock; | |
1559 | } | |
70f1b0d3 | 1560 | if (!kill_as_cred_perm(cred, p)) { |
46113830 HW |
1561 | ret = -EPERM; |
1562 | goto out_unlock; | |
1563 | } | |
70f1b0d3 | 1564 | ret = security_task_kill(p, &info, sig, cred); |
8f95dc58 DQ |
1565 | if (ret) |
1566 | goto out_unlock; | |
14d8c9f3 TG |
1567 | |
1568 | if (sig) { | |
1569 | if (lock_task_sighand(p, &flags)) { | |
8ad23dea | 1570 | ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false); |
14d8c9f3 TG |
1571 | unlock_task_sighand(p, &flags); |
1572 | } else | |
1573 | ret = -ESRCH; | |
46113830 HW |
1574 | } |
1575 | out_unlock: | |
14d8c9f3 | 1576 | rcu_read_unlock(); |
46113830 HW |
1577 | return ret; |
1578 | } | |
70f1b0d3 | 1579 | EXPORT_SYMBOL_GPL(kill_pid_usb_asyncio); |
1da177e4 LT |
1580 | |
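A hedged sketch of a completion path honouring the rule described above; the compat flag, context pointer and choice of SIGIO are assumptions for illustration, not the actual usb core code:

static void example_async_complete(struct pid *pid, const struct cred *cred,
				   void __user *ctx, bool compat)
{
	sigval_t addr;

	if (compat)			/* 32bit userspace on a 64bit kernel */
		addr.sival_int = ptr_to_compat(ctx);
	else
		addr.sival_ptr = ctx;

	kill_pid_usb_asyncio(SIGIO, 0, addr, pid, cred);
}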
1581 | /* | |
1582 | * kill_something_info() interprets pid in interesting ways just like kill(2). | |
1583 | * | |
1584 | * POSIX specifies that kill(-1,sig) is unspecified, but what we have | |
1585 | * is probably wrong. Should make it like BSD or SYSV. | |
1586 | */ | |
1587 | ||
ae7795bc | 1588 | static int kill_something_info(int sig, struct kernel_siginfo *info, pid_t pid) |
1da177e4 | 1589 | { |
8d42db18 | 1590 | int ret; |
d5df763b | 1591 | |
3075afdf ZL |
1592 | if (pid > 0) |
1593 | return kill_proc_info(sig, info, pid); | |
d5df763b | 1594 | |
4ea77014 | 1595 | /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */ |
1596 | if (pid == INT_MIN) | |
1597 | return -ESRCH; | |
1598 | ||
d5df763b PE |
1599 | read_lock(&tasklist_lock); |
1600 | if (pid != -1) { | |
1601 | ret = __kill_pgrp_info(sig, info, | |
1602 | pid ? find_vpid(-pid) : task_pgrp(current)); | |
1603 | } else { | |
1da177e4 LT |
1604 | int retval = 0, count = 0; |
1605 | struct task_struct * p; | |
1606 | ||
1da177e4 | 1607 | for_each_process(p) { |
d25141a8 SB |
1608 | if (task_pid_vnr(p) > 1 && |
1609 | !same_thread_group(p, current)) { | |
01024980 EB |
1610 | int err = group_send_sig_info(sig, info, p, |
1611 | PIDTYPE_MAX); | |
1da177e4 LT |
1612 | ++count; |
1613 | if (err != -EPERM) | |
1614 | retval = err; | |
1615 | } | |
1616 | } | |
8d42db18 | 1617 | ret = count ? retval : -ESRCH; |
1da177e4 | 1618 | } |
d5df763b PE |
1619 | read_unlock(&tasklist_lock); |
1620 | ||
8d42db18 | 1621 | return ret; |
1da177e4 LT |
1622 | } |
1623 | ||
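The pid conventions implemented above are exactly the ones kill(2) documents, so they can be exercised directly from userspace; a small self-contained example with error handling trimmed:

#include <signal.h>
#include <unistd.h>

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		setpgid(0, 0);		/* child: become its own process group */
		pause();
		_exit(0);
	}

	setpgid(child, child);		/* no-op if the child got there first */
	kill(child, 0);			/* pid > 0: exactly one process */
	kill(0, 0);			/* pid == 0: the caller's own process group */
	kill(-child, SIGTERM);		/* pid < -1: every member of pgrp |pid| */
	/* kill(-1, sig) would signal everything we may signal, except init */
	return 0;
}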
1624 | /* | |
1625 | * These are for backward compatibility with the rest of the kernel source. | |
1626 | */ | |
1627 | ||
ae7795bc | 1628 | int send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p) |
1da177e4 | 1629 | { |
1da177e4 LT |
1630 | /* |
1631 | * Make sure legacy kernel users don't send in bad values | |
1632 | * (normal paths check this in check_kill_permission). | |
1633 | */ | |
7ed20e1a | 1634 | if (!valid_signal(sig)) |
1da177e4 LT |
1635 | return -EINVAL; |
1636 | ||
40b3b025 | 1637 | return do_send_sig_info(sig, info, p, PIDTYPE_PID); |
1da177e4 | 1638 | } |
fb50f5a4 | 1639 | EXPORT_SYMBOL(send_sig_info); |
1da177e4 | 1640 | |
b67a1b9e ON |
1641 | #define __si_special(priv) \ |
1642 | ((priv) ? SEND_SIG_PRIV : SEND_SIG_NOINFO) | |
1643 | ||
1da177e4 LT |
1644 | int |
1645 | send_sig(int sig, struct task_struct *p, int priv) | |
1646 | { | |
b67a1b9e | 1647 | return send_sig_info(sig, __si_special(priv), p); |
1da177e4 | 1648 | } |
fb50f5a4 | 1649 | EXPORT_SYMBOL(send_sig); |
1da177e4 | 1650 | |
3cf5d076 | 1651 | void force_sig(int sig) |
1da177e4 | 1652 | { |
ffafd23b EB |
1653 | struct kernel_siginfo info; |
1654 | ||
1655 | clear_siginfo(&info); | |
1656 | info.si_signo = sig; | |
1657 | info.si_errno = 0; | |
1658 | info.si_code = SI_KERNEL; | |
1659 | info.si_pid = 0; | |
1660 | info.si_uid = 0; | |
a89e9b8a | 1661 | force_sig_info(&info); |
1da177e4 | 1662 | } |
fb50f5a4 | 1663 | EXPORT_SYMBOL(force_sig); |
1da177e4 LT |
1664 | |
1665 | /* | |
1666 | * When things go south during signal handling, we | |
1667 | * will force a SIGSEGV. And if the signal that caused | |
1668 | * the problem was already a SIGSEGV, we'll want to | |
1669 | * make sure we don't even try to deliver the signal. | |
1670 | */ | |
cb44c9a0 | 1671 | void force_sigsegv(int sig) |
1da177e4 | 1672 | { |
cb44c9a0 EB |
1673 | struct task_struct *p = current; |
1674 | ||
1da177e4 LT |
1675 | if (sig == SIGSEGV) { |
1676 | unsigned long flags; | |
1677 | spin_lock_irqsave(&p->sighand->siglock, flags); | |
1678 | p->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; | |
1679 | spin_unlock_irqrestore(&p->sighand->siglock, flags); | |
1680 | } | |
3cf5d076 | 1681 | force_sig(SIGSEGV); |
1da177e4 LT |
1682 | } |
1683 | ||
91ca180d | 1684 | int force_sig_fault_to_task(int sig, int code, void __user *addr |
f8ec6601 EB |
1685 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) |
1686 | , struct task_struct *t) | |
1687 | { | |
ae7795bc | 1688 | struct kernel_siginfo info; |
f8ec6601 EB |
1689 | |
1690 | clear_siginfo(&info); | |
1691 | info.si_signo = sig; | |
1692 | info.si_errno = 0; | |
1693 | info.si_code = code; | |
1694 | info.si_addr = addr; | |
f8ec6601 EB |
1695 | #ifdef __ia64__ |
1696 | info.si_imm = imm; | |
1697 | info.si_flags = flags; | |
1698 | info.si_isr = isr; | |
1699 | #endif | |
307d522f | 1700 | return force_sig_info_to_task(&info, t, false); |
f8ec6601 EB |
1701 | } |
1702 | ||
91ca180d | 1703 | int force_sig_fault(int sig, int code, void __user *addr |
2e1661d2 | 1704 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)) |
91ca180d EB |
1705 | { |
1706 | return force_sig_fault_to_task(sig, code, addr | |
2e1661d2 | 1707 | ___ARCH_SI_IA64(imm, flags, isr), current); |
f8ec6601 EB |
1708 | } |
1709 | ||
1710 | int send_sig_fault(int sig, int code, void __user *addr | |
f8ec6601 EB |
1711 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) |
1712 | , struct task_struct *t) | |
1713 | { | |
ae7795bc | 1714 | struct kernel_siginfo info; |
f8ec6601 EB |
1715 | |
1716 | clear_siginfo(&info); | |
1717 | info.si_signo = sig; | |
1718 | info.si_errno = 0; | |
1719 | info.si_code = code; | |
1720 | info.si_addr = addr; | |
f8ec6601 EB |
1721 | #ifdef __ia64__ |
1722 | info.si_imm = imm; | |
1723 | info.si_flags = flags; | |
1724 | info.si_isr = isr; | |
1725 | #endif | |
1726 | return send_sig_info(info.si_signo, &info, t); | |
1727 | } | |
1728 | ||
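A sketch of the typical caller of the force_sig_fault() helper above: an architecture's page-fault path delivering a synchronous SIGSEGV (the function name and label structure are illustrative; every arch lays this out differently):

static void example_bad_area(struct pt_regs *regs, unsigned long address)
{
	if (user_mode(regs)) {
		/* synchronous fault: SIGSEGV with the faulting address in si_addr */
		force_sig_fault(SIGSEGV, SEGV_MAPERR, (void __user *)address);
		return;
	}
	/* kernel-mode fault: exception table fixup or oops, not shown */
}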
f8eac901 | 1729 | int force_sig_mceerr(int code, void __user *addr, short lsb) |
38246735 | 1730 | { |
ae7795bc | 1731 | struct kernel_siginfo info; |
38246735 EB |
1732 | |
1733 | WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); | |
1734 | clear_siginfo(&info); | |
1735 | info.si_signo = SIGBUS; | |
1736 | info.si_errno = 0; | |
1737 | info.si_code = code; | |
1738 | info.si_addr = addr; | |
1739 | info.si_addr_lsb = lsb; | |
a89e9b8a | 1740 | return force_sig_info(&info); |
38246735 EB |
1741 | } |
1742 | ||
1743 | int send_sig_mceerr(int code, void __user *addr, short lsb, struct task_struct *t) | |
1744 | { | |
ae7795bc | 1745 | struct kernel_siginfo info; |
38246735 EB |
1746 | |
1747 | WARN_ON((code != BUS_MCEERR_AO) && (code != BUS_MCEERR_AR)); | |
1748 | clear_siginfo(&info); | |
1749 | info.si_signo = SIGBUS; | |
1750 | info.si_errno = 0; | |
1751 | info.si_code = code; | |
1752 | info.si_addr = addr; | |
1753 | info.si_addr_lsb = lsb; | |
1754 | return send_sig_info(info.si_signo, &info, t); | |
1755 | } | |
1756 | EXPORT_SYMBOL(send_sig_mceerr); | |
38246735 | 1757 | |
38246735 EB |
1758 | int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper) |
1759 | { | |
ae7795bc | 1760 | struct kernel_siginfo info; |
38246735 EB |
1761 | |
1762 | clear_siginfo(&info); | |
1763 | info.si_signo = SIGSEGV; | |
1764 | info.si_errno = 0; | |
1765 | info.si_code = SEGV_BNDERR; | |
1766 | info.si_addr = addr; | |
1767 | info.si_lower = lower; | |
1768 | info.si_upper = upper; | |
a89e9b8a | 1769 | return force_sig_info(&info); |
38246735 | 1770 | } |
38246735 EB |
1771 | |
1772 | #ifdef SEGV_PKUERR | |
1773 | int force_sig_pkuerr(void __user *addr, u32 pkey) | |
1774 | { | |
ae7795bc | 1775 | struct kernel_siginfo info; |
38246735 EB |
1776 | |
1777 | clear_siginfo(&info); | |
1778 | info.si_signo = SIGSEGV; | |
1779 | info.si_errno = 0; | |
1780 | info.si_code = SEGV_PKUERR; | |
1781 | info.si_addr = addr; | |
1782 | info.si_pkey = pkey; | |
a89e9b8a | 1783 | return force_sig_info(&info); |
38246735 EB |
1784 | } |
1785 | #endif | |
f8ec6601 | 1786 | |
af5eeab7 EB |
1787 | int force_sig_perf(void __user *addr, u32 type, u64 sig_data) |
1788 | { | |
1789 | struct kernel_siginfo info; | |
1790 | ||
1791 | clear_siginfo(&info); | |
0683b531 EB |
1792 | info.si_signo = SIGTRAP; |
1793 | info.si_errno = 0; | |
1794 | info.si_code = TRAP_PERF; | |
1795 | info.si_addr = addr; | |
1796 | info.si_perf_data = sig_data; | |
1797 | info.si_perf_type = type; | |
1798 | ||
af5eeab7 EB |
1799 | return force_sig_info(&info); |
1800 | } | |
1801 | ||
307d522f EB |
1802 | /** |
1803 | * force_sig_seccomp - signals the task to allow in-process syscall emulation | |
1804 | * @syscall: syscall number to send to userland | |
1805 | * @reason: filter-supplied reason code to send to userland (via si_errno) | |
1806 | * | |
1807 | * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info. | |
1808 | */ | |
1809 | int force_sig_seccomp(int syscall, int reason, bool force_coredump) | |
1810 | { | |
1811 | struct kernel_siginfo info; | |
1812 | ||
1813 | clear_siginfo(&info); | |
1814 | info.si_signo = SIGSYS; | |
1815 | info.si_code = SYS_SECCOMP; | |
1816 | info.si_call_addr = (void __user *)KSTK_EIP(current); | |
1817 | info.si_errno = reason; | |
1818 | info.si_arch = syscall_get_arch(current); | |
1819 | info.si_syscall = syscall; | |
1820 | return force_sig_info_to_task(&info, current, force_coredump); | |
1821 | } | |
1822 | ||
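From userspace, the fields filled in above arrive in a SIGSYS handler once a SECCOMP_RET_TRAP filter fires; a minimal sketch (filter installation via seccomp(2) is not shown, and printf() is not async-signal-safe, used here purely for demonstration):

#include <signal.h>
#include <stdio.h>

static void sigsys_handler(int sig, siginfo_t *si, void *ucontext)
{
	(void)sig; (void)ucontext;
	/* si_errno carries SECCOMP_RET_DATA, si_syscall the trapped nr */
	printf("SIGSYS: syscall=%d reason=%d arch=%#x\n",
	       si->si_syscall, si->si_errno, si->si_arch);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction = sigsys_handler,
		.sa_flags = SA_SIGINFO,
	};

	sigaction(SIGSYS, &sa, NULL);
	/* ... install a BPF filter returning SECCOMP_RET_TRAP here ... */
	return 0;
}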
f71dd7dc EB |
1823 | /* For the crazy architectures that include trap information in |
1824 | * the errno field, instead of an actual errno value. | |
1825 | */ | |
1826 | int force_sig_ptrace_errno_trap(int errno, void __user *addr) | |
1827 | { | |
ae7795bc | 1828 | struct kernel_siginfo info; |
f71dd7dc EB |
1829 | |
1830 | clear_siginfo(&info); | |
1831 | info.si_signo = SIGTRAP; | |
1832 | info.si_errno = errno; | |
1833 | info.si_code = TRAP_HWBKPT; | |
1834 | info.si_addr = addr; | |
a89e9b8a | 1835 | return force_sig_info(&info); |
f71dd7dc EB |
1836 | } |
1837 | ||
2c9f7eaf EB |
1838 | /* For the rare architectures that include trap information using |
1839 | * si_trapno. | |
1840 | */ | |
1841 | int force_sig_fault_trapno(int sig, int code, void __user *addr, int trapno) | |
1842 | { | |
1843 | struct kernel_siginfo info; | |
1844 | ||
1845 | clear_siginfo(&info); | |
1846 | info.si_signo = sig; | |
1847 | info.si_errno = 0; | |
1848 | info.si_code = code; | |
1849 | info.si_addr = addr; | |
1850 | info.si_trapno = trapno; | |
1851 | return force_sig_info(&info); | |
1852 | } | |
1853 | ||
7de5f68d EB |
1854 | /* For the rare architectures that include trap information using |
1855 | * si_trapno. | |
1856 | */ | |
1857 | int send_sig_fault_trapno(int sig, int code, void __user *addr, int trapno, | |
1858 | struct task_struct *t) | |
1859 | { | |
1860 | struct kernel_siginfo info; | |
1861 | ||
1862 | clear_siginfo(&info); | |
1863 | info.si_signo = sig; | |
1864 | info.si_errno = 0; | |
1865 | info.si_code = code; | |
1866 | info.si_addr = addr; | |
1867 | info.si_trapno = trapno; | |
1868 | return send_sig_info(info.si_signo, &info, t); | |
1869 | } | |
1870 | ||
c4b92fc1 EB |
1871 | int kill_pgrp(struct pid *pid, int sig, int priv) |
1872 | { | |
146a505d PE |
1873 | int ret; |
1874 | ||
1875 | read_lock(&tasklist_lock); | |
1876 | ret = __kill_pgrp_info(sig, __si_special(priv), pid); | |
1877 | read_unlock(&tasklist_lock); | |
1878 | ||
1879 | return ret; | |
c4b92fc1 EB |
1880 | } |
1881 | EXPORT_SYMBOL(kill_pgrp); | |
1882 | ||
1883 | int kill_pid(struct pid *pid, int sig, int priv) | |
1884 | { | |
1885 | return kill_pid_info(sig, __si_special(priv), pid); | |
1886 | } | |
1887 | EXPORT_SYMBOL(kill_pid); | |
1888 | ||
1da177e4 LT |
1889 | /* |
1890 | * These functions support sending signals using preallocated sigqueue | |
1891 | * structures. This is needed "because realtime applications cannot | |
1892 | * afford to lose notifications of asynchronous events, like timer | |
5aba085e | 1893 | * expirations or I/O completions". In the case of POSIX Timers |
1da177e4 LT |
1894 | * we allocate the sigqueue structure at timer_create() time. If this | |
1895 | * allocation fails, we are able to report the failure to the application | |
1896 | * with an EAGAIN error. | |
1897 | */ | |
1da177e4 LT |
1898 | struct sigqueue *sigqueue_alloc(void) |
1899 | { | |
69995ebb | 1900 | return __sigqueue_alloc(-1, current, GFP_KERNEL, 0, SIGQUEUE_PREALLOC); |
1da177e4 LT |
1901 | } |
1902 | ||
1903 | void sigqueue_free(struct sigqueue *q) | |
1904 | { | |
1905 | unsigned long flags; | |
60187d27 ON |
1906 | spinlock_t *lock = ¤t->sighand->siglock; |
1907 | ||
1da177e4 LT |
1908 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
1909 | /* | |
c8e85b4f ON |
1910 | * We must hold ->siglock while testing q->list |
1911 | * to serialize with collect_signal() or with | |
da7978b0 | 1912 | * __exit_signal()->flush_sigqueue(). |
1da177e4 | 1913 | */ |
60187d27 | 1914 | spin_lock_irqsave(lock, flags); |
c8e85b4f ON |
1915 | q->flags &= ~SIGQUEUE_PREALLOC; |
1916 | /* | |
1917 | * If it is queued it will be freed when dequeued, | |
1918 | * like the "regular" sigqueue. | |
1919 | */ | |
60187d27 | 1920 | if (!list_empty(&q->list)) |
c8e85b4f | 1921 | q = NULL; |
60187d27 ON |
1922 | spin_unlock_irqrestore(lock, flags); |
1923 | ||
c8e85b4f ON |
1924 | if (q) |
1925 | __sigqueue_free(q); | |
1da177e4 LT |
1926 | } |
1927 | ||
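The userspace-visible half of this contract: timer_create(2) is the point where sigqueue_alloc() runs, so exhaustion of the queued-signal limit surfaces as EAGAIN from timer_create() itself rather than as a silently lost timer signal later (link with -lrt on older glibc):

#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGRTMIN,
		.sigev_value  = { .sival_int = 42 },
	};
	timer_t timerid;

	if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1) {
		perror("timer_create");	/* EAGAIN: no sigqueue could be preallocated */
		return 1;
	}
	timer_delete(timerid);		/* releases the preallocated sigqueue */
	return 0;
}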
24122c7f | 1928 | int send_sigqueue(struct sigqueue *q, struct pid *pid, enum pid_type type) |
9e3bd6c3 | 1929 | { |
e62e6650 | 1930 | int sig = q->info.si_signo; |
2ca3515a | 1931 | struct sigpending *pending; |
24122c7f | 1932 | struct task_struct *t; |
e62e6650 | 1933 | unsigned long flags; |
163566f6 | 1934 | int ret, result; |
2ca3515a | 1935 | |
4cd4b6d4 | 1936 | BUG_ON(!(q->flags & SIGQUEUE_PREALLOC)); |
e62e6650 ON |
1937 | |
1938 | ret = -1; | |
24122c7f EB |
1939 | rcu_read_lock(); |
1940 | t = pid_task(pid, type); | |
1941 | if (!t || !likely(lock_task_sighand(t, &flags))) | |
e62e6650 ON |
1942 | goto ret; |
1943 | ||
7e695a5e | 1944 | ret = 1; /* the signal is ignored */ |
163566f6 | 1945 | result = TRACE_SIGNAL_IGNORED; |
def8cf72 | 1946 | if (!prepare_signal(sig, t, false)) |
e62e6650 ON |
1947 | goto out; |
1948 | ||
1949 | ret = 0; | |
9e3bd6c3 PE |
1950 | if (unlikely(!list_empty(&q->list))) { |
1951 | /* | |
1952 | * If an SI_TIMER entry is already queued, just increment | |
1953 | * the overrun count. | |
1954 | */ | |
9e3bd6c3 PE |
1955 | BUG_ON(q->info.si_code != SI_TIMER); |
1956 | q->info.si_overrun++; | |
163566f6 | 1957 | result = TRACE_SIGNAL_ALREADY_PENDING; |
e62e6650 | 1958 | goto out; |
9e3bd6c3 | 1959 | } |
ba661292 | 1960 | q->info.si_overrun = 0; |
9e3bd6c3 | 1961 | |
9e3bd6c3 | 1962 | signalfd_notify(t, sig); |
24122c7f | 1963 | pending = (type != PIDTYPE_PID) ? &t->signal->shared_pending : &t->pending; |
9e3bd6c3 PE |
1964 | list_add_tail(&q->list, &pending->list); |
1965 | sigaddset(&pending->signal, sig); | |
07296149 | 1966 | complete_signal(sig, t, type); |
163566f6 | 1967 | result = TRACE_SIGNAL_DELIVERED; |
e62e6650 | 1968 | out: |
24122c7f | 1969 | trace_signal_generate(sig, &q->info, t, type != PIDTYPE_PID, result); |
e62e6650 ON |
1970 | unlock_task_sighand(t, &flags); |
1971 | ret: | |
24122c7f | 1972 | rcu_read_unlock(); |
e62e6650 | 1973 | return ret; |
9e3bd6c3 PE |
1974 | } |
1975 | ||
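The si_overrun accounting above is what userspace reads back when timer expirations outpace delivery, either as si_overrun in the handler or via timer_getoverrun(2); a minimal sketch:

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t last_overrun = -1;

static void handler(int sig, siginfo_t *si, void *ucontext)
{
	(void)sig; (void)ucontext;
	last_overrun = si->si_overrun;	/* mirrors q->info.si_overrun above */
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };
	struct sigevent sev = { .sigev_notify = SIGEV_SIGNAL, .sigev_signo = SIGRTMIN };
	struct itimerspec its = {
		.it_value    = { 0, 1000000 },	/* first expiry after 1ms */
		.it_interval = { 0, 1000000 },	/* then every 1ms */
	};
	timer_t t;
	int i;

	sigaction(SIGRTMIN, &sa, NULL);
	timer_create(CLOCK_MONOTONIC, &sev, &t);
	timer_settime(t, 0, &its, NULL);
	for (i = 0; i < 100; i++)
		pause();		/* each return is one delivered expiration */
	printf("last observed overrun count: %d\n", (int)last_overrun);
	return 0;
}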
b53b0b9d JFG |
1976 | static void do_notify_pidfd(struct task_struct *task) |
1977 | { | |
1978 | struct pid *pid; | |
1979 | ||
1caf7d50 | 1980 | WARN_ON(task->exit_state == 0); |
b53b0b9d JFG |
1981 | pid = task_pid(task); |
1982 | wake_up_all(&pid->wait_pidfd); | |
1983 | } | |
1984 | ||
1da177e4 LT |
1985 | /* |
1986 | * Let a parent know about the death of a child. | |
1987 | * For a stopped/continued status change, use do_notify_parent_cldstop instead. | |
2b2a1ff6 | 1988 | * |
53c8f9f1 ON |
1989 | * Returns true if our parent ignored us and so we've switched to |
1990 | * self-reaping. | |
1da177e4 | 1991 | */ |
53c8f9f1 | 1992 | bool do_notify_parent(struct task_struct *tsk, int sig) |
1da177e4 | 1993 | { |
ae7795bc | 1994 | struct kernel_siginfo info; |
1da177e4 LT |
1995 | unsigned long flags; |
1996 | struct sighand_struct *psig; | |
53c8f9f1 | 1997 | bool autoreap = false; |
bde8285e | 1998 | u64 utime, stime; |
1da177e4 LT |
1999 | |
2000 | BUG_ON(sig == -1); | |
2001 | ||
2002 | /* do_notify_parent_cldstop should have been called instead. */ | |
e1abb39c | 2003 | BUG_ON(task_is_stopped_or_traced(tsk)); |
1da177e4 | 2004 | |
d21142ec | 2005 | BUG_ON(!tsk->ptrace && |
1da177e4 LT |
2006 | (tsk->group_leader != tsk || !thread_group_empty(tsk))); |
2007 | ||
b53b0b9d JFG |
2008 | /* Wake up all pidfd waiters */ |
2009 | do_notify_pidfd(tsk); | |
2010 | ||
b6e238dc ON |
2011 | if (sig != SIGCHLD) { |
2012 | /* | |
2013 | * This is only possible if parent == real_parent. | |
2014 | * Check if it has changed security domain. | |
2015 | */ | |
d1e7fd64 | 2016 | if (tsk->parent_exec_id != READ_ONCE(tsk->parent->self_exec_id)) |
b6e238dc ON |
2017 | sig = SIGCHLD; |
2018 | } | |
2019 | ||
faf1f22b | 2020 | clear_siginfo(&info); |
1da177e4 LT |
2021 | info.si_signo = sig; |
2022 | info.si_errno = 0; | |
b488893a | 2023 | /* |
32084504 EB |
2024 | * We are under tasklist_lock here so our parent is tied to |
2025 | * us and cannot change. | |
b488893a | 2026 | * |
32084504 EB |
2027 | * task_active_pid_ns will always return the same pid namespace |
2028 | * until a task passes through release_task. | |
b488893a PE |
2029 | * |
2030 | * write_lock() currently calls preempt_disable() which is the | |
2031 | * same as rcu_read_lock(), but according to Oleg, it is not | |
2032 | * correct to rely on this. | |
2033 | */ | |
2034 | rcu_read_lock(); | |
32084504 | 2035 | info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent)); |
54ba47ed EB |
2036 | info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), |
2037 | task_uid(tsk)); | |
b488893a PE |
2038 | rcu_read_unlock(); |
2039 | ||
bde8285e FW |
2040 | task_cputime(tsk, &utime, &stime); |
2041 | info.si_utime = nsec_to_clock_t(utime + tsk->signal->utime); | |
2042 | info.si_stime = nsec_to_clock_t(stime + tsk->signal->stime); | |
1da177e4 LT |
2043 | |
2044 | info.si_status = tsk->exit_code & 0x7f; | |
2045 | if (tsk->exit_code & 0x80) | |
2046 | info.si_code = CLD_DUMPED; | |
2047 | else if (tsk->exit_code & 0x7f) | |
2048 | info.si_code = CLD_KILLED; | |
2049 | else { | |
2050 | info.si_code = CLD_EXITED; | |
2051 | info.si_status = tsk->exit_code >> 8; | |
2052 | } | |
2053 | ||
2054 | psig = tsk->parent->sighand; | |
2055 | spin_lock_irqsave(&psig->siglock, flags); | |
d21142ec | 2056 | if (!tsk->ptrace && sig == SIGCHLD && |
1da177e4 LT |
2057 | (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN || |
2058 | (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) { | |
2059 | /* | |
2060 | * We are exiting and our parent doesn't care. POSIX.1 | |
2061 | * defines special semantics for setting SIGCHLD to SIG_IGN | |
2062 | * or setting the SA_NOCLDWAIT flag: we should be reaped | |
2063 | * automatically and not left for our parent's wait4 call. | |
2064 | * Rather than having the parent do it as a magic kind of | |
2065 | * signal handler, we just set this to tell do_exit that we | |
2066 | * can be cleaned up without becoming a zombie. Note that | |
2067 | * we still call __wake_up_parent in this case, because a | |
2068 | * blocked sys_wait4 might now return -ECHILD. | |
2069 | * | |
2070 | * Whether we send SIGCHLD or not for SA_NOCLDWAIT | |
2071 | * is implementation-defined: we do (if you don't want | |
2072 | * it, just use SIG_IGN instead). | |
2073 | */ | |
53c8f9f1 | 2074 | autoreap = true; |
1da177e4 | 2075 | if (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) |
53c8f9f1 | 2076 | sig = 0; |
1da177e4 | 2077 | } |
61e713bd EB |
2078 | /* |
2079 | * Send with __send_signal as si_pid and si_uid are in the | |
2080 | * parent's namespaces. | |
2081 | */ | |
53c8f9f1 | 2082 | if (valid_signal(sig) && sig) |
61e713bd | 2083 | __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false); |
1da177e4 LT |
2084 | __wake_up_parent(tsk, tsk->parent); |
2085 | spin_unlock_irqrestore(&psig->siglock, flags); | |
2b2a1ff6 | 2086 | |
53c8f9f1 | 2087 | return autoreap; |
1da177e4 LT |
2088 | } |
2089 | ||
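The autoreap path above is directly observable from userspace: with SIGCHLD set to SIG_IGN (or SA_NOCLDWAIT), children are reaped automatically and a blocked wait() fails with ECHILD once they are gone:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* children will be autoreaped */

	if (fork() == 0)
		_exit(0);

	/* blocks until the child exits, then reports ECHILD: no zombie */
	if (wait(NULL) == -1 && errno == ECHILD)
		puts("child was autoreaped");
	return 0;
}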
75b95953 TH |
2090 | /** |
2091 | * do_notify_parent_cldstop - notify parent of stopped/continued state change | |
2092 | * @tsk: task reporting the state change | |
2093 | * @for_ptracer: the notification is for ptracer | |
2094 | * @why: CLD_{CONTINUED|STOPPED|TRAPPED} to report | |
2095 | * | |
2096 | * Notify @tsk's parent that the stopped/continued state has changed. If | |
2097 | * @for_ptracer is %false, @tsk's group leader notifies to its real parent. | |
2098 | * If %true, @tsk reports to @tsk->parent which should be the ptracer. | |
2099 | * | |
2100 | * CONTEXT: | |
2101 | * Must be called with tasklist_lock at least read locked. | |
2102 | */ | |
2103 | static void do_notify_parent_cldstop(struct task_struct *tsk, | |
2104 | bool for_ptracer, int why) | |
1da177e4 | 2105 | { |
ae7795bc | 2106 | struct kernel_siginfo info; |
1da177e4 | 2107 | unsigned long flags; |
bc505a47 | 2108 | struct task_struct *parent; |
1da177e4 | 2109 | struct sighand_struct *sighand; |
bde8285e | 2110 | u64 utime, stime; |
1da177e4 | 2111 | |
75b95953 | 2112 | if (for_ptracer) { |
bc505a47 | 2113 | parent = tsk->parent; |
75b95953 | 2114 | } else { |
bc505a47 ON |
2115 | tsk = tsk->group_leader; |
2116 | parent = tsk->real_parent; | |
2117 | } | |
2118 | ||
faf1f22b | 2119 | clear_siginfo(&info); |
1da177e4 LT |
2120 | info.si_signo = SIGCHLD; |
2121 | info.si_errno = 0; | |
b488893a | 2122 | /* |
5aba085e | 2123 | * see comment in do_notify_parent() about the following 4 lines |
b488893a PE |
2124 | */ |
2125 | rcu_read_lock(); | |
17cf22c3 | 2126 | info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(parent)); |
54ba47ed | 2127 | info.si_uid = from_kuid_munged(task_cred_xxx(parent, user_ns), task_uid(tsk)); |
b488893a PE |
2128 | rcu_read_unlock(); |
2129 | ||
bde8285e FW |
2130 | task_cputime(tsk, &utime, &stime); |
2131 | info.si_utime = nsec_to_clock_t(utime); | |
2132 | info.si_stime = nsec_to_clock_t(stime); | |
1da177e4 LT |
2133 | |
2134 | info.si_code = why; | |
2135 | switch (why) { | |
2136 | case CLD_CONTINUED: | |
2137 | info.si_status = SIGCONT; | |
2138 | break; | |
2139 | case CLD_STOPPED: | |
2140 | info.si_status = tsk->signal->group_exit_code & 0x7f; | |
2141 | break; | |
2142 | case CLD_TRAPPED: | |
2143 | info.si_status = tsk->exit_code & 0x7f; | |
2144 | break; | |
2145 | default: | |
2146 | BUG(); | |
2147 | } | |
2148 | ||
2149 | sighand = parent->sighand; | |
2150 | spin_lock_irqsave(&sighand->siglock, flags); | |
2151 | if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && | |
2152 | !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) | |
2153 | __group_send_sig_info(SIGCHLD, &info, parent); | |
2154 | /* | |
2155 | * Even if SIGCHLD is not generated, we must wake up wait4 calls. | |
2156 | */ | |
2157 | __wake_up_parent(tsk, parent); | |
2158 | spin_unlock_irqrestore(&sighand->siglock, flags); | |
2159 | } | |
2160 | ||
6527de95 | 2161 | static inline bool may_ptrace_stop(void) |
d5f70c00 | 2162 | { |
d21142ec | 2163 | if (!likely(current->ptrace)) |
6527de95 | 2164 | return false; |
d5f70c00 ON |
2165 | /* |
2166 | * Are we in the middle of do_coredump? | |
2167 | * If so and our tracer is also part of the coredump stopping | |
2168 | * is a deadlock situation, and pointless because our tracer | |
2169 | * is dead so don't allow us to stop. | |
2170 | * If SIGKILL was already sent before the caller unlocked | |
999d9fc1 | 2171 | * ->siglock we must see ->core_state != NULL. Otherwise it |
d5f70c00 | 2172 | * is safe to enter schedule(). |
9899d11f ON |
2173 | * |
2174 | * This is almost outdated: a task with a pending SIGKILL can't | |
2175 | * block in TASK_TRACED. But PTRACE_EVENT_EXIT can be reported | |
2176 | * after SIGKILL was already dequeued. | |
d5f70c00 | 2177 | */ |
999d9fc1 | 2178 | if (unlikely(current->mm->core_state) && |
d5f70c00 | 2179 | unlikely(current->mm == current->parent->mm)) |
6527de95 | 2180 | return false; |
d5f70c00 | 2181 | |
6527de95 | 2182 | return true; |
d5f70c00 ON |
2183 | } |
2184 | ||
1a669c2f | 2185 | /* |
5aba085e | 2186 | * Return non-zero if there is a SIGKILL that should be waking us up. |
1a669c2f RM |
2187 | * Called with the siglock held. |
2188 | */ | |
f99e9d8c | 2189 | static bool sigkill_pending(struct task_struct *tsk) |
1a669c2f | 2190 | { |
f99e9d8c CB |
2191 | return sigismember(&tsk->pending.signal, SIGKILL) || |
2192 | sigismember(&tsk->signal->shared_pending.signal, SIGKILL); | |
1a669c2f RM |
2193 | } |
2194 | ||
1da177e4 LT |
2195 | /* |
2196 | * This must be called with current->sighand->siglock held. | |
2197 | * | |
2198 | * This should be the path for all ptrace stops. | |
2199 | * We always set current->last_siginfo while stopped here. | |
2200 | * That makes it a way to test a stopped process for | |
2201 | * being ptrace-stopped vs being job-control-stopped. | |
2202 | * | |
20686a30 ON |
2203 | * If we actually decide not to stop at all because the tracer |
2204 | * is gone, we keep current->exit_code unless clear_code. | |
1da177e4 | 2205 | */ |
ae7795bc | 2206 | static void ptrace_stop(int exit_code, int why, int clear_code, kernel_siginfo_t *info) |
b8401150 NK |
2207 | __releases(¤t->sighand->siglock) |
2208 | __acquires(¤t->sighand->siglock) | |
1da177e4 | 2209 | { |
ceb6bd67 TH |
2210 | bool gstop_done = false; |
2211 | ||
1a669c2f RM |
2212 | if (arch_ptrace_stop_needed(exit_code, info)) { |
2213 | /* | |
2214 | * The arch code has something special to do before a | |
2215 | * ptrace stop. This is allowed to block, e.g. for faults | |
2216 | * on user stack pages. We can't keep the siglock while | |
2217 | * calling arch_ptrace_stop, so we must release it now. | |
2218 | * To preserve proper semantics, we must do this before | |
2219 | * any signal bookkeeping like checking group_stop_count. | |
2220 | * Meanwhile, a SIGKILL could come in before we retake the | |
2221 | * siglock. That must prevent us from sleeping in TASK_TRACED. | |
2222 | * So after regaining the lock, we must check for SIGKILL. | |
2223 | */ | |
2224 | spin_unlock_irq(¤t->sighand->siglock); | |
2225 | arch_ptrace_stop(exit_code, info); | |
2226 | spin_lock_irq(¤t->sighand->siglock); | |
3d749b9e ON |
2227 | if (sigkill_pending(current)) |
2228 | return; | |
1a669c2f RM |
2229 | } |
2230 | ||
b5bf9a90 PZ |
2231 | set_special_state(TASK_TRACED); |
2232 | ||
1da177e4 | 2233 | /* |
81be24b8 TH |
2234 | * We're committing to trapping. TRACED should be visible before |
2235 | * TRAPPING is cleared; otherwise, the tracer might fail do_wait(). | |
2236 | * Also, transition to TRACED and updates to ->jobctl should be | |
2237 | * atomic with respect to siglock and should be done after the arch | |
2238 | * hook as siglock is released and regrabbed across it. | |
b5bf9a90 PZ |
2239 | * |
2240 | * TRACER TRACEE | |
2241 | * | |
2242 | * ptrace_attach() | |
2243 | * [L] wait_on_bit(JOBCTL_TRAPPING) [S] set_special_state(TRACED) | |
2244 | * do_wait() | |
2245 | * set_current_state() smp_wmb(); | |
2246 | * ptrace_do_wait() | |
2247 | * wait_task_stopped() | |
2248 | * task_stopped_code() | |
2249 | * [L] task_is_traced() [S] task_clear_jobctl_trapping(); | |
1da177e4 | 2250 | */ |
b5bf9a90 | 2251 | smp_wmb(); |
1da177e4 LT |
2252 | |
2253 | current->last_siginfo = info; | |
2254 | current->exit_code = exit_code; | |
2255 | ||
d79fdd6d | 2256 | /* |
0ae8ce1c TH |
2257 | * If @why is CLD_STOPPED, we're trapping to participate in a group |
2258 | * stop. Do the bookkeeping. Note that if SIGCONT was delivered |
73ddff2b TH |
2259 | * across siglock relocks since INTERRUPT was scheduled, PENDING |
2260 | * could be clear now. We act as if SIGCONT is received after | |
2261 | * TASK_TRACED is entered - ignore it. | |
d79fdd6d | 2262 | */ |
a8f072c1 | 2263 | if (why == CLD_STOPPED && (current->jobctl & JOBCTL_STOP_PENDING)) |
ceb6bd67 | 2264 | gstop_done = task_participate_group_stop(current); |
d79fdd6d | 2265 | |
fb1d910c | 2266 | /* any trap clears pending STOP trap, STOP trap clears NOTIFY */ |
73ddff2b | 2267 | task_clear_jobctl_pending(current, JOBCTL_TRAP_STOP); |
fb1d910c TH |
2268 | if (info && info->si_code >> 8 == PTRACE_EVENT_STOP) |
2269 | task_clear_jobctl_pending(current, JOBCTL_TRAP_NOTIFY); | |
73ddff2b | 2270 | |
81be24b8 | 2271 | /* entering a trap, clear TRAPPING */ |
a8f072c1 | 2272 | task_clear_jobctl_trapping(current); |
d79fdd6d | 2273 | |
1da177e4 LT |
2274 | spin_unlock_irq(¤t->sighand->siglock); |
2275 | read_lock(&tasklist_lock); | |
3d749b9e | 2276 | if (may_ptrace_stop()) { |
ceb6bd67 TH |
2277 | /* |
2278 | * Notify parents of the stop. | |
2279 | * | |
2280 | * While ptraced, there are two parents - the ptracer and | |
2281 | * the real_parent of the group_leader. The ptracer should | |
2282 | * know about every stop while the real parent is only | |
2283 | * interested in the completion of group stop. The states | |
2284 | * for the two don't interact with each other. Notify | |
2285 | * separately unless they're gonna be duplicates. | |
2286 | */ | |
2287 | do_notify_parent_cldstop(current, true, why); | |
bb3696da | 2288 | if (gstop_done && ptrace_reparented(current)) |
ceb6bd67 TH |
2289 | do_notify_parent_cldstop(current, false, why); |
2290 | ||
53da1d94 MS |
2291 | /* |
2292 | * Don't want to allow preemption here, because | |
2293 | * sys_ptrace() needs this task to be inactive. | |
2294 | * | |
2295 | * XXX: implement read_unlock_no_resched(). | |
2296 | */ | |
2297 | preempt_disable(); | |
1da177e4 | 2298 | read_unlock(&tasklist_lock); |
76f969e8 | 2299 | cgroup_enter_frozen(); |
937c6b27 | 2300 | preempt_enable_no_resched(); |
5d8f72b5 | 2301 | freezable_schedule(); |
05b28926 | 2302 | cgroup_leave_frozen(true); |
1da177e4 LT |
2303 | } else { |
2304 | /* | |
2305 | * By the time we got the lock, our tracer went away. | |
6405f7f4 | 2306 | * Don't drop the lock yet, another tracer may come. |
ceb6bd67 TH |
2307 | * |
2308 | * If @gstop_done, the ptracer went away between group stop | |
2309 | * completion and here. During detach, it would have set | |
a8f072c1 TH |
2310 | * JOBCTL_STOP_PENDING on us and we'll re-enter |
2311 | * TASK_STOPPED in do_signal_stop() on return, so notifying | |
2312 | * the real parent of the group stop completion is enough. | |
1da177e4 | 2313 | */ |
ceb6bd67 TH |
2314 | if (gstop_done) |
2315 | do_notify_parent_cldstop(current, false, why); | |
2316 | ||
9899d11f | 2317 | /* tasklist protects us from ptrace_freeze_traced() */ |
6405f7f4 | 2318 | __set_current_state(TASK_RUNNING); |
20686a30 ON |
2319 | if (clear_code) |
2320 | current->exit_code = 0; | |
6405f7f4 | 2321 | read_unlock(&tasklist_lock); |
1da177e4 LT |
2322 | } |
2323 | ||
2324 | /* | |
2325 | * We are back. Now reacquire the siglock before touching | |
2326 | * last_siginfo, so that we are sure to have synchronized with | |
2327 | * any signal-sending on another CPU that wants to examine it. | |
2328 | */ | |
2329 | spin_lock_irq(¤t->sighand->siglock); | |
2330 | current->last_siginfo = NULL; | |
2331 | ||
544b2c91 TH |
2332 | /* LISTENING can be set only during STOP traps, clear it */ |
2333 | current->jobctl &= ~JOBCTL_LISTENING; | |
2334 | ||
1da177e4 LT |
2335 | /* |
2336 | * Queued signals ignored us while we were stopped for tracing. | |
2337 | * So check for any that we should take before resuming user mode. | |
b74d0deb | 2338 | * This sets TIF_SIGPENDING, but never clears it. |
1da177e4 | 2339 | */ |
b74d0deb | 2340 | recalc_sigpending_tsk(current); |
1da177e4 LT |
2341 | } |
2342 | ||
3544d72a | 2343 | static void ptrace_do_notify(int signr, int exit_code, int why) |
1da177e4 | 2344 | { |
ae7795bc | 2345 | kernel_siginfo_t info; |
1da177e4 | 2346 | |
faf1f22b | 2347 | clear_siginfo(&info); |
3544d72a | 2348 | info.si_signo = signr; |
1da177e4 | 2349 | info.si_code = exit_code; |
b488893a | 2350 | info.si_pid = task_pid_vnr(current); |
078de5f7 | 2351 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
1da177e4 LT |
2352 | |
2353 | /* Let the debugger run. */ | |
3544d72a TH |
2354 | ptrace_stop(exit_code, why, 1, &info); |
2355 | } | |
2356 | ||
2357 | void ptrace_notify(int exit_code) | |
2358 | { | |
2359 | BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP); | |
f784e8a7 ON |
2360 | if (unlikely(current->task_works)) |
2361 | task_work_run(); | |
3544d72a | 2362 | |
1da177e4 | 2363 | spin_lock_irq(¤t->sighand->siglock); |
3544d72a | 2364 | ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); |
1da177e4 LT |
2365 | spin_unlock_irq(¤t->sighand->siglock); |
2366 | } | |
2367 | ||
73ddff2b TH |
2368 | /** |
2369 | * do_signal_stop - handle group stop for SIGSTOP and other stop signals | |
2370 | * @signr: signr causing group stop if initiating | |
2371 | * | |
2372 | * If %JOBCTL_STOP_PENDING is not set yet, initiate group stop with @signr | |
2373 | * and participate in it. If already set, participate in the existing | |
2374 | * group stop. If participated in a group stop (and thus slept), %true is | |
2375 | * returned with siglock released. | |
2376 | * | |
2377 | * If ptraced, this function doesn't handle stop itself. Instead, | |
2378 | * %JOBCTL_TRAP_STOP is scheduled and %false is returned with siglock | |
2379 | * untouched. The caller must ensure that INTERRUPT trap handling takes | |
2380 | * place afterwards. | |
2381 | * | |
2382 | * CONTEXT: | |
2383 | * Must be called with @current->sighand->siglock held, which is released | |
2384 | * on %true return. | |
2385 | * | |
2386 | * RETURNS: | |
2387 | * %false if group stop is already cancelled or ptrace trap is scheduled. | |
2388 | * %true if participated in group stop. | |
1da177e4 | 2389 | */ |
73ddff2b TH |
2390 | static bool do_signal_stop(int signr) |
2391 | __releases(¤t->sighand->siglock) | |
1da177e4 LT |
2392 | { |
2393 | struct signal_struct *sig = current->signal; | |
1da177e4 | 2394 | |
a8f072c1 | 2395 | if (!(current->jobctl & JOBCTL_STOP_PENDING)) { |
b76808e6 | 2396 | unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME; |
f558b7e4 ON |
2397 | struct task_struct *t; |
2398 | ||
a8f072c1 TH |
2399 | /* signr will be recorded in task->jobctl for retries */ |
2400 | WARN_ON_ONCE(signr & ~JOBCTL_STOP_SIGMASK); | |
d79fdd6d | 2401 | |
a8f072c1 | 2402 | if (!likely(current->jobctl & JOBCTL_STOP_DEQUEUED) || |
573cf9ad | 2403 | unlikely(signal_group_exit(sig))) |
73ddff2b | 2404 | return false; |
1da177e4 | 2405 | /* |
408a37de TH |
2406 | * There is no group stop already in progress. We must |
2407 | * initiate one now. | |
2408 | * | |
2409 | * While ptraced, a task may be resumed while group stop is | |
2410 | * still in effect and then receive a stop signal and | |
2411 | * initiate another group stop. This deviates from the | |
2412 | * usual behavior as two consecutive stop signals can't | |
780006ea ON |
2413 | * cause two group stops when !ptraced. That is why we |
2414 | * also check !task_is_stopped(t) below. | |
408a37de TH |
2415 | * |
2416 | * The condition can be distinguished by testing whether | |
2417 | * SIGNAL_STOP_STOPPED is already set. Don't generate | |
2418 | * group_exit_code in such case. | |
2419 | * | |
2420 | * This is not necessary for SIGNAL_STOP_CONTINUED because | |
2421 | * an intervening stop signal is required to cause two | |
2422 | * continued events regardless of ptrace. | |
1da177e4 | 2423 | */ |
408a37de TH |
2424 | if (!(sig->flags & SIGNAL_STOP_STOPPED)) |
2425 | sig->group_exit_code = signr; | |
1da177e4 | 2426 | |
7dd3db54 TH |
2427 | sig->group_stop_count = 0; |
2428 | ||
2429 | if (task_set_jobctl_pending(current, signr | gstop)) | |
2430 | sig->group_stop_count++; | |
1da177e4 | 2431 | |
8d38f203 ON |
2432 | t = current; |
2433 | while_each_thread(current, t) { | |
1da177e4 | 2434 | /* |
a122b341 ON |
2435 | * Setting state to TASK_STOPPED for a group |
2436 | * stop is always done with the siglock held, | |
2437 | * so this check has no races. | |
1da177e4 | 2438 | */ |
7dd3db54 TH |
2439 | if (!task_is_stopped(t) && |
2440 | task_set_jobctl_pending(t, signr | gstop)) { | |
ae6d2ed7 | 2441 | sig->group_stop_count++; |
fb1d910c TH |
2442 | if (likely(!(t->ptrace & PT_SEIZED))) |
2443 | signal_wake_up(t, 0); | |
2444 | else | |
2445 | ptrace_trap_notify(t); | |
a122b341 | 2446 | } |
d79fdd6d | 2447 | } |
1da177e4 | 2448 | } |
73ddff2b | 2449 | |
d21142ec | 2450 | if (likely(!current->ptrace)) { |
5224fa36 | 2451 | int notify = 0; |
1da177e4 | 2452 | |
5224fa36 TH |
2453 | /* |
2454 | * If there are no other threads in the group, or if there | |
2455 | * is a group stop in progress and we are the last to stop, | |
2456 | * report to the parent. | |
2457 | */ | |
2458 | if (task_participate_group_stop(current)) | |
2459 | notify = CLD_STOPPED; | |
2460 | ||
b5bf9a90 | 2461 | set_special_state(TASK_STOPPED); |
5224fa36 TH |
2462 | spin_unlock_irq(¤t->sighand->siglock); |
2463 | ||
62bcf9d9 TH |
2464 | /* |
2465 | * Notify the parent of the group stop completion. Because | |
2466 | * we're not holding either the siglock or tasklist_lock | |
2467 | * here, the ptracer may attach in between; however, this is for | |
2468 | * group stop and should always be delivered to the real | |
2469 | * parent of the group leader. The new ptracer will get | |
2470 | * its notification when this task transitions into | |
2471 | * TASK_TRACED. | |
2472 | */ | |
5224fa36 TH |
2473 | if (notify) { |
2474 | read_lock(&tasklist_lock); | |
62bcf9d9 | 2475 | do_notify_parent_cldstop(current, false, notify); |
5224fa36 TH |
2476 | read_unlock(&tasklist_lock); |
2477 | } | |
2478 | ||
2479 | /* Now we don't run again until woken by SIGCONT or SIGKILL */ | |
76f969e8 | 2480 | cgroup_enter_frozen(); |
5d8f72b5 | 2481 | freezable_schedule(); |
73ddff2b | 2482 | return true; |
d79fdd6d | 2483 | } else { |
73ddff2b TH |
2484 | /* |
2485 | * While ptraced, group stop is handled by STOP trap. | |
2486 | * Schedule it and let the caller deal with it. | |
2487 | */ | |
2488 | task_set_jobctl_pending(current, JOBCTL_TRAP_STOP); | |
2489 | return false; | |
ae6d2ed7 | 2490 | } |
73ddff2b | 2491 | } |
1da177e4 | 2492 | |
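The stopped/continued notifications generated by this path are what a parent consumes through the WUNTRACED and WCONTINUED flags of waitpid(2):

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t child = fork();

	if (child == 0) {
		pause();
		_exit(0);
	}

	kill(child, SIGSTOP);
	waitpid(child, &status, WUNTRACED);
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(child, SIGCONT);
	waitpid(child, &status, WCONTINUED);
	if (WIFCONTINUED(status))
		puts("child continued");

	kill(child, SIGKILL);
	waitpid(child, &status, 0);
	return 0;
}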
73ddff2b TH |
2493 | /** |
2494 | * do_jobctl_trap - take care of ptrace jobctl traps | |
2495 | * | |
3544d72a TH |
2496 | * When PT_SEIZED, it's used for both group stop and explicit |
2497 | * SEIZE/INTERRUPT traps. Both generate PTRACE_EVENT_STOP trap with | |
2498 | * accompanying siginfo. If stopped, lower eight bits of exit_code contain | |
2499 | * the stop signal; otherwise, %SIGTRAP. | |
2500 | * | |
2501 | * When !PT_SEIZED, it's used only for group stop trap with stop signal | |
2502 | * number as exit_code and no siginfo. | |
73ddff2b TH |
2503 | * |
2504 | * CONTEXT: | |
2505 | * Must be called with @current->sighand->siglock held, which may be | |
2506 | * released and re-acquired before returning with intervening sleep. | |
2507 | */ | |
2508 | static void do_jobctl_trap(void) | |
2509 | { | |
3544d72a | 2510 | struct signal_struct *signal = current->signal; |
73ddff2b | 2511 | int signr = current->jobctl & JOBCTL_STOP_SIGMASK; |
ae6d2ed7 | 2512 | |
3544d72a TH |
2513 | if (current->ptrace & PT_SEIZED) { |
2514 | if (!signal->group_stop_count && | |
2515 | !(signal->flags & SIGNAL_STOP_STOPPED)) | |
2516 | signr = SIGTRAP; | |
2517 | WARN_ON_ONCE(!signr); | |
2518 | ptrace_do_notify(signr, signr | (PTRACE_EVENT_STOP << 8), | |
2519 | CLD_STOPPED); | |
2520 | } else { | |
2521 | WARN_ON_ONCE(!signr); | |
2522 | ptrace_stop(signr, CLD_STOPPED, 0, NULL); | |
2523 | current->exit_code = 0; | |
ae6d2ed7 | 2524 | } |
1da177e4 LT |
2525 | } |
2526 | ||
76f969e8 RG |
2527 | /** |
2528 | * do_freezer_trap - handle the freezer jobctl trap | |
2529 | * | |
2530 | * Puts the task into the frozen state, unless the task is about to quit. | |
2531 | * In that case it drops JOBCTL_TRAP_FREEZE. | |
2532 | * | |
2533 | * CONTEXT: | |
2534 | * Must be called with @current->sighand->siglock held, | |
2535 | * which is always released before returning. | |
2536 | */ | |
2537 | static void do_freezer_trap(void) | |
2538 | __releases(¤t->sighand->siglock) | |
2539 | { | |
2540 | /* | |
2541 | * If there are trap bits pending other than JOBCTL_TRAP_FREEZE, | |
2542 | * let's make another loop to give it a chance to be handled. | |
2543 | * In any case, we'll return. | |
2544 | */ | |
2545 | if ((current->jobctl & (JOBCTL_PENDING_MASK | JOBCTL_TRAP_FREEZE)) != | |
2546 | JOBCTL_TRAP_FREEZE) { | |
2547 | spin_unlock_irq(¤t->sighand->siglock); | |
2548 | return; | |
2549 | } | |
2550 | ||
2551 | /* | |
2552 | * Now we're sure that there is no pending fatal signal and no | |
2553 | * pending traps. Clear TIF_SIGPENDING to not get out of schedule() | |
2554 | * immediately (if there is a non-fatal signal pending), and | |
2555 | * put the task into sleep. | |
2556 | */ | |
2557 | __set_current_state(TASK_INTERRUPTIBLE); | |
2558 | clear_thread_flag(TIF_SIGPENDING); | |
2559 | spin_unlock_irq(¤t->sighand->siglock); | |
2560 | cgroup_enter_frozen(); | |
2561 | freezable_schedule(); | |
2562 | } | |
2563 | ||
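JOBCTL_TRAP_FREEZE is driven by the cgroup v2 freezer: writing 1 to a cgroup's cgroup.freeze file sends every member task into do_freezer_trap(). A sketch from userspace; the cgroup path is an assumption and requires write access:

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/fs/cgroup/example/cgroup.freeze", O_WRONLY);

	if (fd >= 0) {
		write(fd, "1", 1);	/* member tasks trap in do_freezer_trap() */
		close(fd);
	}
	return 0;
}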
ae7795bc | 2564 | static int ptrace_signal(int signr, kernel_siginfo_t *info) |
18c98b65 | 2565 | { |
8a352418 ON |
2566 | /* |
2567 | * We do not check sig_kernel_stop(signr) but set this marker | |
2568 | * unconditionally because we do not know whether the debugger will | |
2569 | * change signr. This flag has no meaning unless we are going | |
2570 | * to stop after return from ptrace_stop(). In this case it will | |
2571 | * be checked in do_signal_stop(), we should only stop if it was | |
2572 | * not cleared by SIGCONT while we were sleeping. See also the | |
2573 | * comment in dequeue_signal(). | |
2574 | */ | |
2575 | current->jobctl |= JOBCTL_STOP_DEQUEUED; | |
fe1bc6a0 | 2576 | ptrace_stop(signr, CLD_TRAPPED, 0, info); |
18c98b65 RM |
2577 | |
2578 | /* We're back. Did the debugger cancel the sig? */ | |
2579 | signr = current->exit_code; | |
2580 | if (signr == 0) | |
2581 | return signr; | |
2582 | ||
2583 | current->exit_code = 0; | |
2584 | ||
5aba085e RD |
2585 | /* |
2586 | * Update the siginfo structure if the signal has | |
2587 | * changed. If the debugger wanted something | |
2588 | * specific in the siginfo structure then it should | |
2589 | * have updated *info via PTRACE_SETSIGINFO. | |
2590 | */ | |
18c98b65 | 2591 | if (signr != info->si_signo) { |
faf1f22b | 2592 | clear_siginfo(info); |
18c98b65 RM |
2593 | info->si_signo = signr; |
2594 | info->si_errno = 0; | |
2595 | info->si_code = SI_USER; | |
6b550f94 | 2596 | rcu_read_lock(); |
18c98b65 | 2597 | info->si_pid = task_pid_vnr(current->parent); |
54ba47ed EB |
2598 | info->si_uid = from_kuid_munged(current_user_ns(), |
2599 | task_uid(current->parent)); | |
6b550f94 | 2600 | rcu_read_unlock(); |
18c98b65 RM |
2601 | } |
2602 | ||
2603 | /* If the (new) signal is now blocked, requeue it. */ | |
2604 | if (sigismember(¤t->blocked, signr)) { | |
b21c5bd5 | 2605 | send_signal(signr, info, current, PIDTYPE_PID); |
18c98b65 RM |
2606 | signr = 0; |
2607 | } | |
2608 | ||
2609 | return signr; | |
2610 | } | |
2611 | ||
6ac05e83 PC |
2612 | static void hide_si_addr_tag_bits(struct ksignal *ksig) |
2613 | { | |
2614 | switch (siginfo_layout(ksig->sig, ksig->info.si_code)) { | |
2615 | case SIL_FAULT: | |
9abcabe3 | 2616 | case SIL_FAULT_TRAPNO: |
6ac05e83 PC |
2617 | case SIL_FAULT_MCEERR: |
2618 | case SIL_FAULT_BNDERR: | |
2619 | case SIL_FAULT_PKUERR: | |
f4ac7302 | 2620 | case SIL_FAULT_PERF_EVENT: |
6ac05e83 PC |
2621 | ksig->info.si_addr = arch_untagged_si_addr( |
2622 | ksig->info.si_addr, ksig->sig, ksig->info.si_code); | |
2623 | break; | |
2624 | case SIL_KILL: | |
2625 | case SIL_TIMER: | |
2626 | case SIL_POLL: | |
2627 | case SIL_CHLD: | |
2628 | case SIL_RT: | |
2629 | case SIL_SYS: | |
2630 | break; | |
2631 | } | |
2632 | } | |
2633 | ||
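A handler can opt out of this stripping with SA_EXPOSE_TAGBITS (Linux 5.11+); the constant may need to come from the kernel uapi headers if the libc does not define it:

#include <signal.h>

#ifndef SA_EXPOSE_TAGBITS
#define SA_EXPOSE_TAGBITS 0x00000800	/* value from the kernel uapi headers */
#endif

static void segv_handler(int sig, siginfo_t *si, void *ucontext)
{
	(void)sig; (void)si; (void)ucontext;
	/* si->si_addr now keeps its architecture tag bits (e.g. ARM MTE) */
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction = segv_handler,
		.sa_flags = SA_SIGINFO | SA_EXPOSE_TAGBITS,
	};

	sigaction(SIGSEGV, &sa, NULL);
	return 0;
}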
20ab7218 | 2634 | bool get_signal(struct ksignal *ksig) |
1da177e4 | 2635 | { |
f6b76d4f ON |
2636 | struct sighand_struct *sighand = current->sighand; |
2637 | struct signal_struct *signal = current->signal; | |
2638 | int signr; | |
1da177e4 | 2639 | |
35d0b389 JA |
2640 | if (unlikely(current->task_works)) |
2641 | task_work_run(); | |
2642 | ||
12db8b69 JA |
2643 | /* |
2644 | * For non-generic architectures, check for TIF_NOTIFY_SIGNAL so | |
2645 | * that the arch handlers don't all have to do it. If we get here | |
2646 | * without TIF_SIGPENDING, just exit after running signal work. | |
2647 | */ | |
12db8b69 JA |
2648 | if (!IS_ENABLED(CONFIG_GENERIC_ENTRY)) { |
2649 | if (test_thread_flag(TIF_NOTIFY_SIGNAL)) | |
2650 | tracehook_notify_signal(); | |
2651 | if (!task_sigpending(current)) | |
2652 | return false; | |
2653 | } | |
12db8b69 | 2654 | |
0326f5a9 | 2655 | if (unlikely(uprobe_deny_signal())) |
20ab7218 | 2656 | return false; |
0326f5a9 | 2657 | |
13b1c3d4 | 2658 | /* |
5d8f72b5 ON |
2659 | * Do this once, we can't return to user-mode if freezing() == T. |
2660 | * do_signal_stop() and ptrace_stop() do freezable_schedule() and | |
2661 | * thus do not need another check after return. | |
13b1c3d4 | 2662 | */ |
fc558a74 RW |
2663 | try_to_freeze(); |
2664 | ||
5d8f72b5 | 2665 | relock: |
f6b76d4f | 2666 | spin_lock_irq(&sighand->siglock); |
e91b4816 | 2667 | |
021e1ae3 ON |
2668 | /* |
2669 | * Every stopped thread goes here after wakeup. Check to see if | |
2670 | * we should notify the parent, prepare_signal(SIGCONT) encodes | |
2671 | * the CLD_ si_code into SIGNAL_CLD_MASK bits. | |
2672 | */ | |
f6b76d4f | 2673 | if (unlikely(signal->flags & SIGNAL_CLD_MASK)) { |
c672af35 TH |
2674 | int why; |
2675 | ||
2676 | if (signal->flags & SIGNAL_CLD_CONTINUED) | |
2677 | why = CLD_CONTINUED; | |
2678 | else | |
2679 | why = CLD_STOPPED; | |
2680 | ||
f6b76d4f | 2681 | signal->flags &= ~SIGNAL_CLD_MASK; |
e4420551 | 2682 | |
ae6d2ed7 | 2683 | spin_unlock_irq(&sighand->siglock); |
fa00b80b | 2684 | |
ceb6bd67 TH |
2685 | /* |
2686 | * Notify the parent that we're continuing. This event is | |
2687 | * always per-process and doesn't make a whole lot of sense | |
2688 | * for ptracers, who shouldn't consume the state via | |
2689 | * wait(2) either, but, for backward compatibility, notify | |
2690 | * the ptracer of the group leader too unless it's gonna be | |
2691 | * a duplicate. | |
2692 | */ | |
edf2ed15 | 2693 | read_lock(&tasklist_lock); |
ceb6bd67 TH |
2694 | do_notify_parent_cldstop(current, false, why); |
2695 | ||
bb3696da ON |
2696 | if (ptrace_reparented(current->group_leader)) |
2697 | do_notify_parent_cldstop(current->group_leader, | |
2698 | true, why); | |
edf2ed15 | 2699 | read_unlock(&tasklist_lock); |
ceb6bd67 | 2700 | |
e4420551 ON |
2701 | goto relock; |
2702 | } | |
2703 | ||
35634ffa | 2704 | /* Has this task already been marked for death? */ |
cf43a757 EB |
2705 | if (signal_group_exit(signal)) { |
2706 | ksig->info.si_signo = signr = SIGKILL; | |
2707 | sigdelset(¤t->pending.signal, SIGKILL); | |
98af37d6 ZW |
2708 | trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO, |
2709 | &sighand->action[SIGKILL - 1]); | |
cf43a757 | 2710 | recalc_sigpending(); |
35634ffa | 2711 | goto fatal; |
cf43a757 | 2712 | } |
35634ffa | 2713 | |
1da177e4 LT |
2714 | for (;;) { |
2715 | struct k_sigaction *ka; | |
1be53963 | 2716 | |
dd1d6772 TH |
2717 | if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) && |
2718 | do_signal_stop(0)) | |
7bcf6a2c | 2719 | goto relock; |
1be53963 | 2720 | |
76f969e8 RG |
2721 | if (unlikely(current->jobctl & |
2722 | (JOBCTL_TRAP_MASK | JOBCTL_TRAP_FREEZE))) { | |
2723 | if (current->jobctl & JOBCTL_TRAP_MASK) { | |
2724 | do_jobctl_trap(); | |
2725 | spin_unlock_irq(&sighand->siglock); | |
2726 | } else if (current->jobctl & JOBCTL_TRAP_FREEZE) | |
2727 | do_freezer_trap(); | |
2728 | ||
2729 | goto relock; | |
2730 | } | |
2731 | ||
2732 | /* | |
2733 | * If the task is leaving the frozen state, let's update | |
2734 | * cgroup counters and reset the frozen bit. | |
2735 | */ | |
2736 | if (unlikely(cgroup_task_frozen(current))) { | |
73ddff2b | 2737 | spin_unlock_irq(&sighand->siglock); |
cb2c4cd8 | 2738 | cgroup_leave_frozen(false); |
73ddff2b TH |
2739 | goto relock; |
2740 | } | |
1da177e4 | 2741 | |
7146db33 EB |
2742 | /* |
2743 | * Signals generated by the execution of an instruction | |
2744 | * need to be delivered before any other pending signals | |
2745 | * so that the instruction pointer in the signal stack | |
2746 | * frame points to the faulting instruction. | |
2747 | */ | |
2748 | signr = dequeue_synchronous_signal(&ksig->info); | |
2749 | if (!signr) | |
2750 | signr = dequeue_signal(current, ¤t->blocked, &ksig->info); | |
7bcf6a2c | 2751 | |
dd1d6772 TH |
2752 | if (!signr) |
2753 | break; /* will return 0 */ | |
7bcf6a2c | 2754 | |
8a352418 | 2755 | if (unlikely(current->ptrace) && signr != SIGKILL) { |
828b1f65 | 2756 | signr = ptrace_signal(signr, &ksig->info); |
dd1d6772 TH |
2757 | if (!signr) |
2758 | continue; | |
1da177e4 LT |
2759 | } |
2760 | ||
dd1d6772 TH |
2761 | ka = &sighand->action[signr-1]; |
2762 | ||
f9d4257e | 2763 | /* Trace actually delivered signals. */ |
828b1f65 | 2764 | trace_signal_deliver(signr, &ksig->info, ka); |
f9d4257e | 2765 | |
1da177e4 LT |
2766 | if (ka->sa.sa_handler == SIG_IGN) /* Do nothing. */ |
2767 | continue; | |
2768 | if (ka->sa.sa_handler != SIG_DFL) { | |
2769 | /* Run the handler. */ | |
828b1f65 | 2770 | ksig->ka = *ka; |
1da177e4 LT |
2771 | |
2772 | if (ka->sa.sa_flags & SA_ONESHOT) | |
2773 | ka->sa.sa_handler = SIG_DFL; | |
2774 | ||
2775 | break; /* will return non-zero "signr" value */ | |
2776 | } | |
2777 | ||
2778 | /* | |
2779 | * Now we are doing the default action for this signal. | |
2780 | */ | |
2781 | if (sig_kernel_ignore(signr)) /* Default is nothing. */ | |
2782 | continue; | |
2783 | ||
84d73786 | 2784 | /* |
0fbc26a6 | 2785 | * Global init gets no signals it doesn't want. |
b3bfa0cb SB |
2786 | * Container-init gets no signals it doesn't want from same |
2787 | * container. | |
2788 | * | |
2789 | * Note that if global/container-init sees a sig_kernel_only() | |
2790 | * signal here, the signal must have been generated internally | |
2791 | * or must have come from an ancestor namespace. In either | |
2792 | * case, the signal cannot be dropped. | |
84d73786 | 2793 | */ |
fae5fa44 | 2794 | if (unlikely(signal->flags & SIGNAL_UNKILLABLE) && |
b3bfa0cb | 2795 | !sig_kernel_only(signr)) |
1da177e4 LT |
2796 | continue; |
2797 | ||
2798 | if (sig_kernel_stop(signr)) { | |
2799 | /* | |
2800 | * The default action is to stop all threads in | |
2801 | * the thread group. The job control signals | |
2802 | * do nothing in an orphaned pgrp, but SIGSTOP | |
2803 | * always works. Note that siglock needs to be | |
2804 | * dropped during the call to is_orphaned_pgrp() | |
2805 | * because of lock ordering with tasklist_lock. | |
2806 | * This allows an intervening SIGCONT to be posted. | |
2807 | * We need to check for that and bail out if necessary. | |
2808 | */ | |
2809 | if (signr != SIGSTOP) { | |
f6b76d4f | 2810 | spin_unlock_irq(&sighand->siglock); |
1da177e4 LT |
2811 | |
2812 | /* signals can be posted during this window */ | |
2813 | ||
3e7cd6c4 | 2814 | if (is_current_pgrp_orphaned()) |
1da177e4 LT |
2815 | goto relock; |
2816 | ||
f6b76d4f | 2817 | spin_lock_irq(&sighand->siglock); |
1da177e4 LT |
2818 | } |
2819 | ||
828b1f65 | 2820 | if (likely(do_signal_stop(ksig->info.si_signo))) { |
1da177e4 LT |
2821 | /* It released the siglock. */ |
2822 | goto relock; | |
2823 | } | |
2824 | ||
2825 | /* | |
2826 | * We didn't actually stop, due to a race | |
2827 | * with SIGCONT or something like that. | |
2828 | */ | |
2829 | continue; | |
2830 | } | |
2831 | ||
35634ffa | 2832 | fatal: |
f6b76d4f | 2833 | spin_unlock_irq(&sighand->siglock); |
f2b31bb5 RG |
2834 | if (unlikely(cgroup_task_frozen(current))) |
2835 | cgroup_leave_frozen(true); | |
1da177e4 LT |
2836 | |
2837 | /* | |
2838 | * Anything else is fatal, maybe with a core dump. | |
2839 | */ | |
2840 | current->flags |= PF_SIGNALED; | |
2dce81bf | 2841 | |
1da177e4 | 2842 | if (sig_kernel_coredump(signr)) { |
2dce81bf | 2843 | if (print_fatal_signals) |
828b1f65 | 2844 | print_fatal_signal(ksig->info.si_signo); |
2b5faa4c | 2845 | proc_coredump_connector(current); |
1da177e4 LT |
2846 | /* |
2847 | * If it was able to dump core, this kills all | |
2848 | * other threads in the group and synchronizes with | |
2849 | * their demise. If we lost the race with another | |
2850 | * thread getting here, it set group_exit_code | |
2851 | * first and our do_group_exit call below will use | |
2852 | * that value and ignore the one we pass it. | |
2853 | */ | |
828b1f65 | 2854 | do_coredump(&ksig->info); |
1da177e4 LT |
2855 | } |
2856 | ||
10442994 JA |
2857 | /* |
2858 | * PF_IO_WORKER threads will catch and exit on fatal signals | |
2859 | * themselves. They have cleanup that must be performed, so | |
2860 | * we cannot call do_exit() on their behalf. | |
2861 | */ | |
2862 | if (current->flags & PF_IO_WORKER) | |
2863 | goto out; | |
2864 | ||
1da177e4 LT |
2865 | /* |
2866 | * Death signals, no core dump. | |
2867 | */ | |
828b1f65 | 2868 | do_group_exit(ksig->info.si_signo); |
1da177e4 LT |
2869 | /* NOTREACHED */ |
2870 | } | |
f6b76d4f | 2871 | spin_unlock_irq(&sighand->siglock); |
10442994 | 2872 | out: |
828b1f65 | 2873 | ksig->sig = signr; |
6ac05e83 PC |
2874 | |
2875 | if (!(ksig->ka.sa.sa_flags & SA_EXPOSE_TAGBITS)) | |
2876 | hide_si_addr_tag_bits(ksig); | |
2877 | ||
828b1f65 | 2878 | return ksig->sig > 0; |
1da177e4 LT |
2879 | } |
2880 | ||
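For illustration, a minimal userspace sketch (not part of signal.c) of what the SA_ONESHOT branch above means in practice; userspace knows the flag as SA_RESETHAND, and this hedged example simply checks that the disposition reverts to SIG_DFL after one delivery, assuming a POSIX environment.

#include <signal.h>
#include <stdio.h>

static void once(int sig)
{
	(void)sig;	/* after this handler runs, the disposition reverts to SIG_DFL */
}

int main(void)
{
	struct sigaction sa = { 0 }, cur;

	sa.sa_handler = once;
	sa.sa_flags = SA_RESETHAND;		/* userspace name for SA_ONESHOT */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);

	raise(SIGUSR1);				/* first delivery runs once() */
	sigaction(SIGUSR1, NULL, &cur);		/* read back the current disposition */
	printf("reset to SIG_DFL: %d\n", cur.sa_handler == SIG_DFL);	/* prints 1 */
	return 0;
}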
5e6292c0 | 2881 | /** |
efee984c | 2882 | * signal_delivered - finish bookkeeping after a signal has been delivered |
10b1c7ac | 2883 | * @ksig: kernel signal struct |
efee984c | 2884 | * @stepping: nonzero if debugger single-step or block-step in use |
5e6292c0 | 2885 | * |
e227867f | 2886 | * This function should be called when a signal has successfully been |
10b1c7ac | 2887 | * delivered. It updates the blocked signals accordingly (@ksig->ka.sa.sa_mask |
efee984c | 2888 | * is always blocked, and the signal itself is blocked unless %SA_NODEFER |
10b1c7ac | 2889 | * is set in @ksig->ka.sa.sa_flags. Tracing is notified. |
5e6292c0 | 2890 | */ |
10b1c7ac | 2891 | static void signal_delivered(struct ksignal *ksig, int stepping) |
5e6292c0 MF |
2892 | { |
2893 | sigset_t blocked; | |
2894 | ||
a610d6e6 AV |
2895 | /* A signal was successfully delivered, and the |
2896 | saved sigmask was stored on the signal frame, | |
2897 | and will be restored by sigreturn. So we can | |
2898 | simply clear the restore sigmask flag. */ | |
2899 | clear_restore_sigmask(); | |
2900 | ||
10b1c7ac RW |
2901 | sigorsets(&blocked, ¤t->blocked, &ksig->ka.sa.sa_mask); |
2902 | if (!(ksig->ka.sa.sa_flags & SA_NODEFER)) | |
2903 | sigaddset(&blocked, ksig->sig); | |
5e6292c0 | 2904 | set_current_blocked(&blocked); |
97c885d5 AV |
2905 | if (current->sas_ss_flags & SS_AUTODISARM) |
2906 | sas_ss_reset(current); | |
df5601f9 | 2907 | tracehook_signal_handler(stepping); |
5e6292c0 MF |
2908 | } |
2909 | ||
2ce5da17 AV |
2910 | void signal_setup_done(int failed, struct ksignal *ksig, int stepping) |
2911 | { | |
2912 | if (failed) | |
cb44c9a0 | 2913 | force_sigsegv(ksig->sig); |
2ce5da17 | 2914 | else |
10b1c7ac | 2915 | signal_delivered(ksig, stepping); |
2ce5da17 AV |
2916 | } |
2917 | ||
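For illustration, a minimal userspace sketch (not part of signal.c) of the blocking behaviour signal_delivered() implements: while the handler runs, the delivered signal is blocked unless SA_NODEFER was set, and everything in sa_mask is blocked as well. A hedged sketch, assuming a POSIX environment.

#include <signal.h>
#include <unistd.h>
#include <stdio.h>

static void handler(int sig)
{
	/* While this handler runs, SIGUSR1 itself is blocked (no SA_NODEFER)
	 * and SIGUSR2 is blocked because it is in sa_mask. */
	(void)sig;
	write(STDOUT_FILENO, "got SIGUSR1\n", 12);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_handler = handler;
	sigemptyset(&sa.sa_mask);
	sigaddset(&sa.sa_mask, SIGUSR2);	/* extra signal to block during delivery */
	sa.sa_flags = 0;			/* add SA_NODEFER to leave SIGUSR1 unblocked */
	if (sigaction(SIGUSR1, &sa, NULL) == -1) {
		perror("sigaction");
		return 1;
	}
	raise(SIGUSR1);
	return 0;
}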
0edceb7b ON |
2918 | /* |
2919 | * It could be that complete_signal() picked us to notify about the | |
fec9993d ON |
2920 | * group-wide signal. Other threads should be notified now to take |
2921 | * the shared signals in @which since we will not. | |
0edceb7b | 2922 | */ |
f646e227 | 2923 | static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which) |
0edceb7b | 2924 | { |
f646e227 | 2925 | sigset_t retarget; |
0edceb7b ON |
2926 | struct task_struct *t; |
2927 | ||
f646e227 ON |
2928 | sigandsets(&retarget, &tsk->signal->shared_pending.signal, which); |
2929 | if (sigisemptyset(&retarget)) | |
2930 | return; | |
2931 | ||
0edceb7b ON |
2932 | t = tsk; |
2933 | while_each_thread(tsk, t) { | |
fec9993d ON |
2934 | if (t->flags & PF_EXITING) |
2935 | continue; | |
2936 | ||
2937 | if (!has_pending_signals(&retarget, &t->blocked)) | |
2938 | continue; | |
2939 | /* Remove the signals this thread can handle. */ | |
2940 | sigandsets(&retarget, &retarget, &t->blocked); | |
2941 | ||
5c251e9d | 2942 | if (!task_sigpending(t)) |
fec9993d ON |
2943 | signal_wake_up(t, 0); |
2944 | ||
2945 | if (sigisemptyset(&retarget)) | |
2946 | break; | |
0edceb7b ON |
2947 | } |
2948 | } | |
2949 | ||
d12619b5 ON |
2950 | void exit_signals(struct task_struct *tsk) |
2951 | { | |
2952 | int group_stop = 0; | |
f646e227 | 2953 | sigset_t unblocked; |
d12619b5 | 2954 | |
77e4ef99 TH |
2955 | /* |
2956 | * @tsk is about to have PF_EXITING set - lock out users which | |
2957 | * expect stable threadgroup. | |
2958 | */ | |
780de9dd | 2959 | cgroup_threadgroup_change_begin(tsk); |
77e4ef99 | 2960 | |
5dee1707 ON |
2961 | if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) { |
2962 | tsk->flags |= PF_EXITING; | |
780de9dd | 2963 | cgroup_threadgroup_change_end(tsk); |
5dee1707 | 2964 | return; |
d12619b5 ON |
2965 | } |
2966 | ||
5dee1707 | 2967 | spin_lock_irq(&tsk->sighand->siglock); |
d12619b5 ON |
2968 | /* |
2969 | * From now this task is not visible for group-wide signals, | |
2970 | * see wants_signal(), do_signal_stop(). | |
2971 | */ | |
2972 | tsk->flags |= PF_EXITING; | |
77e4ef99 | 2973 | |
780de9dd | 2974 | cgroup_threadgroup_change_end(tsk); |
77e4ef99 | 2975 | |
5c251e9d | 2976 | if (!task_sigpending(tsk)) |
5dee1707 ON |
2977 | goto out; |
2978 | ||
f646e227 ON |
2979 | unblocked = tsk->blocked; |
2980 | signotset(&unblocked); | |
2981 | retarget_shared_pending(tsk, &unblocked); | |
5dee1707 | 2982 | |
a8f072c1 | 2983 | if (unlikely(tsk->jobctl & JOBCTL_STOP_PENDING) && |
e5c1902e | 2984 | task_participate_group_stop(tsk)) |
edf2ed15 | 2985 | group_stop = CLD_STOPPED; |
5dee1707 | 2986 | out: |
d12619b5 ON |
2987 | spin_unlock_irq(&tsk->sighand->siglock); |
2988 | ||
62bcf9d9 TH |
2989 | /* |
2990 | * If group stop has completed, deliver the notification. This | |
2991 | * should always go to the real parent of the group leader. | |
2992 | */ | |
ae6d2ed7 | 2993 | if (unlikely(group_stop)) { |
d12619b5 | 2994 | read_lock(&tasklist_lock); |
62bcf9d9 | 2995 | do_notify_parent_cldstop(tsk, false, group_stop); |
d12619b5 ON |
2996 | read_unlock(&tasklist_lock); |
2997 | } | |
2998 | } | |
2999 | ||
1da177e4 LT |
3000 | /* |
3001 | * System call entry points. | |
3002 | */ | |
3003 | ||
41c57892 RD |
3004 | /** |
3005 | * sys_restart_syscall - restart a system call | |
3006 | */ | |
754fe8d2 | 3007 | SYSCALL_DEFINE0(restart_syscall) |
1da177e4 | 3008 | { |
f56141e3 | 3009 | struct restart_block *restart = ¤t->restart_block; |
1da177e4 LT |
3010 | return restart->fn(restart); |
3011 | } | |
3012 | ||
3013 | long do_no_restart_syscall(struct restart_block *param) | |
3014 | { | |
3015 | return -EINTR; | |
3016 | } | |
3017 | ||
b182801a ON |
3018 | static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset) |
3019 | { | |
5c251e9d | 3020 | if (task_sigpending(tsk) && !thread_group_empty(tsk)) { |
b182801a ON |
3021 | sigset_t newblocked; |
3022 | /* A set of now blocked but previously unblocked signals. */ | |
702a5073 | 3023 | sigandnsets(&newblocked, newset, ¤t->blocked); |
b182801a ON |
3024 | retarget_shared_pending(tsk, &newblocked); |
3025 | } | |
3026 | tsk->blocked = *newset; | |
3027 | recalc_sigpending(); | |
3028 | } | |
3029 | ||
e6fa16ab ON |
3030 | /** |
3031 | * set_current_blocked - change current->blocked mask | |
3032 | * @newset: new mask | |
3033 | * | |
3034 | * It is wrong to change ->blocked directly, this helper should be used | |
3035 | * to ensure the process can't miss a shared signal we are going to block. | |
1da177e4 | 3036 | */ |
77097ae5 AV |
3037 | void set_current_blocked(sigset_t *newset) |
3038 | { | |
77097ae5 | 3039 | sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP)); |
0c4a8423 | 3040 | __set_current_blocked(newset); |
77097ae5 AV |
3041 | } |
3042 | ||
3043 | void __set_current_blocked(const sigset_t *newset) | |
e6fa16ab ON |
3044 | { |
3045 | struct task_struct *tsk = current; | |
3046 | ||
c7be96af WL |
3047 | /* |
3048 | * In case the signal mask hasn't changed, there is nothing we need | |
3049 | * to do. The current->blocked shouldn't be modified by other task. | |
3050 | */ | |
3051 | if (sigequalsets(&tsk->blocked, newset)) | |
3052 | return; | |
3053 | ||
e6fa16ab | 3054 | spin_lock_irq(&tsk->sighand->siglock); |
b182801a | 3055 | __set_task_blocked(tsk, newset); |
e6fa16ab ON |
3056 | spin_unlock_irq(&tsk->sighand->siglock); |
3057 | } | |
1da177e4 LT |
3058 | |
3059 | /* | |
3060 | * This is also useful for kernel threads that want to temporarily | |
3061 | * (or permanently) block certain signals. | |
3062 | * | |
3063 | * NOTE! Unlike the user-mode sys_sigprocmask(), the kernel | |
3064 | * interface happily blocks "unblockable" signals like SIGKILL | |
3065 | * and friends. | |
3066 | */ | |
3067 | int sigprocmask(int how, sigset_t *set, sigset_t *oldset) | |
3068 | { | |
73ef4aeb ON |
3069 | struct task_struct *tsk = current; |
3070 | sigset_t newset; | |
1da177e4 | 3071 | |
73ef4aeb | 3072 | /* Lockless, only current can change ->blocked, never from irq */ |
a26fd335 | 3073 | if (oldset) |
73ef4aeb | 3074 | *oldset = tsk->blocked; |
a26fd335 | 3075 | |
1da177e4 LT |
3076 | switch (how) { |
3077 | case SIG_BLOCK: | |
73ef4aeb | 3078 | sigorsets(&newset, &tsk->blocked, set); |
1da177e4 LT |
3079 | break; |
3080 | case SIG_UNBLOCK: | |
702a5073 | 3081 | sigandnsets(&newset, &tsk->blocked, set); |
1da177e4 LT |
3082 | break; |
3083 | case SIG_SETMASK: | |
73ef4aeb | 3084 | newset = *set; |
1da177e4 LT |
3085 | break; |
3086 | default: | |
73ef4aeb | 3087 | return -EINVAL; |
1da177e4 | 3088 | } |
a26fd335 | 3089 | |
77097ae5 | 3090 | __set_current_blocked(&newset); |
73ef4aeb | 3091 | return 0; |
1da177e4 | 3092 | } |
fb50f5a4 | 3093 | EXPORT_SYMBOL(sigprocmask); |
1da177e4 | 3094 | |
ded653cc DD |
3095 | /* |
3096 | * This API helps set app-provided sigmasks. |
3097 | * | |
3098 | * This is useful for syscalls such as ppoll, pselect, io_pgetevents and | |
3099 | * epoll_pwait where a new sigmask is passed from userland for the syscalls. | |
b772434b ON |
3100 | * |
3101 | * Note that it does set_restore_sigmask() in advance, so it must always be |
3102 | * paired with restore_saved_sigmask_unless() before return from syscall. | |
ded653cc | 3103 | */ |
b772434b | 3104 | int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize) |
ded653cc | 3105 | { |
b772434b | 3106 | sigset_t kmask; |
ded653cc | 3107 | |
b772434b ON |
3108 | if (!umask) |
3109 | return 0; | |
ded653cc DD |
3110 | if (sigsetsize != sizeof(sigset_t)) |
3111 | return -EINVAL; | |
b772434b | 3112 | if (copy_from_user(&kmask, umask, sizeof(sigset_t))) |
ded653cc DD |
3113 | return -EFAULT; |
3114 | ||
b772434b ON |
3115 | set_restore_sigmask(); |
3116 | current->saved_sigmask = current->blocked; | |
3117 | set_current_blocked(&kmask); | |
ded653cc DD |
3118 | |
3119 | return 0; | |
3120 | } | |
ded653cc DD |
3121 | |
3122 | #ifdef CONFIG_COMPAT | |
b772434b | 3123 | int set_compat_user_sigmask(const compat_sigset_t __user *umask, |
ded653cc DD |
3124 | size_t sigsetsize) |
3125 | { | |
b772434b | 3126 | sigset_t kmask; |
ded653cc | 3127 | |
b772434b ON |
3128 | if (!umask) |
3129 | return 0; | |
ded653cc DD |
3130 | if (sigsetsize != sizeof(compat_sigset_t)) |
3131 | return -EINVAL; | |
b772434b | 3132 | if (get_compat_sigset(&kmask, umask)) |
ded653cc DD |
3133 | return -EFAULT; |
3134 | ||
b772434b ON |
3135 | set_restore_sigmask(); |
3136 | current->saved_sigmask = current->blocked; | |
3137 | set_current_blocked(&kmask); | |
ded653cc DD |
3138 | |
3139 | return 0; | |
3140 | } | |
ded653cc DD |
3141 | #endif |
3142 | ||
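A hedged userspace sketch of the pattern these helpers serve: ppoll(2) (and similarly pselect(2) and epoll_pwait(2)) atomically installs a caller-supplied mask for the duration of the wait, so a signal cannot arrive in the gap between unblocking it and going to sleep. This sketch is not part of signal.c and assumes a glibc environment (_GNU_SOURCE for ppoll).

#define _GNU_SOURCE
#include <poll.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	sigset_t allow;			/* mask applied only while ppoll() sleeps */
	struct pollfd pfd = { .fd = STDIN_FILENO, .events = POLLIN };
	int ret;

	/* Block everything except SIGTERM for the duration of the wait. */
	sigfillset(&allow);
	sigdelset(&allow, SIGTERM);

	ret = ppoll(&pfd, 1, NULL, &allow);
	if (ret == -1)
		perror("ppoll");	/* EINTR if SIGTERM arrived during the wait */
	else
		printf("stdin readable: %d\n", ret);
	return 0;
}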
41c57892 RD |
3143 | /** |
3144 | * sys_rt_sigprocmask - change the list of currently blocked signals | |
3145 | * @how: whether to add, remove, or set signals | |
ada9c933 | 3146 | * @nset: new signal mask to apply according to @how (if non-null) |
41c57892 RD |
3147 | * @oset: previous value of signal mask if non-null |
3148 | * @sigsetsize: size of sigset_t type | |
3149 | */ | |
bb7efee2 | 3150 | SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, nset, |
17da2bd9 | 3151 | sigset_t __user *, oset, size_t, sigsetsize) |
1da177e4 | 3152 | { |
1da177e4 | 3153 | sigset_t old_set, new_set; |
bb7efee2 | 3154 | int error; |
1da177e4 LT |
3155 | |
3156 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
3157 | if (sigsetsize != sizeof(sigset_t)) | |
bb7efee2 | 3158 | return -EINVAL; |
1da177e4 | 3159 | |
bb7efee2 ON |
3160 | old_set = current->blocked; |
3161 | ||
3162 | if (nset) { | |
3163 | if (copy_from_user(&new_set, nset, sizeof(sigset_t))) | |
3164 | return -EFAULT; | |
1da177e4 LT |
3165 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
3166 | ||
bb7efee2 | 3167 | error = sigprocmask(how, &new_set, NULL); |
1da177e4 | 3168 | if (error) |
bb7efee2 ON |
3169 | return error; |
3170 | } | |
1da177e4 | 3171 | |
bb7efee2 ON |
3172 | if (oset) { |
3173 | if (copy_to_user(oset, &old_set, sizeof(sigset_t))) | |
3174 | return -EFAULT; | |
1da177e4 | 3175 | } |
bb7efee2 ON |
3176 | |
3177 | return 0; | |
1da177e4 LT |
3178 | } |
3179 | ||
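For reference, a userspace sketch (not part of signal.c) of the corresponding libc call, sigprocmask(3), which glibc implements on top of rt_sigprocmask; attempts to block SIGKILL or SIGSTOP are silently dropped, exactly as the sigdelsetmask() above enforces.

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	if (sigprocmask(SIG_BLOCK, &block, &old) == -1) {	/* add SIGINT to the mask */
		perror("sigprocmask");
		return 1;
	}
	/* ... critical section: SIGINT stays pending instead of interrupting ... */
	if (sigprocmask(SIG_SETMASK, &old, NULL) == -1)		/* restore previous mask */
		perror("sigprocmask");
	return 0;
}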
322a56cb | 3180 | #ifdef CONFIG_COMPAT |
322a56cb AV |
3181 | COMPAT_SYSCALL_DEFINE4(rt_sigprocmask, int, how, compat_sigset_t __user *, nset, |
3182 | compat_sigset_t __user *, oset, compat_size_t, sigsetsize) | |
1da177e4 | 3183 | { |
322a56cb AV |
3184 | sigset_t old_set = current->blocked; |
3185 | ||
3186 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
3187 | if (sigsetsize != sizeof(sigset_t)) | |
3188 | return -EINVAL; | |
3189 | ||
3190 | if (nset) { | |
322a56cb AV |
3191 | sigset_t new_set; |
3192 | int error; | |
3968cf62 | 3193 | if (get_compat_sigset(&new_set, nset)) |
322a56cb | 3194 | return -EFAULT; |
322a56cb AV |
3195 | sigdelsetmask(&new_set, sigmask(SIGKILL)|sigmask(SIGSTOP)); |
3196 | ||
3197 | error = sigprocmask(how, &new_set, NULL); | |
3198 | if (error) | |
3199 | return error; | |
3200 | } | |
f454322e | 3201 | return oset ? put_compat_sigset(oset, &old_set, sizeof(*oset)) : 0; |
322a56cb AV |
3202 | } |
3203 | #endif | |
1da177e4 | 3204 | |
b1d294c8 | 3205 | static void do_sigpending(sigset_t *set) |
1da177e4 | 3206 | { |
1da177e4 | 3207 | spin_lock_irq(¤t->sighand->siglock); |
fe9c1db2 | 3208 | sigorsets(set, ¤t->pending.signal, |
1da177e4 LT |
3209 | ¤t->signal->shared_pending.signal); |
3210 | spin_unlock_irq(¤t->sighand->siglock); | |
3211 | ||
3212 | /* Outside the lock because only this thread touches it. */ | |
fe9c1db2 | 3213 | sigandsets(set, ¤t->blocked, set); |
5aba085e | 3214 | } |
1da177e4 | 3215 | |
41c57892 RD |
3216 | /** |
3217 | * sys_rt_sigpending - examine a pending signal that has been raised | |
3218 | * while blocked | |
20f22ab4 | 3219 | * @uset: stores pending signals |
41c57892 RD |
3220 | * @sigsetsize: size of sigset_t type or larger |
3221 | */ | |
fe9c1db2 | 3222 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) |
1da177e4 | 3223 | { |
fe9c1db2 | 3224 | sigset_t set; |
176826af DL |
3225 | |
3226 | if (sigsetsize > sizeof(*uset)) | |
3227 | return -EINVAL; | |
3228 | ||
b1d294c8 CB |
3229 | do_sigpending(&set); |
3230 | ||
3231 | if (copy_to_user(uset, &set, sigsetsize)) | |
3232 | return -EFAULT; | |
3233 | ||
3234 | return 0; | |
fe9c1db2 AV |
3235 | } |
3236 | ||
3237 | #ifdef CONFIG_COMPAT | |
fe9c1db2 AV |
3238 | COMPAT_SYSCALL_DEFINE2(rt_sigpending, compat_sigset_t __user *, uset, |
3239 | compat_size_t, sigsetsize) | |
1da177e4 | 3240 | { |
fe9c1db2 | 3241 | sigset_t set; |
176826af DL |
3242 | |
3243 | if (sigsetsize > sizeof(*uset)) | |
3244 | return -EINVAL; | |
3245 | ||
b1d294c8 CB |
3246 | do_sigpending(&set); |
3247 | ||
3248 | return put_compat_sigset(uset, &set, sigsetsize); | |
1da177e4 | 3249 | } |
fe9c1db2 | 3250 | #endif |
1da177e4 | 3251 | |
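A small userspace sketch (hedged, not part of signal.c) of what do_sigpending() reports: a signal that is raised while blocked shows up in the sigpending(2) set.

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);	/* block SIGUSR1 ... */
	raise(SIGUSR1);				/* ... so raising it leaves it pending */

	if (sigpending(&pending) == 0 && sigismember(&pending, SIGUSR1))
		printf("SIGUSR1 is pending\n");
	return 0;
}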
4ce5f9c9 EB |
3252 | static const struct { |
3253 | unsigned char limit, layout; | |
3254 | } sig_sicodes[] = { | |
3255 | [SIGILL] = { NSIGILL, SIL_FAULT }, | |
3256 | [SIGFPE] = { NSIGFPE, SIL_FAULT }, | |
3257 | [SIGSEGV] = { NSIGSEGV, SIL_FAULT }, | |
3258 | [SIGBUS] = { NSIGBUS, SIL_FAULT }, | |
3259 | [SIGTRAP] = { NSIGTRAP, SIL_FAULT }, | |
3260 | #if defined(SIGEMT) | |
3261 | [SIGEMT] = { NSIGEMT, SIL_FAULT }, | |
3262 | #endif | |
3263 | [SIGCHLD] = { NSIGCHLD, SIL_CHLD }, | |
3264 | [SIGPOLL] = { NSIGPOLL, SIL_POLL }, | |
3265 | [SIGSYS] = { NSIGSYS, SIL_SYS }, | |
3266 | }; | |
3267 | ||
b2a2ab52 | 3268 | static bool known_siginfo_layout(unsigned sig, int si_code) |
4ce5f9c9 EB |
3269 | { |
3270 | if (si_code == SI_KERNEL) | |
3271 | return true; | |
3272 | else if ((si_code > SI_USER)) { | |
3273 | if (sig_specific_sicodes(sig)) { | |
3274 | if (si_code <= sig_sicodes[sig].limit) | |
3275 | return true; | |
3276 | } | |
3277 | else if (si_code <= NSIGPOLL) | |
3278 | return true; | |
3279 | } | |
3280 | else if (si_code >= SI_DETHREAD) | |
3281 | return true; | |
3282 | else if (si_code == SI_ASYNCNL) | |
3283 | return true; | |
3284 | return false; | |
3285 | } | |
3286 | ||
a3670058 | 3287 | enum siginfo_layout siginfo_layout(unsigned sig, int si_code) |
cc731525 EB |
3288 | { |
3289 | enum siginfo_layout layout = SIL_KILL; | |
3290 | if ((si_code > SI_USER) && (si_code < SI_KERNEL)) { | |
4ce5f9c9 EB |
3291 | if ((sig < ARRAY_SIZE(sig_sicodes)) && |
3292 | (si_code <= sig_sicodes[sig].limit)) { | |
3293 | layout = sig_sicodes[sig].layout; | |
31931c93 EB |
3294 | /* Handle the exceptions */ |
3295 | if ((sig == SIGBUS) && | |
3296 | (si_code >= BUS_MCEERR_AR) && (si_code <= BUS_MCEERR_AO)) | |
3297 | layout = SIL_FAULT_MCEERR; | |
3298 | else if ((sig == SIGSEGV) && (si_code == SEGV_BNDERR)) | |
3299 | layout = SIL_FAULT_BNDERR; | |
3300 | #ifdef SEGV_PKUERR | |
3301 | else if ((sig == SIGSEGV) && (si_code == SEGV_PKUERR)) | |
3302 | layout = SIL_FAULT_PKUERR; | |
3303 | #endif | |
ed8e5080 | 3304 | else if ((sig == SIGTRAP) && (si_code == TRAP_PERF)) |
f4ac7302 | 3305 | layout = SIL_FAULT_PERF_EVENT; |
2c9f7eaf EB |
3306 | else if (IS_ENABLED(CONFIG_SPARC) && |
3307 | (sig == SIGILL) && (si_code == ILL_ILLTRP)) | |
3308 | layout = SIL_FAULT_TRAPNO; | |
7de5f68d EB |
3309 | else if (IS_ENABLED(CONFIG_ALPHA) && |
3310 | ((sig == SIGFPE) || | |
3311 | ((sig == SIGTRAP) && (si_code == TRAP_UNK)))) | |
9abcabe3 | 3312 | layout = SIL_FAULT_TRAPNO; |
31931c93 | 3313 | } |
cc731525 EB |
3314 | else if (si_code <= NSIGPOLL) |
3315 | layout = SIL_POLL; | |
3316 | } else { | |
3317 | if (si_code == SI_TIMER) | |
3318 | layout = SIL_TIMER; | |
3319 | else if (si_code == SI_SIGIO) | |
3320 | layout = SIL_POLL; | |
3321 | else if (si_code < 0) | |
3322 | layout = SIL_RT; | |
cc731525 EB |
3323 | } |
3324 | return layout; | |
3325 | } | |
3326 | ||
4ce5f9c9 EB |
3327 | static inline char __user *si_expansion(const siginfo_t __user *info) |
3328 | { | |
3329 | return ((char __user *)info) + sizeof(struct kernel_siginfo); | |
3330 | } | |
3331 | ||
ae7795bc | 3332 | int copy_siginfo_to_user(siginfo_t __user *to, const kernel_siginfo_t *from) |
1da177e4 | 3333 | { |
4ce5f9c9 | 3334 | char __user *expansion = si_expansion(to); |
ae7795bc | 3335 | if (copy_to_user(to, from , sizeof(struct kernel_siginfo))) |
1da177e4 | 3336 | return -EFAULT; |
4ce5f9c9 | 3337 | if (clear_user(expansion, SI_EXPANSION_SIZE)) |
1da177e4 | 3338 | return -EFAULT; |
c999b933 | 3339 | return 0; |
1da177e4 LT |
3340 | } |
3341 | ||
601d5abf EB |
3342 | static int post_copy_siginfo_from_user(kernel_siginfo_t *info, |
3343 | const siginfo_t __user *from) | |
4cd2e0e7 | 3344 | { |
601d5abf | 3345 | if (unlikely(!known_siginfo_layout(info->si_signo, info->si_code))) { |
4ce5f9c9 EB |
3346 | char __user *expansion = si_expansion(from); |
3347 | char buf[SI_EXPANSION_SIZE]; | |
3348 | int i; | |
3349 | /* | |
3350 | * An unknown si_code might need more than | |
3351 | * sizeof(struct kernel_siginfo) bytes. Verify all of the | |
3352 | * extra bytes are 0. This guarantees copy_siginfo_to_user | |
3353 | * will return this data to userspace exactly. | |
3354 | */ | |
3355 | if (copy_from_user(&buf, expansion, SI_EXPANSION_SIZE)) | |
3356 | return -EFAULT; | |
3357 | for (i = 0; i < SI_EXPANSION_SIZE; i++) { | |
3358 | if (buf[i] != 0) | |
3359 | return -E2BIG; | |
3360 | } | |
3361 | } | |
4cd2e0e7 EB |
3362 | return 0; |
3363 | } | |
3364 | ||
601d5abf EB |
3365 | static int __copy_siginfo_from_user(int signo, kernel_siginfo_t *to, |
3366 | const siginfo_t __user *from) | |
3367 | { | |
3368 | if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) | |
3369 | return -EFAULT; | |
3370 | to->si_signo = signo; | |
3371 | return post_copy_siginfo_from_user(to, from); | |
3372 | } | |
3373 | ||
3374 | int copy_siginfo_from_user(kernel_siginfo_t *to, const siginfo_t __user *from) | |
3375 | { | |
3376 | if (copy_from_user(to, from, sizeof(struct kernel_siginfo))) | |
3377 | return -EFAULT; | |
3378 | return post_copy_siginfo_from_user(to, from); | |
3379 | } | |
3380 | ||
212a36a1 | 3381 | #ifdef CONFIG_COMPAT |
c3b3f524 CH |
3382 | /** |
3383 | * copy_siginfo_to_external32 - copy a kernel siginfo into a compat user siginfo | |
3384 | * @to: compat siginfo destination | |
3385 | * @from: kernel siginfo source | |
3386 | * | |
3387 | * Note: This function does not work properly for the SIGCHLD on x32, but | |
3388 | * fortunately it doesn't have to. The only valid callers for this function are | |
3389 | * copy_siginfo_to_user32, which is overridden for x32 and the coredump code. | |
3390 | * The latter does not care because SIGCHLD will never cause a coredump. | |
3391 | */ | |
3392 | void copy_siginfo_to_external32(struct compat_siginfo *to, | |
3393 | const struct kernel_siginfo *from) | |
ea64d5ac | 3394 | { |
c3b3f524 | 3395 | memset(to, 0, sizeof(*to)); |
ea64d5ac | 3396 | |
c3b3f524 CH |
3397 | to->si_signo = from->si_signo; |
3398 | to->si_errno = from->si_errno; | |
3399 | to->si_code = from->si_code; | |
ea64d5ac EB |
3400 | switch(siginfo_layout(from->si_signo, from->si_code)) { |
3401 | case SIL_KILL: | |
c3b3f524 CH |
3402 | to->si_pid = from->si_pid; |
3403 | to->si_uid = from->si_uid; | |
ea64d5ac EB |
3404 | break; |
3405 | case SIL_TIMER: | |
c3b3f524 CH |
3406 | to->si_tid = from->si_tid; |
3407 | to->si_overrun = from->si_overrun; | |
3408 | to->si_int = from->si_int; | |
ea64d5ac EB |
3409 | break; |
3410 | case SIL_POLL: | |
c3b3f524 CH |
3411 | to->si_band = from->si_band; |
3412 | to->si_fd = from->si_fd; | |
ea64d5ac EB |
3413 | break; |
3414 | case SIL_FAULT: | |
c3b3f524 | 3415 | to->si_addr = ptr_to_compat(from->si_addr); |
9abcabe3 EB |
3416 | break; |
3417 | case SIL_FAULT_TRAPNO: | |
3418 | to->si_addr = ptr_to_compat(from->si_addr); | |
c3b3f524 | 3419 | to->si_trapno = from->si_trapno; |
31931c93 EB |
3420 | break; |
3421 | case SIL_FAULT_MCEERR: | |
c3b3f524 | 3422 | to->si_addr = ptr_to_compat(from->si_addr); |
c3b3f524 | 3423 | to->si_addr_lsb = from->si_addr_lsb; |
31931c93 EB |
3424 | break; |
3425 | case SIL_FAULT_BNDERR: | |
c3b3f524 | 3426 | to->si_addr = ptr_to_compat(from->si_addr); |
c3b3f524 CH |
3427 | to->si_lower = ptr_to_compat(from->si_lower); |
3428 | to->si_upper = ptr_to_compat(from->si_upper); | |
31931c93 EB |
3429 | break; |
3430 | case SIL_FAULT_PKUERR: | |
c3b3f524 | 3431 | to->si_addr = ptr_to_compat(from->si_addr); |
c3b3f524 | 3432 | to->si_pkey = from->si_pkey; |
ea64d5ac | 3433 | break; |
f4ac7302 | 3434 | case SIL_FAULT_PERF_EVENT: |
fb6cc127 | 3435 | to->si_addr = ptr_to_compat(from->si_addr); |
0683b531 EB |
3436 | to->si_perf_data = from->si_perf_data; |
3437 | to->si_perf_type = from->si_perf_type; | |
fb6cc127 | 3438 | break; |
ea64d5ac | 3439 | case SIL_CHLD: |
c3b3f524 CH |
3440 | to->si_pid = from->si_pid; |
3441 | to->si_uid = from->si_uid; | |
3442 | to->si_status = from->si_status; | |
3443 | to->si_utime = from->si_utime; | |
3444 | to->si_stime = from->si_stime; | |
ea64d5ac EB |
3445 | break; |
3446 | case SIL_RT: | |
c3b3f524 CH |
3447 | to->si_pid = from->si_pid; |
3448 | to->si_uid = from->si_uid; | |
3449 | to->si_int = from->si_int; | |
ea64d5ac EB |
3450 | break; |
3451 | case SIL_SYS: | |
c3b3f524 CH |
3452 | to->si_call_addr = ptr_to_compat(from->si_call_addr); |
3453 | to->si_syscall = from->si_syscall; | |
3454 | to->si_arch = from->si_arch; | |
ea64d5ac EB |
3455 | break; |
3456 | } | |
c3b3f524 | 3457 | } |
ea64d5ac | 3458 | |
c3b3f524 CH |
3459 | int __copy_siginfo_to_user32(struct compat_siginfo __user *to, |
3460 | const struct kernel_siginfo *from) | |
3461 | { | |
3462 | struct compat_siginfo new; | |
3463 | ||
3464 | copy_siginfo_to_external32(&new, from); | |
ea64d5ac EB |
3465 | if (copy_to_user(to, &new, sizeof(struct compat_siginfo))) |
3466 | return -EFAULT; | |
ea64d5ac EB |
3467 | return 0; |
3468 | } | |
3469 | ||
601d5abf EB |
3470 | static int post_copy_siginfo_from_user32(kernel_siginfo_t *to, |
3471 | const struct compat_siginfo *from) | |
212a36a1 | 3472 | { |
212a36a1 | 3473 | clear_siginfo(to); |
601d5abf EB |
3474 | to->si_signo = from->si_signo; |
3475 | to->si_errno = from->si_errno; | |
3476 | to->si_code = from->si_code; | |
3477 | switch(siginfo_layout(from->si_signo, from->si_code)) { | |
212a36a1 | 3478 | case SIL_KILL: |
601d5abf EB |
3479 | to->si_pid = from->si_pid; |
3480 | to->si_uid = from->si_uid; | |
212a36a1 EB |
3481 | break; |
3482 | case SIL_TIMER: | |
601d5abf EB |
3483 | to->si_tid = from->si_tid; |
3484 | to->si_overrun = from->si_overrun; | |
3485 | to->si_int = from->si_int; | |
212a36a1 EB |
3486 | break; |
3487 | case SIL_POLL: | |
601d5abf EB |
3488 | to->si_band = from->si_band; |
3489 | to->si_fd = from->si_fd; | |
212a36a1 EB |
3490 | break; |
3491 | case SIL_FAULT: | |
601d5abf | 3492 | to->si_addr = compat_ptr(from->si_addr); |
9abcabe3 EB |
3493 | break; |
3494 | case SIL_FAULT_TRAPNO: | |
3495 | to->si_addr = compat_ptr(from->si_addr); | |
601d5abf | 3496 | to->si_trapno = from->si_trapno; |
31931c93 EB |
3497 | break; |
3498 | case SIL_FAULT_MCEERR: | |
601d5abf | 3499 | to->si_addr = compat_ptr(from->si_addr); |
601d5abf | 3500 | to->si_addr_lsb = from->si_addr_lsb; |
31931c93 EB |
3501 | break; |
3502 | case SIL_FAULT_BNDERR: | |
601d5abf | 3503 | to->si_addr = compat_ptr(from->si_addr); |
601d5abf EB |
3504 | to->si_lower = compat_ptr(from->si_lower); |
3505 | to->si_upper = compat_ptr(from->si_upper); | |
31931c93 EB |
3506 | break; |
3507 | case SIL_FAULT_PKUERR: | |
601d5abf | 3508 | to->si_addr = compat_ptr(from->si_addr); |
601d5abf | 3509 | to->si_pkey = from->si_pkey; |
212a36a1 | 3510 | break; |
f4ac7302 | 3511 | case SIL_FAULT_PERF_EVENT: |
fb6cc127 | 3512 | to->si_addr = compat_ptr(from->si_addr); |
0683b531 EB |
3513 | to->si_perf_data = from->si_perf_data; |
3514 | to->si_perf_type = from->si_perf_type; | |
fb6cc127 | 3515 | break; |
212a36a1 | 3516 | case SIL_CHLD: |
601d5abf EB |
3517 | to->si_pid = from->si_pid; |
3518 | to->si_uid = from->si_uid; | |
3519 | to->si_status = from->si_status; | |
212a36a1 EB |
3520 | #ifdef CONFIG_X86_X32_ABI |
3521 | if (in_x32_syscall()) { | |
601d5abf EB |
3522 | to->si_utime = from->_sifields._sigchld_x32._utime; |
3523 | to->si_stime = from->_sifields._sigchld_x32._stime; | |
212a36a1 EB |
3524 | } else |
3525 | #endif | |
3526 | { | |
601d5abf EB |
3527 | to->si_utime = from->si_utime; |
3528 | to->si_stime = from->si_stime; | |
212a36a1 EB |
3529 | } |
3530 | break; | |
3531 | case SIL_RT: | |
601d5abf EB |
3532 | to->si_pid = from->si_pid; |
3533 | to->si_uid = from->si_uid; | |
3534 | to->si_int = from->si_int; | |
212a36a1 EB |
3535 | break; |
3536 | case SIL_SYS: | |
601d5abf EB |
3537 | to->si_call_addr = compat_ptr(from->si_call_addr); |
3538 | to->si_syscall = from->si_syscall; | |
3539 | to->si_arch = from->si_arch; | |
212a36a1 EB |
3540 | break; |
3541 | } | |
3542 | return 0; | |
3543 | } | |
601d5abf EB |
3544 | |
3545 | static int __copy_siginfo_from_user32(int signo, struct kernel_siginfo *to, | |
3546 | const struct compat_siginfo __user *ufrom) | |
3547 | { | |
3548 | struct compat_siginfo from; | |
3549 | ||
3550 | if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) | |
3551 | return -EFAULT; | |
3552 | ||
3553 | from.si_signo = signo; | |
3554 | return post_copy_siginfo_from_user32(to, &from); | |
3555 | } | |
3556 | ||
3557 | int copy_siginfo_from_user32(struct kernel_siginfo *to, | |
3558 | const struct compat_siginfo __user *ufrom) | |
3559 | { | |
3560 | struct compat_siginfo from; | |
3561 | ||
3562 | if (copy_from_user(&from, ufrom, sizeof(struct compat_siginfo))) | |
3563 | return -EFAULT; | |
3564 | ||
3565 | return post_copy_siginfo_from_user32(to, &from); | |
3566 | } | |
212a36a1 EB |
3567 | #endif /* CONFIG_COMPAT */ |
3568 | ||
943df148 ON |
3569 | /** |
3570 | * do_sigtimedwait - wait for queued signals specified in @which | |
3571 | * @which: queued signals to wait for | |
3572 | * @info: if non-null, the signal's siginfo is returned here | |
3573 | * @ts: upper bound on process time suspension | |
3574 | */ | |
ae7795bc | 3575 | static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info, |
49c39f84 | 3576 | const struct timespec64 *ts) |
943df148 | 3577 | { |
2456e855 | 3578 | ktime_t *to = NULL, timeout = KTIME_MAX; |
943df148 | 3579 | struct task_struct *tsk = current; |
943df148 | 3580 | sigset_t mask = *which; |
2b1ecc3d | 3581 | int sig, ret = 0; |
943df148 ON |
3582 | |
3583 | if (ts) { | |
49c39f84 | 3584 | if (!timespec64_valid(ts)) |
943df148 | 3585 | return -EINVAL; |
49c39f84 | 3586 | timeout = timespec64_to_ktime(*ts); |
2b1ecc3d | 3587 | to = &timeout; |
943df148 ON |
3588 | } |
3589 | ||
3590 | /* | |
3591 | * Invert the set of allowed signals to get those we want to block. | |
3592 | */ | |
3593 | sigdelsetmask(&mask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
3594 | signotset(&mask); | |
3595 | ||
3596 | spin_lock_irq(&tsk->sighand->siglock); | |
3597 | sig = dequeue_signal(tsk, &mask, info); | |
2456e855 | 3598 | if (!sig && timeout) { |
943df148 ON |
3599 | /* |
3600 | * None ready, temporarily unblock those we're interested in |
3601 | * while we are sleeping, so that we'll be awakened when |
b182801a ON |
3602 | * they arrive. Unblocking is always fine, we can avoid |
3603 | * set_current_blocked(). | |
943df148 ON |
3604 | */ |
3605 | tsk->real_blocked = tsk->blocked; | |
3606 | sigandsets(&tsk->blocked, &tsk->blocked, &mask); | |
3607 | recalc_sigpending(); | |
3608 | spin_unlock_irq(&tsk->sighand->siglock); | |
3609 | ||
2b1ecc3d TG |
3610 | __set_current_state(TASK_INTERRUPTIBLE); |
3611 | ret = freezable_schedule_hrtimeout_range(to, tsk->timer_slack_ns, | |
3612 | HRTIMER_MODE_REL); | |
943df148 | 3613 | spin_lock_irq(&tsk->sighand->siglock); |
b182801a | 3614 | __set_task_blocked(tsk, &tsk->real_blocked); |
6114041a | 3615 | sigemptyset(&tsk->real_blocked); |
b182801a | 3616 | sig = dequeue_signal(tsk, &mask, info); |
943df148 ON |
3617 | } |
3618 | spin_unlock_irq(&tsk->sighand->siglock); | |
3619 | ||
3620 | if (sig) | |
3621 | return sig; | |
2b1ecc3d | 3622 | return ret ? -EINTR : -EAGAIN; |
943df148 ON |
3623 | } |
3624 | ||
41c57892 RD |
3625 | /** |
3626 | * sys_rt_sigtimedwait - synchronously wait for queued signals specified | |
3627 | * in @uthese | |
3628 | * @uthese: queued signals to wait for | |
3629 | * @uinfo: if non-null, the signal's siginfo is returned here | |
3630 | * @uts: upper bound on process time suspension | |
3631 | * @sigsetsize: size of sigset_t type | |
3632 | */ | |
17da2bd9 | 3633 | SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, |
49c39f84 AB |
3634 | siginfo_t __user *, uinfo, |
3635 | const struct __kernel_timespec __user *, uts, | |
17da2bd9 | 3636 | size_t, sigsetsize) |
1da177e4 | 3637 | { |
1da177e4 | 3638 | sigset_t these; |
49c39f84 | 3639 | struct timespec64 ts; |
ae7795bc | 3640 | kernel_siginfo_t info; |
943df148 | 3641 | int ret; |
1da177e4 LT |
3642 | |
3643 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
3644 | if (sigsetsize != sizeof(sigset_t)) | |
3645 | return -EINVAL; | |
3646 | ||
3647 | if (copy_from_user(&these, uthese, sizeof(these))) | |
3648 | return -EFAULT; | |
5aba085e | 3649 | |
1da177e4 | 3650 | if (uts) { |
49c39f84 | 3651 | if (get_timespec64(&ts, uts)) |
1da177e4 | 3652 | return -EFAULT; |
1da177e4 LT |
3653 | } |
3654 | ||
943df148 | 3655 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); |
1da177e4 | 3656 | |
943df148 ON |
3657 | if (ret > 0 && uinfo) { |
3658 | if (copy_siginfo_to_user(uinfo, &info)) | |
3659 | ret = -EFAULT; | |
1da177e4 LT |
3660 | } |
3661 | ||
3662 | return ret; | |
3663 | } | |
3664 | ||
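A hedged userspace counterpart: sigtimedwait(2), backed by do_sigtimedwait() above, dequeues one of the requested signals synchronously or fails with EAGAIN after the timeout. The signal should normally be blocked first, otherwise it may be delivered asynchronously instead of being left queued for the wait.

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <errno.h>

int main(void)
{
	sigset_t set;
	siginfo_t info;
	struct timespec timeout = { .tv_sec = 2, .tv_nsec = 0 };
	int sig;

	sigemptyset(&set);
	sigaddset(&set, SIGUSR1);
	sigprocmask(SIG_BLOCK, &set, NULL);	/* block it so it stays queued for us */
	raise(SIGUSR1);				/* queue one instance */

	sig = sigtimedwait(&set, &info, &timeout);
	if (sig == SIGUSR1)
		printf("SIGUSR1 from pid %d\n", (int)info.si_pid);
	else if (sig == -1 && errno == EAGAIN)
		printf("timed out\n");
	return 0;
}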
df8522a3 AB |
3665 | #ifdef CONFIG_COMPAT_32BIT_TIME |
3666 | SYSCALL_DEFINE4(rt_sigtimedwait_time32, const sigset_t __user *, uthese, | |
3667 | siginfo_t __user *, uinfo, | |
3668 | const struct old_timespec32 __user *, uts, | |
3669 | size_t, sigsetsize) | |
3670 | { | |
3671 | sigset_t these; | |
3672 | struct timespec64 ts; | |
3673 | kernel_siginfo_t info; | |
3674 | int ret; | |
3675 | ||
3676 | if (sigsetsize != sizeof(sigset_t)) | |
3677 | return -EINVAL; | |
3678 | ||
3679 | if (copy_from_user(&these, uthese, sizeof(these))) | |
3680 | return -EFAULT; | |
3681 | ||
3682 | if (uts) { | |
3683 | if (get_old_timespec32(&ts, uts)) | |
3684 | return -EFAULT; | |
3685 | } | |
3686 | ||
3687 | ret = do_sigtimedwait(&these, &info, uts ? &ts : NULL); | |
3688 | ||
3689 | if (ret > 0 && uinfo) { | |
3690 | if (copy_siginfo_to_user(uinfo, &info)) | |
3691 | ret = -EFAULT; | |
3692 | } | |
3693 | ||
3694 | return ret; | |
3695 | } | |
3696 | #endif | |
3697 | ||
1b3c872c | 3698 | #ifdef CONFIG_COMPAT |
2367c4b5 AB |
3699 | COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time64, compat_sigset_t __user *, uthese, |
3700 | struct compat_siginfo __user *, uinfo, | |
3701 | struct __kernel_timespec __user *, uts, compat_size_t, sigsetsize) | |
3702 | { | |
3703 | sigset_t s; | |
3704 | struct timespec64 t; | |
3705 | kernel_siginfo_t info; | |
3706 | long ret; | |
3707 | ||
3708 | if (sigsetsize != sizeof(sigset_t)) | |
3709 | return -EINVAL; | |
3710 | ||
3711 | if (get_compat_sigset(&s, uthese)) | |
3712 | return -EFAULT; | |
3713 | ||
3714 | if (uts) { | |
3715 | if (get_timespec64(&t, uts)) | |
3716 | return -EFAULT; | |
3717 | } | |
3718 | ||
3719 | ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); | |
3720 | ||
3721 | if (ret > 0 && uinfo) { | |
3722 | if (copy_siginfo_to_user32(uinfo, &info)) | |
3723 | ret = -EFAULT; | |
3724 | } | |
3725 | ||
3726 | return ret; | |
3727 | } | |
3728 | ||
3729 | #ifdef CONFIG_COMPAT_32BIT_TIME | |
8dabe724 | 3730 | COMPAT_SYSCALL_DEFINE4(rt_sigtimedwait_time32, compat_sigset_t __user *, uthese, |
1b3c872c | 3731 | struct compat_siginfo __user *, uinfo, |
9afc5eee | 3732 | struct old_timespec32 __user *, uts, compat_size_t, sigsetsize) |
1b3c872c | 3733 | { |
1b3c872c | 3734 | sigset_t s; |
49c39f84 | 3735 | struct timespec64 t; |
ae7795bc | 3736 | kernel_siginfo_t info; |
1b3c872c AV |
3737 | long ret; |
3738 | ||
3739 | if (sigsetsize != sizeof(sigset_t)) | |
3740 | return -EINVAL; | |
3741 | ||
3968cf62 | 3742 | if (get_compat_sigset(&s, uthese)) |
1b3c872c | 3743 | return -EFAULT; |
1b3c872c AV |
3744 | |
3745 | if (uts) { | |
49c39f84 | 3746 | if (get_old_timespec32(&t, uts)) |
1b3c872c AV |
3747 | return -EFAULT; |
3748 | } | |
3749 | ||
3750 | ret = do_sigtimedwait(&s, &info, uts ? &t : NULL); | |
3751 | ||
3752 | if (ret > 0 && uinfo) { | |
3753 | if (copy_siginfo_to_user32(uinfo, &info)) | |
3754 | ret = -EFAULT; | |
3755 | } | |
3756 | ||
3757 | return ret; | |
3758 | } | |
3759 | #endif | |
2367c4b5 | 3760 | #endif |
1b3c872c | 3761 | |
3eb39f47 CB |
3762 | static inline void prepare_kill_siginfo(int sig, struct kernel_siginfo *info) |
3763 | { | |
3764 | clear_siginfo(info); | |
3765 | info->si_signo = sig; | |
3766 | info->si_errno = 0; | |
3767 | info->si_code = SI_USER; | |
3768 | info->si_pid = task_tgid_vnr(current); | |
3769 | info->si_uid = from_kuid_munged(current_user_ns(), current_uid()); | |
3770 | } | |
3771 | ||
41c57892 RD |
3772 | /** |
3773 | * sys_kill - send a signal to a process | |
3774 | * @pid: the PID of the process | |
3775 | * @sig: signal to be sent | |
3776 | */ | |
17da2bd9 | 3777 | SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) |
1da177e4 | 3778 | { |
ae7795bc | 3779 | struct kernel_siginfo info; |
1da177e4 | 3780 | |
3eb39f47 | 3781 | prepare_kill_siginfo(sig, &info); |
1da177e4 LT |
3782 | |
3783 | return kill_something_info(sig, &info, pid); | |
3784 | } | |
3785 | ||
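As the comment in do_send_specific() below notes, signal 0 performs only the permission and existence checks without delivering anything. A common userspace idiom built on that, sketched here (hedged, not part of signal.c):

#include <signal.h>
#include <errno.h>
#include <stdio.h>

/* Returns 1 if @pid exists (whether or not we may signal it), 0 otherwise. */
static int process_alive(pid_t pid)
{
	if (kill(pid, 0) == 0)
		return 1;
	return errno == EPERM;	/* exists, but we lack permission to signal it */
}

int main(void)
{
	printf("pid 1 alive: %d\n", process_alive(1));
	return 0;
}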
3eb39f47 CB |
3786 | /* |
3787 | * Verify that the signaler and signalee either are in the same pid namespace | |
3788 | * or that the signaler's pid namespace is an ancestor of the signalee's pid | |
3789 | * namespace. | |
3790 | */ | |
3791 | static bool access_pidfd_pidns(struct pid *pid) | |
3792 | { | |
3793 | struct pid_namespace *active = task_active_pid_ns(current); | |
3794 | struct pid_namespace *p = ns_of_pid(pid); | |
3795 | ||
3796 | for (;;) { | |
3797 | if (!p) | |
3798 | return false; | |
3799 | if (p == active) | |
3800 | break; | |
3801 | p = p->parent; | |
3802 | } | |
3803 | ||
3804 | return true; | |
3805 | } | |
3806 | ||
adc5d875 JH |
3807 | static int copy_siginfo_from_user_any(kernel_siginfo_t *kinfo, |
3808 | siginfo_t __user *info) | |
3eb39f47 CB |
3809 | { |
3810 | #ifdef CONFIG_COMPAT | |
3811 | /* | |
3812 | * Avoid hooking up compat syscalls and instead handle necessary | |
3813 | * conversions here. Note, this is a stop-gap measure and should not be | |
3814 | * considered a generic solution. | |
3815 | */ | |
3816 | if (in_compat_syscall()) | |
3817 | return copy_siginfo_from_user32( | |
3818 | kinfo, (struct compat_siginfo __user *)info); | |
3819 | #endif | |
3820 | return copy_siginfo_from_user(kinfo, info); | |
3821 | } | |
3822 | ||
2151ad1b CB |
3823 | static struct pid *pidfd_to_pid(const struct file *file) |
3824 | { | |
3695eae5 CB |
3825 | struct pid *pid; |
3826 | ||
3827 | pid = pidfd_pid(file); | |
3828 | if (!IS_ERR(pid)) | |
3829 | return pid; | |
2151ad1b CB |
3830 | |
3831 | return tgid_pidfd_to_pid(file); | |
3832 | } | |
3833 | ||
3eb39f47 | 3834 | /** |
c732327f CB |
3835 | * sys_pidfd_send_signal - Signal a process through a pidfd |
3836 | * @pidfd: file descriptor of the process | |
3837 | * @sig: signal to send | |
3838 | * @info: signal info | |
3839 | * @flags: future flags | |
3eb39f47 CB |
3840 | * |
3841 | * The syscall currently only signals via PIDTYPE_PID which covers | |
3842 | * kill(<positive-pid>, <signal>). It does not signal threads or process |
3843 | * groups. | |
3844 | * In order to extend the syscall to threads and process groups the @flags | |
3845 | * argument should be used. In essence, the @flags argument will determine | |
3846 | * what is signaled and not the file descriptor itself. Put in other words, | |
3847 | * grouping is a property of the flags argument not a property of the file | |
3848 | * descriptor. | |
3849 | * | |
3850 | * Return: 0 on success, negative errno on failure | |
3851 | */ | |
3852 | SYSCALL_DEFINE4(pidfd_send_signal, int, pidfd, int, sig, | |
3853 | siginfo_t __user *, info, unsigned int, flags) | |
3854 | { | |
3855 | int ret; | |
3856 | struct fd f; | |
3857 | struct pid *pid; | |
3858 | kernel_siginfo_t kinfo; | |
3859 | ||
3860 | /* Enforce flags be set to 0 until we add an extension. */ | |
3861 | if (flags) | |
3862 | return -EINVAL; | |
3863 | ||
738a7832 | 3864 | f = fdget(pidfd); |
3eb39f47 CB |
3865 | if (!f.file) |
3866 | return -EBADF; | |
3867 | ||
3868 | /* Is this a pidfd? */ | |
2151ad1b | 3869 | pid = pidfd_to_pid(f.file); |
3eb39f47 CB |
3870 | if (IS_ERR(pid)) { |
3871 | ret = PTR_ERR(pid); | |
3872 | goto err; | |
3873 | } | |
3874 | ||
3875 | ret = -EINVAL; | |
3876 | if (!access_pidfd_pidns(pid)) | |
3877 | goto err; | |
3878 | ||
3879 | if (info) { | |
3880 | ret = copy_siginfo_from_user_any(&kinfo, info); | |
3881 | if (unlikely(ret)) | |
3882 | goto err; | |
3883 | ||
3884 | ret = -EINVAL; | |
3885 | if (unlikely(sig != kinfo.si_signo)) | |
3886 | goto err; | |
3887 | ||
556a888a JH |
3888 | /* Only allow sending arbitrary signals to yourself. */ |
3889 | ret = -EPERM; | |
3eb39f47 | 3890 | if ((task_pid(current) != pid) && |
556a888a JH |
3891 | (kinfo.si_code >= 0 || kinfo.si_code == SI_TKILL)) |
3892 | goto err; | |
3eb39f47 CB |
3893 | } else { |
3894 | prepare_kill_siginfo(sig, &kinfo); | |
3895 | } | |
3896 | ||
3897 | ret = kill_pid_info(sig, &kinfo, pid); | |
3898 | ||
3899 | err: | |
3900 | fdput(f); | |
3901 | return ret; | |
3902 | } | |
3eb39f47 | 3903 | |
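A hedged userspace sketch of this interface, not part of signal.c. glibc does not necessarily provide wrappers, so raw syscall(2) is used; SYS_pidfd_open and SYS_pidfd_send_signal are assumed to be exposed by <sys/syscall.h> on a sufficiently recent system.

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	pid_t pid;
	int pidfd;

	if (argc < 2)
		return 1;
	pid = (pid_t)atoi(argv[1]);

	pidfd = (int)syscall(SYS_pidfd_open, pid, 0);
	if (pidfd < 0) {
		perror("pidfd_open");
		return 1;
	}
	/* info == NULL: the kernel builds a SI_USER siginfo, as with kill(2). */
	if (syscall(SYS_pidfd_send_signal, pidfd, SIGTERM, NULL, 0) < 0) {
		perror("pidfd_send_signal");
		return 1;
	}
	close(pidfd);
	return 0;
}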
30b4ae8a | 3904 | static int |
ae7795bc | 3905 | do_send_specific(pid_t tgid, pid_t pid, int sig, struct kernel_siginfo *info) |
1da177e4 | 3906 | { |
1da177e4 | 3907 | struct task_struct *p; |
30b4ae8a | 3908 | int error = -ESRCH; |
1da177e4 | 3909 | |
3547ff3a | 3910 | rcu_read_lock(); |
228ebcbe | 3911 | p = find_task_by_vpid(pid); |
b488893a | 3912 | if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) { |
30b4ae8a | 3913 | error = check_kill_permission(sig, info, p); |
1da177e4 LT |
3914 | /* |
3915 | * The null signal is a permissions and process existence | |
3916 | * probe. No signal is actually delivered. | |
3917 | */ | |
4a30debf | 3918 | if (!error && sig) { |
40b3b025 | 3919 | error = do_send_sig_info(sig, info, p, PIDTYPE_PID); |
4a30debf ON |
3920 | /* |
3921 | * If lock_task_sighand() failed we pretend the task | |
3922 | * dies after receiving the signal. The window is tiny, | |
3923 | * and the signal is private anyway. | |
3924 | */ | |
3925 | if (unlikely(error == -ESRCH)) | |
3926 | error = 0; | |
1da177e4 LT |
3927 | } |
3928 | } | |
3547ff3a | 3929 | rcu_read_unlock(); |
6dd69f10 | 3930 | |
1da177e4 LT |
3931 | return error; |
3932 | } | |
3933 | ||
30b4ae8a TG |
3934 | static int do_tkill(pid_t tgid, pid_t pid, int sig) |
3935 | { | |
ae7795bc | 3936 | struct kernel_siginfo info; |
30b4ae8a | 3937 | |
5f74972c | 3938 | clear_siginfo(&info); |
30b4ae8a TG |
3939 | info.si_signo = sig; |
3940 | info.si_errno = 0; | |
3941 | info.si_code = SI_TKILL; | |
3942 | info.si_pid = task_tgid_vnr(current); | |
078de5f7 | 3943 | info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); |
30b4ae8a TG |
3944 | |
3945 | return do_send_specific(tgid, pid, sig, &info); | |
3946 | } | |
3947 | ||
6dd69f10 VL |
3948 | /** |
3949 | * sys_tgkill - send signal to one specific thread | |
3950 | * @tgid: the thread group ID of the thread | |
3951 | * @pid: the PID of the thread | |
3952 | * @sig: signal to be sent | |
3953 | * | |
72fd4a35 | 3954 | * This syscall also checks the @tgid and returns -ESRCH even if the PID |
6dd69f10 VL |
3955 | * exists but no longer belongs to the target process. This method |
3956 | * solves the problem of threads exiting and PIDs getting reused. |
3957 | */ | |
a5f8fa9e | 3958 | SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) |
6dd69f10 VL |
3959 | { |
3960 | /* This is only valid for single tasks */ | |
3961 | if (pid <= 0 || tgid <= 0) | |
3962 | return -EINVAL; | |
3963 | ||
3964 | return do_tkill(tgid, pid, sig); | |
3965 | } | |
3966 | ||
41c57892 RD |
3967 | /** |
3968 | * sys_tkill - send signal to one specific task | |
3969 | * @pid: the PID of the task | |
3970 | * @sig: signal to be sent | |
3971 | * | |
1da177e4 LT |
3972 | * Send a signal to only one task, even if it's a CLONE_THREAD task. |
3973 | */ | |
a5f8fa9e | 3974 | SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) |
1da177e4 | 3975 | { |
1da177e4 LT |
3976 | /* This is only valid for single tasks */ |
3977 | if (pid <= 0) | |
3978 | return -EINVAL; | |
3979 | ||
6dd69f10 | 3980 | return do_tkill(0, pid, sig); |
1da177e4 LT |
3981 | } |
3982 | ||
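A hedged sketch of how userspace typically targets one specific thread: tgkill(2) takes both the tgid and the tid, so a recycled TID belonging to another process cannot be signalled by mistake. Raw syscall(2) is used here since older glibc versions lack tgkill()/gettid() wrappers; this is illustrative only.

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	pid_t tgid = getpid();
	pid_t tid  = (pid_t)syscall(SYS_gettid);	/* == tgid in a single-threaded process */

	signal(SIGUSR1, SIG_IGN);			/* keep the example from terminating */
	if (syscall(SYS_tgkill, tgid, tid, SIGUSR1) < 0)
		perror("tgkill");
	return 0;
}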
ae7795bc | 3983 | static int do_rt_sigqueueinfo(pid_t pid, int sig, kernel_siginfo_t *info) |
75907d4d AV |
3984 | { |
3985 | /* Not even root can pretend to send signals from the kernel. | |
3986 | * Nor can they impersonate a kill()/tgkill(), which adds source info. | |
3987 | */ | |
66dd34ad | 3988 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
69828dce | 3989 | (task_pid_vnr(current) != pid)) |
75907d4d | 3990 | return -EPERM; |
69828dce | 3991 | |
75907d4d AV |
3992 | /* POSIX.1b doesn't mention process groups. */ |
3993 | return kill_proc_info(sig, info, pid); | |
3994 | } | |
3995 | ||
41c57892 RD |
3996 | /** |
3997 | * sys_rt_sigqueueinfo - send signal information to a process |
3998 | * @pid: the PID of the thread | |
3999 | * @sig: signal to be sent | |
4000 | * @uinfo: signal info to be sent | |
4001 | */ | |
a5f8fa9e HC |
4002 | SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, |
4003 | siginfo_t __user *, uinfo) | |
1da177e4 | 4004 | { |
ae7795bc | 4005 | kernel_siginfo_t info; |
601d5abf | 4006 | int ret = __copy_siginfo_from_user(sig, &info, uinfo); |
4cd2e0e7 EB |
4007 | if (unlikely(ret)) |
4008 | return ret; | |
75907d4d AV |
4009 | return do_rt_sigqueueinfo(pid, sig, &info); |
4010 | } | |
1da177e4 | 4011 | |
75907d4d | 4012 | #ifdef CONFIG_COMPAT |
75907d4d AV |
4013 | COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo, |
4014 | compat_pid_t, pid, | |
4015 | int, sig, | |
4016 | struct compat_siginfo __user *, uinfo) | |
4017 | { | |
ae7795bc | 4018 | kernel_siginfo_t info; |
601d5abf | 4019 | int ret = __copy_siginfo_from_user32(sig, &info, uinfo); |
75907d4d AV |
4020 | if (unlikely(ret)) |
4021 | return ret; | |
4022 | return do_rt_sigqueueinfo(pid, sig, &info); | |
1da177e4 | 4023 | } |
75907d4d | 4024 | #endif |
1da177e4 | 4025 | |
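The userspace-visible form of this path is sigqueue(3), which queues a signal together with a caller-chosen value (si_code SI_QUEUE, so the permission check above passes). A minimal sketch, not part of signal.c, assuming a POSIX environment:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t received;

static void handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)ctx;
	received = info->si_value.sival_int;	/* payload passed to sigqueue() */
}

int main(void)
{
	struct sigaction sa = { 0 };
	union sigval val = { .sival_int = 42 };

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;		/* three-argument handler, receives siginfo */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGRTMIN, &sa, NULL);

	sigqueue(getpid(), SIGRTMIN, val);	/* delivered on return from the syscall */
	printf("handler saw value %d\n", (int)received);
	return 0;
}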
ae7795bc | 4026 | static int do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig, kernel_siginfo_t *info) |
62ab4505 TG |
4027 | { |
4028 | /* This is only valid for single tasks */ | |
4029 | if (pid <= 0 || tgid <= 0) | |
4030 | return -EINVAL; | |
4031 | ||
4032 | /* Not even root can pretend to send signals from the kernel. | |
da48524e JT |
4033 | * Nor can they impersonate a kill()/tgkill(), which adds source info. |
4034 | */ | |
69828dce VD |
4035 | if ((info->si_code >= 0 || info->si_code == SI_TKILL) && |
4036 | (task_pid_vnr(current) != pid)) | |
62ab4505 | 4037 | return -EPERM; |
69828dce | 4038 | |
62ab4505 TG |
4039 | return do_send_specific(tgid, pid, sig, info); |
4040 | } | |
4041 | ||
4042 | SYSCALL_DEFINE4(rt_tgsigqueueinfo, pid_t, tgid, pid_t, pid, int, sig, | |
4043 | siginfo_t __user *, uinfo) | |
4044 | { | |
ae7795bc | 4045 | kernel_siginfo_t info; |
601d5abf | 4046 | int ret = __copy_siginfo_from_user(sig, &info, uinfo); |
4cd2e0e7 EB |
4047 | if (unlikely(ret)) |
4048 | return ret; | |
62ab4505 TG |
4049 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
4050 | } | |
4051 | ||
9aae8fc0 AV |
4052 | #ifdef CONFIG_COMPAT |
4053 | COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo, | |
4054 | compat_pid_t, tgid, | |
4055 | compat_pid_t, pid, | |
4056 | int, sig, | |
4057 | struct compat_siginfo __user *, uinfo) | |
4058 | { | |
ae7795bc | 4059 | kernel_siginfo_t info; |
601d5abf | 4060 | int ret = __copy_siginfo_from_user32(sig, &info, uinfo); |
4cd2e0e7 EB |
4061 | if (unlikely(ret)) |
4062 | return ret; | |
9aae8fc0 AV |
4063 | return do_rt_tgsigqueueinfo(tgid, pid, sig, &info); |
4064 | } | |
4065 | #endif | |
4066 | ||
0341729b | 4067 | /* |
b4e74264 | 4068 | * For kthreads only, must not be used if cloned with CLONE_SIGHAND |
0341729b | 4069 | */ |
b4e74264 | 4070 | void kernel_sigaction(int sig, __sighandler_t action) |
0341729b | 4071 | { |
ec5955b8 | 4072 | spin_lock_irq(¤t->sighand->siglock); |
b4e74264 ON |
4073 | current->sighand->action[sig - 1].sa.sa_handler = action; |
4074 | if (action == SIG_IGN) { | |
4075 | sigset_t mask; | |
0341729b | 4076 | |
b4e74264 ON |
4077 | sigemptyset(&mask); |
4078 | sigaddset(&mask, sig); | |
580d34e4 | 4079 | |
b4e74264 ON |
4080 | flush_sigqueue_mask(&mask, ¤t->signal->shared_pending); |
4081 | flush_sigqueue_mask(&mask, ¤t->pending); | |
4082 | recalc_sigpending(); | |
4083 | } | |
0341729b ON |
4084 | spin_unlock_irq(¤t->sighand->siglock); |
4085 | } | |
b4e74264 | 4086 | EXPORT_SYMBOL(kernel_sigaction); |
0341729b | 4087 | |
68463510 DS |
4088 | void __weak sigaction_compat_abi(struct k_sigaction *act, |
4089 | struct k_sigaction *oact) | |
4090 | { | |
4091 | } | |
4092 | ||
88531f72 | 4093 | int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact) |
1da177e4 | 4094 | { |
afe2b038 | 4095 | struct task_struct *p = current, *t; |
1da177e4 | 4096 | struct k_sigaction *k; |
71fabd5e | 4097 | sigset_t mask; |
1da177e4 | 4098 | |
7ed20e1a | 4099 | if (!valid_signal(sig) || sig < 1 || (act && sig_kernel_only(sig))) |
1da177e4 LT |
4100 | return -EINVAL; |
4101 | ||
afe2b038 | 4102 | k = &p->sighand->action[sig-1]; |
1da177e4 | 4103 | |
afe2b038 | 4104 | spin_lock_irq(&p->sighand->siglock); |
1da177e4 LT |
4105 | if (oact) |
4106 | *oact = *k; | |
4107 | ||
a54f0dfd PC |
4108 | /* |
4109 | * Make sure that we never accidentally claim to support SA_UNSUPPORTED, | |
4110 | * e.g. by having an architecture use the bit in their uapi. | |
4111 | */ | |
4112 | BUILD_BUG_ON(UAPI_SA_FLAGS & SA_UNSUPPORTED); | |
4113 | ||
23acdc76 PC |
4114 | /* |
4115 | * Clear unknown flag bits in order to allow userspace to detect missing | |
4116 | * support for flag bits and to allow the kernel to use non-uapi bits | |
4117 | * internally. | |
4118 | */ | |
4119 | if (act) | |
4120 | act->sa.sa_flags &= UAPI_SA_FLAGS; | |
4121 | if (oact) | |
4122 | oact->sa.sa_flags &= UAPI_SA_FLAGS; | |
4123 | ||
68463510 DS |
4124 | sigaction_compat_abi(act, oact); |
4125 | ||
1da177e4 | 4126 | if (act) { |
9ac95f2f ON |
4127 | sigdelsetmask(&act->sa.sa_mask, |
4128 | sigmask(SIGKILL) | sigmask(SIGSTOP)); | |
88531f72 | 4129 | *k = *act; |
1da177e4 LT |
4130 | /* |
4131 | * POSIX 3.3.1.3: | |
4132 | * "Setting a signal action to SIG_IGN for a signal that is | |
4133 | * pending shall cause the pending signal to be discarded, | |
4134 | * whether or not it is blocked." | |
4135 | * | |
4136 | * "Setting a signal action to SIG_DFL for a signal that is | |
4137 | * pending and whose default action is to ignore the signal | |
4138 | * (for example, SIGCHLD), shall cause the pending signal to | |
4139 | * be discarded, whether or not it is blocked" | |
4140 | */ | |
afe2b038 | 4141 | if (sig_handler_ignored(sig_handler(p, sig), sig)) { |
71fabd5e GA |
4142 | sigemptyset(&mask); |
4143 | sigaddset(&mask, sig); | |
afe2b038 ON |
4144 | flush_sigqueue_mask(&mask, &p->signal->shared_pending); |
4145 | for_each_thread(p, t) | |
c09c1441 | 4146 | flush_sigqueue_mask(&mask, &t->pending); |
1da177e4 | 4147 | } |
1da177e4 LT |
4148 | } |
4149 | ||
afe2b038 | 4150 | spin_unlock_irq(&p->sighand->siglock); |
1da177e4 LT |
4151 | return 0; |
4152 | } | |
4153 | ||
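The POSIX 3.3.1.3 rule quoted above is observable from userspace. A hedged sketch, not part of signal.c: leave a signal pending while blocked, then discard it by switching the disposition to SIG_IGN.

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);
	raise(SIGUSR1);				/* now pending, because it is blocked */

	signal(SIGUSR1, SIG_IGN);		/* per POSIX, discards the pending instance */

	sigpending(&pending);
	printf("still pending: %d\n", sigismember(&pending, SIGUSR1));	/* prints 0 */
	return 0;
}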
c09c1441 | 4154 | static int |
22839869 WD |
4155 | do_sigaltstack (const stack_t *ss, stack_t *oss, unsigned long sp, |
4156 | size_t min_ss_size) | |
1da177e4 | 4157 | { |
bcfe8ad8 | 4158 | struct task_struct *t = current; |
1da177e4 | 4159 | |
bcfe8ad8 AV |
4160 | if (oss) { |
4161 | memset(oss, 0, sizeof(stack_t)); | |
4162 | oss->ss_sp = (void __user *) t->sas_ss_sp; | |
4163 | oss->ss_size = t->sas_ss_size; | |
4164 | oss->ss_flags = sas_ss_flags(sp) | | |
4165 | (current->sas_ss_flags & SS_FLAG_BITS); | |
4166 | } | |
1da177e4 | 4167 | |
bcfe8ad8 AV |
4168 | if (ss) { |
4169 | void __user *ss_sp = ss->ss_sp; | |
4170 | size_t ss_size = ss->ss_size; | |
4171 | unsigned ss_flags = ss->ss_flags; | |
407bc16a | 4172 | int ss_mode; |
1da177e4 | 4173 | |
bcfe8ad8 AV |
4174 | if (unlikely(on_sig_stack(sp))) |
4175 | return -EPERM; | |
1da177e4 | 4176 | |
407bc16a | 4177 | ss_mode = ss_flags & ~SS_FLAG_BITS; |
bcfe8ad8 AV |
4178 | if (unlikely(ss_mode != SS_DISABLE && ss_mode != SS_ONSTACK && |
4179 | ss_mode != 0)) | |
4180 | return -EINVAL; | |
1da177e4 | 4181 | |
407bc16a | 4182 | if (ss_mode == SS_DISABLE) { |
1da177e4 LT |
4183 | ss_size = 0; |
4184 | ss_sp = NULL; | |
4185 | } else { | |
22839869 | 4186 | if (unlikely(ss_size < min_ss_size)) |
bcfe8ad8 | 4187 | return -ENOMEM; |
1da177e4 LT |
4188 | } |
4189 | ||
bcfe8ad8 AV |
4190 | t->sas_ss_sp = (unsigned long) ss_sp; |
4191 | t->sas_ss_size = ss_size; | |
4192 | t->sas_ss_flags = ss_flags; | |
1da177e4 | 4193 | } |
bcfe8ad8 | 4194 | return 0; |
1da177e4 | 4195 | } |
bcfe8ad8 | 4196 | |
6bf9adfc AV |
4197 | SYSCALL_DEFINE2(sigaltstack,const stack_t __user *,uss, stack_t __user *,uoss) |
4198 | { | |
bcfe8ad8 AV |
4199 | stack_t new, old; |
4200 | int err; | |
4201 | if (uss && copy_from_user(&new, uss, sizeof(stack_t))) | |
4202 | return -EFAULT; | |
4203 | err = do_sigaltstack(uss ? &new : NULL, uoss ? &old : NULL, | |
22839869 WD |
4204 | current_user_stack_pointer(), |
4205 | MINSIGSTKSZ); | |
bcfe8ad8 AV |
4206 | if (!err && uoss && copy_to_user(uoss, &old, sizeof(stack_t))) |
4207 | err = -EFAULT; | |
4208 | return err; | |
6bf9adfc | 4209 | } |
1da177e4 | 4210 | |
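A hedged userspace sketch (not part of signal.c) of the usual pairing: sigaltstack(2) registers the alternate stack that do_sigaltstack() records, and SA_ONSTACK requests delivery on it, which is how a SIGSEGV handler can still run after the main stack overflows.

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void on_segv(int sig)
{
	(void)sig;
	/* Runs on the alternate stack even if the main stack is exhausted. */
	_Exit(1);
}

int main(void)
{
	stack_t ss = { 0 };
	struct sigaction sa = { 0 };

	ss.ss_sp = malloc(SIGSTKSZ);
	ss.ss_size = SIGSTKSZ;		/* must be >= MINSIGSTKSZ, or sigaltstack fails with ENOMEM */
	ss.ss_flags = 0;
	if (ss.ss_sp == NULL || sigaltstack(&ss, NULL) == -1) {
		perror("sigaltstack");
		return 1;
	}

	sa.sa_handler = on_segv;
	sa.sa_flags = SA_ONSTACK;	/* deliver SIGSEGV on the alternate stack */
	sigemptyset(&sa.sa_mask);
	sigaction(SIGSEGV, &sa, NULL);
	return 0;
}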
5c49574f AV |
4211 | int restore_altstack(const stack_t __user *uss) |
4212 | { | |
bcfe8ad8 AV |
4213 | stack_t new; |
4214 | if (copy_from_user(&new, uss, sizeof(stack_t))) | |
4215 | return -EFAULT; | |
22839869 WD |
4216 | (void)do_sigaltstack(&new, NULL, current_user_stack_pointer(), |
4217 | MINSIGSTKSZ); | |
5c49574f | 4218 | /* squash all but EFAULT for now */ |
bcfe8ad8 | 4219 | return 0; |
5c49574f AV |
4220 | } |
4221 | ||
c40702c4 AV |
4222 | int __save_altstack(stack_t __user *uss, unsigned long sp) |
4223 | { | |
4224 | struct task_struct *t = current; | |
2a742138 SS |
4225 | int err = __put_user((void __user *)t->sas_ss_sp, &uss->ss_sp) | |
4226 | __put_user(t->sas_ss_flags, &uss->ss_flags) | | |
c40702c4 | 4227 | __put_user(t->sas_ss_size, &uss->ss_size); |
97c885d5 | 4228 | return err; |
c40702c4 AV |
4229 | } |
4230 | ||
90268439 | 4231 | #ifdef CONFIG_COMPAT |
6203deb0 DB |
4232 | static int do_compat_sigaltstack(const compat_stack_t __user *uss_ptr, |
4233 | compat_stack_t __user *uoss_ptr) | |
90268439 AV |
4234 | { |
4235 | stack_t uss, uoss; | |
4236 | int ret; | |
90268439 AV |
4237 | |
4238 | if (uss_ptr) { | |
4239 | compat_stack_t uss32; | |
90268439 AV |
4240 | if (copy_from_user(&uss32, uss_ptr, sizeof(compat_stack_t))) |
4241 | return -EFAULT; | |
4242 | uss.ss_sp = compat_ptr(uss32.ss_sp); | |
4243 | uss.ss_flags = uss32.ss_flags; | |
4244 | uss.ss_size = uss32.ss_size; | |
4245 | } | |
bcfe8ad8 | 4246 | ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, |
22839869 WD |
4247 | compat_user_stack_pointer(), |
4248 | COMPAT_MINSIGSTKSZ); | |
90268439 | 4249 | if (ret >= 0 && uoss_ptr) { |
bcfe8ad8 AV |
4250 | compat_stack_t old; |
4251 | memset(&old, 0, sizeof(old)); | |
4252 | old.ss_sp = ptr_to_compat(uoss.ss_sp); | |
4253 | old.ss_flags = uoss.ss_flags; | |
4254 | old.ss_size = uoss.ss_size; | |
4255 | if (copy_to_user(uoss_ptr, &old, sizeof(compat_stack_t))) | |
90268439 AV |
4256 | ret = -EFAULT; |
4257 | } | |
4258 | return ret; | |
4259 | } | |
4260 | ||
6203deb0 DB |
4261 | COMPAT_SYSCALL_DEFINE2(sigaltstack, |
4262 | const compat_stack_t __user *, uss_ptr, | |
4263 | compat_stack_t __user *, uoss_ptr) | |
4264 | { | |
4265 | return do_compat_sigaltstack(uss_ptr, uoss_ptr); | |
4266 | } | |
4267 | ||
90268439 AV |
4268 | int compat_restore_altstack(const compat_stack_t __user *uss) |
4269 | { | |
6203deb0 | 4270 | int err = do_compat_sigaltstack(uss, NULL); |
90268439 AV |
4271 | /* squash all but -EFAULT for now */ |
4272 | return err == -EFAULT ? err : 0; | |
4273 | } | |
c40702c4 AV |
4274 | |
4275 | int __compat_save_altstack(compat_stack_t __user *uss, unsigned long sp) | |
4276 | { | |
441398d3 | 4277 | int err; |
c40702c4 | 4278 | struct task_struct *t = current; |
441398d3 SS |
4279 | err = __put_user(ptr_to_compat((void __user *)t->sas_ss_sp), |
4280 | &uss->ss_sp) | | |
4281 | __put_user(t->sas_ss_flags, &uss->ss_flags) | | |
c40702c4 | 4282 | __put_user(t->sas_ss_size, &uss->ss_size); |
97c885d5 | 4283 | return err; |
c40702c4 | 4284 | } |
90268439 | 4285 | #endif |
1da177e4 LT |
4286 | |
4287 | #ifdef __ARCH_WANT_SYS_SIGPENDING | |
4288 | ||
41c57892 RD |
4289 | /** |
4290 | * sys_sigpending - examine pending signals | |
d53238cd | 4291 | * @uset: where the mask of pending signals is returned |
41c57892 | 4292 | */ |
d53238cd | 4293 | SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, uset) |
1da177e4 | 4294 | { |
d53238cd | 4295 | sigset_t set; |
d53238cd DB |
4296 | |
4297 | if (sizeof(old_sigset_t) > sizeof(*uset)) | |
4298 | return -EINVAL; | |
4299 | ||
b1d294c8 CB |
4300 | do_sigpending(&set); |
4301 | ||
4302 | if (copy_to_user(uset, &set, sizeof(old_sigset_t))) | |
4303 | return -EFAULT; | |
4304 | ||
4305 | return 0; | |
1da177e4 LT |
4306 | } |
4307 | ||
8f13621a AV |
4308 | #ifdef CONFIG_COMPAT |
4309 | COMPAT_SYSCALL_DEFINE1(sigpending, compat_old_sigset_t __user *, set32) | |
4310 | { | |
4311 | sigset_t set; | |
b1d294c8 CB |
4312 | |
4313 | do_sigpending(&set); | |
4314 | ||
4315 | return put_user(set.sig[0], set32); | |
8f13621a AV |
4316 | } |
4317 | #endif | |
4318 | ||
1da177e4 LT |
4319 | #endif |
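
Editor's note: a short userspace sketch of the pending-set query these syscalls expose, using the modern wrappers — block a signal, raise it, and observe it in the pending mask. Illustrative only, not kernel code:

#include <signal.h>
#include <stdio.h>

int main(void)
{
	sigset_t block, pending;

	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, NULL);	/* hold SIGUSR1 back */

	raise(SIGUSR1);				/* now pending, not delivered */

	sigpending(&pending);
	if (sigismember(&pending, SIGUSR1))
		printf("SIGUSR1 is pending\n");
	return 0;
}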
4320 | ||
4321 | #ifdef __ARCH_WANT_SYS_SIGPROCMASK | |
41c57892 RD |
4322 | /** |
4323 | * sys_sigprocmask - examine and change blocked signals | |
4324 | * @how: whether to add, remove, or set signals | |
b013c399 | 4325 | * @nset: signals to add or remove (if non-null) |
41c57892 RD |
4326 | * @oset: previous value of signal mask if non-null |
4327 | * | |
5aba085e RD |
4328 | * Some platforms have their own version with special arguments; |
4329 | * others support only sys_rt_sigprocmask. | |
4330 | */ | |
1da177e4 | 4331 | |
b013c399 | 4332 | SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset, |
b290ebe2 | 4333 | old_sigset_t __user *, oset) |
1da177e4 | 4334 | { |
1da177e4 | 4335 | old_sigset_t old_set, new_set; |
2e4f7c77 | 4336 | sigset_t new_blocked; |
1da177e4 | 4337 | |
b013c399 | 4338 | old_set = current->blocked.sig[0]; |
1da177e4 | 4339 | |
b013c399 ON |
4340 | if (nset) { |
4341 | if (copy_from_user(&new_set, nset, sizeof(*nset))) | |
4342 | return -EFAULT; | |
1da177e4 | 4343 | |
2e4f7c77 | 4344 | new_blocked = current->blocked; |
1da177e4 | 4345 | |
1da177e4 | 4346 | switch (how) { |
1da177e4 | 4347 | case SIG_BLOCK: |
2e4f7c77 | 4348 | sigaddsetmask(&new_blocked, new_set); |
1da177e4 LT |
4349 | break; |
4350 | case SIG_UNBLOCK: | |
2e4f7c77 | 4351 | sigdelsetmask(&new_blocked, new_set); |
1da177e4 LT |
4352 | break; |
4353 | case SIG_SETMASK: | |
2e4f7c77 | 4354 | new_blocked.sig[0] = new_set; |
1da177e4 | 4355 | break; |
2e4f7c77 ON |
4356 | default: |
4357 | return -EINVAL; | |
1da177e4 LT |
4358 | } |
4359 | ||
0c4a8423 | 4360 | set_current_blocked(&new_blocked); |
b013c399 ON |
4361 | } |
4362 | ||
4363 | if (oset) { | |
1da177e4 | 4364 | if (copy_to_user(oset, &old_set, sizeof(*oset))) |
b013c399 | 4365 | return -EFAULT; |
1da177e4 | 4366 | } |
b013c399 ON |
4367 | |
4368 | return 0; | |
1da177e4 LT |
4369 | } |
4370 | #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ | |
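
Editor's note: the three `how` modes in the switch above map one-to-one onto the userspace API. A minimal sketch, illustrative only:

#include <signal.h>

int main(void)
{
	sigset_t set, old;

	sigemptyset(&set);
	sigaddset(&set, SIGINT);

	sigprocmask(SIG_BLOCK, &set, &old);	/* add SIGINT to the mask */
	sigprocmask(SIG_UNBLOCK, &set, NULL);	/* remove it again */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* replace with saved mask */
	return 0;
}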
4371 | ||
eaca6eae | 4372 | #ifndef CONFIG_ODD_RT_SIGACTION |
41c57892 RD |
4373 | /** |
4374 | * sys_rt_sigaction - alter an action taken by a process | |
4375 | * @sig: signal to be sent | |
f9fa0bc1 RD |
4376 | * @act: new sigaction |
4377 | * @oact: used to save the previous sigaction | |
41c57892 RD |
4378 | * @sigsetsize: size of sigset_t type |
4379 | */ | |
d4e82042 HC |
4380 | SYSCALL_DEFINE4(rt_sigaction, int, sig, |
4381 | const struct sigaction __user *, act, | |
4382 | struct sigaction __user *, oact, | |
4383 | size_t, sigsetsize) | |
1da177e4 LT |
4384 | { |
4385 | struct k_sigaction new_sa, old_sa; | |
d8f993b3 | 4386 | int ret; |
1da177e4 LT |
4387 | |
4388 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
4389 | if (sigsetsize != sizeof(sigset_t)) | |
d8f993b3 | 4390 | return -EINVAL; |
1da177e4 | 4391 | |
d8f993b3 CB |
4392 | if (act && copy_from_user(&new_sa.sa, act, sizeof(new_sa.sa))) |
4393 | return -EFAULT; | |
1da177e4 LT |
4394 | |
4395 | ret = do_sigaction(sig, act ? &new_sa : NULL, oact ? &old_sa : NULL); | |
d8f993b3 CB |
4396 | if (ret) |
4397 | return ret; | |
1da177e4 | 4398 | |
d8f993b3 CB |
4399 | if (oact && copy_to_user(oact, &old_sa.sa, sizeof(old_sa.sa))) |
4400 | return -EFAULT; | |
4401 | ||
4402 | return 0; | |
1da177e4 | 4403 | } |
08d32fe5 | 4404 | #ifdef CONFIG_COMPAT |
08d32fe5 AV |
4405 | COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig, |
4406 | const struct compat_sigaction __user *, act, | |
4407 | struct compat_sigaction __user *, oact, | |
4408 | compat_size_t, sigsetsize) | |
4409 | { | |
4410 | struct k_sigaction new_ka, old_ka; | |
08d32fe5 AV |
4411 | #ifdef __ARCH_HAS_SA_RESTORER |
4412 | compat_uptr_t restorer; | |
4413 | #endif | |
4414 | int ret; | |
4415 | ||
4416 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
4417 | if (sigsetsize != sizeof(compat_sigset_t)) | |
4418 | return -EINVAL; | |
4419 | ||
4420 | if (act) { | |
4421 | compat_uptr_t handler; | |
4422 | ret = get_user(handler, &act->sa_handler); | |
4423 | new_ka.sa.sa_handler = compat_ptr(handler); | |
4424 | #ifdef __ARCH_HAS_SA_RESTORER | |
4425 | ret |= get_user(restorer, &act->sa_restorer); | |
4426 | new_ka.sa.sa_restorer = compat_ptr(restorer); | |
4427 | #endif | |
3968cf62 | 4428 | ret |= get_compat_sigset(&new_ka.sa.sa_mask, &act->sa_mask); |
3ddc5b46 | 4429 | ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); |
08d32fe5 AV |
4430 | if (ret) |
4431 | return -EFAULT; | |
08d32fe5 AV |
4432 | } |
4433 | ||
4434 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | |
4435 | if (!ret && oact) { | |
08d32fe5 AV |
4436 | ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), |
4437 | &oact->sa_handler); | |
f454322e DL |
4438 | ret |= put_compat_sigset(&oact->sa_mask, &old_ka.sa.sa_mask, |
4439 | sizeof(oact->sa_mask)); | |
3ddc5b46 | 4440 | ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); |
08d32fe5 AV |
4441 | #ifdef __ARCH_HAS_SA_RESTORER |
4442 | ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), | |
4443 | &oact->sa_restorer); | |
4444 | #endif | |
4445 | } | |
4446 | return ret; | |
4447 | } | |
4448 | #endif | |
eaca6eae | 4449 | #endif /* !CONFIG_ODD_RT_SIGACTION */ |
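
Editor's note: glibc's sigaction() wrapper calls rt_sigaction with sigsetsize equal to sizeof(sigset_t), which is what the size check above enforces. A minimal SA_SIGINFO sketch, illustrative only (printf in a handler is not strictly async-signal-safe, but harmless in this synchronous demo):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)ucontext;
	printf("signal %d from pid %d\n", sig, (int)info->si_pid);
}

int main(void)
{
	struct sigaction sa = {
		.sa_sigaction = handler,
		.sa_flags = SA_SIGINFO,	/* three-argument handler */
	};

	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);
	kill(getpid(), SIGUSR1);
	return 0;
}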
1da177e4 | 4450 | |
495dfbf7 AV |
4451 | #ifdef CONFIG_OLD_SIGACTION |
4452 | SYSCALL_DEFINE3(sigaction, int, sig, | |
4453 | const struct old_sigaction __user *, act, | |
4454 | struct old_sigaction __user *, oact) | |
4455 | { | |
4456 | struct k_sigaction new_ka, old_ka; | |
4457 | int ret; | |
4458 | ||
4459 | if (act) { | |
4460 | old_sigset_t mask; | |
96d4f267 | 4461 | if (!access_ok(act, sizeof(*act)) || |
495dfbf7 AV |
4462 | __get_user(new_ka.sa.sa_handler, &act->sa_handler) || |
4463 | __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || | |
4464 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | |
4465 | __get_user(mask, &act->sa_mask)) | |
4466 | return -EFAULT; | |
4467 | #ifdef __ARCH_HAS_KA_RESTORER | |
4468 | new_ka.ka_restorer = NULL; | |
4469 | #endif | |
4470 | siginitset(&new_ka.sa.sa_mask, mask); | |
4471 | } | |
4472 | ||
4473 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | |
4474 | ||
4475 | if (!ret && oact) { | |
96d4f267 | 4476 | if (!access_ok(oact, sizeof(*oact)) || |
495dfbf7 AV |
4477 | __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || |
4478 | __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || | |
4479 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | |
4480 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | |
4481 | return -EFAULT; | |
4482 | } | |
4483 | ||
4484 | return ret; | |
4485 | } | |
4486 | #endif | |
4487 | #ifdef CONFIG_COMPAT_OLD_SIGACTION | |
4488 | COMPAT_SYSCALL_DEFINE3(sigaction, int, sig, | |
4489 | const struct compat_old_sigaction __user *, act, | |
4490 | struct compat_old_sigaction __user *, oact) | |
4491 | { | |
4492 | struct k_sigaction new_ka, old_ka; | |
4493 | int ret; | |
4494 | compat_old_sigset_t mask; | |
4495 | compat_uptr_t handler, restorer; | |
4496 | ||
4497 | if (act) { | |
96d4f267 | 4498 | if (!access_ok(act, sizeof(*act)) || |
495dfbf7 AV |
4499 | __get_user(handler, &act->sa_handler) || |
4500 | __get_user(restorer, &act->sa_restorer) || | |
4501 | __get_user(new_ka.sa.sa_flags, &act->sa_flags) || | |
4502 | __get_user(mask, &act->sa_mask)) | |
4503 | return -EFAULT; | |
4504 | ||
4505 | #ifdef __ARCH_HAS_KA_RESTORER | |
4506 | new_ka.ka_restorer = NULL; | |
4507 | #endif | |
4508 | new_ka.sa.sa_handler = compat_ptr(handler); | |
4509 | new_ka.sa.sa_restorer = compat_ptr(restorer); | |
4510 | siginitset(&new_ka.sa.sa_mask, mask); | |
4511 | } | |
4512 | ||
4513 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | |
4514 | ||
4515 | if (!ret && oact) { | |
96d4f267 | 4516 | if (!access_ok(oact, sizeof(*oact)) || |
495dfbf7 AV |
4517 | __put_user(ptr_to_compat(old_ka.sa.sa_handler), |
4518 | &oact->sa_handler) || | |
4519 | __put_user(ptr_to_compat(old_ka.sa.sa_restorer), | |
4520 | &oact->sa_restorer) || | |
4521 | __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || | |
4522 | __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) | |
4523 | return -EFAULT; | |
4524 | } | |
4525 | return ret; | |
4526 | } | |
4527 | #endif | |
1da177e4 | 4528 | |
f6187769 | 4529 | #ifdef CONFIG_SGETMASK_SYSCALL |
1da177e4 LT |
4530 | |
4531 | /* | |
4532 | * For backwards compatibility. Functionality superseded by sigprocmask. | |
4533 | */ | |
a5f8fa9e | 4534 | SYSCALL_DEFINE0(sgetmask) |
1da177e4 LT |
4535 | { |
4536 | /* SMP safe */ | |
4537 | return current->blocked.sig[0]; | |
4538 | } | |
4539 | ||
a5f8fa9e | 4540 | SYSCALL_DEFINE1(ssetmask, int, newmask) |
1da177e4 | 4541 | { |
c1095c6d ON |
4542 | int old = current->blocked.sig[0]; |
4543 | sigset_t newset; | |
1da177e4 | 4544 | |
5ba53ff6 | 4545 | siginitset(&newset, newmask); |
c1095c6d | 4546 | set_current_blocked(&newset); |
1da177e4 LT |
4547 | |
4548 | return old; | |
4549 | } | |
f6187769 | 4550 | #endif /* CONFIG_SGETMASK_SYSCALL */ |
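
Editor's note: these legacy calls have no glibc wrappers and exist only when CONFIG_SGETMASK_SYSCALL is set. A raw syscall(2) sketch, assuming the architecture defines __NR_sgetmask/__NR_ssetmask (illustrative only):

#include <signal.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
#if defined(__NR_sgetmask) && defined(__NR_ssetmask)
	long old = syscall(__NR_sgetmask);	/* current->blocked.sig[0] */

	syscall(__NR_ssetmask, old | (1L << (SIGINT - 1)));	/* block SIGINT */
	syscall(__NR_ssetmask, old);				/* restore */
	printf("old mask: %#lx\n", old);
#endif
	return 0;
}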
1da177e4 LT |
4551 | |
4552 | #ifdef __ARCH_WANT_SYS_SIGNAL | |
4553 | /* | |
4554 | * For backwards compatibility. Functionality superseded by sigaction. | |
4555 | */ | |
a5f8fa9e | 4556 | SYSCALL_DEFINE2(signal, int, sig, __sighandler_t, handler) |
1da177e4 LT |
4557 | { |
4558 | struct k_sigaction new_sa, old_sa; | |
4559 | int ret; | |
4560 | ||
4561 | new_sa.sa.sa_handler = handler; | |
4562 | new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK; | |
c70d3d70 | 4563 | sigemptyset(&new_sa.sa.sa_mask); |
1da177e4 LT |
4564 | |
4565 | ret = do_sigaction(sig, &new_sa, &old_sa); | |
4566 | ||
4567 | return ret ? ret : (unsigned long)old_sa.sa.sa_handler; | |
4568 | } | |
4569 | #endif /* __ARCH_WANT_SYS_SIGNAL */ | |
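
Editor's note: SA_ONESHOT|SA_NOMASK above gives classic SysV semantics — the disposition resets to SIG_DFL on delivery and the signal is not blocked inside its own handler. glibc's signal() actually provides BSD semantics via sigaction(); a sketch showing the returned previous handler (illustrative only):

#include <signal.h>
#include <stdio.h>

static void once(int sig)
{
	/* Under raw SysV semantics the disposition is already back to
	 * SIG_DFL here; a persistent handler must re-install itself. */
	(void)sig;
}

int main(void)
{
	void (*prev)(int) = signal(SIGUSR1, once);

	if (prev == SIG_DFL)
		puts("previous disposition was SIG_DFL");
	raise(SIGUSR1);
	return 0;
}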
4570 | ||
4571 | #ifdef __ARCH_WANT_SYS_PAUSE | |
4572 | ||
a5f8fa9e | 4573 | SYSCALL_DEFINE0(pause) |
1da177e4 | 4574 | { |
d92fcf05 | 4575 | while (!signal_pending(current)) { |
1df01355 | 4576 | __set_current_state(TASK_INTERRUPTIBLE); |
d92fcf05 ON |
4577 | schedule(); |
4578 | } | |
1da177e4 LT |
4579 | return -ERESTARTNOHAND; |
4580 | } | |
4581 | ||
4582 | #endif | |
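
Editor's note: -ERESTARTNOHAND means pause() is never transparently restarted; once a handler has run it returns -1 with errno set to EINTR. A sketch, illustrative only:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void wake(int sig) { (void)sig; }

int main(void)
{
	signal(SIGALRM, wake);
	alarm(1);	/* deliver SIGALRM in one second */
	pause();	/* returns -1 with errno == EINTR */
	puts("woken by a signal");
	return 0;
}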
4583 | ||
9d8a7652 | 4584 | static int sigsuspend(sigset_t *set) |
68f3f16d | 4585 | { |
68f3f16d AV |
4586 | current->saved_sigmask = current->blocked; |
4587 | set_current_blocked(set); | |
4588 | ||
823dd322 SL |
4589 | while (!signal_pending(current)) { |
4590 | __set_current_state(TASK_INTERRUPTIBLE); | |
4591 | schedule(); | |
4592 | } | |
68f3f16d AV |
4593 | set_restore_sigmask(); |
4594 | return -ERESTARTNOHAND; | |
4595 | } | |
68f3f16d | 4596 | |
41c57892 RD |
4597 | /** |
4598 | * sys_rt_sigsuspend - replace the signal mask with the @unewset |
4599 | * value until a signal is received |
4600 | * @unewset: new signal mask value | |
4601 | * @sigsetsize: size of sigset_t type | |
4602 | */ | |
d4e82042 | 4603 | SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) |
150256d8 DW |
4604 | { |
4605 | sigset_t newset; | |
4606 | ||
4607 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
4608 | if (sigsetsize != sizeof(sigset_t)) | |
4609 | return -EINVAL; | |
4610 | ||
4611 | if (copy_from_user(&newset, unewset, sizeof(newset))) | |
4612 | return -EFAULT; | |
68f3f16d | 4613 | return sigsuspend(&newset); |
150256d8 | 4614 | } |
ad4b65a4 AV |
4615 | |
4616 | #ifdef CONFIG_COMPAT | |
4617 | COMPAT_SYSCALL_DEFINE2(rt_sigsuspend, compat_sigset_t __user *, unewset, compat_size_t, sigsetsize) | |
4618 | { | |
ad4b65a4 | 4619 | sigset_t newset; |
ad4b65a4 AV |
4620 | |
4621 | /* XXX: Don't preclude handling different sized sigset_t's. */ | |
4622 | if (sigsetsize != sizeof(sigset_t)) | |
4623 | return -EINVAL; | |
4624 | ||
3968cf62 | 4625 | if (get_compat_sigset(&newset, unewset)) |
ad4b65a4 | 4626 | return -EFAULT; |
ad4b65a4 | 4627 | return sigsuspend(&newset); |
ad4b65a4 AV |
4628 | } |
4629 | #endif | |
150256d8 | 4630 | |
0a0e8cdf AV |
4631 | #ifdef CONFIG_OLD_SIGSUSPEND |
4632 | SYSCALL_DEFINE1(sigsuspend, old_sigset_t, mask) | |
4633 | { | |
4634 | sigset_t blocked; | |
4635 | siginitset(&blocked, mask); | |
4636 | return sigsuspend(&blocked); | |
4637 | } | |
4638 | #endif | |
4639 | #ifdef CONFIG_OLD_SIGSUSPEND3 | |
4640 | SYSCALL_DEFINE3(sigsuspend, int, unused1, int, unused2, old_sigset_t, mask) | |
4641 | { | |
4642 | sigset_t blocked; | |
4643 | siginitset(&blocked, mask); | |
4644 | return sigsuspend(&blocked); | |
4645 | } | |
4646 | #endif | |
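
Editor's note: the block/test/atomically-unblock-and-sleep pattern is what sigsuspend() exists for — without it there is a lost-wakeup window between testing a flag and going to sleep. A userspace sketch, illustrative only:

#include <signal.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig) { (void)sig; got_usr1 = 1; }

int main(void)
{
	sigset_t block, orig;

	signal(SIGUSR1, on_usr1);
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &orig);	/* step 1: block */

	raise(SIGUSR1);		/* stays pending while blocked */

	while (!got_usr1)	/* step 2: test under the blocked mask */
		sigsuspend(&orig);	/* step 3: atomic unblock + sleep */

	sigprocmask(SIG_SETMASK, &orig, NULL);
	return 0;
}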
150256d8 | 4647 | |
52f5684c | 4648 | __weak const char *arch_vma_name(struct vm_area_struct *vma) |
f269fdd1 DH |
4649 | { |
4650 | return NULL; | |
4651 | } | |
4652 | ||
ae7795bc | 4653 | static inline void siginfo_buildtime_checks(void) |
1da177e4 | 4654 | { |
aba1be2f | 4655 | BUILD_BUG_ON(sizeof(struct siginfo) != SI_MAX_SIZE); |
41b27154 | 4656 | |
ae7795bc EB |
4657 | /* Verify the offsets in the two siginfos match */ |
4658 | #define CHECK_OFFSET(field) \ | |
4659 | BUILD_BUG_ON(offsetof(siginfo_t, field) != offsetof(kernel_siginfo_t, field)) | |
4660 | ||
4661 | /* kill */ | |
4662 | CHECK_OFFSET(si_pid); | |
4663 | CHECK_OFFSET(si_uid); | |
4664 | ||
4665 | /* timer */ | |
4666 | CHECK_OFFSET(si_tid); | |
4667 | CHECK_OFFSET(si_overrun); | |
4668 | CHECK_OFFSET(si_value); | |
4669 | ||
4670 | /* rt */ | |
4671 | CHECK_OFFSET(si_pid); | |
4672 | CHECK_OFFSET(si_uid); | |
4673 | CHECK_OFFSET(si_value); | |
4674 | ||
4675 | /* sigchld */ | |
4676 | CHECK_OFFSET(si_pid); | |
4677 | CHECK_OFFSET(si_uid); | |
4678 | CHECK_OFFSET(si_status); | |
4679 | CHECK_OFFSET(si_utime); | |
4680 | CHECK_OFFSET(si_stime); | |
4681 | ||
4682 | /* sigfault */ | |
4683 | CHECK_OFFSET(si_addr); | |
add0b32e | 4684 | CHECK_OFFSET(si_trapno); |
ae7795bc EB |
4685 | CHECK_OFFSET(si_addr_lsb); |
4686 | CHECK_OFFSET(si_lower); | |
4687 | CHECK_OFFSET(si_upper); | |
4688 | CHECK_OFFSET(si_pkey); | |
0683b531 EB |
4689 | CHECK_OFFSET(si_perf_data); |
4690 | CHECK_OFFSET(si_perf_type); | |
ae7795bc EB |
4691 | |
4692 | /* sigpoll */ | |
4693 | CHECK_OFFSET(si_band); | |
4694 | CHECK_OFFSET(si_fd); | |
4695 | ||
4696 | /* sigsys */ | |
4697 | CHECK_OFFSET(si_call_addr); | |
4698 | CHECK_OFFSET(si_syscall); | |
4699 | CHECK_OFFSET(si_arch); | |
4700 | #undef CHECK_OFFSET | |
70f1b0d3 EB |
4701 | |
4702 | /* usb asyncio */ | |
4703 | BUILD_BUG_ON(offsetof(struct siginfo, si_pid) != | |
4704 | offsetof(struct siginfo, si_addr)); | |
4705 | if (sizeof(int) == sizeof(void __user *)) { | |
4706 | BUILD_BUG_ON(sizeof_field(struct siginfo, si_pid) != | |
4707 | sizeof(void __user *)); | |
4708 | } else { | |
4709 | BUILD_BUG_ON((sizeof_field(struct siginfo, si_pid) + | |
4710 | sizeof_field(struct siginfo, si_uid)) != | |
4711 | sizeof(void __user *)); | |
4712 | BUILD_BUG_ON(offsetofend(struct siginfo, si_pid) != | |
4713 | offsetof(struct siginfo, si_uid)); | |
4714 | } | |
4715 | #ifdef CONFIG_COMPAT | |
4716 | BUILD_BUG_ON(offsetof(struct compat_siginfo, si_pid) != | |
4717 | offsetof(struct compat_siginfo, si_addr)); | |
4718 | BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != | |
4719 | sizeof(compat_uptr_t)); | |
4720 | BUILD_BUG_ON(sizeof_field(struct compat_siginfo, si_pid) != | |
4721 | sizeof_field(struct siginfo, si_pid)); | |
4722 | #endif | |
ae7795bc EB |
4723 | } |
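
Editor's note: the CHECK_OFFSET trick above is an offset comparison evaluated at build time. The same layout-pinning technique in plain C11 uses static_assert over offsetof; the structs below are hypothetical, for illustration only:

#include <assert.h>	/* static_assert (C11) */
#include <stddef.h>	/* offsetof */

struct wire_hdr  { int id; int len; };	/* hypothetical ABI-facing struct */
struct local_hdr { int id; int len; };	/* hypothetical internal twin */

/* Fails at compile time, not at run time, if the layouts diverge. */
static_assert(offsetof(struct wire_hdr, len) ==
	      offsetof(struct local_hdr, len),
	      "wire_hdr/local_hdr layout mismatch");

int main(void) { return 0; }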
4724 | ||
4725 | void __init signals_init(void) | |
4726 | { | |
4727 | siginfo_buildtime_checks(); | |
4728 | ||
5f58c398 | 4729 | sigqueue_cachep = KMEM_CACHE(sigqueue, SLAB_PANIC | SLAB_ACCOUNT); |
1da177e4 | 4730 | } |
67fc4e0c JW |
4731 | |
4732 | #ifdef CONFIG_KGDB_KDB | |
4733 | #include <linux/kdb.h> | |
4734 | /* | |
0b44bf9a | 4735 | * kdb_send_sig - Allows kdb to send signals without exposing |
67fc4e0c JW |
4736 | * signal internals. This function checks if the required locks are |
4737 | * available before calling the main signal code, to avoid kdb | |
4738 | * deadlocks. | |
4739 | */ | |
0b44bf9a | 4740 | void kdb_send_sig(struct task_struct *t, int sig) |
67fc4e0c JW |
4741 | { |
4742 | static struct task_struct *kdb_prev_t; | |
0b44bf9a | 4743 | int new_t, ret; |
67fc4e0c JW |
4744 | if (!spin_trylock(&t->sighand->siglock)) { |
4745 | kdb_printf("Can't do kill command now.\n" | |
4746 | "The sigmask lock is held somewhere else in " |
4747 | "the kernel; try again later\n"); |
4748 | return; | |
4749 | } | |
67fc4e0c JW |
4750 | new_t = kdb_prev_t != t; |
4751 | kdb_prev_t = t; | |
b03fbd4f | 4752 | if (!task_is_running(t) && new_t) { |
0b44bf9a | 4753 | spin_unlock(&t->sighand->siglock); |
67fc4e0c JW |
4754 | kdb_printf("Process is not RUNNING, sending a signal from " |
4755 | "kdb risks deadlock on the " |
4756 | "run queue locks.\n" |
4757 | "The signal has _not_ been sent.\n" | |
4758 | "Reissue the kill command if you want to risk " | |
4759 | "the deadlock.\n"); | |
4760 | return; | |
4761 | } | |
b213984b | 4762 | ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID); |
0b44bf9a EB |
4763 | spin_unlock(&t->sighand->siglock); |
4764 | if (ret) | |
67fc4e0c JW |
4765 | kdb_printf("Failed to deliver signal %d to process %d.\n", |
4766 | sig, t->pid); | |
4767 | else | |
4768 | kdb_printf("Signal %d sent to process %d.\n", sig, t->pid); |
4769 | } | |
4770 | #endif /* CONFIG_KGDB_KDB */ |