kernel/exit.c
457c8996 1// SPDX-License-Identifier: GPL-2.0-only
1da177e4
LT
2/*
3 * linux/kernel/exit.c
4 *
5 * Copyright (C) 1991, 1992 Linus Torvalds
6 */
7
1da177e4
LT
8#include <linux/mm.h>
9#include <linux/slab.h>
4eb5aaa3 10#include <linux/sched/autogroup.h>
6e84f315 11#include <linux/sched/mm.h>
03441a34 12#include <linux/sched/stat.h>
29930025 13#include <linux/sched/task.h>
68db0cf1 14#include <linux/sched/task_stack.h>
32ef5517 15#include <linux/sched/cputime.h>
1da177e4 16#include <linux/interrupt.h>
1da177e4 17#include <linux/module.h>
c59ede7b 18#include <linux/capability.h>
1da177e4
LT
19#include <linux/completion.h>
20#include <linux/personality.h>
21#include <linux/tty.h>
da9cbc87 22#include <linux/iocontext.h>
1da177e4 23#include <linux/key.h>
1da177e4
LT
24#include <linux/cpu.h>
25#include <linux/acct.h>
8f0ab514 26#include <linux/tsacct_kern.h>
1da177e4 27#include <linux/file.h>
9f3acc31 28#include <linux/fdtable.h>
80d26af8 29#include <linux/freezer.h>
1da177e4 30#include <linux/binfmts.h>
ab516013 31#include <linux/nsproxy.h>
84d73786 32#include <linux/pid_namespace.h>
1da177e4
LT
33#include <linux/ptrace.h>
34#include <linux/profile.h>
35#include <linux/mount.h>
36#include <linux/proc_fs.h>
49d769d5 37#include <linux/kthread.h>
1da177e4 38#include <linux/mempolicy.h>
c757249a 39#include <linux/taskstats_kern.h>
ca74e92b 40#include <linux/delayacct.h>
b4f48b63 41#include <linux/cgroup.h>
1da177e4 42#include <linux/syscalls.h>
7ed20e1a 43#include <linux/signal.h>
6a14c5c9 44#include <linux/posix-timers.h>
9f46080c 45#include <linux/cn_proc.h>
de5097c2 46#include <linux/mutex.h>
0771dfef 47#include <linux/futex.h>
b92ce558 48#include <linux/pipe_fs_i.h>
fa84cb93 49#include <linux/audit.h> /* for audit_free() */
83cc5ed3 50#include <linux/resource.h>
6eaeeaba 51#include <linux/task_io_accounting_ops.h>
355f841a
EB
52#include <linux/blkdev.h>
53#include <linux/task_work.h>
5ad4e53b 54#include <linux/fs_struct.h>
d84f4f99 55#include <linux/init_task.h>
cdd6c482 56#include <linux/perf_event.h>
ad8d75ff 57#include <trace/events/sched.h>
24f1e32c 58#include <linux/hw_breakpoint.h>
3d5992d2 59#include <linux/oom.h>
54848d73 60#include <linux/writeback.h>
40401530 61#include <linux/shm.h>
5c9a8750 62#include <linux/kcov.h>
50b5e49c 63#include <linux/kmsan.h>
53d3eaa3 64#include <linux/random.h>
8f95c90c 65#include <linux/rcuwait.h>
7e95a225 66#include <linux/compat.h>
b1b6b5a3 67#include <linux/io_uring.h>
670721c7 68#include <linux/kprobes.h>
54ecbe6f 69#include <linux/rethook.h>
9db89b41 70#include <linux/sysfs.h>
fd593511 71#include <linux/user_events.h>
7c0f6ba6 72#include <linux/uaccess.h>
6dfeff09
MWO
73
74#include <uapi/linux/wait.h>
75
1da177e4 76#include <asm/unistd.h>
1da177e4
LT
77#include <asm/mmu_context.h>
78
2e521a20
JA
79#include "exit.h"
80
d4ccd54d
JH
81/*
82 * The default value should be high enough to not crash a system that randomly
83 * crashes its kernel from time to time, but low enough to at least not permit
84 * overflowing 32-bit refcounts or the ldsem writer count.
85 */
86static unsigned int oops_limit = 10000;
87
88#ifdef CONFIG_SYSCTL
89static struct ctl_table kern_exit_table[] = {
90 {
91 .procname = "oops_limit",
92 .data = &oops_limit,
93 .maxlen = sizeof(oops_limit),
94 .mode = 0644,
95 .proc_handler = proc_douintvec,
96 },
d4ccd54d
JH
97};
98
99static __init int kernel_exit_sysctls_init(void)
100{
101 register_sysctl_init("kernel", kern_exit_table);
102 return 0;
103}
104late_initcall(kernel_exit_sysctls_init);
105#endif
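/*
 * For reference, the table above registers under the "kernel" directory, so
 * the knob shows up as /proc/sys/kernel/oops_limit (mode 0644, root-writable).
 * A rough usage sketch from userspace:
 *
 *   # cat /proc/sys/kernel/oops_limit         -> 10000 (the default above)
 *   # echo 0 > /proc/sys/kernel/oops_limit    -> disables the cap entirely,
 *      since make_task_dead() below only panics when the limit is non-zero
 */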
106
9db89b41
KC
107static atomic_t oops_count = ATOMIC_INIT(0);
108
109#ifdef CONFIG_SYSFS
110static ssize_t oops_count_show(struct kobject *kobj, struct kobj_attribute *attr,
111 char *page)
112{
113 return sysfs_emit(page, "%d\n", atomic_read(&oops_count));
114}
115
116static struct kobj_attribute oops_count_attr = __ATTR_RO(oops_count);
117
118static __init int kernel_exit_sysfs_init(void)
119{
120 sysfs_add_file_to_group(kernel_kobj, &oops_count_attr.attr, NULL);
121 return 0;
122}
123late_initcall(kernel_exit_sysfs_init);
124#endif
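/*
 * For reference, oops_count_attr hangs off kernel_kobj, so the counter is
 * exposed (read-only, __ATTR_RO) as /sys/kernel/oops_count, e.g.
 * "cat /sys/kernel/oops_count" reports 0 on a freshly booted system.
 */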
125
d40e48e0 126static void __unhash_process(struct task_struct *p, bool group_dead)
1da177e4
LT
127{
128 nr_threads--;
50d75f8d 129 detach_pid(p, PIDTYPE_PID);
d40e48e0 130 if (group_dead) {
6883f81a 131 detach_pid(p, PIDTYPE_TGID);
1da177e4
LT
132 detach_pid(p, PIDTYPE_PGID);
133 detach_pid(p, PIDTYPE_SID);
c97d9893 134
5e85d4ab 135 list_del_rcu(&p->tasks);
9cd80bbb 136 list_del_init(&p->sibling);
909ea964 137 __this_cpu_dec(process_counts);
1da177e4 138 }
0c740d0a 139 list_del_rcu(&p->thread_node);
1da177e4
LT
140}
141
6a14c5c9
ON
142/*
143 * This function expects the tasklist_lock write-locked.
144 */
145static void __exit_signal(struct task_struct *tsk)
146{
147 struct signal_struct *sig = tsk->signal;
d40e48e0 148 bool group_dead = thread_group_leader(tsk);
6a14c5c9 149 struct sighand_struct *sighand;
3f649ab7 150 struct tty_struct *tty;
5613fda9 151 u64 utime, stime;
6a14c5c9 152
d11c563d 153 sighand = rcu_dereference_check(tsk->sighand,
db1466b3 154 lockdep_tasklist_lock_is_held());
6a14c5c9
ON
155 spin_lock(&sighand->siglock);
156
baa73d9e 157#ifdef CONFIG_POSIX_TIMERS
6a14c5c9 158 posix_cpu_timers_exit(tsk);
b95e31c0 159 if (group_dead)
6a14c5c9 160 posix_cpu_timers_exit_group(tsk);
baa73d9e 161#endif
e0a70217 162
baa73d9e
NP
163 if (group_dead) {
164 tty = sig->tty;
165 sig->tty = NULL;
166 } else {
6a14c5c9
ON
167 /*
168 * If there is any task waiting for the group exit
169 * then notify it:
170 */
d344193a 171 if (sig->notify_count > 0 && !--sig->notify_count)
60700e38 172 wake_up_process(sig->group_exec_task);
6db840fa 173
6a14c5c9
ON
174 if (tsk == sig->curr_target)
175 sig->curr_target = next_thread(tsk);
6a14c5c9
ON
176 }
177
53d3eaa3
NP
178 add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
179 sizeof(unsigned long long));
180
90ed9cbe 181 /*
26e75b5c
ON
182 * Accumulate here the counters for all threads as they die. We could
183 * skip the group leader because it is the last user of signal_struct,
184 * but we want to avoid the race with thread_group_cputime() which can
185 * see the empty ->thread_head list.
90ed9cbe
RR
186 */
187 task_cputime(tsk, &utime, &stime);
e78c3496 188 write_seqlock(&sig->stats_lock);
90ed9cbe
RR
189 sig->utime += utime;
190 sig->stime += stime;
191 sig->gtime += task_gtime(tsk);
192 sig->min_flt += tsk->min_flt;
193 sig->maj_flt += tsk->maj_flt;
194 sig->nvcsw += tsk->nvcsw;
195 sig->nivcsw += tsk->nivcsw;
196 sig->inblock += task_io_get_inblock(tsk);
197 sig->oublock += task_io_get_oublock(tsk);
198 task_io_accounting_add(&sig->ioac, &tsk->ioac);
199 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
b3ac022c 200 sig->nr_threads--;
d40e48e0 201 __unhash_process(tsk, group_dead);
e78c3496 202 write_sequnlock(&sig->stats_lock);
5876700c 203
da7978b0
ON
204 /*
205 * Do this under ->siglock, we can race with another thread
206 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
207 */
208 flush_sigqueue(&tsk->pending);
a7e5328a 209 tsk->sighand = NULL;
6a14c5c9 210 spin_unlock(&sighand->siglock);
6a14c5c9 211
a7e5328a 212 __cleanup_sighand(sighand);
a0be55de 213 clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
d40e48e0 214 if (group_dead) {
6a14c5c9 215 flush_sigqueue(&sig->shared_pending);
4ada856f 216 tty_kref_put(tty);
6a14c5c9
ON
217 }
218}
219
8c7904a0
EB
220static void delayed_put_task_struct(struct rcu_head *rhp)
221{
0a16b607
MD
222 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
223
670721c7 224 kprobe_flush_task(tsk);
54ecbe6f 225 rethook_flush_task(tsk);
4e231c79 226 perf_event_delayed_put(tsk);
0a16b607
MD
227 trace_sched_process_free(tsk);
228 put_task_struct(tsk);
8c7904a0
EB
229}
230
3fbd7ee2
EB
231void put_task_struct_rcu_user(struct task_struct *task)
232{
233 if (refcount_dec_and_test(&task->rcu_users))
234 call_rcu(&task->rcu, delayed_put_task_struct);
235}
f470021a 236
2be9880d
KW
237void __weak release_thread(struct task_struct *dead_task)
238{
239}
240
a0be55de 241void release_task(struct task_struct *p)
1da177e4 242{
36c8b586 243 struct task_struct *leader;
7bc3e6e5 244 struct pid *thread_pid;
1da177e4 245 int zap_leader;
1f09f974 246repeat:
c69e8d9c 247 /* don't need to get the RCU readlock here - the process is dead and
d11c563d
PM
248 * can't be modifying its own credentials. But shut RCU-lockdep up */
249 rcu_read_lock();
21d1c5e3 250 dec_rlimit_ucounts(task_ucounts(p), UCOUNT_RLIMIT_NPROC, 1);
d11c563d 251 rcu_read_unlock();
c69e8d9c 252
6b115bf5 253 cgroup_release(p);
0203026b 254
1da177e4 255 write_lock_irq(&tasklist_lock);
a288eecc 256 ptrace_release_task(p);
7bc3e6e5 257 thread_pid = get_pid(p->thread_pid);
1da177e4 258 __exit_signal(p);
35f5cad8 259
1da177e4
LT
260 /*
261 * If we are the last non-leader member of the thread
262 * group, and the leader is zombie, then notify the
263 * group leader's parent process. (if it wants notification.)
264 */
265 zap_leader = 0;
266 leader = p->group_leader;
a0be55de
IA
267 if (leader != p && thread_group_empty(leader)
268 && leader->exit_state == EXIT_ZOMBIE) {
1da177e4
LT
269 /*
270 * If we were the last child thread and the leader has
271 * exited already, and the leader's parent ignores SIGCHLD,
272 * then we are the one who should release the leader.
dae33574 273 */
86773473 274 zap_leader = do_notify_parent(leader, leader->exit_signal);
dae33574
RM
275 if (zap_leader)
276 leader->exit_state = EXIT_DEAD;
1da177e4
LT
277 }
278
1da177e4 279 write_unlock_irq(&tasklist_lock);
3a15fb6e 280 seccomp_filter_release(p);
7bc3e6e5 281 proc_flush_pid(thread_pid);
6ade99ec 282 put_pid(thread_pid);
1da177e4 283 release_thread(p);
3fbd7ee2 284 put_task_struct_rcu_user(p);
1da177e4
LT
285
286 p = leader;
287 if (unlikely(zap_leader))
288 goto repeat;
289}
290
9d9a6ebf 291int rcuwait_wake_up(struct rcuwait *w)
8f95c90c 292{
9d9a6ebf 293 int ret = 0;
8f95c90c
DB
294 struct task_struct *task;
295
296 rcu_read_lock();
297
298 /*
299 * Order condition vs @task, such that everything prior to the load
300 * of @task is visible. This is the condition as to why the user called
c9d64a1b 301 * rcuwait_wake() in the first place. Pairs with set_current_state()
8f95c90c
DB
302 * barrier (A) in rcuwait_wait_event().
303 *
304 * WAIT WAKE
305 * [S] tsk = current [S] cond = true
306 * MB (A) MB (B)
307 * [L] cond [L] tsk
308 */
6dc080ee 309 smp_mb(); /* (B) */
8f95c90c 310
8f95c90c
DB
311 task = rcu_dereference(w->task);
312 if (task)
9d9a6ebf 313 ret = wake_up_process(task);
8f95c90c 314 rcu_read_unlock();
9d9a6ebf
DB
315
316 return ret;
8f95c90c 317}
ac8dec42 318EXPORT_SYMBOL_GPL(rcuwait_wake_up);
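/*
 * Rough sketch of the waiter side this pairs with (simplified from
 * include/linux/rcuwait.h; "done" is a caller-defined flag, and the real
 * rcuwait_wait_event() supplies the set_current_state() barrier (A) noted
 * in the diagram above):
 *
 *   struct rcuwait w;
 *
 *   rcuwait_init(&w);
 *   rcuwait_wait_event(&w, READ_ONCE(done), TASK_UNINTERRUPTIBLE);
 *
 * while the waker stores the condition (WRITE_ONCE(done, 1)) and then calls
 * rcuwait_wake_up(&w).
 */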
8f95c90c 319
1da177e4
LT
320/*
321 * Determine if a process group is "orphaned", according to the POSIX
322 * definition in 2.2.2.52. Orphaned process groups are not to be affected
323 * by terminal-generated stop signals. Newly orphaned process groups are
324 * to receive a SIGHUP and a SIGCONT.
325 *
326 * "I ask you, have you ever known what it is to be an orphan?"
327 */
a0be55de
IA
328static int will_become_orphaned_pgrp(struct pid *pgrp,
329 struct task_struct *ignored_task)
1da177e4
LT
330{
331 struct task_struct *p;
1da177e4 332
0475ac08 333 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
05e83df6
ON
334 if ((p == ignored_task) ||
335 (p->exit_state && thread_group_empty(p)) ||
336 is_global_init(p->real_parent))
1da177e4 337 continue;
05e83df6 338
0475ac08 339 if (task_pgrp(p->real_parent) != pgrp &&
05e83df6
ON
340 task_session(p->real_parent) == task_session(p))
341 return 0;
0475ac08 342 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
05e83df6
ON
343
344 return 1;
1da177e4
LT
345}
346
3e7cd6c4 347int is_current_pgrp_orphaned(void)
1da177e4
LT
348{
349 int retval;
350
351 read_lock(&tasklist_lock);
3e7cd6c4 352 retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
1da177e4
LT
353 read_unlock(&tasklist_lock);
354
355 return retval;
356}
357
961c4675 358static bool has_stopped_jobs(struct pid *pgrp)
1da177e4 359{
1da177e4
LT
360 struct task_struct *p;
361
0475ac08 362 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
961c4675
ON
363 if (p->signal->flags & SIGNAL_STOP_STOPPED)
364 return true;
0475ac08 365 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
961c4675
ON
366
367 return false;
1da177e4
LT
368}
369
f49ee505
ON
370/*
371 * Check to see if any process groups have become orphaned as
372 * a result of our exiting, and if they have any stopped jobs,
373 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
374 */
375static void
376kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
377{
378 struct pid *pgrp = task_pgrp(tsk);
379 struct task_struct *ignored_task = tsk;
380
381 if (!parent)
a0be55de
IA
382 /* exit: our father is in a different pgrp than
383 * we are and we were the only connection outside.
384 */
f49ee505
ON
385 parent = tsk->real_parent;
386 else
387 /* reparent: our child is in a different pgrp than
388 * we are, and it was the only connection outside.
389 */
390 ignored_task = NULL;
391
392 if (task_pgrp(parent) != pgrp &&
393 task_session(parent) == task_session(tsk) &&
394 will_become_orphaned_pgrp(pgrp, ignored_task) &&
395 has_stopped_jobs(pgrp)) {
396 __kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
397 __kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
398 }
399}
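/*
 * Illustration of the rule above: when a shell exits and leaves behind a
 * stopped background job whose only link to another process group in the
 * same session was that shell, the job's process group becomes orphaned;
 * its stopped members get SIGHUP then SIGCONT so they are not left
 * suspended with nobody around to resume them.
 */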
400
92307383 401static void coredump_task_exit(struct task_struct *tsk)
d67e03e3
EB
402{
403 struct core_state *core_state;
404
405 /*
406 * Serialize with any possible pending coredump.
0258b5fd 407 * We must hold siglock around checking core_state
92307383 408 * and setting PF_POSTCOREDUMP. The core-inducing thread
d67e03e3 409 * will increment ->nr_threads for each thread in the
92307383 410 * group without PF_POSTCOREDUMP set.
d67e03e3 411 */
0258b5fd 412 spin_lock_irq(&tsk->sighand->siglock);
92307383 413 tsk->flags |= PF_POSTCOREDUMP;
0258b5fd
EB
414 core_state = tsk->signal->core_state;
415 spin_unlock_irq(&tsk->sighand->siglock);
240a1853 416 if (core_state) {
d67e03e3
EB
417 struct core_thread self;
418
d67e03e3
EB
419 self.task = current;
420 if (self.task->flags & PF_SIGNALED)
421 self.next = xchg(&core_state->dumper.next, &self);
422 else
423 self.task = NULL;
424 /*
425 * Implies mb(), the result of xchg() must be visible
426 * to core_state->dumper.
427 */
428 if (atomic_dec_and_test(&core_state->nr_threads))
429 complete(&core_state->startup);
430
431 for (;;) {
f5d39b02 432 set_current_state(TASK_UNINTERRUPTIBLE|TASK_FREEZABLE);
d67e03e3
EB
433 if (!self.task) /* see coredump_finish() */
434 break;
f5d39b02 435 schedule();
d67e03e3
EB
436 }
437 __set_current_state(TASK_RUNNING);
d67e03e3
EB
438 }
439}
440
f98bafa0 441#ifdef CONFIG_MEMCG
cf475ad2 442/*
733eda7a 443 * A task is exiting. If it owned this mm, find a new owner for the mm.
cf475ad2 444 */
cf475ad2
BS
445void mm_update_next_owner(struct mm_struct *mm)
446{
447 struct task_struct *c, *g, *p = current;
448
449retry:
733eda7a
KH
450 /*
451 * If the exiting or execing task is not the owner, it's
452 * someone else's problem.
453 */
454 if (mm->owner != p)
cf475ad2 455 return;
733eda7a
KH
456 /*
457 * The current owner is exiting/execing and there are no other
458 * candidates. Do not leave the mm pointing to a possibly
459 * freed task structure.
460 */
461 if (atomic_read(&mm->mm_users) <= 1) {
987717e5 462 WRITE_ONCE(mm->owner, NULL);
733eda7a
KH
463 return;
464 }
cf475ad2
BS
465
466 read_lock(&tasklist_lock);
467 /*
468 * Search in the children
469 */
470 list_for_each_entry(c, &p->children, sibling) {
471 if (c->mm == mm)
472 goto assign_new_owner;
473 }
474
475 /*
476 * Search in the siblings
477 */
dea33cfd 478 list_for_each_entry(c, &p->real_parent->children, sibling) {
cf475ad2
BS
479 if (c->mm == mm)
480 goto assign_new_owner;
481 }
482
483 /*
f87fb599 484 * Search through everything else, we should not get here often.
cf475ad2 485 */
39af1765
ON
486 for_each_process(g) {
487 if (g->flags & PF_KTHREAD)
488 continue;
489 for_each_thread(g, c) {
490 if (c->mm == mm)
491 goto assign_new_owner;
492 if (c->mm)
493 break;
494 }
f87fb599 495 }
cf475ad2 496 read_unlock(&tasklist_lock);
31a78f23
BS
497 /*
498 * We found no owner, yet mm_users > 1: this implies that we are
499 * most likely racing with swapoff (try_to_unuse()) or /proc or
e5991371 500 * ptrace or page migration (get_task_mm()). Mark owner as NULL.
31a78f23 501 */
987717e5 502 WRITE_ONCE(mm->owner, NULL);
cf475ad2
BS
503 return;
504
505assign_new_owner:
506 BUG_ON(c == p);
507 get_task_struct(c);
508 /*
509 * The task_lock protects c->mm from changing.
510 * We always want mm->owner->mm == mm
511 */
512 task_lock(c);
e5991371
HD
513 /*
514 * Delay read_unlock() till we have the task_lock()
515 * to ensure that c does not slip away underneath us
516 */
517 read_unlock(&tasklist_lock);
cf475ad2
BS
518 if (c->mm != mm) {
519 task_unlock(c);
520 put_task_struct(c);
521 goto retry;
522 }
987717e5 523 WRITE_ONCE(mm->owner, c);
bd74fdae 524 lru_gen_migrate_mm(mm);
cf475ad2
BS
525 task_unlock(c);
526 put_task_struct(c);
527}
f98bafa0 528#endif /* CONFIG_MEMCG */
cf475ad2 529
1da177e4
LT
530/*
531 * Turn us into a lazy TLB process if we
532 * aren't already.
533 */
0039962a 534static void exit_mm(void)
1da177e4 535{
0039962a 536 struct mm_struct *mm = current->mm;
1da177e4 537
4610ba7a 538 exit_mm_release(current, mm);
1da177e4
LT
539 if (!mm)
540 return;
d8ed45c5 541 mmap_read_lock(mm);
aa464ba9 542 mmgrab_lazy_tlb(mm);
0039962a 543 BUG_ON(mm != current->active_mm);
1da177e4 544 /* more a memory barrier than a real lock */
0039962a 545 task_lock(current);
5bc78502
MD
546 /*
547 * When a thread stops operating on an address space, the loop
548 * in membarrier_private_expedited() may not observe that
549 * tsk->mm, and the loop in membarrier_global_expedited() may
550 * not observe a MEMBARRIER_STATE_GLOBAL_EXPEDITED
551 * rq->membarrier_state, so those would not issue an IPI.
552 * Membarrier requires a memory barrier after accessing
553 * user-space memory, before clearing tsk->mm or the
554 * rq->membarrier_state.
555 */
556 smp_mb__after_spinlock();
557 local_irq_disable();
0039962a 558 current->mm = NULL;
5bc78502 559 membarrier_update_current_mm(NULL);
1da177e4 560 enter_lazy_tlb(mm, current);
5bc78502 561 local_irq_enable();
0039962a 562 task_unlock(current);
5bc78502 563 mmap_read_unlock(mm);
cf475ad2 564 mm_update_next_owner(mm);
1da177e4 565 mmput(mm);
c32b3cbe 566 if (test_thread_flag(TIF_MEMDIE))
38531201 567 exit_oom_victim();
1da177e4
LT
568}
569
c9dc05bf
ON
570static struct task_struct *find_alive_thread(struct task_struct *p)
571{
572 struct task_struct *t;
573
574 for_each_thread(p, t) {
575 if (!(t->flags & PF_EXITING))
576 return t;
577 }
578 return NULL;
579}
580
8fb335e0
AV
581static struct task_struct *find_child_reaper(struct task_struct *father,
582 struct list_head *dead)
1109909c
ON
583 __releases(&tasklist_lock)
584 __acquires(&tasklist_lock)
585{
586 struct pid_namespace *pid_ns = task_active_pid_ns(father);
587 struct task_struct *reaper = pid_ns->child_reaper;
8fb335e0 588 struct task_struct *p, *n;
1109909c
ON
589
590 if (likely(reaper != father))
591 return reaper;
592
c9dc05bf
ON
593 reaper = find_alive_thread(father);
594 if (reaper) {
1109909c
ON
595 pid_ns->child_reaper = reaper;
596 return reaper;
597 }
598
599 write_unlock_irq(&tasklist_lock);
8fb335e0
AV
600
601 list_for_each_entry_safe(p, n, dead, ptrace_entry) {
602 list_del_init(&p->ptrace_entry);
603 release_task(p);
604 }
605
1109909c
ON
606 zap_pid_ns_processes(pid_ns);
607 write_lock_irq(&tasklist_lock);
608
609 return father;
610}
611
1da177e4 612/*
ebec18a6
LP
613 * When we die, we re-parent all our children, and try to:
614 * 1. give them to another thread in our thread group, if such a member exists
615 * 2. give them to the first ancestor process which prctl'd itself as a
616 * child_subreaper for its children (like a service manager)
617 * 3. give them to the init process (PID 1) in our pid namespace
1da177e4 618 */
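/*
 * For case 2 above: a service manager opts in with
 * prctl(PR_SET_CHILD_SUBREAPER, 1), which sets ->signal->is_child_subreaper
 * on the caller (descendants inherit ->has_child_subreaper at fork), and
 * that is what the ancestor walk in find_new_reaper() below checks.
 */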
1109909c
ON
619static struct task_struct *find_new_reaper(struct task_struct *father,
620 struct task_struct *child_reaper)
1da177e4 621{
c9dc05bf 622 struct task_struct *thread, *reaper;
1da177e4 623
c9dc05bf
ON
624 thread = find_alive_thread(father);
625 if (thread)
950bbabb 626 return thread;
1da177e4 627
7d24e2df 628 if (father->signal->has_child_subreaper) {
c6c70f44 629 unsigned int ns_level = task_pid(father)->level;
ebec18a6 630 /*
175aed3f 631 * Find the first ->is_child_subreaper ancestor in our pid_ns.
c6c70f44
ON
632 * We can't check reaper != child_reaper to ensure we do not
633 * cross the namespaces, the exiting parent could be injected
634 * by setns() + fork().
635 * We check pid->level, this is slightly more efficient than
636 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
ebec18a6 637 */
c6c70f44
ON
638 for (reaper = father->real_parent;
639 task_pid(reaper)->level == ns_level;
ebec18a6 640 reaper = reaper->real_parent) {
175aed3f 641 if (reaper == &init_task)
ebec18a6
LP
642 break;
643 if (!reaper->signal->is_child_subreaper)
644 continue;
c9dc05bf
ON
645 thread = find_alive_thread(reaper);
646 if (thread)
647 return thread;
ebec18a6 648 }
1da177e4 649 }
762a24be 650
1109909c 651 return child_reaper;
950bbabb
ON
652}
653
5dfc80be
ON
654/*
655 * Any that need to be release_task'd are put on the @dead list.
656 */
9cd80bbb 657static void reparent_leader(struct task_struct *father, struct task_struct *p,
5dfc80be
ON
658 struct list_head *dead)
659{
2831096e 660 if (unlikely(p->exit_state == EXIT_DEAD))
5dfc80be
ON
661 return;
662
abd50b39 663 /* We don't want people slaying init. */
5dfc80be
ON
664 p->exit_signal = SIGCHLD;
665
666 /* If it has exited notify the new parent about this child's death. */
d21142ec 667 if (!p->ptrace &&
5dfc80be 668 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
86773473 669 if (do_notify_parent(p, p->exit_signal)) {
5dfc80be 670 p->exit_state = EXIT_DEAD;
dc2fd4b0 671 list_add(&p->ptrace_entry, dead);
5dfc80be
ON
672 }
673 }
674
675 kill_orphaned_pgrp(p, father);
676}
677
482a3767
ON
678/*
679 * This does two things:
680 *
681 * A. Make init inherit all the child processes
682 * B. Check to see if any process groups have become orphaned
683 * as a result of our exiting, and if they have any stopped
684 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
685 */
686static void forget_original_parent(struct task_struct *father,
687 struct list_head *dead)
1da177e4 688{
482a3767 689 struct task_struct *p, *t, *reaper;
762a24be 690
7c8bd232 691 if (unlikely(!list_empty(&father->ptraced)))
482a3767 692 exit_ptrace(father, dead);
f470021a 693
7c8bd232 694 /* Can drop and reacquire tasklist_lock */
8fb335e0 695 reaper = find_child_reaper(father, dead);
ad9e206a 696 if (list_empty(&father->children))
482a3767 697 return;
1109909c
ON
698
699 reaper = find_new_reaper(father, reaper);
2831096e 700 list_for_each_entry(p, &father->children, sibling) {
57a05918 701 for_each_thread(p, t) {
22a34c6f
MB
702 RCU_INIT_POINTER(t->real_parent, reaper);
703 BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
57a05918 704 if (likely(!t->ptrace))
9cd80bbb 705 t->parent = t->real_parent;
9cd80bbb
ON
706 if (t->pdeath_signal)
707 group_send_sig_info(t->pdeath_signal,
01024980
EB
708 SEND_SIG_NOINFO, t,
709 PIDTYPE_TGID);
57a05918 710 }
2831096e
ON
711 /*
712 * If this is a threaded reparent there is no need to
713 * notify anyone anything has happened.
714 */
715 if (!same_thread_group(reaper, father))
482a3767 716 reparent_leader(father, p, dead);
1da177e4 717 }
2831096e 718 list_splice_tail_init(&father->children, &reaper->children);
1da177e4
LT
719}
720
721/*
722 * Send signals to all our closest relatives so that they know
723 * to properly mourn us.
724 */
821c7de7 725static void exit_notify(struct task_struct *tsk, int group_dead)
1da177e4 726{
53c8f9f1 727 bool autoreap;
482a3767
ON
728 struct task_struct *p, *n;
729 LIST_HEAD(dead);
1da177e4 730
762a24be 731 write_lock_irq(&tasklist_lock);
482a3767
ON
732 forget_original_parent(tsk, &dead);
733
821c7de7
ON
734 if (group_dead)
735 kill_orphaned_pgrp(tsk->group_leader, NULL);
1da177e4 736
b191d649 737 tsk->exit_state = EXIT_ZOMBIE;
64bef697
ON
738 /*
739 * sub-thread or delay_group_leader(), wake up the
740 * PIDFD_THREAD waiters.
741 */
742 if (!thread_group_empty(tsk))
743 do_notify_pidfd(tsk);
744
45cdf5cc
ON
745 if (unlikely(tsk->ptrace)) {
746 int sig = thread_group_leader(tsk) &&
747 thread_group_empty(tsk) &&
748 !ptrace_reparented(tsk) ?
749 tsk->exit_signal : SIGCHLD;
750 autoreap = do_notify_parent(tsk, sig);
751 } else if (thread_group_leader(tsk)) {
752 autoreap = thread_group_empty(tsk) &&
753 do_notify_parent(tsk, tsk->exit_signal);
754 } else {
755 autoreap = true;
756 }
1da177e4 757
30b692d3
CB
758 if (autoreap) {
759 tsk->exit_state = EXIT_DEAD;
6c66e7db 760 list_add(&tsk->ptrace_entry, &dead);
30b692d3 761 }
1da177e4 762
9c339168
ON
763 /* mt-exec, de_thread() is waiting for group leader */
764 if (unlikely(tsk->signal->notify_count < 0))
60700e38 765 wake_up_process(tsk->signal->group_exec_task);
1da177e4
LT
766 write_unlock_irq(&tasklist_lock);
767
482a3767
ON
768 list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
769 list_del_init(&p->ptrace_entry);
770 release_task(p);
771 }
1da177e4
LT
772}
773
e18eecb8
JD
774#ifdef CONFIG_DEBUG_STACK_USAGE
775static void check_stack_usage(void)
776{
777 static DEFINE_SPINLOCK(low_water_lock);
778 static int lowest_to_date = THREAD_SIZE;
e18eecb8
JD
779 unsigned long free;
780
7c9f8861 781 free = stack_not_used(current);
e18eecb8
JD
782
783 if (free >= lowest_to_date)
784 return;
785
786 spin_lock(&low_water_lock);
787 if (free < lowest_to_date) {
627393d4 788 pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
a0be55de 789 current->comm, task_pid_nr(current), free);
e18eecb8
JD
790 lowest_to_date = free;
791 }
792 spin_unlock(&low_water_lock);
793}
794#else
795static inline void check_stack_usage(void) {}
796#endif
797
d80f7d7b
EB
798static void synchronize_group_exit(struct task_struct *tsk, long code)
799{
800 struct sighand_struct *sighand = tsk->sighand;
801 struct signal_struct *signal = tsk->signal;
802
803 spin_lock_irq(&sighand->siglock);
804 signal->quick_threads--;
805 if ((signal->quick_threads == 0) &&
806 !(signal->flags & SIGNAL_GROUP_EXIT)) {
807 signal->flags = SIGNAL_GROUP_EXIT;
808 signal->group_exit_code = code;
809 signal->group_stop_count = 0;
810 }
811 spin_unlock_irq(&sighand->siglock);
812}
813
9af6528e 814void __noreturn do_exit(long code)
1da177e4
LT
815{
816 struct task_struct *tsk = current;
817 int group_dead;
818
001c28e5
NP
819 WARN_ON(irqs_disabled());
820
d80f7d7b
EB
821 synchronize_group_exit(tsk, code);
822
b1f866b0 823 WARN_ON(tsk->plug);
22e2c507 824
586b58ca 825 kcov_task_exit(tsk);
50b5e49c 826 kmsan_task_exit(tsk);
586b58ca 827
92307383 828 coredump_task_exit(tsk);
a288eecc 829 ptrace_event(PTRACE_EVENT_EXIT, code);
fd593511 830 user_events_exit(tsk);
1da177e4 831
f552a27a 832 io_uring_files_cancel();
d12619b5 833 exit_signals(tsk); /* sets PF_EXITING */
1da177e4 834
51229b49 835 acct_update_integrals(tsk);
1da177e4 836 group_dead = atomic_dec_and_test(&tsk->signal->live);
c3068951 837 if (group_dead) {
43cf75d9 838 /*
839 * If the last thread of global init has exited, panic
840 * immediately to get a useable coredump.
841 */
842 if (unlikely(is_global_init(tsk)))
843 panic("Attempted to kill init! exitcode=0x%08x\n",
844 tsk->signal->group_exit_code ?: (int)code);
845
baa73d9e 846#ifdef CONFIG_POSIX_TIMERS
778e9a9c 847 hrtimer_cancel(&tsk->signal->real_timer);
d5b36a4d 848 exit_itimers(tsk);
baa73d9e 849#endif
1f10206c
JP
850 if (tsk->mm)
851 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
c3068951 852 }
f6ec29a4 853 acct_collect(code, group_dead);
522ed776
MT
854 if (group_dead)
855 tty_audit_exit();
a4ff8dba 856 audit_free(tsk);
115085ea 857
48d212a2 858 tsk->exit_code = code;
115085ea 859 taskstats_exit(tsk, group_dead);
c757249a 860
0039962a 861 exit_mm();
1da177e4 862
0e464814 863 if (group_dead)
f6ec29a4 864 acct_process();
0a16b607
MD
865 trace_sched_process_exit(tsk);
866
1da177e4 867 exit_sem(tsk);
b34a6b1d 868 exit_shm(tsk);
1ec7f1dd
AV
869 exit_files(tsk);
870 exit_fs(tsk);
c39df5fa
ON
871 if (group_dead)
872 disassociate_ctty(1);
8aac6270 873 exit_task_namespaces(tsk);
ed3e694d 874 exit_task_work(tsk);
e6464694 875 exit_thread(tsk);
0b3fcf17
SE
876
877 /*
878 * Flush inherited counters to the parent - before the parent
879 * gets woken up by child-exit notifications.
880 *
881 * because of cgroup mode, must be called before cgroup_exit()
882 */
883 perf_event_exit_task(tsk);
884
8e5bfa8c 885 sched_autogroup_exit_task(tsk);
1ec41830 886 cgroup_exit(tsk);
1da177e4 887
24f1e32c
FW
888 /*
889 * FIXME: do that only when needed, using sched_exit tracepoint
890 */
7c8df286 891 flush_ptrace_hw_breakpoint(tsk);
33b2fb30 892
ccdd29ff 893 exit_tasks_rcu_start();
821c7de7 894 exit_notify(tsk, group_dead);
ef982393 895 proc_exit_connector(tsk);
c11600e4 896 mpol_put_task_policy(tsk);
42b2dd0a 897#ifdef CONFIG_FUTEX
c87e2837
IM
898 if (unlikely(current->pi_state_cache))
899 kfree(current->pi_state_cache);
42b2dd0a 900#endif
de5097c2 901 /*
9a11b49a 902 * Make sure we are holding no locks:
de5097c2 903 */
1b1d2fb4 904 debug_check_no_locks_held();
1da177e4 905
afc847b7 906 if (tsk->io_context)
b69f2292 907 exit_io_context(tsk);
afc847b7 908
b92ce558 909 if (tsk->splice_pipe)
4b8a8f1e 910 free_pipe_info(tsk->splice_pipe);
b92ce558 911
5640f768
ED
912 if (tsk->task_frag.page)
913 put_page(tsk->task_frag.page);
914
1a03d3f1 915 exit_task_stack_account(tsk);
e0e81739 916
4bcb8232 917 check_stack_usage();
7407251a 918 preempt_disable();
54848d73
WF
919 if (tsk->nr_dirtied)
920 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
f41d911f 921 exit_rcu();
ccdd29ff 922 exit_tasks_rcu_finish();
b5740f4b 923
b09be676 924 lockdep_free_task(tsk);
9af6528e 925 do_task_dead();
1da177e4 926}
012914da 927
0e25498f
EB
928void __noreturn make_task_dead(int signr)
929{
930 /*
931 * Take the task off the cpu after something catastrophic has
932 * happened.
05ea0424
EB
933 *
934 * We can get here from a kernel oops, sometimes with preemption off.
935 * Start by checking for critical errors.
936 * Then fix up important state like USER_DS and preemption.
937 * Then do everything else.
0e25498f 938 */
05ea0424 939 struct task_struct *tsk = current;
7535b832 940 unsigned int limit;
05ea0424
EB
941
942 if (unlikely(in_interrupt()))
943 panic("Aiee, killing interrupt handler!");
944 if (unlikely(!tsk->pid))
945 panic("Attempted to kill the idle task!");
946
001c28e5
NP
947 if (unlikely(irqs_disabled())) {
948 pr_info("note: %s[%d] exited with irqs disabled\n",
949 current->comm, task_pid_nr(current));
950 local_irq_enable();
951 }
05ea0424
EB
952 if (unlikely(in_atomic())) {
953 pr_info("note: %s[%d] exited with preempt_count %d\n",
954 current->comm, task_pid_nr(current),
955 preempt_count());
956 preempt_count_set(PREEMPT_ENABLED);
957 }
958
d4ccd54d
JH
959 /*
960 * Every time the system oopses, if the oops happens while a reference
961 * to an object was held, the reference leaks.
962 * If the oops doesn't also leak memory, repeated oopsing can cause
963 * reference counters to wrap around (if they're not using refcount_t).
964 * This means that repeated oopsing can make unexploitable-looking bugs
965 * exploitable through repeated oopsing.
966 * To make sure this can't happen, place an upper bound on how often the
967 * kernel may oops without panic().
968 */
7535b832
KC
969 limit = READ_ONCE(oops_limit);
970 if (atomic_inc_return(&oops_count) >= limit && limit)
971 panic("Oopsed too often (kernel.oops_limit is %d)", limit);
d4ccd54d 972
05ea0424
EB
973 /*
974 * We're taking recursive faults here in make_task_dead. Safest is to just
975 * leave this task alone and wait for reboot.
976 */
977 if (unlikely(tsk->flags & PF_EXITING)) {
978 pr_alert("Fixing recursive fault but reboot is needed!\n");
979 futex_exit_recursive(tsk);
912616f1
EB
980 tsk->exit_state = EXIT_DEAD;
981 refcount_inc(&tsk->rcu_users);
7f80a2fd 982 do_task_dead();
05ea0424
EB
983 }
984
0e25498f
EB
985 do_exit(signr);
986}
987
754fe8d2 988SYSCALL_DEFINE1(exit, int, error_code)
1da177e4
LT
989{
990 do_exit((error_code&0xff)<<8);
991}
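/*
 * The (error_code & 0xff) << 8 encoding above matches the classic wait
 * status layout: e.g. an error_code of 1 becomes 0x0100, which the parent's
 * wait*() sees with WIFEXITED(status) true and WEXITSTATUS(status) == 1.
 */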
992
1da177e4
LT
993/*
994 * Take down every thread in the group. This is called by fatal signals
995 * as well as by sys_exit_group (below).
996 */
eae654f1 997void __noreturn
1da177e4
LT
998do_group_exit(int exit_code)
999{
bfc4b089
ON
1000 struct signal_struct *sig = current->signal;
1001
49697335 1002 if (sig->flags & SIGNAL_GROUP_EXIT)
bfc4b089 1003 exit_code = sig->group_exit_code;
49697335
EB
1004 else if (sig->group_exec_task)
1005 exit_code = 0;
cbe9dac3 1006 else {
1da177e4 1007 struct sighand_struct *const sighand = current->sighand;
a0be55de 1008
1da177e4 1009 spin_lock_irq(&sighand->siglock);
49697335 1010 if (sig->flags & SIGNAL_GROUP_EXIT)
1da177e4
LT
1011 /* Another thread got here before we took the lock. */
1012 exit_code = sig->group_exit_code;
49697335
EB
1013 else if (sig->group_exec_task)
1014 exit_code = 0;
1da177e4 1015 else {
1da177e4 1016 sig->group_exit_code = exit_code;
ed5d2cac 1017 sig->flags = SIGNAL_GROUP_EXIT;
1da177e4
LT
1018 zap_other_threads(current);
1019 }
1020 spin_unlock_irq(&sighand->siglock);
1da177e4
LT
1021 }
1022
1023 do_exit(exit_code);
1024 /* NOTREACHED */
1025}
1026
1027/*
1028 * this kills every thread in the thread group. Note that any externally
1029 * wait4()-ing process will get the correct exit code - even if this
1030 * thread is not the thread group leader.
1031 */
754fe8d2 1032SYSCALL_DEFINE1(exit_group, int, error_code)
1da177e4
LT
1033{
1034 do_group_exit((error_code & 0xff) << 8);
2ed7c03e
HC
1035 /* NOTREACHED */
1036 return 0;
1da177e4
LT
1037}
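/*
 * Contrast with sys_exit() above: a bare syscall(SYS_exit, n) terminates
 * only the calling thread, while C libraries normally build exit(3) and
 * _exit(2) on top of exit_group so the whole thread group goes away. (That
 * mapping is the libc's choice, not something enforced here.)
 */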
1038
989264f4 1039static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
1da177e4 1040{
5c01ba49
ON
1041 return wo->wo_type == PIDTYPE_MAX ||
1042 task_pid_type(p, wo->wo_type) == wo->wo_pid;
1043}
1da177e4 1044
bf959931
ON
1045static int
1046eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
5c01ba49
ON
1047{
1048 if (!eligible_pid(wo, p))
1049 return 0;
bf959931
ON
1050
1051 /*
1052 * Wait for all children (clone and not) if __WALL is set or
1053 * if it is traced by us.
1054 */
1055 if (ptrace || (wo->wo_flags & __WALL))
1056 return 1;
1057
1058 /*
1059 * Otherwise, wait for clone children *only* if __WCLONE is set;
1060 * otherwise, wait for non-clone children *only*.
1061 *
1062 * Note: a "clone" child here is one that reports to its parent
1063 * using a signal other than SIGCHLD, or a non-leader thread which
1064 * we can only see if it is traced by us.
1065 */
1066 if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
1da177e4 1067 return 0;
1da177e4 1068
14dd0b81 1069 return 1;
1da177e4
LT
1070}
1071
1da177e4
LT
1072/*
1073 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
1074 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
1075 * the lock and this task is uninteresting. If we return nonzero, we have
1076 * released the lock and the system call should return.
1077 */
9e8ae01d 1078static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1da177e4 1079{
67d7ddde 1080 int state, status;
6c5f3e7b 1081 pid_t pid = task_pid_vnr(p);
43e13cc1 1082 uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
67d7ddde 1083 struct waitid_info *infop;
1da177e4 1084
9e8ae01d 1085 if (!likely(wo->wo_flags & WEXITED))
98abed02
RM
1086 return 0;
1087
9e8ae01d 1088 if (unlikely(wo->wo_flags & WNOWAIT)) {
907c311f
EB
1089 status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1090 ? p->signal->group_exit_code : p->exit_code;
1da177e4
LT
1091 get_task_struct(p);
1092 read_unlock(&tasklist_lock);
1029a2b5 1093 sched_annotate_sleep();
e61a2502
AV
1094 if (wo->wo_rusage)
1095 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
bb380ec3 1096 put_task_struct(p);
76d9871e 1097 goto out_info;
1da177e4 1098 }
1da177e4 1099 /*
abd50b39 1100 * Move the task's state to DEAD/TRACE, only one thread can do this.
1da177e4 1101 */
f6507f83
ON
1102 state = (ptrace_reparented(p) && thread_group_leader(p)) ?
1103 EXIT_TRACE : EXIT_DEAD;
abd50b39 1104 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
1da177e4 1105 return 0;
986094df
ON
1106 /*
1107 * We own this thread, nobody else can reap it.
1108 */
1109 read_unlock(&tasklist_lock);
1110 sched_annotate_sleep();
f6507f83 1111
befca967 1112 /*
f6507f83 1113 * Check thread_group_leader() to exclude the traced sub-threads.
befca967 1114 */
f6507f83 1115 if (state == EXIT_DEAD && thread_group_leader(p)) {
f953ccd0
ON
1116 struct signal_struct *sig = p->signal;
1117 struct signal_struct *psig = current->signal;
1f10206c 1118 unsigned long maxrss;
5613fda9 1119 u64 tgutime, tgstime;
3795e161 1120
1da177e4
LT
1121 /*
1122 * The resource counters for the group leader are in its
1123 * own task_struct. Those for dead threads in the group
1124 * are in its signal_struct, as are those for the child
1125 * processes it has previously reaped. All these
1126 * accumulate in the parent's signal_struct c* fields.
1127 *
1128 * We don't bother to take a lock here to protect these
f953ccd0
ON
1129 * p->signal fields because the whole thread group is dead
1130 * and nobody can change them.
1131 *
dcca3475 1132 * psig->stats_lock also protects us from our sub-threads
c1be35a1 1133 * which can reap other children at the same time.
0cf55e1e 1134 *
a0be55de
IA
1135 * We use thread_group_cputime_adjusted() to get times for
1136 * the thread group, which consolidates times for all threads
1137 * in the group including the group leader.
1da177e4 1138 */
e80d0a1a 1139 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
c1be35a1 1140 write_seqlock_irq(&psig->stats_lock);
64861634
MS
1141 psig->cutime += tgutime + sig->cutime;
1142 psig->cstime += tgstime + sig->cstime;
6fac4829 1143 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
3795e161
JJ
1144 psig->cmin_flt +=
1145 p->min_flt + sig->min_flt + sig->cmin_flt;
1146 psig->cmaj_flt +=
1147 p->maj_flt + sig->maj_flt + sig->cmaj_flt;
1148 psig->cnvcsw +=
1149 p->nvcsw + sig->nvcsw + sig->cnvcsw;
1150 psig->cnivcsw +=
1151 p->nivcsw + sig->nivcsw + sig->cnivcsw;
6eaeeaba
ED
1152 psig->cinblock +=
1153 task_io_get_inblock(p) +
1154 sig->inblock + sig->cinblock;
1155 psig->coublock +=
1156 task_io_get_oublock(p) +
1157 sig->oublock + sig->coublock;
1f10206c
JP
1158 maxrss = max(sig->maxrss, sig->cmaxrss);
1159 if (psig->cmaxrss < maxrss)
1160 psig->cmaxrss = maxrss;
5995477a
AR
1161 task_io_accounting_add(&psig->ioac, &p->ioac);
1162 task_io_accounting_add(&psig->ioac, &sig->ioac);
c1be35a1 1163 write_sequnlock_irq(&psig->stats_lock);
1da177e4
LT
1164 }
1165
ce72a16f
AV
1166 if (wo->wo_rusage)
1167 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1da177e4
LT
1168 status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1169 ? p->signal->group_exit_code : p->exit_code;
359566fa 1170 wo->wo_stat = status;
2f4e6e2a 1171
b4360690 1172 if (state == EXIT_TRACE) {
1da177e4 1173 write_lock_irq(&tasklist_lock);
2f4e6e2a
ON
1174 /* We dropped tasklist, ptracer could die and untrace */
1175 ptrace_unlink(p);
b4360690
ON
1176
1177 /* If parent wants a zombie, don't release it now */
1178 state = EXIT_ZOMBIE;
1179 if (do_notify_parent(p, p->exit_signal))
1180 state = EXIT_DEAD;
abd50b39 1181 p->exit_state = state;
1da177e4
LT
1182 write_unlock_irq(&tasklist_lock);
1183 }
abd50b39 1184 if (state == EXIT_DEAD)
1da177e4 1185 release_task(p);
2f4e6e2a 1186
76d9871e
AV
1187out_info:
1188 infop = wo->wo_info;
1189 if (infop) {
1190 if ((status & 0x7f) == 0) {
1191 infop->cause = CLD_EXITED;
1192 infop->status = status >> 8;
1193 } else {
1194 infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
1195 infop->status = status & 0x7f;
1196 }
1197 infop->pid = pid;
1198 infop->uid = uid;
1199 }
1200
67d7ddde 1201 return pid;
1da177e4
LT
1202}
1203
90bc8d8b
ON
1204static int *task_stopped_code(struct task_struct *p, bool ptrace)
1205{
1206 if (ptrace) {
570ac933 1207 if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
90bc8d8b
ON
1208 return &p->exit_code;
1209 } else {
1210 if (p->signal->flags & SIGNAL_STOP_STOPPED)
1211 return &p->signal->group_exit_code;
1212 }
1213 return NULL;
1214}
1215
19e27463
TH
1216/**
1217 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
1218 * @wo: wait options
1219 * @ptrace: is the wait for ptrace
1220 * @p: task to wait for
1221 *
1222 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
1223 *
1224 * CONTEXT:
1225 * read_lock(&tasklist_lock), which is released if return value is
1226 * non-zero. Also, grabs and releases @p->sighand->siglock.
1227 *
1228 * RETURNS:
1229 * 0 if wait condition didn't exist and search for other wait conditions
1230 * should continue. Non-zero return, -errno on failure and @p's pid on
1231 * success, implies that tasklist_lock is released and wait condition
1232 * search should terminate.
1da177e4 1233 */
9e8ae01d
ON
1234static int wait_task_stopped(struct wait_opts *wo,
1235 int ptrace, struct task_struct *p)
1da177e4 1236{
67d7ddde
AV
1237 struct waitid_info *infop;
1238 int exit_code, *p_code, why;
ee7c82da 1239 uid_t uid = 0; /* unneeded, required by compiler */
c8950783 1240 pid_t pid;
1da177e4 1241
47918025
ON
1242 /*
1243 * Traditionally we see ptrace'd stopped tasks regardless of options.
1244 */
9e8ae01d 1245 if (!ptrace && !(wo->wo_flags & WUNTRACED))
98abed02
RM
1246 return 0;
1247
19e27463
TH
1248 if (!task_stopped_code(p, ptrace))
1249 return 0;
1250
ee7c82da
ON
1251 exit_code = 0;
1252 spin_lock_irq(&p->sighand->siglock);
1253
90bc8d8b
ON
1254 p_code = task_stopped_code(p, ptrace);
1255 if (unlikely(!p_code))
ee7c82da
ON
1256 goto unlock_sig;
1257
90bc8d8b 1258 exit_code = *p_code;
ee7c82da
ON
1259 if (!exit_code)
1260 goto unlock_sig;
1261
9e8ae01d 1262 if (!unlikely(wo->wo_flags & WNOWAIT))
90bc8d8b 1263 *p_code = 0;
ee7c82da 1264
8ca937a6 1265 uid = from_kuid_munged(current_user_ns(), task_uid(p));
ee7c82da
ON
1266unlock_sig:
1267 spin_unlock_irq(&p->sighand->siglock);
1268 if (!exit_code)
1da177e4
LT
1269 return 0;
1270
1271 /*
1272 * Now we are pretty sure this task is interesting.
1273 * Make sure it doesn't get reaped out from under us while we
1274 * give up the lock and then examine it below. We don't want to
1275 * keep holding onto the tasklist_lock while we call getrusage and
1276 * possibly take page faults for user memory.
1277 */
1278 get_task_struct(p);
6c5f3e7b 1279 pid = task_pid_vnr(p);
f470021a 1280 why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
1da177e4 1281 read_unlock(&tasklist_lock);
1029a2b5 1282 sched_annotate_sleep();
e61a2502
AV
1283 if (wo->wo_rusage)
1284 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
bb380ec3 1285 put_task_struct(p);
1da177e4 1286
bb380ec3
AV
1287 if (likely(!(wo->wo_flags & WNOWAIT)))
1288 wo->wo_stat = (exit_code << 8) | 0x7f;
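 /*
  * The status assembled just above follows the traditional "stopped"
  * layout: the low byte 0x7f makes WIFSTOPPED(status) true in the parent,
  * and the stop signal sits in the high byte so WSTOPSIG(status) recovers
  * exit_code (e.g. SIGSTOP or SIGTSTP).
  */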
1da177e4 1289
9e8ae01d 1290 infop = wo->wo_info;
67d7ddde
AV
1291 if (infop) {
1292 infop->cause = why;
1293 infop->status = exit_code;
1294 infop->pid = pid;
1295 infop->uid = uid;
1296 }
67d7ddde 1297 return pid;
1da177e4
LT
1298}
1299
1300/*
1301 * Handle do_wait work for one task in a live, non-stopped state.
1302 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
1303 * the lock and this task is uninteresting. If we return nonzero, we have
1304 * released the lock and the system call should return.
1305 */
9e8ae01d 1306static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
1da177e4 1307{
bb380ec3 1308 struct waitid_info *infop;
1da177e4
LT
1309 pid_t pid;
1310 uid_t uid;
1311
9e8ae01d 1312 if (!unlikely(wo->wo_flags & WCONTINUED))
98abed02
RM
1313 return 0;
1314
1da177e4
LT
1315 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
1316 return 0;
1317
1318 spin_lock_irq(&p->sighand->siglock);
1319 /* Re-check with the lock held. */
1320 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
1321 spin_unlock_irq(&p->sighand->siglock);
1322 return 0;
1323 }
9e8ae01d 1324 if (!unlikely(wo->wo_flags & WNOWAIT))
1da177e4 1325 p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
8ca937a6 1326 uid = from_kuid_munged(current_user_ns(), task_uid(p));
1da177e4
LT
1327 spin_unlock_irq(&p->sighand->siglock);
1328
6c5f3e7b 1329 pid = task_pid_vnr(p);
1da177e4
LT
1330 get_task_struct(p);
1331 read_unlock(&tasklist_lock);
1029a2b5 1332 sched_annotate_sleep();
e61a2502
AV
1333 if (wo->wo_rusage)
1334 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
bb380ec3 1335 put_task_struct(p);
1da177e4 1336
bb380ec3
AV
1337 infop = wo->wo_info;
1338 if (!infop) {
359566fa 1339 wo->wo_stat = 0xffff;
1da177e4 1340 } else {
bb380ec3
AV
1341 infop->cause = CLD_CONTINUED;
1342 infop->pid = pid;
1343 infop->uid = uid;
1344 infop->status = SIGCONT;
1da177e4 1345 }
bb380ec3 1346 return pid;
1da177e4
LT
1347}
1348
98abed02
RM
1349/*
1350 * Consider @p for a wait by @parent.
1351 *
9e8ae01d 1352 * -ECHILD should be in ->notask_error before the first call.
98abed02
RM
1353 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
1354 * Returns zero if the search for a child should continue;
9e8ae01d 1355 * then ->notask_error is 0 if @p is an eligible child,
3a2f5a59 1356 * or still -ECHILD.
98abed02 1357 */
b6e763f0
ON
1358static int wait_consider_task(struct wait_opts *wo, int ptrace,
1359 struct task_struct *p)
98abed02 1360{
3245d6ac
ON
1361 /*
1362 * We can race with wait_task_zombie() from another thread.
1363 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
1364 * can't confuse the checks below.
1365 */
6aa7de05 1366 int exit_state = READ_ONCE(p->exit_state);
b3ab0316
ON
1367 int ret;
1368
3245d6ac 1369 if (unlikely(exit_state == EXIT_DEAD))
b3ab0316
ON
1370 return 0;
1371
bf959931 1372 ret = eligible_child(wo, ptrace, p);
14dd0b81 1373 if (!ret)
98abed02
RM
1374 return ret;
1375
3245d6ac 1376 if (unlikely(exit_state == EXIT_TRACE)) {
50b8d257 1377 /*
abd50b39
ON
1378 * ptrace == 0 means we are the natural parent. In this case
1379 * we should clear notask_error, debugger will notify us.
50b8d257 1380 */
abd50b39 1381 if (likely(!ptrace))
50b8d257 1382 wo->notask_error = 0;
823b018e 1383 return 0;
50b8d257 1384 }
823b018e 1385
377d75da
ON
1386 if (likely(!ptrace) && unlikely(p->ptrace)) {
1387 /*
1388 * If it is traced by its real parent's group, just pretend
1389 * the caller is ptrace_do_wait() and reap this child if it
1390 * is zombie.
1391 *
1392 * This also hides group stop state from real parent; otherwise
1393 * a single stop can be reported twice as group and ptrace stop.
1394 * If a ptracer wants to distinguish these two events for its
1395 * own children it should create a separate process which takes
1396 * the role of real parent.
1397 */
1398 if (!ptrace_reparented(p))
1399 ptrace = 1;
1400 }
1401
45cb24a1 1402 /* slay zombie? */
3245d6ac 1403 if (exit_state == EXIT_ZOMBIE) {
9b84cca2 1404 /* we don't reap group leaders with subthreads */
7c733eb3
ON
1405 if (!delay_group_leader(p)) {
1406 /*
1407 * A zombie ptracee is only visible to its ptracer.
1408 * Notification and reaping will be cascaded to the
1409 * real parent when the ptracer detaches.
1410 */
1411 if (unlikely(ptrace) || likely(!p->ptrace))
1412 return wait_task_zombie(wo, p);
1413 }
98abed02 1414
f470021a 1415 /*
9b84cca2
TH
1416 * Allow access to stopped/continued state via zombie by
1417 * falling through. Clearing of notask_error is complex.
1418 *
1419 * When !@ptrace:
1420 *
1421 * If WEXITED is set, notask_error should naturally be
1422 * cleared. If not, subset of WSTOPPED|WCONTINUED is set,
1423 * so, if there are live subthreads, there are events to
1424 * wait for. If all subthreads are dead, it's still safe
1425 * to clear - this function will be called again in finite
1426 * amount time once all the subthreads are released and
1427 * will then return without clearing.
1428 *
1429 * When @ptrace:
1430 *
1431 * Stopped state is per-task and thus can't change once the
1432 * target task dies. Only continued and exited can happen.
1433 * Clear notask_error if WCONTINUED | WEXITED.
1434 */
1435 if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
1436 wo->notask_error = 0;
1437 } else {
1438 /*
1439 * @p is alive and it's gonna stop, continue or exit, so
1440 * there always is something to wait for.
f470021a 1441 */
9e8ae01d 1442 wo->notask_error = 0;
f470021a
RM
1443 }
1444
98abed02 1445 /*
45cb24a1
TH
1446 * Wait for stopped. Depending on @ptrace, different stopped state
1447 * is used and the two don't interact with each other.
98abed02 1448 */
19e27463
TH
1449 ret = wait_task_stopped(wo, ptrace, p);
1450 if (ret)
1451 return ret;
98abed02
RM
1452
1453 /*
45cb24a1
TH
1454 * Wait for continued. There's only one continued state and the
1455 * ptracer can consume it which can confuse the real parent. Don't
1456 * use WCONTINUED from ptracer. You don't need or want it.
98abed02 1457 */
9e8ae01d 1458 return wait_task_continued(wo, p);
98abed02
RM
1459}
1460
1461/*
1462 * Do the work of do_wait() for one thread in the group, @tsk.
1463 *
9e8ae01d 1464 * -ECHILD should be in ->notask_error before the first call.
98abed02
RM
1465 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
1466 * Returns zero if the search for a child should continue; then
9e8ae01d 1467 * ->notask_error is 0 if there were any eligible children,
3a2f5a59 1468 * or still -ECHILD.
98abed02 1469 */
9e8ae01d 1470static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
98abed02
RM
1471{
1472 struct task_struct *p;
1473
1474 list_for_each_entry(p, &tsk->children, sibling) {
9cd80bbb 1475 int ret = wait_consider_task(wo, 0, p);
a0be55de 1476
9cd80bbb
ON
1477 if (ret)
1478 return ret;
98abed02
RM
1479 }
1480
1481 return 0;
1482}
1483
9e8ae01d 1484static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
98abed02
RM
1485{
1486 struct task_struct *p;
1487
f470021a 1488 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
b6e763f0 1489 int ret = wait_consider_task(wo, 1, p);
a0be55de 1490
f470021a 1491 if (ret)
98abed02 1492 return ret;
98abed02
RM
1493 }
1494
1495 return 0;
1496}
1497
2e521a20 1498bool pid_child_should_wake(struct wait_opts *wo, struct task_struct *p)
9d900d4e
JA
1499{
1500 if (!eligible_pid(wo, p))
1501 return false;
1502
1503 if ((wo->wo_flags & __WNOTHREAD) && wo->child_wait.private != p->parent)
1504 return false;
1505
1506 return true;
1507}
1508
ac6424b9 1509static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
0b7570e7
ON
1510 int sync, void *key)
1511{
1512 struct wait_opts *wo = container_of(wait, struct wait_opts,
1513 child_wait);
1514 struct task_struct *p = key;
1515
9d900d4e
JA
1516 if (pid_child_should_wake(wo, p))
1517 return default_wake_function(wait, mode, sync, key);
0b7570e7 1518
9d900d4e 1519 return 0;
0b7570e7
ON
1520}
1521
a7f0765e
ON
1522void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
1523{
0b7570e7 1524 __wake_up_sync_key(&parent->signal->wait_chldexit,
ce4dd442 1525 TASK_INTERRUPTIBLE, p);
a7f0765e
ON
1526}
1527
5449162a
JN
1528static bool is_effectively_child(struct wait_opts *wo, bool ptrace,
1529 struct task_struct *target)
1530{
1531 struct task_struct *parent =
1532 !ptrace ? target->real_parent : target->parent;
1533
1534 return current == parent || (!(wo->wo_flags & __WNOTHREAD) &&
1535 same_thread_group(current, parent));
1536}
1537
1538/*
1539 * Optimization for waiting on PIDTYPE_PID. No need to iterate through child
1540 * and tracee lists to find the target task.
1541 */
1542static int do_wait_pid(struct wait_opts *wo)
1543{
1544 bool ptrace;
1545 struct task_struct *target;
1546 int retval;
1547
1548 ptrace = false;
1549 target = pid_task(wo->wo_pid, PIDTYPE_TGID);
1550 if (target && is_effectively_child(wo, ptrace, target)) {
1551 retval = wait_consider_task(wo, ptrace, target);
1552 if (retval)
1553 return retval;
1554 }
1555
1556 ptrace = true;
1557 target = pid_task(wo->wo_pid, PIDTYPE_PID);
1558 if (target && target->ptrace &&
1559 is_effectively_child(wo, ptrace, target)) {
1560 retval = wait_consider_task(wo, ptrace, target);
1561 if (retval)
1562 return retval;
1563 }
1564
1565 return 0;
1566}
1567
2e521a20 1568long __do_wait(struct wait_opts *wo)
1da177e4 1569{
06a101ca 1570 long retval;
0a16b607 1571
98abed02 1572 /*
3da56d16 1573 * If there is nothing that can match our criteria, just get out.
9e8ae01d
ON
1574 * We will clear ->notask_error to zero if we see any child that
1575 * might later match our criteria, even if we are not able to reap
1576 * it yet.
98abed02 1577 */
64a16caf 1578 wo->notask_error = -ECHILD;
9e8ae01d 1579 if ((wo->wo_type < PIDTYPE_MAX) &&
1722c14a 1580 (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
64a16caf 1581 goto notask;
161550d7 1582
1da177e4 1583 read_lock(&tasklist_lock);
9e8ae01d 1584
5449162a
JN
1585 if (wo->wo_type == PIDTYPE_PID) {
1586 retval = do_wait_pid(wo);
64a16caf 1587 if (retval)
06a101ca 1588 return retval;
5449162a
JN
1589 } else {
1590 struct task_struct *tsk = current;
1591
1592 do {
1593 retval = do_wait_thread(wo, tsk);
1594 if (retval)
06a101ca 1595 return retval;
98abed02 1596
5449162a
JN
1597 retval = ptrace_do_wait(wo, tsk);
1598 if (retval)
06a101ca 1599 return retval;
5449162a
JN
1600
1601 if (wo->wo_flags & __WNOTHREAD)
1602 break;
1603 } while_each_thread(current, tsk);
1604 }
1da177e4 1605 read_unlock(&tasklist_lock);
f2cc3eb1 1606
64a16caf 1607notask:
9e8ae01d 1608 retval = wo->notask_error;
06a101ca
JA
1609 if (!retval && !(wo->wo_flags & WNOHANG))
1610 return -ERESTARTSYS;
1611
1612 return retval;
1613}
1614
1615static long do_wait(struct wait_opts *wo)
1616{
1617 int retval;
1618
1619 trace_sched_process_wait(wo->wo_pid);
1620
1621 init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
1622 wo->child_wait.private = current;
1623 add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1624
1625 do {
1626 set_current_state(TASK_INTERRUPTIBLE);
1627 retval = __do_wait(wo);
1628 if (retval != -ERESTARTSYS)
1629 break;
1630 if (signal_pending(current))
1631 break;
1632 schedule();
1633 } while (1);
1634
f95d39d1 1635 __set_current_state(TASK_RUNNING);
0b7570e7 1636 remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
1da177e4
LT
1637 return retval;
1638}
1639
2e521a20
JA
1640int kernel_waitid_prepare(struct wait_opts *wo, int which, pid_t upid,
1641 struct waitid_info *infop, int options,
1642 struct rusage *ru)
1da177e4 1643{
eda7e9d4 1644 unsigned int f_flags = 0;
161550d7
EB
1645 struct pid *pid = NULL;
1646 enum pid_type type;
1da177e4 1647
91c4e8ea
ON
1648 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
1649 __WNOTHREAD|__WCLONE|__WALL))
1da177e4
LT
1650 return -EINVAL;
1651 if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
1652 return -EINVAL;
1653
1654 switch (which) {
1655 case P_ALL:
161550d7 1656 type = PIDTYPE_MAX;
1da177e4
LT
1657 break;
1658 case P_PID:
161550d7
EB
1659 type = PIDTYPE_PID;
1660 if (upid <= 0)
1da177e4 1661 return -EINVAL;
3695eae5
CB
1662
1663 pid = find_get_pid(upid);
1da177e4
LT
1664 break;
1665 case P_PGID:
161550d7 1666 type = PIDTYPE_PGID;
821cc7b0 1667 if (upid < 0)
1da177e4 1668 return -EINVAL;
3695eae5 1669
821cc7b0
EB
1670 if (upid)
1671 pid = find_get_pid(upid);
1672 else
1673 pid = get_task_pid(current, PIDTYPE_PGID);
3695eae5
CB
1674 break;
1675 case P_PIDFD:
1676 type = PIDTYPE_PID;
1677 if (upid < 0)
1da177e4 1678 return -EINVAL;
3695eae5 1679
ba7d25f3 1680 pid = pidfd_get_pid(upid, &f_flags);
3695eae5
CB
1681 if (IS_ERR(pid))
1682 return PTR_ERR(pid);
ba7d25f3 1683
1da177e4
LT
1684 break;
1685 default:
1686 return -EINVAL;
1687 }
1688
eda7e9d4
JA
1689 wo->wo_type = type;
1690 wo->wo_pid = pid;
1691 wo->wo_flags = options;
1692 wo->wo_info = infop;
1693 wo->wo_rusage = ru;
ba7d25f3 1694 if (f_flags & O_NONBLOCK)
eda7e9d4
JA
1695 wo->wo_flags |= WNOHANG;
1696
1697 return 0;
1698}
1699
1700static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
1701 int options, struct rusage *ru)
1702{
1703 struct wait_opts wo;
1704 long ret;
1705
1706 ret = kernel_waitid_prepare(&wo, which, upid, infop, options, ru);
1707 if (ret)
1708 return ret;
ba7d25f3 1709
9e8ae01d 1710 ret = do_wait(&wo);
eda7e9d4 1711 if (!ret && !(options & WNOHANG) && (wo.wo_flags & WNOHANG))
ba7d25f3 1712 ret = -EAGAIN;
dfe16dfa 1713
eda7e9d4 1714 put_pid(wo.wo_pid);
1da177e4
LT
1715 return ret;
1716}
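/*
 * A rough userspace view of the P_PIDFD path handled above (assuming a
 * pidfd obtained via pidfd_open(2) or clone() with CLONE_PIDFD):
 *
 *   siginfo_t si;
 *   waitid(P_PIDFD, pidfd, &si, WEXITED);
 *
 * If the pidfd was opened non-blocking, kernel_waitid_prepare() folds
 * O_NONBLOCK into WNOHANG, and a child that has not exited yet is reported
 * as -EAGAIN rather than 0, per the !(options & WNOHANG) check above.
 */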
1717
ce72a16f
AV
1718SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
1719 infop, int, options, struct rusage __user *, ru)
1720{
1721 struct rusage r;
67d7ddde
AV
1722 struct waitid_info info = {.status = 0};
1723 long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
634a8160 1724 int signo = 0;
6c85501f 1725
634a8160
AV
1726 if (err > 0) {
1727 signo = SIGCHLD;
1728 err = 0;
ce72a16f
AV
1729 if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1730 return -EFAULT;
1731 }
67d7ddde
AV
1732 if (!infop)
1733 return err;
1734
41cd7805 1735 if (!user_write_access_begin(infop, sizeof(*infop)))
1c9fec47 1736 return -EFAULT;
96ca579a 1737
634a8160 1738 unsafe_put_user(signo, &infop->si_signo, Efault);
4c48abe9 1739 unsafe_put_user(0, &infop->si_errno, Efault);
cc731525 1740 unsafe_put_user(info.cause, &infop->si_code, Efault);
4c48abe9
AV
1741 unsafe_put_user(info.pid, &infop->si_pid, Efault);
1742 unsafe_put_user(info.uid, &infop->si_uid, Efault);
1743 unsafe_put_user(info.status, &infop->si_status, Efault);
41cd7805 1744 user_write_access_end();
ce72a16f 1745 return err;
4c48abe9 1746Efault:
41cd7805 1747 user_write_access_end();
4c48abe9 1748 return -EFAULT;
ce72a16f
AV
1749}
1750
92ebce5a
AV
1751long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
1752 struct rusage *ru)
1da177e4 1753{
9e8ae01d 1754 struct wait_opts wo;
161550d7
EB
1755 struct pid *pid = NULL;
1756 enum pid_type type;
1da177e4
LT
1757 long ret;
1758
1759 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
1760 __WNOTHREAD|__WCLONE|__WALL))
1761 return -EINVAL;
161550d7 1762
dd83c161 1763 /* -INT_MIN is not defined */
1764 if (upid == INT_MIN)
1765 return -ESRCH;
1766
161550d7
EB
1767 if (upid == -1)
1768 type = PIDTYPE_MAX;
1769 else if (upid < 0) {
1770 type = PIDTYPE_PGID;
1771 pid = find_get_pid(-upid);
1772 } else if (upid == 0) {
1773 type = PIDTYPE_PGID;
2ae448ef 1774 pid = get_task_pid(current, PIDTYPE_PGID);
161550d7
EB
1775 } else /* upid > 0 */ {
1776 type = PIDTYPE_PID;
1777 pid = find_get_pid(upid);
1778 }
1779
9e8ae01d
ON
1780 wo.wo_type = type;
1781 wo.wo_pid = pid;
1782 wo.wo_flags = options | WEXITED;
1783 wo.wo_info = NULL;
359566fa 1784 wo.wo_stat = 0;
9e8ae01d
ON
1785 wo.wo_rusage = ru;
1786 ret = do_wait(&wo);
161550d7 1787 put_pid(pid);
359566fa
AV
1788 if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
1789 ret = -EFAULT;
1da177e4 1790
1da177e4
LT
1791 return ret;
1792}
1793
8043fc14
CH
1794int kernel_wait(pid_t pid, int *stat)
1795{
1796 struct wait_opts wo = {
1797 .wo_type = PIDTYPE_PID,
1798 .wo_pid = find_get_pid(pid),
1799 .wo_flags = WEXITED,
1800 };
1801 int ret;
1802
1803 ret = do_wait(&wo);
1804 if (ret > 0 && wo.wo_stat)
1805 *stat = wo.wo_stat;
1806 put_pid(wo.wo_pid);
1807 return ret;
1808}
1809
ce72a16f
AV
1810SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
1811 int, options, struct rusage __user *, ru)
1812{
1813 struct rusage r;
1814 long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);
1815
1816 if (err > 0) {
1817 if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
1818 return -EFAULT;
1819 }
1820 return err;
1821}
1822
1da177e4
LT
1823#ifdef __ARCH_WANT_SYS_WAITPID
1824
1825/*
1826 * sys_waitpid() remains for compatibility. waitpid() should be
1827 * implemented by calling sys_wait4() from libc.a.
1828 */
17da2bd9 1829SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
1da177e4 1830{
d300b610 1831 return kernel_wait4(pid, stat_addr, options, NULL);
1da177e4
LT
1832}
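/*
 * i.e. waitpid(pid, &status, options) behaves like
 * wait4(pid, &status, options, NULL), which is exactly the kernel_wait4()
 * call above with a NULL rusage pointer.
 */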
1833
1834#endif
7e95a225
AV
1835
1836#ifdef CONFIG_COMPAT
1837COMPAT_SYSCALL_DEFINE4(wait4,
1838 compat_pid_t, pid,
1839 compat_uint_t __user *, stat_addr,
1840 int, options,
1841 struct compat_rusage __user *, ru)
1842{
ce72a16f
AV
1843 struct rusage r;
1844 long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
1845 if (err > 0) {
1846 if (ru && put_compat_rusage(&r, ru))
1847 return -EFAULT;
7e95a225 1848 }
ce72a16f 1849 return err;
7e95a225
AV
1850}
1851
1852COMPAT_SYSCALL_DEFINE5(waitid,
1853 int, which, compat_pid_t, pid,
1854 struct compat_siginfo __user *, infop, int, options,
1855 struct compat_rusage __user *, uru)
1856{
7e95a225 1857 struct rusage ru;
67d7ddde
AV
1858 struct waitid_info info = {.status = 0};
1859 long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
634a8160
AV
1860 int signo = 0;
1861 if (err > 0) {
1862 signo = SIGCHLD;
1863 err = 0;
6c85501f
AV
1864 if (uru) {
1865 /* kernel_waitid() overwrites everything in ru */
1866 if (COMPAT_USE_64BIT_TIME)
1867 err = copy_to_user(uru, &ru, sizeof(ru));
1868 else
1869 err = put_compat_rusage(&ru, uru);
1870 if (err)
1871 return -EFAULT;
1872 }
7e95a225
AV
1873 }
1874
4c48abe9
AV
1875 if (!infop)
1876 return err;
1877
41cd7805 1878 if (!user_write_access_begin(infop, sizeof(*infop)))
1c9fec47 1879 return -EFAULT;
96ca579a 1880
634a8160 1881 unsafe_put_user(signo, &infop->si_signo, Efault);
4c48abe9 1882 unsafe_put_user(0, &infop->si_errno, Efault);
cc731525 1883 unsafe_put_user(info.cause, &infop->si_code, Efault);
4c48abe9
AV
1884 unsafe_put_user(info.pid, &infop->si_pid, Efault);
1885 unsafe_put_user(info.uid, &infop->si_uid, Efault);
1886 unsafe_put_user(info.status, &infop->si_status, Efault);
41cd7805 1887 user_write_access_end();
67d7ddde 1888 return err;
4c48abe9 1889Efault:
41cd7805 1890 user_write_access_end();
4c48abe9 1891 return -EFAULT;
7e95a225
AV
1892}
1893#endif
7c2c11b2 1894
c27cd083
MR
1895/*
1896 * This needs to be __function_aligned as GCC implicitly makes any
1897 * implementation of abort() cold and drops alignment specified by
1898 * -falign-functions=N.
1899 *
1900 * See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=88345#c11
1901 */
1902__weak __function_aligned void abort(void)
7c2c11b2
SM
1903{
1904 BUG();
1905
1906 /* if that doesn't kill us, halt */
1907 panic("Oops failed to kill thread");
1908}
dc8635b7 1909EXPORT_SYMBOL(abort);