// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/binfmts.h>
#include <linux/nsproxy.h>
#include <linux/pid_namespace.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/kthread.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cgroup.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/tracehook.h>
#include <linux/fs_struct.h>
#include <linux/init_task.h>
#include <linux/perf_event.h>
#include <trace/events/sched.h>
#include <linux/hw_breakpoint.h>
#include <linux/oom.h>
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
#include <linux/random.h>
#include <linux/rcuwait.h>
#include <linux/compat.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mmu_context.h>
static void __unhash_process(struct task_struct *p, bool group_dead)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (group_dead) {
		detach_pid(p, PIDTYPE_TGID);
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		list_del_init(&p->sibling);
		__this_cpu_dec(process_counts);
	}
	list_del_rcu(&p->thread_group);
	list_del_rcu(&p->thread_node);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	bool group_dead = thread_group_leader(tsk);
	struct sighand_struct *sighand;
	struct tty_struct *tty;
	u64 utime, stime;

	sighand = rcu_dereference_check(tsk->sighand,
					lockdep_tasklist_lock_is_held());
	spin_lock(&sighand->siglock);

#ifdef CONFIG_POSIX_TIMERS
	posix_cpu_timers_exit(tsk);
	if (group_dead)
		posix_cpu_timers_exit_group(tsk);
#endif

	if (group_dead) {
		tty = sig->tty;
		sig->tty = NULL;
	} else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->notify_count > 0 && !--sig->notify_count)
			wake_up_process(sig->group_exit_task);

		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
	}

	add_device_randomness((const void *) &tsk->se.sum_exec_runtime,
			      sizeof(unsigned long long));

	/*
	 * Accumulate here the counters for all threads as they die. We could
	 * skip the group leader because it is the last user of signal_struct,
	 * but we want to avoid the race with thread_group_cputime() which can
	 * see the empty ->thread_head list.
	 */
	task_cputime(tsk, &utime, &stime);
	write_seqlock(&sig->stats_lock);
	sig->utime += utime;
	sig->stime += stime;
	sig->gtime += task_gtime(tsk);
	sig->min_flt += tsk->min_flt;
	sig->maj_flt += tsk->maj_flt;
	sig->nvcsw += tsk->nvcsw;
	sig->nivcsw += tsk->nivcsw;
	sig->inblock += task_io_get_inblock(tsk);
	sig->oublock += task_io_get_oublock(tsk);
	task_io_accounting_add(&sig->ioac, &tsk->ioac);
	sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
	sig->nr_threads--;
	__unhash_process(tsk, group_dead);
	write_sequnlock(&sig->stats_lock);

	/*
	 * Do this under ->siglock, we can race with another thread
	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
	 */
	flush_sigqueue(&tsk->pending);
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
	if (group_dead) {
		flush_sigqueue(&sig->shared_pending);
		tty_kref_put(tty);
	}
}

static void delayed_put_task_struct(struct rcu_head *rhp)
{
	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);

	perf_event_delayed_put(tsk);
	trace_sched_process_free(tsk);
	put_task_struct(tsk);
}

void put_task_struct_rcu_user(struct task_struct *task)
{
	if (refcount_dec_and_test(&task->rcu_users))
		call_rcu(&task->rcu, delayed_put_task_struct);
}

void release_task(struct task_struct *p)
{
	struct task_struct *leader;
	struct pid *thread_pid;
	int zap_leader;
repeat:
	/* don't need to get the RCU readlock here - the process is dead and
	 * can't be modifying its own credentials. But shut RCU-lockdep up */
	rcu_read_lock();
	atomic_dec(&__task_cred(p)->user->processes);
	rcu_read_unlock();

	cgroup_release(p);

	write_lock_irq(&tasklist_lock);
	ptrace_release_task(p);
	thread_pid = get_pid(p->thread_pid);
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader)
			&& leader->exit_state == EXIT_ZOMBIE) {
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 */
		zap_leader = do_notify_parent(leader, leader->exit_signal);
		if (zap_leader)
			leader->exit_state = EXIT_DEAD;
	}

	write_unlock_irq(&tasklist_lock);
	seccomp_filter_release(p);
	proc_flush_pid(thread_pid);
	put_pid(thread_pid);
	release_thread(p);
	put_task_struct_rcu_user(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

int rcuwait_wake_up(struct rcuwait *w)
{
	int ret = 0;
	struct task_struct *task;

	rcu_read_lock();

	/*
	 * Order condition vs @task, such that everything prior to the load
	 * of @task is visible. This is the condition as to why the user called
	 * rcuwait_wake() in the first place. Pairs with set_current_state()
	 * barrier (A) in rcuwait_wait_event().
	 *
	 *    WAIT                WAKE
	 *    [S] tsk = current   [S] cond = true
	 *        MB (A)              MB (B)
	 *    [L] cond            [L] tsk
	 */
	smp_mb(); /* (B) */

	task = rcu_dereference(w->task);
	if (task)
		ret = wake_up_process(task);
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL_GPL(rcuwait_wake_up);
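
/*
 * Example pairing for the barrier diagram above -- a minimal sketch of the
 * waiter side, assuming the rcuwait_wait_event(w, cond, state) helper from
 * <linux/rcuwait.h> and an illustrative "done" flag:
 *
 *	// waiter					// waker
 *	rcuwait_wait_event(&w, done,			done = true;
 *			   TASK_UNINTERRUPTIBLE);	rcuwait_wake_up(&w);
 *
 * The waiter publishes itself into w->task with barrier (A) before
 * re-checking the condition; the smp_mb() (B) above guarantees the waker
 * either sees the sleeping task or the waiter sees the updated condition.
 */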

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(struct pid *pgrp,
					struct task_struct *ignored_task)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if ((p == ignored_task) ||
		    (p->exit_state && thread_group_empty(p)) ||
		    is_global_init(p->real_parent))
			continue;

		if (task_pgrp(p->real_parent) != pgrp &&
		    task_session(p->real_parent) == task_session(p))
			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return 1;
}

int is_current_pgrp_orphaned(void)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static bool has_stopped_jobs(struct pid *pgrp)
{
	struct task_struct *p;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return true;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);

	return false;
}

/*
 * Check to see if any process groups have become orphaned as
 * a result of our exiting, and if they have any stopped jobs,
 * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
 */
static void
kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
{
	struct pid *pgrp = task_pgrp(tsk);
	struct task_struct *ignored_task = tsk;

	if (!parent)
		/* exit: our father is in a different pgrp than
		 * we are and we were the only connection outside.
		 */
		parent = tsk->real_parent;
	else
		/* reparent: our child is in a different pgrp than
		 * we are, and it was the only connection outside.
		 */
		ignored_task = NULL;

	if (task_pgrp(parent) != pgrp &&
	    task_session(parent) == task_session(tsk) &&
	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
	    has_stopped_jobs(pgrp)) {
		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
	}
}

#ifdef CONFIG_MEMCG
/*
 * A task is exiting. If it owned this mm, find a new owner for the mm.
 */
void mm_update_next_owner(struct mm_struct *mm)
{
	struct task_struct *c, *g, *p = current;

retry:
	/*
	 * If the exiting or execing task is not the owner, it's
	 * someone else's problem.
	 */
	if (mm->owner != p)
		return;
	/*
	 * The current owner is exiting/execing and there are no other
	 * candidates.  Do not leave the mm pointing to a possibly
	 * freed task structure.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		WRITE_ONCE(mm->owner, NULL);
		return;
	}

	read_lock(&tasklist_lock);
	/*
	 * Search in the children
	 */
	list_for_each_entry(c, &p->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search in the siblings
	 */
	list_for_each_entry(c, &p->real_parent->children, sibling) {
		if (c->mm == mm)
			goto assign_new_owner;
	}

	/*
	 * Search through everything else, we should not get here often.
	 */
	for_each_process(g) {
		if (g->flags & PF_KTHREAD)
			continue;
		for_each_thread(g, c) {
			if (c->mm == mm)
				goto assign_new_owner;
			if (c->mm)
				break;
		}
	}
	read_unlock(&tasklist_lock);
	/*
	 * We found no owner yet mm_users > 1: this implies that we are
	 * most likely racing with swapoff (try_to_unuse()) or /proc or
	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL.
	 */
	WRITE_ONCE(mm->owner, NULL);
	return;

assign_new_owner:
	BUG_ON(c == p);
	get_task_struct(c);
	/*
	 * The task_lock protects c->mm from changing.
	 * We always want mm->owner->mm == mm
	 */
	task_lock(c);
	/*
	 * Delay read_unlock() till we have the task_lock()
	 * to ensure that c does not slip away underneath us
	 */
	read_unlock(&tasklist_lock);
	if (c->mm != mm) {
		task_unlock(c);
		put_task_struct(c);
		goto retry;
	}
	WRITE_ONCE(mm->owner, c);
	task_unlock(c);
	put_task_struct(c);
}
#endif /* CONFIG_MEMCG */

/*
 * Turn us into a lazy TLB process if we
 * aren't already..
 */
static void exit_mm(void)
{
	struct mm_struct *mm = current->mm;
	struct core_state *core_state;

	exit_mm_release(current, mm);
	if (!mm)
		return;
	sync_mm_rss(mm);
	/*
	 * Serialize with any possible pending coredump.
	 * We must hold mmap_lock around checking core_state
	 * and clearing tsk->mm.  The core-inducing thread
	 * will increment ->nr_threads for each thread in the
	 * group with ->mm != NULL.
	 */
	mmap_read_lock(mm);
	core_state = mm->core_state;
	if (core_state) {
		struct core_thread self;

		mmap_read_unlock(mm);

		self.task = current;
		self.next = xchg(&core_state->dumper.next, &self);
		/*
		 * Implies mb(), the result of xchg() must be visible
		 * to core_state->dumper.
		 */
		if (atomic_dec_and_test(&core_state->nr_threads))
			complete(&core_state->startup);

		for (;;) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			if (!self.task) /* see coredump_finish() */
				break;
			freezable_schedule();
		}
		__set_current_state(TASK_RUNNING);
		mmap_read_lock(mm);
	}
	mmgrab(mm);
	BUG_ON(mm != current->active_mm);
	/* more a memory barrier than a real lock */
	task_lock(current);
	current->mm = NULL;
	mmap_read_unlock(mm);
	enter_lazy_tlb(mm, current);
	task_unlock(current);
	mm_update_next_owner(mm);
	mmput(mm);
	if (test_thread_flag(TIF_MEMDIE))
		exit_oom_victim();
}
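
/*
 * A descriptive note on the core_state handshake above: the thread that
 * triggers a coredump publishes mm->core_state and waits for ->startup in
 * the coredump path; every other thread sharing the mm passes through
 * exit_mm(), links its core_thread into core_state->dumper via the xchg(),
 * completes ->startup when it is the last one in, and then sleeps until
 * coredump_finish() clears self.task and wakes it.
 */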

static struct task_struct *find_alive_thread(struct task_struct *p)
{
	struct task_struct *t;

	for_each_thread(p, t) {
		if (!(t->flags & PF_EXITING))
			return t;
	}
	return NULL;
}

static struct task_struct *find_child_reaper(struct task_struct *father,
						struct list_head *dead)
	__releases(&tasklist_lock)
	__acquires(&tasklist_lock)
{
	struct pid_namespace *pid_ns = task_active_pid_ns(father);
	struct task_struct *reaper = pid_ns->child_reaper;
	struct task_struct *p, *n;

	if (likely(reaper != father))
		return reaper;

	reaper = find_alive_thread(father);
	if (reaper) {
		pid_ns->child_reaper = reaper;
		return reaper;
	}

	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}

	zap_pid_ns_processes(pid_ns);
	write_lock_irq(&tasklist_lock);

	return father;
}

/*
 * When we die, we re-parent all our children, and try to:
 * 1. give them to another thread in our thread group, if such a member exists
 * 2. give it to the first ancestor process which prctl'd itself as a
 *    child_subreaper for its children (like a service manager)
 * 3. give it to the init process (PID 1) in our pid namespace
 */
static struct task_struct *find_new_reaper(struct task_struct *father,
					   struct task_struct *child_reaper)
{
	struct task_struct *thread, *reaper;

	thread = find_alive_thread(father);
	if (thread)
		return thread;

	if (father->signal->has_child_subreaper) {
		unsigned int ns_level = task_pid(father)->level;
		/*
		 * Find the first ->is_child_subreaper ancestor in our pid_ns.
		 * We can't check reaper != child_reaper to ensure we do not
		 * cross the namespaces, the exiting parent could be injected
		 * by setns() + fork().
		 * We check pid->level, this is slightly more efficient than
		 * task_active_pid_ns(reaper) != task_active_pid_ns(father).
		 */
		for (reaper = father->real_parent;
		     task_pid(reaper)->level == ns_level;
		     reaper = reaper->real_parent) {
			if (reaper == &init_task)
				break;
			if (!reaper->signal->is_child_subreaper)
				continue;
			thread = find_alive_thread(reaper);
			if (thread)
				return thread;
		}
	}

	return child_reaper;
}
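
/*
 * The userspace side of case 2 above, as an illustrative sketch: a service
 * manager opts in with prctl(2) so that double-forked daemons get
 * reparented to it rather than to init:
 *
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_CHILD_SUBREAPER, 1) == 0) {
 *		// orphaned descendants are now reaped here
 *	}
 */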

/*
 * Any that need to be release_task'd are put on the @dead list.
 */
static void reparent_leader(struct task_struct *father, struct task_struct *p,
				struct list_head *dead)
{
	if (unlikely(p->exit_state == EXIT_DEAD))
		return;

	/* We don't want people slaying init. */
	p->exit_signal = SIGCHLD;

	/* If it has exited notify the new parent about this child's death. */
	if (!p->ptrace &&
	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
		if (do_notify_parent(p, p->exit_signal)) {
			p->exit_state = EXIT_DEAD;
			list_add(&p->ptrace_entry, dead);
		}
	}

	kill_orphaned_pgrp(p, father);
}

/*
 * This does two things:
 *
 * A.  Make init inherit all the child processes
 * B.  Check to see if any process groups have become orphaned
 *	as a result of our exiting, and if they have any stopped
 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 */
static void forget_original_parent(struct task_struct *father,
					struct list_head *dead)
{
	struct task_struct *p, *t, *reaper;

	if (unlikely(!list_empty(&father->ptraced)))
		exit_ptrace(father, dead);

	/* Can drop and reacquire tasklist_lock */
	reaper = find_child_reaper(father, dead);
	if (list_empty(&father->children))
		return;

	reaper = find_new_reaper(father, reaper);
	list_for_each_entry(p, &father->children, sibling) {
		for_each_thread(p, t) {
			RCU_INIT_POINTER(t->real_parent, reaper);
			BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
			if (likely(!t->ptrace))
				t->parent = t->real_parent;
			if (t->pdeath_signal)
				group_send_sig_info(t->pdeath_signal,
						    SEND_SIG_NOINFO, t,
						    PIDTYPE_TGID);
		}
		/*
		 * If this is a threaded reparent there is no need to
		 * notify anyone anything has happened.
		 */
		if (!same_thread_group(reaper, father))
			reparent_leader(father, p, dead);
	}
	list_splice_tail_init(&father->children, &reaper->children);
}

/*
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
static void exit_notify(struct task_struct *tsk, int group_dead)
{
	bool autoreap;
	struct task_struct *p, *n;
	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	forget_original_parent(tsk, &dead);

	if (group_dead)
		kill_orphaned_pgrp(tsk->group_leader, NULL);

	tsk->exit_state = EXIT_ZOMBIE;
	if (unlikely(tsk->ptrace)) {
		int sig = thread_group_leader(tsk) &&
				thread_group_empty(tsk) &&
				!ptrace_reparented(tsk) ?
			tsk->exit_signal : SIGCHLD;
		autoreap = do_notify_parent(tsk, sig);
	} else if (thread_group_leader(tsk)) {
		autoreap = thread_group_empty(tsk) &&
			   do_notify_parent(tsk, tsk->exit_signal);
	} else {
		autoreap = true;
	}

	if (autoreap) {
		tsk->exit_state = EXIT_DEAD;
		list_add(&tsk->ptrace_entry, &dead);
	}

	/* mt-exec, de_thread() is waiting for group leader */
	if (unlikely(tsk->signal->notify_count < 0))
		wake_up_process(tsk->signal->group_exit_task);
	write_unlock_irq(&tasklist_lock);

	list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
		list_del_init(&p->ptrace_entry);
		release_task(p);
	}
}

#ifdef CONFIG_DEBUG_STACK_USAGE
static void check_stack_usage(void)
{
	static DEFINE_SPINLOCK(low_water_lock);
	static int lowest_to_date = THREAD_SIZE;
	unsigned long free;

	free = stack_not_used(current);

	if (free >= lowest_to_date)
		return;

	spin_lock(&low_water_lock);
	if (free < lowest_to_date) {
		pr_info("%s (%d) used greatest stack depth: %lu bytes left\n",
			current->comm, task_pid_nr(current), free);
		lowest_to_date = free;
	}
	spin_unlock(&low_water_lock);
}
#else
static inline void check_stack_usage(void) {}
#endif

void __noreturn do_exit(long code)
{
	struct task_struct *tsk = current;
	int group_dead;

	/*
	 * We can get here from a kernel oops, sometimes with preemption off.
	 * Start by checking for critical errors.
	 * Then fix up important state like USER_DS and preemption.
	 * Then do everything else.
	 */

	WARN_ON(blk_needs_flush_plug(tsk));

	if (unlikely(in_interrupt()))
		panic("Aiee, killing interrupt handler!");
	if (unlikely(!tsk->pid))
		panic("Attempted to kill the idle task!");

	/*
	 * If do_exit is called because this process oopsed, it's possible
	 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
	 * continuing. Amongst other possible reasons, this is to prevent
	 * mm_release()->clear_child_tid() from writing to a user-controlled
	 * kernel address.
	 */
	force_uaccess_begin();

	if (unlikely(in_atomic())) {
		pr_info("note: %s[%d] exited with preempt_count %d\n",
			current->comm, task_pid_nr(current),
			preempt_count());
		preempt_count_set(PREEMPT_ENABLED);
	}

	profile_task_exit(tsk);
	kcov_task_exit(tsk);

	ptrace_event(PTRACE_EVENT_EXIT, code);

	validate_creds_for_do_exit(tsk);

	/*
	 * We're taking recursive faults here in do_exit. Safest is to just
	 * leave this task alone and wait for reboot.
	 */
	if (unlikely(tsk->flags & PF_EXITING)) {
		pr_alert("Fixing recursive fault but reboot is needed!\n");
		futex_exit_recursive(tsk);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule();
	}

	exit_signals(tsk);  /* sets PF_EXITING */

	/* sync mm's RSS info before statistics gathering */
	if (tsk->mm)
		sync_mm_rss(tsk->mm);
	acct_update_integrals(tsk);
	group_dead = atomic_dec_and_test(&tsk->signal->live);
	if (group_dead) {
		/*
		 * If the last thread of global init has exited, panic
		 * immediately to get a usable coredump.
		 */
		if (unlikely(is_global_init(tsk)))
			panic("Attempted to kill init! exitcode=0x%08x\n",
				tsk->signal->group_exit_code ?: (int)code);

#ifdef CONFIG_POSIX_TIMERS
		hrtimer_cancel(&tsk->signal->real_timer);
		exit_itimers(tsk->signal);
#endif
		if (tsk->mm)
			setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm);
	}
	acct_collect(code, group_dead);
	if (group_dead)
		tty_audit_exit();
	audit_free(tsk);

	tsk->exit_code = code;
	taskstats_exit(tsk, group_dead);

	exit_mm();

	if (group_dead)
		acct_process();
	trace_sched_process_exit(tsk);

	exit_sem(tsk);
	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	if (group_dead)
		disassociate_ctty(1);
	exit_task_namespaces(tsk);
	exit_task_work(tsk);
	exit_thread(tsk);

	/*
	 * Flush inherited counters to the parent - before the parent
	 * gets woken up by child-exit notifications.
	 *
	 * because of cgroup mode, must be called before cgroup_exit()
	 */
	perf_event_exit_task(tsk);

	sched_autogroup_exit_task(tsk);
	cgroup_exit(tsk);

	/*
	 * FIXME: do that only when needed, using sched_exit tracepoint
	 */
	flush_ptrace_hw_breakpoint(tsk);

	exit_tasks_rcu_start();
	exit_notify(tsk, group_dead);
	proc_exit_connector(tsk);
	mpol_put_task_policy(tsk);
#ifdef CONFIG_FUTEX
	if (unlikely(current->pi_state_cache))
		kfree(current->pi_state_cache);
#endif
	/*
	 * Make sure we are holding no locks:
	 */
	debug_check_no_locks_held();

	if (tsk->io_context)
		exit_io_context(tsk);

	if (tsk->splice_pipe)
		free_pipe_info(tsk->splice_pipe);

	if (tsk->task_frag.page)
		put_page(tsk->task_frag.page);

	validate_creds_for_do_exit(tsk);

	check_stack_usage();
	preempt_disable();
	if (tsk->nr_dirtied)
		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
	exit_rcu();
	exit_tasks_rcu_finish();

	lockdep_free_task(tsk);
	do_task_dead();
}
EXPORT_SYMBOL_GPL(do_exit);

void complete_and_exit(struct completion *comp, long code)
{
	if (comp)
		complete(comp);

	do_exit(code);
}
EXPORT_SYMBOL(complete_and_exit);
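
/*
 * Typical use, sketched with an illustrative completion name: a module
 * that must not be unloaded while its kernel thread still runs lets the
 * thread signal completion and exit without ever returning into module
 * text:
 *
 *	static DECLARE_COMPLETION(thread_done);
 *
 *	static int my_thread(void *unused)
 *	{
 *		// ... do work ...
 *		complete_and_exit(&thread_done, 0);	// never returns
 *	}
 *
 * The module's exit path then does wait_for_completion(&thread_done)
 * before letting its text go away.
 */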

SYSCALL_DEFINE1(exit, int, error_code)
{
	do_exit((error_code&0xff)<<8);
}

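/*
 * Worked example of the (error_code & 0xff) << 8 encoding: exit(3) stores
 * 0x0300 as the wait status, so the parent's WIFEXITED(status) is true and
 * WEXITSTATUS(status) recovers 3. The low byte stays clear because it is
 * reserved for the terminating signal and core-dump flag; compare the
 * (status & 0x7f) and (status & 0x80) tests in wait_task_zombie() below.
 */
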
/*
 * Take down every thread in the group.  This is called by fatal signals
 * as well as by sys_exit_group (below).
 */
void
do_group_exit(int exit_code)
{
	struct signal_struct *sig = current->signal;

	BUG_ON(exit_code & 0x80); /* core dumps don't get here */

	if (signal_group_exit(sig))
		exit_code = sig->group_exit_code;
	else if (!thread_group_empty(current)) {
		struct sighand_struct *const sighand = current->sighand;

		spin_lock_irq(&sighand->siglock);
		if (signal_group_exit(sig))
			/* Another thread got here before we took the lock.  */
			exit_code = sig->group_exit_code;
		else {
			sig->group_exit_code = exit_code;
			sig->flags = SIGNAL_GROUP_EXIT;
			zap_other_threads(current);
		}
		spin_unlock_irq(&sighand->siglock);
	}

	do_exit(exit_code);
	/* NOTREACHED */
}

/*
 * this kills every thread in the thread group. Note that any externally
 * wait4()-ing process will get the correct exit code - even if this
 * thread is not the thread group leader.
 */
SYSCALL_DEFINE1(exit_group, int, error_code)
{
	do_group_exit((error_code & 0xff) << 8);
	/* NOTREACHED */
	return 0;
}

struct waitid_info {
	pid_t pid;
	uid_t uid;
	int status;
	int cause;
};

struct wait_opts {
	enum pid_type		wo_type;
	int			wo_flags;
	struct pid		*wo_pid;

	struct waitid_info	*wo_info;
	int			wo_stat;
	struct rusage		*wo_rusage;

	wait_queue_entry_t	child_wait;
	int			notask_error;
};

static int eligible_pid(struct wait_opts *wo, struct task_struct *p)
{
	return	wo->wo_type == PIDTYPE_MAX ||
		task_pid_type(p, wo->wo_type) == wo->wo_pid;
}

static int
eligible_child(struct wait_opts *wo, bool ptrace, struct task_struct *p)
{
	if (!eligible_pid(wo, p))
		return 0;

	/*
	 * Wait for all children (clone and not) if __WALL is set or
	 * if it is traced by us.
	 */
	if (ptrace || (wo->wo_flags & __WALL))
		return 1;

	/*
	 * Otherwise, wait for clone children *only* if __WCLONE is set;
	 * otherwise, wait for non-clone children *only*.
	 *
	 * Note: a "clone" child here is one that reports to its parent
	 * using a signal other than SIGCHLD, or a non-leader thread which
	 * we can only see if it is traced by us.
	 */
	if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE))
		return 0;

	return 1;
}
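
/*
 * The XOR above as a truth table (derived from the code, for reference):
 *
 *	p->exit_signal	__WCLONE	result
 *	SIGCHLD		clear		wait  (normal child)
 *	SIGCHLD		set		skip
 *	other / none	clear		skip
 *	other / none	set		wait  ("clone" child)
 */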
971
1da177e4
LT
972/*
973 * Handle sys_wait4 work for one task in state EXIT_ZOMBIE. We hold
974 * read_lock(&tasklist_lock) on entry. If we return zero, we still hold
975 * the lock and this task is uninteresting. If we return nonzero, we have
976 * released the lock and the system call should return.
977 */
9e8ae01d 978static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1da177e4 979{
67d7ddde 980 int state, status;
6c5f3e7b 981 pid_t pid = task_pid_vnr(p);
43e13cc1 982 uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
67d7ddde 983 struct waitid_info *infop;
1da177e4 984
9e8ae01d 985 if (!likely(wo->wo_flags & WEXITED))
98abed02
RM
986 return 0;
987
9e8ae01d 988 if (unlikely(wo->wo_flags & WNOWAIT)) {
76d9871e 989 status = p->exit_code;
1da177e4
LT
990 get_task_struct(p);
991 read_unlock(&tasklist_lock);
1029a2b5 992 sched_annotate_sleep();
e61a2502
AV
993 if (wo->wo_rusage)
994 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
bb380ec3 995 put_task_struct(p);
76d9871e 996 goto out_info;
1da177e4 997 }
1da177e4 998 /*
abd50b39 999 * Move the task's state to DEAD/TRACE, only one thread can do this.
1da177e4 1000 */
f6507f83
ON
1001 state = (ptrace_reparented(p) && thread_group_leader(p)) ?
1002 EXIT_TRACE : EXIT_DEAD;
abd50b39 1003 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
1da177e4 1004 return 0;
986094df
ON
1005 /*
1006 * We own this thread, nobody else can reap it.
1007 */
1008 read_unlock(&tasklist_lock);
1009 sched_annotate_sleep();
f6507f83 1010
befca967 1011 /*
f6507f83 1012 * Check thread_group_leader() to exclude the traced sub-threads.
befca967 1013 */
f6507f83 1014 if (state == EXIT_DEAD && thread_group_leader(p)) {
f953ccd0
ON
1015 struct signal_struct *sig = p->signal;
1016 struct signal_struct *psig = current->signal;
1f10206c 1017 unsigned long maxrss;
5613fda9 1018 u64 tgutime, tgstime;
3795e161 1019
1da177e4
LT
1020 /*
1021 * The resource counters for the group leader are in its
1022 * own task_struct. Those for dead threads in the group
1023 * are in its signal_struct, as are those for the child
1024 * processes it has previously reaped. All these
1025 * accumulate in the parent's signal_struct c* fields.
1026 *
1027 * We don't bother to take a lock here to protect these
f953ccd0
ON
1028 * p->signal fields because the whole thread group is dead
1029 * and nobody can change them.
1030 *
1031 * psig->stats_lock also protects us from our sub-theads
1032 * which can reap other children at the same time. Until
1033 * we change k_getrusage()-like users to rely on this lock
1034 * we have to take ->siglock as well.
0cf55e1e 1035 *
a0be55de
IA
1036 * We use thread_group_cputime_adjusted() to get times for
1037 * the thread group, which consolidates times for all threads
1038 * in the group including the group leader.
1da177e4 1039 */
e80d0a1a 1040 thread_group_cputime_adjusted(p, &tgutime, &tgstime);
f953ccd0 1041 spin_lock_irq(&current->sighand->siglock);
e78c3496 1042 write_seqlock(&psig->stats_lock);
64861634
MS
1043 psig->cutime += tgutime + sig->cutime;
1044 psig->cstime += tgstime + sig->cstime;
6fac4829 1045 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime;
3795e161
JJ
1046 psig->cmin_flt +=
1047 p->min_flt + sig->min_flt + sig->cmin_flt;
1048 psig->cmaj_flt +=
1049 p->maj_flt + sig->maj_flt + sig->cmaj_flt;
1050 psig->cnvcsw +=
1051 p->nvcsw + sig->nvcsw + sig->cnvcsw;
1052 psig->cnivcsw +=
1053 p->nivcsw + sig->nivcsw + sig->cnivcsw;
6eaeeaba
ED
1054 psig->cinblock +=
1055 task_io_get_inblock(p) +
1056 sig->inblock + sig->cinblock;
1057 psig->coublock +=
1058 task_io_get_oublock(p) +
1059 sig->oublock + sig->coublock;
1f10206c
JP
1060 maxrss = max(sig->maxrss, sig->cmaxrss);
1061 if (psig->cmaxrss < maxrss)
1062 psig->cmaxrss = maxrss;
5995477a
AR
1063 task_io_accounting_add(&psig->ioac, &p->ioac);
1064 task_io_accounting_add(&psig->ioac, &sig->ioac);
e78c3496 1065 write_sequnlock(&psig->stats_lock);
f953ccd0 1066 spin_unlock_irq(&current->sighand->siglock);
1da177e4
LT
1067 }
1068
ce72a16f
AV
1069 if (wo->wo_rusage)
1070 getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
1da177e4
LT
1071 status = (p->signal->flags & SIGNAL_GROUP_EXIT)
1072 ? p->signal->group_exit_code : p->exit_code;
359566fa 1073 wo->wo_stat = status;
2f4e6e2a 1074
b4360690 1075 if (state == EXIT_TRACE) {
1da177e4 1076 write_lock_irq(&tasklist_lock);
2f4e6e2a
ON
1077 /* We dropped tasklist, ptracer could die and untrace */
1078 ptrace_unlink(p);
b4360690
ON
1079
1080 /* If parent wants a zombie, don't release it now */
1081 state = EXIT_ZOMBIE;
1082 if (do_notify_parent(p, p->exit_signal))
1083 state = EXIT_DEAD;
abd50b39 1084 p->exit_state = state;
1da177e4
LT
1085 write_unlock_irq(&tasklist_lock);
1086 }
abd50b39 1087 if (state == EXIT_DEAD)
1da177e4 1088 release_task(p);
2f4e6e2a 1089
76d9871e
AV
1090out_info:
1091 infop = wo->wo_info;
1092 if (infop) {
1093 if ((status & 0x7f) == 0) {
1094 infop->cause = CLD_EXITED;
1095 infop->status = status >> 8;
1096 } else {
1097 infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED;
1098 infop->status = status & 0x7f;
1099 }
1100 infop->pid = pid;
1101 infop->uid = uid;
1102 }
1103
67d7ddde 1104 return pid;
1da177e4
LT
1105}

static int *task_stopped_code(struct task_struct *p, bool ptrace)
{
	if (ptrace) {
		if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING))
			return &p->exit_code;
	} else {
		if (p->signal->flags & SIGNAL_STOP_STOPPED)
			return &p->signal->group_exit_code;
	}
	return NULL;
}

/**
 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
 * @wo: wait options
 * @ptrace: is the wait for ptrace
 * @p: task to wait for
 *
 * Handle sys_wait4() work for %p in state %TASK_STOPPED or %TASK_TRACED.
 *
 * CONTEXT:
 * read_lock(&tasklist_lock), which is released if return value is
 * non-zero.  Also, grabs and releases @p->sighand->siglock.
 *
 * RETURNS:
 * 0 if wait condition didn't exist and search for other wait conditions
 * should continue.  Non-zero return, -errno on failure and @p's pid on
 * success, implies that tasklist_lock is released and wait condition
 * search should terminate.
 */
static int wait_task_stopped(struct wait_opts *wo,
				int ptrace, struct task_struct *p)
{
	struct waitid_info *infop;
	int exit_code, *p_code, why;
	uid_t uid = 0; /* unneeded, required by compiler */
	pid_t pid;

	/*
	 * Traditionally we see ptrace'd stopped tasks regardless of options.
	 */
	if (!ptrace && !(wo->wo_flags & WUNTRACED))
		return 0;

	if (!task_stopped_code(p, ptrace))
		return 0;

	exit_code = 0;
	spin_lock_irq(&p->sighand->siglock);

	p_code = task_stopped_code(p, ptrace);
	if (unlikely(!p_code))
		goto unlock_sig;

	exit_code = *p_code;
	if (!exit_code)
		goto unlock_sig;

	if (!unlikely(wo->wo_flags & WNOWAIT))
		*p_code = 0;

	uid = from_kuid_munged(current_user_ns(), task_uid(p));
unlock_sig:
	spin_unlock_irq(&p->sighand->siglock);
	if (!exit_code)
		return 0;

	/*
	 * Now we are pretty sure this task is interesting.
	 * Make sure it doesn't get reaped out from under us while we
	 * give up the lock and then examine it below.  We don't want to
	 * keep holding onto the tasklist_lock while we call getrusage and
	 * possibly take page faults for user memory.
	 */
	get_task_struct(p);
	pid = task_pid_vnr(p);
	why = ptrace ? CLD_TRAPPED : CLD_STOPPED;
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	if (likely(!(wo->wo_flags & WNOWAIT)))
		wo->wo_stat = (exit_code << 8) | 0x7f;

	infop = wo->wo_info;
	if (infop) {
		infop->cause = why;
		infop->status = exit_code;
		infop->pid = pid;
		infop->uid = uid;
	}
	return pid;
}

/*
 * Handle do_wait work for one task in a live, non-stopped state.
 * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
 * the lock and this task is uninteresting.  If we return nonzero, we have
 * released the lock and the system call should return.
 */
static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
{
	struct waitid_info *infop;
	pid_t pid;
	uid_t uid;

	if (!unlikely(wo->wo_flags & WCONTINUED))
		return 0;

	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
		return 0;

	spin_lock_irq(&p->sighand->siglock);
	/* Re-check with the lock held.  */
	if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) {
		spin_unlock_irq(&p->sighand->siglock);
		return 0;
	}
	if (!unlikely(wo->wo_flags & WNOWAIT))
		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
	uid = from_kuid_munged(current_user_ns(), task_uid(p));
	spin_unlock_irq(&p->sighand->siglock);

	pid = task_pid_vnr(p);
	get_task_struct(p);
	read_unlock(&tasklist_lock);
	sched_annotate_sleep();
	if (wo->wo_rusage)
		getrusage(p, RUSAGE_BOTH, wo->wo_rusage);
	put_task_struct(p);

	infop = wo->wo_info;
	if (!infop) {
		wo->wo_stat = 0xffff;
	} else {
		infop->cause = CLD_CONTINUED;
		infop->pid = pid;
		infop->uid = uid;
		infop->status = SIGCONT;
	}
	return pid;
}

/*
 * Consider @p for a wait by @parent.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue;
 * then ->notask_error is 0 if @p is an eligible child,
 * or still -ECHILD.
 */
static int wait_consider_task(struct wait_opts *wo, int ptrace,
				struct task_struct *p)
{
	/*
	 * We can race with wait_task_zombie() from another thread.
	 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
	 * can't confuse the checks below.
	 */
	int exit_state = READ_ONCE(p->exit_state);
	int ret;

	if (unlikely(exit_state == EXIT_DEAD))
		return 0;

	ret = eligible_child(wo, ptrace, p);
	if (!ret)
		return ret;

	if (unlikely(exit_state == EXIT_TRACE)) {
		/*
		 * ptrace == 0 means we are the natural parent. In this case
		 * we should clear notask_error, debugger will notify us.
		 */
		if (likely(!ptrace))
			wo->notask_error = 0;
		return 0;
	}

	if (likely(!ptrace) && unlikely(p->ptrace)) {
		/*
		 * If it is traced by its real parent's group, just pretend
		 * the caller is ptrace_do_wait() and reap this child if it
		 * is zombie.
		 *
		 * This also hides group stop state from real parent; otherwise
		 * a single stop can be reported twice as group and ptrace stop.
		 * If a ptracer wants to distinguish these two events for its
		 * own children it should create a separate process which takes
		 * the role of real parent.
		 */
		if (!ptrace_reparented(p))
			ptrace = 1;
	}

	/* slay zombie? */
	if (exit_state == EXIT_ZOMBIE) {
		/* we don't reap group leaders with subthreads */
		if (!delay_group_leader(p)) {
			/*
			 * A zombie ptracee is only visible to its ptracer.
			 * Notification and reaping will be cascaded to the
			 * real parent when the ptracer detaches.
			 */
			if (unlikely(ptrace) || likely(!p->ptrace))
				return wait_task_zombie(wo, p);
		}

		/*
		 * Allow access to stopped/continued state via zombie by
		 * falling through.  Clearing of notask_error is complex.
		 *
		 * When !@ptrace:
		 *
		 * If WEXITED is set, notask_error should naturally be
		 * cleared.  If not, subset of WSTOPPED|WCONTINUED is set,
		 * so, if there are live subthreads, there are events to
		 * wait for.  If all subthreads are dead, it's still safe
		 * to clear - this function will be called again in a finite
		 * amount of time once all the subthreads are released and
		 * will then return without clearing.
		 *
		 * When @ptrace:
		 *
		 * Stopped state is per-task and thus can't change once the
		 * target task dies.  Only continued and exited can happen.
		 * Clear notask_error if WCONTINUED | WEXITED.
		 */
		if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED)))
			wo->notask_error = 0;
	} else {
		/*
		 * @p is alive and it's gonna stop, continue or exit, so
		 * there always is something to wait for.
		 */
		wo->notask_error = 0;
	}

	/*
	 * Wait for stopped.  Depending on @ptrace, different stopped state
	 * is used and the two don't interact with each other.
	 */
	ret = wait_task_stopped(wo, ptrace, p);
	if (ret)
		return ret;

	/*
	 * Wait for continued.  There's only one continued state and the
	 * ptracer can consume it which can confuse the real parent.  Don't
	 * use WCONTINUED from ptracer.  You don't need or want it.
	 */
	return wait_task_continued(wo, p);
}

/*
 * Do the work of do_wait() for one thread in the group, @tsk.
 *
 * -ECHILD should be in ->notask_error before the first call.
 * Returns nonzero for a final return, when we have unlocked tasklist_lock.
 * Returns zero if the search for a child should continue; then
 * ->notask_error is 0 if there were any eligible children,
 * or still -ECHILD.
 */
static int do_wait_thread(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->children, sibling) {
		int ret = wait_consider_task(wo, 0, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int ptrace_do_wait(struct wait_opts *wo, struct task_struct *tsk)
{
	struct task_struct *p;

	list_for_each_entry(p, &tsk->ptraced, ptrace_entry) {
		int ret = wait_consider_task(wo, 1, p);

		if (ret)
			return ret;
	}

	return 0;
}

static int child_wait_callback(wait_queue_entry_t *wait, unsigned mode,
				int sync, void *key)
{
	struct wait_opts *wo = container_of(wait, struct wait_opts,
						child_wait);
	struct task_struct *p = key;

	if (!eligible_pid(wo, p))
		return 0;

	if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent)
		return 0;

	return default_wake_function(wait, mode, sync, key);
}

void __wake_up_parent(struct task_struct *p, struct task_struct *parent)
{
	__wake_up_sync_key(&parent->signal->wait_chldexit,
			   TASK_INTERRUPTIBLE, p);
}
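
/*
 * How the pieces above fit together, descriptively: a parent sleeping in
 * do_wait() is queued on its signal->wait_chldexit with
 * child_wait_callback() as the wake function. do_notify_parent() lands
 * here with the child as @p, and the callback lets only waiters whose
 * wait_opts actually match @p through to default_wake_function(), so
 * unrelated wait4()/waitid() callers are left asleep.
 */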

static long do_wait(struct wait_opts *wo)
{
	struct task_struct *tsk;
	int retval;

	trace_sched_process_wait(wo->wo_pid);

	init_waitqueue_func_entry(&wo->child_wait, child_wait_callback);
	wo->child_wait.private = current;
	add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
repeat:
	/*
	 * If there is nothing that can match our criteria, just get out.
	 * We will clear ->notask_error to zero if we see any child that
	 * might later match our criteria, even if we are not able to reap
	 * it yet.
	 */
	wo->notask_error = -ECHILD;
	if ((wo->wo_type < PIDTYPE_MAX) &&
	   (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type)))
		goto notask;

	set_current_state(TASK_INTERRUPTIBLE);
	read_lock(&tasklist_lock);
	tsk = current;
	do {
		retval = do_wait_thread(wo, tsk);
		if (retval)
			goto end;

		retval = ptrace_do_wait(wo, tsk);
		if (retval)
			goto end;

		if (wo->wo_flags & __WNOTHREAD)
			break;
	} while_each_thread(current, tsk);
	read_unlock(&tasklist_lock);

notask:
	retval = wo->notask_error;
	if (!retval && !(wo->wo_flags & WNOHANG)) {
		retval = -ERESTARTSYS;
		if (!signal_pending(current)) {
			schedule();
			goto repeat;
		}
	}
end:
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait);
	return retval;
}

static long kernel_waitid(int which, pid_t upid, struct waitid_info *infop,
			  int options, struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;
	unsigned int f_flags = 0;

	if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;
	if (!(options & (WEXITED|WSTOPPED|WCONTINUED)))
		return -EINVAL;

	switch (which) {
	case P_ALL:
		type = PIDTYPE_MAX;
		break;
	case P_PID:
		type = PIDTYPE_PID;
		if (upid <= 0)
			return -EINVAL;

		pid = find_get_pid(upid);
		break;
	case P_PGID:
		type = PIDTYPE_PGID;
		if (upid < 0)
			return -EINVAL;

		if (upid)
			pid = find_get_pid(upid);
		else
			pid = get_task_pid(current, PIDTYPE_PGID);
		break;
	case P_PIDFD:
		type = PIDTYPE_PID;
		if (upid < 0)
			return -EINVAL;

		pid = pidfd_get_pid(upid, &f_flags);
		if (IS_ERR(pid))
			return PTR_ERR(pid);

		break;
	default:
		return -EINVAL;
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options;
	wo.wo_info	= infop;
	wo.wo_rusage	= ru;
	if (f_flags & O_NONBLOCK)
		wo.wo_flags |= WNOHANG;

	ret = do_wait(&wo);
	if (!ret && !(options & WNOHANG) && (f_flags & O_NONBLOCK))
		ret = -EAGAIN;

	put_pid(pid);
	return ret;
}
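
/*
 * Userspace sketch of the P_PIDFD branch (illustrative, error handling
 * elided): a pidfd from pidfd_open(2), or from clone(2) with CLONE_PIDFD,
 * can be waited on directly:
 *
 *	int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *	siginfo_t info;
 *
 *	waitid(P_PIDFD, pidfd, &info, WEXITED);
 *	// info.si_code == CLD_EXITED, info.si_status == exit status
 *
 * If the pidfd was opened with O_NONBLOCK, the f_flags handling above
 * implies WNOHANG and a not-yet-exited child yields -EAGAIN instead of
 * blocking.
 */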

SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *,
		infop, int, options, struct rusage __user *, ru)
{
	struct rusage r;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, upid, &info, options, ru ? &r : NULL);
	int signo = 0;

	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}

long kernel_wait4(pid_t upid, int __user *stat_addr, int options,
		  struct rusage *ru)
{
	struct wait_opts wo;
	struct pid *pid = NULL;
	enum pid_type type;
	long ret;

	if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
			__WNOTHREAD|__WCLONE|__WALL))
		return -EINVAL;

	/* -INT_MIN is not defined */
	if (upid == INT_MIN)
		return -ESRCH;

	if (upid == -1)
		type = PIDTYPE_MAX;
	else if (upid < 0) {
		type = PIDTYPE_PGID;
		pid = find_get_pid(-upid);
	} else if (upid == 0) {
		type = PIDTYPE_PGID;
		pid = get_task_pid(current, PIDTYPE_PGID);
	} else /* upid > 0 */ {
		type = PIDTYPE_PID;
		pid = find_get_pid(upid);
	}

	wo.wo_type	= type;
	wo.wo_pid	= pid;
	wo.wo_flags	= options | WEXITED;
	wo.wo_info	= NULL;
	wo.wo_stat	= 0;
	wo.wo_rusage	= ru;
	ret = do_wait(&wo);
	put_pid(pid);
	if (ret > 0 && stat_addr && put_user(wo.wo_stat, stat_addr))
		ret = -EFAULT;

	return ret;
}

int kernel_wait(pid_t pid, int *stat)
{
	struct wait_opts wo = {
		.wo_type	= PIDTYPE_PID,
		.wo_pid		= find_get_pid(pid),
		.wo_flags	= WEXITED,
	};
	int ret;

	ret = do_wait(&wo);
	if (ret > 0 && wo.wo_stat)
		*stat = wo.wo_stat;
	put_pid(wo.wo_pid);
	return ret;
}
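
/*
 * Minimal sketch of an in-kernel caller (helper_pid is illustrative):
 * code that forked a helper with kernel_thread() can collect its raw
 * wait status, in the same encoding that wait4(2) reports:
 *
 *	int status;
 *
 *	if (kernel_wait(helper_pid, &status) > 0)
 *		pr_debug("helper wait status: 0x%x\n", status);
 */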

SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
		int, options, struct rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(upid, stat_addr, options, ru ? &r : NULL);

	if (err > 0) {
		if (ru && copy_to_user(ru, &r, sizeof(struct rusage)))
			return -EFAULT;
	}
	return err;
}

#ifdef __ARCH_WANT_SYS_WAITPID

/*
 * sys_waitpid() remains for compatibility. waitpid() should be
 * implemented by calling sys_wait4() from libc.a.
 */
SYSCALL_DEFINE3(waitpid, pid_t, pid, int __user *, stat_addr, int, options)
{
	return kernel_wait4(pid, stat_addr, options, NULL);
}

#endif

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE4(wait4,
	compat_pid_t, pid,
	compat_uint_t __user *, stat_addr,
	int, options,
	struct compat_rusage __user *, ru)
{
	struct rusage r;
	long err = kernel_wait4(pid, stat_addr, options, ru ? &r : NULL);
	if (err > 0) {
		if (ru && put_compat_rusage(&r, ru))
			return -EFAULT;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE5(waitid,
		int, which, compat_pid_t, pid,
		struct compat_siginfo __user *, infop, int, options,
		struct compat_rusage __user *, uru)
{
	struct rusage ru;
	struct waitid_info info = {.status = 0};
	long err = kernel_waitid(which, pid, &info, options, uru ? &ru : NULL);
	int signo = 0;
	if (err > 0) {
		signo = SIGCHLD;
		err = 0;
		if (uru) {
			/* kernel_waitid() overwrites everything in ru */
			if (COMPAT_USE_64BIT_TIME)
				err = copy_to_user(uru, &ru, sizeof(ru));
			else
				err = put_compat_rusage(&ru, uru);
			if (err)
				return -EFAULT;
		}
	}

	if (!infop)
		return err;

	if (!user_write_access_begin(infop, sizeof(*infop)))
		return -EFAULT;

	unsafe_put_user(signo, &infop->si_signo, Efault);
	unsafe_put_user(0, &infop->si_errno, Efault);
	unsafe_put_user(info.cause, &infop->si_code, Efault);
	unsafe_put_user(info.pid, &infop->si_pid, Efault);
	unsafe_put_user(info.uid, &infop->si_uid, Efault);
	unsafe_put_user(info.status, &infop->si_status, Efault);
	user_write_access_end();
	return err;
Efault:
	user_write_access_end();
	return -EFAULT;
}
#endif

/**
 * thread_group_exited - check that a thread group has exited
 * @pid: tgid of thread group to be checked.
 *
 * Test if the thread group represented by tgid has exited (all
 * threads are zombies, dead or completely gone).
 *
 * Return: true if the thread group has exited. false otherwise.
 */
bool thread_group_exited(struct pid *pid)
{
	struct task_struct *task;
	bool exited;

	rcu_read_lock();
	task = pid_task(pid, PIDTYPE_PID);
	exited = !task ||
		(READ_ONCE(task->exit_state) && thread_group_empty(task));
	rcu_read_unlock();

	return exited;
}
EXPORT_SYMBOL(thread_group_exited);

__weak void abort(void)
{
	BUG();

	/* if that doesn't kill us, halt */
	panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);