/*
 *  linux/kernel/exit.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/posix-timers.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/pipe_fs_i.h>
#include <linux/audit.h> /* for audit_free() */
#include <linux/resource.h>
#include <linux/blkdev.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);
extern struct task_struct *child_reaper;

static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_rcu(&p->tasks);
		__get_cpu_var(process_counts)--;
	}
	list_del_rcu(&p->thread_group);
	remove_parent(p);
}

/*
 * This function expects the tasklist_lock write-locked.
 */
static void __exit_signal(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct sighand_struct *sighand;

	BUG_ON(!sig);
	BUG_ON(!atomic_read(&sig->count));

	rcu_read_lock();
	sighand = rcu_dereference(tsk->sighand);
	spin_lock(&sighand->siglock);

	posix_cpu_timers_exit(tsk);
	if (atomic_dec_and_test(&sig->count))
		posix_cpu_timers_exit_group(tsk);
	else {
		/*
		 * If there is any task waiting for the group exit
		 * then notify it:
		 */
		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count) {
			wake_up_process(sig->group_exit_task);
			sig->group_exit_task = NULL;
		}
		if (tsk == sig->curr_target)
			sig->curr_target = next_thread(tsk);
		/*
		 * Accumulate here the counters for all threads but the
		 * group leader as they die, so they can be added into
		 * the process-wide totals when those are taken.
		 * The group leader stays around as a zombie as long
		 * as there are other threads.  When it gets reaped,
		 * the exit.c code will add its counts into these totals.
		 * We won't ever get here for the group leader, since it
		 * will have been the last reference on the signal_struct.
		 */
		sig->utime = cputime_add(sig->utime, tsk->utime);
		sig->stime = cputime_add(sig->stime, tsk->stime);
		sig->min_flt += tsk->min_flt;
		sig->maj_flt += tsk->maj_flt;
		sig->nvcsw += tsk->nvcsw;
		sig->nivcsw += tsk->nivcsw;
		sig->sched_time += tsk->sched_time;
		sig = NULL; /* Marker for below. */
	}

	__unhash_process(tsk);

	tsk->signal = NULL;
	tsk->sighand = NULL;
	spin_unlock(&sighand->siglock);
	rcu_read_unlock();

	__cleanup_sighand(sighand);
	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
	flush_sigqueue(&tsk->pending);
	if (sig) {
		flush_sigqueue(&sig->shared_pending);
		__cleanup_signal(sig);
	}
}

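/*
 * A minimal sketch of the locking __exit_signal() expects from its caller
 * (release_task() below is the real in-tree user): the write side of
 * tasklist_lock must be held across the call, roughly
 *
 *	write_lock_irq(&tasklist_lock);
 *	__exit_signal(p);
 *	write_unlock_irq(&tasklist_lock);
 */
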
static void delayed_put_task_struct(struct rcu_head *rhp)
{
	put_task_struct(container_of(rhp, struct task_struct, rcu));
}

void release_task(struct task_struct * p)
{
	struct task_struct *leader;
	int zap_leader;
repeat:
	atomic_dec(&p->user->processes);
	write_lock_irq(&tasklist_lock);
	ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	proc_flush_task(p);
	release_thread(p);
	call_rcu(&p->rcu, delayed_put_task_struct);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	int sid = -1;

	read_lock(&tasklist_lock);
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->session > 0) {
			sid = p->signal->session;
			goto out;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	p = find_task_by_pid(pgrp);
	if (p)
		sid = p->signal->session;
out:
	read_unlock(&tasklist_lock);

	return sid;
}

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| is_init(p->real_parent))
			continue;
		if (process_group(p->real_parent) != pgrp
			    && p->real_parent->signal->session == p->signal->session) {
			ret = 0;
			break;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing)  "Often!" */
}

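/*
 * Illustrative (hypothetical) example of the condition above: a shell runs
 * "sleep 100 | cat" in its own process group.  While the shell is alive it
 * is a member's parent in the same session but a different pgrp, so the
 * group is not orphaned and terminal stop signals (^Z) still apply.  Once
 * the shell exits and the members are reparented to init, no such parent
 * remains, the group is orphaned, and stop signals no longer affect it.
 */
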
int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}

static int has_stopped_jobs(int pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;
		retval = 1;
		break;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_init - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
static void reparent_to_init(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	remove_parent(current);
	current->parent = child_reaper;
	current->real_parent = child_reaper;
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if (!has_rt_policy(current) && (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (curr->signal->session != session) {
		detach_pid(curr, PIDTYPE_SID);
		curr->signal->session = session;
		attach_pid(curr, PIDTYPE_SID, session);
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, pgrp);
	}
}

void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);

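/*
 * A minimal usage sketch (hypothetical driver thread, not taken from this
 * file): a kernel thread started with kernel_thread() daemonizes itself,
 * then re-enables the one signal it is prepared to handle:
 *
 *	static int my_thread(void *unused)
 *	{
 *		daemonize("my_thread");
 *		allow_signal(SIGKILL);
 *		while (!signal_pending(current))
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 */
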
int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	mutex_lock(&tty_mutex);
	current->signal->tty = NULL;
	mutex_unlock(&tty_mutex);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);