[PATCH] kill SET_LINKS/REMOVE_LINKS
[linux-2.6-block.git] / kernel / exit.c

/*
 * linux/kernel/exit.c
 *
 * Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/capability.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/namespace.h>
#include <linux/key.h>
#include <linux/security.h>
#include <linux/cpu.h>
#include <linux/acct.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/ptrace.h>
#include <linux/profile.h>
#include <linux/mount.h>
#include <linux/proc_fs.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/syscalls.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/mutex.h>
#include <linux/futex.h>
#include <linux/compat.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>

extern void sem_exit (void);
extern struct task_struct *child_reaper;

int getrusage(struct task_struct *, int, struct rusage __user *);

static void exit_mm(struct task_struct * tsk);

static void __unhash_process(struct task_struct *p)
{
	nr_threads--;
	detach_pid(p, PIDTYPE_PID);
	detach_pid(p, PIDTYPE_TGID);
	if (thread_group_leader(p)) {
		detach_pid(p, PIDTYPE_PGID);
		detach_pid(p, PIDTYPE_SID);

		list_del_init(&p->tasks);
		if (p->pid)
			__get_cpu_var(process_counts)--;
	}

	remove_parent(p);
}

void release_task(struct task_struct * p)
{
	int zap_leader;
	task_t *leader;
	struct dentry *proc_dentry;

repeat:
	atomic_dec(&p->user->processes);
	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	if (unlikely(p->ptrace))
		__ptrace_unlink(p);
	BUG_ON(!list_empty(&p->ptrace_list) || !list_empty(&p->ptrace_children));
	__exit_signal(p);
	/*
	 * Note that the fastpath in sys_times depends on __exit_signal having
	 * updated the counters before a task is removed from the tasklist of
	 * the process by __unhash_process.
	 */
	__unhash_process(p);

	/*
	 * If we are the last non-leader member of the thread
	 * group, and the leader is zombie, then notify the
	 * group leader's parent process. (if it wants notification.)
	 */
	zap_leader = 0;
	leader = p->group_leader;
	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
		BUG_ON(leader->exit_signal == -1);
		do_notify_parent(leader, leader->exit_signal);
		/*
		 * If we were the last child thread and the leader has
		 * exited already, and the leader's parent ignores SIGCHLD,
		 * then we are the one who should release the leader.
		 *
		 * do_notify_parent() will have marked it self-reaping in
		 * that case.
		 */
		zap_leader = (leader->exit_signal == -1);
	}

	sched_exit(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
	release_thread(p);
	put_task_struct(p);

	p = leader;
	if (unlikely(zap_leader))
		goto repeat;
}
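
/*
 * Illustrative caller sketch (an assumption, not part of this file):
 * release_task() is normally the tail of the wait path, where a parent
 * that has just reaped a zombie child drops the child's final
 * reference, roughly:
 *
 *	if (p->exit_state == EXIT_ZOMBIE) {
 *		... copy exit code and rusage out to userspace ...
 *		release_task(p);
 *	}
 *
 * The zap_leader/goto repeat loop above additionally releases a
 * self-reaping group leader once its last non-leader thread is gone.
 */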

/* we are using it only for SMP init */

void unhash_process(struct task_struct *p)
{
	struct dentry *proc_dentry;

	spin_lock(&p->proc_lock);
	proc_dentry = proc_pid_unhash(p);
	write_lock_irq(&tasklist_lock);
	__unhash_process(p);
	write_unlock_irq(&tasklist_lock);
	spin_unlock(&p->proc_lock);
	proc_pid_flush(proc_dentry);
}

/*
 * This checks not only the pgrp, but falls back on the pid if no
 * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
 * without this...
 */
int session_of_pgrp(int pgrp)
{
	struct task_struct *p;
	int sid = -1;

	read_lock(&tasklist_lock);
	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->signal->session > 0) {
			sid = p->signal->session;
			goto out;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	p = find_task_by_pid(pgrp);
	if (p)
		sid = p->signal->session;
out:
	read_unlock(&tasklist_lock);

	return sid;
}
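
/*
 * Usage sketch (an assumption, not taken from this file): callers use
 * the returned session id to validate a process group against the
 * current task, e.g. a TIOCSPGRP-style permission check might read:
 *
 *	if (session_of_pgrp(pgrp) != current->signal->session)
 *		return -EPERM;
 */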

/*
 * Determine if a process group is "orphaned", according to the POSIX
 * definition in 2.2.2.52.  Orphaned process groups are not to be affected
 * by terminal-generated stop signals.  Newly orphaned process groups are
 * to receive a SIGHUP and a SIGCONT.
 *
 * "I ask you, have you ever known what it is to be an orphan?"
 */
static int will_become_orphaned_pgrp(int pgrp, task_t *ignored_task)
{
	struct task_struct *p;
	int ret = 1;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p == ignored_task
				|| p->exit_state
				|| p->real_parent->pid == 1)
			continue;
		if (process_group(p->real_parent) != pgrp
			    && p->real_parent->signal->session == p->signal->session) {
			ret = 0;
			break;
		}
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return ret;	/* (sighing) "Often!" */
}

int is_orphaned_pgrp(int pgrp)
{
	int retval;

	read_lock(&tasklist_lock);
	retval = will_become_orphaned_pgrp(pgrp, NULL);
	read_unlock(&tasklist_lock);

	return retval;
}
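
/*
 * Worked example (illustrative, not part of the original source): a
 * shell in session S runs a pipeline in its own process group G and
 * then exits.  G's members are reparented to init, which belongs to a
 * different session, so no member of G still has a parent in the same
 * session but a different group - G is now orphaned.  Per the POSIX
 * rule cited above, a newly orphaned G that contains stopped jobs gets
 * SIGHUP and SIGCONT, and terminal-generated stop signals no longer
 * affect it.
 */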

static int has_stopped_jobs(int pgrp)
{
	int retval = 0;
	struct task_struct *p;

	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
		if (p->state != TASK_STOPPED)
			continue;

		/* If p is stopped by a debugger on a signal that won't
		   stop it, then don't count p as stopped.  This isn't
		   perfect but it's a good approximation. */
		if (unlikely (p->ptrace)
		    && p->exit_code != SIGSTOP
		    && p->exit_code != SIGTSTP
		    && p->exit_code != SIGTTOU
		    && p->exit_code != SIGTTIN)
			continue;

		retval = 1;
		break;
	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
	return retval;
}

/**
 * reparent_to_init - Reparent the calling kernel thread to the init task.
 *
 * If a kernel thread is launched as a result of a system call, or if
 * it ever exits, it should generally reparent itself to init so that
 * it is correctly cleaned up on exit.
 *
 * The various task state such as scheduling policy and priority may have
 * been inherited from a user process, so we reset them to sane values here.
 *
 * NOTE that reparent_to_init() gives the caller full capabilities.
 */
static void reparent_to_init(void)
{
	write_lock_irq(&tasklist_lock);

	ptrace_unlink(current);
	/* Reparent to init */
	remove_parent(current);
	current->parent = child_reaper;
	current->real_parent = child_reaper;
	add_parent(current);

	/* Set the exit signal to SIGCHLD so we signal init on exit */
	current->exit_signal = SIGCHLD;

	if ((current->policy == SCHED_NORMAL ||
			current->policy == SCHED_BATCH)
				&& (task_nice(current) < 0))
		set_user_nice(current, 0);
	/* cpus_allowed? */
	/* rt_priority? */
	/* signals? */
	security_task_reparent_to_init(current);
	memcpy(current->signal->rlim, init_task.signal->rlim,
	       sizeof(current->signal->rlim));
	atomic_inc(&(INIT_USER->__count));
	write_unlock_irq(&tasklist_lock);
	switch_uid(INIT_USER);
}

void __set_special_pids(pid_t session, pid_t pgrp)
{
	struct task_struct *curr = current->group_leader;

	if (curr->signal->session != session) {
		detach_pid(curr, PIDTYPE_SID);
		curr->signal->session = session;
		attach_pid(curr, PIDTYPE_SID, session);
	}
	if (process_group(curr) != pgrp) {
		detach_pid(curr, PIDTYPE_PGID);
		curr->signal->pgrp = pgrp;
		attach_pid(curr, PIDTYPE_PGID, pgrp);
	}
}

void set_special_pids(pid_t session, pid_t pgrp)
{
	write_lock_irq(&tasklist_lock);
	__set_special_pids(session, pgrp);
	write_unlock_irq(&tasklist_lock);
}
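
/*
 * Cross-reference (illustrative note, not part of the original
 * source): daemonize() below uses
 *
 *	set_special_pids(1, 1);
 *
 * to move a freshly daemonized kernel thread out of its creator's
 * session and process group and into init's.
 */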

/*
 * Let kernel threads use this to say that they
 * allow a certain signal (since daemonize() will
 * have disabled all of them by default).
 */
int allow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigdelset(&current->blocked, sig);
	if (!current->mm) {
		/* Kernel threads handle their own signals.
		   Let the signal code know it'll be handled, so
		   that they don't get converted to SIGKILL or
		   just silently dropped */
		current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
	}
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(allow_signal);
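
/*
 * Usage sketch (an assumption, not taken from this file): a 2.6-era
 * kernel thread typically daemonizes itself and then re-enables the
 * few signals it actually wants to react to, for example:
 *
 *	daemonize("mythread");
 *	allow_signal(SIGKILL);
 *	while (!signal_pending(current)) {
 *		... do the thread's work ...
 *	}
 *
 * "mythread" and the loop body are placeholders; the point is that
 * allow_signal() re-opens one specific signal after daemonize() has
 * blocked them all.
 */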
int disallow_signal(int sig)
{
	if (!valid_signal(sig) || sig < 1)
		return -EINVAL;

	spin_lock_irq(&current->sighand->siglock);
	sigaddset(&current->blocked, sig);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);
	return 0;
}

EXPORT_SYMBOL(disallow_signal);

/*
 * Put all the gunge required to become a kernel thread without
 * attached user resources in one place where it belongs.
 */

void daemonize(const char *name, ...)
{
	va_list args;
	struct fs_struct *fs;
	sigset_t blocked;

	va_start(args, name);
	vsnprintf(current->comm, sizeof(current->comm), name, args);
	va_end(args);

	/*
	 * If we were started as result of loading a module, close all of the
	 * user space pages.  We don't need them, and if we didn't close them
	 * they would be locked into memory.
	 */
	exit_mm(current);

	set_special_pids(1, 1);
	mutex_lock(&tty_mutex);
	current->signal->tty = NULL;
	mutex_unlock(&tty_mutex);

	/* Block and flush all signals */
	sigfillset(&blocked);
	sigprocmask(SIG_BLOCK, &blocked, NULL);
	flush_signals(current);

	/* Become as one with the init task */

	exit_fs(current);	/* current->fs->count--; */
	fs = init_task.fs;
	current->fs = fs;
	atomic_inc(&fs->count);