fs: move dcache sysctls to its own file
[linux-2.6-block.git] / kernel / exit.c
index f975cd8a2ed821302d904a82814a72f01f610ff9..b00a25bb4ab939fb54e29785f9365c01c1447080 100644 (file)
@@ -116,7 +116,7 @@ static void __exit_signal(struct task_struct *tsk)
                 * then notify it:
                 */
                if (sig->notify_count > 0 && !--sig->notify_count)
-                       wake_up_process(sig->group_exit_task);
+                       wake_up_process(sig->group_exec_task);
 
                if (tsk == sig->curr_target)
                        sig->curr_target = next_thread(tsk);
@@ -697,7 +697,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 
        /* mt-exec, de_thread() is waiting for group leader */
        if (unlikely(tsk->signal->notify_count < 0))
-               wake_up_process(tsk->signal->group_exit_task);
+               wake_up_process(tsk->signal->group_exec_task);
        write_unlock_irq(&tasklist_lock);
 
        list_for_each_entry_safe(p, n, &dead, ptrace_entry) {
@@ -737,7 +737,20 @@ void __noreturn do_exit(long code)
 
        WARN_ON(blk_needs_flush_plug(tsk));
 
-       profile_task_exit(tsk);
+       /*
+        * If do_exit is called because this processes oopsed, it's possible
+        * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
+        * continuing. Amongst other possible reasons, this is to prevent
+        * mm_release()->clear_child_tid() from writing to a user-controlled
+        * kernel address.
+        *
+        * On uptodate architectures force_uaccess_begin is a noop.  On
+        * architectures that still have set_fs/get_fs in addition to handling
+        * oopses handles kernel threads that run as set_fs(KERNEL_DS) by
+        * default.
+        */
+       force_uaccess_begin();
+
        kcov_task_exit(tsk);
 
        coredump_task_exit(tsk);
@@ -843,7 +856,6 @@ void __noreturn do_exit(long code)
        lockdep_free_task(tsk);
        do_task_dead();
 }
-EXPORT_SYMBOL_GPL(do_exit);
 
 void __noreturn make_task_dead(int signr)
 {
@@ -863,15 +875,6 @@ void __noreturn make_task_dead(int signr)
        if (unlikely(!tsk->pid))
                panic("Attempted to kill the idle task!");
 
-       /*
-        * If make_task_dead is called because this processes oopsed, it's possible
-        * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
-        * continuing. Amongst other possible reasons, this is to prevent
-        * mm_release()->clear_child_tid() from writing to a user-controlled
-        * kernel address.
-        */
-       force_uaccess_begin();
-
        if (unlikely(in_atomic())) {
                pr_info("note: %s[%d] exited with preempt_count %d\n",
                        current->comm, task_pid_nr(current),
@@ -886,21 +889,14 @@ void __noreturn make_task_dead(int signr)
        if (unlikely(tsk->flags & PF_EXITING)) {
                pr_alert("Fixing recursive fault but reboot is needed!\n");
                futex_exit_recursive(tsk);
+               tsk->exit_state = EXIT_DEAD;
+               refcount_inc(&tsk->rcu_users);
                do_task_dead();
        }
 
        do_exit(signr);
 }
 
-void complete_and_exit(struct completion *comp, long code)
-{
-       if (comp)
-               complete(comp);
-
-       do_exit(code);
-}
-EXPORT_SYMBOL(complete_and_exit);
-
 SYSCALL_DEFINE1(exit, int, error_code)
 {
        do_exit((error_code&0xff)<<8);
@@ -915,17 +911,19 @@ do_group_exit(int exit_code)
 {
        struct signal_struct *sig = current->signal;
 
-       BUG_ON(exit_code & 0x80); /* core dumps don't get here */
-
-       if (signal_group_exit(sig))
+       if (sig->flags & SIGNAL_GROUP_EXIT)
                exit_code = sig->group_exit_code;
+       else if (sig->group_exec_task)
+               exit_code = 0;
        else if (!thread_group_empty(current)) {
                struct sighand_struct *const sighand = current->sighand;
 
                spin_lock_irq(&sighand->siglock);
-               if (signal_group_exit(sig))
+               if (sig->flags & SIGNAL_GROUP_EXIT)
                        /* Another thread got here before we took the lock.  */
                        exit_code = sig->group_exit_code;
+               else if (sig->group_exec_task)
+                       exit_code = 0;
                else {
                        sig->group_exit_code = exit_code;
                        sig->flags = SIGNAL_GROUP_EXIT;
@@ -1020,7 +1018,8 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                return 0;
 
        if (unlikely(wo->wo_flags & WNOWAIT)) {
-               status = p->exit_code;
+               status = (p->signal->flags & SIGNAL_GROUP_EXIT)
+                       ? p->signal->group_exit_code : p->exit_code;
                get_task_struct(p);
                read_unlock(&tasklist_lock);
                sched_annotate_sleep();