coredump: ensure all coredumping tasks have SIGNAL_GROUP_COREDUMP
author Oleg Nesterov <oleg@redhat.com>
Sat, 7 Nov 2015 00:32:31 +0000 (16:32 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 7 Nov 2015 01:50:42 +0000 (17:50 -0800)
task_will_free_mem() is wrong in many ways, and in particular the
SIGNAL_GROUP_COREDUMP check is not reliable: a task can participate in a
coredump without the SIGNAL_GROUP_COREDUMP bit set.
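
For context, a minimal sketch of the helper in question as it roughly looks
at this point (paraphrased from include/linux/oom.h; exact details may
differ):

	static inline bool task_will_free_mem(struct task_struct *task)
	{
		/*
		 * Unreliable: before this patch, a process zapped via the
		 * "other CLONE_VM processes" path in zap_threads() got
		 * SIGNAL_GROUP_EXIT, not SIGNAL_GROUP_COREDUMP, set in
		 * ->signal->flags, yet it still participates in the coredump
		 * and can block until the dump completes.
		 */
		return (task->flags & PF_EXITING) &&
			!(task->signal->flags & SIGNAL_GROUP_COREDUMP);
	}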

Change the zap_threads() paths to always set SIGNAL_GROUP_COREDUMP, even if
other CLONE_VM processes can't react to SIGKILL.  Fortunately, at least the
oom-kill case is fine; it kills all tasks sharing the same mm, so it should
also kill the process which actually dumps the core.
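
Concretely, with this patch both zap_threads() call sites go through
zap_process(), which now sets ->signal->flags itself; a sketch of the
resulting state (names as in the hunks below):

	/* the dumping thread group: */
	zap_process(tsk, exit_code, 0);
	/* -> tsk->signal->flags == SIGNAL_GROUP_COREDUMP */

	/* other CLONE_VM processes sharing the same ->mm: */
	zap_process(p, exit_code, SIGNAL_GROUP_EXIT);
	/* -> p->signal->flags == SIGNAL_GROUP_COREDUMP | SIGNAL_GROUP_EXIT */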

The change in prepare_signal() is not strictly necessary; it just ensures
that the patch does not introduce another subtle behavioural change.  But it
reminds us that this SIGNAL_GROUP_EXIT/COREDUMP case needs more changes.
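
A sketch of how the new prepare_signal() test (see the kernel/signal.c hunk
below) classifies the flag combinations that can occur after this patch:

	if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
		/*
		 * COREDUMP only          -> the dumping process; only SIGKILL
		 *                           is of interest
		 * COREDUMP | GROUP_EXIT  -> a CLONE_VM process zapped by
		 *                           zap_threads(); keep the old
		 *                           "already dying" behaviour
		 * GROUP_EXIT only        -> normal group exit; old behaviour
		 */
		if (!(signal->flags & SIGNAL_GROUP_EXIT))
			return sig == SIGKILL;
		/* ... */
	}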

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Kyle Walker <kwalker@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Stanislav Kozina <skozina@redhat.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/coredump.c
kernel/signal.c

index a8f75640ac86ec2d29cd55a253b2c3c12c7bac9b..c66bb0572265c1490adbb48c6cf671dd711db517 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -280,11 +280,13 @@ out:
        return ispipe;
 }
 
-static int zap_process(struct task_struct *start, int exit_code)
+static int zap_process(struct task_struct *start, int exit_code, int flags)
 {
        struct task_struct *t;
        int nr = 0;
 
+       /* ignore all signals except SIGKILL, see prepare_signal() */
+       start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
        start->signal->group_exit_code = exit_code;
        start->signal->group_stop_count = 0;
 
@@ -311,10 +313,8 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        spin_lock_irq(&tsk->sighand->siglock);
        if (!signal_group_exit(tsk->signal)) {
                mm->core_state = core_state;
-               nr = zap_process(tsk, exit_code);
                tsk->signal->group_exit_task = tsk;
-               /* ignore all signals except SIGKILL, see prepare_signal() */
-               tsk->signal->flags = SIGNAL_GROUP_COREDUMP;
+               nr = zap_process(tsk, exit_code, 0);
                clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        }
        spin_unlock_irq(&tsk->sighand->siglock);
@@ -365,8 +365,8 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
                        if (p->mm) {
                                if (unlikely(p->mm == mm)) {
                                        lock_task_sighand(p, &flags);
-                                       nr += zap_process(p, exit_code);
-                                       p->signal->flags = SIGNAL_GROUP_EXIT;
+                                       nr += zap_process(p, exit_code,
+                                                         SIGNAL_GROUP_EXIT);
                                        unlock_task_sighand(p, &flags);
                                }
                                break;
index f2cbd4ed5cd48403c910906194c834ad183f4c2f..c0b01fe24bbd3fae555b2e479e86f9199f3a7c3b 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -788,7 +788,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
        sigset_t flush;
 
        if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
-               if (signal->flags & SIGNAL_GROUP_COREDUMP)
+               if (!(signal->flags & SIGNAL_GROUP_EXIT))
                        return sig == SIGKILL;
                /*
                 * The process is in the middle of dying, nothing to do.