[linux-block.git] / fs / coredump.c
index 3224dee44d30ed3206ec3881545a540d6c59db56..a6b3c196cdef54e476a78a4ce09be9655e020d17 100644
@@ -359,7 +359,7 @@ static int zap_process(struct task_struct *start, int exit_code, int flags)
 
        for_each_thread(start, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
-               if (t != current && t->mm) {
+               if (t != current && !(t->flags & PF_POSTCOREDUMP)) {
                        sigaddset(&t->pending.signal, SIGKILL);
                        signal_wake_up(t, 1);
                        nr++;
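
The old test on t->mm only worked because exiting threads used to clear ->mm in exit_mm() before the dump completed; with the reworked exit path the mm stays attached, so the dumper instead skips threads that have already reached the coredump check-in point, marked by PF_POSTCOREDUMP. A rough sketch of where that flag is set, modelled on coredump_task_exit() in kernel/exit.c (abridged, not the verbatim kernel code):

	struct core_state *core_state;

	/* Abridged sketch of the exit side: the flag and the core_state
	 * check happen under siglock, the same lock zap_process() runs
	 * under, so a dying thread is either signalled by the dumper or
	 * checks in on its own, never missed by both. */
	spin_lock_irq(&tsk->sighand->siglock);
	tsk->flags |= PF_POSTCOREDUMP;
	core_state = tsk->signal->core_state;
	spin_unlock_irq(&tsk->sighand->siglock);
	if (core_state) {
		/* join core_state->dumper and park; see the later sketches */
	}
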
@@ -369,99 +369,34 @@ static int zap_process(struct task_struct *start, int exit_code, int flags)
        return nr;
 }
 
-static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
+static int zap_threads(struct task_struct *tsk,
                        struct core_state *core_state, int exit_code)
 {
-       struct task_struct *g, *p;
-       unsigned long flags;
        int nr = -EAGAIN;
 
        spin_lock_irq(&tsk->sighand->siglock);
        if (!signal_group_exit(tsk->signal)) {
-               mm->core_state = core_state;
+               tsk->signal->core_state = core_state;
                tsk->signal->group_exit_task = tsk;
                nr = zap_process(tsk, exit_code, 0);
                clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
+               tsk->flags |= PF_DUMPCORE;
+               atomic_set(&core_state->nr_threads, nr);
        }
        spin_unlock_irq(&tsk->sighand->siglock);
-       if (unlikely(nr < 0))
-               return nr;
-
-       tsk->flags |= PF_DUMPCORE;
-       if (atomic_read(&mm->mm_users) == nr + 1)
-               goto done;
-       /*
-        * We should find and kill all tasks which use this mm, and we should
-        * count them correctly into ->nr_threads. We don't take tasklist
-        * lock, but this is safe wrt:
-        *
-        * fork:
-        *      None of sub-threads can fork after zap_process(leader). All
-        *      processes which were created before this point should be
-        *      visible to zap_threads() because copy_process() adds the new
-        *      process to the tail of init_task.tasks list, and lock/unlock
-        *      of ->siglock provides a memory barrier.
-        *
-        * do_exit:
-        *      The caller holds mm->mmap_lock. This means that the task which
-        *      uses this mm can't pass exit_mm(), so it can't exit or clear
-        *      its ->mm.
-        *
-        * de_thread:
-        *      It does list_replace_rcu(&leader->tasks, &current->tasks),
-        *      we must see either old or new leader, this does not matter.
-        *      However, it can change p->sighand, so lock_task_sighand(p)
-        *      must be used. Since p->mm != NULL and we hold ->mmap_lock
-        *      it can't fail.
-        *
-        *      Note also that "g" can be the old leader with ->mm == NULL
-        *      and already unhashed and thus removed from ->thread_group.
-        *      This is OK, __unhash_process()->list_del_rcu() does not
-        *      clear the ->next pointer, we will find the new leader via
-        *      next_thread().
-        */
-       rcu_read_lock();
-       for_each_process(g) {
-               if (g == tsk->group_leader)
-                       continue;
-               if (g->flags & PF_KTHREAD)
-                       continue;
-
-               for_each_thread(g, p) {
-                       if (unlikely(!p->mm))
-                               continue;
-                       if (unlikely(p->mm == mm)) {
-                               lock_task_sighand(p, &flags);
-                               nr += zap_process(p, exit_code,
-                                                       SIGNAL_GROUP_EXIT);
-                               unlock_task_sighand(p, &flags);
-                       }
-                       break;
-               }
-       }
-       rcu_read_unlock();
-done:
-       atomic_set(&core_state->nr_threads, nr);
        return nr;
 }
 
 static int coredump_wait(int exit_code, struct core_state *core_state)
 {
        struct task_struct *tsk = current;
-       struct mm_struct *mm = tsk->mm;
        int core_waiters = -EBUSY;
 
        init_completion(&core_state->startup);
        core_state->dumper.task = tsk;
        core_state->dumper.next = NULL;
 
-       if (mmap_write_lock_killable(mm))
-               return -EINTR;
-
-       if (!mm->core_state)
-               core_waiters = zap_threads(tsk, mm, core_state, exit_code);
-       mmap_write_unlock(mm);
-
+       core_waiters = zap_threads(tsk, core_state, exit_code);
        if (core_waiters > 0) {
                struct core_thread *ptr;
 
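With core_state hanging off signal_struct, zap_threads() only ever deals with the current thread group: nr counts the other live threads in the group and is published as core_state->nr_threads before siglock is dropped, so coredump_wait() no longer needs mmap_lock at all and waits on core_state->startup until every counted thread has checked in. The check-in side, again as an abridged sketch of coredump_task_exit() rather than the exact kernel code:

	struct core_thread self;

	self.task = current;
	/* publish ourselves on the dumper's list; the xchg() provides
	 * the barrier the dumper relies on when it walks the list */
	self.next = xchg(&core_state->dumper.next, &self);
	/* the last thread to check in lets coredump_wait() proceed */
	if (atomic_dec_and_test(&core_state->nr_threads))
		complete(&core_state->startup);
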
@@ -483,7 +418,7 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
        return core_waiters;
 }
 
-static void coredump_finish(struct mm_struct *mm, bool core_dumped)
+static void coredump_finish(bool core_dumped)
 {
        struct core_thread *curr, *next;
        struct task_struct *task;
@@ -493,22 +428,21 @@ static void coredump_finish(struct mm_struct *mm, bool core_dumped)
                current->signal->group_exit_code |= 0x80;
        current->signal->group_exit_task = NULL;
        current->signal->flags = SIGNAL_GROUP_EXIT;
+       next = current->signal->core_state->dumper.next;
+       current->signal->core_state = NULL;
        spin_unlock_irq(&current->sighand->siglock);
 
-       next = mm->core_state->dumper.next;
        while ((curr = next) != NULL) {
                next = curr->next;
                task = curr->task;
                /*
-                * see exit_mm(), curr->task must not see
+                * see coredump_task_exit(), curr->task must not see
                 * ->task == NULL before we read ->next.
                 */
                smp_mb();
                curr->task = NULL;
                wake_up_process(task);
        }
-
-       mm->core_state = NULL;
 }
 
 static bool dump_interrupted(void)
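
coredump_finish() now detaches the dumper list from signal->core_state under siglock and then wakes each parked thread. The ordering in the loop above matters: curr->next must be loaded before curr->task is cleared, because each core_thread sits on the parked thread's stack and is gone the moment that thread observes task == NULL. The wait loop it pairs with, sketched loosely after coredump_task_exit() (abridged):

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!self.task)		/* cleared by coredump_finish() */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
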
@@ -839,7 +773,7 @@ fail_dropcount:
 fail_unlock:
        kfree(argv);
        kfree(cn.corename);
-       coredump_finish(mm, core_dumped);
+       coredump_finish(core_dumped);
        revert_creds(old_cred);
 fail_creds:
        put_cred(cred);
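
For readers who want to poke at the protocol outside the kernel, here is a small self-contained userspace model of the handshake these hunks implement: worker threads check in on a shared list and the last one releases the dumper, which later walks the list and unparks them, reading next before releasing each entry just as coredump_finish() does. It is an illustration only; the names and the pthread primitives are stand-ins, not kernel APIs.

/* cc -pthread -o coredump_model coredump_model.c */
#include <pthread.h>
#include <stdio.h>

#define NR_WORKERS 4

struct core_thread { struct core_thread *next; int parked; };

static struct core_state {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	struct core_thread *dumper;	/* list of checked-in workers */
	int nr_threads;			/* workers still to check in */
} cs = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, NULL, NR_WORKERS };

/* Plays the role of coredump_task_exit(): check in, then park. */
static void *worker(void *arg)
{
	struct core_thread self = { NULL, 1 };

	pthread_mutex_lock(&cs.lock);
	self.next = cs.dumper;			/* xchg() in the kernel */
	cs.dumper = &self;
	if (--cs.nr_threads == 0)		/* complete(&startup) */
		pthread_cond_broadcast(&cs.cond);
	while (self.parked)			/* wait for the dumper */
		pthread_cond_wait(&cs.cond, &cs.lock);
	pthread_mutex_unlock(&cs.lock);
	return arg;
}

int main(void)
{
	pthread_t tid[NR_WORKERS];
	struct core_thread *curr, *next;
	int i;

	for (i = 0; i < NR_WORKERS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	/* coredump_wait(): block until every worker has checked in. */
	pthread_mutex_lock(&cs.lock);
	while (cs.nr_threads > 0)
		pthread_cond_wait(&cs.cond, &cs.lock);
	printf("dump would run here: %d workers parked\n", NR_WORKERS);

	/*
	 * coredump_finish(): read ->next before unparking each entry.
	 * The mutex makes that safe in this model; the kernel's lockless
	 * version relies on smp_mb() and on clearing ->task only after
	 * ->next was read, since the entry lives on the worker's stack.
	 */
	next = cs.dumper;
	cs.dumper = NULL;
	while ((curr = next) != NULL) {
		next = curr->next;
		curr->parked = 0;
	}
	pthread_cond_broadcast(&cs.cond);
	pthread_mutex_unlock(&cs.lock);

	for (i = 0; i < NR_WORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}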