Merge tag 'signal-for-v5.20' of git://git.kernel.org/pub/scm/linux/kernel/git/ebieder...
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 9 Oct 2022 23:14:15 +0000 (16:14 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 9 Oct 2022 23:14:15 +0000 (16:14 -0700)
Pull ptrace update from Eric Biederman:
 "ptrace: Stop supporting SIGKILL for PTRACE_EVENT_EXIT

  Recently I had a conversation where it was pointed out to me that
  SIGKILL sent to a tracee stopped in PTRACE_EVENT_EXIT is quite
  difficult for a tracer to handle.

  Keeping SIGKILL working after the process has been killed is a pain from
  an implementation point of view.

  So since the debuggers don't want this behavior let's see if we can
  remove this wart for the userspace API.

  If a regression is detected it should only need to be the last change
  that is reverted. The other two are just general cleanups that
  make the last patch simpler"

* tag 'signal-for-v5.20' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace:
  signal: Drop signals received after a fatal signal has been processed
  signal: Guarantee that SIGNAL_GROUP_EXIT is set on process exit
  signal: Ensure SIGNAL_GROUP_EXIT gets set in do_group_exit

fs/coredump.c
include/linux/sched/signal.h
kernel/exit.c
kernel/fork.c
kernel/signal.c

index 3538f3a63965df169906e744a6af9e43dadd1ae0..7c6bb4e9db20dbd1e9890fc4b842c015d08e40b0 100644 (file)
@@ -354,7 +354,7 @@ static int zap_process(struct task_struct *start, int exit_code)
        struct task_struct *t;
        int nr = 0;
 
-       /* ignore all signals except SIGKILL, see prepare_signal() */
+       /* Allow SIGKILL, see prepare_signal() */
        start->signal->flags = SIGNAL_GROUP_EXIT;
        start->signal->group_exit_code = exit_code;
        start->signal->group_stop_count = 0;
index cafbe03eed017224b790714dd9c2dfaa3ae16256..20099268fa257f40d8d45e7fa795cb8492b9ca34 100644 (file)
@@ -94,6 +94,7 @@ struct signal_struct {
        refcount_t              sigcnt;
        atomic_t                live;
        int                     nr_threads;
+       int                     quick_threads;
        struct list_head        thread_head;
 
        wait_queue_head_t       wait_chldexit;  /* for wait4() */
index 84021b24f79e3d1d2dc11fe70bf69cf620c7fc85..4f7424523bac9ae65745d69350217bfa17d56083 100644 (file)
@@ -733,11 +733,29 @@ static void check_stack_usage(void)
 static inline void check_stack_usage(void) {}
 #endif
 
+static void synchronize_group_exit(struct task_struct *tsk, long code)
+{
+       struct sighand_struct *sighand = tsk->sighand;
+       struct signal_struct *signal = tsk->signal;
+
+       spin_lock_irq(&sighand->siglock);
+       signal->quick_threads--;
+       if ((signal->quick_threads == 0) &&
+           !(signal->flags & SIGNAL_GROUP_EXIT)) {
+               signal->flags = SIGNAL_GROUP_EXIT;
+               signal->group_exit_code = code;
+               signal->group_stop_count = 0;
+       }
+       spin_unlock_irq(&sighand->siglock);
+}
+
 void __noreturn do_exit(long code)
 {
        struct task_struct *tsk = current;
        int group_dead;
 
+       synchronize_group_exit(tsk, code);
+
        WARN_ON(tsk->plug);
 
        kcov_task_exit(tsk);
@@ -905,7 +923,7 @@ do_group_exit(int exit_code)
                exit_code = sig->group_exit_code;
        else if (sig->group_exec_task)
                exit_code = 0;
-       else if (!thread_group_empty(current)) {
+       else {
                struct sighand_struct *const sighand = current->sighand;
 
                spin_lock_irq(&sighand->siglock);
index 2b6bd511c6ed1cdff351f63a6f657e912ffe2ac1..0c53b7e62d0108fa272ac26336a013655bee7d48 100644 (file)
@@ -1693,6 +1693,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
                return -ENOMEM;
 
        sig->nr_threads = 1;
+       sig->quick_threads = 1;
        atomic_set(&sig->live, 1);
        refcount_set(&sig->sigcnt, 1);
 
@@ -2460,6 +2461,7 @@ static __latent_entropy struct task_struct *copy_process(
                        __this_cpu_inc(process_counts);
                } else {
                        current->signal->nr_threads++;
+                       current->signal->quick_threads++;
                        atomic_inc(&current->signal->live);
                        refcount_inc(&current->signal->sigcnt);
                        task_join_group_stop(p);
index 6f86fda5e432aeb32ae5dbca2c4264a2781b3dde..8a0f114d00e0f937d5fd4fdee27969c4bd9f7529 100644 (file)
@@ -913,8 +913,9 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
                if (signal->core_state)
                        return sig == SIGKILL;
                /*
-                * The process is in the middle of dying, nothing to do.
+                * The process is in the middle of dying, drop the signal.
                 */
+               return false;
        } else if (sig_kernel_stop(sig)) {
                /*
                 * This is a stop signal.  Remove SIGCONT from all queues.