Merge SA_IMMUTABLE-fixes-for-v5.16-rc2
authorEric W. Biederman <ebiederm@xmission.com>
Fri, 3 Dec 2021 18:39:58 +0000 (12:39 -0600)
committerEric W. Biederman <ebiederm@xmission.com>
Fri, 3 Dec 2021 21:36:59 +0000 (15:36 -0600)
I completed the first batch of signal changes for v5.17 against
v5.16-rc1 before the SA_IMMUTABLE fixes were completed, which leaves
me with two lines of development that I want on my signal development
branch both rooted at v5.16-rc1.  Especially as I am hoping
to reach the point of being able to remove SA_IMMUTABLE.

Linus merged my SA_IMMUTABLE fixes as:
7af959b5d5c8 ("Merge branch 'SA_IMMUTABLE-fixes-for-v5.16-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace")

To avoid rebasing the development changes that are currently complete I am
merging the work I sent upstream to Linus to make my life simpler.

The SA_IMMUTABLE changes as they are described in Linus's merge commit.

Pull exit-vs-signal handling fixes from Eric Biederman:
 "This is a small set of changes where debuggers were no longer able to
  intercept synchronous SIGTRAP and SIGSEGV, introduced by the exit
  cleanups.

  This is essentially the change you suggested with all of i's dotted
  and the t's crossed so that ptrace can intercept all of the cases it
  has been able to intercept the past, and all of the cases that made it
  to exit without giving ptrace a chance still don't give ptrace a
  chance"

* 'SA_IMMUTABLE-fixes-for-v5.16-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/ebiederm/user-namespace:
  signal: Replace force_fatal_sig with force_exit_sig when in doubt
  signal: Don't always set SA_IMMUTABLE for forced signals

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
fs/signalfd.c
include/linux/sched/signal.h
kernel/signal.c

index 040e1cf9052826ef08bf269aeb63d11c073d16b4..74f134cd1ff6b77d38b1a35aa7532010423a72c4 100644 (file)
@@ -165,11 +165,12 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
 static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info,
                                int nonblock)
 {
+       enum pid_type type;
        ssize_t ret;
        DECLARE_WAITQUEUE(wait, current);
 
        spin_lock_irq(&current->sighand->siglock);
-       ret = dequeue_signal(current, &ctx->sigmask, info);
+       ret = dequeue_signal(current, &ctx->sigmask, info, &type);
        switch (ret) {
        case 0:
                if (!nonblock)
@@ -184,7 +185,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info
        add_wait_queue(&current->sighand->signalfd_wqh, &wait);
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);
-               ret = dequeue_signal(current, &ctx->sigmask, info);
+               ret = dequeue_signal(current, &ctx->sigmask, info, &type);
                if (ret != 0)
                        break;
                if (signal_pending(current)) {
index 33a50642cf41c04a538817a5c2507baec91c2a4e..fa26d2a5841306845bde56b1fe03480b14107d5c 100644 (file)
@@ -286,17 +286,18 @@ static inline int signal_group_exit(const struct signal_struct *sig)
 extern void flush_signals(struct task_struct *);
 extern void ignore_signals(struct task_struct *);
 extern void flush_signal_handlers(struct task_struct *, int force_default);
-extern int dequeue_signal(struct task_struct *task,
-                         sigset_t *mask, kernel_siginfo_t *info);
+extern int dequeue_signal(struct task_struct *task, sigset_t *mask,
+                         kernel_siginfo_t *info, enum pid_type *type);
 
 static inline int kernel_dequeue_signal(void)
 {
        struct task_struct *task = current;
        kernel_siginfo_t __info;
+       enum pid_type __type;
        int ret;
 
        spin_lock_irq(&task->sighand->siglock);
-       ret = dequeue_signal(task, &task->blocked, &__info);
+       ret = dequeue_signal(task, &task->blocked, &__info, &__type);
        spin_unlock_irq(&task->sighand->siglock);
 
        return ret;
index a629b11bf3e0d9e5d02ba7341094c8b313ec3244..8272cac5f429503e1e024e71e0b1dd364e99ddd4 100644 (file)
@@ -626,7 +626,8 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
  *
  * All callers have to hold the siglock.
  */
-int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *info)
+int dequeue_signal(struct task_struct *tsk, sigset_t *mask,
+                  kernel_siginfo_t *info, enum pid_type *type)
 {
        bool resched_timer = false;
        int signr;
@@ -634,8 +635,10 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
        /* We only dequeue private signals from ourselves, we don't let
         * signalfd steal them
         */
+       *type = PIDTYPE_PID;
        signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
        if (!signr) {
+               *type = PIDTYPE_TGID;
                signr = __dequeue_signal(&tsk->signal->shared_pending,
                                         mask, info, &resched_timer);
 #ifdef CONFIG_POSIX_TIMERS
@@ -2544,7 +2547,7 @@ static void do_freezer_trap(void)
        freezable_schedule();
 }
 
-static int ptrace_signal(int signr, kernel_siginfo_t *info)
+static int ptrace_signal(int signr, kernel_siginfo_t *info, enum pid_type type)
 {
        /*
         * We do not check sig_kernel_stop(signr) but set this marker
@@ -2584,8 +2587,9 @@ static int ptrace_signal(int signr, kernel_siginfo_t *info)
        }
 
        /* If the (new) signal is now blocked, requeue it.  */
-       if (sigismember(&current->blocked, signr)) {
-               send_signal(signr, info, current, PIDTYPE_PID);
+       if (sigismember(&current->blocked, signr) ||
+           fatal_signal_pending(current)) {
+               send_signal(signr, info, current, type);
                signr = 0;
        }
 
@@ -2684,18 +2688,19 @@ relock:
                goto relock;
        }
 
-       /* Has this task already been marked for death? */
-       if (signal_group_exit(signal)) {
-               ksig->info.si_signo = signr = SIGKILL;
-               sigdelset(&current->pending.signal, SIGKILL);
-               trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
-                               &sighand->action[SIGKILL - 1]);
-               recalc_sigpending();
-               goto fatal;
-       }
-
        for (;;) {
                struct k_sigaction *ka;
+               enum pid_type type;
+
+               /* Has this task already been marked for death? */
+               if (signal_group_exit(signal)) {
+                       ksig->info.si_signo = signr = SIGKILL;
+                       sigdelset(&current->pending.signal, SIGKILL);
+                       trace_signal_deliver(SIGKILL, SEND_SIG_NOINFO,
+                               &sighand->action[SIGKILL - 1]);
+                       recalc_sigpending();
+                       goto fatal;
+               }
 
                if (unlikely(current->jobctl & JOBCTL_STOP_PENDING) &&
                    do_signal_stop(0))
@@ -2728,16 +2733,18 @@ relock:
                 * so that the instruction pointer in the signal stack
                 * frame points to the faulting instruction.
                 */
+               type = PIDTYPE_PID;
                signr = dequeue_synchronous_signal(&ksig->info);
                if (!signr)
-                       signr = dequeue_signal(current, &current->blocked, &ksig->info);
+                       signr = dequeue_signal(current, &current->blocked,
+                                              &ksig->info, &type);
 
                if (!signr)
                        break; /* will return 0 */
 
                if (unlikely(current->ptrace) && (signr != SIGKILL) &&
                    !(sighand->action[signr -1].sa.sa_flags & SA_IMMUTABLE)) {
-                       signr = ptrace_signal(signr, &ksig->info);
+                       signr = ptrace_signal(signr, &ksig->info, type);
                        if (!signr)
                                continue;
                }
@@ -3562,6 +3569,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
        ktime_t *to = NULL, timeout = KTIME_MAX;
        struct task_struct *tsk = current;
        sigset_t mask = *which;
+       enum pid_type type;
        int sig, ret = 0;
 
        if (ts) {
@@ -3578,7 +3586,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
        signotset(&mask);
 
        spin_lock_irq(&tsk->sighand->siglock);
-       sig = dequeue_signal(tsk, &mask, info);
+       sig = dequeue_signal(tsk, &mask, info, &type);
        if (!sig && timeout) {
                /*
                 * None ready, temporarily unblock those we're interested
@@ -3597,7 +3605,7 @@ static int do_sigtimedwait(const sigset_t *which, kernel_siginfo_t *info,
                spin_lock_irq(&tsk->sighand->siglock);
                __set_task_blocked(tsk, &tsk->real_blocked);
                sigemptyset(&tsk->real_blocked);
-               sig = dequeue_signal(tsk, &mask, info);
+               sig = dequeue_signal(tsk, &mask, info, &type);
        }
        spin_unlock_irq(&tsk->sighand->siglock);