introduce for_other_threads(p, t)
author		Oleg Nesterov <oleg@redhat.com>
		Mon, 30 Oct 2023 15:57:10 +0000 (16:57 +0100)
committer	Andrew Morton <akpm@linux-foundation.org>
		Mon, 11 Dec 2023 01:21:25 +0000 (17:21 -0800)
Cosmetic, but imho it makes the usage look clearer and simpler; the new
helper doesn't require initializing "t".

After this change while_each_thread() has only 3 users, and it is only
used in do/while loops.
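
For illustration only, a minimal sketch of the old and new patterns
(not part of this patch; walk_group() and do_something() are
hypothetical, and rcu_read_lock() stands in for whatever locking the
real caller holds):

	static void walk_group(struct task_struct *p)
	{
		struct task_struct *t;

		rcu_read_lock();

		/* Old pattern: "t" had to be primed before the loop. */
		t = p;
		while_each_thread(p, t)
			do_something(t);	/* every thread except p */

		/* New pattern: for_other_threads() primes "t" itself,
		 * expanding to: for (t = p; (t = next_thread(t)) != p; )
		 */
		for_other_threads(p, t)
			do_something(t);	/* same walk, no manual t = p */

		rcu_read_unlock();
	}

As at the converted call sites, the caller is still expected to hold
rcu_read_lock() or tasklist_lock around the walk.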

Link: https://lkml.kernel.org/r/20231030155710.GA9095@redhat.com
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Christian Brauner <brauner@kernel.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
fs/exec.c
include/linux/sched/signal.h
kernel/signal.c

diff --git a/fs/exec.c b/fs/exec.c
index 4aa19b24f2810a428a91c30ab96f931b5f505954..ee43597cb45311bec9e3bc03269fe62b799a2f57 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1578,11 +1578,10 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
         * will be able to manipulate the current directory, etc.
         * It would be nice to force an unshare instead...
         */
-       t = p;
        n_fs = 1;
        spin_lock(&p->fs->lock);
        rcu_read_lock();
-       while_each_thread(p, t) {
+       for_other_threads(p, t) {
                if (t->fs == p->fs)
                        n_fs++;
        }
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 3499c1a8b9295a0b3366f4261a30eb11c38d577c..41d6759d6a4ac1cfaa6be3e65c4e82e6d7ddc32f 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -646,6 +646,9 @@ extern bool current_is_single_threaded(void);
 #define while_each_thread(g, t) \
        while ((t = next_thread(t)) != g)
 
+#define for_other_threads(p, t)        \
+       for (t = p; (t = next_thread(t)) != p; )
+
 #define __for_each_thread(signal, t)   \
        list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
                lockdep_is_held(&tasklist_lock))
diff --git a/kernel/signal.c b/kernel/signal.c
index 47a7602dfe8df43e786b7b33119b71599372b151..5aa216e841a292dbb570b219f01869765d6d13f5 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1376,12 +1376,12 @@ int force_sig_info(struct kernel_siginfo *info)
  */
 int zap_other_threads(struct task_struct *p)
 {
-       struct task_struct *t = p;
+       struct task_struct *t;
        int count = 0;
 
        p->signal->group_stop_count = 0;
 
-       while_each_thread(p, t) {
+       for_other_threads(p, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                /* Don't require de_thread to wait for the vhost_worker */
                if ((t->flags & (PF_IO_WORKER | PF_USER_WORKER)) != PF_USER_WORKER)
@@ -2465,12 +2465,10 @@ static bool do_signal_stop(int signr)
                        sig->group_exit_code = signr;
 
                sig->group_stop_count = 0;
-
                if (task_set_jobctl_pending(current, signr | gstop))
                        sig->group_stop_count++;
 
-               t = current;
-               while_each_thread(current, t) {
+               for_other_threads(current, t) {
                        /*
                         * Setting state to TASK_STOPPED for a group
                         * stop is always done with the siglock held,
@@ -2966,8 +2964,7 @@ static void retarget_shared_pending(struct task_struct *tsk, sigset_t *which)
        if (sigisemptyset(&retarget))
                return;
 
-       t = tsk;
-       while_each_thread(tsk, t) {
+       for_other_threads(tsk, t) {
                if (t->flags & PF_EXITING)
                        continue;