sched/core: Fix migrate_swap() vs. hotplug
Author:     Peter Zijlstra <peterz@infradead.org>
AuthorDate: Thu, 5 Jun 2025 10:00:09 +0000 (12:00 +0200)
Commit:     Peter Zijlstra <peterz@infradead.org>
CommitDate: Tue, 1 Jul 2025 13:02:03 +0000 (15:02 +0200)
On Mon, Jun 02, 2025 at 03:22:13PM +0800, Kuyo Chang wrote:

> So, the potential race scenario is:
>
>  CPU0                                            CPU1
>  // doing migrate_swap(cpu0/cpu1)
>  stop_two_cpus()
>                                                  ...
>                                                  // doing _cpu_down()
>                                                  sched_cpu_deactivate()
>                                                    set_cpu_active(cpu, false);
>                                                    balance_push_set(cpu, true);
>  cpu_stop_queue_two_works
>      __cpu_stop_queue_work(stopper1,...);
>      __cpu_stop_queue_work(stopper2,..);
>      stop_cpus_in_progress -> true
>      preempt_enable();
>                                                  ...
>                                                  1st balance_push
>                                                  stop_one_cpu_nowait
>                                                  cpu_stop_queue_work
>                                                  __cpu_stop_queue_work
>                                                  list_add_tail  -> 1st add push_work
>                                                  wake_up_q(&wakeq);  -> "wakeq is empty.
>                                                      This implies that the stopper is
>                                                      at wakeq@migrate_swap."
>  preempt_disable
>  wake_up_q(&wakeq);
>      wake_up_process // wakeup migrate/0
>          try_to_wake_up
>              ttwu_queue
>                  ttwu_queue_cond -> meet below case
>                      if (cpu == smp_processor_id())
>                          return false;
>                  ttwu_do_activate
>                  // migrate/0 wakeup done
>      wake_up_process // wakeup migrate/1
>          try_to_wake_up
>              ttwu_queue
>                  ttwu_queue_cond
>                      ttwu_queue_wakelist
>                          __ttwu_queue_wakelist
>                              __smp_call_single_queue
>  preempt_enable();
>
>                                                  2nd balance_push
>                                                  stop_one_cpu_nowait
>                                                  cpu_stop_queue_work
>                                                  __cpu_stop_queue_work
>                                                  list_add_tail  -> 2nd add push_work,
>                                                      so the double list add is detected
>                                                  ...
>                                                  ...
>                                                  cpu1 gets ipi, does sched_ttwu_pending,
>                                                      wakes up migrate/1
>

So this balance_push() is part of schedule(), and schedule() is supposed
to switch to the stopper task, but because of this race condition the
stopper task is stuck in the WAKING state and not actually visible to be
picked.

Therefore CPU1 can do another schedule() and end up doing another
balance_push() even though the last one hasn't finished yet.
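
To make the double add concrete, below is a minimal sketch of the
balance_push() path involved, modelled on kernel/sched/core.c and its
per-CPU push_work item (heavily simplified, not the verbatim kernel
code):

    /*
     * Sketch only: balance_push() reuses a single per-CPU cpu_stop_work
     * for the push callback.
     */
    static DEFINE_PER_CPU(struct cpu_stop_work, push_work);

    static void balance_push(struct rq *rq)
    {
            struct task_struct *push_task = rq->curr;

            /* ... eligibility checks and locking elided ... */

            /*
             * Queue push_cpu_stop() on this CPU's stopper thread:
             * list_add_tail(&work->list, &stopper->works) on the
             * per-CPU push_work entry, then wake the stopper.
             */
            stop_one_cpu_nowait(rq->cpu, push_cpu_stop, push_task,
                                this_cpu_ptr(&push_work));
    }

If the stopper wakeup is delayed as described above, the next schedule()
runs balance_push() again before push_work has been dequeued, and the
second list_add_tail() on the still-listed entry is the double list add
reported in the trace.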

This is a confluence of fail, where both wake_q and ttwu_wakelist can
cause crucial wakeups to be delayed, resulting in the malfunction of
balance_push.

Since there is only a single stopper thread to be woken, the wake_q
doesn't really add anything here, and can be removed in favour of
direct wakeups of the stopper thread.
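
For reference, the reason wake_q can lose this wakeup at all: a task can
sit on at most one wake_q at a time, so a later wake_q_add() for an
already-queued task is silently skipped and its wakeup then belongs to
whoever drains the first wake_q. A minimal sketch of that semantic,
modelled on __wake_q_add() in kernel/sched/core.c (simplified, not the
verbatim code):

    static bool __wake_q_add(struct wake_q_head *head,
                             struct task_struct *task)
    {
            struct wake_q_node *node = &task->wake_q;

            /*
             * Already claimed by another wake_q (here: the one built by
             * migrate_swap()/stop_two_cpus())?  Then this add does
             * nothing, and balance_push()'s wake_up_q() finds an empty
             * queue.
             */
            if (cmpxchg_relaxed(&node->next, NULL, WAKE_Q_TAIL))
                    return false;

            /* Otherwise append; the head is context-local. */
            *head->lastp = node;
            head->lastp = &node->next;
            return true;
    }

A plain wake_up_process(stopper->thread) issued under the same
preempt_disable() section has no such ownership problem, which is what
the stop_machine.c hunks below switch to.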

Then add a clause to ttwu_queue_cond() to ensure the stopper threads
are never queued / delayed.

Of all 3 moving parts, the last addition was the balance_push()
machinery, so pick that as the point the bug was introduced.

Fixes: 2558aacff858 ("sched/hotplug: Ensure only per-cpu kthreads run during hotplug")
Reported-by: Kuyo Chang <kuyo.chang@mediatek.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Kuyo Chang <kuyo.chang@mediatek.com>
Link: https://lkml.kernel.org/r/20250605100009.GO39944@noisy.programming.kicks-ass.net
 kernel/sched/core.c   |  5 +++++
 kernel/stop_machine.c | 20 ++++++++++----------
 2 files changed, 15 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index cd80b66a960a0c5ab5b15baa99fb664f594f4d15..ec68fc686bd74292e0edba3b0229f5db590ab038 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3943,6 +3943,11 @@ static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
        if (!scx_allow_ttwu_queue(p))
                return false;
 
+#ifdef CONFIG_SMP
+       if (p->sched_class == &stop_sched_class)
+               return false;
+#endif
+
        /*
         * Do not complicate things with the async wake_list while the CPU is
         * in hotplug state.
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 5d2d0562115b31701dc67d9760e6715fce75a90c..3fe6b0c99f3d89a4adb3cc3bf18c5b22fa096a3c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -82,18 +82,15 @@ static void cpu_stop_signal_done(struct cpu_stop_done *done)
 }
 
 static void __cpu_stop_queue_work(struct cpu_stopper *stopper,
-                                       struct cpu_stop_work *work,
-                                       struct wake_q_head *wakeq)
+                                 struct cpu_stop_work *work)
 {
        list_add_tail(&work->list, &stopper->works);
-       wake_q_add(wakeq, stopper->thread);
 }
 
 /* queue @work to @stopper.  if offline, @work is completed immediately */
 static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-       DEFINE_WAKE_Q(wakeq);
        unsigned long flags;
        bool enabled;
 
@@ -101,12 +98,13 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
        raw_spin_lock_irqsave(&stopper->lock, flags);
        enabled = stopper->enabled;
        if (enabled)
-               __cpu_stop_queue_work(stopper, work, &wakeq);
+               __cpu_stop_queue_work(stopper, work);
        else if (work->done)
                cpu_stop_signal_done(work->done);
        raw_spin_unlock_irqrestore(&stopper->lock, flags);
 
-       wake_up_q(&wakeq);
+       if (enabled)
+               wake_up_process(stopper->thread);
        preempt_enable();
 
        return enabled;
@@ -264,7 +262,6 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1,
 {
        struct cpu_stopper *stopper1 = per_cpu_ptr(&cpu_stopper, cpu1);
        struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2);
-       DEFINE_WAKE_Q(wakeq);
        int err;
 
 retry:
@@ -300,8 +297,8 @@ retry:
        }
 
        err = 0;
-       __cpu_stop_queue_work(stopper1, work1, &wakeq);
-       __cpu_stop_queue_work(stopper2, work2, &wakeq);
+       __cpu_stop_queue_work(stopper1, work1);
+       __cpu_stop_queue_work(stopper2, work2);
 
 unlock:
        raw_spin_unlock(&stopper2->lock);
@@ -316,7 +313,10 @@ unlock:
                goto retry;
        }
 
-       wake_up_q(&wakeq);
+       if (!err) {
+               wake_up_process(stopper1->thread);
+               wake_up_process(stopper2->thread);
+       }
        preempt_enable();
 
        return err;