stop_machine: Ensure that a queued callback will be called before cpu_stop_park()
Author:     Oleg Nesterov <oleg@redhat.com>
AuthorDate: Thu, 8 Oct 2015 14:51:31 +0000 (16:51 +0200)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 20 Oct 2015 08:23:53 +0000 (10:23 +0200)
cpu_stop_queue_work() checks stopper->enabled before it queues the
work, but if we race with cpu_down(), ->enabled == T only guarantees
that the work will be completed via cpu_stop_signal_done(); it does
not guarantee that work->fn() will actually be called.
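
For reference, a simplified sketch of the queueing path as it read
before this change (condensed from kernel/stop_machine.c of that era;
details elided):

static void cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        unsigned long flags;

        spin_lock_irqsave(&stopper->lock, flags);
        if (stopper->enabled) {
                /* queued: the stopper thread will run work->fn() */
                list_add_tail(&work->list, &stopper->works);
                wake_up_process(stopper->thread);
        } else {
                /* CPU is going down: completed, but work->fn() never runs */
                cpu_stop_signal_done(work->done, false);
        }
        spin_unlock_irqrestore(&stopper->lock, flags);
}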

This is not enough for stop_two_cpus() or stop_machine(): they will
deadlock if multi_cpu_stop() is not called on one of the target CPUs.
stop_machine()/stop_cpus() are fine in practice; they rely on
stop_cpus_mutex, which serializes them against cpu_down(). But
stop_two_cpus() has to check cpu_active() to avoid the same race with
hotplug, and this check is very unobvious and probably not even
correct if we race with cpu_up().
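
The check in question looked roughly like this (a condensed fragment
of the stop_two_cpus() of that era; surrounding code elided):

        /*
         * If both CPUs are observed active, _cpu_down() cannot yet have
         * queued its stop-machine works, so ours are queued first and the
         * FIFO stopper queue makes them run first.  This is the subtle
         * ordering argument the description above calls unobvious.
         */
        if (!cpu_active(cpu1) || !cpu_active(cpu2)) {
                preempt_enable();
                return -ENOENT;
        }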

Change the cpu_down() path to clear ->enabled first; cpu_stopper_thread()
then flushes the pending ->works and returns with KTHREAD_SHOULD_PARK set.
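
This works because cpu_stopper_thread() drains ->works in a loop
before returning to the smpboot code that actually parks the thread;
once ->enabled is false, nothing new can be queued, so the drain
terminates with an empty list. A simplified sketch of that loop
(condensed; error propagation and sanity checks elided):

static void cpu_stopper_thread(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
        struct cpu_stop_work *work;

repeat:
        work = NULL;
        spin_lock_irq(&stopper->lock);
        if (!list_empty(&stopper->works)) {
                work = list_first_entry(&stopper->works,
                                        struct cpu_stop_work, list);
                list_del_init(&work->list);
        }
        spin_unlock_irq(&stopper->lock);

        if (work) {
                /* cpu stop callbacks must not sleep */
                preempt_disable();
                work->fn(work->arg);
                preempt_enable();
                cpu_stop_signal_done(work->done, true);
                goto repeat;
        }
        /* only now can smpboot_thread_fn() park us */
}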

Note also that smpboot_thread_call() calls cpu_stop_unpark(), which
sets ->enabled == T at the CPU_ONLINE stage, so this CPU can't go away
until cpu_stopper_thread() has been called at least once. This all
means that if cpu_stop_queue_work() succeeds, we know that work->fn()
will be called.
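
For completeness, the unpark side is just the locked flip back, as it
read around this commit:

static void cpu_stop_unpark(unsigned int cpu)
{
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);

        spin_lock_irq(&stopper->lock);
        stopper->enabled = true;
        spin_unlock_irq(&stopper->lock);
}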

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: heiko.carstens@de.ibm.com
Link: http://lkml.kernel.org/r/20151008145131.GA18139@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
include/linux/stop_machine.h
kernel/cpu.c
kernel/stop_machine.c

diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 414d924318ce1ba57a448a485d8594a99a9012d5..7b76362b381ce9f70affb3facb1cde4328dee283 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -33,6 +33,7 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
                         struct cpu_stop_work *work_buf);
 int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
 int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
+void stop_machine_park(int cpu);
 
 #else  /* CONFIG_SMP */
 
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 050c63472f03f7e357eb20ac4ef4e815a0155eae..c85df2775b735895d0637c416c712f507828dd9d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -344,7 +344,7 @@ static int take_cpu_down(void *_param)
        /* Give up timekeeping duties */
        tick_handover_do_timer();
        /* Park the stopper thread */
-       kthread_park(current);
+       stop_machine_park((long)param->hcpu);
        return 0;
 }
 
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 12484e5d5c88769058610aca529924ea9e882aff..6a402098d4ab1b640540a0ed24ba1ba33d3e4257 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -452,6 +452,18 @@ repeat:
        }
 }
 
+void stop_machine_park(int cpu)
+{
+       struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
+       /*
+        * Lockless. cpu_stopper_thread() will take stopper->lock and flush
+        * the pending works before it parks, until then it is fine to queue
+        * the new works.
+        */
+       stopper->enabled = false;
+       kthread_park(stopper->thread);
+}
+
 extern void sched_set_stop_task(int cpu, struct task_struct *stop);
 
 static void cpu_stop_create(unsigned int cpu)
@@ -462,17 +474,8 @@ static void cpu_stop_create(unsigned int cpu)
 static void cpu_stop_park(unsigned int cpu)
 {
        struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
-       struct cpu_stop_work *work, *tmp;
-       unsigned long flags;
 
-       /* drain remaining works */
-       spin_lock_irqsave(&stopper->lock, flags);
-       list_for_each_entry_safe(work, tmp, &stopper->works, list) {
-               list_del_init(&work->list);
-               cpu_stop_signal_done(work->done, false);
-       }
-       stopper->enabled = false;
-       spin_unlock_irqrestore(&stopper->lock, flags);
+       WARN_ON(!list_empty(&stopper->works));
 }
 
 static void cpu_stop_unpark(unsigned int cpu)