workqueue: better define locking rules around worker creation / destruction
author	Tejun Heo <tj@kernel.org>
Thu, 14 Mar 2013 02:47:39 +0000 (19:47 -0700)
committer	Tejun Heo <tj@kernel.org>
Thu, 14 Mar 2013 02:47:39 +0000 (19:47 -0700)
When a manager creates or destroys workers, the operations are always
done with the manager_mutex held; however, initial worker creation and
worker destruction during pool release don't grab the mutex.  They are
still correct: initial worker creation doesn't require synchronization,
and grabbing manager_arb provides enough exclusion for the pool
release path.

Still, let's make everyone follow the same rules, both for consistency
and so that lockdep annotations can be added.

Update create_and_start_worker() and put_unbound_pool() to grab
manager_mutex around worker creation and destruction respectively, and
add lockdep assertions to create_worker() and destroy_worker().
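
For illustration only (this sketch is not part of the patch), every
creation or destruction path now follows the same pattern, with
destruction additionally holding pool->lock as destroy_worker()
asserts:

	mutex_lock(&pool->manager_mutex);

	worker = create_worker(pool);	/* asserts manager_mutex */
	/* ... */
	spin_lock_irq(&pool->lock);
	destroy_worker(worker);		/* asserts manager_mutex and pool->lock */
	spin_unlock_irq(&pool->lock);

	mutex_unlock(&pool->manager_mutex);

With lockdep enabled, calling either function without the expected
locks held now produces a warning instead of silently racing.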

This patch doesn't introduce any visible behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/workqueue.c

index cac710646cbc86518c77754ee623967d94bdd7bb..ce1ab069c5fee6dd7c07fb32d14ae9c8ae4468db 100644 (file)
@@ -1715,6 +1715,8 @@ static struct worker *create_worker(struct worker_pool *pool)
        struct worker *worker = NULL;
        int id = -1;
 
+       lockdep_assert_held(&pool->manager_mutex);
+
        spin_lock_irq(&pool->lock);
        while (ida_get_new(&pool->worker_ida, &id)) {
                spin_unlock_irq(&pool->lock);
@@ -1796,12 +1798,14 @@ static void start_worker(struct worker *worker)
  * create_and_start_worker - create and start a worker for a pool
  * @pool: the target pool
  *
- * Create and start a new worker for @pool.
+ * Grab the managership of @pool and create and start a new worker for it.
  */
 static int create_and_start_worker(struct worker_pool *pool)
 {
        struct worker *worker;
 
+       mutex_lock(&pool->manager_mutex);
+
        worker = create_worker(pool);
        if (worker) {
                spin_lock_irq(&pool->lock);
@@ -1809,6 +1813,8 @@ static int create_and_start_worker(struct worker_pool *pool)
                spin_unlock_irq(&pool->lock);
        }
 
+       mutex_unlock(&pool->manager_mutex);
+
        return worker ? 0 : -ENOMEM;
 }
 
@@ -1826,6 +1832,9 @@ static void destroy_worker(struct worker *worker)
        struct worker_pool *pool = worker->pool;
        int id = worker->id;
 
+       lockdep_assert_held(&pool->manager_mutex);
+       lockdep_assert_held(&pool->lock);
+
        /* sanity check frenzy */
        if (WARN_ON(worker->current_work) ||
            WARN_ON(!list_empty(&worker->scheduled)))
@@ -3531,6 +3540,7 @@ static void put_unbound_pool(struct worker_pool *pool)
         * manager_mutex.
         */
        mutex_lock(&pool->manager_arb);
+       mutex_lock(&pool->manager_mutex);
        spin_lock_irq(&pool->lock);
 
        while ((worker = first_worker(pool)))
@@ -3538,6 +3548,7 @@ static void put_unbound_pool(struct worker_pool *pool)
        WARN_ON(pool->nr_workers || pool->nr_idle);
 
        spin_unlock_irq(&pool->lock);
+       mutex_unlock(&pool->manager_mutex);
        mutex_unlock(&pool->manager_arb);
 
        /* shut down the timers */