block: remove unnecessary ioc nested locking
author    John Ogness <john.ogness@linutronix.de>
Fri, 19 Jun 2020 15:17:17 +0000 (17:23 +0206)
committer Jens Axboe <axboe@kernel.dk>
Thu, 16 Jul 2020 16:22:15 +0000 (10:22 -0600)
The legacy CFQ IO scheduler could call put_io_context() from its exit_icq()
elevator callback. Because exit_icq() runs with an ioc spinlock already
held, and put_io_context() may take the lock of a different io_context in
the same lock class, lockdep reported this as possible recursive locking.
That warning was fixed in commit d8c66c5d5924 ("block: fix lockdep warning
on io_context release put_io_context()") by annotating the outer
acquisition with a nested subclass for the ioc spinlock. However, with
commit f382fb0bcef4 ("block: remove legacy IO schedulers") the CFQ IO
scheduler no longer exists.
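
For illustration, a minimal sketch of the annotation pattern that was
required (the names ioc_outer and ioc_inner are hypothetical, not the
actual call chain): lockdep tracks lock classes rather than lock
instances, so taking a second ioc lock while holding the first looks like
a self-deadlock unless the outer acquisition is marked with a distinct
subclass.

    unsigned long flags, flags2;

    /* Outer lock taken with subclass 1 so lockdep tolerates nesting. */
    spin_lock_irqsave_nested(&ioc_outer->lock, flags, 1);

    /* ... elevator exit_icq() ends up in put_io_context(ioc_inner),
     * which takes the inner lock with the default subclass 0 ... */
    spin_lock_irqsave(&ioc_inner->lock, flags2);
    spin_unlock_irqrestore(&ioc_inner->lock, flags2);

    spin_unlock_irqrestore(&ioc_outer->lock, flags);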

The BFQ IO scheduler also implements the exit_icq() elevator callback but
does not call put_io_context().
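
For reference, a simplified sketch of the shape of BFQ's hook from that
era (body condensed; see bfq-iosched.c for the real code):

    static void bfq_exit_icq(struct io_cq *icq)
    {
    	struct bfq_io_cq *bic = icq_to_bic(icq);

    	/* Tear down the sync and async bfq_queues tied to this icq;
    	 * nothing on this path calls put_io_context(). */
    	bfq_exit_icq_bfqq(bic, true);
    	bfq_exit_icq_bfqq(bic, false);
    }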

The nested subclass for the ioc spinlock is therefore no longer needed;
it also forced the irqsave locking variant, since there is no
spin_lock_irq_nested(). As the subclass existed only for that one
exception and the exception is gone, remove the nested usage and switch
to plain spin_lock_irq().

Signed-off-by: John Ogness <john.ogness@linutronix.de>
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-ioc.c

index 9df50fb507caf953b4b86eccfbf90465cf851eb8..5dbcfa1b872e1dbea779f2c6276facfde081a356 100644 (file)
@@ -96,15 +96,7 @@ static void ioc_release_fn(struct work_struct *work)
 {
        struct io_context *ioc = container_of(work, struct io_context,
                                              release_work);
-       unsigned long flags;
-
-       /*
-        * Exiting icq may call into put_io_context() through elevator
-        * which will trigger lockdep warning.  The ioc's are guaranteed to
-        * be different, use a different locking subclass here.  Use
-        * irqsave variant as there's no spin_lock_irq_nested().
-        */
-       spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+       spin_lock_irq(&ioc->lock);
 
        while (!hlist_empty(&ioc->icq_list)) {
                struct io_cq *icq = hlist_entry(ioc->icq_list.first,
@@ -115,13 +107,13 @@ static void ioc_release_fn(struct work_struct *work)
                        ioc_destroy_icq(icq);
                        spin_unlock(&q->queue_lock);
                } else {
-                       spin_unlock_irqrestore(&ioc->lock, flags);
+                       spin_unlock_irq(&ioc->lock);
                        cpu_relax();
-                       spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+                       spin_lock_irq(&ioc->lock);
                }
        }
 
-       spin_unlock_irqrestore(&ioc->lock, flags);
+       spin_unlock_irq(&ioc->lock);
 
        kmem_cache_free(iocontext_cachep, ioc);
 }
@@ -170,7 +162,6 @@ void put_io_context(struct io_context *ioc)
  */
 void put_io_context_active(struct io_context *ioc)
 {
-       unsigned long flags;
        struct io_cq *icq;
 
        if (!atomic_dec_and_test(&ioc->active_ref)) {
@@ -178,19 +169,14 @@ void put_io_context_active(struct io_context *ioc)
                return;
        }
 
-       /*
-        * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
-        * reverse double locking.  Read comment in ioc_release_fn() for
-        * explanation on the nested locking annotation.
-        */
-       spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+       spin_lock_irq(&ioc->lock);
        hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
                if (icq->flags & ICQ_EXITED)
                        continue;
 
                ioc_exit_icq(icq);
        }
-       spin_unlock_irqrestore(&ioc->lock, flags);
+       spin_unlock_irq(&ioc->lock);
 
        put_io_context(ioc);
 }