dm rq: fix the starting and stopping of blk-mq queues
author: Mike Snitzer <snitzer@redhat.com>
  Tue, 2 Aug 2016 16:51:11 +0000 (12:51 -0400)
committer: Mike Snitzer <snitzer@redhat.com>
  Tue, 2 Aug 2016 20:21:36 +0000 (16:21 -0400)
Improve dm_stop_queue() to cancel any requeue_work.  Also, have
dm_start_queue() and dm_stop_queue() clear/set the QUEUE_FLAG_STOPPED
for the blk-mq request_queue.

On suspend dm_stop_queue() handles stopping the blk-mq request_queue
BUT: even though the hw_queues are marked BLK_MQ_S_STOPPED at that point
there is still a race that is allowing block/blk-mq.c to call ->queue_rq
against a hctx that it really shouldn't.  Add a check to
dm_mq_queue_rq() that guards against this rarity (albeit _not_
race-free).

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
Cc: stable@vger.kernel.org # must patch dm.c on < 4.8 kernels
drivers/md/dm-rq.c

index 7a9661868496e52b46848382aa7971fb9f6ff61c..1ca7463e8bb2b26c799f63f5d54a4a33d570704c 100644 (file)
@@ -78,6 +78,7 @@ void dm_start_queue(struct request_queue *q)
        if (!q->mq_ops)
                dm_old_start_queue(q);
        else {
+               queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, q);
                blk_mq_start_stopped_hw_queues(q, true);
                blk_mq_kick_requeue_list(q);
        }
@@ -101,8 +102,14 @@ void dm_stop_queue(struct request_queue *q)
 {
        if (!q->mq_ops)
                dm_old_stop_queue(q);
-       else
+       else {
+               spin_lock_irq(q->queue_lock);
+               queue_flag_set(QUEUE_FLAG_STOPPED, q);
+               spin_unlock_irq(q->queue_lock);
+
+               blk_mq_cancel_requeue_work(q);
                blk_mq_stop_hw_queues(q);
+       }
 }
 
 static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
@@ -864,6 +871,17 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                dm_put_live_table(md, srcu_idx);
        }
 
+       /*
+        * On suspend dm_stop_queue() handles stopping the blk-mq
+        * request_queue BUT: even though the hw_queues are marked
+        * BLK_MQ_S_STOPPED at that point there is still a race that
+        * is allowing block/blk-mq.c to call ->queue_rq against a
+        * hctx that it really shouldn't.  The following check guards
+        * against this rarity (albeit _not_ race-free).
+        */
+       if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
+               return BLK_MQ_RQ_QUEUE_BUSY;
+
        if (ti->type->busy && ti->type->busy(ti))
                return BLK_MQ_RQ_QUEUE_BUSY;