blk-mq: remove blk_mq_delay_queue()
author: Ming Lei <ming.lei@redhat.com>
Sun, 8 Apr 2018 09:48:11 +0000 (17:48 +0800)
committer: Jens Axboe <axboe@kernel.dk>
Tue, 10 Apr 2018 14:38:46 +0000 (08:38 -0600)
No driver uses this interface any more, so remove it.

Cc: Stefan Haberland <sth@linux.vnet.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq-debugfs.c
block/blk-mq.c
include/linux/blk-mq.h

index 58b3b79cbe83d09bc63784c4eebf458088ce8a98..3080e18cb8590538084615d368b968ff8349ed9a 100644 (file)
@@ -235,7 +235,6 @@ static const char *const hctx_state_name[] = {
        HCTX_STATE_NAME(STOPPED),
        HCTX_STATE_NAME(TAG_ACTIVE),
        HCTX_STATE_NAME(SCHED_RESTART),
-       HCTX_STATE_NAME(START_ON_RUN),
 };
 #undef HCTX_STATE_NAME
 
index e05bd10d5c84423bffe894c43848617258deff3b..c2c6d276da3a2cac18a169a5ffe556dec90c814f 100644 (file)
@@ -1562,40 +1562,14 @@ static void blk_mq_run_work_fn(struct work_struct *work)
        hctx = container_of(work, struct blk_mq_hw_ctx, run_work.work);
 
        /*
-        * If we are stopped, don't run the queue. The exception is if
-        * BLK_MQ_S_START_ON_RUN is set. For that case, we auto-clear
-        * the STOPPED bit and run it.
+        * If we are stopped, don't run the queue.
         */
-       if (test_bit(BLK_MQ_S_STOPPED, &hctx->state)) {
-               if (!test_bit(BLK_MQ_S_START_ON_RUN, &hctx->state))
-                       return;
-
-               clear_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
+       if (test_bit(BLK_MQ_S_STOPPED, &hctx->state))
                clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-       }
 
        __blk_mq_run_hw_queue(hctx);
 }
 
-
-void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
-{
-       if (WARN_ON_ONCE(!blk_mq_hw_queue_mapped(hctx)))
-               return;
-
-       /*
-        * Stop the hw queue, then modify currently delayed work.
-        * This should prevent us from running the queue prematurely.
-        * Mark the queue as auto-clearing STOPPED when it runs.
-        */
-       blk_mq_stop_hw_queue(hctx);
-       set_bit(BLK_MQ_S_START_ON_RUN, &hctx->state);
-       kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx),
-                                       &hctx->run_work,
-                                       msecs_to_jiffies(msecs));
-}
-EXPORT_SYMBOL(blk_mq_delay_queue);
-
 static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
                                            struct request *rq,
                                            bool at_head)
index 8efcf49796a3daeabe592f94ff6dc300071f2f86..e3986f4b34615fa7c50c6d412976594ebc3a7fdf 100644 (file)
@@ -183,7 +183,6 @@ enum {
        BLK_MQ_S_STOPPED        = 0,
        BLK_MQ_S_TAG_ACTIVE     = 1,
        BLK_MQ_S_SCHED_RESTART  = 2,
-       BLK_MQ_S_START_ON_RUN   = 3,
 
        BLK_MQ_MAX_DEPTH        = 10240,
 
@@ -270,7 +269,6 @@ void blk_mq_unquiesce_queue(struct request_queue *q);
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
-void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
                busy_tag_iter_fn *fn, void *priv);
 void blk_mq_freeze_queue(struct request_queue *q);