blk-mq: mark ctx as pending at batch in flush plug path
author: Ming Lei <ming.lei@canonical.com>
Tue, 20 Oct 2015 15:13:57 +0000 (23:13 +0800)
committer: Jens Axboe <axboe@fb.com>
Wed, 21 Oct 2015 21:00:58 +0000 (15:00 -0600)
Most of the time, the flush plug path should be the hottest I/O path,
so mark the ctx as pending in batch, after all requests in the list
have been inserted.

Reviewed-by: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Ming Lei <ming.lei@canonical.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
block/blk-mq.c

index 24c528f182eaaccf0e9331b165a9df2755dee6b3..159e69bd2c3c83e165f9b3b6aa8c12b05b61ce89 100644 (file)
@@ -990,18 +990,25 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_queue);
 
-static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
-                                   struct request *rq, bool at_head)
+static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
+                                           struct blk_mq_ctx *ctx,
+                                           struct request *rq,
+                                           bool at_head)
 {
-       struct blk_mq_ctx *ctx = rq->mq_ctx;
-
        trace_block_rq_insert(hctx->queue, rq);
 
        if (at_head)
                list_add(&rq->queuelist, &ctx->rq_list);
        else
                list_add_tail(&rq->queuelist, &ctx->rq_list);
+}
+
+static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
+                                   struct request *rq, bool at_head)
+{
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
 
+       __blk_mq_insert_req_list(hctx, ctx, rq, at_head);
        blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
@@ -1057,8 +1064,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
                rq = list_first_entry(list, struct request, queuelist);
                list_del_init(&rq->queuelist);
                rq->mq_ctx = ctx;
-               __blk_mq_insert_request(hctx, rq, false);
+               __blk_mq_insert_req_list(hctx, ctx, rq, false);
        }
+       blk_mq_hctx_mark_pending(hctx, ctx);
        spin_unlock(&ctx->lock);
 
        blk_mq_run_hw_queue(hctx, from_schedule);