blk-mq: don't special case flush inserts for blk-mq-sched
author    Jens Axboe <axboe@fb.com>    Fri, 17 Feb 2017 18:38:36 +0000 (11:38 -0700)
committer Jens Axboe <axboe@fb.com>    Fri, 17 Feb 2017 19:35:47 +0000 (12:35 -0700)
The current request insertion machinery works just fine for
directly inserting flushes, so there is no need to special case
them anymore.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
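
For orientation before the hunks: below is a minimal, standalone C model of the branching this patch introduces, using stub functions named after the blk-mq helpers visible in the diff (the stubs only print what they would do; this is a sketch of the control flow, not kernel code). With a scheduler attached (q->elevator set), a flush/FUA request now jumps to the same elevator insert path as any other request; without one, it still goes through blk_insert_flush() and then shares the existing run-queue step.

/*
 * Minimal, compilable model of the flush-insert branching after this
 * change.  All blk-mq helpers are stand-in stubs named after the real
 * functions in the hunks below; only the control flow is of interest.
 */
#include <stdbool.h>
#include <stdio.h>

struct request_queue { void *elevator; };

static void blk_insert_flush(void)            { puts("blk_insert_flush"); }
static void blk_mq_sched_insert_request(void) { puts("blk_mq_sched_insert_request"); }
static void blk_mq_run_hw_queue(bool async)   { printf("blk_mq_run_hw_queue(async=%d)\n", async); }

static void insert(struct request_queue *q, bool is_flush_fua, bool is_sync)
{
	if (is_flush_fua) {
		if (q->elevator)
			goto elv_insert;	/* new: the scheduler handles flushes too */
		blk_insert_flush();		/* no scheduler: direct flush insert */
		goto run_queue;			/* reuse the existing run-queue step */
	}

	/* ... plug and merge handling elided ... */

	if (q->elevator) {
elv_insert:
		blk_mq_sched_insert_request();	/* shared elevator insert path */
		return;
	}

	/* ... direct-to-hctx insert elided ... */
run_queue:
	blk_mq_run_hw_queue(!is_sync || is_flush_fua);
}

int main(void)
{
	struct request_queue q = { .elevator = (void *)1 };

	insert(&q, true, true);		/* flush with a scheduler -> elv_insert       */
	q.elevator = NULL;
	insert(&q, true, true);		/* flush, no scheduler -> flush + run_queue   */
	return 0;
}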
block/blk-mq.c

index 6baa0c9fc06dce7fa1e79821d6278e78d8e32742..ee8c6f9f1d4debd20eaadcf06e31c6d0679159ed 100644
@@ -1434,12 +1434,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        cookie = request_to_qc_t(data.hctx, rq);
 
        if (unlikely(is_flush_fua)) {
-               blk_mq_put_ctx(data.ctx);
+               if (q->elevator)
+                       goto elv_insert;
                blk_mq_bio_to_request(rq, bio);
-               blk_mq_get_driver_tag(rq, NULL, true);
                blk_insert_flush(rq);
-               blk_mq_run_hw_queue(data.hctx, true);
-               goto done;
+               goto run_queue;
        }
 
        plug = current->plug;
@@ -1489,6 +1488,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        }
 
        if (q->elevator) {
+elv_insert:
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true,
@@ -1502,6 +1502,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
+run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }
        blk_mq_put_ctx(data.ctx);
@@ -1557,12 +1558,11 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
        cookie = request_to_qc_t(data.hctx, rq);
 
        if (unlikely(is_flush_fua)) {
-               blk_mq_put_ctx(data.ctx);
+               if (q->elevator)
+                       goto elv_insert;
                blk_mq_bio_to_request(rq, bio);
-               blk_mq_get_driver_tag(rq, NULL, true);
                blk_insert_flush(rq);
-               blk_mq_run_hw_queue(data.hctx, true);
-               goto done;
+               goto run_queue;
        }
 
        /*
@@ -1600,6 +1600,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
        }
 
        if (q->elevator) {
+elv_insert:
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true,
@@ -1613,6 +1614,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
+run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }