blk-mq: simplify blk_mq_free_request
author    Christoph Hellwig <hch@lst.de>
          Fri, 16 Jun 2017 16:15:22 +0000 (18:15 +0200)
committer Jens Axboe <axboe@kernel.dk>
          Sun, 18 Jun 2017 16:08:55 +0000 (10:08 -0600)
Merge three functions only tail-called by blk_mq_free_request into
blk_mq_free_request.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
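
The consolidated blk_mq_free_request() keeps the same external contract: it releases a request obtained from blk_mq_alloc_request() (or reached via the normal completion path). The following is a minimal sketch of that pairing from a driver-internal command path, assuming the int rw / unsigned int flags prototypes visible in the hunks below; my_send_internal_cmd() and its payload handling are hypothetical, and REQ_OP_DRV_IN merely stands in for whatever operation a driver would actually issue.

#include <linux/blk-mq.h>
#include <linux/err.h>

/*
 * Hypothetical driver helper: allocate a passthrough request, issue it,
 * then release it through the merged blk_mq_free_request() path below.
 */
static int my_send_internal_cmd(struct request_queue *q)
{
	struct request *rq;

	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* ... set up the driver-specific payload and dispatch it ... */

	blk_mq_free_request(rq);
	return 0;
}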
block/blk-mq.c
block/blk-mq.h

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9df7e0394a48176e756c4004f2c4907bbfe053bd..0b17351fccfcf97714b5d2223d49e36462cb5cd4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -395,12 +395,24 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
-void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-                            struct request *rq)
+void blk_mq_free_request(struct request *rq)
 {
-       const int sched_tag = rq->internal_tag;
        struct request_queue *q = rq->q;
+       struct elevator_queue *e = q->elevator;
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+       const int sched_tag = rq->internal_tag;
 
+       if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
+               if (e && e->type->ops.mq.finish_request)
+                       e->type->ops.mq.finish_request(rq);
+               if (rq->elv.icq) {
+                       put_io_context(rq->elv.icq->ioc);
+                       rq->elv.icq = NULL;
+               }
+       }
+
+       ctx->rq_completed[rq_is_sync(rq)]++;
        if (rq->rq_flags & RQF_MQ_INFLIGHT)
                atomic_dec(&hctx->nr_active);
 
@@ -416,38 +428,6 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
        blk_mq_sched_restart(hctx);
        blk_queue_exit(q);
 }
-
-static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
-                                    struct request *rq)
-{
-       struct blk_mq_ctx *ctx = rq->mq_ctx;
-
-       ctx->rq_completed[rq_is_sync(rq)]++;
-       __blk_mq_finish_request(hctx, ctx, rq);
-}
-
-void blk_mq_finish_request(struct request *rq)
-{
-       blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
-}
-EXPORT_SYMBOL_GPL(blk_mq_finish_request);
-
-void blk_mq_free_request(struct request *rq)
-{
-       struct request_queue *q = rq->q;
-       struct elevator_queue *e = q->elevator;
-
-       if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
-               if (e && e->type->ops.mq.finish_request)
-                       e->type->ops.mq.finish_request(rq);
-               if (rq->elv.icq) {
-                       put_io_context(rq->elv.icq->ioc);
-                       rq->elv.icq = NULL;
-               }
-       }
-
-       blk_mq_finish_request(rq);
-}
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 806fed53f607e0039ccf26fad86f68e74e2d086f..6a509a8eb3fb1810e7adcae43450bfd48eeba0f4 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -131,9 +131,6 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data
 /*
  * Internal helpers for request allocation/init/free
  */
-void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-                               struct request *rq);
-void blk_mq_finish_request(struct request *rq);
 struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
                                        unsigned int op);