blk-mq: centralise related handling into blk_mq_get_driver_tag
author     Ming Lei <ming.lei@redhat.com>
Mon, 6 Jul 2020 14:41:11 +0000 (22:41 +0800)
committer  Jens Axboe <axboe@kernel.dk>
Wed, 8 Jul 2020 22:06:42 +0000 (16:06 -0600)
Move the .nr_active update and the tags->rqs[] request assignment into
blk_mq_get_driver_tag(); both naturally belong in the path that actually
obtains the driver tag.
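
For reference, this is the shape of the helper after the patch, condensed
from the block/blk-mq.c hunk below (the comments are editorial and not part
of the patch):

static bool blk_mq_get_driver_tag(struct request *rq)
{
        struct blk_mq_hw_ctx *hctx = rq->mq_hctx;

        /* Allocate a driver tag only if the request doesn't already hold one. */
        if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
                return false;

        /* Shared-tag accounting now lives here, next to the tag it tracks. */
        if ((hctx->flags & BLK_MQ_F_TAG_SHARED) &&
                        !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
                rq->rq_flags |= RQF_MQ_INFLIGHT;
                atomic_inc(&hctx->nr_active);
        }

        /* Record the request in the tag -> request table in one place. */
        hctx->tags->rqs[rq->tag] = rq;
        return true;
}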

Meanwhile the blk-flush code is simplified, and the flush request no
longer has to update the hctx->tags->rqs[] request table manually.
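
On the flush side the flush request now only borrows the tag value from the
request it is cloned from; the tags->rqs[] slot is no longer patched by hand
via blk_mq_tag_set_rq(), so that helper and fq->orig_rq can go away. A
condensed view of the block/blk-flush.c hunks below (comments editorial):

        /* blk_kick_flush(): borrow the original request's tag value only */
        if (!q->elevator)
                flush_rq->tag = first_rq->tag;
        else
                flush_rq->internal_tag = first_rq->internal_tag;

        /* flush_end_io(): clear/put the borrowed tag; no rqs[] fixup needed */
        if (!q->elevator) {
                flush_rq->tag = BLK_MQ_NO_TAG;
        } else {
                blk_mq_put_driver_tag(flush_rq);
                flush_rq->internal_tag = BLK_MQ_NO_TAG;
        }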

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-flush.c
block/blk-mq-tag.h
block/blk-mq.c
block/blk.h

diff --git a/block/blk-flush.c b/block/blk-flush.c
index 15ae0155ec070ab7194e274b58fda40b2b0ea058..71c7f520e0401f2721a9a2bc029cdae93a75e8bc 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -219,7 +219,6 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
        struct request *rq, *n;
        unsigned long flags = 0;
        struct blk_flush_queue *fq = blk_get_flush_queue(q, flush_rq->mq_ctx);
-       struct blk_mq_hw_ctx *hctx;
 
        blk_account_io_flush(flush_rq);
 
@@ -235,13 +234,11 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
        if (fq->rq_status != BLK_STS_OK)
                error = fq->rq_status;
 
-       hctx = flush_rq->mq_hctx;
        if (!q->elevator) {
-               blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
-               flush_rq->tag = -1;
+               flush_rq->tag = BLK_MQ_NO_TAG;
        } else {
                blk_mq_put_driver_tag(flush_rq);
-               flush_rq->internal_tag = -1;
+               flush_rq->internal_tag = BLK_MQ_NO_TAG;
        }
 
        running = &fq->flush_queue[fq->flush_running_idx];
@@ -316,13 +313,10 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
        flush_rq->mq_ctx = first_rq->mq_ctx;
        flush_rq->mq_hctx = first_rq->mq_hctx;
 
-       if (!q->elevator) {
-               fq->orig_rq = first_rq;
+       if (!q->elevator)
                flush_rq->tag = first_rq->tag;
-               blk_mq_tag_set_rq(flush_rq->mq_hctx, first_rq->tag, flush_rq);
-       } else {
+       else
                flush_rq->internal_tag = first_rq->internal_tag;
-       }
 
        flush_rq->cmd_flags = REQ_OP_FLUSH | REQ_PREFLUSH;
        flush_rq->cmd_flags |= (flags & REQ_DRV) | (flags & REQ_FAILFAST_MASK);
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 3945c7f5b94469d33b712e92dd583e36b77269fc..b1acac518c4e087d707c6440f2a2645cdbff888b 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -101,18 +101,6 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
        return atomic_read(&hctx->nr_active) < depth;
 }
 
-/*
- * This helper should only be used for flush request to share tag
- * with the request cloned from, and both the two requests can't be
- * in flight at the same time. The caller has to make sure the tag
- * can't be freed.
- */
-static inline void blk_mq_tag_set_rq(struct blk_mq_hw_ctx *hctx,
-               unsigned int tag, struct request *rq)
-{
-       hctx->tags->rqs[tag] = rq;
-}
-
 static inline bool blk_mq_tag_is_reserved(struct blk_mq_tags *tags,
                                          unsigned int tag)
 {
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 89c83fa97ba07fe51a6c1ec5e46e9490dc5c8130..b6dd080d39625a7bea831cc149af1a664fbc80e5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -277,26 +277,20 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 {
        struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
        struct request *rq = tags->static_rqs[tag];
-       req_flags_t rq_flags = 0;
 
        if (data->q->elevator) {
                rq->tag = BLK_MQ_NO_TAG;
                rq->internal_tag = tag;
        } else {
-               if (data->hctx->flags & BLK_MQ_F_TAG_SHARED) {
-                       rq_flags = RQF_MQ_INFLIGHT;
-                       atomic_inc(&data->hctx->nr_active);
-               }
                rq->tag = tag;
                rq->internal_tag = BLK_MQ_NO_TAG;
-               data->hctx->tags->rqs[rq->tag] = rq;
        }
 
        /* csd/requeue_work/fifo_time is initialized before use */
        rq->q = data->q;
        rq->mq_ctx = data->ctx;
        rq->mq_hctx = data->hctx;
-       rq->rq_flags = rq_flags;
+       rq->rq_flags = 0;
        rq->cmd_flags = data->cmd_flags;
        if (data->flags & BLK_MQ_REQ_PREEMPT)
                rq->rq_flags |= RQF_PREEMPT;
@@ -1105,9 +1099,10 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
 {
        struct sbitmap_queue *bt = &rq->mq_hctx->tags->bitmap_tags;
        unsigned int tag_offset = rq->mq_hctx->tags->nr_reserved_tags;
-       bool shared = blk_mq_tag_busy(rq->mq_hctx);
        int tag;
 
+       blk_mq_tag_busy(rq->mq_hctx);
+
        if (blk_mq_tag_is_reserved(rq->mq_hctx->sched_tags, rq->internal_tag)) {
                bt = &rq->mq_hctx->tags->breserved_tags;
                tag_offset = 0;
@@ -1120,19 +1115,23 @@ static bool __blk_mq_get_driver_tag(struct request *rq)
                return false;
 
        rq->tag = tag + tag_offset;
-       if (shared) {
-               rq->rq_flags |= RQF_MQ_INFLIGHT;
-               atomic_inc(&rq->mq_hctx->nr_active);
-       }
-       rq->mq_hctx->tags->rqs[rq->tag] = rq;
        return true;
 }
 
 static bool blk_mq_get_driver_tag(struct request *rq)
 {
-       if (rq->tag != BLK_MQ_NO_TAG)
-               return true;
-       return __blk_mq_get_driver_tag(rq);
+       struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
+
+       if (rq->tag == BLK_MQ_NO_TAG && !__blk_mq_get_driver_tag(rq))
+               return false;
+
+       if ((hctx->flags & BLK_MQ_F_TAG_SHARED) &&
+                       !(rq->rq_flags & RQF_MQ_INFLIGHT)) {
+               rq->rq_flags |= RQF_MQ_INFLIGHT;
+               atomic_inc(&hctx->nr_active);
+       }
+       hctx->tags->rqs[rq->tag] = rq;
+       return true;
 }
 
 static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode,
diff --git a/block/blk.h b/block/blk.h
index 94f7c084f68fc417b0bcf864a45d2aee4db0a195..9dcf51c94096a71cd6b37e9dd4fbe0579be1e872 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -25,11 +25,6 @@ struct blk_flush_queue {
        struct list_head        flush_data_in_flight;
        struct request          *flush_rq;
 
-       /*
-        * flush_rq shares tag with this rq, both can't be active
-        * at the same time
-        */
-       struct request          *orig_rq;
        struct lock_class_key   key;
        spinlock_t              mq_flush_lock;
 };