Merge tag 'for-linus-6.1-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git...
[linux-block.git] / block / blk-mq.c
index 83492d9423480c9ebd26eed64eb7c234dc55bf30..8070b6c10e8d5ae1cf97b6f8a712b566f89d2255 100644 (file)
@@ -510,25 +510,87 @@ retry:
                                        alloc_time_ns);
 }
 
-struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
-               blk_mq_req_flags_t flags)
+/*
+ * Refill the plug's request cache: batch-allocate plug->nr_ios requests,
+ * stashing the extras on plug->cached_rq, and hand one back to the caller.
+ * plug->nr_ios is reset to 1 so the batch hint is consumed only once.
+ * Returns NULL (not ERR_PTR) on queue-enter or allocation failure.
+ */
+static struct request *blk_mq_rq_cache_fill(struct request_queue *q,
+                                           struct blk_plug *plug,
+                                           blk_opf_t opf,
+                                           blk_mq_req_flags_t flags)
 {
        struct blk_mq_alloc_data data = {
                .q              = q,
                .flags          = flags,
                .cmd_flags      = opf,
-               .nr_tags        = 1,
+               .nr_tags        = plug->nr_ios,
+               .cached_rq      = &plug->cached_rq,
        };
        struct request *rq;
-       int ret;
 
-       ret = blk_queue_enter(q, flags);
-       if (ret)
-               return ERR_PTR(ret);
+       if (blk_queue_enter(q, flags))
+               return NULL;
+
+       plug->nr_ios = 1;
 
        rq = __blk_mq_alloc_requests(&data);
-       if (!rq)
-               goto out_queue_exit;
+       if (unlikely(!rq))
+               /* drop the queue reference taken above on failure */
+               blk_queue_exit(q);
+       return rq;
+}
+
+/*
+ * Try to satisfy an allocation from the current task's plug cache.
+ * Returns NULL when there is no plug, the cache refill fails, or the
+ * cached request is unusable (wrong queue, wrong hctx type, or mismatched
+ * flush-ness) — the caller then falls back to a fresh allocation.
+ */
+static struct request *blk_mq_alloc_cached_request(struct request_queue *q,
+                                                  blk_opf_t opf,
+                                                  blk_mq_req_flags_t flags)
+{
+       struct blk_plug *plug = current->plug;
+       struct request *rq;
+
+       if (!plug)
+               return NULL;
+       if (rq_list_empty(plug->cached_rq)) {
+               /* nr_ios == 1 means no batched allocation was requested */
+               if (plug->nr_ios == 1)
+                       return NULL;
+               rq = blk_mq_rq_cache_fill(q, plug, opf, flags);
+               if (rq)
+                       goto got_it;
+               return NULL;
+       }
+       rq = rq_list_peek(&plug->cached_rq);
+       if (!rq || rq->q != q)
+               return NULL;
+
+       if (blk_mq_get_hctx_type(opf) != rq->mq_hctx->type)
+               return NULL;
+       /* a cached non-flush request cannot stand in for a flush, or vice versa */
+       if (op_is_flush(rq->cmd_flags) != op_is_flush(opf))
+               return NULL;
+
+       /* pop the request off the cache list and retarget it to this op */
+       plug->cached_rq = rq_list_next(rq);
+got_it:
+       rq->cmd_flags = opf;
+       INIT_LIST_HEAD(&rq->queuelist);
+       return rq;
+}
+
+struct request *blk_mq_alloc_request(struct request_queue *q, blk_opf_t opf,
+               blk_mq_req_flags_t flags)
+{
+       struct request *rq;
+
+       rq = blk_mq_alloc_cached_request(q, opf, flags);
+       if (!rq) {
+               struct blk_mq_alloc_data data = {
+                       .q              = q,
+                       .flags          = flags,
+                       .cmd_flags      = opf,
+                       .nr_tags        = 1,
+               };
+               int ret;
+
+               ret = blk_queue_enter(q, flags);
+               if (ret)
+                       return ERR_PTR(ret);
+
+               rq = __blk_mq_alloc_requests(&data);
+               if (!rq)
+                       goto out_queue_exit;
+       }
        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
@@ -761,8 +823,10 @@ static void blk_complete_request(struct request *req)
         * can find how many bytes remain in the request
         * later.
         */
-       req->bio = NULL;
-       req->__data_len = 0;
+       if (!req->end_io) {
+               req->bio = NULL;
+               req->__data_len = 0;
+       }
 }
 
 /**
@@ -939,7 +1003,8 @@ inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 
        if (rq->end_io) {
                rq_qos_done(rq->q, rq);
-               rq->end_io(rq, error);
+               if (rq->end_io(rq, error) == RQ_END_IO_FREE)
+                       blk_mq_free_request(rq);
        } else {
                blk_mq_free_request(rq);
        }
@@ -992,6 +1057,13 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
 
                rq_qos_done(rq->q, rq);
 
+               /*
+                * If end_io handler returns NONE, then it still has
+                * ownership of the request.
+                */
+               if (rq->end_io && rq->end_io(rq, 0) == RQ_END_IO_NONE)
+                       continue;
+
                WRITE_ONCE(rq->state, MQ_RQ_IDLE);
                if (!req_ref_put_and_test(rq))
                        continue;
@@ -1233,12 +1305,13 @@ struct blk_rq_wait {
        blk_status_t ret;
 };
 
+/*
+ * end_io handler for synchronous waits: record the completion status and
+ * wake the waiter.  RQ_END_IO_NONE tells the completion path not to free
+ * the request — the waiter still owns it.
+ */
-static void blk_end_sync_rq(struct request *rq, blk_status_t ret)
+static enum rq_end_io_ret blk_end_sync_rq(struct request *rq, blk_status_t ret)
 {
        struct blk_rq_wait *wait = rq->end_io_data;
 
        wait->ret = ret;
        complete(&wait->done);
+       return RQ_END_IO_NONE;
 }
 
 bool blk_rq_is_poll(struct request *rq)
@@ -1472,10 +1545,12 @@ static bool blk_mq_req_expired(struct request *rq, unsigned long *next)
 
 void blk_mq_put_rq_ref(struct request *rq)
 {
-       if (is_flush_rq(rq))
-               rq->end_io(rq, 0);
-       else if (req_ref_put_and_test(rq))
+       if (is_flush_rq(rq)) {
+               /* flush requests are not refcounted: end_io decides whether to free */
+               if (rq->end_io(rq, 0) == RQ_END_IO_FREE)
+                       blk_mq_free_request(rq);
+       } else if (req_ref_put_and_test(rq)) {
+               /* last reference dropped */
                __blk_mq_free_request(rq);
+       }
 }
 
 static bool blk_mq_check_expired(struct request *rq, void *priv)