diff --git a/block/blk-mq.c b/block/blk-mq.c
index ce0f5f4ede70cdc55e102359601ff0149fc075e6..e5ef40c603ca36d64fbbf3dc965fb4ac63db9ae4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -355,13 +355,13 @@ static struct request *blk_mq_get_request(struct request_queue *q,
        struct elevator_queue *e = q->elevator;
        struct request *rq;
        unsigned int tag;
-       bool put_ctx_on_error = false;
+       bool clear_ctx_on_error = false;
 
        blk_queue_enter_live(q);
        data->q = q;
        if (likely(!data->ctx)) {
                data->ctx = blk_mq_get_ctx(q);
-               put_ctx_on_error = true;
+               clear_ctx_on_error = true;
        }
        if (likely(!data->hctx))
                data->hctx = blk_mq_map_queue(q, data->cmd_flags,
@@ -387,10 +387,8 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 
        tag = blk_mq_get_tag(data);
        if (tag == BLK_MQ_TAG_FAIL) {
-               if (put_ctx_on_error) {
-                       blk_mq_put_ctx(data->ctx);
+               if (clear_ctx_on_error)
                        data->ctx = NULL;
-               }
                blk_queue_exit(q);
                return NULL;
        }
@@ -427,8 +425,6 @@ struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
        if (!rq)
                return ERR_PTR(-EWOULDBLOCK);
 
-       blk_mq_put_ctx(alloc_data.ctx);
-
        rq->__data_len = 0;
        rq->__sector = (sector_t) -1;
        rq->bio = rq->biotail = NULL;
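The three hunks above are the core of the blk_mq_put_ctx() removal. Before this series, blk_mq_get_ctx() pinned the caller to its CPU with get_cpu(), and blk_mq_put_ctx() was the matching put_cpu(); since nothing between the two depends on preemption staying disabled, the getter can use raw_smp_processor_id() and the tag-allocation error path only has to clear the stale pointer, hence the put_ctx_on_error -> clear_ctx_on_error rename. A sketch of the block/blk-mq.h helpers before and after, from memory rather than quoted from this tree:

/* Before: ctx lookup disabled preemption, so every caller had to
 * pair it with blk_mq_put_ctx(). */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, get_cpu());
}

static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
{
	put_cpu();
}

/* After: no CPU pinning, so there is no put side to forget. */
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
	return __blk_mq_get_ctx(q, raw_smp_processor_id());
}
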
@@ -1764,9 +1760,15 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
        }
 }
 
-static void blk_mq_bio_to_request(struct request *rq, struct bio *bio)
+static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
+               unsigned int nr_segs)
 {
-       blk_init_request_from_bio(rq, bio);
+       if (bio->bi_opf & REQ_RAHEAD)
+               rq->cmd_flags |= REQ_FAILFAST_MASK;
+
+       rq->__sector = bio->bi_iter.bi_sector;
+       rq->write_hint = bio->bi_write_hint;
+       blk_rq_bio_prep(rq, bio, nr_segs);
 
        blk_account_io_start(rq, true);
 }
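blk_mq_bio_to_request() now takes the segment count computed during the split and open-codes what blk_init_request_from_bio() used to do (blk-mq was its last caller, so the helper goes away elsewhere in the series). Roughly what the removed block/blk-core.c helper looked like, and how the reworked blk_rq_bio_prep() consumes nr_segs; both sketched from memory, not quoted from this tree:

/* Removed helper, approximately: */
void blk_init_request_from_bio(struct request *req, struct bio *bio)
{
	if (bio->bi_opf & REQ_RAHEAD)
		req->cmd_flags |= REQ_FAILFAST_MASK;

	req->__sector = bio->bi_iter.bi_sector;
	req->ioprio = bio_prio(bio);
	req->write_hint = bio->bi_write_hint;
	blk_rq_bio_prep(req->q, req, bio);	/* old three-argument form */
}

/* Reworked blk_rq_bio_prep(), approximately: the caller supplies the
 * segment count instead of the helper deriving it from the bio. */
static inline void blk_rq_bio_prep(struct request *rq, struct bio *bio,
		unsigned int nr_segs)
{
	rq->nr_phys_segments = nr_segs;
	rq->__data_len = bio->bi_iter.bi_size;
	rq->bio = rq->biotail = bio;
	rq->ioprio = bio_prio(bio);

	if (bio->bi_disk)
		rq->rq_disk = bio->bi_disk;
}
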
@@ -1936,20 +1938,20 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        struct request *rq;
        struct blk_plug *plug;
        struct request *same_queue_rq = NULL;
+       unsigned int nr_segs;
        blk_qc_t cookie;
 
        blk_queue_bounce(q, &bio);
-
-       blk_queue_split(q, &bio);
+       __blk_queue_split(q, &bio, &nr_segs);
 
        if (!bio_integrity_prep(bio))
                return BLK_QC_T_NONE;
 
        if (!is_flush_fua && !blk_queue_nomerges(q) &&
-           blk_attempt_plug_merge(q, bio, &same_queue_rq))
+           blk_attempt_plug_merge(q, bio, nr_segs, &same_queue_rq))
                return BLK_QC_T_NONE;
 
-       if (blk_mq_sched_bio_merge(q, bio))
+       if (blk_mq_sched_bio_merge(q, bio, nr_segs))
                return BLK_QC_T_NONE;
 
        rq_qos_throttle(q, bio);
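The submission path now calls __blk_queue_split(), which reports the number of physical segments it computed while splitting, and threads that count into blk_attempt_plug_merge() and blk_mq_sched_bio_merge() so the merge paths do not have to walk the bio again (the series drops the cached bio->bi_phys_segments field). Callers that do not care about the count keep the old entry point as a thin wrapper, roughly as in block/blk-merge.c at the time:

void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs);	/* splits *bio, reports its segment count */

void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	unsigned int nr_segs;

	__blk_queue_split(q, bio, &nr_segs);
}
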
@@ -1969,11 +1971,10 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
        cookie = request_to_qc_t(data.hctx, rq);
 
+       blk_mq_bio_to_request(rq, bio, nr_segs);
+
        plug = current->plug;
        if (unlikely(is_flush_fua)) {
-               blk_mq_put_ctx(data.ctx);
-               blk_mq_bio_to_request(rq, bio);
-
                /* bypass scheduler for flush rq */
                blk_insert_flush(rq);
                blk_mq_run_hw_queue(data.hctx, true);
@@ -1985,9 +1986,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                unsigned int request_count = plug->rq_count;
                struct request *last = NULL;
 
-               blk_mq_put_ctx(data.ctx);
-               blk_mq_bio_to_request(rq, bio);
-
                if (!request_count)
                        trace_block_plug(q);
                else
@@ -2001,8 +1999,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
                blk_add_rq_to_plug(plug, rq);
        } else if (plug && !blk_queue_nomerges(q)) {
-               blk_mq_bio_to_request(rq, bio);
-
                /*
                 * We do limited plugging. If the bio can be merged, do that.
                 * Otherwise the existing request in the plug list will be
@@ -2019,8 +2015,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                blk_add_rq_to_plug(plug, rq);
                trace_block_plug(q);
 
-               blk_mq_put_ctx(data.ctx);
-
                if (same_queue_rq) {
                        data.hctx = same_queue_rq->mq_hctx;
                        trace_block_unplug(q, 1, true);
@@ -2029,12 +2023,8 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                }
        } else if ((q->nr_hw_queues > 1 && is_sync) || (!q->elevator &&
                        !data.hctx->dispatch_busy)) {
-               blk_mq_put_ctx(data.ctx);
-               blk_mq_bio_to_request(rq, bio);
                blk_mq_try_issue_directly(data.hctx, rq, &cookie);
        } else {
-               blk_mq_put_ctx(data.ctx);
-               blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true, true);
        }