blk-mq-sched: add flush insertion into blk_mq_sched_insert_request()
author	Jens Axboe <axboe@fb.com>
	Fri, 27 Jan 2017 08:00:47 +0000 (01:00 -0700)
committer	Jens Axboe <axboe@fb.com>
	Fri, 27 Jan 2017 16:03:14 +0000 (09:03 -0700)
Instead of making each caller check for a flush request and handle the
details of inserting it, put that logic in the scheduler insertion
function. This fixes direct flush insertion outside of the usual
make_request_fn call paths, such as the insertion dm does via
blk_insert_cloned_request().

Signed-off-by: Jens Axboe <axboe@fb.com>
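
The new signature threads a "can_block" flag through every insertion
site. As a quick reference before the diff, a minimal sketch of the
caller contract (both calls below are taken verbatim from the hunks
that follow):

	/*
	 * can_block tells blk_mq_sched_insert_request() whether it may
	 * sleep waiting for a driver tag when it sees an untagged
	 * flush/FUA request. Process context that can sleep, like the
	 * requeue worker, passes true; callers that must not block,
	 * like blk_insert_cloned_request(), pass false, and a failed
	 * tag allocation punts the request to the requeue list instead.
	 */
	blk_mq_sched_insert_request(rq, true, false, false, true);	/* requeue work */
	blk_mq_sched_insert_request(rq, false, true, false, false);	/* cloned request */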
block/blk-core.c
block/blk-exec.c
block/blk-flush.c
block/blk-mq-sched.c
block/blk-mq-sched.h
block/blk-mq-tag.c
block/blk-mq.c
block/blk-mq.h

diff --git a/block/blk-core.c b/block/blk-core.c
index b830e14117ddd27ea1cf9e81e3b8a94d0ddbc2da..4bfd8674afd03124c68cd435356331a5b707a1d7 100644
@@ -2129,7 +2129,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
        if (q->mq_ops) {
                if (blk_queue_io_stat(q))
                        blk_account_io_start(rq, true);
-               blk_mq_sched_insert_request(rq, false, true, false);
+               blk_mq_sched_insert_request(rq, false, true, false, false);
                return 0;
        }
 
diff --git a/block/blk-exec.c b/block/blk-exec.c
index 86656fdfa6378e07143d4f58bffc127c56f839ae..ed1f101652688cccda2e04d26ffec7919c81b0dd 100644
@@ -66,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
         * be reused after dying flag is set
         */
        if (q->mq_ops) {
-               blk_mq_sched_insert_request(rq, at_head, true, false);
+               blk_mq_sched_insert_request(rq, at_head, true, false, false);
                return;
        }
 
diff --git a/block/blk-flush.c b/block/blk-flush.c
index d7de34ee39c2082d9fdd2f224dd0c9c49eae732e..4427896641ac7421aa7a1bdef6cc72eacda3e8eb 100644
@@ -456,7 +456,7 @@ void blk_insert_flush(struct request *rq)
        if ((policy & REQ_FSEQ_DATA) &&
            !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
                if (q->mq_ops)
-                       blk_mq_sched_insert_request(rq, false, true, false);
+                       blk_mq_sched_insert_request(rq, false, true, false, false);
                else
                        list_add_tail(&rq->queuelist, &q->queue_head);
                return;
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4139b07ab33bdc0253c46e044d0c68d5366df19d..1112752f888d0ae1fc5ecb1aa255447e16fc26ea 100644
@@ -335,6 +335,64 @@ void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
        }
 }
 
+/*
+ * Add flush/fua to the queue. If we fail getting a driver tag, then
+ * punt to the requeue list. Requeue will re-invoke us from a context
+ * that's safe to block from.
+ */
+static void blk_mq_sched_insert_flush(struct blk_mq_hw_ctx *hctx,
+                                     struct request *rq, bool can_block)
+{
+       if (blk_mq_get_driver_tag(rq, &hctx, can_block)) {
+               blk_insert_flush(rq);
+               blk_mq_run_hw_queue(hctx, true);
+       } else
+               blk_mq_add_to_requeue_list(rq, true, true);
+}
+
+void blk_mq_sched_insert_request(struct request *rq, bool at_head,
+                                bool run_queue, bool async, bool can_block)
+{
+       struct request_queue *q = rq->q;
+       struct elevator_queue *e = q->elevator;
+       struct blk_mq_ctx *ctx = rq->mq_ctx;
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+
+       if (rq->tag == -1 && (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))) {
+               blk_mq_sched_insert_flush(hctx, rq, can_block);
+               return;
+       }
+
+       if (e && e->type->ops.mq.insert_requests) {
+               LIST_HEAD(list);
+
+               list_add(&rq->queuelist, &list);
+               e->type->ops.mq.insert_requests(hctx, &list, at_head);
+       } else {
+               spin_lock(&ctx->lock);
+               __blk_mq_insert_request(hctx, rq, at_head);
+               spin_unlock(&ctx->lock);
+       }
+
+       if (run_queue)
+               blk_mq_run_hw_queue(hctx, async);
+}
+
+void blk_mq_sched_insert_requests(struct request_queue *q,
+                                 struct blk_mq_ctx *ctx,
+                                 struct list_head *list, bool run_queue_async)
+{
+       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+       struct elevator_queue *e = hctx->queue->elevator;
+
+       if (e && e->type->ops.mq.insert_requests)
+               e->type->ops.mq.insert_requests(hctx, list, false);
+       else
+               blk_mq_insert_requests(hctx, ctx, list);
+
+       blk_mq_run_hw_queue(hctx, run_queue_async);
+}
+
 static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
                                   struct blk_mq_hw_ctx *hctx,
                                   unsigned int hctx_idx)
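
The punt in blk_mq_sched_insert_flush() above closes a loop with the
requeue machinery: a flush that cannot take a driver tag without
blocking goes onto the requeue list, and blk_mq_requeue_work() (see the
block/blk-mq.c hunk below) re-inserts it with can_block set, since the
worker runs in process context where sleeping for a tag is safe. The
round trip, in comment form (both calls appear verbatim in the hunks):

	/*
	 * 1. Insert from a context that must not block, no tag free:
	 *        blk_mq_sched_insert_flush(hctx, rq, false)
	 *          -> blk_mq_add_to_requeue_list(rq, true, true);
	 * 2. blk_mq_requeue_work() re-inserts from process context:
	 *        blk_mq_sched_insert_request(rq, true, false, false, true);
	 *    can_block is now true, so the tag wait may sleep and the
	 *    flush proceeds through blk_insert_flush().
	 */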
diff --git a/block/blk-mq-sched.h b/block/blk-mq-sched.h
index becbc78403643609f3c9f10ba31c4762be4ab95c..9478aaeb48c59b9a72e40e9e465e39f0e24df437 100644
@@ -21,6 +21,12 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
 bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
 void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
 
+void blk_mq_sched_insert_request(struct request *rq, bool at_head,
+                                bool run_queue, bool async, bool can_block);
+void blk_mq_sched_insert_requests(struct request_queue *q,
+                                 struct blk_mq_ctx *ctx,
+                                 struct list_head *list, bool run_queue_async);
+
 void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
 void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
                        struct list_head *rq_list,
@@ -62,45 +68,6 @@ static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
                e->type->ops.mq.put_rq_priv(q, rq);
 }
 
-static inline void
-blk_mq_sched_insert_request(struct request *rq, bool at_head, bool run_queue,
-                           bool async)
-{
-       struct request_queue *q = rq->q;
-       struct elevator_queue *e = q->elevator;
-       struct blk_mq_ctx *ctx = rq->mq_ctx;
-       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-
-       if (e && e->type->ops.mq.insert_requests) {
-               LIST_HEAD(list);
-
-               list_add(&rq->queuelist, &list);
-               e->type->ops.mq.insert_requests(hctx, &list, at_head);
-       } else {
-               spin_lock(&ctx->lock);
-               __blk_mq_insert_request(hctx, rq, at_head);
-               spin_unlock(&ctx->lock);
-       }
-
-       if (run_queue)
-               blk_mq_run_hw_queue(hctx, async);
-}
-
-static inline void
-blk_mq_sched_insert_requests(struct request_queue *q, struct blk_mq_ctx *ctx,
-                            struct list_head *list, bool run_queue_async)
-{
-       struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
-       struct elevator_queue *e = hctx->queue->elevator;
-
-       if (e && e->type->ops.mq.insert_requests)
-               e->type->ops.mq.insert_requests(hctx, list, false);
-       else
-               blk_mq_insert_requests(hctx, ctx, list);
-
-       blk_mq_run_hw_queue(hctx, run_queue_async);
-}
-
 static inline bool
 blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
                         struct bio *bio)
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index f8de2dbbb29fcba85bc0fc26b8bad85624f86114..54c84363c1b2385899472d77cb4fc93c81a59643 100644
@@ -106,6 +106,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
        struct sbq_wait_state *ws;
        DEFINE_WAIT(wait);
        unsigned int tag_offset;
+       bool drop_ctx;
        int tag;
 
        if (data->flags & BLK_MQ_REQ_RESERVED) {
@@ -128,6 +129,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                return BLK_MQ_TAG_FAIL;
 
        ws = bt_wait_ptr(bt, data->hctx);
+       drop_ctx = data->ctx == NULL;
        do {
                prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
 
@@ -150,7 +152,8 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                if (tag != -1)
                        break;
 
-               blk_mq_put_ctx(data->ctx);
+               if (data->ctx)
+                       blk_mq_put_ctx(data->ctx);
 
                io_schedule();
 
@@ -166,6 +169,9 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
                ws = bt_wait_ptr(bt, data->hctx);
        } while (1);
 
+       if (drop_ctx && data->ctx)
+               blk_mq_put_ctx(data->ctx);
+
        finish_wait(&ws->wait, &wait);
 
 found_tag:
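
The drop_ctx logic exists because blk_mq_get_driver_tag() now allocates
with a NULL ctx: the retry loop re-resolves the current CPU's software
queue after sleeping (presumably via blk_mq_get_ctx(); the
reacquisition falls outside this hunk's context lines), and drop_ctx
records that the caller never owned a ctx, so whatever was picked up
while waiting is released before returning instead of leaked.
Condensed:

	bool drop_ctx = data->ctx == NULL;	/* driver-tag path pins no ctx */

	/* ... sleep and retry; the loop may set data->ctx on the new CPU ... */

	if (drop_ctx && data->ctx)
		blk_mq_put_ctx(data->ctx);	/* drop the ctx acquired while waiting */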
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da2123dd681e3e6d8e1abe8ed0719e1fcf130a93..60dac10228fe0fc410921b176415c57c3c6a6b11 100644
@@ -568,13 +568,13 @@ static void blk_mq_requeue_work(struct work_struct *work)
 
                rq->rq_flags &= ~RQF_SOFTBARRIER;
                list_del_init(&rq->queuelist);
-               blk_mq_sched_insert_request(rq, true, false, false);
+               blk_mq_sched_insert_request(rq, true, false, false, true);
        }
 
        while (!list_empty(&rq_list)) {
                rq = list_entry(rq_list.next, struct request, queuelist);
                list_del_init(&rq->queuelist);
-               blk_mq_sched_insert_request(rq, false, false, false);
+               blk_mq_sched_insert_request(rq, false, false, false, true);
        }
 
        blk_mq_run_hw_queues(q, false);
@@ -847,12 +847,11 @@ static inline unsigned int queued_to_index(unsigned int queued)
        return min(BLK_MQ_MAX_DISPATCH_ORDER - 1, ilog2(queued) + 1);
 }
 
-static bool blk_mq_get_driver_tag(struct request *rq,
-                                 struct blk_mq_hw_ctx **hctx, bool wait)
+bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+                          bool wait)
 {
        struct blk_mq_alloc_data data = {
                .q = rq->q,
-               .ctx = rq->mq_ctx,
                .hctx = blk_mq_map_queue(rq->q, rq->mq_ctx->cpu),
                .flags = wait ? 0 : BLK_MQ_REQ_NOWAIT,
        };
@@ -1395,7 +1394,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
        }
 
 insert:
-       blk_mq_sched_insert_request(rq, false, true, true);
+       blk_mq_sched_insert_request(rq, false, true, true, false);
 }
 
 /*
@@ -1446,10 +1445,12 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
        cookie = request_to_qc_t(data.hctx, rq);
 
        if (unlikely(is_flush_fua)) {
+               blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_get_driver_tag(rq, NULL, true);
                blk_insert_flush(rq);
-               goto run_queue;
+               blk_mq_run_hw_queue(data.hctx, true);
+               goto done;
        }
 
        plug = current->plug;
@@ -1502,7 +1503,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true,
-                                               !is_sync || is_flush_fua);
+                                               !is_sync || is_flush_fua, true);
                goto done;
        }
        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1512,7 +1513,6 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
-run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }
        blk_mq_put_ctx(data.ctx);
@@ -1568,10 +1568,12 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
        cookie = request_to_qc_t(data.hctx, rq);
 
        if (unlikely(is_flush_fua)) {
+               blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_get_driver_tag(rq, NULL, true);
                blk_insert_flush(rq);
-               goto run_queue;
+               blk_mq_run_hw_queue(data.hctx, true);
+               goto done;
        }
 
        /*
@@ -1612,7 +1614,7 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
                blk_mq_put_ctx(data.ctx);
                blk_mq_bio_to_request(rq, bio);
                blk_mq_sched_insert_request(rq, false, true,
-                                               !is_sync || is_flush_fua);
+                                               !is_sync || is_flush_fua, true);
                goto done;
        }
        if (!blk_mq_merge_queue_io(data.hctx, data.ctx, rq, bio)) {
@@ -1622,7 +1624,6 @@ static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
                 * latter allows for merging opportunities and more efficient
                 * dispatching.
                 */
-run_queue:
                blk_mq_run_hw_queue(data.hctx, !is_sync || is_flush_fua);
        }
 
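
Both make_request variants get the same fix in their flush/FUA branch:
blk_mq_get_driver_tag(rq, NULL, true) can sleep waiting for a tag, and
the ctx reference taken earlier pins the CPU (blk_mq_get_ctx() takes a
get_cpu()-style reference), so it has to be dropped first. The old code
instead jumped to the run_queue label and released the ctx only at the
end of the function. The reordered branch, assembled from the hunk
above:

	if (unlikely(is_flush_fua)) {
		blk_mq_put_ctx(data.ctx);		/* may sleep below */
		blk_mq_bio_to_request(rq, bio);
		blk_mq_get_driver_tag(rq, NULL, true);	/* blocking tag wait */
		blk_insert_flush(rq);
		blk_mq_run_hw_queue(data.hctx, true);
		goto done;
	}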
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 077a4003f1fd0947238bb1ac7104ec9f88f2f818..57cdbf6c0cee964837cdcef208b02cebecaba023 100644
@@ -34,6 +34,8 @@ void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
+bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
+                               bool wait);
 
 /*
  * Internal helpers for allocating/freeing the request map