blk-mq: improve the blk_mq_init_allocated_queue interface
author: Christoph Hellwig <hch@lst.de>
Wed, 2 Jun 2021 06:53:17 +0000 (09:53 +0300)
committer: Jens Axboe <axboe@kernel.dk>
Fri, 11 Jun 2021 17:53:02 +0000 (11:53 -0600)
Don't return the passed-in request_queue but a normal error code, and
drop the elevator_init argument in favor of just calling elevator_init_mq
directly from dm-rq.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
Link: https://lore.kernel.org/r/20210602065345.355274-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-mq.c
block/blk.h
block/elevator.c
drivers/md/dm-rq.c
include/linux/blk-mq.h
include/linux/elevator.h

index 867e5faf4f5b527978810d0d483f1d81ea5a52a7..8550ad64982fad1073b2a63f289c816c43ad27c6 100644 (file)
@@ -3115,21 +3115,18 @@ void blk_mq_release(struct request_queue *q)
 struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
                void *queuedata)
 {
-       struct request_queue *uninit_q, *q;
+       struct request_queue *q;
+       int ret;
 
-       uninit_q = blk_alloc_queue(set->numa_node);
-       if (!uninit_q)
+       q = blk_alloc_queue(set->numa_node);
+       if (!q)
                return ERR_PTR(-ENOMEM);
-       uninit_q->queuedata = queuedata;
-
-       /*
-        * Initialize the queue without an elevator. device_add_disk() will do
-        * the initialization.
-        */
-       q = blk_mq_init_allocated_queue(set, uninit_q, false);
-       if (IS_ERR(q))
-               blk_cleanup_queue(uninit_q);
-
+       q->queuedata = queuedata;
+       ret = blk_mq_init_allocated_queue(set, q);
+       if (ret) {
+               blk_cleanup_queue(q);
+               return ERR_PTR(ret);
+       }
        return q;
 }
 EXPORT_SYMBOL_GPL(blk_mq_init_queue_data);
@@ -3273,9 +3270,8 @@ static void blk_mq_realloc_hw_ctxs(struct blk_mq_tag_set *set,
        mutex_unlock(&q->sysfs_lock);
 }
 
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
-                                                 struct request_queue *q,
-                                                 bool elevator_init)
+int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+               struct request_queue *q)
 {
        /* mark the queue as mq asap */
        q->mq_ops = set->ops;
@@ -3325,11 +3321,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
        blk_mq_init_cpu_queues(q, set->nr_hw_queues);
        blk_mq_add_queue_tag_set(set, q);
        blk_mq_map_swqueue(q);
-
-       if (elevator_init)
-               elevator_init_mq(q);
-
-       return q;
+       return 0;
 
 err_hctxs:
        kfree(q->queue_hw_ctx);
@@ -3340,7 +3332,7 @@ err_poll:
        q->poll_cb = NULL;
 err_exit:
        q->mq_ops = NULL;
-       return ERR_PTR(-ENOMEM);
+       return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_mq_init_allocated_queue);
 
index 3440142f029b22fcbed16b1f1ca1a701ae37f42f..d3fa47af3607cbb3fda0f52430bc00dcdc595c0d 100644 (file)
@@ -192,7 +192,6 @@ void blk_account_io_done(struct request *req, u64 now);
 
 void blk_insert_flush(struct request *rq);
 
-void elevator_init_mq(struct request_queue *q);
 int elevator_switch_mq(struct request_queue *q,
                              struct elevator_type *new_e);
 void __elevator_exit(struct request_queue *, struct elevator_queue *);
index 440699c28119337be8c88f40dc826784612a531d..06e203426410bbba0bc375c62c10c9572e10b15a 100644 (file)
@@ -693,7 +693,7 @@ void elevator_init_mq(struct request_queue *q)
                elevator_put(e);
        }
 }
-
+EXPORT_SYMBOL_GPL(elevator_init_mq); /* only for dm-rq */
 
 /*
  * switch to new_e io scheduler. be careful not to introduce deadlocks -
index 9c3bc3711b3350c8aad4a03e62ab72d20c806aa3..0dbd48cbdff95d13b9fb856adeeaf9959bfd7c40 100644 (file)
@@ -530,7 +530,6 @@ static const struct blk_mq_ops dm_mq_ops = {
 
 int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
 {
-       struct request_queue *q;
        struct dm_target *immutable_tgt;
        int err;
 
@@ -557,12 +556,10 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
        if (err)
                goto out_kfree_tag_set;
 
-       q = blk_mq_init_allocated_queue(md->tag_set, md->queue, true);
-       if (IS_ERR(q)) {
-               err = PTR_ERR(q);
+       err = blk_mq_init_allocated_queue(md->tag_set, md->queue);
+       if (err)
                goto out_tag_set;
-       }
-
+       elevator_init_mq(md->queue);
        return 0;
 
 out_tag_set:
index bb950fc669ef5a68000f6228067069507d64920e..73750b2838d2c359ca7a442a29e0c4e0628f046d 100644 (file)
@@ -429,9 +429,8 @@ enum {
 struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
                void *queuedata);
-struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
-                                                 struct request_queue *q,
-                                                 bool elevator_init);
+int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
+               struct request_queue *q);
 struct request_queue *blk_mq_init_sq_queue(struct blk_mq_tag_set *set,
                                                const struct blk_mq_ops *ops,
                                                unsigned int queue_depth,
index dcb2f9022c1dfdf70471c52b8499a2bb601f8789..783ecb3cb77aa27c1e4275fbe407c14bcff34045 100644 (file)
@@ -120,6 +120,7 @@ extern void elv_merged_request(struct request_queue *, struct request *,
 extern bool elv_attempt_insert_merge(struct request_queue *, struct request *);
 extern struct request *elv_former_request(struct request_queue *, struct request *);
 extern struct request *elv_latter_request(struct request_queue *, struct request *);
+void elevator_init_mq(struct request_queue *q);
 
 /*
  * io scheduler registration