rbd: pass queue_limits to blk_mq_alloc_disk
author: Christoph Hellwig <hch@lst.de>
Thu, 15 Feb 2024 07:02:50 +0000 (08:02 +0100)
committer: Jens Axboe <axboe@kernel.dk>
Mon, 19 Feb 2024 23:59:31 +0000 (16:59 -0700)
Pass the limits rbd imposes directly to blk_mq_alloc_disk instead
of setting them one at a time.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20240215070300.2200308-8-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/block/rbd.c

index 6b4f1898a722a392ece02d866f1cb73a515b509f..26ff5cd2bf0abc118d5c83cdf733554a3be97e0c 100644 (file)
@@ -4952,6 +4952,14 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        struct request_queue *q;
        unsigned int objset_bytes =
            rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
+       struct queue_limits lim = {
+               .max_hw_sectors         = objset_bytes >> SECTOR_SHIFT,
+               .max_user_sectors       = objset_bytes >> SECTOR_SHIFT,
+               .io_min                 = rbd_dev->opts->alloc_size,
+               .io_opt                 = rbd_dev->opts->alloc_size,
+               .max_segments           = USHRT_MAX,
+               .max_segment_size       = UINT_MAX,
+       };
        int err;
 
        memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
@@ -4966,7 +4974,13 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        if (err)
                return err;
 
-       disk = blk_mq_alloc_disk(&rbd_dev->tag_set, NULL, rbd_dev);
+       if (rbd_dev->opts->trim) {
+               lim.discard_granularity = rbd_dev->opts->alloc_size;
+               lim.max_hw_discard_sectors = objset_bytes >> SECTOR_SHIFT;
+               lim.max_write_zeroes_sectors = objset_bytes >> SECTOR_SHIFT;
+       }
+
+       disk = blk_mq_alloc_disk(&rbd_dev->tag_set, &lim, rbd_dev);
        if (IS_ERR(disk)) {
                err = PTR_ERR(disk);
                goto out_tag_set;
@@ -4987,19 +5001,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
        /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
 
-       blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
-       q->limits.max_sectors = queue_max_hw_sectors(q);
-       blk_queue_max_segments(q, USHRT_MAX);
-       blk_queue_max_segment_size(q, UINT_MAX);
-       blk_queue_io_min(q, rbd_dev->opts->alloc_size);
-       blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
-
-       if (rbd_dev->opts->trim) {
-               q->limits.discard_granularity = rbd_dev->opts->alloc_size;
-               blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
-               blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
-       }
-
        if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
                blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);