block: Add common atomic writes enable flag
author John Garry <john.g.garry@oracle.com>
Thu, 16 Jan 2025 17:02:54 +0000 (17:02 +0000)
committer Jens Axboe <axboe@kernel.dk>
Fri, 17 Jan 2025 20:13:54 +0000 (13:13 -0700)
Currently only stacked devices need to explicitly enable atomic writes by
setting BLK_FEAT_ATOMIC_WRITES_STACKED flag.

This does not work well for device mapper stacking devices, where many
sets of limits are stacked and the 'bottom' and 'top' devices can be
swapped. This means that BLK_FEAT_ATOMIC_WRITES_STACKED would need to be
set for many queue limits, which is messy.

Generalize enabling atomic writes by requiring that every device
explicitly set a flag - that includes NVMe, SCSI sd, and md raid.
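
For illustration, a minimal driver-side sketch of the new convention,
modelled on the NVMe and SCSI sd hunks below; the my_driver_config_atomic()
name and its parameters are hypothetical, not part of this patch:

static void my_driver_config_atomic(struct queue_limits *lim,
				    unsigned int block_size,
				    unsigned int max_atomic_bytes)
{
	/* Advertise the hardware atomic write geometry. */
	lim->atomic_write_hw_unit_min = block_size;
	lim->atomic_write_hw_unit_max = rounddown_pow_of_two(max_atomic_bytes);
	lim->atomic_write_hw_max = max_atomic_bytes;
	lim->atomic_write_hw_boundary = 0;

	/*
	 * Without this flag, blk_validate_atomic_write_limits() now
	 * clears the limits above and the device reports no atomic
	 * write support.
	 */
	lim->features |= BLK_FEAT_ATOMIC_WRITES;
}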

Signed-off-by: John Garry <john.g.garry@oracle.com>
Reviewed-by: Mike Snitzer <snitzer@kernel.org>
Link: https://lore.kernel.org/r/20250116170301.474130-2-john.g.garry@oracle.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
block/blk-settings.c
drivers/md/raid0.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/nvme/host/core.c
drivers/scsi/sd.c
include/linux/blkdev.h

index c8368ee8de2ec28561887e8747bc69705097e420..db12396ff5c793a402e2057949d56e0fd7382d2c 100644 (file)
@@ -175,6 +175,9 @@ static void blk_validate_atomic_write_limits(struct queue_limits *lim)
 {
        unsigned int boundary_sectors;
 
+       if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
+               goto unsupported;
+
        if (!lim->atomic_write_hw_max)
                goto unsupported;
 
@@ -611,7 +614,7 @@ static bool blk_stack_atomic_writes_head(struct queue_limits *t,
 static void blk_stack_atomic_writes_limits(struct queue_limits *t,
                                struct queue_limits *b, sector_t start)
 {
-       if (!(t->features & BLK_FEAT_ATOMIC_WRITES_STACKED))
+       if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
                goto unsupported;
 
        if (!b->atomic_write_hw_unit_min)
@@ -639,7 +642,6 @@ unsupported:
        t->atomic_write_hw_unit_max = 0;
        t->atomic_write_hw_unit_min = 0;
        t->atomic_write_hw_boundary = 0;
-       t->features &= ~BLK_FEAT_ATOMIC_WRITES_STACKED;
 }
 
 /**
index 7049ec7fb8eb4449b9e62db6410bf5e97fc6ec57..8fc9339b00c728f3e46e9b143b35a8af5aeb472b 100644 (file)
@@ -384,7 +384,7 @@ static int raid0_set_limits(struct mddev *mddev)
        lim.max_write_zeroes_sectors = mddev->chunk_sectors;
        lim.io_min = mddev->chunk_sectors << 9;
        lim.io_opt = lim.io_min * mddev->raid_disks;
-       lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+       lim.features |= BLK_FEAT_ATOMIC_WRITES;
        err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
        if (err) {
                queue_limits_cancel_update(mddev->gendisk->queue);
index a5cd6522fc2d4db959be26b83984fd75ec10f835..9d57a88dbd261184194545e3565fa6e576396474 100644 (file)
@@ -3217,7 +3217,7 @@ static int raid1_set_limits(struct mddev *mddev)
 
        md_init_stacking_limits(&lim);
        lim.max_write_zeroes_sectors = 0;
-       lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+       lim.features |= BLK_FEAT_ATOMIC_WRITES;
        err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
        if (err) {
                queue_limits_cancel_update(mddev->gendisk->queue);
index e1e6cd7fb125e17fca98820379d9d8bd70f3fe9c..efe93b9791677ed982b76680cd0cb7e9329961be 100644 (file)
@@ -4018,7 +4018,7 @@ static int raid10_set_queue_limits(struct mddev *mddev)
        lim.max_write_zeroes_sectors = 0;
        lim.io_min = mddev->chunk_sectors << 9;
        lim.io_opt = lim.io_min * raid10_nr_stripes(conf);
-       lim.features |= BLK_FEAT_ATOMIC_WRITES_STACKED;
+       lim.features |= BLK_FEAT_ATOMIC_WRITES;
        err = mddev_stack_rdev_limits(mddev, &lim, MDDEV_STACK_INTEGRITY);
        if (err) {
                queue_limits_cancel_update(mddev->gendisk->queue);
index 0d21258e22833369ff218e6bf1138588edf8f0da..2147069775c629ea8f03e8ee853f1ecc68145ea9 100644 (file)
@@ -2002,6 +2002,7 @@ static void nvme_update_atomic_write_disk_info(struct nvme_ns *ns,
        lim->atomic_write_hw_boundary = boundary;
        lim->atomic_write_hw_unit_min = bs;
        lim->atomic_write_hw_unit_max = rounddown_pow_of_two(atomic_bs);
+       lim->features |= BLK_FEAT_ATOMIC_WRITES;
 }
 
 static u32 nvme_max_drv_segments(struct nvme_ctrl *ctrl)
index af62a8ed8620043db47e3730b7b2f325fa47f502..a48c4d5edfa3fe4e19d5617c24d70164fd751b2f 100644 (file)
@@ -991,6 +991,7 @@ static void sd_config_atomic(struct scsi_disk *sdkp, struct queue_limits *lim)
        lim->atomic_write_hw_boundary = 0;
        lim->atomic_write_hw_unit_min = unit_min * logical_block_size;
        lim->atomic_write_hw_unit_max = unit_max * logical_block_size;
+       lim->features |= BLK_FEAT_ATOMIC_WRITES;
 }
 
 static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
index 7ac153e4423a6fff5aa63ced47808674c51355cd..76f0a4e7c2e5d7b3ca922e0d0ea9ceb79cba8ee9 100644 (file)
@@ -331,8 +331,8 @@ typedef unsigned int __bitwise blk_features_t;
 #define BLK_FEAT_RAID_PARTIAL_STRIPES_EXPENSIVE \
        ((__force blk_features_t)(1u << 15))
 
-/* stacked device can/does support atomic writes */
-#define BLK_FEAT_ATOMIC_WRITES_STACKED \
+/* atomic writes enabled */
+#define BLK_FEAT_ATOMIC_WRITES \
        ((__force blk_features_t)(1u << 16))
 
 /*