dm: always manage discard support in terms of max_hw_discard_sectors
authorMike Snitzer <snitzer@kernel.org>
Mon, 20 May 2024 17:34:06 +0000 (13:34 -0400)
committerMike Snitzer <snitzer@kernel.org>
Mon, 20 May 2024 19:51:19 +0000 (15:51 -0400)
Commit 4f563a64732d ("block: add a max_user_discard_sectors queue
limit") changed block core to set max_discard_sectors to:
 min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors)

Since commit 1c0e720228ad ("dm: use queue_limits_set") it was reported
that dm-thinp was failing in a few fstests (generic/347 and
generic/405), with the first WARN_ON_ONCE in
dm_cell_key_has_valid_range() being reported, e.g.:
WARNING: CPU: 1 PID: 30 at drivers/md/dm-bio-prison-v1.c:128 dm_cell_key_has_valid_range+0x3d/0x50

blk_set_stacking_limits() sets max_user_discard_sectors to UINT_MAX,
so given how block core now sets max_discard_sectors (detailed above)
it follows that blk_stack_limits() stacks up the underlying device's
max_hw_discard_sectors and max_discard_sectors is set to match it. If
max_hw_discard_sectors exceeds dm's BIO_PRISON_MAX_RANGE, then
dm_cell_key_has_valid_range() will trigger the warning with:
WARN_ON_ONCE(key->block_end - key->block_begin > BIO_PRISON_MAX_RANGE)

Aside from this warning, the discard will fail.  Fix this and other DM
issues by governing discard support in terms of max_hw_discard_sectors
instead of max_discard_sectors.

Reported-by: Theodore Ts'o <tytso@mit.edu>
Fixes: 1c0e720228ad ("dm: use queue_limits_set")
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
drivers/md/dm-cache-target.c
drivers/md/dm-clone-target.c
drivers/md/dm-log-writes.c
drivers/md/dm-snap.c
drivers/md/dm-target.c
drivers/md/dm-thin.c
drivers/md/dm-zero.c
drivers/md/dm-zoned-target.c
drivers/md/dm.c

index 911f73f7ebbaa07b4fe345d4471ba1106a9e2190..1f0bc117323035c698406c9a6a9d141e3b65a5d2 100644 (file)
@@ -3394,8 +3394,8 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
 
        if (!cache->features.discard_passdown) {
                /* No passdown is done so setting own virtual limits */
-               limits->max_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
-                                                   cache->origin_sectors);
+               limits->max_hw_discard_sectors = min_t(sector_t, cache->discard_block_size * 1024,
+                                                      cache->origin_sectors);
                limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
                return;
        }
@@ -3404,7 +3404,6 @@ static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
         * cache_iterate_devices() is stacking both origin and fast device limits
         * but discards aren't passed to fast device, so inherit origin's limits.
         */
-       limits->max_discard_sectors = origin_limits->max_discard_sectors;
        limits->max_hw_discard_sectors = origin_limits->max_hw_discard_sectors;
        limits->discard_granularity = origin_limits->discard_granularity;
        limits->discard_alignment = origin_limits->discard_alignment;
index 94b2fc33f64be3663ef6ee76276b54c7dcfa7d31..2332d9798141299f9cc5177d54070fb6e9313bb8 100644 (file)
@@ -2050,7 +2050,8 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
        if (!test_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags)) {
                /* No passdown is done so we set our own virtual limits */
                limits->discard_granularity = clone->region_size << SECTOR_SHIFT;
-               limits->max_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT, clone->region_size);
+               limits->max_hw_discard_sectors = round_down(UINT_MAX >> SECTOR_SHIFT,
+                                                           clone->region_size);
                return;
        }
 
@@ -2059,7 +2060,6 @@ static void set_discard_limits(struct clone *clone, struct queue_limits *limits)
         * device limits but discards aren't passed to the source device, so
         * inherit destination's limits.
         */
-       limits->max_discard_sectors = dest_limits->max_discard_sectors;
        limits->max_hw_discard_sectors = dest_limits->max_hw_discard_sectors;
        limits->discard_granularity = dest_limits->discard_granularity;
        limits->discard_alignment = dest_limits->discard_alignment;
index f17a6cf2284ecf1a9530109c6a1d09939c3e67de..8d7df8303d0a18198d06c9081368834d8bb5f41e 100644 (file)
@@ -871,7 +871,7 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limit
        if (!bdev_max_discard_sectors(lc->dev->bdev)) {
                lc->device_supports_discard = false;
                limits->discard_granularity = lc->sectorsize;
-               limits->max_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
+               limits->max_hw_discard_sectors = (UINT_MAX >> SECTOR_SHIFT);
        }
        limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev);
        limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev);
index 0ace06d1bee384a1f87048c61d78213ffc82fee1..f40c18da400007af9f18ca2cb0a141cd43f33948 100644 (file)
@@ -2410,7 +2410,7 @@ static void snapshot_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
                /* All discards are split on chunk_size boundary */
                limits->discard_granularity = snap->store->chunk_size;
-               limits->max_discard_sectors = snap->store->chunk_size;
+               limits->max_hw_discard_sectors = snap->store->chunk_size;
 
                up_read(&_origins_lock);
        }
index 0c4efb0bef8a90ae668b02b2ea2f6557a1348180..652627aea11b6188d2873ade3e8717e746dea6dd 100644 (file)
@@ -249,7 +249,6 @@ static int io_err_iterate_devices(struct dm_target *ti,
 
 static void io_err_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
-       limits->max_discard_sectors = UINT_MAX;
        limits->max_hw_discard_sectors = UINT_MAX;
        limits->discard_granularity = 512;
 }
index 4793ad2aa1f7e85f44034c1a7b4f23b6ba43c4ac..e0528a4f809c4853185a1916628dcfb90f336f0a 100644 (file)
@@ -4100,7 +4100,7 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
        if (pt->adjusted_pf.discard_enabled) {
                disable_discard_passdown_if_not_supported(pt);
                if (!pt->adjusted_pf.discard_passdown)
-                       limits->max_discard_sectors = 0;
+                       limits->max_hw_discard_sectors = 0;
                /*
                 * The pool uses the same discard limits as the underlying data
                 * device.  DM core has already set this up.
@@ -4497,7 +4497,7 @@ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
        if (pool->pf.discard_enabled) {
                limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
-               limits->max_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
+               limits->max_hw_discard_sectors = pool->sectors_per_block * BIO_PRISON_MAX_RANGE;
        }
 }
 
index 3b13e6eb1aa47c027329771ba05fc1b5246f36ee..9a0bb623e823fa362016ef3f3b4b50789c16e8f1 100644 (file)
@@ -61,7 +61,6 @@ static int zero_map(struct dm_target *ti, struct bio *bio)
 
 static void zero_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
-       limits->max_discard_sectors = UINT_MAX;
        limits->max_hw_discard_sectors = UINT_MAX;
        limits->discard_granularity = 512;
 }
index 621794a9edd65ef831404a55e12393abaf69c7ee..12236e6f46f39c33ca392848f2320e32c55736f3 100644 (file)
@@ -1001,7 +1001,6 @@ static void dmz_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
        limits->discard_alignment = 0;
        limits->discard_granularity = DMZ_BLOCK_SIZE;
-       limits->max_discard_sectors = chunk_sectors;
        limits->max_hw_discard_sectors = chunk_sectors;
        limits->max_write_zeroes_sectors = chunk_sectors;
 
index 7d0746b37c8ec791f111d6e589476eb2b500e9d4..3adfc6b83c0133546f1b961be0cdf51eb292f043 100644 (file)
@@ -1086,7 +1086,7 @@ void disable_discard(struct mapped_device *md)
        struct queue_limits *limits = dm_get_queue_limits(md);
 
        /* device doesn't really support DISCARD, disable it */
-       limits->max_discard_sectors = 0;
+       limits->max_hw_discard_sectors = 0;
 }
 
 void disable_write_zeroes(struct mapped_device *md)