btrfs: zoned: make auto-reclaim less aggressive
author Johannes Thumshirn <johannes.thumshirn@wdc.com>
Tue, 29 Mar 2022 08:56:09 +0000 (01:56 -0700)
committer David Sterba <dsterba@suse.com>
Mon, 16 May 2022 15:03:11 +0000 (17:03 +0200)
The current auto-reclaim algorithm starts reclaiming all block groups
with a zone_unusable value above a configured threshold. This causes a
lot of reclaim IO even when there are still enough free zones on the
device.

Instead of only accounting a block group's zone_unusable value, also
take the ratio of free to not-usable (written as well as zone_unusable)
bytes on the device into account.
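
As an illustration (the numbers are made up, not from a measurement):
on a 1 TB zoned device with 600 GB either written or zone_unusable, the
device-wide used ratio is 60%. With a bg_reclaim_threshold of 75 the
reclaim worker now stays idle, whereas the per-block-group check alone
could already have started rewriting block groups.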

Tested-by: Pankaj Raghav <p.raghav@samsung.com>
Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/block-group.c
fs/btrfs/zoned.c
fs/btrfs/zoned.h

index fb3ce80fd7e4034694c548aa6259705f462ec183..7bf10afab89c77afe3a950aad3c13aa050267b4a 100644
@@ -1512,6 +1512,13 @@ static int reclaim_bgs_cmp(void *unused, const struct list_head *a,
        return bg1->used > bg2->used;
 }
 
+static inline bool btrfs_should_reclaim(struct btrfs_fs_info *fs_info)
+{
+       if (btrfs_is_zoned(fs_info))
+               return btrfs_zoned_should_reclaim(fs_info);
+       return true;
+}
+
 void btrfs_reclaim_bgs_work(struct work_struct *work)
 {
        struct btrfs_fs_info *fs_info =
@@ -1522,6 +1529,9 @@ void btrfs_reclaim_bgs_work(struct work_struct *work)
        if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
                return;
 
+       if (!btrfs_should_reclaim(fs_info))
+               return;
+
        sb_start_write(fs_info->sb);
 
        if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
index d31b0eda210f1eeb1a7c25667ab6a8a3816f1ef3..6e91022ae9f6996ab85c9359ca59344a71c791ae 100644
@@ -2073,3 +2073,30 @@ void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info)
        }
        mutex_unlock(&fs_devices->device_list_mutex);
 }
+
+bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
+       struct btrfs_device *device;
+       u64 used = 0;
+       u64 total = 0;
+       u64 factor;
+
+       ASSERT(btrfs_is_zoned(fs_info));
+
+       if (fs_info->bg_reclaim_threshold == 0)
+               return false;
+
+       mutex_lock(&fs_devices->device_list_mutex);
+       list_for_each_entry(device, &fs_devices->devices, dev_list) {
+               if (!device->bdev)
+                       continue;
+
+               total += device->disk_total_bytes;
+               used += device->bytes_used;
+       }
+       mutex_unlock(&fs_devices->device_list_mutex);
+
+       factor = div64_u64(used * 100, total);
+       return factor >= fs_info->bg_reclaim_threshold;
+}
index 12aaaccf299873c11b0ddde9c6dc827b0227174c..de923fc8449d36ccb3132140b322d01862701678 100644
@@ -74,6 +74,7 @@ void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info, u64 logical,
                             u64 length);
 void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg);
 void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info);
+bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info);
 #else /* CONFIG_BLK_DEV_ZONED */
 static inline int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
                                     struct blk_zone *zone)
@@ -232,6 +233,11 @@ static inline void btrfs_zone_finish_endio(struct btrfs_fs_info *fs_info,
 static inline void btrfs_clear_data_reloc_bg(struct btrfs_block_group *bg) { }
 
 static inline void btrfs_free_zone_cache(struct btrfs_fs_info *fs_info) { }
+
+static inline bool btrfs_zoned_should_reclaim(struct btrfs_fs_info *fs_info)
+{
+       return false;
+}
 #endif
 
 static inline bool btrfs_dev_is_sequential(struct btrfs_device *device, u64 pos)
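
To make the new heuristic concrete, here is a minimal userspace sketch
of the same ratio check. Everything in it is illustrative: fake_device,
should_reclaim() and the sizes in main() are hypothetical stand-ins, and
the kernel's div64_u64() is replaced by plain 64-bit division, which is
what it computes here.

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for the per-device byte counters the kernel
     * reads under device_list_mutex. */
    struct fake_device {
            uint64_t disk_total_bytes; /* total capacity */
            uint64_t bytes_used;       /* written + zone_unusable bytes */
    };

    /* Same shape as btrfs_zoned_should_reclaim(): only reclaim once the
     * used/total ratio across all devices reaches the threshold. */
    static bool should_reclaim(const struct fake_device *devs, int ndevs,
                               uint64_t bg_reclaim_threshold)
    {
            uint64_t used = 0, total = 0;
            int i;

            if (bg_reclaim_threshold == 0)
                    return false;

            for (i = 0; i < ndevs; i++) {
                    total += devs[i].disk_total_bytes;
                    used += devs[i].bytes_used;
            }

            return used * 100 / total >= bg_reclaim_threshold;
    }

    int main(void)
    {
            /* One 1 TB device with 600 GB written or zone_unusable. */
            struct fake_device devs[] = {
                    { 1000ULL << 30, 600ULL << 30 },
            };

            /* 60% used against a threshold of 75: no reclaim yet. */
            printf("should reclaim: %s\n",
                   should_reclaim(devs, 1, 75) ? "yes" : "no");
            return 0;
    }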