btrfs: handle allocation failure in btrfs_wq_submit_bio gracefully
author: Christoph Hellwig <hch@lst.de>
Fri, 17 Jun 2022 10:04:12 +0000 (12:04 +0200)
committer: David Sterba <dsterba@suse.com>
Mon, 25 Jul 2022 15:45:40 +0000 (17:45 +0200)
btrfs_wq_submit_bio is used for writeback under memory pressure.
Instead of failing the I/O when we can't allocate the async_submit_bio,
just punt back to the synchronous submission path.

Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Tested-by: Nikolay Borisov <nborisov@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Sterba <dsterba@suse.com>
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/inode.c

index 5719712f2d4c4fa977d8f4d4175fe35460f031e6..bcb6807ce19e86443c9f4abf3d47d24214a24758 100644 (file)
@@ -759,16 +759,23 @@ static void run_one_async_free(struct btrfs_work *work)
        kfree(async);
 }
 
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
-                                int mirror_num, u64 dio_file_offset,
-                                extent_submit_bio_start_t *submit_bio_start)
+/*
+ * Submit bio to an async queue.
+ *
+ * Return:
+ * - true if the work has been successfully submitted
+ * - false in case of error
+ */
+bool btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, int mirror_num,
+                        u64 dio_file_offset,
+                        extent_submit_bio_start_t *submit_bio_start)
 {
        struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
        struct async_submit_bio *async;
 
        async = kmalloc(sizeof(*async), GFP_NOFS);
        if (!async)
-               return BLK_STS_RESOURCE;
+               return false;
 
        async->inode = inode;
        async->bio = bio;
@@ -786,7 +793,7 @@ blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
                btrfs_queue_work(fs_info->hipri_workers, &async->work);
        else
                btrfs_queue_work(fs_info->workers, &async->work);
-       return 0;
+       return true;
 }
 
 static blk_status_t btree_csum_one_bio(struct bio *bio)
@@ -840,25 +847,23 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_
                btrfs_submit_bio(fs_info, bio, mirror_num);
                return;
        }
-       if (!should_async_write(fs_info, BTRFS_I(inode))) {
-               ret = btree_csum_one_bio(bio);
-               if (!ret) {
-                       btrfs_submit_bio(fs_info, bio, mirror_num);
-                       return;
-               }
-       } else {
-               /*
-                * kthread helpers are used to submit writes so that
-                * checksumming can happen in parallel across all CPUs
-                */
-               ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
-                                         btree_submit_bio_start);
-       }
 
+       /*
+        * Kthread helpers are used to submit writes so that checksumming can
+        * happen in parallel across all CPUs.
+        */
+       if (should_async_write(fs_info, BTRFS_I(inode)) &&
+           btrfs_wq_submit_bio(inode, bio, mirror_num, 0, btree_submit_bio_start))
+               return;
+
+       ret = btree_csum_one_bio(bio);
        if (ret) {
                bio->bi_status = ret;
                bio_endio(bio);
+               return;
        }
+
+       btrfs_submit_bio(fs_info, bio, mirror_num);
 }
 
 #ifdef CONFIG_MIGRATION
index 05e779a41a99794cc94298afa5e2503a2e869bb3..8993b428e09ceb72205368a78e124b2c11640d69 100644 (file)
@@ -114,9 +114,9 @@ int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
                          int atomic);
 int btrfs_read_extent_buffer(struct extent_buffer *buf, u64 parent_transid,
                             int level, struct btrfs_key *first_key);
-blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
-                                int mirror_num, u64 dio_file_offset,
-                                extent_submit_bio_start_t *submit_bio_start);
+bool btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, int mirror_num,
+                        u64 dio_file_offset,
+                        extent_submit_bio_start_t *submit_bio_start);
 blk_status_t btrfs_submit_bio_done(void *private_data, struct bio *bio,
                          int mirror_num);
 int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans,
index dade66ee220020b1bb0e98d165cf5a300e1f2efd..42616f51c62ed477c7442769b231c2081e79fb8f 100644 (file)
@@ -2674,11 +2674,10 @@ void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirro
        if (!(bi->flags & BTRFS_INODE_NODATASUM) &&
            !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state) &&
            !btrfs_is_data_reloc_root(bi->root)) {
-               if (!atomic_read(&bi->sync_writers)) {
-                       ret = btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
-                                                 btrfs_submit_bio_start);
-                       goto out;
-               }
+               if (!atomic_read(&bi->sync_writers) &&
+                   btrfs_wq_submit_bio(inode, bio, mirror_num, 0,
+                                       btrfs_submit_bio_start))
+                       return;
 
                ret = btrfs_csum_one_bio(bi, bio, (u64)-1, false);
                if (ret)
@@ -8027,9 +8026,11 @@ static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
 
        if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
                /* Check btrfs_submit_data_write_bio() for async submit rules */
-               if (async_submit && !atomic_read(&BTRFS_I(inode)->sync_writers))
-                       return btrfs_wq_submit_bio(inode, bio, 0, file_offset,
-                                       btrfs_submit_bio_start_direct_io);
+               if (async_submit && !atomic_read(&BTRFS_I(inode)->sync_writers) &&
+                   btrfs_wq_submit_bio(inode, bio, 0, file_offset,
+                                       btrfs_submit_bio_start_direct_io))
+                       return BLK_STS_OK;
+
                /*
                 * If we aren't doing async submit, calculate the csum of the
                 * bio now.