bcachefs: RO mounts now use less memory
author     Kent Overstreet <kent.overstreet@linux.dev>
           Sat, 5 Apr 2025 21:36:04 +0000 (17:36 -0400)
committer  Kent Overstreet <kent.overstreet@linux.dev>
           Thu, 22 May 2025 00:14:04 +0000 (20:14 -0400)
Defer memory allocations that are only needed in RW mode until we actually go RW.

This is part of improved support for RO images.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
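
The change boils down to a lazy, once-only RW initialization guarded by the new
BCH_FS_rw_init_done flag: a read-only mount never reaches bch2_fs_init_rw(), so
it never pays for the workqueues, biosets and journal state that only the write
path needs. Below is a minimal userspace sketch of that pattern, assuming
simplified stand-in names (struct fs, fs_init_rw, fs_go_rw, rw_init_done) rather
than the real bch_fs machinery shown in the diff.

	/* Hypothetical, simplified illustration -- not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct fs {
		bool	rw_init_done;	/* plays the role of BCH_FS_rw_init_done */
		void	*write_state;	/* stands in for the RW-only allocations */
	};

	static int fs_init_rw(struct fs *c)
	{
		if (c->rw_init_done)		/* already initialized: idempotent */
			return 0;

		c->write_state = malloc(1 << 20);	/* RW-only allocation */
		if (!c->write_state)
			return -1;		/* kernel code returns -BCH_ERR_ENOMEM_* here */

		c->rw_init_done = true;
		return 0;
	}

	static int fs_go_rw(struct fs *c)
	{
		/* Allocate RW-only resources on the RO -> RW transition */
		int ret = fs_init_rw(c);
		if (ret)
			return ret;

		printf("going read-write\n");
		return 0;
	}

	int main(void)
	{
		struct fs c = { 0 };

		/* An RO mount never calls fs_go_rw(), so it never allocates
		 * any of the write-side state -- the point of this commit. */
		int ret = fs_go_rw(&c);

		free(c.write_state);
		return ret ? 1 : 0;
	}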
fs/bcachefs/bcachefs.h
fs/bcachefs/io_read.c
fs/bcachefs/io_write.c
fs/bcachefs/super.c

diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 09df91f10c203c6ab978ba99c46a9ab0d3f31fdb..1e40ad2a7bce7fd913290db62d304f43673b715b 100644
@@ -614,6 +614,7 @@ struct bch_dev {
        x(accounting_replay_done)       \
        x(may_go_rw)                    \
        x(rw)                           \
+       x(rw_init_done)                 \
        x(was_rw)                       \
        x(stopping)                     \
        x(emergency_ro)                 \
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index e5b3e987d7bbbcdfce57f7817eaef6a345e952f0..e490f136d63d4c98d52d2a7a303b2045144264af 100644
@@ -1490,10 +1490,18 @@ void bch2_fs_io_read_exit(struct bch_fs *c)
                rhashtable_destroy(&c->promote_table);
        bioset_exit(&c->bio_read_split);
        bioset_exit(&c->bio_read);
+       mempool_exit(&c->bio_bounce_pages);
 }
 
 int bch2_fs_io_read_init(struct bch_fs *c)
 {
+       if (mempool_init_page_pool(&c->bio_bounce_pages,
+                                  max_t(unsigned,
+                                        c->opts.btree_node_size,
+                                        c->opts.encoded_extent_max) /
+                                  PAGE_SIZE, 0))
+               return -BCH_ERR_ENOMEM_bio_bounce_pages_init;
+
        if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
                        BIOSET_NEED_BVECS))
                return -BCH_ERR_ENOMEM_bio_read_init;
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index c1237da079ede2076879fcf674152612d78ae6a3..401347e135b7c76a684869ad99195aabb1a18837 100644
@@ -1744,7 +1744,6 @@ void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
 
 void bch2_fs_io_write_exit(struct bch_fs *c)
 {
-       mempool_exit(&c->bio_bounce_pages);
        bioset_exit(&c->replica_set);
        bioset_exit(&c->bio_write);
 }
@@ -1755,12 +1754,5 @@ int bch2_fs_io_write_init(struct bch_fs *c)
            bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0))
                return -BCH_ERR_ENOMEM_bio_write_init;
 
-       if (mempool_init_page_pool(&c->bio_bounce_pages,
-                                  max_t(unsigned,
-                                        c->opts.btree_node_size,
-                                        c->opts.encoded_extent_max) /
-                                  PAGE_SIZE, 0))
-               return -BCH_ERR_ENOMEM_bio_bounce_pages_init;
-
        return 0;
 }
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 9cff32bde7a464628053cb0b48d4b1e17747f04a..834ba091e84f65963ad75ba487a34cec24572066 100644
@@ -183,6 +183,7 @@ static int bch2_dev_alloc(struct bch_fs *, unsigned);
 static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
 static void bch2_dev_io_ref_stop(struct bch_dev *, int);
 static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
+static int bch2_fs_init_rw(struct bch_fs *);
 
 struct bch_fs *bch2_dev_to_fs(dev_t dev)
 {
@@ -439,6 +440,10 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 
        bch_info(c, "going read-write");
 
+       ret = bch2_fs_init_rw(c);
+       if (ret)
+               goto err;
+
        ret = bch2_sb_members_v2_init(c);
        if (ret)
                goto err;
@@ -736,6 +741,35 @@ err:
        return ret;
 }
 
+static int bch2_fs_init_rw(struct bch_fs *c)
+{
+       if (test_bit(BCH_FS_rw_init_done, &c->flags))
+               return 0;
+
+       if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
+                               WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_UNBOUND, 512)) ||
+           !(c->btree_write_complete_wq = alloc_workqueue("bcachefs_btree_write_complete",
+                               WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
+           !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
+                               WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
+           !(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_sumit",
+                               WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
+           !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
+                               WQ_FREEZABLE, 0)))
+               return -BCH_ERR_ENOMEM_fs_other_alloc;
+
+       int ret = bch2_fs_btree_interior_update_init(c) ?:
+               bch2_fs_btree_write_buffer_init(c) ?:
+               bch2_fs_fs_io_buffered_init(c) ?:
+               bch2_fs_io_write_init(c) ?:
+               bch2_fs_journal_init(&c->journal);
+       if (ret)
+               return ret;
+
+       set_bit(BCH_FS_rw_init_done, &c->flags);
+       return 0;
+}
+
 static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 {
        struct bch_fs *c;
@@ -877,18 +911,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
                (btree_blocks(c) + 1) * 2 *
                sizeof(struct sort_iter_set);
 
-       if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
-                               WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_UNBOUND, 512)) ||
-           !(c->btree_write_complete_wq = alloc_workqueue("bcachefs_btree_write",
-                               WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
-           !(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
-                               WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
-           !(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete",
+       if (!(c->btree_read_complete_wq = alloc_workqueue("bcachefs_btree_read_complete",
                                WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 512)) ||
-           !(c->btree_write_submit_wq = alloc_workqueue("bcachefs_btree_write_sumit",
-                               WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
-           !(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
-                               WQ_FREEZABLE, 0)) ||
 #ifndef BCH_WRITE_REF_DEBUG
            percpu_ref_init(&c->writes, bch2_writes_disabled,
                            PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
@@ -911,9 +935,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
        ret =
            bch2_fs_btree_cache_init(c) ?:
            bch2_fs_btree_iter_init(c) ?:
-           bch2_fs_btree_interior_update_init(c) ?:
            bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
-           bch2_fs_btree_write_buffer_init(c) ?:
            bch2_fs_buckets_waiting_for_journal_init(c) ?:
            bch2_io_clock_init(&c->io_clock[READ]) ?:
            bch2_io_clock_init(&c->io_clock[WRITE]) ?:
@@ -922,11 +944,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
            bch2_fs_ec_init(c) ?:
            bch2_fs_encryption_init(c) ?:
            bch2_fs_fsio_init(c) ?:
-           bch2_fs_fs_io_buffered_init(c) ?:
            bch2_fs_fs_io_direct_init(c) ?:
            bch2_fs_io_read_init(c) ?:
-           bch2_fs_io_write_init(c) ?:
-           bch2_fs_journal_init(&c->journal) ?:
            bch2_fs_sb_errors_init(c) ?:
            bch2_fs_vfs_init(c);
        if (ret)