bcachefs: bch_dev.io_ref -> enumerated_ref
author     Kent Overstreet <kent.overstreet@linux.dev>
           Sat, 19 Apr 2025 01:54:12 +0000 (21:54 -0400)
committer  Kent Overstreet <kent.overstreet@linux.dev>
           Thu, 22 May 2025 00:14:28 +0000 (20:14 -0400)
Convert device IO refs to enumerated_refs, for easier debugging of
refcount issues.

Simple conversion: enumerate all users and convert to the new helpers.
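
For example, the pattern (taken from the bch2_dev_do_discards() conversion
below): each use site gets an entry in the new BCH_DEV_READ_REFS() /
BCH_DEV_WRITE_REFS() lists in bcachefs.h and passes that enum to the
get/put helpers:

  /* before: anonymous percpu ref */
  if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
          goto put_write_ref;
  ...
  percpu_ref_put(&ca->io_ref[WRITE]);

  /* after: the use site is named, so refcount problems can be pinned to it */
  if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_dev_do_discards))
          goto put_write_ref;
  ...
  enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_dev_do_discards);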

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
19 files changed:
fs/bcachefs/alloc_background.c
fs/bcachefs/backpointers.c
fs/bcachefs/bcachefs.h
fs/bcachefs/btree_io.c
fs/bcachefs/btree_node_scan.c
fs/bcachefs/buckets.c
fs/bcachefs/debug.c
fs/bcachefs/ec.c
fs/bcachefs/fs-io.c
fs/bcachefs/io_read.c
fs/bcachefs/io_write.c
fs/bcachefs/journal.c
fs/bcachefs/journal_io.c
fs/bcachefs/journal_reclaim.c
fs/bcachefs/sb-members.h
fs/bcachefs/super-io.c
fs/bcachefs/super.c
fs/bcachefs/super.h
fs/bcachefs/sysfs.c

diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index ced31309c54190fc5f233b8a83550876c93f3373..c63348c4b874aae4fa6d9a1a6013b4b4b0c283b7 100644
@@ -1953,7 +1953,7 @@ static void bch2_do_discards_work(struct work_struct *work)
        trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
                              bch2_err_str(ret));
 
-       percpu_ref_put(&ca->io_ref[WRITE]);
+       enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_dev_do_discards);
        enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard);
 }
 
@@ -1964,13 +1964,13 @@ void bch2_dev_do_discards(struct bch_dev *ca)
        if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_discard))
                return;
 
-       if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+       if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_dev_do_discards))
                goto put_write_ref;
 
        if (queue_work(c->write_ref_wq, &ca->discard_work))
                return;
 
-       percpu_ref_put(&ca->io_ref[WRITE]);
+       enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_dev_do_discards);
 put_write_ref:
        enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard);
 }
@@ -2048,7 +2048,7 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
        trace_discard_buckets_fast(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret));
 
        bch2_trans_put(trans);
-       percpu_ref_put(&ca->io_ref[WRITE]);
+       enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_discard_one_bucket_fast);
        enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard_fast);
 }
 
@@ -2062,13 +2062,13 @@ static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
        if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_discard_fast))
                return;
 
-       if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+       if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_discard_one_bucket_fast))
                goto put_ref;
 
        if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
                return;
 
-       percpu_ref_put(&ca->io_ref[WRITE]);
+       enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_discard_one_bucket_fast);
 put_ref:
        enumerated_ref_put(&c->writes, BCH_WRITE_REF_discard_fast);
 }
@@ -2262,8 +2262,8 @@ restart_err:
        bch2_trans_iter_exit(trans, &iter);
 err:
        bch2_trans_put(trans);
-       percpu_ref_put(&ca->io_ref[WRITE]);
        bch2_bkey_buf_exit(&last_flushed, c);
+       enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates);
        enumerated_ref_put(&c->writes, BCH_WRITE_REF_invalidate);
 }
 
@@ -2274,13 +2274,13 @@ void bch2_dev_do_invalidates(struct bch_dev *ca)
        if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_invalidate))
                return;
 
-       if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
+       if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE, BCH_DEV_WRITE_REF_do_invalidates))
                goto put_ref;
 
        if (queue_work(c->write_ref_wq, &ca->invalidate_work))
                return;
 
-       percpu_ref_put(&ca->io_ref[WRITE]);
+       enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates);
 put_ref:
        enumerated_ref_put(&c->writes, BCH_WRITE_REF_invalidate);
 }
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 5f195d2280a4ea6938e5ea096fc7529637f55a0e..e6178eb2c396679c435757b5e7774c524ad514a5 100644
@@ -478,7 +478,8 @@ found:
 
        bytes = p.crc.compressed_size << 9;
 
-       struct bch_dev *ca = bch2_dev_get_ioref(c, dev, READ);
+       struct bch_dev *ca = bch2_dev_get_ioref(c, dev, READ,
+                               BCH_DEV_READ_REF_check_extent_checksums);
        if (!ca)
                return false;
 
@@ -515,7 +516,8 @@ err:
        if (bio)
                bio_put(bio);
        kvfree(data_buf);
-       percpu_ref_put(&ca->io_ref[READ]);
+       enumerated_ref_put(&ca->io_ref[READ],
+                          BCH_DEV_READ_REF_check_extent_checksums);
        printbuf_exit(&buf);
        return ret;
 }
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index ced80d4b606a3b7c00590d22e67265a9314be3b8..3d18dbe0d6f5c99a7e621e912a116e5c92b306b7 100644
@@ -516,6 +516,51 @@ struct discard_in_flight {
        u64                     bucket:63;
 };
 
+#define BCH_DEV_READ_REFS()                            \
+       x(bch2_online_devs)                             \
+       x(trans_mark_dev_sbs)                           \
+       x(read_fua_test)                                \
+       x(sb_field_resize)                              \
+       x(write_super)                                  \
+       x(journal_read)                                 \
+       x(fs_journal_alloc)                             \
+       x(fs_resize_on_mount)                           \
+       x(btree_node_read)                              \
+       x(btree_node_read_all_replicas)                 \
+       x(btree_node_scrub)                             \
+       x(btree_node_write)                             \
+       x(btree_node_scan)                              \
+       x(btree_verify_replicas)                        \
+       x(btree_node_ondisk_to_text)                    \
+       x(io_read)                                      \
+       x(check_extent_checksums)                       \
+       x(ec_block)
+
+enum bch_dev_read_ref {
+#define x(n) BCH_DEV_READ_REF_##n,
+       BCH_DEV_READ_REFS()
+#undef x
+       BCH_DEV_READ_REF_NR,
+};
+
+#define BCH_DEV_WRITE_REFS()                           \
+       x(journal_write)                                \
+       x(journal_do_discards)                          \
+       x(dev_do_discards)                              \
+       x(discard_one_bucket_fast)                      \
+       x(do_invalidates)                               \
+       x(nocow_flush)                                  \
+       x(io_write)                                     \
+       x(ec_block)                                     \
+       x(ec_bucket_zero)
+
+enum bch_dev_write_ref {
+#define x(n) BCH_DEV_WRITE_REF_##n,
+       BCH_DEV_WRITE_REFS()
+#undef x
+       BCH_DEV_WRITE_REF_NR,
+};
+
 struct bch_dev {
        struct kobject          kobj;
 #ifdef CONFIG_BCACHEFS_DEBUG
@@ -526,8 +571,7 @@ struct bch_dev {
        struct percpu_ref       ref;
 #endif
        struct completion       ref_completion;
-       struct percpu_ref       io_ref[2];
-       struct completion       io_ref_completion[2];
+       struct enumerated_ref   io_ref[2];
 
        struct bch_fs           *fs;
 
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 9e759d9e29b1f9faccbeae3cba94b46b0de7651b..8fe9e0fc662902ce31a94456d51520d78e8e9ea1 100644
@@ -1326,7 +1326,7 @@ static void btree_node_read_work(struct work_struct *work)
        while (1) {
                retry = true;
                bch_info(c, "retrying read");
-               ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ);
+               ca = bch2_dev_get_ioref(c, rb->pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read);
                rb->have_ioref          = ca != NULL;
                rb->start_time          = local_clock();
                bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
@@ -1351,7 +1351,7 @@ start:
                                        "btree read error %s for %s",
                                        bch2_blk_status_to_str(bio->bi_status), buf.buf);
                if (rb->have_ioref)
-                       percpu_ref_put(&ca->io_ref[READ]);
+                       enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_read);
                rb->have_ioref = false;
 
                bch2_mark_io_failure(&failed, &rb->pick, false);
@@ -1609,7 +1609,8 @@ static void btree_node_read_all_replicas_endio(struct bio *bio)
                struct bch_dev *ca = bch2_dev_have_ref(c, rb->pick.ptr.dev);
 
                bch2_latency_acct(ca, rb->start_time, READ);
-               percpu_ref_put(&ca->io_ref[READ]);
+               enumerated_ref_put(&ca->io_ref[READ],
+                       BCH_DEV_READ_REF_btree_node_read_all_replicas);
        }
 
        ra->err[rb->idx] = bio->bi_status;
@@ -1649,7 +1650,8 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 
        i = 0;
        bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
-               struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
+               struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
+                                       BCH_DEV_READ_REF_btree_node_read_all_replicas);
                struct btree_read_bio *rb =
                        container_of(ra->bio[i], struct btree_read_bio, bio);
                rb->c                   = c;
@@ -1727,7 +1729,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
                return;
        }
 
-       ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
+       ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ, BCH_DEV_READ_REF_btree_node_read);
 
        bio = bio_alloc_bioset(NULL,
                               buf_pages(b->data, btree_buf_bytes(b)),
@@ -1930,7 +1932,7 @@ err:
        printbuf_exit(&err);
        bch2_bkey_buf_exit(&scrub->key, c);;
        btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
-       percpu_ref_put(&scrub->ca->io_ref[READ]);
+       enumerated_ref_put(&scrub->ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub);
        kfree(scrub);
        enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub);
 }
@@ -1959,7 +1961,8 @@ int bch2_btree_node_scrub(struct btree_trans *trans,
        if (ret <= 0)
                goto err;
 
-       struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
+       struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
+                                               BCH_DEV_READ_REF_btree_node_scrub);
        if (!ca) {
                ret = -BCH_ERR_device_offline;
                goto err;
@@ -1999,7 +2002,7 @@ int bch2_btree_node_scrub(struct btree_trans *trans,
        return 0;
 err_free:
        btree_bounce_free(c, c->opts.btree_node_size, used_mempool, buf);
-       percpu_ref_put(&ca->io_ref[READ]);
+       enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub);
 err:
        enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_node_scrub);
        return ret;
@@ -2169,7 +2172,8 @@ static void btree_node_write_endio(struct bio *bio)
         * btree writes yet (due to device removal/ro):
         */
        if (wbio->have_ioref)
-               percpu_ref_put(&ca->io_ref[READ]);
+               enumerated_ref_put(&ca->io_ref[READ],
+                                  BCH_DEV_READ_REF_btree_node_write);
 
        if (parent) {
                bio_put(bio);
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index 81ee7ae88a776cf4c0b6d523de02c72fad8ee49f..7bd13438d5ef09f1239664c0046b36f245f11cfa 100644
@@ -271,7 +271,7 @@ static int read_btree_nodes_worker(void *p)
 err:
        bio_put(bio);
        free_page((unsigned long) buf);
-       percpu_ref_put(&ca->io_ref[READ]);
+       enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scan);
        closure_put(w->cl);
        kfree(w);
        return 0;
@@ -285,13 +285,13 @@ static int read_btree_nodes(struct find_btree_nodes *f)
 
        closure_init_stack(&cl);
 
-       for_each_online_member(c, ca) {
+       for_each_online_member(c, ca, BCH_DEV_READ_REF_btree_node_scan) {
                if (!(ca->mi.data_allowed & BIT(BCH_DATA_btree)))
                        continue;
 
                struct find_btree_nodes_worker *w = kmalloc(sizeof(*w), GFP_KERNEL);
                if (!w) {
-                       percpu_ref_put(&ca->io_ref[READ]);
+                       enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scan);
                        ret = -ENOMEM;
                        goto err;
                }
@@ -303,14 +303,14 @@ static int read_btree_nodes(struct find_btree_nodes *f)
                struct task_struct *t = kthread_create(read_btree_nodes_worker, w, "read_btree_nodes/%s", ca->name);
                ret = PTR_ERR_OR_ZERO(t);
                if (ret) {
-                       percpu_ref_put(&ca->io_ref[READ]);
+                       enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scan);
                        kfree(w);
                        bch_err_msg(c, ret, "starting kthread");
                        break;
                }
 
                closure_get(&cl);
-               percpu_ref_get(&ca->io_ref[READ]);
+               enumerated_ref_get(&ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scan);
                wake_up_process(t);
        }
 err:
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 36c1e391d4dfc026533b6a7e724a0c611cab354d..3ec33a7e9d922889b1ab706564eaa571c07acb34 100644
@@ -1146,10 +1146,10 @@ int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
 int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
                        enum btree_iter_update_trigger_flags flags)
 {
-       for_each_online_member(c, ca) {
+       for_each_online_member(c, ca, BCH_DEV_READ_REF_trans_mark_dev_sbs) {
                int ret = bch2_trans_mark_dev_sb(c, ca, flags);
                if (ret) {
-                       percpu_ref_put(&ca->io_ref[READ]);
+                       enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_trans_mark_dev_sbs);
                        return ret;
                }
        }
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index 312f5ce7cba9a06fc69d7b5e2a96173166063a6f..4cbb19c36fa16f9382cb76fd2428963322f41030 100644
@@ -42,7 +42,8 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
        struct bio *bio;
        bool failed = false, saw_error = false;
 
-       struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
+       struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
+                               BCH_DEV_READ_REF_btree_verify_replicas);
        if (!ca)
                return false;
 
@@ -57,7 +58,8 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
        submit_bio_wait(bio);
 
        bio_put(bio);
-       percpu_ref_put(&ca->io_ref[READ]);
+       enumerated_ref_put(&ca->io_ref[READ],
+                          BCH_DEV_READ_REF_btree_verify_replicas);
 
        memcpy(n_ondisk, n_sorted, btree_buf_bytes(b));
 
@@ -196,7 +198,8 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
                return;
        }
 
-       ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
+       ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
+                       BCH_DEV_READ_REF_btree_node_ondisk_to_text);
        if (!ca) {
                prt_printf(out, "error getting device to read from: not online\n");
                return;
@@ -297,7 +300,8 @@ out:
        if (bio)
                bio_put(bio);
        kvfree(n_ondisk);
-       percpu_ref_put(&ca->io_ref[READ]);
+       enumerated_ref_put(&ca->io_ref[READ],
+                          BCH_DEV_READ_REF_btree_node_ondisk_to_text);
 }
 
 #ifdef CONFIG_DEBUG_FS
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 94c24f4582bd5b0b72a2cf247f8509e2e359481b..dcd4e2266d344c9711d973ddef5ab1dfbdf58ec1 100644
@@ -701,6 +701,9 @@ static void ec_block_endio(struct bio *bio)
        struct bch_dev *ca = ec_bio->ca;
        struct closure *cl = bio->bi_private;
        int rw = ec_bio->rw;
+       unsigned ref = rw == READ
+               ? BCH_DEV_READ_REF_ec_block
+               : BCH_DEV_WRITE_REF_ec_block;
 
        bch2_account_io_completion(ca, bio_data_dir(bio),
                                   ec_bio->submit_time, !bio->bi_status);
@@ -722,7 +725,7 @@ static void ec_block_endio(struct bio *bio)
        }
 
        bio_put(&ec_bio->bio);
-       percpu_ref_put(&ca->io_ref[rw]);
+       enumerated_ref_put(&ca->io_ref[rw], ref);
        closure_put(cl);
 }
 
@@ -736,8 +739,11 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
                ? BCH_DATA_user
                : BCH_DATA_parity;
        int rw = op_is_write(opf);
+       unsigned ref = rw == READ
+               ? BCH_DEV_READ_REF_ec_block
+               : BCH_DEV_WRITE_REF_ec_block;
 
-       struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw);
+       struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw, ref);
        if (!ca) {
                clear_bit(idx, buf->valid);
                return;
@@ -783,14 +789,14 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
                bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);
 
                closure_get(cl);
-               percpu_ref_get(&ca->io_ref[rw]);
+               enumerated_ref_get(&ca->io_ref[rw], ref);
 
                submit_bio(&ec_bio->bio);
 
                offset += b;
        }
 
-       percpu_ref_put(&ca->io_ref[rw]);
+       enumerated_ref_put(&ca->io_ref[rw], ref);
 }
 
 static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
@@ -1247,7 +1253,8 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
                                       unsigned block,
                                       struct open_bucket *ob)
 {
-       struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE);
+       struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE,
+                               BCH_DEV_WRITE_REF_ec_bucket_zero);
        if (!ca) {
                s->err = -BCH_ERR_erofs_no_writes;
                return;
@@ -1263,7 +1270,7 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
                        ob->sectors_free,
                        GFP_KERNEL, 0);
 
-       percpu_ref_put(&ca->io_ref[WRITE]);
+       enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_ec_bucket_zero);
 
        if (ret)
                s->err = ret;
diff --git a/fs/bcachefs/fs-io.c b/fs/bcachefs/fs-io.c
index 6ea2762e651769e3a5c32a8c4be59acdb3053056..b1e9ee28fc0f9319076db2450bd41dff2b7708b3 100644
@@ -49,7 +49,8 @@ static void nocow_flush_endio(struct bio *_bio)
        struct nocow_flush *bio = container_of(_bio, struct nocow_flush, bio);
 
        closure_put(bio->cl);
-       percpu_ref_put(&bio->ca->io_ref[WRITE]);
+       enumerated_ref_put(&bio->ca->io_ref[WRITE],
+                          BCH_DEV_WRITE_REF_nocow_flush);
        bio_put(&bio->bio);
 }
 
@@ -72,7 +73,8 @@ void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
        for_each_set_bit(dev, devs.d, BCH_SB_MEMBERS_MAX) {
                rcu_read_lock();
                ca = rcu_dereference(c->devs[dev]);
-               if (ca && !percpu_ref_tryget(&ca->io_ref[WRITE]))
+               if (ca && !enumerated_ref_tryget(&ca->io_ref[WRITE],
+                                       BCH_DEV_WRITE_REF_nocow_flush))
                        ca = NULL;
                rcu_read_unlock();
 
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index baedfee673999418477f8691d9303146b66f3ea8..136b6d54a2c2c6911e2eb1712ae354961a86781f 100644
@@ -409,7 +409,7 @@ static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
 
        if (rbio->have_ioref) {
                struct bch_dev *ca = bch2_dev_have_ref(rbio->c, rbio->pick.ptr.dev);
-               percpu_ref_put(&ca->io_ref[READ]);
+               enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_io_read);
        }
 
        if (rbio->split) {
@@ -1100,7 +1100,8 @@ retry_pick:
                goto err;
        }
 
-       struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ);
+       struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
+                                       BCH_DEV_READ_REF_io_read);
 
        /*
         * Stale dirty pointers are treated as IO errors, but @failed isn't
@@ -1114,7 +1115,7 @@ retry_pick:
            unlikely(dev_ptr_stale(ca, &pick.ptr))) {
                read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
                bch2_mark_io_failure(failed, &pick, false);
-               percpu_ref_put(&ca->io_ref[READ]);
+               enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_io_read);
                goto retry_pick;
        }
 
@@ -1147,7 +1148,8 @@ retry_pick:
                 */
                if (pick.crc.compressed_size > u->op.wbio.bio.bi_iter.bi_size) {
                        if (ca)
-                               percpu_ref_put(&ca->io_ref[READ]);
+                               enumerated_ref_put(&ca->io_ref[READ],
+                                       BCH_DEV_READ_REF_io_read);
                        rbio->ret = -BCH_ERR_data_read_buffer_too_small;
                        goto out_read_done;
                }
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index e95a535ad44aaa4d8376e367be7400c0cd55d274..add141ac45b5fe0a4a2dbf4f242aa63c7412ad22 100644
@@ -461,6 +461,10 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
 {
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(k));
        struct bch_write_bio *n;
+       unsigned ref_rw  = type == BCH_DATA_btree ? READ : WRITE;
+       unsigned ref_idx = type == BCH_DATA_btree
+               ? BCH_DEV_READ_REF_btree_node_write
+               : BCH_DEV_WRITE_REF_io_write;
 
        BUG_ON(c->opts.nochanges);
 
@@ -472,7 +476,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
                 */
                struct bch_dev *ca = nocow
                        ? bch2_dev_have_ref(c, ptr->dev)
-                       : bch2_dev_get_ioref(c, ptr->dev, type == BCH_DATA_btree ? READ : WRITE);
+                       : bch2_dev_get_ioref(c, ptr->dev, ref_rw, ref_idx);
 
                if (to_entry(ptr + 1) < ptrs.end) {
                        n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOFS, &c->replica_set));
@@ -747,7 +751,8 @@ static void bch2_write_endio(struct bio *bio)
        }
 
        if (wbio->have_ioref)
-               percpu_ref_put(&ca->io_ref[WRITE]);
+               enumerated_ref_put(&ca->io_ref[WRITE],
+                                  BCH_DEV_WRITE_REF_io_write);
 
        if (wbio->bounce)
                bch2_bio_free_pages_pool(c, bio);
@@ -1344,7 +1349,8 @@ retry:
                /* Get iorefs before dropping btree locks: */
                struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
                bkey_for_each_ptr(ptrs, ptr) {
-                       struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
+                       struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE,
+                                                       BCH_DEV_WRITE_REF_io_write);
                        if (unlikely(!ca))
                                goto err_get_ioref;
 
@@ -1446,7 +1452,8 @@ err:
        return;
 err_get_ioref:
        darray_for_each(buckets, i)
-               percpu_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref[WRITE]);
+               enumerated_ref_put(&bch2_dev_have_ref(c, i->b.inode)->io_ref[WRITE],
+                                  BCH_DEV_WRITE_REF_io_write);
 
        /* Fall back to COW path: */
        goto out;
diff --git a/fs/bcachefs/journal.c b/fs/bcachefs/journal.c
index e2c95192a5779c1832627e53e2cadbb69e50901c..f2963a6cca8876e2fbdcf216cca99c6e74cac0a7 100644
@@ -1336,13 +1336,14 @@ err:
 
 int bch2_fs_journal_alloc(struct bch_fs *c)
 {
-       for_each_online_member(c, ca) {
+       for_each_online_member(c, ca, BCH_DEV_READ_REF_fs_journal_alloc) {
                if (ca->journal.nr)
                        continue;
 
                int ret = bch2_dev_journal_alloc(ca, true);
                if (ret) {
-                       percpu_ref_put(&ca->io_ref[READ]);
+                       enumerated_ref_put(&ca->io_ref[READ],
+                                          BCH_DEV_READ_REF_fs_journal_alloc);
                        return ret;
                }
        }
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 438ad32ba242b4a4d9472273aa96f947245885d5..8f38e9485cd854847080084eb0ac2e5cacac28d3 100644
@@ -1219,7 +1219,7 @@ static CLOSURE_CALLBACK(bch2_journal_read_device)
 out:
        bch_verbose(c, "journal read done on device %s, ret %i", ca->name, ret);
        kvfree(buf.data);
-       percpu_ref_put(&ca->io_ref[READ]);
+       enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_journal_read);
        closure_return(cl);
        return;
 err:
@@ -1254,7 +1254,8 @@ int bch2_journal_read(struct bch_fs *c,
 
                if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
                     ca->mi.state == BCH_MEMBER_STATE_ro) &&
-                   percpu_ref_tryget(&ca->io_ref[READ]))
+                   enumerated_ref_tryget(&ca->io_ref[READ],
+                                         BCH_DEV_READ_REF_journal_read))
                        closure_call(&ca->journal.read,
                                     bch2_journal_read_device,
                                     system_unbound_wq,
@@ -1770,7 +1771,7 @@ static void journal_write_endio(struct bio *bio)
        }
 
        closure_put(&w->io);
-       percpu_ref_put(&ca->io_ref[WRITE]);
+       enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_journal_write);
 }
 
 static CLOSURE_CALLBACK(journal_write_submit)
@@ -1781,7 +1782,8 @@ static CLOSURE_CALLBACK(journal_write_submit)
        unsigned sectors = vstruct_sectors(w->data, c->block_bits);
 
        extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
-               struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE);
+               struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, WRITE,
+                                       BCH_DEV_WRITE_REF_journal_write);
                if (!ca) {
                        /* XXX: fix this */
                        bch_err(c, "missing device %u for journal write", ptr->dev);
@@ -1844,8 +1846,9 @@ static CLOSURE_CALLBACK(journal_write_preflush)
        }
 
        if (w->separate_flush) {
-               for_each_rw_member(c, ca) {
-                       percpu_ref_get(&ca->io_ref[WRITE]);
+               for_each_rw_member(c, ca, BCH_DEV_WRITE_REF_journal_write) {
+                       enumerated_ref_get(&ca->io_ref[WRITE],
+                                          BCH_DEV_WRITE_REF_journal_write);
 
                        struct journal_device *ja = &ca->journal;
                        struct bio *bio = &ja->bio[w->idx]->bio;
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index dc8169a970dd7da2b5a7fc3da34105c813110888..bb339be54e7b6fae6b6a656e6f97cfac3bc8eeb3 100644
@@ -295,7 +295,7 @@ void bch2_journal_do_discards(struct journal *j)
 
        mutex_lock(&j->discard_lock);
 
-       for_each_rw_member(c, ca) {
+       for_each_rw_member(c, ca, BCH_DEV_WRITE_REF_journal_do_discards) {
                struct journal_device *ja = &ca->journal;
 
                while (should_discard_bucket(j, ja)) {
diff --git a/fs/bcachefs/sb-members.h b/fs/bcachefs/sb-members.h
index c71a1ba6152542343c21db8fbe6a8d076417d10b..c9cb8f7657b04d869d6d5b2bc262ad7379295f87 100644
@@ -4,6 +4,7 @@
 
 #include "darray.h"
 #include "bkey_types.h"
+#include "enumerated_ref.h"
 
 extern char * const bch2_member_error_strs[];
 
@@ -20,7 +21,7 @@ struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);
 
 static inline bool bch2_dev_is_online(struct bch_dev *ca)
 {
-       return !percpu_ref_is_zero(&ca->io_ref[READ]);
+       return !enumerated_ref_is_zero(&ca->io_ref[READ]);
 }
 
 static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *, unsigned);
@@ -163,33 +164,33 @@ static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev
 static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
                                                       struct bch_dev *ca,
                                                       unsigned state_mask,
-                                                      int rw)
+                                                      int rw, unsigned ref_idx)
 {
        rcu_read_lock();
        if (ca)
-               percpu_ref_put(&ca->io_ref[rw]);
+               enumerated_ref_put(&ca->io_ref[rw], ref_idx);
 
        while ((ca = __bch2_next_dev(c, ca, NULL)) &&
               (!((1 << ca->mi.state) & state_mask) ||
-               !percpu_ref_tryget(&ca->io_ref[rw])))
+               !enumerated_ref_tryget(&ca->io_ref[rw], ref_idx)))
                ;
        rcu_read_unlock();
 
        return ca;
 }
 
-#define __for_each_online_member(_c, _ca, state_mask, rw)              \
+#define __for_each_online_member(_c, _ca, state_mask, rw, ref_idx)     \
        for (struct bch_dev *_ca = NULL;                                \
-            (_ca = bch2_get_next_online_dev(_c, _ca, state_mask, rw));)
+            (_ca = bch2_get_next_online_dev(_c, _ca, state_mask, rw, ref_idx));)
 
-#define for_each_online_member(c, ca)                                  \
-       __for_each_online_member(c, ca, ~0, READ)
+#define for_each_online_member(c, ca, ref_idx)                         \
+       __for_each_online_member(c, ca, ~0, READ, ref_idx)
 
-#define for_each_rw_member(c, ca)                                     \
-       __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), WRITE)
+#define for_each_rw_member(c, ca, ref_idx)                                     \
+       __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), WRITE, ref_idx)
 
-#define for_each_readable_member(c, ca)                               \
-       __for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro), READ)
+#define for_each_readable_member(c, ca, ref_idx)                               \
+       __for_each_online_member(c, ca, BIT( BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro), READ, ref_idx)
 
 static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
 {
@@ -293,13 +294,14 @@ static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev
        return bch2_dev_tryget(c, dev_idx);
 }
 
-static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
+static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev,
+                                                int rw, unsigned ref_idx)
 {
        might_sleep();
 
        rcu_read_lock();
        struct bch_dev *ca = bch2_dev_rcu(c, dev);
-       if (ca && !percpu_ref_tryget(&ca->io_ref[rw]))
+       if (ca && !enumerated_ref_tryget(&ca->io_ref[rw], ref_idx))
                ca = NULL;
        rcu_read_unlock();
 
@@ -309,7 +311,7 @@ static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev,
                return ca;
 
        if (ca)
-               percpu_ref_put(&ca->io_ref[rw]);
+               enumerated_ref_put(&ca->io_ref[rw], ref_idx);
        return NULL;
 }
 
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 872707e5fa95074a34adf99b26e2111f58b718a5..d53cbc5f992595dd17fab3298f23c14fcfd3d97a 100644
@@ -260,11 +260,11 @@ struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *sb,
 
                /* XXX: we're not checking that offline device have enough space */
 
-               for_each_online_member(c, ca) {
+               for_each_online_member(c, ca, BCH_DEV_READ_REF_sb_field_resize) {
                        struct bch_sb_handle *dev_sb = &ca->disk_sb;
 
                        if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) {
-                               percpu_ref_put(&ca->io_ref[READ]);
+                               enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_sb_field_resize);
                                return NULL;
                        }
                }
@@ -967,7 +967,7 @@ static void write_super_endio(struct bio *bio)
        }
 
        closure_put(&ca->fs->sb_write);
-       percpu_ref_put(&ca->io_ref[READ]);
+       enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_write_super);
 }
 
 static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
@@ -985,7 +985,7 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
 
        this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb], bio_sectors(bio));
 
-       percpu_ref_get(&ca->io_ref[READ]);
+       enumerated_ref_get(&ca->io_ref[READ], BCH_DEV_READ_REF_write_super);
        closure_bio_submit(bio, &c->sb_write);
 }
 
@@ -1011,7 +1011,7 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
        this_cpu_add(ca->io_done->sectors[WRITE][BCH_DATA_sb],
                     bio_sectors(bio));
 
-       percpu_ref_get(&ca->io_ref[READ]);
+       enumerated_ref_get(&ca->io_ref[READ], BCH_DEV_READ_REF_write_super);
        closure_bio_submit(bio, &c->sb_write);
 }
 
@@ -1043,13 +1043,13 @@ int bch2_write_super(struct bch_fs *c)
         * For now, we expect to be able to call write_super() when we're not
         * yet RW:
         */
-       for_each_online_member(c, ca) {
+       for_each_online_member(c, ca, BCH_DEV_READ_REF_write_super) {
                ret = darray_push(&online_devices, ca);
                if (bch2_fs_fatal_err_on(ret, c, "%s: error allocating online devices", __func__)) {
-                       percpu_ref_put(&ca->io_ref[READ]);
+                       enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_write_super);
                        goto out;
                }
-               percpu_ref_get(&ca->io_ref[READ]);
+               enumerated_ref_get(&ca->io_ref[READ], BCH_DEV_READ_REF_write_super);
        }
 
        /* Make sure we're using the new magic numbers: */
@@ -1216,7 +1216,7 @@ out:
        /* Make new options visible after they're persistent: */
        bch2_sb_update(c);
        darray_for_each(online_devices, ca)
-               percpu_ref_put(&(*ca)->io_ref[READ]);
+               enumerated_ref_put(&(*ca)->io_ref[READ], BCH_DEV_READ_REF_write_super);
        darray_exit(&online_devices);
        printbuf_exit(&err);
        return ret;
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 6fa427d5cbd69eb81357a922998cb8fb8bf732e2..bed0f8a802128245e34b4da3fce185189b263649 100644
@@ -78,13 +78,28 @@ MODULE_DESCRIPTION("bcachefs filesystem");
 
 typedef DARRAY(struct bch_sb_handle) bch_sb_handles;
 
-const char * const bch2_fs_flag_strs[] = {
 #define x(n)           #n,
+const char * const bch2_fs_flag_strs[] = {
        BCH_FS_FLAGS()
-#undef x
        NULL
 };
 
+const char * const bch2_write_refs[] = {
+       BCH_WRITE_REFS()
+       NULL
+};
+
+const char * const bch2_dev_read_refs[] = {
+       BCH_DEV_READ_REFS()
+       NULL
+};
+
+const char * const bch2_dev_write_refs[] = {
+       BCH_DEV_WRITE_REFS()
+       NULL
+};
+#undef x
+
 static void __bch2_print_str(struct bch_fs *c, const char *prefix,
                             const char *str, bool nonblocking)
 {
@@ -469,7 +484,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
        for_each_online_member_rcu(c, ca)
                if (ca->mi.state == BCH_MEMBER_STATE_rw) {
                        bch2_dev_allocator_add(c, ca);
-                       percpu_ref_reinit(&ca->io_ref[WRITE]);
+                       enumerated_ref_start(&ca->io_ref[WRITE]);
                }
        rcu_read_unlock();
 
@@ -645,6 +660,12 @@ void __bch2_fs_stop(struct bch_fs *c)
        bch2_fs_read_only(c);
        up_write(&c->state_lock);
 
+       for (unsigned i = 0; i < c->sb.nr_devices; i++) {
+               struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);
+               if (ca)
+                       bch2_dev_io_ref_stop(ca, READ);
+       }
+
        for_each_member_device(c, ca)
                bch2_dev_unlink(ca);
 
@@ -673,8 +694,6 @@ void __bch2_fs_stop(struct bch_fs *c)
 
 void bch2_fs_free(struct bch_fs *c)
 {
-       unsigned i;
-
        mutex_lock(&bch_fs_list_lock);
        list_del(&c->list);
        mutex_unlock(&bch_fs_list_lock);
@@ -682,7 +701,7 @@ void bch2_fs_free(struct bch_fs *c)
        closure_sync(&c->cl);
        closure_debug_destroy(&c->cl);
 
-       for (i = 0; i < c->sb.nr_devices; i++) {
+       for (unsigned i = 0; i < c->sb.nr_devices; i++) {
                struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);
 
                if (ca) {
@@ -1290,11 +1309,11 @@ static void bch2_dev_io_ref_stop(struct bch_dev *ca, int rw)
        if (rw == READ)
                clear_bit(ca->dev_idx, ca->fs->online_devs.d);
 
-       if (!percpu_ref_is_zero(&ca->io_ref[rw])) {
-               reinit_completion(&ca->io_ref_completion[rw]);
-               percpu_ref_kill(&ca->io_ref[rw]);
-               wait_for_completion(&ca->io_ref_completion[rw]);
-       }
+       if (!enumerated_ref_is_zero(&ca->io_ref[rw]))
+               enumerated_ref_stop(&ca->io_ref[rw],
+                                   rw == READ
+                                   ? bch2_dev_read_refs
+                                   : bch2_dev_write_refs);
 }
 
 static void bch2_dev_release(struct kobject *kobj)
@@ -1306,8 +1325,8 @@ static void bch2_dev_release(struct kobject *kobj)
 
 static void bch2_dev_free(struct bch_dev *ca)
 {
-       WARN_ON(!percpu_ref_is_zero(&ca->io_ref[WRITE]));
-       WARN_ON(!percpu_ref_is_zero(&ca->io_ref[READ]));
+       WARN_ON(!enumerated_ref_is_zero(&ca->io_ref[WRITE]));
+       WARN_ON(!enumerated_ref_is_zero(&ca->io_ref[READ]));
 
        cancel_work_sync(&ca->io_error_work);
 
@@ -1327,8 +1346,8 @@ static void bch2_dev_free(struct bch_dev *ca)
        bch2_time_stats_quantiles_exit(&ca->io_latency[WRITE]);
        bch2_time_stats_quantiles_exit(&ca->io_latency[READ]);
 
-       percpu_ref_exit(&ca->io_ref[WRITE]);
-       percpu_ref_exit(&ca->io_ref[READ]);
+       enumerated_ref_exit(&ca->io_ref[WRITE]);
+       enumerated_ref_exit(&ca->io_ref[READ]);
 #ifndef CONFIG_BCACHEFS_DEBUG
        percpu_ref_exit(&ca->ref);
 #endif
@@ -1340,7 +1359,7 @@ static void __bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca)
 
        lockdep_assert_held(&c->state_lock);
 
-       if (percpu_ref_is_zero(&ca->io_ref[READ]))
+       if (enumerated_ref_is_zero(&ca->io_ref[READ]))
                return;
 
        __bch2_dev_read_only(c, ca);
@@ -1362,20 +1381,6 @@ static void bch2_dev_ref_complete(struct percpu_ref *ref)
 }
 #endif
 
-static void bch2_dev_io_ref_read_complete(struct percpu_ref *ref)
-{
-       struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref[READ]);
-
-       complete(&ca->io_ref_completion[READ]);
-}
-
-static void bch2_dev_io_ref_write_complete(struct percpu_ref *ref)
-{
-       struct bch_dev *ca = container_of(ref, struct bch_dev, io_ref[WRITE]);
-
-       complete(&ca->io_ref_completion[WRITE]);
-}
-
 static void bch2_dev_unlink(struct bch_dev *ca)
 {
        struct kobject *b;
@@ -1437,8 +1442,6 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
 
        kobject_init(&ca->kobj, &bch2_dev_ktype);
        init_completion(&ca->ref_completion);
-       init_completion(&ca->io_ref_completion[READ]);
-       init_completion(&ca->io_ref_completion[WRITE]);
 
        INIT_WORK(&ca->io_error_work, bch2_io_error_work);
 
@@ -1464,10 +1467,8 @@ static struct bch_dev *__bch2_dev_alloc(struct bch_fs *c,
 
        bch2_dev_allocator_background_init(ca);
 
-       if (percpu_ref_init(&ca->io_ref[READ], bch2_dev_io_ref_read_complete,
-                           PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
-           percpu_ref_init(&ca->io_ref[WRITE], bch2_dev_io_ref_write_complete,
-                           PERCPU_REF_INIT_DEAD, GFP_KERNEL) ||
+       if (enumerated_ref_init(&ca->io_ref[READ],  BCH_DEV_READ_REF_NR,  NULL) ||
+           enumerated_ref_init(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_NR, NULL) ||
            !(ca->sb_read_scratch = kmalloc(BCH_SB_READ_SCRATCH_BUF_SIZE, GFP_KERNEL)) ||
            bch2_dev_buckets_alloc(c, ca) ||
            !(ca->io_done       = alloc_percpu(*ca->io_done)))
@@ -1529,8 +1530,8 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
                return -BCH_ERR_device_size_too_small;
        }
 
-       BUG_ON(!percpu_ref_is_zero(&ca->io_ref[READ]));
-       BUG_ON(!percpu_ref_is_zero(&ca->io_ref[WRITE]));
+       BUG_ON(!enumerated_ref_is_zero(&ca->io_ref[READ]));
+       BUG_ON(!enumerated_ref_is_zero(&ca->io_ref[WRITE]));
 
        ret = bch2_dev_journal_init(ca, sb->sb);
        if (ret)
@@ -1549,7 +1550,7 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
 
        ca->dev = ca->disk_sb.bdev->bd_dev;
 
-       percpu_ref_reinit(&ca->io_ref[READ]);
+       enumerated_ref_start(&ca->io_ref[READ]);
 
        return 0;
 }
@@ -1662,8 +1663,8 @@ static void __bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
        bch2_dev_allocator_add(c, ca);
        bch2_recalc_capacity(c);
 
-       if (percpu_ref_is_zero(&ca->io_ref[WRITE]))
-               percpu_ref_reinit(&ca->io_ref[WRITE]);
+       if (enumerated_ref_is_zero(&ca->io_ref[WRITE]))
+               enumerated_ref_start(&ca->io_ref[WRITE]);
 
        bch2_dev_do_discards(ca);
 }
@@ -1813,7 +1814,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
 err:
        if (test_bit(BCH_FS_rw, &c->flags) &&
            ca->mi.state == BCH_MEMBER_STATE_rw &&
-           !percpu_ref_is_zero(&ca->io_ref[READ]))
+           !enumerated_ref_is_zero(&ca->io_ref[READ]))
                __bch2_dev_read_write(c, ca);
        up_write(&c->state_lock);
        return ret;
@@ -2112,7 +2113,7 @@ err:
 
 int bch2_fs_resize_on_mount(struct bch_fs *c)
 {
-       for_each_online_member(c, ca) {
+       for_each_online_member(c, ca, BCH_DEV_READ_REF_fs_resize_on_mount) {
                u64 old_nbuckets = ca->mi.nbuckets;
                u64 new_nbuckets = div64_u64(get_capacity(ca->disk_sb.bdev->bd_disk),
                                         ca->mi.bucket_size);
@@ -2123,7 +2124,8 @@ int bch2_fs_resize_on_mount(struct bch_fs *c)
                        int ret = bch2_dev_buckets_resize(c, ca, new_nbuckets);
                        bch_err_fn(ca, ret);
                        if (ret) {
-                               percpu_ref_put(&ca->io_ref[READ]);
+                               enumerated_ref_put(&ca->io_ref[READ],
+                                                  BCH_DEV_READ_REF_fs_resize_on_mount);
                                up_write(&c->state_lock);
                                return ret;
                        }
@@ -2141,7 +2143,8 @@ int bch2_fs_resize_on_mount(struct bch_fs *c)
                        if (ca->mi.freespace_initialized) {
                                ret = __bch2_dev_resize_alloc(ca, old_nbuckets, new_nbuckets);
                                if (ret) {
-                                       percpu_ref_put(&ca->io_ref[READ]);
+                                       enumerated_ref_put(&ca->io_ref[READ],
+                                                       BCH_DEV_READ_REF_fs_resize_on_mount);
                                        up_write(&c->state_lock);
                                        return ret;
                                }
diff --git a/fs/bcachefs/super.h b/fs/bcachefs/super.h
index 50588ab20be24f6455754bf28cc5ea44f82474e0..a1566f2d77c33f348b6fbc60541bc2f42e9f3d5f 100644
@@ -9,6 +9,9 @@
 #include <linux/math64.h>
 
 extern const char * const bch2_fs_flag_strs[];
+extern const char * const bch2_write_refs[];
+extern const char * const bch2_dev_read_refs[];
+extern const char * const bch2_dev_write_refs[];
 
 struct bch_fs *bch2_dev_to_fs(dev_t);
 struct bch_fs *bch2_uuid_to_fs(__uuid_t);
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index b80c46af13d441ec03d37f700083d4ce27de4c1c..dfae5eda7a4cb071c8238076f1fcaa704d04f703 100644
@@ -176,14 +176,9 @@ read_attribute(btree_reserve_cache);
 read_attribute(open_buckets);
 read_attribute(open_buckets_partial);
 read_attribute(nocow_lock_table);
-read_attribute(write_refs);
 
-static const char * const bch2_write_refs[] = {
-#define x(n)   #n,
-       BCH_WRITE_REFS()
-#undef x
-       NULL
-};
+read_attribute(read_refs);
+read_attribute(write_refs);
 
 read_attribute(internal_uuid);
 read_attribute(disk_groups);
@@ -790,6 +785,12 @@ SHOW(bch2_dev)
        if (opt_id >= 0)
                return sysfs_opt_show(c, ca, opt_id, out);
 
+       if (attr == &sysfs_read_refs)
+               enumerated_ref_to_text(out, &ca->io_ref[READ], bch2_dev_read_refs);
+
+       if (attr == &sysfs_write_refs)
+               enumerated_ref_to_text(out, &ca->io_ref[WRITE], bch2_dev_write_refs);
+
        return 0;
 }
 
@@ -845,6 +846,9 @@ struct attribute *bch2_dev_files[] = {
        /* debug: */
        &sysfs_alloc_debug,
        &sysfs_open_buckets,
+
+       &sysfs_read_refs,
+       &sysfs_write_refs,
        NULL
 };