bcachefs: dev_alloc_list.devs -> dev_alloc_list.data
author: Kent Overstreet <kent.overstreet@linux.dev>
Thu, 5 Dec 2024 00:21:22 +0000 (19:21 -0500)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Sat, 21 Dec 2024 06:36:22 +0000 (01:36 -0500)
This lets us use darray macros on dev_alloc_list (and it will become a
darray eventually, when we increase the maximum number of devices).

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/alloc_foreground.c
fs/bcachefs/alloc_foreground.h
fs/bcachefs/journal_io.c

index 095bfe7c53bda2e4ebc6aab0696ffd1805723c89..49c9275465f9d1934ad5d21c71cf8d5a3332d07e 100644 (file)
@@ -626,9 +626,9 @@ struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *c,
        unsigned i;
 
        for_each_set_bit(i, devs->d, BCH_SB_MEMBERS_MAX)
-               ret.devs[ret.nr++] = i;
+               ret.data[ret.nr++] = i;
 
-       bubble_sort(ret.devs, ret.nr, dev_stripe_cmp);
+       bubble_sort(ret.data, ret.nr, dev_stripe_cmp);
        return ret;
 }
 
@@ -700,18 +700,13 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
                      struct closure *cl)
 {
        struct bch_fs *c = trans->c;
-       struct dev_alloc_list devs_sorted =
-               bch2_dev_alloc_list(c, stripe, devs_may_alloc);
        int ret = -BCH_ERR_insufficient_devices;
 
        BUG_ON(*nr_effective >= nr_replicas);
 
-       for (unsigned i = 0; i < devs_sorted.nr; i++) {
-               struct bch_dev_usage usage;
-               struct open_bucket *ob;
-
-               unsigned dev = devs_sorted.devs[i];
-               struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
+       struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, stripe, devs_may_alloc);
+       darray_for_each(devs_sorted, i) {
+               struct bch_dev *ca = bch2_dev_tryget_noerror(c, *i);
                if (!ca)
                        continue;
 
@@ -720,8 +715,9 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
                        continue;
                }
 
-               ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type,
-                                            cl, flags & BCH_WRITE_ALLOC_NOWAIT, &usage);
+               struct bch_dev_usage usage;
+               struct open_bucket *ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type,
+                                                    cl, flags & BCH_WRITE_ALLOC_NOWAIT, &usage);
                if (!IS_ERR(ob))
                        bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
                bch2_dev_put(ca);
@@ -765,10 +761,6 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
                         struct closure *cl)
 {
        struct bch_fs *c = trans->c;
-       struct dev_alloc_list devs_sorted;
-       struct ec_stripe_head *h;
-       struct open_bucket *ob;
-       unsigned i, ec_idx;
        int ret = 0;
 
        if (nr_replicas < 2)
@@ -777,34 +769,32 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
        if (ec_open_bucket(c, ptrs))
                return 0;
 
-       h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
+       struct ec_stripe_head *h =
+               bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
        if (IS_ERR(h))
                return PTR_ERR(h);
        if (!h)
                return 0;
 
-       devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
-
-       for (i = 0; i < devs_sorted.nr; i++)
-               for (ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
+       struct dev_alloc_list devs_sorted = bch2_dev_alloc_list(c, &wp->stripe, devs_may_alloc);
+       darray_for_each(devs_sorted, i)
+               for (unsigned ec_idx = 0; ec_idx < h->s->nr_data; ec_idx++) {
                        if (!h->s->blocks[ec_idx])
                                continue;
 
-                       ob = c->open_buckets + h->s->blocks[ec_idx];
-                       if (ob->dev == devs_sorted.devs[i] &&
-                           !test_and_set_bit(ec_idx, h->s->blocks_allocated))
-                               goto got_bucket;
+                       struct open_bucket *ob = c->open_buckets + h->s->blocks[ec_idx];
+                       if (ob->dev == *i && !test_and_set_bit(ec_idx, h->s->blocks_allocated)) {
+                               ob->ec_idx      = ec_idx;
+                               ob->ec          = h->s;
+                               ec_stripe_new_get(h->s, STRIPE_REF_io);
+
+                               ret = add_new_bucket(c, ptrs, devs_may_alloc,
+                                                    nr_replicas, nr_effective,
+                                                    have_cache, ob);
+                               goto out;
+                       }
                }
-       goto out_put_head;
-got_bucket:
-       ob->ec_idx      = ec_idx;
-       ob->ec          = h->s;
-       ec_stripe_new_get(h->s, STRIPE_REF_io);
-
-       ret = add_new_bucket(c, ptrs, devs_may_alloc,
-                            nr_replicas, nr_effective,
-                            have_cache, ob);
-out_put_head:
+out:
        bch2_ec_stripe_head_put(c, h);
        return ret;
 }
index 4f87745df97ebd5f8539bdd74e63f9e3b1a5ce38..f25481a0d1a06806b0c998b49221ccc1d61e8886 100644 (file)
@@ -20,7 +20,7 @@ void bch2_reset_alloc_cursors(struct bch_fs *);
 
 struct dev_alloc_list {
        unsigned        nr;
-       u8              devs[BCH_SB_MEMBERS_MAX];
+       u8              data[BCH_SB_MEMBERS_MAX];
 };
 
 struct dev_alloc_list bch2_dev_alloc_list(struct bch_fs *,
index d7dfea5f0181c7abce8bcc3ce605835aaaf02b99..9a1647297d11789d7adb2618ba6c47d7a6c840b1 100644 (file)
@@ -1422,25 +1422,22 @@ fsck_err:
 
 static void __journal_write_alloc(struct journal *j,
                                  struct journal_buf *w,
-                                 struct dev_alloc_list *devs_sorted,
+                                 struct dev_alloc_list *devs,
                                  unsigned sectors,
                                  unsigned *replicas,
                                  unsigned replicas_want)
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
-       struct journal_device *ja;
-       struct bch_dev *ca;
-       unsigned i;
 
        if (*replicas >= replicas_want)
                return;
 
-       for (i = 0; i < devs_sorted->nr; i++) {
-               ca = rcu_dereference(c->devs[devs_sorted->devs[i]]);
+       darray_for_each(*devs, i) {
+               struct bch_dev *ca = rcu_dereference(c->devs[*i]);
                if (!ca)
                        continue;
 
-               ja = &ca->journal;
+               struct journal_device *ja = &ca->journal;
 
                /*
                 * Check that we can use this device, and aren't already using
@@ -1486,13 +1483,11 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w)
 {
        struct bch_fs *c = container_of(j, struct bch_fs, journal);
        struct bch_devs_mask devs;
-       struct journal_device *ja;
-       struct bch_dev *ca;
        struct dev_alloc_list devs_sorted;
        unsigned sectors = vstruct_sectors(w->data, c->block_bits);
        unsigned target = c->opts.metadata_target ?:
                c->opts.foreground_target;
-       unsigned i, replicas = 0, replicas_want =
+       unsigned replicas = 0, replicas_want =
                READ_ONCE(c->opts.metadata_replicas);
        unsigned replicas_need = min_t(unsigned, replicas_want,
                                       READ_ONCE(c->opts.metadata_replicas_required));
@@ -1517,12 +1512,12 @@ retry:
        if (replicas >= replicas_want)
                goto done;
 
-       for (i = 0; i < devs_sorted.nr; i++) {
-               ca = rcu_dereference(c->devs[devs_sorted.devs[i]]);
+       darray_for_each(devs_sorted, i) {
+               struct bch_dev *ca = rcu_dereference(c->devs[*i]);
                if (!ca)
                        continue;
 
-               ja = &ca->journal;
+               struct journal_device *ja = &ca->journal;
 
                if (sectors > ja->sectors_free &&
                    sectors <= ca->mi.bucket_size &&