bcachefs: new_stripe_alloc_buckets() takes alloc_request
author: Kent Overstreet <kent.overstreet@linux.dev>
Mon, 31 Mar 2025 19:46:45 +0000 (15:46 -0400)
committer: Kent Overstreet <kent.overstreet@linux.dev>
Thu, 22 May 2025 00:13:33 +0000 (20:13 -0400)
More stack usage improvements: instead of creating a new alloc_request
(currently on the stack), save/restore just the fields we need to reuse.

This is a bit tricky, because we're doing a normal alloc_foreground.c
allocation, which calls into ec.c to get a stripe, which then does more
normal allocations - some of the fields get reused, and used
differently.

So we have to save and restore them - but the stack usage improvements
will be well worth it.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
fs/bcachefs/ec.c

index 6f977e134d089aa284e7214e6ff266a2c2b4dc04..11f46dccc14f40e32b565779660f0a6d8b5dc3d8 100644 (file)
@@ -1710,8 +1710,9 @@ err:
 }
 
 static int new_stripe_alloc_buckets(struct btree_trans *trans,
+                                   struct alloc_request *req,
                                    struct ec_stripe_head *h, struct ec_stripe_new *s,
-                                   enum bch_watermark watermark, struct closure *cl)
+                                   struct closure *cl)
 {
        struct bch_fs *c = trans->c;
        struct open_bucket *ob;
@@ -1719,17 +1720,21 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans,
        unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
        int ret = 0;
 
-       struct alloc_request req = {
-               .watermark      = watermark,
-               .devs_may_alloc = h->devs,
-               .have_cache     = true,
-       };
+       enum bch_data_type      saved_data_type         = req->data_type;
+       struct open_buckets     saved_ptrs              = req->ptrs;
+       unsigned                saved_nr_replicas       = req->nr_replicas;
+       unsigned                saved_nr_effective      = req->nr_effective;
+       bool                    saved_have_cache        = req->have_cache;
+       struct bch_devs_mask    saved_devs_may_alloc    = req->devs_may_alloc;
+
+       req->devs_may_alloc     = h->devs;
+       req->have_cache         = true;
 
        BUG_ON(v->nr_blocks     != s->nr_data + s->nr_parity);
        BUG_ON(v->nr_redundant  != s->nr_parity);
 
        /* * We bypass the sector allocator which normally does this: */
-       bitmap_and(req.devs_may_alloc.d, req.devs_may_alloc.d,
+       bitmap_and(req->devs_may_alloc.d, req->devs_may_alloc.d,
                   c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX);
 
        for_each_set_bit(i, s->blocks_gotten, v->nr_blocks) {
@@ -1740,7 +1745,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans,
                 * block when updating the stripe
                 */
                if (v->ptrs[i].dev != BCH_SB_MEMBER_INVALID)
-                       __clear_bit(v->ptrs[i].dev, req.devs_may_alloc.d);
+                       __clear_bit(v->ptrs[i].dev, req->devs_may_alloc.d);
 
                if (i < s->nr_data)
                        nr_have_data++;
@@ -1751,52 +1756,58 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans,
        BUG_ON(nr_have_data     > s->nr_data);
        BUG_ON(nr_have_parity   > s->nr_parity);
 
-       req.ptrs.nr = 0;
+       req->ptrs.nr = 0;
        if (nr_have_parity < s->nr_parity) {
-               req.nr_replicas         = s->nr_parity;
-               req.nr_effective        = nr_have_parity;
-               req.data_type           = BCH_DATA_parity;
+               req->nr_replicas        = s->nr_parity;
+               req->nr_effective       = nr_have_parity;
+               req->data_type          = BCH_DATA_parity;
 
-               ret = bch2_bucket_alloc_set_trans(trans, &req, &h->parity_stripe, cl);
+               ret = bch2_bucket_alloc_set_trans(trans, req, &h->parity_stripe, cl);
 
-               open_bucket_for_each(c, &req.ptrs, ob, i) {
+               open_bucket_for_each(c, &req->ptrs, ob, i) {
                        j = find_next_zero_bit(s->blocks_gotten,
                                               s->nr_data + s->nr_parity,
                                               s->nr_data);
                        BUG_ON(j >= s->nr_data + s->nr_parity);
 
-                       s->blocks[j] = req.ptrs.v[i];
+                       s->blocks[j] = req->ptrs.v[i];
                        v->ptrs[j] = bch2_ob_ptr(c, ob);
                        __set_bit(j, s->blocks_gotten);
                }
 
                if (ret)
-                       return ret;
+                       goto err;
        }
 
-       req.ptrs.nr = 0;
+       req->ptrs.nr = 0;
        if (nr_have_data < s->nr_data) {
-               req.nr_replicas         = s->nr_data;
-               req.nr_effective        = nr_have_data;
-               req.data_type           = BCH_DATA_user;
+               req->nr_replicas        = s->nr_data;
+               req->nr_effective       = nr_have_data;
+               req->data_type          = BCH_DATA_user;
 
-               ret = bch2_bucket_alloc_set_trans(trans, &req, &h->block_stripe, cl);
+               ret = bch2_bucket_alloc_set_trans(trans, req, &h->block_stripe, cl);
 
-               open_bucket_for_each(c, &req.ptrs, ob, i) {
+               open_bucket_for_each(c, &req->ptrs, ob, i) {
                        j = find_next_zero_bit(s->blocks_gotten,
                                               s->nr_data, 0);
                        BUG_ON(j >= s->nr_data);
 
-                       s->blocks[j] = req.ptrs.v[i];
+                       s->blocks[j] = req->ptrs.v[i];
                        v->ptrs[j] = bch2_ob_ptr(c, ob);
                        __set_bit(j, s->blocks_gotten);
                }
 
                if (ret)
-                       return ret;
+                       goto err;
        }
-
-       return 0;
+err:
+       req->data_type          = saved_data_type;
+       req->ptrs               = saved_ptrs;
+       req->nr_replicas        = saved_nr_replicas;
+       req->nr_effective       = saved_nr_effective;
+       req->have_cache         = saved_have_cache;
+       req->devs_may_alloc     = saved_devs_may_alloc;
+       return ret;
 }
 
 static int __get_existing_stripe(struct btree_trans *trans,
@@ -2022,8 +2033,12 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
                goto alloc_existing;
 
        /* First, try to allocate a full stripe: */
-       ret =   new_stripe_alloc_buckets(trans, h, s, BCH_WATERMARK_stripe, NULL) ?:
+       enum bch_watermark saved_watermark = BCH_WATERMARK_stripe;
+       swap(req->watermark, saved_watermark);
+       ret =   new_stripe_alloc_buckets(trans, req, h, s, NULL) ?:
                __bch2_ec_stripe_head_reserve(trans, h, s);
+       swap(req->watermark, saved_watermark);
+
        if (!ret)
                goto allocate_buf;
        if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
@@ -2042,7 +2057,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
                        goto err;
 
                if (req->watermark == BCH_WATERMARK_copygc) {
-                       ret =   new_stripe_alloc_buckets(trans, h, s, req->watermark, NULL) ?:
+                       ret =   new_stripe_alloc_buckets(trans, req, h, s, NULL) ?:
                                __bch2_ec_stripe_head_reserve(trans, h, s);
                        if (ret)
                                goto err;
@@ -2061,7 +2076,7 @@ alloc_existing:
         * Retry allocating buckets, with the watermark for this
         * particular write:
         */
-       ret = new_stripe_alloc_buckets(trans, h, s, req->watermark, cl);
+       ret = new_stripe_alloc_buckets(trans, req, h, s, cl);
        if (ret)
                goto err;