unsigned nr_replicas,
unsigned *nr_effective,
bool *have_cache,
+ enum alloc_reserve reserve,
unsigned flags,
struct closure *cl)
{
if (ec_open_bucket(c, ptrs))
return 0;
- h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1,
- wp == &c->copygc_write_point,
- cl);
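+ /* the stripe head is keyed on reserve, so buckets for its new stripe come from the same reserve as this allocation */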
+ h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, reserve, cl);
if (IS_ERR(h))
return PTR_ERR(h);
if (!h)
ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
target, erasure_code,
nr_replicas, nr_effective,
- have_cache, flags, _cl);
+ have_cache, reserve, flags, _cl);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
unsigned algo, unsigned redundancy,
- bool copygc)
+ enum alloc_reserve reserve)
{
struct ec_stripe_head *h;
struct bch_dev *ca;
h->target = target;
h->algo = algo;
h->redundancy = redundancy;
- h->copygc = copygc;
+ h->reserve = reserve;
rcu_read_lock();
h->devs = target_rw_devs(c, BCH_DATA_user, target);
unsigned target,
unsigned algo,
unsigned redundancy,
- bool copygc)
+ enum alloc_reserve reserve)
{
struct bch_fs *c = trans->c;
struct ec_stripe_head *h;
if (h->target == target &&
h->algo == algo &&
h->redundancy == redundancy &&
- h->copygc == copygc) {
+ h->reserve == reserve) {
ret = bch2_trans_mutex_lock(trans, &h->lock);
if (ret)
h = ERR_PTR(ret);
goto found;
}
- h = ec_new_stripe_head_alloc(c, target, algo, redundancy, copygc);
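+ /* no existing stripe head matched; allocate a new one */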
+ h = ec_new_stripe_head_alloc(c, target, algo, redundancy, reserve);
found:
mutex_unlock(&c->ec_stripe_head_lock);
return h;
}
static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
- struct closure *cl)
+ enum alloc_reserve reserve, struct closure *cl)
{
struct bch_fs *c = trans->c;
struct bch_devs_mask devs = h->devs;
bool have_cache = true;
int ret = 0;
- for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) {
- if (test_bit(i, h->s->blocks_gotten)) {
- __clear_bit(h->s->new_stripe.key.v.ptrs[i].dev, devs.d);
- if (i < h->s->nr_data)
- nr_have_data++;
- else
- nr_have_parity++;
- }
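+ /* count blocks we already have, and exclude their devices from further allocation: */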
+ for_each_set_bit(i, h->s->blocks_gotten, h->s->new_stripe.key.v.nr_blocks) {
+ __clear_bit(h->s->new_stripe.key.v.ptrs[i].dev, devs.d);
+ if (i < h->s->nr_data)
+ nr_have_data++;
+ else
+ nr_have_parity++;
}
BUG_ON(nr_have_data > h->s->nr_data);
h->s->nr_parity,
&nr_have_parity,
&have_cache,
- h->copygc
- ? RESERVE_movinggc
- : RESERVE_none,
+ reserve,
0,
cl);
h->s->nr_data,
&nr_have_data,
&have_cache,
- h->copygc
- ? RESERVE_movinggc
- : RESERVE_none,
+ reserve,
0,
cl);
unsigned target,
unsigned algo,
unsigned redundancy,
- bool copygc,
+ enum alloc_reserve reserve,
struct closure *cl)
{
struct bch_fs *c = trans->c;
int ret;
bool needs_stripe_new;
- h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, copygc);
+ h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, reserve);
if (!h)
bch_err(c, "no stripe head");
if (IS_ERR_OR_NULL(h))
}
if (!h->s->allocated) {
- ret = new_stripe_alloc_buckets(trans, h, cl);
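+ /* stripe not fully allocated yet: allocate remaining blocks from the caller's reserve */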
+ ret = new_stripe_alloc_buckets(trans, h, reserve, cl);
if (ret)
goto err;
unsigned target;
unsigned algo;
unsigned redundancy;
- bool copygc;
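+ /* reserve the stripe's buckets are allocated from */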
+ enum alloc_reserve reserve;
struct bch_devs_mask devs;
unsigned nr_active_devs;
void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
- unsigned, unsigned, unsigned, bool, struct closure *);
+ unsigned, unsigned, unsigned,
+ enum alloc_reserve, struct closure *);
void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);