}
{
- unsigned nr = sizeof(struct bch_fs_usage) / sizeof(u64) +
- c->replicas.nr;
+ unsigned nr = fs_usage_u64s(c);
struct bch_fs_usage *dst = (void *)
bch2_acc_percpu_u64s((void *) c->usage[0], nr);
	struct bch_fs_usage *src = (void *)
		bch2_acc_percpu_u64s((void *) c->usage[1], nr);

	BUG_ON(c->usage[1]);
- c->usage[1] = __alloc_percpu_gfp(sizeof(struct bch_fs_usage) +
- sizeof(u64) * c->replicas.nr,
- sizeof(u64),
- GFP_KERNEL);
+ c->usage[1] = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
+ sizeof(u64), GFP_KERNEL);
percpu_up_write(&c->mark_lock);
	if (!c->usage[1])
		return -ENOMEM;
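The new allocation size is byte-for-byte the same as the old one: every field of struct bch_fs_usage is a u64 (so its size is a multiple of sizeof(u64)), and the helper simply counts in u64s, which is the granularity acc_u64s()/acc_u64s_percpu() work in anyway. A hypothetical sanity check making the equivalence explicit:

	/*
	 * Hypothetical check, assuming mark_lock is held so that
	 * c->replicas.nr is stable across the two reads:
	 */
	BUG_ON(fs_usage_u64s(c) * sizeof(u64) !=
	       sizeof(struct bch_fs_usage) + sizeof(u64) * c->replicas.nr);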
void bch2_fs_usage_initialize(struct bch_fs *c)
{
struct bch_fs_usage *usage;
- unsigned i, nr;
+ unsigned i;
percpu_down_write(&c->mark_lock);
- nr = sizeof(struct bch_fs_usage) / sizeof(u64) + c->replicas.nr;
- usage = (void *) bch2_acc_percpu_u64s((void *) c->usage[0], nr);
+ usage = (void *) bch2_acc_percpu_u64s((void *) c->usage[0],
+ fs_usage_u64s(c));
for (i = 0; i < BCH_REPLICAS_MAX; i++)
usage->reserved += usage->persistent_reserved[i];
struct bch_fs_usage *bch2_fs_usage_read(struct bch_fs *c)
{
struct bch_fs_usage *ret;
- unsigned nr = READ_ONCE(c->replicas.nr);
+ unsigned v, u64s = fs_usage_u64s(c);
retry:
- ret = kzalloc(sizeof(*ret) + nr * sizeof(u64), GFP_NOFS);
+ ret = kzalloc(u64s * sizeof(u64), GFP_NOFS);
if (unlikely(!ret))
return NULL;
percpu_down_read(&c->mark_lock);
- if (unlikely(nr < c->replicas.nr)) {
- nr = c->replicas.nr;
+ v = fs_usage_u64s(c);
+ if (unlikely(u64s != v)) {
+ u64s = v;
percpu_up_read(&c->mark_lock);
kfree(ret);
goto retry;
}
- acc_u64s_percpu((u64 *) ret,
- (u64 __percpu *) c->usage[0],
- sizeof(*ret) / sizeof(u64) + nr);
+	acc_u64s_percpu((u64 *) ret, (u64 __percpu *) c->usage[0], u64s);

	percpu_up_read(&c->mark_lock);

	return ret;
}
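The retry loop handles the replicas table growing between the unlocked kzalloc() and taking mark_lock: the table is only resized with mark_lock held for write, so re-reading fs_usage_u64s() under the read lock is enough to detect a stale size. The snapshot is plain kmalloc'd memory owned by the caller; a hypothetical caller:

	/* Hypothetical caller: take a consistent snapshot of fs usage. */
	struct bch_fs_usage *u = bch2_fs_usage_read(c);

	if (u) {
		/* ... read u->reserved, u->persistent_reserved[], ... */
		kfree(u);
	}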
preempt_disable();
acc_u64s((u64 *) this_cpu_ptr(c->usage[0]),
- (u64 *) fs_usage,
- sizeof(*fs_usage) / sizeof(u64) + c->replicas.nr);
+ (u64 *) fs_usage, fs_usage_u64s(c));
preempt_enable();
return ret;
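acc_u64s() and acc_u64s_percpu() treat the whole usage struct as a flat array of u64s, which is why fs_usage_u64s() counts in u64s rather than bytes. Roughly, as a sketch of the util.h helpers (not the verbatim source):

static inline void acc_u64s(u64 *acc, const u64 *src, unsigned nr)
{
	while (nr--)
		*acc++ += *src++;
}

static inline void acc_u64s_percpu(u64 *acc, const u64 __percpu *src,
				   unsigned nr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		acc_u64s(acc, per_cpu_ptr(src, cpu), nr);
}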
/* Filesystem usage: */
+static inline unsigned fs_usage_u64s(struct bch_fs *c)
+{
+	return sizeof(struct bch_fs_usage) / sizeof(u64) +
+		READ_ONCE(c->replicas.nr);
+}
+
static inline struct bch_fs_usage *bch2_fs_usage_get_scratch(struct bch_fs *c)
{
-	struct bch_fs_usage *ret;
-
-	ret = this_cpu_ptr(c->usage_scratch);
-
-	memset(ret, 0, sizeof(*ret) + c->replicas.nr * sizeof(u64));
+	struct bch_fs_usage *ret = this_cpu_ptr(c->usage_scratch);
+
+	memset(ret, 0, fs_usage_u64s(c) * sizeof(u64));

	return ret;
}
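fs_usage_u64s() makes sense because struct bch_fs_usage is nothing but u64 counters: a fixed block followed by a flexible array with one counter per replicas-table entry. A sketch of that layout, showing only the fields this patch touches (see buckets_types.h for the real definition):

struct bch_fs_usage {
	u64			reserved;
	/* ... further fixed u64 counters ... */
	u64			persistent_reserved[BCH_REPLICAS_MAX];
	u64			replicas[];	/* one u64 per replicas entry */
};

The READ_ONCE() matters because c->replicas.nr can grow concurrently; callers that need a stable size re-check under mark_lock, as bch2_fs_usage_read() does above.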
static int replicas_table_update(struct bch_fs *c,
struct bch_replicas_cpu *new_r)
{
- struct bch_fs_usage __percpu *new_usage[3] = { NULL, NULL, NULL };
+ struct bch_fs_usage __percpu *new_usage[2] = { NULL, NULL };
+ struct bch_fs_usage __percpu *new_scratch = NULL;
unsigned bytes = sizeof(struct bch_fs_usage) +
sizeof(u64) * new_r->nr;
- unsigned i;
int ret = -ENOMEM;
- for (i = 0; i < 3; i++) {
- if (i < 2 && !c->usage[i])
- continue;
-
- new_usage[i] = __alloc_percpu_gfp(bytes, sizeof(u64),
- GFP_NOIO);
- if (!new_usage[i])
- goto err;
- }
-
- for (i = 0; i < 2; i++) {
- if (!c->usage[i])
- continue;
-
- __replicas_table_update(new_usage[i], new_r,
- c->usage[i], &c->replicas);
-
- swap(c->usage[i], new_usage[i]);
- }
-
- swap(c->usage_scratch, new_usage[2]);
+ if (!(new_usage[0] = __alloc_percpu_gfp(bytes, sizeof(u64),
+ GFP_NOIO)) ||
+ (c->usage[1] &&
+ !(new_usage[1] = __alloc_percpu_gfp(bytes, sizeof(u64),
+ GFP_NOIO))) ||
+ !(new_scratch = __alloc_percpu_gfp(bytes, sizeof(u64),
+ GFP_NOIO)))
+ goto err;
- swap(c->replicas, *new_r);
+ if (c->usage[0])
+ __replicas_table_update(new_usage[0], new_r,
+ c->usage[0], &c->replicas);
+ if (c->usage[1])
+ __replicas_table_update(new_usage[1], new_r,
+ c->usage[1], &c->replicas);
+
+ swap(c->usage[0], new_usage[0]);
+ swap(c->usage[1], new_usage[1]);
+ swap(c->usage_scratch, new_scratch);
+ swap(c->replicas, *new_r);
ret = 0;
err:
- for (i = 0; i < 3; i++)
- free_percpu(new_usage[i]);
+ free_percpu(new_scratch);
+ free_percpu(new_usage[1]);
+ free_percpu(new_usage[0]);
return ret;
}
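__replicas_table_update() (called above; its body is outside this hunk) cannot copy the per-replicas counters flat: the table is kept sorted, so an entry's index shifts when new entries are inserted, and each counter has to be remapped by entry. A hedged sketch of that remapping, where entry_at() and entry_idx() are hypothetical stand-ins for the real lookup helpers in replicas.c:

/* Sketch only: move counters from their old slots to their new ones. */
static void remap_replicas_counters(u64 *dst, struct bch_replicas_cpu *new_r,
				    const u64 *src, struct bch_replicas_cpu *old_r)
{
	unsigned i;

	for (i = 0; i < new_r->nr; i++) {
		/* entry_at()/entry_idx(): hypothetical lookup helpers */
		int old_idx = entry_idx(old_r, entry_at(new_r, i));

		if (old_idx >= 0)	/* entry may be new in new_r */
			dst[i] = src[old_idx];
	}
}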
{
c->journal.entry_u64s_reserved +=
reserve_journal_replicas(c, &c->replicas);
- return 0;
+
+ return replicas_table_update(c, &c->replicas);
}
{
struct bch_sb_field_members *mi;
struct bch_fs *c;
- unsigned i, iter_size, fs_usage_size;
+ unsigned i, iter_size;
const char *err;
pr_verbose_init(opts, "");
(btree_blocks(c) + 1) * 2 *
sizeof(struct btree_node_iter_set);
- fs_usage_size = sizeof(struct bch_fs_usage) +
- sizeof(u64) * c->replicas.nr;
-
if (!(c->wq = alloc_workqueue("bcachefs",
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_HIGHPRI, 1)) ||
!(c->copygc_wq = alloc_workqueue("bcache_copygc",
max(offsetof(struct btree_read_bio, bio),
offsetof(struct btree_write_bio, wbio.bio)),
BIOSET_NEED_BVECS) ||
- !(c->usage[0] = __alloc_percpu(fs_usage_size, sizeof(u64))) ||
- !(c->usage_scratch = __alloc_percpu(fs_usage_size, sizeof(u64))) ||
!(c->pcpu = alloc_percpu(struct bch_fs_pcpu)) ||
mempool_init_kvpmalloc_pool(&c->btree_bounce_pool, 1,
btree_bytes(c)) ||