{
struct bch_fs *c = as->c;
struct pending_btree_node_free *d;
-
- /*
- * btree_update lock is only needed here to avoid racing with
- * gc:
- */
- mutex_lock(&c->btree_interior_update_lock);
+ struct gc_pos pos = { 0 };
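+
+ /*
+  * usage_lock and btree_interior_update_lock are now taken by the
+  * callers of this function - hence bch2_mark_key_locked() below:
+  */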
for (d = as->pending; d < as->pending + as->nr_pending; d++)
if (!bkey_cmp(k.k->p, d->key.k.p) &&
if (gc_pos_cmp(c->gc_pos, b
? gc_pos_btree_node(b)
: gc_pos_btree_root(as->btree_id)) >= 0 &&
- gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0) {
- struct gc_pos pos = { 0 };
-
- bch2_mark_key(c, BKEY_TYPE_BTREE,
+ gc_pos_cmp(c->gc_pos, gc_phase(GC_PHASE_PENDING_DELETE)) < 0)
+ bch2_mark_key_locked(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&d->key),
false, 0, pos,
NULL, 0, BCH_BUCKET_MARK_GC);
- /*
- * Don't apply tmp - pending deletes aren't tracked in
- * bch_alloc_stats:
- */
- }
-
- mutex_unlock(&c->btree_interior_update_lock);
}
static void __btree_node_free(struct bch_fs *c, struct btree *b)
__bch2_btree_set_root_inmem(c, b);
- bch2_mark_key(c, BKEY_TYPE_BTREE,
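+ /*
+  * Mark the new root and apply the resulting usage change with
+  * btree_interior_update_lock and usage_lock held:
+  */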
+ mutex_lock(&c->btree_interior_update_lock);
+ percpu_down_read(&c->usage_lock);
+
+ bch2_mark_key_locked(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&b->key),
true, 0,
gc_pos_btree_root(b->btree_id),
&stats, 0, 0);
bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
gc_pos_btree_root(b->btree_id));
+
+ percpu_up_read(&c->usage_lock);
+ mutex_unlock(&c->btree_interior_update_lock);
}
static void bch2_btree_set_root_ondisk(struct bch_fs *c, struct btree *b, int rw)
BUG_ON(insert->k.u64s > bch_btree_keys_u64s_remaining(c, b));
+ mutex_lock(&c->btree_interior_update_lock);
+ percpu_down_read(&c->usage_lock);
+
if (bkey_extent_is_data(&insert->k))
- bch2_mark_key(c, BKEY_TYPE_BTREE,
+ bch2_mark_key_locked(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(insert),
true, 0,
gc_pos_btree_node(b), &stats, 0, 0);
bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
gc_pos_btree_node(b));
+ percpu_up_read(&c->usage_lock);
+ mutex_unlock(&c->btree_interior_update_lock);
+
bch2_btree_bset_insert_key(iter, b, node_iter, insert);
set_btree_node_dirty(b);
set_btree_node_need_write(b);
bch2_btree_node_lock_write(b, iter);
- bch2_mark_key(c, BKEY_TYPE_BTREE,
+ mutex_lock(&c->btree_interior_update_lock);
+ percpu_down_read(&c->usage_lock);
+
+ bch2_mark_key_locked(c, BKEY_TYPE_BTREE,
bkey_i_to_s_c(&new_key->k_i),
true, 0,
gc_pos_btree_root(b->btree_id),
&stats, 0, 0);
bch2_fs_usage_apply(c, &stats, &as->reserve->disk_res,
gc_pos_btree_root(b->btree_id));
+ percpu_up_read(&c->usage_lock);
+ mutex_unlock(&c->btree_interior_update_lock);
+
if (PTR_HASH(&new_key->k_i) != PTR_HASH(&b->key)) {
mutex_lock(&c->btree_cache.lock);
bch2_btree_node_hash_remove(&c->btree_cache, b);
s64 added = sum.data + sum.reserved;
s64 should_not_have_added;
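+ /* usage_lock is now taken by callers - assert rather than lock here: */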
+ percpu_rwsem_assert_held(&c->usage_lock);
+
/*
* Not allowed to reduce sectors_available except by getting a
* reservation:
stats->online_reserved -= added;
}
- percpu_down_read(&c->usage_lock);
preempt_disable();
/* online_reserved not subject to gc: */
this_cpu_add(c->usage[0]->online_reserved, stats->online_reserved);
bch2_fs_stats_verify(c);
preempt_enable();
- percpu_up_read(&c->usage_lock);
memset(stats, 0, sizeof(*stats));
}
bch2_dev_stats_verify(ca);
}
-#define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr) \
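+/*
+ * (Re)populate a device's usage counters from its bucket marks: each bucket
+ * that currently holds data is accounted as a transition from an empty mark.
+ */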
+void bch2_dev_usage_from_buckets(struct bch_fs *c, struct bch_dev *ca)
+{
+ struct bucket_mark old = { .v.counter = 0 };
+ struct bch_fs_usage *fs_usage;
+ struct bucket_array *buckets;
+ struct bucket *g;
+
+ percpu_down_read(&c->usage_lock);
+ fs_usage = this_cpu_ptr(c->usage[0]);
+ buckets = bucket_array(ca);
+
+ for_each_bucket(g, buckets)
+ if (g->mark.data_type)
+ bch2_dev_usage_update(c, ca, fs_usage, old, g->mark, false);
+ percpu_up_read(&c->usage_lock);
+}
+
+#define bucket_data_cmpxchg(c, ca, fs_usage, g, new, expr) \
({ \
struct bucket_mark _old = bucket_cmpxchg(g, new, expr); \
\
{
struct bch_fs_usage *fs_usage = this_cpu_ptr(c->usage[gc]);
struct bucket *g = __bucket(ca, b, gc);
- struct bucket_mark old, new;
+ struct bucket_mark new;
BUG_ON(type != BCH_DATA_SB &&
type != BCH_DATA_JOURNAL);
- old = bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
+ bucket_data_cmpxchg(c, ca, fs_usage, g, new, ({
new.data_type = type;
checked_add(new.dirty_sectors, sectors);
}));
return ret;
}
-int bch2_mark_key(struct bch_fs *c,
- enum bkey_type type, struct bkey_s_c k,
- bool inserting, s64 sectors,
- struct gc_pos pos,
- struct bch_fs_usage *stats,
- u64 journal_seq, unsigned flags)
+int bch2_mark_key_locked(struct bch_fs *c,
+ enum bkey_type type, struct bkey_s_c k,
+ bool inserting, s64 sectors,
+ struct gc_pos pos,
+ struct bch_fs_usage *stats,
+ u64 journal_seq, unsigned flags)
{
- int ret = 0;
-
- percpu_down_read(&c->usage_lock);
+ int ret;
if (!(flags & BCH_BUCKET_MARK_GC)) {
if (!stats)
ret = __bch2_mark_key(c, type, k, inserting, sectors,
stats, journal_seq, flags, false);
if (ret)
- goto out;
+ return ret;
}
if ((flags & BCH_BUCKET_MARK_GC) ||
this_cpu_ptr(c->usage[1]),
journal_seq, flags, true);
if (ret)
- goto out;
+ return ret;
}
-out:
+
+ return 0;
+}
+
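+/* Wrapper for callers that don't already hold usage_lock: */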
+int bch2_mark_key(struct bch_fs *c,
+ enum bkey_type type, struct bkey_s_c k,
+ bool inserting, s64 sectors,
+ struct gc_pos pos,
+ struct bch_fs_usage *stats,
+ u64 journal_seq, unsigned flags)
+{
+ int ret;
+
+ percpu_down_read(&c->usage_lock);
+ ret = bch2_mark_key_locked(c, type, k, inserting, sectors,
+ pos, stats, journal_seq, flags);
percpu_up_read(&c->usage_lock);
return ret;
struct gc_pos pos = gc_pos_btree_node(b);
struct bkey_packed *_k;
+ if (!bkey_type_needs_gc(iter->btree_id))
+ return;
+
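+ /*
+  * Mark the inserted key, any keys it overwrites, and the final
+  * bch2_fs_usage_apply() all under one usage_lock read section:
+  */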
+ percpu_down_read(&c->usage_lock);
+
if (!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
- bch2_mark_key(c, btree_node_type(b), bkey_i_to_s_c(insert->k),
- true,
- bpos_min(insert->k->k.p, b->key.k.p).offset -
- bkey_start_offset(&insert->k->k),
- pos, &stats, trans->journal_res.seq, 0);
+ bch2_mark_key_locked(c, btree_node_type(b),
+ bkey_i_to_s_c(insert->k), true,
+ bpos_min(insert->k->k.p, b->key.k.p).offset -
+ bkey_start_offset(&insert->k->k),
+ pos, &stats, trans->journal_res.seq, 0);
while ((_k = bch2_btree_node_iter_peek_filter(&node_iter, b,
KEY_TYPE_DISCARD))) {
sectors = k.k->p.offset - insert->k->k.p.offset;
BUG_ON(sectors <= 0);
- bch2_mark_key(c, btree_node_type(b), k,
- true, sectors,
- pos, &stats, trans->journal_res.seq, 0);
+ bch2_mark_key_locked(c, btree_node_type(b),
+ k, true, sectors, pos, &stats,
+ trans->journal_res.seq, 0);
sectors = bkey_start_offset(&insert->k->k) -
k.k->p.offset;
BUG_ON(sectors >= 0);
}
- bch2_mark_key(c, btree_node_type(b), k,
- false, sectors,
- pos, &stats, trans->journal_res.seq, 0);
+ bch2_mark_key_locked(c, btree_node_type(b),
+ k, false, sectors, pos, &stats,
+ trans->journal_res.seq, 0);
bch2_btree_node_iter_advance(&node_iter, b);
}
bch2_fs_usage_apply(c, &stats, trans->disk_res, pos);
+
+ percpu_up_read(&c->usage_lock);
}
/* Disk reservations: */