}
static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
- bool initial)
+ bool initial, bool metadata_only)
{
struct btree_trans trans;
struct btree_iter *iter;
struct btree *b;
- unsigned depth = bch2_expensive_debug_checks ? 0
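+ /*
+ * In metadata_only mode, don't descend below depth 1: btree node
+ * pointers (metadata) are marked, but keys inside leaf nodes are
+ * skipped:
+ */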
+ unsigned depth = metadata_only ? 1
+ : bch2_expensive_debug_checks ? 0
: !btree_node_type_needs_gc(btree_id) ? 1
: 0;
u8 max_stale = 0;
}
static int bch2_gc_btree_init(struct bch_fs *c,
- enum btree_id btree_id)
+ enum btree_id btree_id,
+ bool metadata_only)
{
struct btree *b;
- unsigned target_depth = bch2_expensive_debug_checks ? 0
- : !btree_node_type_needs_gc(btree_id) ? 1
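+ /* as in bch2_gc_btree(), metadata_only never walks below depth 1: */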
+ unsigned target_depth = metadata_only ? 1
+ : bch2_expensive_debug_checks ? 0
+ : !btree_node_type_needs_gc(btree_id) ? 1
: 0;
u8 max_stale = 0;
int ret = 0;
(int) btree_id_to_gc_phase(r);
}
-static int bch2_gc_btrees(struct bch_fs *c, bool initial)
+static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
{
enum btree_id ids[BTREE_ID_NR];
unsigned i;
for (i = 0; i < BTREE_ID_NR; i++) {
enum btree_id id = ids[i];
int ret = initial
- ? bch2_gc_btree_init(c, id)
- : bch2_gc_btree(c, id, initial);
+ ? bch2_gc_btree_init(c, id, metadata_only)
+ : bch2_gc_btree(c, id, initial, metadata_only);
if (ret) {
bch_err(c, "%s: ret %i", __func__, ret);
return ret;
}
static int bch2_gc_done(struct bch_fs *c,
- bool initial)
+ bool initial, bool metadata_only)
{
struct bch_dev *ca;
- bool verify = (!initial ||
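+ /*
+ * metadata_only gc doesn't recompute user data accounting, so
+ * mismatches are expected there - don't report them as errors:
+ */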
+ bool verify = !metadata_only && (!initial ||
(c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
unsigned i, dev;
int ret = 0;
#define copy_fs_field(_f, _msg, ...) \
copy_field(_f, "fs has wrong " _msg, ##__VA_ARGS__)
- {
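+ /* stripes describe user data, which metadata_only gc didn't walk: */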
+ if (!metadata_only) {
struct genradix_iter iter = genradix_iter_init(&c->stripes[1], 0);
struct stripe *dst, *src;
copy_fs_field(hidden, "hidden");
copy_fs_field(btree, "btree");
- copy_fs_field(data, "data");
- copy_fs_field(cached, "cached");
- copy_fs_field(reserved, "reserved");
- copy_fs_field(nr_inodes,"nr_inodes");
- for (i = 0; i < BCH_REPLICAS_MAX; i++)
- copy_fs_field(persistent_reserved[i],
- "persistent_reserved[%i]", i);
+ if (!metadata_only) {
+ copy_fs_field(data, "data");
+ copy_fs_field(cached, "cached");
+ copy_fs_field(reserved, "reserved");
+ copy_fs_field(nr_inodes,"nr_inodes");
+
+ for (i = 0; i < BCH_REPLICAS_MAX; i++)
+ copy_fs_field(persistent_reserved[i],
+ "persistent_reserved[%i]", i);
+ }
for (i = 0; i < c->replicas.nr; i++) {
struct bch_replicas_entry *e =
cpu_replicas_entry(&c->replicas, i);
char buf[80];
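+ /* user/cached replicas weren't recounted by metadata_only gc: */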
+ if (metadata_only &&
+ (e->data_type == BCH_DATA_user ||
+ e->data_type == BCH_DATA_cached))
+ continue;
+
bch2_replicas_entry_to_text(&PBUF(buf), e);
copy_fs_field(replicas[i], "%s", buf);
return ret;
}
-static int bch2_gc_start(struct bch_fs *c)
+static int bch2_gc_start(struct bch_fs *c,
+ bool metadata_only)
{
struct bch_dev *ca;
unsigned i;
d->_mark.gen = dst->b[b].oldest_gen = s->mark.gen;
d->gen_valid = s->gen_valid;
+
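+ /*
+ * metadata_only gc won't walk user/cached data: seed the gc copy
+ * with the existing mark (owned_by_allocator is still recomputed):
+ */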
+ if (metadata_only &&
+ (s->mark.data_type == BCH_DATA_user ||
+ s->mark.data_type == BCH_DATA_cached)) {
+ d->_mark = s->mark;
+ d->_mark.owned_by_allocator = 0;
+ }
}
};
* move around - if references move backwards in the ordering GC
* uses, GC could skip past them
*/
-int bch2_gc(struct bch_fs *c, bool initial)
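+/*
+ * metadata_only: walk and mark only metadata (btree nodes, superblock and
+ * journal) - user data accounting is carried forward, not recomputed:
+ */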
+int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
{
struct bch_dev *ca;
u64 start_time = local_clock();
closure_wait_event(&c->btree_interior_update_wait,
!bch2_btree_interior_updates_nr_pending(c));
again:
- ret = bch2_gc_start(c);
+ ret = bch2_gc_start(c, metadata_only);
if (ret)
goto out;
bch2_mark_superblocks(c);
- ret = bch2_gc_btrees(c, initial);
+ ret = bch2_gc_btrees(c, initial, metadata_only);
if (ret)
goto out;
bch2_journal_block(&c->journal);
percpu_down_write(&c->mark_lock);
- ret = bch2_gc_done(c, initial);
+ ret = bch2_gc_done(c, initial, metadata_only);
bch2_journal_unblock(&c->journal);
} else {