* move around - if references move backwards in the ordering GC
* uses, GC could skip past them
*/
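Note: the ordering constraint above is why GC can miss references that move
backwards while it runs. As a hedged illustration (gc_pos_cmp() and c->gc_pos
exist in btree_gc.h and struct bch_fs, but this helper itself is hypothetical),
the "will GC still visit this position?" check looks roughly like:

	/* Hypothetical sketch: GC will still visit @pos only if it sorts
	 * after the position GC has marked up to so far; a reference that
	 * moves to a position GC has already passed is never revisited. */
	static bool gc_will_visit(struct bch_fs *c, struct gc_pos pos)
	{
		return gc_pos_cmp(pos, c->gc_pos) > 0;
	}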
-int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
+static int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
{
unsigned iter = 0;
int ret;
@@ ... @@
return ret;
}
+int bch2_check_allocations(struct bch_fs *c)
+{
+ return bch2_gc(c, true, false);
+}
+
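Note: with bch2_gc() now static, this wrapper is the only way to kick off a
full mark-and-sweep from outside btree_gc.c. A minimal, hypothetical caller
(the function name is made up; bch_err() and bch2_err_str() are existing
bcachefs helpers):

	static int force_check_allocations(struct bch_fs *c)
	{
		int ret = bch2_check_allocations(c);

		if (ret)
			bch_err(c, "check_allocations failed: %s", bch2_err_str(ret));
		return ret;
	}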
static int gc_btree_gens_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k)
@@ ... @@ static int bch2_gc_thread(void *arg)
last = atomic64_read(&clock->now);
last_kick = atomic_read(&c->kick_gc);
- /*
- * Full gc is currently incompatible with btree key cache:
- */
-#if 0
- ret = bch2_gc(c, false, false);
-#else
bch2_gc_gens(c);
-#endif
debug_check_no_locks_held();
}
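Note: after this hunk the GC thread only ever runs gen GC; with bch2_gc()
static, the old full-gc path isn't even callable from here. A rough sketch of
the resulting loop shape (hypothetical, io-clock throttling elided):

	static int gc_thread_sketch(void *arg)
	{
		struct bch_fs *c = arg;

		while (!kthread_should_stop()) {
			/* gen-only GC: no full mark-and-sweep */
			int ret = bch2_gc_gens(c);

			if (ret)
				bch_err(c, "error from bch2_gc_gens(): %s",
					bch2_err_str(ret));
			cond_resched();
		}
		return 0;
	}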
--- a/fs/bcachefs/btree_gc.h
+++ b/fs/bcachefs/btree_gc.h
@@ ... @@
#include "btree_types.h"
int bch2_check_topology(struct bch_fs *);
-int bch2_gc(struct bch_fs *, bool, bool);
+int bch2_check_allocations(struct bch_fs *);
int bch2_gc_gens(struct bch_fs *);
void bch2_gc_thread_stop(struct bch_fs *);
int bch2_gc_thread_start(struct bch_fs *);
--- a/fs/bcachefs/recovery.c
+++ b/fs/bcachefs/recovery.c
@@ ... @@
NULL
};
-static int bch2_check_allocations(struct bch_fs *c)
-{
- return bch2_gc(c, true, false);
-}
-
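Note: this deletes recovery.c's local copy of the wrapper, identical to the
one just added in btree_gc.c; recovery.c now resolves bch2_check_allocations
through the declaration in btree_gc.h.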
static int bch2_set_may_go_rw(struct bch_fs *c)
{
struct journal_keys *keys = &c->journal_keys;
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ ... @@
if (attr == &sysfs_btree_wakeup)
bch2_btree_wakeup_all(c);
- if (attr == &sysfs_trigger_gc) {
- /*
- * Full gc is currently incompatible with btree key cache:
- */
-#if 0
- down_read(&c->state_lock);
- bch2_gc(c, false, false);
- up_read(&c->state_lock);
-#else
+ if (attr == &sysfs_trigger_gc)
bch2_gc_gens(c);
-#endif
- }
if (attr == &sysfs_trigger_discards)
bch2_do_discards(c);
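Note: same simplification as in the GC thread above: the trigger_gc attribute
now unconditionally runs gen GC, and the dead bch2_gc(c, false, false) path,
along with its state_lock bracketing, is removed rather than left behind an
#if 0.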