{
spin_lock_init(&c->freelist_lock);
}
+
+/*
+ * bch2_open_buckets_to_text - dump in-use open buckets for debugging.
+ *
+ * Walks the fixed-size c->open_buckets array and, for each bucket that
+ * is valid and not on the partial list, prints one line to @out:
+ * bucket index, pin refcount, and data type name.
+ *
+ * Backs the sysfs "open_buckets" attribute (see the sysfs hunk in this
+ * patch that calls this from the show path).
+ */
+void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
+{
+ struct open_bucket *ob;
+
+ for (ob = c->open_buckets;
+ ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+ ob++) {
+ /*
+ * Take the per-bucket lock so ->valid, ->on_partial_list
+ * and ->type are read consistently while formatting.
+ */
+ spin_lock(&ob->lock);
+ if (ob->valid && !ob->on_partial_list) {
+ /* %zu: ob - c->open_buckets is a ptrdiff used as the index */
+ pr_buf(out, "%zu ref %u type %s\n",
+ ob - c->open_buckets,
+ atomic_read(&ob->pin),
+ bch2_data_types[ob->type]);
+ }
+ spin_unlock(&ob->lock);
+ }
+
+}
int bch2_alloc_write(struct bch_fs *, unsigned);
void bch2_fs_allocator_background_init(struct bch_fs *);
+void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *);
+
#endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
read_attribute(btree_key_cache);
read_attribute(btree_transactions);
read_attribute(stripes_heap);
+read_attribute(open_buckets);
read_attribute(internal_uuid);
return out.pos - buf;
}
+ if (attr == &sysfs_open_buckets) {
+ bch2_open_buckets_to_text(&out, c);
+ return out.pos - buf;
+ }
+
if (attr == &sysfs_compression_stats) {
bch2_compression_stats_to_text(&out, c);
return out.pos - buf;
&sysfs_btree_key_cache,
&sysfs_btree_transactions,
&sysfs_stripes_heap,
+ &sysfs_open_buckets,
&sysfs_read_realloc_races,
&sysfs_extent_migrate_done,