// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "alloc_background.h"
#include "btree_gc.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "buckets.h"
#include "dirent.h"
#include "ec.h"
#include "error.h"
#include "fs-common.h"
#include "fsck.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "journal_seq_blacklist.h"
#include "move.h"
#include "quota.h"
#include "recovery.h"
#include "replicas.h"
#include "super-io.h"

#include <linux/sort.h>
#include <linux/stat.h>
#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/* for -o reconstruct_alloc: */
static void drop_alloc_keys(struct journal_keys *keys)
{
	size_t src, dst;

	for (src = 0, dst = 0; src < keys->nr; src++)
		if (keys->d[src].btree_id != BTREE_ID_alloc)
			keys->d[dst++] = keys->d[src];

	keys->nr = dst;
}

/* iterate over keys read from the journal: */

static int __journal_key_cmp(enum btree_id	l_btree_id,
			     unsigned		l_level,
			     struct bpos	l_pos,
			     struct journal_key *r)
{
	return (cmp_int(l_btree_id,	r->btree_id) ?:
		cmp_int(l_level,	r->level) ?:
		bpos_cmp(l_pos,	r->k->k.p));
}

static int journal_key_cmp(struct journal_key *l, struct journal_key *r)
{
	return (cmp_int(l->btree_id,	r->btree_id) ?:
		cmp_int(l->level,	r->level) ?:
		bpos_cmp(l->k->k.p,	r->k->k.p));
}

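/*
 * journal_key_search() is a binary search returning the first index whose key
 * compares >= (id, level, pos) - i.e. the position at which such a key would
 * be inserted:
 */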
static size_t journal_key_search(struct journal_keys *journal_keys,
				 enum btree_id id, unsigned level,
				 struct bpos pos)
{
	size_t l = 0, r = journal_keys->nr, m;

	while (l < r) {
		m = l + ((r - l) >> 1);
		if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
			l = m + 1;
		else
			r = m;
	}

	BUG_ON(l < journal_keys->nr &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);

	BUG_ON(l &&
	       __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);

	return l;
}

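/*
 * Called after a key is inserted at @idx: iterators pointing at or past the
 * insertion point are nudged forwards so they still refer to the key they
 * were on before the array shifted:
 */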
static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
{
	struct bkey_i *n = iter->keys->d[idx].k;
	struct btree_and_journal_iter *biter =
		container_of(iter, struct btree_and_journal_iter, journal);

	if (iter->idx > idx ||
	    (iter->idx == idx &&
	     biter->last &&
	     bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
		iter->idx++;
}

int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bkey_i *k)
{
	struct journal_key n = {
		.btree_id	= id,
		.level		= level,
		.k		= k,
		.allocated	= true
	};
	struct journal_keys *keys = &c->journal_keys;
	struct journal_iter *iter;
	unsigned idx = journal_key_search(keys, id, level, k->k.p);

	if (idx < keys->nr &&
	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
		if (keys->d[idx].allocated)
			kfree(keys->d[idx].k);
		keys->d[idx] = n;
		return 0;
	}

	if (keys->nr == keys->size) {
		struct journal_keys new_keys = {
			.nr			= keys->nr,
			.size			= keys->size * 2,
			.journal_seq_base	= keys->journal_seq_base,
		};

		new_keys.d = kvmalloc(sizeof(new_keys.d[0]) * new_keys.size, GFP_KERNEL);
		if (!new_keys.d) {
			bch_err(c, "%s: error allocating new key array (size %zu)",
				__func__, new_keys.size);
			return -ENOMEM;
		}

		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
		kvfree(keys->d);
		*keys = new_keys;
	}

	array_insert_item(keys->d, keys->nr, idx, n);

	list_for_each_entry(iter, &c->journal_iters, list)
		journal_iter_fix(c, iter, idx);

	return 0;
}

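/*
 * Deletion is implemented by inserting a whiteout - a zero-size deleted key
 * that overrides whatever key the journal had at that position:
 */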
int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
			    unsigned level, struct bpos pos)
{
	struct bkey_i *whiteout =
		kmalloc(sizeof(struct bkey), GFP_KERNEL);
	int ret;

	if (!whiteout) {
		bch_err(c, "%s: error allocating new key", __func__);
		return -ENOMEM;
	}

	bkey_init(&whiteout->k);
	whiteout->k.p = pos;

	ret = bch2_journal_key_insert(c, id, level, whiteout);
	if (ret)
		kfree(whiteout);
	return ret;
}

static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
{
	struct journal_key *k = iter->idx - iter->keys->nr
		? iter->keys->d + iter->idx : NULL;

	if (k &&
	    k->btree_id	== iter->btree_id &&
	    k->level	== iter->level)
		return k->k;

	iter->idx = iter->keys->nr;
	return NULL;
}

static void bch2_journal_iter_advance(struct journal_iter *iter)
{
	if (iter->idx < iter->keys->nr)
		iter->idx++;
}

static void bch2_journal_iter_exit(struct journal_iter *iter)
{
	list_del(&iter->list);
}

static void bch2_journal_iter_init(struct bch_fs *c,
				   struct journal_iter *iter,
				   enum btree_id id, unsigned level,
				   struct bpos pos)
{
	iter->btree_id	= id;
	iter->level	= level;
	iter->keys	= &c->journal_keys;
	iter->idx	= journal_key_search(&c->journal_keys, id, level, pos);
	list_add(&iter->list, &c->journal_iters);
}

static struct bkey_s_c bch2_journal_iter_peek_btree(struct btree_and_journal_iter *iter)
{
	return bch2_btree_node_iter_peek_unpack(&iter->node_iter,
						iter->b, &iter->unpacked);
}

static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
{
	bch2_btree_node_iter_advance(&iter->node_iter, iter->b);
}

void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
{
	switch (iter->last) {
	case none:
		break;
	case btree:
		bch2_journal_iter_advance_btree(iter);
		break;
	case journal:
		bch2_journal_iter_advance(&iter->journal);
		break;
	}

	iter->last = none;
}

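/*
 * Returns the smaller of the current btree key and the current journal key;
 * on a tie the journal key wins, since journal keys are newer than what was
 * last written to the btree node. Deleted keys (whiteouts) are skipped:
 */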
struct bkey_s_c bch2_btree_and_journal_iter_peek(struct btree_and_journal_iter *iter)
{
	struct bkey_s_c ret;

	while (1) {
		struct bkey_s_c btree_k =
			bch2_journal_iter_peek_btree(iter);
		struct bkey_s_c journal_k =
			bkey_i_to_s_c(bch2_journal_iter_peek(&iter->journal));

		if (btree_k.k && journal_k.k) {
			int cmp = bpos_cmp(btree_k.k->p, journal_k.k->p);

			if (!cmp)
				bch2_journal_iter_advance_btree(iter);

			iter->last = cmp < 0 ? btree : journal;
		} else if (btree_k.k) {
			iter->last = btree;
		} else if (journal_k.k) {
			iter->last = journal;
		} else {
			iter->last = none;
			return bkey_s_c_null;
		}

		ret = iter->last == journal ? journal_k : btree_k;

		if (iter->b &&
		    bpos_cmp(ret.k->p, iter->b->data->max_key) > 0) {
			iter->journal.idx = iter->journal.keys->nr;
			iter->last = none;
			return bkey_s_c_null;
		}

		if (!bkey_deleted(ret.k))
			break;

		bch2_btree_and_journal_iter_advance(iter);
	}

	return ret;
}

struct bkey_s_c bch2_btree_and_journal_iter_next(struct btree_and_journal_iter *iter)
{
	bch2_btree_and_journal_iter_advance(iter);

	return bch2_btree_and_journal_iter_peek(iter);
}

void bch2_btree_and_journal_iter_exit(struct btree_and_journal_iter *iter)
{
	bch2_journal_iter_exit(&iter->journal);
}

void bch2_btree_and_journal_iter_init_node_iter(struct btree_and_journal_iter *iter,
						struct bch_fs *c,
						struct btree *b)
{
	memset(iter, 0, sizeof(*iter));

	iter->b = b;
	bch2_btree_node_iter_init_from_start(&iter->node_iter, iter->b);
	bch2_journal_iter_init(c, &iter->journal,
			       b->c.btree_id, b->c.level, b->data->min_key);
}

/* Walk btree, overlaying keys from the journal: */

static void btree_and_journal_iter_prefetch(struct bch_fs *c, struct btree *b,
					    struct btree_and_journal_iter iter)
{
	unsigned i = 0, nr = b->c.level > 1 ? 2 : 16;
	struct bkey_s_c k;
	struct bkey_buf tmp;

	BUG_ON(!b->c.level);

	bch2_bkey_buf_init(&tmp);

	while (i < nr &&
	       (k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		bch2_bkey_buf_reassemble(&tmp, c, k);

		bch2_btree_node_prefetch(c, NULL, tmp.k,
					 b->c.btree_id, b->c.level - 1);

		bch2_btree_and_journal_iter_advance(&iter);
		i++;
	}

	bch2_bkey_buf_exit(&tmp, c);
}

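/*
 * Note that btree_and_journal_iter_prefetch() above takes its iterator by
 * value: it advances a private copy, so the caller's position is unaffected:
 */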
static int bch2_btree_and_journal_walk_recurse(struct bch_fs *c, struct btree *b,
					       struct journal_keys *journal_keys,
					       enum btree_id btree_id,
					       btree_walk_node_fn node_fn,
					       btree_walk_key_fn key_fn)
{
	struct btree_and_journal_iter iter;
	struct bkey_s_c k;
	struct bkey_buf tmp;
	struct btree *child;
	int ret = 0;

	bch2_bkey_buf_init(&tmp);
	bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		ret = key_fn(c, btree_id, b->c.level, k);
		if (ret)
			break;

		if (b->c.level) {
			bch2_bkey_buf_reassemble(&tmp, c, k);

			bch2_btree_and_journal_iter_advance(&iter);

			child = bch2_btree_node_get_noiter(c, tmp.k,
						b->c.btree_id, b->c.level - 1,
						false);

			ret = PTR_ERR_OR_ZERO(child);
			if (ret)
				break;

			btree_and_journal_iter_prefetch(c, b, iter);

			ret = (node_fn ? node_fn(c, child) : 0) ?:
				bch2_btree_and_journal_walk_recurse(c, child,
					journal_keys, btree_id, node_fn, key_fn);
			six_unlock_read(&child->c.lock);

			if (ret)
				break;
		} else {
			bch2_btree_and_journal_iter_advance(&iter);
		}
	}

	bch2_btree_and_journal_iter_exit(&iter);
	bch2_bkey_buf_exit(&tmp, c);
	return ret;
}

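/*
 * Walk a btree depth first with journal keys overlaid, calling node_fn on
 * each node and key_fn on every key; the root's own bkey is passed to key_fn
 * at b->c.level + 1:
 */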
int bch2_btree_and_journal_walk(struct bch_fs *c, struct journal_keys *journal_keys,
				enum btree_id btree_id,
				btree_walk_node_fn node_fn,
				btree_walk_key_fn key_fn)
{
	struct btree *b = c->btree_roots[btree_id].b;
	int ret = 0;

	if (btree_node_fake(b))
		return 0;

	six_lock_read(&b->c.lock, NULL, NULL);
	ret = (node_fn ? node_fn(c, b) : 0) ?:
		bch2_btree_and_journal_walk_recurse(c, b, journal_keys, btree_id,
						    node_fn, key_fn) ?:
		key_fn(c, btree_id, b->c.level + 1, bkey_i_to_s_c(&b->key));
	six_unlock_read(&b->c.lock);

	return ret;
}

/* sort and dedup all keys in the journal: */

void bch2_journal_entries_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct journal_replay *i =
			list_first_entry(list, struct journal_replay, list);
		list_del(&i->list);
		kvpfree(i, offsetof(struct journal_replay, j) +
			vstruct_bytes(&i->j));
	}
}

/*
 * When keys compare equal, oldest compares first:
 */
static int journal_sort_key_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  cmp_int(l->btree_id,	r->btree_id) ?:
		cmp_int(l->level,	r->level) ?:
		bpos_cmp(l->k->k.p,	r->k->k.p) ?:
		cmp_int(l->journal_seq,	r->journal_seq) ?:
		cmp_int(l->journal_offset, r->journal_offset);
}

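/*
 * journal_keys_sort() below relies on this ordering: because duplicates sort
 * oldest first, its dedup pass keeps only the last - i.e. newest - key of
 * each run of equal keys:
 */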
void bch2_journal_keys_free(struct journal_keys *keys)
{
	struct journal_key *i;

	for (i = keys->d; i < keys->d + keys->nr; i++)
		if (i->allocated)
			kfree(i->k);

	kvfree(keys->d);
	keys->d = NULL;
	keys->nr = 0;
}

static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	struct bkey_i *k, *_n;
	struct journal_keys keys = { NULL };
	struct journal_key *src, *dst;
	size_t nr_keys = 0;

	if (list_empty(journal_entries))
		return keys;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		if (!keys.journal_seq_base)
			keys.journal_seq_base = le64_to_cpu(i->j.seq);

		for_each_jset_key(k, _n, entry, &i->j)
			nr_keys++;
	}

	keys.size = roundup_pow_of_two(nr_keys);

	keys.d = kvmalloc(sizeof(keys.d[0]) * keys.size, GFP_KERNEL);
	if (!keys.d)
		goto err;

	list_for_each_entry(i, journal_entries, list) {
		if (i->ignore)
			continue;

		BUG_ON(le64_to_cpu(i->j.seq) - keys.journal_seq_base > U32_MAX);

		for_each_jset_key(k, _n, entry, &i->j)
			keys.d[keys.nr++] = (struct journal_key) {
				.btree_id	= entry->btree_id,
				.level		= entry->level,
				.k		= k,
				.journal_seq	= le64_to_cpu(i->j.seq) -
					keys.journal_seq_base,
				.journal_offset	= k->_data - i->j._data,
			};
	}

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_key_cmp, NULL);

	src = dst = keys.d;
	while (src < keys.d + keys.nr) {
		while (src + 1 < keys.d + keys.nr &&
		       src[0].btree_id	== src[1].btree_id &&
		       src[0].level	== src[1].level &&
		       !bpos_cmp(src[0].k->k.p, src[1].k->k.p))
			src++;

		*dst++ = *src++;
	}

	keys.nr = dst - keys.d;
err:
	return keys;
}

/* journal replay: */

static void replay_now_at(struct journal *j, u64 seq)
{
	BUG_ON(seq < j->replay_journal_seq);
	BUG_ON(seq > j->replay_journal_seq_end);

	while (j->replay_journal_seq < seq)
		bch2_journal_pin_put(j, j->replay_journal_seq++);
}

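/*
 * replay_now_at() drops the journal pin on each sequence number it moves
 * past, allowing those journal entries to be reclaimed while replay is still
 * in progress.
 */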
static int __bch2_journal_replay_key(struct btree_trans *trans,
				     enum btree_id id, unsigned level,
				     struct bkey_i *k)
{
	struct btree_iter *iter;
	int ret;

	iter = bch2_trans_get_node_iter(trans, id, k->k.p,
					BTREE_MAX_DEPTH, level,
					BTREE_ITER_INTENT);

	/*
	 * iter->flags & BTREE_ITER_IS_EXTENTS triggers the update path to run
	 * extent_handle_overwrites() and extent_update_to_keys() - but we don't
	 * want that here, journal replay is supposed to treat extents like
	 * regular keys:
	 */
	BUG_ON(iter->flags & BTREE_ITER_IS_EXTENTS);

	ret = bch2_btree_iter_traverse(iter) ?:
		bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
	bch2_trans_iter_put(trans, iter);
	return ret;
}

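/*
 * Keys that came from the journal don't need to be journalled again when
 * committed, hence BTREE_INSERT_JOURNAL_REPLAY below; keys that fsck
 * allocated and inserted (->allocated) do:
 */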
static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
{
	unsigned commit_flags = BTREE_INSERT_NOFAIL|
		BTREE_INSERT_LAZY_RW;

	if (!k->allocated)
		commit_flags |= BTREE_INSERT_JOURNAL_REPLAY;

	return bch2_trans_do(c, NULL, NULL, commit_flags,
			     __bch2_journal_replay_key(&trans, k->btree_id, k->level, k->k));
}

static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
{
	struct btree_iter *iter;
	int ret;

	iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, k->k.p,
				   BTREE_ITER_CACHED|
				   BTREE_ITER_CACHED_NOFILL|
				   BTREE_ITER_INTENT);
	ret = bch2_trans_update(trans, iter, k, BTREE_TRIGGER_NORUN);
	bch2_trans_iter_put(trans, iter);
	return ret;
}

static int bch2_alloc_replay_key(struct bch_fs *c, struct bkey_i *k)
{
	return bch2_trans_do(c, NULL, NULL,
			     BTREE_INSERT_NOFAIL|
			     BTREE_INSERT_USE_RESERVE|
			     BTREE_INSERT_LAZY_RW|
			     BTREE_INSERT_JOURNAL_REPLAY,
			__bch2_alloc_replay_key(&trans, k));
}

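/*
 * Replay order: interior node updates first (note the reversed level
 * comparison - higher levels sort first), then ascending journal sequence
 * within a level, so updates are applied in the order they were written:
 */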
static int journal_sort_seq_cmp(const void *_l, const void *_r)
{
	const struct journal_key *l = _l;
	const struct journal_key *r = _r;

	return  cmp_int(r->level,	l->level) ?:
		cmp_int(l->journal_seq, r->journal_seq) ?:
		cmp_int(l->btree_id,	r->btree_id) ?:
		bpos_cmp(l->k->k.p,	r->k->k.p);
}

static int bch2_journal_replay(struct bch_fs *c,
			       struct journal_keys keys)
{
	struct journal *j = &c->journal;
	struct journal_key *i;
	u64 seq;
	int ret;

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);

	if (keys.nr)
		replay_now_at(j, keys.journal_seq_base);

	seq = j->replay_journal_seq;

	/*
	 * First replay updates to the alloc btree - these will only update the
	 * btree key cache:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (!i->level && i->btree_id == BTREE_ID_alloc) {
			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
			ret = bch2_alloc_replay_key(c, i->k);
			if (ret)
				goto err;
		}
	}

	/*
	 * Next replay updates to interior btree nodes:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (i->level) {
			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
			ret = bch2_journal_replay_key(c, i);
			if (ret)
				goto err;
		}
	}

	/*
	 * Now that the btree is in a consistent state, we can start journal
	 * reclaim (which will be flushing entries from the btree key cache back
	 * to the btree):
	 */
	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
	set_bit(JOURNAL_RECLAIM_STARTED, &j->flags);
	journal_reclaim_kick(j);

	j->replay_journal_seq = seq;

	/*
	 * Now replay leaf node updates:
	 */
	for_each_journal_key(keys, i) {
		cond_resched();

		if (i->level || i->btree_id == BTREE_ID_alloc)
			continue;

		replay_now_at(j, keys.journal_seq_base + i->journal_seq);

		ret = bch2_journal_replay_key(c, i);
		if (ret)
			goto err;
	}

	replay_now_at(j, j->replay_journal_seq_end);
	j->replay_journal_seq = 0;

	bch2_journal_set_replay_done(j);
	bch2_journal_flush_all_pins(j);
	return bch2_journal_error(j);
err:
	bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
		ret, bch2_btree_ids[i->btree_id], i->level);
	return ret;
}

/* journal replay early: */

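/*
 * "Early" replay applies the non-key journal entries - usage counters,
 * blacklists, btree roots - which must be in place before the btrees
 * themselves can be walked or replayed:
 */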
static int journal_replay_entry_early(struct bch_fs *c,
				      struct jset_entry *entry)
{
	int ret = 0;

	switch (entry->type) {
	case BCH_JSET_ENTRY_btree_root: {
		struct btree_root *r;

		if (entry->btree_id >= BTREE_ID_NR) {
			bch_err(c, "filesystem has unknown btree type %u",
				entry->btree_id);
			return -EINVAL;
		}

		r = &c->btree_roots[entry->btree_id];

		if (entry->u64s) {
			r->level = entry->level;
			bkey_copy(&r->key, &entry->start[0]);
			r->error = 0;
		} else {
			r->error = -EIO;
		}
		r->alive = true;
		break;
	}
	case BCH_JSET_ENTRY_usage: {
		struct jset_entry_usage *u =
			container_of(entry, struct jset_entry_usage, entry);

		switch (entry->btree_id) {
		case FS_USAGE_RESERVED:
			if (entry->level < BCH_REPLICAS_MAX)
				c->usage_base->persistent_reserved[entry->level] =
					le64_to_cpu(u->v);
			break;
		case FS_USAGE_INODES:
			c->usage_base->nr_inodes = le64_to_cpu(u->v);
			break;
		case FS_USAGE_KEY_VERSION:
			atomic64_set(&c->key_version,
				     le64_to_cpu(u->v));
			break;
		}

		break;
	}
	case BCH_JSET_ENTRY_data_usage: {
		struct jset_entry_data_usage *u =
			container_of(entry, struct jset_entry_data_usage, entry);

		ret = bch2_replicas_set_usage(c, &u->r,
					      le64_to_cpu(u->v));
		break;
	}
	case BCH_JSET_ENTRY_dev_usage: {
		struct jset_entry_dev_usage *u =
			container_of(entry, struct jset_entry_dev_usage, entry);
		struct bch_dev *ca = bch_dev_bkey_exists(c, u->dev);
		unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
		unsigned nr_types = (bytes - sizeof(struct jset_entry_dev_usage)) /
			sizeof(struct jset_entry_dev_usage_type);
		unsigned i;

		ca->usage_base->buckets_ec		= le64_to_cpu(u->buckets_ec);
		ca->usage_base->buckets_unavailable	= le64_to_cpu(u->buckets_unavailable);

		for (i = 0; i < nr_types; i++) {
			ca->usage_base->d[i].buckets	= le64_to_cpu(u->d[i].buckets);
			ca->usage_base->d[i].sectors	= le64_to_cpu(u->d[i].sectors);
			ca->usage_base->d[i].fragmented	= le64_to_cpu(u->d[i].fragmented);
		}

		break;
	}
	case BCH_JSET_ENTRY_blacklist: {
		struct jset_entry_blacklist *bl_entry =
			container_of(entry, struct jset_entry_blacklist, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->seq),
				le64_to_cpu(bl_entry->seq) + 1);
		break;
	}
	case BCH_JSET_ENTRY_blacklist_v2: {
		struct jset_entry_blacklist_v2 *bl_entry =
			container_of(entry, struct jset_entry_blacklist_v2, entry);

		ret = bch2_journal_seq_blacklist_add(c,
				le64_to_cpu(bl_entry->start),
				le64_to_cpu(bl_entry->end) + 1);
		break;
	}
	case BCH_JSET_ENTRY_clock: {
		struct jset_entry_clock *clock =
			container_of(entry, struct jset_entry_clock, entry);

		atomic64_set(&c->io_clock[clock->rw].now, le64_to_cpu(clock->time));
	}
	}

	return ret;
}

static int journal_replay_early(struct bch_fs *c,
				struct bch_sb_field_clean *clean,
				struct list_head *journal)
{
	struct journal_replay *i;
	struct jset_entry *entry;
	int ret;

	if (clean) {
		for (entry = clean->start;
		     entry != vstruct_end(&clean->field);
		     entry = vstruct_next(entry)) {
			ret = journal_replay_entry_early(c, entry);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry(i, journal, list) {
			if (i->ignore)
				continue;

			vstruct_for_each(&i->j, entry) {
				ret = journal_replay_entry_early(c, entry);
				if (ret)
					return ret;
			}
		}
	}

	bch2_fs_usage_initialize(c);

	return 0;
}

/* sb clean section: */

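/*
 * On clean shutdown, the contents of the last journal entry (btree roots,
 * usage counters) are written out to the superblock's clean section, so that
 * recovery doesn't have to read the journal:
 */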
static struct bkey_i *btree_root_find(struct bch_fs *c,
				      struct bch_sb_field_clean *clean,
				      struct jset *j,
				      enum btree_id id, unsigned *level)
{
	struct bkey_i *k;
	struct jset_entry *entry, *start, *end;

	if (clean) {
		start = clean->start;
		end = vstruct_end(&clean->field);
	} else {
		start = j->start;
		end = vstruct_last(j);
	}

	for (entry = start; entry < end; entry = vstruct_next(entry))
		if (entry->type == BCH_JSET_ENTRY_btree_root &&
		    entry->btree_id == id)
			goto found;

	return NULL;
found:
	if (!entry->u64s)
		return ERR_PTR(-EINVAL);

	k = entry->start;
	*level = entry->level;
	return k;
}

static int verify_superblock_clean(struct bch_fs *c,
				   struct bch_sb_field_clean **cleanp,
				   struct jset *j)
{
	unsigned i;
	struct bch_sb_field_clean *clean = *cleanp;
	int ret = 0;

	if (mustfix_fsck_err_on(j->seq != clean->journal_seq, c,
			"superblock journal seq (%llu) doesn't match journal (%llu) after clean shutdown",
			le64_to_cpu(clean->journal_seq),
			le64_to_cpu(j->seq))) {
		kfree(clean);
		*cleanp = NULL;
		return 0;
	}

	for (i = 0; i < BTREE_ID_NR; i++) {
		char buf1[200], buf2[200];
		struct bkey_i *k1, *k2;
		unsigned l1 = 0, l2 = 0;

		k1 = btree_root_find(c, clean, NULL, i, &l1);
		k2 = btree_root_find(c, NULL, j, i, &l2);

		if (!k1 && !k2)
			continue;

		mustfix_fsck_err_on(!k1 || !k2 ||
				    IS_ERR(k1) ||
				    IS_ERR(k2) ||
				    k1->k.u64s != k2->k.u64s ||
				    memcmp(k1, k2, bkey_bytes(k1)) ||
				    l1 != l2, c,
			"superblock btree root %u doesn't match journal after clean shutdown\n"
			"sb:      l=%u %s\n"
			"journal: l=%u %s\n", i,
			l1, (bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(k1)), buf1),
			l2, (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(k2)), buf2));
	}
fsck_err:
	return ret;
}

static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
{
	struct bch_sb_field_clean *clean, *sb_clean;
	int ret;

	mutex_lock(&c->sb_lock);
	sb_clean = bch2_sb_get_clean(c->disk_sb.sb);

	if (fsck_err_on(!sb_clean, c,
			"superblock marked clean but clean section not present")) {
		SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
		c->sb.clean = false;
		mutex_unlock(&c->sb_lock);
		return NULL;
	}

	clean = kmemdup(sb_clean, vstruct_bytes(&sb_clean->field),
			GFP_KERNEL);
	if (!clean) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(-ENOMEM);
	}

	ret = bch2_sb_clean_validate(c, clean, READ);
	if (ret) {
		mutex_unlock(&c->sb_lock);
		return ERR_PTR(ret);
	}

	mutex_unlock(&c->sb_lock);

	return clean;
fsck_err:
	mutex_unlock(&c->sb_lock);
	return ERR_PTR(ret);
}

static int read_btree_roots(struct bch_fs *c)
{
	unsigned i;
	int ret = 0;

	for (i = 0; i < BTREE_ID_NR; i++) {
		struct btree_root *r = &c->btree_roots[i];

		if (!r->alive)
			continue;

		if (i == BTREE_ID_alloc &&
		    c->opts.reconstruct_alloc) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			continue;
		}

		if (r->error) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "invalid btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}

		ret = bch2_btree_root_read(c, i, &r->key, r->level);
		if (ret) {
			__fsck_err(c, i == BTREE_ID_alloc
				   ? FSCK_CAN_IGNORE : 0,
				   "error reading btree root %s",
				   bch2_btree_ids[i]);
			if (i == BTREE_ID_alloc)
				c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		}
	}

	for (i = 0; i < BTREE_ID_NR; i++)
		if (!c->btree_roots[i].b)
			bch2_btree_root_alloc(c, i);
fsck_err:
	return ret;
}

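/*
 * Recovery proper: read the superblock clean section and/or the journal,
 * apply early replay entries, start the journal, read btree roots and alloc
 * info, run gc/fsck as needed, then replay journal keys into the btrees:
 */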
int bch2_fs_recovery(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 blacklist_seq, journal_seq;
	bool write_sb = false;
	int ret;

	clean = read_superblock_clean(c);
	ret = PTR_ERR_OR_ZERO(clean);
	if (ret)
		goto err;

	if (c->sb.clean)
		bch_info(c, "recovering from clean shutdown, journal seq %llu",
			 le64_to_cpu(clean->journal_seq));

	if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
		bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
		ret = -EINVAL;
		goto err;
	}

	if (!c->sb.clean &&
	    !(c->sb.features & (1ULL << BCH_FEATURE_extents_above_btree_updates))) {
		bch_err(c, "filesystem needs recovery from older version; run fsck from older bcachefs-tools to fix");
		ret = -EINVAL;
		goto err;
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
		bch_info(c, "alloc_v2 feature bit not set, fsck required");
		c->opts.fsck = true;
		c->opts.fix_errors = FSCK_OPT_YES;
	}

	if (!c->replicas.entries ||
	    c->opts.rebuild_replicas) {
		bch_info(c, "building replicas info");
		set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	}

	ret = bch2_blacklist_table_initialize(c);
	if (ret) {
		bch_err(c, "error initializing blacklist table");
		goto err;
	}

	if (!c->sb.clean || c->opts.fsck || c->opts.keep_journal) {
		struct journal_replay *i;

		ret = bch2_journal_read(c, &c->journal_entries,
					&blacklist_seq, &journal_seq);
		if (ret)
			goto err;

		list_for_each_entry_reverse(i, &c->journal_entries, list)
			if (!i->ignore) {
				last_journal_entry = &i->j;
				break;
			}

		if (mustfix_fsck_err_on(c->sb.clean &&
					last_journal_entry &&
					!journal_entry_empty(last_journal_entry), c,
				"filesystem marked clean but journal not empty")) {
			c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
			SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
			c->sb.clean = false;
		}

		if (!last_journal_entry) {
			fsck_err_on(!c->sb.clean, c, "no journal entries found");
			goto use_clean;
		}

		c->journal_keys = journal_keys_sort(&c->journal_entries);
		if (!c->journal_keys.d) {
			ret = -ENOMEM;
			goto err;
		}

		if (c->sb.clean && last_journal_entry) {
			ret = verify_superblock_clean(c, &clean,
						      last_journal_entry);
			if (ret)
				goto err;
		}
	} else {
use_clean:
		if (!clean) {
			bch_err(c, "no superblock clean section found");
			ret = BCH_FSCK_REPAIR_IMPOSSIBLE;
			goto err;
		}
		blacklist_seq = journal_seq = le64_to_cpu(clean->journal_seq) + 1;
	}

	if (c->opts.reconstruct_alloc) {
		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
		drop_alloc_keys(&c->journal_keys);
	}

	ret = journal_replay_early(c, clean, &c->journal_entries);
	if (ret)
		goto err;

	/*
	 * After an unclean shutdown, skip the next few journal sequence
	 * numbers as they may have been referenced by btree writes that
	 * happened before their corresponding journal writes - those btree
	 * writes need to be ignored, by skipping and blacklisting the next few
	 * journal sequence numbers:
	 */
	if (!c->sb.clean)
		journal_seq += 8;

	if (blacklist_seq != journal_seq) {
		ret = bch2_journal_seq_blacklist_add(c,
					blacklist_seq, journal_seq);
		if (ret) {
			bch_err(c, "error creating new journal seq blacklist entry");
			goto err;
		}
	}

	ret = bch2_fs_journal_start(&c->journal, journal_seq,
				    &c->journal_entries);
	if (ret)
		goto err;

	ret = read_btree_roots(c);
	if (ret)
		goto err;

	bch_verbose(c, "starting alloc read");
	err = "error reading allocation information";
	ret = bch2_alloc_read(c, &c->journal_keys);
	if (ret)
		goto err;
	bch_verbose(c, "alloc read done");

	bch_verbose(c, "starting stripes_read");
	err = "error reading stripes";
	ret = bch2_stripes_read(c, &c->journal_keys);
	if (ret)
		goto err;
	bch_verbose(c, "stripes_read done");

	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);

	if (c->opts.fsck ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_alloc_metadata)) ||
	    test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags)) {
		bool metadata_only = c->opts.norecovery;

		bch_info(c, "starting mark and sweep");
		err = "error in mark and sweep";
		ret = bch2_gc(c, true, metadata_only);
		if (ret)
			goto err;
		bch_verbose(c, "mark and sweep done");
	}

	bch2_stripes_heap_start(c);

	clear_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

	/*
	 * Skip past versions that might have possibly been used (as nonces),
	 * but hadn't had their pointers written:
	 */
	if (c->sb.encryption_type && !c->sb.clean)
		atomic64_add(1 << 16, &c->key_version);

	if (c->opts.norecovery)
		goto out;

	bch_verbose(c, "starting journal replay");
	err = "journal replay failed";
	ret = bch2_journal_replay(c, c->journal_keys);
	if (ret)
		goto err;
	bch_verbose(c, "journal replay done");

	if (test_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags) &&
	    !c->opts.nochanges) {
		/*
		 * note that even when filesystem was clean there might be work
		 * to do here, if we ran gc (because of fsck) which recalculated
		 * oldest_gen:
		 */
		bch_verbose(c, "writing allocation info");
		err = "error writing out alloc info";
		ret = bch2_stripes_write(c, BTREE_INSERT_LAZY_RW) ?:
			bch2_alloc_write(c, BTREE_INSERT_LAZY_RW);
		if (ret) {
			bch_err(c, "error writing alloc info");
			goto err;
		}
		bch_verbose(c, "alloc write done");
	}

	if (!c->sb.clean) {
		if (!(c->sb.features & (1ULL << BCH_FEATURE_atomic_nlink))) {
			bch_info(c, "checking inode link counts");
			err = "error in recovery";
			ret = bch2_fsck_inode_nlink(c);
			if (ret)
				goto err;
			bch_verbose(c, "check inodes done");
		} else {
			bch_verbose(c, "checking for deleted inodes");
			err = "error in recovery";
			ret = bch2_fsck_walk_inodes_only(c);
			if (ret)
				goto err;
			bch_verbose(c, "check inodes done");
		}
	}

	if (c->opts.fsck) {
		bch_info(c, "starting fsck");
		err = "error in fsck";
		ret = bch2_fsck_full(c);
		if (ret)
			goto err;
		bch_verbose(c, "fsck done");
	}

	if (enabled_qtypes(c)) {
		bch_verbose(c, "reading quotas");
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
		bch_verbose(c, "quotas done");
	}

	if (!(c->sb.compat & (1ULL << BCH_COMPAT_extents_above_btree_updates_done)) ||
	    !(c->sb.compat & (1ULL << BCH_COMPAT_bformat_overflow_done))) {
		struct bch_move_stats stats = { 0 };

		bch_info(c, "scanning for old btree nodes");
		ret = bch2_fs_read_write(c);
		if (ret)
			goto err;

		ret = bch2_scan_old_btree_nodes(c, &stats);
		if (ret)
			goto err;
		bch_info(c, "scanning for old btree nodes done");
	}

	mutex_lock(&c->sb_lock);
	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
		write_sb = true;
	}

	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_alloc_info;
		write_sb = true;
	}

	if (c->opts.fsck &&
	    !test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
		SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
		write_sb = true;
	}

	if (write_sb)
		bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	if (c->journal_seq_blacklist_table &&
	    c->journal_seq_blacklist_table->nr > 128)
		queue_work(system_long_wq, &c->journal_seq_blacklist_gc_work);

	ret = 0;
out:
	set_bit(BCH_FS_FSCK_DONE, &c->flags);
	bch2_flush_fsck_errs(c);

	if (!c->opts.keep_journal) {
		bch2_journal_keys_free(&c->journal_keys);
		bch2_journal_entries_free(&c->journal_entries);
	}
	kfree(clean);
	if (ret)
		bch_err(c, "Error in recovery: %s (%i)", err, ret);
	else
		bch_verbose(c, "ret %i", ret);
	return ret;
err:
fsck_err:
	bch2_fs_emergency_read_only(c);
	goto out;
}

int bch2_fs_initialize(struct bch_fs *c)
{
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	const char *err = "cannot allocate memory";
	struct bch_dev *ca;
	LIST_HEAD(journal);
	unsigned i;
	int ret;

	bch_notice(c, "initializing new filesystem");

	mutex_lock(&c->sb_lock);
	c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_extents_above_btree_updates_done;
	c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_bformat_overflow_done;

	if (c->opts.version_upgrade) {
		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
		c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
		bch2_write_super(c);
	}

	for_each_online_member(ca, c, i)
		bch2_mark_dev_superblock(c, ca, 0);
	mutex_unlock(&c->sb_lock);

	set_bit(BCH_FS_ALLOC_READ_DONE, &c->flags);
	set_bit(BCH_FS_INITIAL_GC_DONE, &c->flags);

	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
	set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);

	err = "unable to allocate journal buckets";
	for_each_online_member(ca, c, i) {
		ret = bch2_dev_journal_alloc(ca);
		if (ret) {
			percpu_ref_put(&ca->io_ref);
			goto err;
		}
	}

	/*
	 * journal_res_get() will crash if called before this has
	 * set up the journal.pin FIFO and journal.cur pointer:
	 */
	bch2_fs_journal_start(&c->journal, 1, &journal);
	bch2_journal_set_replay_done(&c->journal);

	err = "error going read-write";
	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;

	/*
	 * Write out the superblock and journal buckets, now that we can do
	 * btree updates
	 */
	err = "error writing alloc info";
	ret = bch2_alloc_write(c, 0);
	if (ret)
		goto err;

	bch2_inode_init(c, &root_inode, 0, 0,
			S_IFDIR|S_IRWXU|S_IRUGO|S_IXUGO, 0, NULL);
	root_inode.bi_inum = BCACHEFS_ROOT_INO;
	bch2_inode_pack(c, &packed_inode, &root_inode);

	err = "error creating root directory";
	ret = bch2_btree_insert(c, BTREE_ID_inodes,
				&packed_inode.inode.k_i,
				NULL, NULL, 0);
	if (ret)
		goto err;

	bch2_inode_init_early(c, &lostfound_inode);

	err = "error creating lost+found";
	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(&trans, BCACHEFS_ROOT_INO,
				  &root_inode, &lostfound_inode,
				  &lostfound,
				  0, 0, S_IFDIR|0700, 0,
				  NULL, NULL));
	if (ret) {
		bch_err(c, "error creating lost+found");
		goto err;
	}

	if (enabled_qtypes(c)) {
		ret = bch2_fs_quota_read(c);
		if (ret)
			goto err;
	}

	err = "error writing first journal entry";
	ret = bch2_journal_meta(&c->journal);
	if (ret)
		goto err;

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
err:
	pr_err("Error initializing new filesystem: %s (%i)", err, ret);
	return ret;
}