1 // SPDX-License-Identifier: GPL-2.0
4 #include "bcachefs_ioctl.h"
6 #include "btree_cache.h"
7 #include "btree_update.h"
17 #include "recovery_passes.h"
20 #include "thread_with_file.h"
23 #include <linux/bsearch.h>
24 #include <linux/dcache.h> /* struct qstr */
26 static int dirent_points_to_inode_nowarn(struct bch_fs *c,
27 struct bkey_s_c_dirent d,
28 struct bch_inode_unpacked *inode)
30 if (d.v->d_type == DT_SUBVOL
31 ? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol
32 : le64_to_cpu(d.v->d_inum) == inode->bi_inum)
33 return 0;
34 return bch_err_throw(c, ENOENT_dirent_doesnt_match_inode);
37 static void dirent_inode_mismatch_msg(struct printbuf *out,
39 struct bkey_s_c_dirent dirent,
40 struct bch_inode_unpacked *inode)
42 prt_str(out, "inode points to dirent that does not point back:");
44 bch2_bkey_val_to_text(out, c, dirent.s_c);
46 bch2_inode_unpacked_to_text(out, inode);
49 static int dirent_points_to_inode(struct bch_fs *c,
50 struct bkey_s_c_dirent dirent,
51 struct bch_inode_unpacked *inode)
53 int ret = dirent_points_to_inode_nowarn(c, dirent, inode);
55 struct printbuf buf = PRINTBUF;
56 dirent_inode_mismatch_msg(&buf, c, dirent, inode);
57 bch_warn(c, "%s", buf.buf);
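/*
 * Recount an inode's allocated sectors from the extents btree; used by
 * check_i_sectors() to verify and repair bi_sectors:
 */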
64 * XXX: this is handling transaction restarts without returning
65 * -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
67 static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
72 int ret = for_each_btree_key_max(trans, iter, BTREE_ID_extents,
73 SPOS(inum, 0, snapshot),
76 if (bkey_extent_is_allocation(k.k))
81 return ret ?: sectors;
84 static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
89 int ret = for_each_btree_key_max(trans, iter, BTREE_ID_dirents,
90 SPOS(inum, 0, snapshot),
93 if (k.k->type == KEY_TYPE_dirent &&
94 bkey_s_c_to_dirent(k).v->d_type == DT_DIR)
99 return ret ?: subdirs;
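/* Look up a subvolume, returning its snapshot ID and root inode number: */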
102 static int subvol_lookup(struct btree_trans *trans, u32 subvol,
103 u32 *snapshot, u64 *inum)
105 struct bch_subvolume s;
106 int ret = bch2_subvolume_get(trans, subvol, false, &s);
108 *snapshot = le32_to_cpu(s.snapshot);
109 *inum = le64_to_cpu(s.inode);
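/*
 * Look up a dirent by name in a specific snapshot, returning the inum and
 * d_type it points to:
 */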
113 static int lookup_dirent_in_snapshot(struct btree_trans *trans,
114 struct bch_hash_info hash_info,
115 subvol_inum dir, struct qstr *name,
116 u64 *target, unsigned *type, u32 snapshot)
118 struct btree_iter iter;
119 struct bkey_s_c k = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
120 &hash_info, dir, name, 0, snapshot);
121 int ret = bkey_err(k);
125 struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
126 *target = le64_to_cpu(d.v->d_inum);
128 bch2_trans_iter_exit(trans, &iter);
133 * Find any subvolume associated with a tree of snapshots
134 * We can't rely on master_subvol - it might have been deleted.
136 static int find_snapshot_tree_subvol(struct btree_trans *trans,
137 u32 tree_id, u32 *subvol)
139 struct btree_iter iter;
143 for_each_btree_key_norestart(trans, iter, BTREE_ID_snapshots, POS_MIN, 0, k, ret) {
144 if (k.k->type != KEY_TYPE_snapshot)
147 struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
148 if (le32_to_cpu(s.v->tree) != tree_id)
152 *subvol = le32_to_cpu(s.v->subvol);
156 ret = bch_err_throw(trans->c, ENOENT_no_snapshot_tree_subvol);
158 bch2_trans_iter_exit(trans, &iter);
162 /* Get lost+found, create if it doesn't exist: */
163 static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
164 struct bch_inode_unpacked *lostfound,
165 u64 reattaching_inum)
167 struct bch_fs *c = trans->c;
168 struct qstr lostfound_str = QSTR("lost+found");
169 struct btree_iter lostfound_iter = {};
174 struct bch_snapshot_tree st;
175 ret = bch2_snapshot_tree_lookup(trans,
176 bch2_snapshot_tree(c, snapshot), &st);
181 ret = find_snapshot_tree_subvol(trans,
182 bch2_snapshot_tree(c, snapshot), &subvolid);
183 bch_err_msg(c, ret, "finding subvol associated with snapshot tree %u",
184 bch2_snapshot_tree(c, snapshot));
188 struct bch_subvolume subvol;
189 ret = bch2_subvolume_get(trans, subvolid, false, &subvol);
190 bch_err_msg(c, ret, "looking up subvol %u for snapshot %u", subvolid, snapshot);
195 struct btree_iter iter;
196 struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, &iter,
197 BTREE_ID_subvolumes, POS(0, subvolid),
199 ret = PTR_ERR_OR_ZERO(subvol);
203 subvol->v.inode = cpu_to_le64(reattaching_inum);
204 bch2_trans_iter_exit(trans, &iter);
207 subvol_inum root_inum = {
209 .inum = le64_to_cpu(subvol.inode)
212 struct bch_inode_unpacked root_inode;
213 struct bch_hash_info root_hash_info;
214 ret = bch2_inode_find_by_inum_snapshot(trans, root_inum.inum, snapshot, &root_inode, 0);
215 bch_err_msg(c, ret, "looking up root inode %llu for subvol %u",
216 root_inum.inum, subvolid);
220 root_hash_info = bch2_hash_info_init(c, &root_inode);
222 ret = lookup_dirent_in_snapshot(trans, root_hash_info, root_inum,
223 &lostfound_str, &inum, &d_type, snapshot);
224 if (bch2_err_matches(ret, ENOENT))
225 goto create_lostfound;
231 if (d_type != DT_DIR) {
232 bch_err(c, "error looking up lost+found: not a directory");
233 return bch_err_throw(c, ENOENT_not_directory);
237 * The bch2_check_dirents pass has already run, dangling dirents
238 * shouldn't exist here:
240 ret = bch2_inode_find_by_inum_snapshot(trans, inum, snapshot, lostfound, 0);
241 bch_err_msg(c, ret, "looking up lost+found %llu:%u in (root inode %llu, snapshot root %u)",
242 inum, snapshot, root_inum.inum, bch2_snapshot_root(c, snapshot));
247 * we always create lost+found in the root snapshot; we don't want
248 * different branches of the snapshot tree to have different lost+found
250 snapshot = le32_to_cpu(st.root_snapshot);
252 * XXX: we could have a nicer log message here if we had a nice way to
253 * walk backpointers to print a path
255 struct printbuf path = PRINTBUF;
256 ret = bch2_inum_to_path(trans, root_inum, &path);
260 bch_notice(c, "creating %s/lost+found in subvol %llu snapshot %u",
261 path.buf, root_inum.subvol, snapshot);
262 printbuf_exit(&path);
264 u64 now = bch2_current_time(c);
265 u64 cpu = raw_smp_processor_id();
267 bch2_inode_init_early(c, lostfound);
268 bch2_inode_init_late(c, lostfound, now, 0, 0, S_IFDIR|0700, 0, &root_inode);
269 lostfound->bi_dir = root_inode.bi_inum;
270 lostfound->bi_snapshot = le32_to_cpu(st.root_snapshot);
272 root_inode.bi_nlink++;
274 ret = bch2_inode_create(trans, &lostfound_iter, lostfound, snapshot, cpu);
278 bch2_btree_iter_set_snapshot(trans, &lostfound_iter, snapshot);
279 ret = bch2_btree_iter_traverse(trans, &lostfound_iter);
283 ret = bch2_dirent_create_snapshot(trans,
284 0, root_inode.bi_inum, snapshot, &root_hash_info,
285 mode_to_type(lostfound->bi_mode),
288 &lostfound->bi_dir_offset,
289 BTREE_UPDATE_internal_snapshot_node|
290 STR_HASH_must_create) ?:
291 bch2_inode_write_flags(trans, &lostfound_iter, lostfound,
292 BTREE_UPDATE_internal_snapshot_node);
294 bch_err_msg(c, ret, "creating lost+found");
295 bch2_trans_iter_exit(trans, &lostfound_iter);
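/*
 * Should this inode be reattached to lost+found? True if it has no dirent
 * backpointer and isn't unlinked - with exceptions for the root inode and for
 * older versions of subvolume roots, explained below:
 */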
299 static inline bool inode_should_reattach(struct bch_inode_unpacked *inode)
301 if (inode->bi_inum == BCACHEFS_ROOT_INO &&
302 inode->bi_subvol == BCACHEFS_ROOT_SUBVOL)
306 * Subvolume roots are special: older versions of subvolume roots may be
307 * disconnected, it's only the newest version that matters.
309 * We only keep a single dirent pointing to a subvolume root, i.e.
310 * older versions of snapshots will not have a different dirent pointing
311 * to the same subvolume root.
313 * This is because dirents that point to subvolumes are only visible in
314 * the parent subvolume - versioning is not needed - and keeping them
315 * around would break fsck, because when we're crossing subvolumes we
316 * don't have a consistent snapshot ID to check the inode <-> dirent
319 * Thus, a subvolume root that's been renamed after a snapshot will have
320 * a disconnected older version - that's expected.
322 * Note that taking a snapshot always updates the root inode (to update
323 * the dirent backpointer), so a subvolume root inode with
324 * BCH_INODE_has_child_snapshot is never visible.
326 if (inode->bi_subvol &&
327 (inode->bi_flags & BCH_INODE_has_child_snapshot))
330 return !bch2_inode_has_backpointer(inode) &&
331 !(inode->bi_flags & BCH_INODE_unlinked);
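/*
 * Emit a whiteout for the dirent at @d_pos, if one is visible in @snapshot:
 * used by reattach_inode() when the lost+found dirent it created in an
 * ancestor snapshot shouldn't be visible in a child snapshot:
 */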
334 static int maybe_delete_dirent(struct btree_trans *trans, struct bpos d_pos, u32 snapshot)
336 struct btree_iter iter;
337 struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_dirents,
338 SPOS(d_pos.inode, d_pos.offset, snapshot),
340 BTREE_ITER_with_updates);
341 int ret = bkey_err(k);
345 if (bpos_eq(k.k->p, d_pos)) {
347 * delete_at() doesn't work because the update path doesn't
348 * internally use BTREE_ITER_with_updates yet
350 struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(*k));
351 ret = PTR_ERR_OR_ZERO(k);
356 k->k.type = KEY_TYPE_whiteout;
358 ret = bch2_trans_update(trans, &iter, k, BTREE_UPDATE_internal_snapshot_node);
361 bch2_trans_iter_exit(trans, &iter);
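/*
 * Reattach a disconnected inode to lost+found: create a dirent for it (named
 * after its inum, or "subvol-%u" for subvolume roots), update its backpointer
 * fields, and fix up versions of the inode in child snapshots:
 */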
365 static int reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode)
367 struct bch_fs *c = trans->c;
368 struct bch_inode_unpacked lostfound;
372 u32 dirent_snapshot = inode->bi_snapshot;
373 if (inode->bi_subvol) {
374 inode->bi_parent_subvol = BCACHEFS_ROOT_SUBVOL;
376 struct btree_iter subvol_iter;
377 struct bkey_i_subvolume *subvol =
378 bch2_bkey_get_mut_typed(trans, &subvol_iter,
379 BTREE_ID_subvolumes, POS(0, inode->bi_subvol),
381 ret = PTR_ERR_OR_ZERO(subvol);
385 subvol->v.fs_path_parent = BCACHEFS_ROOT_SUBVOL;
386 bch2_trans_iter_exit(trans, &subvol_iter);
389 ret = subvol_lookup(trans, inode->bi_parent_subvol,
390 &dirent_snapshot, &root_inum);
394 snprintf(name_buf, sizeof(name_buf), "subvol-%u", inode->bi_subvol);
396 snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
399 ret = lookup_lostfound(trans, dirent_snapshot, &lostfound, inode->bi_inum);
403 bch_verbose(c, "got lostfound inum %llu", lostfound.bi_inum);
405 lostfound.bi_nlink += S_ISDIR(inode->bi_mode);
407 /* ensure lost+found inode is also present in inode snapshot */
408 if (!inode->bi_subvol) {
409 BUG_ON(!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, lostfound.bi_snapshot));
410 lostfound.bi_snapshot = inode->bi_snapshot;
413 ret = __bch2_fsck_write_inode(trans, &lostfound);
417 struct bch_hash_info dir_hash = bch2_hash_info_init(c, &lostfound);
418 struct qstr name = QSTR(name_buf);
420 inode->bi_dir = lostfound.bi_inum;
422 ret = bch2_dirent_create_snapshot(trans,
423 inode->bi_parent_subvol, lostfound.bi_inum,
428 inode->bi_subvol ?: inode->bi_inum,
429 &inode->bi_dir_offset,
430 BTREE_UPDATE_internal_snapshot_node|
431 STR_HASH_must_create);
433 bch_err_msg(c, ret, "error creating dirent");
437 ret = __bch2_fsck_write_inode(trans, inode);
442 CLASS(printbuf, buf)();
443 ret = bch2_inum_snapshot_to_path(trans, inode->bi_inum,
444 inode->bi_snapshot, NULL, &buf);
448 bch_info(c, "reattached at %s", buf.buf);
452 * Fix up inodes in child snapshots: if they should also be reattached
453 * update the backpointer field, if they should not be we need to emit
454 * whiteouts for the dirent we just created.
456 if (!inode->bi_subvol && bch2_snapshot_is_leaf(c, inode->bi_snapshot) <= 0) {
457 snapshot_id_list whiteouts_done;
458 struct btree_iter iter;
461 darray_init(&whiteouts_done);
463 for_each_btree_key_reverse_norestart(trans, iter,
464 BTREE_ID_inodes, SPOS(0, inode->bi_inum, inode->bi_snapshot - 1),
465 BTREE_ITER_all_snapshots|BTREE_ITER_intent, k, ret) {
466 if (k.k->p.offset != inode->bi_inum)
469 if (!bkey_is_inode(k.k) ||
470 !bch2_snapshot_is_ancestor(c, k.k->p.snapshot, inode->bi_snapshot) ||
471 snapshot_list_has_ancestor(c, &whiteouts_done, k.k->p.snapshot))
474 struct bch_inode_unpacked child_inode;
475 ret = bch2_inode_unpack(k, &child_inode);
479 if (!inode_should_reattach(&child_inode)) {
480 ret = maybe_delete_dirent(trans,
481 SPOS(lostfound.bi_inum, inode->bi_dir_offset,
487 ret = snapshot_list_add(c, &whiteouts_done, k.k->p.snapshot);
491 iter.snapshot = k.k->p.snapshot;
492 child_inode.bi_dir = inode->bi_dir;
493 child_inode.bi_dir_offset = inode->bi_dir_offset;
495 ret = bch2_inode_write_flags(trans, &iter, &child_inode,
496 BTREE_UPDATE_internal_snapshot_node);
501 darray_exit(&whiteouts_done);
502 bch2_trans_iter_exit(trans, &iter);
508 static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans,
509 struct btree_iter *iter,
512 return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent);
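/*
 * Delete the dirent an inode's backpointer fields point to, after checking
 * that it really points back at this inode:
 */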
515 static int remove_backpointer(struct btree_trans *trans,
516 struct bch_inode_unpacked *inode)
518 if (!bch2_inode_has_backpointer(inode))
521 u32 snapshot = inode->bi_snapshot;
523 if (inode->bi_parent_subvol) {
524 int ret = bch2_subvolume_get_snapshot(trans, inode->bi_parent_subvol, &snapshot);
529 struct bch_fs *c = trans->c;
530 struct btree_iter iter;
531 struct bkey_s_c_dirent d = dirent_get_by_pos(trans, &iter,
532 SPOS(inode->bi_dir, inode->bi_dir_offset, snapshot));
533 int ret = bkey_err(d) ?:
534 dirent_points_to_inode(c, d, inode) ?:
535 bch2_fsck_remove_dirent(trans, d.k->p);
536 bch2_trans_iter_exit(trans, &iter);
540 static int reattach_subvol(struct btree_trans *trans, struct bkey_s_c_subvolume s)
542 struct bch_fs *c = trans->c;
544 struct bch_inode_unpacked inode;
545 int ret = bch2_inode_find_by_inum_trans(trans,
546 (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
551 ret = remove_backpointer(trans, &inode);
552 if (!bch2_err_matches(ret, ENOENT))
553 bch_err_msg(c, ret, "removing dirent");
557 ret = reattach_inode(trans, &inode);
558 bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum);
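/*
 * Recreate a missing subvolume: if @inum is 0 a new root inode is created
 * first, then the subvolume key is recreated and the snapshot and snapshot
 * tree are pointed back at it:
 */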
562 static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 subvolid, u64 inum)
564 struct bch_fs *c = trans->c;
566 if (!bch2_snapshot_is_leaf(c, snapshotid)) {
567 bch_err(c, "need to reconstruct subvol, but have interior node snapshot");
568 return bch_err_throw(c, fsck_repair_unimplemented);
572 * If inum isn't set, that means we're being called from check_dirents,
573 * not check_inodes - the root of this subvolume doesn't exist or we
574 * would have found it there:
577 struct btree_iter inode_iter = {};
578 struct bch_inode_unpacked new_inode;
579 u64 cpu = raw_smp_processor_id();
581 bch2_inode_init_early(c, &new_inode);
582 bch2_inode_init_late(c, &new_inode, bch2_current_time(c), 0, 0, S_IFDIR|0755, 0, NULL);
584 new_inode.bi_subvol = subvolid;
586 int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?:
587 bch2_btree_iter_traverse(trans, &inode_iter) ?:
588 bch2_inode_write(trans, &inode_iter, &new_inode);
589 bch2_trans_iter_exit(trans, &inode_iter);
593 inum = new_inode.bi_inum;
596 bch_info(c, "reconstructing subvol %u with root inode %llu", subvolid, inum);
598 struct bkey_i_subvolume *new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
599 int ret = PTR_ERR_OR_ZERO(new_subvol);
603 bkey_subvolume_init(&new_subvol->k_i);
604 new_subvol->k.p.offset = subvolid;
605 new_subvol->v.snapshot = cpu_to_le32(snapshotid);
606 new_subvol->v.inode = cpu_to_le64(inum);
607 ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &new_subvol->k_i, 0);
611 struct btree_iter iter;
612 struct bkey_i_snapshot *s = bch2_bkey_get_mut_typed(trans, &iter,
613 BTREE_ID_snapshots, POS(0, snapshotid),
615 ret = PTR_ERR_OR_ZERO(s);
616 bch_err_msg(c, ret, "getting snapshot %u", snapshotid);
620 u32 snapshot_tree = le32_to_cpu(s->v.tree);
622 s->v.subvol = cpu_to_le32(subvolid);
623 SET_BCH_SNAPSHOT_SUBVOL(&s->v, true);
624 bch2_trans_iter_exit(trans, &iter);
626 struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, &iter,
627 BTREE_ID_snapshot_trees, POS(0, snapshot_tree),
629 ret = PTR_ERR_OR_ZERO(st);
630 bch_err_msg(c, ret, "getting snapshot tree %u", snapshot_tree);
634 if (!st->v.master_subvol)
635 st->v.master_subvol = cpu_to_le32(subvolid);
637 bch2_trans_iter_exit(trans, &iter);
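/*
 * Recreate a missing inode for keys found in @btree: i_mode is inferred from
 * which btree the keys live in, i_size from the last extent:
 */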
641 static int reconstruct_inode(struct btree_trans *trans, enum btree_id btree, u32 snapshot, u64 inum)
643 struct bch_fs *c = trans->c;
644 unsigned i_mode = S_IFREG;
648 case BTREE_ID_extents: {
649 struct btree_iter iter = {};
651 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0);
652 struct bkey_s_c k = bch2_btree_iter_peek_prev_min(trans, &iter, POS(inum, 0));
653 bch2_trans_iter_exit(trans, &iter);
654 int ret = bkey_err(k);
658 i_size = k.k->p.offset << 9;
661 case BTREE_ID_dirents:
664 case BTREE_ID_xattrs:
670 struct bch_inode_unpacked new_inode;
671 bch2_inode_init_early(c, &new_inode);
672 bch2_inode_init_late(c, &new_inode, bch2_current_time(c), 0, 0, i_mode|0600, 0, NULL);
673 new_inode.bi_size = i_size;
674 new_inode.bi_inum = inum;
675 new_inode.bi_snapshot = snapshot;
677 return __bch2_fsck_write_inode(trans, &new_inode);
680 static inline void snapshots_seen_exit(struct snapshots_seen *s)
682 darray_exit(&s->ids);
685 static inline void snapshots_seen_init(struct snapshots_seen *s)
687 memset(s, 0, sizeof(*s));
690 static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id)
693 __darray_for_each(s->ids, i) {
700 int ret = darray_insert_item(&s->ids, i - s->ids.data, id);
702 bch_err(c, "error reallocating snapshots_seen table (size %zu)",
707 static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
708 enum btree_id btree_id, struct bpos pos)
710 if (!bkey_eq(s->pos, pos))
714 return snapshot_list_add_nodup(c, &s->ids, pos.snapshot);
718 * key_visible_in_snapshot - returns true if @id is a descendent of @ancestor,
719 * and @ancestor hasn't been overwritten in @seen
721 * @c: filesystem handle
722 * @seen: list of snapshot ids already seen at current position
723 * @id: descendent snapshot id
724 * @ancestor: ancestor snapshot id
726 * Returns: whether key in @ancestor snapshot is visible in @id snapshot
728 static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
729 u32 id, u32 ancestor)
731 EBUG_ON(id > ancestor);
736 if (!bch2_snapshot_is_ancestor(c, id, ancestor))
740 * We know that @id is a descendant of @ancestor, we're checking if
741 * we've seen a key that overwrote @ancestor - i.e. also a descendent of
742 * @ancestor and with @id as a descendent.
744 * But we already know that we're scanning IDs between @id and @ancestor
745 * numerically, since snapshot ID lists are kept sorted, so if we find
746 * an id that's an ancestor of @id we're done:
748 darray_for_each_reverse(seen->ids, i)
749 if (*i != ancestor && bch2_snapshot_is_ancestor(c, id, *i))
756 * ref_visible - given a key with snapshot id @src that points to a key with
757 * snapshot id @dst, test whether there is some snapshot in which @dst is
760 * @c: filesystem handle
761 * @s: list of snapshot IDs already seen at @src
762 * @src: snapshot ID of src key
763 * @dst: snapshot ID of dst key
764 * Returns: true if there is some snapshot in which @dst is visible
766 * Assumes we're visiting @src keys in natural key order
768 static bool ref_visible(struct bch_fs *c, struct snapshots_seen *s,
772 ? key_visible_in_snapshot(c, s, dst, src)
773 : bch2_snapshot_is_ancestor(c, src, dst);
776 static int ref_visible2(struct bch_fs *c,
777 u32 src, struct snapshots_seen *src_seen,
778 u32 dst, struct snapshots_seen *dst_seen)
782 swap(dst_seen, src_seen);
784 return key_visible_in_snapshot(c, src_seen, dst, src);
787 #define for_each_visible_inode(_c, _s, _w, _snapshot, _i) \
788 for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr && \
789 (_i)->inode.bi_snapshot <= (_snapshot); _i++) \
790 if (key_visible_in_snapshot(_c, _s, _i->inode.bi_snapshot, _snapshot))
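/*
 * The inode_walker caches every snapshot version of the inode we're currently
 * walking keys for, so per-key checks don't have to repeat inode lookups;
 * for_each_visible_inode() above iterates the cached versions that a key in
 * @_snapshot is visible in. A minimal usage sketch, mirroring check_dirent():
 *
 *	for_each_visible_inode(c, s, dir, d.k->p.snapshot, i)
 *		i->count++;
 */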
792 struct inode_walker_entry {
793 struct bch_inode_unpacked inode;
799 struct inode_walker {
800 bool first_this_inode;
802 bool recalculate_sums;
803 struct bpos last_pos;
805 DARRAY(struct inode_walker_entry) inodes;
806 snapshot_id_list deletes;
809 static void inode_walker_exit(struct inode_walker *w)
811 darray_exit(&w->inodes);
812 darray_exit(&w->deletes);
815 static struct inode_walker inode_walker_init(void)
817 return (struct inode_walker) { 0, };
820 static int add_inode(struct bch_fs *c, struct inode_walker *w,
821 struct bkey_s_c inode)
823 int ret = darray_push(&w->inodes, ((struct inode_walker_entry) {
824 .whiteout = !bkey_is_inode(inode.k),
829 struct inode_walker_entry *n = &darray_last(w->inodes);
831 return bch2_inode_unpack(inode, &n->inode);
833 n->inode.bi_inum = inode.k->p.offset;
834 n->inode.bi_snapshot = inode.k->p.snapshot;
839 static int get_inodes_all_snapshots(struct btree_trans *trans,
840 struct inode_walker *w, u64 inum)
842 struct bch_fs *c = trans->c;
843 struct btree_iter iter;
848 * We no longer have inodes for w->last_pos; clear this to avoid
849 * screwing up check_i_sectors/check_subdir_count if we take a
850 * transaction restart here:
852 w->have_inodes = false;
853 w->recalculate_sums = false;
856 for_each_btree_key_max_norestart(trans, iter,
857 BTREE_ID_inodes, POS(0, inum), SPOS(0, inum, U32_MAX),
858 BTREE_ITER_all_snapshots, k, ret) {
859 ret = add_inode(c, w, k);
863 bch2_trans_iter_exit(trans, &iter);
868 w->first_this_inode = true;
869 w->have_inodes = true;
873 static int get_visible_inodes(struct btree_trans *trans,
874 struct inode_walker *w,
875 struct snapshots_seen *s,
878 struct bch_fs *c = trans->c;
879 struct btree_iter iter;
886 for_each_btree_key_reverse_norestart(trans, iter, BTREE_ID_inodes, SPOS(0, inum, s->pos.snapshot),
887 BTREE_ITER_all_snapshots, k, ret) {
888 if (k.k->p.offset != inum)
891 if (!ref_visible(c, s, s->pos.snapshot, k.k->p.snapshot))
894 if (snapshot_list_has_ancestor(c, &w->deletes, k.k->p.snapshot))
897 ret = bkey_is_inode(k.k)
899 : snapshot_list_add(c, &w->deletes, k.k->p.snapshot);
903 bch2_trans_iter_exit(trans, &iter);
908 static struct inode_walker_entry *
909 lookup_inode_for_snapshot(struct btree_trans *trans, struct inode_walker *w, struct bkey_s_c k)
911 struct bch_fs *c = trans->c;
913 struct inode_walker_entry *i = darray_find_p(w->inodes, i,
914 bch2_snapshot_is_ancestor(c, k.k->p.snapshot, i->inode.bi_snapshot));
919 struct printbuf buf = PRINTBUF;
922 if (fsck_err_on(k.k->p.snapshot != i->inode.bi_snapshot,
923 trans, snapshot_key_missing_inode_snapshot,
924 "have key for inode %llu:%u but have inode in ancestor snapshot %u\n"
925 "unexpected because we should always update the inode when we update a key in that inode\n"
927 w->last_pos.inode, k.k->p.snapshot, i->inode.bi_snapshot,
928 (bch2_bkey_val_to_text(&buf, c, k),
931 struct bch_inode_unpacked new = i->inode;
932 new.bi_snapshot = k.k->p.snapshot;
933 ret = __bch2_fsck_write_inode(trans, &new);
935 struct bkey_i whiteout;
936 bkey_init(&whiteout.k);
937 whiteout.k.type = KEY_TYPE_whiteout;
938 whiteout.k.p = SPOS(0, i->inode.bi_inum, k.k->p.snapshot);
939 ret = bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
941 BTREE_UPDATE_internal_snapshot_node);
947 ret = bch2_trans_commit(trans, NULL, NULL, 0);
951 struct inode_walker_entry new_entry = *i;
953 new_entry.inode.bi_snapshot = k.k->p.snapshot;
955 new_entry.i_size = 0;
957 while (i > w->inodes.data && i[-1].inode.bi_snapshot > k.k->p.snapshot)
960 size_t pos = i - w->inodes.data;
961 ret = darray_insert_item(&w->inodes, pos, new_entry);
965 ret = bch_err_throw(c, transaction_restart_nested);
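/*
 * Return the cached inode_walker_entry for @k's snapshot, refilling the
 * walker when we've moved on to a new inode number:
 */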
976 static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
977 struct inode_walker *w,
980 if (w->last_pos.inode != k.k->p.inode) {
981 int ret = get_inodes_all_snapshots(trans, w, k.k->p.inode);
986 w->last_pos = k.k->p;
988 return lookup_inode_for_snapshot(trans, w, k);
992 * Prefer to delete the first one, since that will be the one at the wrong
994 * return value: 0 -> delete k1, 1 -> delete k2
996 int bch2_fsck_update_backpointers(struct btree_trans *trans,
997 struct snapshots_seen *s,
998 const struct bch_hash_desc desc,
999 struct bch_hash_info *hash_info,
1002 if (new->k.type != KEY_TYPE_dirent)
1005 struct bkey_i_dirent *d = bkey_i_to_dirent(new);
1006 struct inode_walker target = inode_walker_init();
1009 if (d->v.d_type == DT_SUBVOL) {
1010 bch_err(trans->c, "%s does not support DT_SUBVOL", __func__);
1011 ret = -BCH_ERR_fsck_repair_unimplemented;
1013 ret = get_visible_inodes(trans, &target, s, le64_to_cpu(d->v.d_inum));
1017 darray_for_each(target.inodes, i) {
1018 i->inode.bi_dir_offset = d->k.p.offset;
1019 ret = __bch2_fsck_write_inode(trans, &i->inode);
1025 inode_walker_exit(&target);
1029 static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans,
1030 struct btree_iter *iter,
1031 struct bch_inode_unpacked *inode,
1034 if (inode->bi_subvol) {
1036 int ret = subvol_lookup(trans, inode->bi_parent_subvol, snapshot, &inum);
1038 return ((struct bkey_s_c_dirent) { .k = ERR_PTR(ret) });
1041 return dirent_get_by_pos(trans, iter, SPOS(inode->bi_dir, inode->bi_dir_offset, *snapshot));
1044 static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
1046 struct btree_iter iter;
1047 struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_deleted_inodes, p, 0);
1048 int ret = bkey_err(k) ?: k.k->type == KEY_TYPE_set;
1049 bch2_trans_iter_exit(trans, &iter);
1053 static int check_inode_dirent_inode(struct btree_trans *trans,
1054 struct bch_inode_unpacked *inode,
1057 struct bch_fs *c = trans->c;
1058 struct printbuf buf = PRINTBUF;
1060 u32 inode_snapshot = inode->bi_snapshot;
1061 struct btree_iter dirent_iter = {};
1062 struct bkey_s_c_dirent d = inode_get_dirent(trans, &dirent_iter, inode, &inode_snapshot);
1063 int ret = bkey_err(d);
1064 if (ret && !bch2_err_matches(ret, ENOENT))
1067 if ((ret || dirent_points_to_inode_nowarn(c, d, inode)) &&
1069 (inode->bi_flags & BCH_INODE_has_child_snapshot)) {
1070 /* Older version of a renamed subvolume root: we won't have a
1071 * correct dirent for it. That's expected, see
1072 * inode_should_reattach().
1074 * We don't clear the backpointer field when doing the rename
1075 * because there might be arbitrarily many versions in older
1079 inode->bi_dir_offset = 0;
1080 *write_inode = true;
1084 if (fsck_err_on(ret,
1085 trans, inode_points_to_missing_dirent,
1086 "inode points to missing dirent\n%s",
1087 (bch2_inode_unpacked_to_text(&buf, inode), buf.buf)) ||
1088 fsck_err_on(!ret && dirent_points_to_inode_nowarn(c, d, inode),
1089 trans, inode_points_to_wrong_dirent,
1091 (printbuf_reset(&buf),
1092 dirent_inode_mismatch_msg(&buf, c, d, inode),
1095 * We just clear the backpointer fields for now. If we find a
1096 * dirent that points to this inode in check_dirents(), we'll
1097 * update it then; then when we get to check_path() if the
1098 * backpointer is still 0 we'll reattach it.
1101 inode->bi_dir_offset = 0;
1102 *write_inode = true;
1107 bch2_trans_iter_exit(trans, &dirent_iter);
1108 printbuf_exit(&buf);
1113 static int check_inode(struct btree_trans *trans,
1114 struct btree_iter *iter,
1116 struct bch_inode_unpacked *snapshot_root,
1117 struct snapshots_seen *s)
1119 struct bch_fs *c = trans->c;
1120 struct printbuf buf = PRINTBUF;
1121 struct bch_inode_unpacked u;
1122 bool do_update = false;
1125 ret = bch2_check_key_has_snapshot(trans, iter, k);
1131 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
1135 if (!bkey_is_inode(k.k))
1138 ret = bch2_inode_unpack(k, &u);
1142 if (snapshot_root->bi_inum != u.bi_inum) {
1143 ret = bch2_inode_find_snapshot_root(trans, u.bi_inum, snapshot_root);
1148 if (u.bi_hash_seed != snapshot_root->bi_hash_seed ||
1149 INODE_STR_HASH(&u) != INODE_STR_HASH(snapshot_root)) {
1150 ret = bch2_repair_inode_hash_info(trans, snapshot_root);
1151 BUG_ON(ret == -BCH_ERR_fsck_repair_unimplemented);
1156 ret = bch2_check_inode_has_case_insensitive(trans, &u, &s->ids, &do_update);
1160 if (bch2_inode_has_backpointer(&u)) {
1161 ret = check_inode_dirent_inode(trans, &u, &do_update);
1166 if (fsck_err_on(bch2_inode_has_backpointer(&u) &&
1167 (u.bi_flags & BCH_INODE_unlinked),
1168 trans, inode_unlinked_but_has_dirent,
1169 "inode unlinked but has dirent\n%s",
1170 (printbuf_reset(&buf),
1171 bch2_inode_unpacked_to_text(&buf, &u),
1173 u.bi_flags &= ~BCH_INODE_unlinked;
1177 if (S_ISDIR(u.bi_mode) && (u.bi_flags & BCH_INODE_unlinked)) {
1178 /* Check for this early so that check_unreachable_inode() will reattach it */
1180 ret = bch2_empty_dir_snapshot(trans, k.k->p.offset, 0, k.k->p.snapshot);
1181 if (ret && ret != -BCH_ERR_ENOTEMPTY_dir_not_empty)
1184 fsck_err_on(ret, trans, inode_dir_unlinked_but_not_empty,
1185 "dir unlinked but not empty\n%s",
1186 (printbuf_reset(&buf),
1187 bch2_inode_unpacked_to_text(&buf, &u),
1189 u.bi_flags &= ~BCH_INODE_unlinked;
1194 if (fsck_err_on(S_ISDIR(u.bi_mode) && u.bi_size,
1195 trans, inode_dir_has_nonzero_i_size,
1196 "directory %llu:%u with nonzero i_size %lli",
1197 u.bi_inum, u.bi_snapshot, u.bi_size)) {
1202 ret = bch2_inode_has_child_snapshots(trans, k.k->p);
1206 if (fsck_err_on(ret != !!(u.bi_flags & BCH_INODE_has_child_snapshot),
1207 trans, inode_has_child_snapshots_wrong,
1208 "inode has_child_snapshots flag wrong (should be %u)\n%s",
1210 (printbuf_reset(&buf),
1211 bch2_inode_unpacked_to_text(&buf, &u),
1214 u.bi_flags |= BCH_INODE_has_child_snapshot;
1216 u.bi_flags &= ~BCH_INODE_has_child_snapshot;
1221 if ((u.bi_flags & BCH_INODE_unlinked) &&
1222 !(u.bi_flags & BCH_INODE_has_child_snapshot)) {
1223 if (!test_bit(BCH_FS_started, &c->flags)) {
1225 * If we're not in online fsck, don't delete unlinked
1226 * inodes, just make sure they're on the deleted list.
1228 * They might be referred to by a logged operation -
1229 * i.e. we might have crashed in the middle of a
1230 * truncate on an unlinked but open file - so we want to
1231 * let the delete_dead_inodes kill it after resuming
1234 ret = check_inode_deleted_list(trans, k.k->p);
1239 trans, unlinked_inode_not_on_deleted_list,
1240 "inode %llu:%u unlinked, but not on deleted list",
1241 u.bi_inum, k.k->p.snapshot);
1243 ret = bch2_btree_bit_mod_buffered(trans, BTREE_ID_deleted_inodes, k.k->p, 1);
1247 ret = bch2_inode_or_descendents_is_open(trans, k.k->p);
1251 if (fsck_err_on(!ret,
1252 trans, inode_unlinked_and_not_open,
1253 "inode %llu:%u unlinked and not open",
1254 u.bi_inum, u.bi_snapshot)) {
1255 ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
1256 bch_err_msg(c, ret, "in fsck deleting inode");
1263 if (fsck_err_on(u.bi_parent_subvol &&
1264 (u.bi_subvol == 0 ||
1265 u.bi_subvol == BCACHEFS_ROOT_SUBVOL),
1266 trans, inode_bi_parent_nonzero,
1267 "inode %llu:%u has subvol %u but nonzero parent subvol %u",
1268 u.bi_inum, k.k->p.snapshot, u.bi_subvol, u.bi_parent_subvol)) {
1269 u.bi_parent_subvol = 0;
1274 struct bch_subvolume s;
1276 ret = bch2_subvolume_get(trans, u.bi_subvol, false, &s);
1277 if (ret && !bch2_err_matches(ret, ENOENT))
1280 if (ret && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
1281 ret = reconstruct_subvol(trans, k.k->p.snapshot, u.bi_subvol, u.bi_inum);
1285 if (fsck_err_on(ret,
1286 trans, inode_bi_subvol_missing,
1287 "inode %llu:%u bi_subvol points to missing subvolume %u",
1288 u.bi_inum, k.k->p.snapshot, u.bi_subvol) ||
1289 fsck_err_on(le64_to_cpu(s.inode) != u.bi_inum ||
1290 !bch2_snapshot_is_ancestor(c, le32_to_cpu(s.snapshot),
1292 trans, inode_bi_subvol_wrong,
1293 "inode %llu:%u points to subvol %u, but subvol points to %llu:%u",
1294 u.bi_inum, k.k->p.snapshot, u.bi_subvol,
1295 le64_to_cpu(s.inode),
1296 le32_to_cpu(s.snapshot))) {
1298 u.bi_parent_subvol = 0;
1303 if (fsck_err_on(u.bi_journal_seq > journal_cur_seq(&c->journal),
1304 trans, inode_journal_seq_in_future,
1305 "inode journal seq in future (currently at %llu)\n%s",
1306 journal_cur_seq(&c->journal),
1307 (printbuf_reset(&buf),
1308 bch2_inode_unpacked_to_text(&buf, &u),
1310 u.bi_journal_seq = journal_cur_seq(&c->journal);
1315 ret = __bch2_fsck_write_inode(trans, &u);
1316 bch_err_msg(c, ret, "in fsck updating inode");
1324 printbuf_exit(&buf);
1328 int bch2_check_inodes(struct bch_fs *c)
1330 struct bch_inode_unpacked snapshot_root = {};
1331 struct snapshots_seen s;
1333 snapshots_seen_init(&s);
1335 int ret = bch2_trans_run(c,
1336 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
1338 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
1339 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1340 check_inode(trans, &iter, k, &snapshot_root, &s)));
1342 snapshots_seen_exit(&s);
1347 static int find_oldest_inode_needs_reattach(struct btree_trans *trans,
1348 struct bch_inode_unpacked *inode)
1350 struct bch_fs *c = trans->c;
1351 struct btree_iter iter;
1356 * We look for inodes to reattach in natural key order, leaves first,
1357 * but we should do the reattach at the oldest version that needs to be
1360 for_each_btree_key_norestart(trans, iter,
1362 SPOS(0, inode->bi_inum, inode->bi_snapshot + 1),
1363 BTREE_ITER_all_snapshots, k, ret) {
1364 if (k.k->p.offset != inode->bi_inum)
1367 if (!bch2_snapshot_is_ancestor(c, inode->bi_snapshot, k.k->p.snapshot))
1370 if (!bkey_is_inode(k.k))
1373 struct bch_inode_unpacked parent_inode;
1374 ret = bch2_inode_unpack(k, &parent_inode);
1378 if (!inode_should_reattach(&parent_inode))
1381 *inode = parent_inode;
1383 bch2_trans_iter_exit(trans, &iter);
1388 static int check_unreachable_inode(struct btree_trans *trans,
1389 struct btree_iter *iter,
1392 struct printbuf buf = PRINTBUF;
1395 if (!bkey_is_inode(k.k))
1398 struct bch_inode_unpacked inode;
1399 ret = bch2_inode_unpack(k, &inode);
1403 if (!inode_should_reattach(&inode))
1406 ret = find_oldest_inode_needs_reattach(trans, &inode);
1410 if (fsck_err(trans, inode_unreachable,
1411 "unreachable inode:\n%s",
1412 (bch2_inode_unpacked_to_text(&buf, &inode),
1414 ret = reattach_inode(trans, &inode);
1416 printbuf_exit(&buf);
1421 * Reattach unreachable (but not unlinked) inodes
1423 * Run after check_inodes() and check_dirents(), so we know that inode
1424 * backpointer fields point to valid dirents, and every inode that has a dirent
1425 * that points to it has its backpointer field set - so we're just looking for
1426 * non-unlinked inodes without backpointers:
1428 * XXX: this is racy w.r.t. hardlink removal in online fsck
1430 int bch2_check_unreachable_inodes(struct bch_fs *c)
1432 int ret = bch2_trans_run(c,
1433 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
1435 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
1436 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1437 check_unreachable_inode(trans, &iter, k)));
1442 static inline bool btree_matches_i_mode(enum btree_id btree, unsigned mode)
1445 case BTREE_ID_extents:
1446 return S_ISREG(mode) || S_ISLNK(mode);
1447 case BTREE_ID_dirents:
1448 return S_ISDIR(mode);
1449 case BTREE_ID_xattrs:
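/*
 * Verify that @k belongs to an inode of the right type; if not, either delete
 * the stray keys or reconstruct the missing inode, depending on how much data
 * would be thrown away:
 */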
1456 static int check_key_has_inode(struct btree_trans *trans,
1457 struct btree_iter *iter,
1458 struct inode_walker *inode,
1459 struct inode_walker_entry *i,
1462 struct bch_fs *c = trans->c;
1463 struct printbuf buf = PRINTBUF;
1464 struct btree_iter iter2 = {};
1465 int ret = PTR_ERR_OR_ZERO(i);
1469 if (k.k->type == KEY_TYPE_whiteout)
1472 bool have_inode = i && !i->whiteout;
1474 if (!have_inode && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes)))
1477 if (have_inode && btree_matches_i_mode(iter->btree_id, i->inode.bi_mode))
1480 prt_printf(&buf, ", ");
1482 bool have_old_inode = false;
1483 darray_for_each(inode->inodes, i2)
1484 if (!i2->whiteout &&
1485 bch2_snapshot_is_ancestor(c, k.k->p.snapshot, i2->inode.bi_snapshot) &&
1486 btree_matches_i_mode(iter->btree_id, i2->inode.bi_mode)) {
1487 prt_printf(&buf, "but found good inode in older snapshot\n");
1488 bch2_inode_unpacked_to_text(&buf, &i2->inode);
1490 have_old_inode = true;
1495 unsigned nr_keys = 0;
1497 prt_printf(&buf, "found keys:\n");
1499 for_each_btree_key_max_norestart(trans, iter2, iter->btree_id,
1500 SPOS(k.k->p.inode, 0, k.k->p.snapshot),
1501 POS(k.k->p.inode, U64_MAX),
1504 if (nr_keys <= 10) {
1505 bch2_bkey_val_to_text(&buf, c, k2);
1516 prt_printf(&buf, "found > %u keys for this missing inode\n", nr_keys);
1517 else if (nr_keys > 10)
1518 prt_printf(&buf, "found %u keys for this missing inode\n", nr_keys);
1521 if (fsck_err_on(!have_inode,
1522 trans, key_in_missing_inode,
1523 "key in missing inode%s", buf.buf)) {
1525 * Maybe a deletion that raced with data move, or something
1526 * weird like that? But if we know the inode was deleted, or
1527 * it's just a few keys, we can safely delete them.
1529 * If it's many keys, we should probably recreate the inode
1531 if (have_old_inode || nr_keys <= 2)
1538 * not autofix, this one would be a giant wtf - bit error in the
1539 * inode corrupting i_mode?
1541 * may want to try repairing inode instead of deleting
1543 if (fsck_err_on(!btree_matches_i_mode(iter->btree_id, i->inode.bi_mode),
1544 trans, key_in_wrong_inode_type,
1545 "key for wrong inode mode %o%s",
1546 i->inode.bi_mode, buf.buf))
1552 bch2_trans_iter_exit(trans, &iter2);
1553 printbuf_exit(&buf);
1558 * XXX: print out more info
1559 * count up extents for this inode, check if we have a different inode in
1560 * an older snapshot version, perhaps decide if we want to reconstitute
1562 ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node);
1565 ret = reconstruct_inode(trans, iter->btree_id, k.k->p.snapshot, k.k->p.inode) ?:
1566 bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
1570 inode->last_pos.inode--;
1571 ret = bch_err_throw(c, transaction_restart_nested);
1575 static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_walker *w)
1577 struct bch_fs *c = trans->c;
1581 darray_for_each(w->inodes, i) {
1582 if (i->inode.bi_sectors == i->count)
1585 count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->inode.bi_snapshot);
1587 if (w->recalculate_sums)
1590 if (i->count != count2) {
1591 bch_err_ratelimited(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
1592 w->last_pos.inode, i->inode.bi_snapshot, i->count, count2);
1596 if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty),
1597 trans, inode_i_sectors_wrong,
1598 "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
1599 w->last_pos.inode, i->inode.bi_snapshot,
1600 i->inode.bi_sectors, i->count)) {
1601 i->inode.bi_sectors = i->count;
1602 ret = bch2_fsck_write_inode(trans, &i->inode);
1612 static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
1614 u32 restart_count = trans->restart_count;
1615 return check_i_sectors_notnested(trans, w) ?:
1616 trans_was_restarted(trans, restart_count);
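/*
 * extent_end/extent_ends track where the previous extent ended in each
 * snapshot, so check_overlapping_extents() can detect extents that overlap
 * keys in other snapshot versions:
 */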
1622 struct snapshots_seen seen;
1625 struct extent_ends {
1626 struct bpos last_pos;
1627 DARRAY(struct extent_end) e;
1630 static void extent_ends_reset(struct extent_ends *extent_ends)
1632 darray_for_each(extent_ends->e, i)
1633 snapshots_seen_exit(&i->seen);
1634 extent_ends->e.nr = 0;
1637 static void extent_ends_exit(struct extent_ends *extent_ends)
1639 extent_ends_reset(extent_ends);
1640 darray_exit(&extent_ends->e);
1643 static void extent_ends_init(struct extent_ends *extent_ends)
1645 memset(extent_ends, 0, sizeof(*extent_ends));
1648 static int extent_ends_at(struct bch_fs *c,
1649 struct extent_ends *extent_ends,
1650 struct snapshots_seen *seen,
1653 struct extent_end *i, n = (struct extent_end) {
1654 .offset = k.k->p.offset,
1655 .snapshot = k.k->p.snapshot,
1659 n.seen.ids.data = kmemdup(seen->ids.data,
1660 sizeof(seen->ids.data[0]) * seen->ids.size,
1662 if (!n.seen.ids.data)
1663 return bch_err_throw(c, ENOMEM_fsck_extent_ends_at);
1665 __darray_for_each(extent_ends->e, i) {
1666 if (i->snapshot == k.k->p.snapshot) {
1667 snapshots_seen_exit(&i->seen);
1672 if (i->snapshot >= k.k->p.snapshot)
1676 return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n);
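/*
 * Two overlapping extents were detected: re-look up both keys, report them,
 * and repair by overwriting part of one of them with
 * bch2_trans_update_extent_overwrite():
 */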
1679 static int overlapping_extents_found(struct btree_trans *trans,
1680 enum btree_id btree,
1681 struct bpos pos1, struct snapshots_seen *pos1_seen,
1684 struct extent_end *extent_end)
1686 struct bch_fs *c = trans->c;
1687 struct printbuf buf = PRINTBUF;
1688 struct btree_iter iter1, iter2 = {};
1689 struct bkey_s_c k1, k2;
1692 BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
1694 bch2_trans_iter_init(trans, &iter1, btree, pos1,
1695 BTREE_ITER_all_snapshots|
1696 BTREE_ITER_not_extents);
1697 k1 = bch2_btree_iter_peek_max(trans, &iter1, POS(pos1.inode, U64_MAX));
1703 bch2_bkey_val_to_text(&buf, c, k1);
1705 if (!bpos_eq(pos1, k1.k->p)) {
1706 prt_str(&buf, "\nwanted\n ");
1707 bch2_bpos_to_text(&buf, pos1);
1708 prt_str(&buf, "\n");
1709 bch2_bkey_to_text(&buf, &pos2);
1711 bch_err(c, "%s: error finding first overlapping extent when repairing, got%s",
1713 ret = bch_err_throw(c, internal_fsck_err);
1717 bch2_trans_copy_iter(trans, &iter2, &iter1);
1720 bch2_btree_iter_advance(trans, &iter2);
1722 k2 = bch2_btree_iter_peek_max(trans, &iter2, POS(pos1.inode, U64_MAX));
1727 if (bpos_ge(k2.k->p, pos2.p))
1732 bch2_bkey_val_to_text(&buf, c, k2);
1734 if (bpos_gt(k2.k->p, pos2.p) ||
1735 pos2.size != k2.k->size) {
1736 bch_err(c, "%s: error finding second overlapping extent when repairing%s",
1738 ret = bch_err_throw(c, internal_fsck_err);
1742 prt_printf(&buf, "\noverwriting %s extent",
1743 pos1.snapshot >= pos2.p.snapshot ? "first" : "second");
1745 if (fsck_err(trans, extent_overlapping,
1746 "overlapping extents%s", buf.buf)) {
1747 struct btree_iter *old_iter = &iter1;
1748 struct disk_reservation res = { 0 };
1750 if (pos1.snapshot < pos2.p.snapshot) {
1755 trans->extra_disk_res += bch2_bkey_sectors_compressed(k2);
1757 ret = bch2_trans_update_extent_overwrite(trans, old_iter,
1758 BTREE_UPDATE_internal_snapshot_node,
1760 bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc);
1761 bch2_disk_reservation_put(c, &res);
1763 bch_info(c, "repair ret %s", bch2_err_str(ret));
1770 if (pos1.snapshot == pos2.p.snapshot) {
1772 * We overwrote the first extent, and did the overwrite
1773 * in the same snapshot:
1775 extent_end->offset = bkey_start_offset(&pos2);
1776 } else if (pos1.snapshot > pos2.p.snapshot) {
1778 * We overwrote the first extent in pos2's snapshot:
1780 ret = snapshots_seen_add_inorder(c, pos1_seen, pos2.p.snapshot);
1783 * We overwrote the second extent - restart
1784 * check_extent() from the top:
1786 ret = bch_err_throw(c, transaction_restart_nested);
1791 bch2_trans_iter_exit(trans, &iter2);
1792 bch2_trans_iter_exit(trans, &iter1);
1793 printbuf_exit(&buf);
1797 static int check_overlapping_extents(struct btree_trans *trans,
1798 struct snapshots_seen *seen,
1799 struct extent_ends *extent_ends,
1801 struct btree_iter *iter,
1804 struct bch_fs *c = trans->c;
1807 /* transaction restart, running again */
1808 if (bpos_eq(extent_ends->last_pos, k.k->p))
1811 if (extent_ends->last_pos.inode != k.k->p.inode)
1812 extent_ends_reset(extent_ends);
1814 darray_for_each(extent_ends->e, i) {
1815 if (i->offset <= bkey_start_offset(k.k))
1818 if (!ref_visible2(c,
1819 k.k->p.snapshot, seen,
1820 i->snapshot, &i->seen))
1823 ret = overlapping_extents_found(trans, iter->btree_id,
1824 SPOS(iter->pos.inode,
1833 extent_ends->last_pos = k.k->p;
1838 static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *iter,
1841 struct bch_fs *c = trans->c;
1842 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1843 struct bch_extent_crc_unpacked crc;
1844 const union bch_extent_entry *i;
1845 unsigned encoded_extent_max_sectors = c->opts.encoded_extent_max >> 9;
1847 bkey_for_each_crc(k.k, ptrs, crc, i)
1848 if (crc_is_encoded(crc) &&
1849 crc.uncompressed_size > encoded_extent_max_sectors) {
1850 struct printbuf buf = PRINTBUF;
1852 bch2_bkey_val_to_text(&buf, c, k);
1853 bch_err(c, "overbig encoded extent, please report this:\n %s", buf.buf);
1854 printbuf_exit(&buf);
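/*
 * Per-extent check: verify the extent's snapshot and owning inode, check for
 * overlaps with previously seen extents, whiteout extents past i_size, and
 * accumulate per-snapshot sector counts for the i_sectors check:
 */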
1860 static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
1862 struct inode_walker *inode,
1863 struct snapshots_seen *s,
1864 struct extent_ends *extent_ends,
1865 struct disk_reservation *res)
1867 struct bch_fs *c = trans->c;
1868 struct printbuf buf = PRINTBUF;
1871 ret = bch2_check_key_has_snapshot(trans, iter, k);
1873 ret = ret < 0 ? ret : 0;
1877 if (inode->last_pos.inode != k.k->p.inode && inode->have_inodes) {
1878 ret = check_i_sectors(trans, inode);
1883 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
1887 struct inode_walker_entry *extent_i = walk_inode(trans, inode, k);
1888 ret = PTR_ERR_OR_ZERO(extent_i);
1892 ret = check_key_has_inode(trans, iter, inode, extent_i, k);
1896 if (k.k->type != KEY_TYPE_whiteout) {
1897 ret = check_overlapping_extents(trans, s, extent_ends, k, iter,
1898 &inode->recalculate_sums);
1903 * Check inodes in reverse order, from oldest snapshots to
1904 * newest, starting from the inode that matches this extent's
1905 * snapshot. If we didn't have one, iterate over all inodes:
1907 for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes);
1908 inode->inodes.data && i >= inode->inodes.data;
1910 if (i->inode.bi_snapshot > k.k->p.snapshot ||
1911 !key_visible_in_snapshot(c, s, i->inode.bi_snapshot, k.k->p.snapshot))
1914 u64 last_block = round_up(i->inode.bi_size, block_bytes(c)) >> 9;
1916 if (fsck_err_on(k.k->p.offset > last_block &&
1917 !bkey_extent_is_reservation(k),
1918 trans, extent_past_end_of_inode,
1919 "extent type past end of inode %llu:%u, i_size %llu\n%s",
1920 i->inode.bi_inum, i->inode.bi_snapshot, i->inode.bi_size,
1921 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1922 struct bkey_i *whiteout = bch2_trans_kmalloc(trans, sizeof(*whiteout));
1923 ret = PTR_ERR_OR_ZERO(whiteout);
1927 bkey_init(&whiteout->k);
1928 whiteout->k.p = SPOS(k.k->p.inode,
1930 i->inode.bi_snapshot);
1931 bch2_key_resize(&whiteout->k,
1932 min(KEY_SIZE_MAX & (~0 << c->block_bits),
1933 U64_MAX - whiteout->k.p.offset));
1937 * Need a normal (not BTREE_ITER_all_snapshots)
1938 * iterator, if we're deleting in a different
1939 * snapshot and need to emit a whiteout
1941 struct btree_iter iter2;
1942 bch2_trans_iter_init(trans, &iter2, BTREE_ID_extents,
1943 bkey_start_pos(&whiteout->k),
1945 ret = bch2_btree_iter_traverse(trans, &iter2) ?:
1946 bch2_trans_update(trans, &iter2, whiteout,
1947 BTREE_UPDATE_internal_snapshot_node);
1948 bch2_trans_iter_exit(trans, &iter2);
1952 iter->k.type = KEY_TYPE_whiteout;
1958 ret = bch2_trans_commit(trans, res, NULL, BCH_TRANS_COMMIT_no_enospc);
1962 if (bkey_extent_is_allocation(k.k)) {
1963 for (struct inode_walker_entry *i = extent_i ?: &darray_last(inode->inodes);
1964 inode->inodes.data && i >= inode->inodes.data;
1967 i->inode.bi_snapshot > k.k->p.snapshot ||
1968 !key_visible_in_snapshot(c, s, i->inode.bi_snapshot, k.k->p.snapshot))
1971 i->count += k.k->size;
1975 if (k.k->type != KEY_TYPE_whiteout) {
1976 ret = extent_ends_at(c, extent_ends, s, k);
1983 printbuf_exit(&buf);
1989 * Walk extents: verify that extents have a corresponding S_ISREG inode, and
1990 * that i_size and i_sectors are consistent
1992 int bch2_check_extents(struct bch_fs *c)
1994 struct inode_walker w = inode_walker_init();
1995 struct snapshots_seen s;
1996 struct extent_ends extent_ends;
1997 struct disk_reservation res = { 0 };
1999 snapshots_seen_init(&s);
2000 extent_ends_init(&extent_ends);
2002 int ret = bch2_trans_run(c,
2003 for_each_btree_key(trans, iter, BTREE_ID_extents,
2004 POS(BCACHEFS_ROOT_INO, 0),
2005 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k, ({
2006 bch2_disk_reservation_put(c, &res);
2007 check_extent(trans, &iter, k, &w, &s, &extent_ends, &res) ?:
2008 check_extent_overbig(trans, &iter, k);
2010 check_i_sectors_notnested(trans, &w));
2012 bch2_disk_reservation_put(c, &res);
2013 extent_ends_exit(&extent_ends);
2014 inode_walker_exit(&w);
2015 snapshots_seen_exit(&s);
2021 int bch2_check_indirect_extents(struct bch_fs *c)
2023 struct disk_reservation res = { 0 };
2025 int ret = bch2_trans_run(c,
2026 for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
2028 BTREE_ITER_prefetch, k,
2030 BCH_TRANS_COMMIT_no_enospc, ({
2031 bch2_disk_reservation_put(c, &res);
2032 check_extent_overbig(trans, &iter, k);
2035 bch2_disk_reservation_put(c, &res);
2040 static int check_subdir_count_notnested(struct btree_trans *trans, struct inode_walker *w)
2042 struct bch_fs *c = trans->c;
2046 darray_for_each(w->inodes, i) {
2047 if (i->inode.bi_nlink == i->count)
2050 count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->inode.bi_snapshot);
2054 if (i->count != count2) {
2055 bch_err_ratelimited(c, "fsck counted subdirectories wrong for inum %llu:%u: got %llu should be %llu",
2056 w->last_pos.inode, i->inode.bi_snapshot, i->count, count2);
2058 if (i->inode.bi_nlink == i->count)
2062 if (i->inode.bi_nlink != i->count) {
2063 CLASS(printbuf, buf)();
2065 lockrestart_do(trans,
2066 bch2_inum_snapshot_to_path(trans, w->last_pos.inode,
2067 i->inode.bi_snapshot, NULL, &buf));
2069 if (fsck_err_on(i->inode.bi_nlink != i->count,
2070 trans, inode_dir_wrong_nlink,
2071 "directory with wrong i_nlink: got %u, should be %llu\n%s",
2072 i->inode.bi_nlink, i->count, buf.buf)) {
2073 i->inode.bi_nlink = i->count;
2074 ret = bch2_fsck_write_inode(trans, &i->inode);
2085 static int check_subdir_dirents_count(struct btree_trans *trans, struct inode_walker *w)
2087 u32 restart_count = trans->restart_count;
2088 return check_subdir_count_notnested(trans, w) ?:
2089 trans_was_restarted(trans, restart_count);
2092 /* find a subvolume that's a descendent of @snapshot: */
2093 static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *subvolid)
2095 struct btree_iter iter;
2099 for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN, 0, k, ret) {
2100 if (k.k->type != KEY_TYPE_subvolume)
2103 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
2104 if (bch2_snapshot_is_ancestor(trans->c, le32_to_cpu(s.v->snapshot), snapshot)) {
2105 bch2_trans_iter_exit(trans, &iter);
2106 *subvolid = k.k->p.offset;
2113 bch2_trans_iter_exit(trans, &iter);
2118 static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *iter,
2119 struct bkey_s_c_dirent d)
2121 struct bch_fs *c = trans->c;
2122 struct btree_iter subvol_iter = {};
2123 struct bch_inode_unpacked subvol_root;
2124 u32 parent_subvol = le32_to_cpu(d.v->d_parent_subvol);
2125 u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
2126 u32 parent_snapshot;
2127 u32 new_parent_subvol = 0;
2129 struct printbuf buf = PRINTBUF;
2132 ret = subvol_lookup(trans, parent_subvol, &parent_snapshot, &parent_inum);
2133 if (ret && !bch2_err_matches(ret, ENOENT))
2137 (!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot))) {
2138 int ret2 = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol);
2139 if (ret2 && !bch2_err_matches(ret, ENOENT))
2144 !new_parent_subvol &&
2145 (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
2147 * Couldn't find a subvol for dirent's snapshot - but we lost
2148 * subvols, so we need to reconstruct:
2150 ret = reconstruct_subvol(trans, d.k->p.snapshot, parent_subvol, 0);
2154 parent_snapshot = d.k->p.snapshot;
2157 if (fsck_err_on(ret,
2158 trans, dirent_to_missing_parent_subvol,
2159 "dirent parent_subvol points to missing subvolume\n%s",
2160 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)) ||
2161 fsck_err_on(!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot),
2162 trans, dirent_not_visible_in_parent_subvol,
2163 "dirent not visible in parent_subvol (not an ancestor of subvol snap %u)\n%s",
2165 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
2166 if (!new_parent_subvol) {
2167 bch_err(c, "could not find a subvol for snapshot %u", d.k->p.snapshot);
2168 return bch_err_throw(c, fsck_repair_unimplemented);
2171 struct bkey_i_dirent *new_dirent = bch2_bkey_make_mut_typed(trans, iter, &d.s_c, 0, dirent);
2172 ret = PTR_ERR_OR_ZERO(new_dirent);
2176 new_dirent->v.d_parent_subvol = cpu_to_le32(new_parent_subvol);
2179 struct bkey_s_c_subvolume s =
2180 bch2_bkey_get_iter_typed(trans, &subvol_iter,
2181 BTREE_ID_subvolumes, POS(0, target_subvol),
2183 ret = bkey_err(s.s_c);
2184 if (ret && !bch2_err_matches(ret, ENOENT))
2188 if (fsck_err(trans, dirent_to_missing_subvol,
2189 "dirent points to missing subvolume\n%s",
2190 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)))
2191 return bch2_fsck_remove_dirent(trans, d.k->p);
2196 if (le32_to_cpu(s.v->fs_path_parent) != parent_subvol) {
2197 printbuf_reset(&buf);
2199 prt_printf(&buf, "subvol with wrong fs_path_parent, should be %u\n",
2202 ret = bch2_inum_to_path(trans, (subvol_inum) { s.k->p.offset,
2203 le64_to_cpu(s.v->inode) }, &buf);
2207 bch2_bkey_val_to_text(&buf, c, s.s_c);
2209 if (fsck_err(trans, subvol_fs_path_parent_wrong, "%s", buf.buf)) {
2210 struct bkey_i_subvolume *n =
2211 bch2_bkey_make_mut_typed(trans, &subvol_iter, &s.s_c, 0, subvolume);
2212 ret = PTR_ERR_OR_ZERO(n);
2216 n->v.fs_path_parent = cpu_to_le32(parent_subvol);
2220 u64 target_inum = le64_to_cpu(s.v->inode);
2221 u32 target_snapshot = le32_to_cpu(s.v->snapshot);
2223 ret = bch2_inode_find_by_inum_snapshot(trans, target_inum, target_snapshot,
2225 if (ret && !bch2_err_matches(ret, ENOENT))
2229 bch_err(c, "subvol %u points to missing inode root %llu", target_subvol, target_inum);
2230 ret = bch_err_throw(c, fsck_repair_unimplemented);
2234 if (fsck_err_on(!ret && parent_subvol != subvol_root.bi_parent_subvol,
2235 trans, inode_bi_parent_wrong,
2236 "subvol root %llu has wrong bi_parent_subvol: got %u, should be %u",
2238 subvol_root.bi_parent_subvol, parent_subvol)) {
2239 subvol_root.bi_parent_subvol = parent_subvol;
2240 subvol_root.bi_snapshot = le32_to_cpu(s.v->snapshot);
2241 ret = __bch2_fsck_write_inode(trans, &subvol_root);
2246 ret = bch2_check_dirent_target(trans, iter, d, &subvol_root, true);
2252 bch2_trans_iter_exit(trans, &subvol_iter);
2253 printbuf_exit(&buf);
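/*
 * Per-dirent check: verify the hash and casefold flag, that the target inode
 * (or subvolume) exists and points back at this dirent, and accumulate
 * subdirectory counts for the i_nlink check:
 */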
2257 static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
2259 struct bch_hash_info *hash_info,
2260 struct inode_walker *dir,
2261 struct inode_walker *target,
2262 struct snapshots_seen *s,
2263 bool *need_second_pass)
2265 struct bch_fs *c = trans->c;
2266 struct inode_walker_entry *i;
2267 struct printbuf buf = PRINTBUF;
2270 ret = bch2_check_key_has_snapshot(trans, iter, k);
2272 ret = ret < 0 ? ret : 0;
2276 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
2280 if (k.k->type == KEY_TYPE_whiteout)
2283 if (dir->last_pos.inode != k.k->p.inode && dir->have_inodes) {
2284 ret = check_subdir_dirents_count(trans, dir);
2289 i = walk_inode(trans, dir, k);
2290 ret = PTR_ERR_OR_ZERO(i);
2294 ret = check_key_has_inode(trans, iter, dir, i, k);
2298 if (!i || i->whiteout)
2301 if (dir->first_this_inode)
2302 *hash_info = bch2_hash_info_init(c, &i->inode);
2303 dir->first_this_inode = false;
2305 hash_info->cf_encoding = bch2_inode_casefold(c, &i->inode) ? c->cf_encoding : NULL;
2307 ret = bch2_str_hash_check_key(trans, s, &bch2_dirent_hash_desc, hash_info,
2308 iter, k, need_second_pass);
2312 /* dirent has been deleted */
2317 if (k.k->type != KEY_TYPE_dirent)
2320 struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
2322 /* check casefold */
2323 if (fsck_err_on(d.v->d_casefold != !!hash_info->cf_encoding,
2324 trans, dirent_casefold_mismatch,
2325 "dirent casefold does not match dir casefold\n%s",
2326 (printbuf_reset(&buf),
2327 bch2_bkey_val_to_text(&buf, c, k),
2329 subvol_inum dir_inum = { .subvol = d.v->d_type == DT_SUBVOL
2330 ? le32_to_cpu(d.v->d_parent_subvol)
2333 u64 target = d.v->d_type == DT_SUBVOL
2334 ? le32_to_cpu(d.v->d_child_subvol)
2335 : le64_to_cpu(d.v->d_inum);
2336 struct qstr name = bch2_dirent_get_name(d);
2338 struct bkey_i_dirent *new_d =
2339 bch2_dirent_create_key(trans, hash_info, dir_inum,
2340 d.v->d_type, &name, NULL, target);
2341 ret = PTR_ERR_OR_ZERO(new_d);
2345 new_d->k.p.inode = d.k->p.inode;
2346 new_d->k.p.snapshot = d.k->p.snapshot;
2348 struct btree_iter dup_iter = {};
2349 ret = bch2_hash_delete_at(trans,
2350 bch2_dirent_hash_desc, hash_info, iter,
2351 BTREE_UPDATE_internal_snapshot_node) ?:
2352 bch2_str_hash_repair_key(trans, s,
2353 &bch2_dirent_hash_desc, hash_info,
2354 iter, bkey_i_to_s_c(&new_d->k_i),
2355 &dup_iter, bkey_s_c_null,
2360 if (d.v->d_type == DT_SUBVOL) {
2361 ret = check_dirent_to_subvol(trans, iter, d);
2365 ret = get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
2369 if (fsck_err_on(!target->inodes.nr,
2370 trans, dirent_to_missing_inode,
2371 "dirent points to missing inode:\n%s",
2372 (printbuf_reset(&buf),
2373 bch2_bkey_val_to_text(&buf, c, k),
2375 ret = bch2_fsck_remove_dirent(trans, d.k->p);
2380 darray_for_each(target->inodes, i) {
2381 ret = bch2_check_dirent_target(trans, iter, d, &i->inode, true);
2386 darray_for_each(target->deletes, i)
2387 if (fsck_err_on(!snapshot_list_has_id(&s->ids, *i),
2388 trans, dirent_to_overwritten_inode,
2389 "dirent points to inode overwritten in snapshot %u:\n%s",
2391 (printbuf_reset(&buf),
2392 bch2_bkey_val_to_text(&buf, c, k),
2394 struct btree_iter delete_iter;
2395 bch2_trans_iter_init(trans, &delete_iter,
2397 SPOS(k.k->p.inode, k.k->p.offset, *i),
2399 ret = bch2_btree_iter_traverse(trans, &delete_iter) ?:
2400 bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
2403 BTREE_UPDATE_internal_snapshot_node);
2404 bch2_trans_iter_exit(trans, &delete_iter);
2411 ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
2415 for_each_visible_inode(c, s, dir, d.k->p.snapshot, i) {
2416 if (d.v->d_type == DT_DIR)
2418 i->i_size += bkey_bytes(d.k);
2423 printbuf_exit(&buf);
2428 * Walk dirents: verify that they all have a corresponding S_ISDIR inode, validate d_type
2431 int bch2_check_dirents(struct bch_fs *c)
2433 struct inode_walker dir = inode_walker_init();
2434 struct inode_walker target = inode_walker_init();
2435 struct snapshots_seen s;
2436 struct bch_hash_info hash_info;
2437 bool need_second_pass = false, did_second_pass = false;
2440 snapshots_seen_init(&s);
2442 ret = bch2_trans_run(c,
2443 for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
2444 POS(BCACHEFS_ROOT_INO, 0),
2445 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
2446 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
2447 check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s,
2448 &need_second_pass)) ?:
2449 check_subdir_count_notnested(trans, &dir));
2451 if (!ret && need_second_pass && !did_second_pass) {
2452 bch_info(c, "check_dirents requires second pass");
2453 swap(did_second_pass, need_second_pass);
2457 if (!ret && need_second_pass) {
2458 bch_err(c, "dirents not repairing");
2462 snapshots_seen_exit(&s);
2463 inode_walker_exit(&dir);
2464 inode_walker_exit(&target);
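/*
 * Some hash repairs cannot be completed in a single walk, so
 * bch2_check_dirents() may rerun itself once (see need_second_pass above).
 * A minimal sketch of that control flow, with a hypothetical run_pass()
 * standing in for the btree walk and a placeholder error code:
 *
 *	bool need_second_pass = false, did_second_pass = false;
 *	int ret;
 *again:
 *	ret = run_pass(&need_second_pass);
 *	if (!ret && need_second_pass && !did_second_pass) {
 *		did_second_pass  = true;
 *		need_second_pass = false;
 *		goto again;
 *	}
 *	if (!ret && need_second_pass)
 *		ret = -EINVAL;	// placeholder: still inconsistent after two passes
 */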
2469 static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
2471 struct bch_hash_info *hash_info,
2472 struct inode_walker *inode)
2474 struct bch_fs *c = trans->c;
2476 int ret = bch2_check_key_has_snapshot(trans, iter, k);
2482 struct inode_walker_entry *i = walk_inode(trans, inode, k);
2483 ret = PTR_ERR_OR_ZERO(i);
2487 ret = check_key_has_inode(trans, iter, inode, i, k);
2491 if (!i || i->whiteout)
2494 if (inode->first_this_inode)
2495 *hash_info = bch2_hash_info_init(c, &i->inode);
2496 inode->first_this_inode = false;
2498 bool need_second_pass = false;
2499 return bch2_str_hash_check_key(trans, NULL, &bch2_xattr_hash_desc, hash_info,
2500 iter, k, &need_second_pass);
2504 * Walk xattrs: verify that they all have a corresponding inode
2506 int bch2_check_xattrs(struct bch_fs *c)
2508 struct inode_walker inode = inode_walker_init();
2509 struct bch_hash_info hash_info;
2512 ret = bch2_trans_run(c,
2513 for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
2514 POS(BCACHEFS_ROOT_INO, 0),
2515 BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
2518 BCH_TRANS_COMMIT_no_enospc,
2519 check_xattr(trans, &iter, k, &hash_info, &inode)));
2521 inode_walker_exit(&inode);
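/*
 * check_root_trans(): make sure the root subvolume and the root directory
 * inode exist, recreating either one (an empty S_IFDIR|0755 inode for the
 * root directory) if it is missing or malformed.
 */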
2526 static int check_root_trans(struct btree_trans *trans)
2528 struct bch_fs *c = trans->c;
2529 struct bch_inode_unpacked root_inode;
2534 ret = subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
2535 if (ret && !bch2_err_matches(ret, ENOENT))
2538 if (mustfix_fsck_err_on(ret, trans, root_subvol_missing,
2539 "root subvol missing")) {
2540 struct bkey_i_subvolume *root_subvol =
2541 bch2_trans_kmalloc(trans, sizeof(*root_subvol));
2542 ret = PTR_ERR_OR_ZERO(root_subvol);
2547 inum = BCACHEFS_ROOT_INO;
2549 bkey_subvolume_init(&root_subvol->k_i);
2550 root_subvol->k.p.offset = BCACHEFS_ROOT_SUBVOL;
2551 root_subvol->v.flags = 0;
2552 root_subvol->v.snapshot = cpu_to_le32(snapshot);
2553 root_subvol->v.inode = cpu_to_le64(inum);
2554 ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol->k_i, 0);
2555 bch_err_msg(c, ret, "writing root subvol");
2560 ret = bch2_inode_find_by_inum_snapshot(trans, BCACHEFS_ROOT_INO, snapshot,
2562 if (ret && !bch2_err_matches(ret, ENOENT))
2565 if (mustfix_fsck_err_on(ret,
2566 trans, root_dir_missing,
2567 "root directory missing") ||
2568 mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode),
2569 trans, root_inode_not_dir,
2570 "root inode not a directory")) {
2571 bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
2573 root_inode.bi_inum = inum;
2574 root_inode.bi_snapshot = snapshot;
2576 ret = __bch2_fsck_write_inode(trans, &root_inode);
2577 bch_err_msg(c, ret, "writing root inode");
2584 /* Get root directory, create if it doesn't exist: */
2585 int bch2_check_root(struct bch_fs *c)
2587 int ret = bch2_trans_commit_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
2588 check_root_trans(trans));
2593 static bool darray_u32_has(darray_u32 *d, u32 v)
2595 darray_for_each(*d, i)
2601 static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k)
2603 struct bch_fs *c = trans->c;
2604 struct btree_iter parent_iter = {};
2605 darray_u32 subvol_path = {};
2606 struct printbuf buf = PRINTBUF;
2609 if (k.k->type != KEY_TYPE_subvolume)
2612 subvol_inum start = {
2613 .subvol = k.k->p.offset,
2614 .inum = le64_to_cpu(bkey_s_c_to_subvolume(k).v->inode),
2617 while (k.k->p.offset != BCACHEFS_ROOT_SUBVOL) {
2618 ret = darray_push(&subvol_path, k.k->p.offset);
2622 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
2624 struct bch_inode_unpacked subvol_root;
2625 ret = bch2_inode_find_by_inum_trans(trans,
2626 (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
2631 u32 parent = le32_to_cpu(s.v->fs_path_parent);
2633 if (darray_u32_has(&subvol_path, parent)) {
2634 printbuf_reset(&buf);
2635 prt_printf(&buf, "subvolume loop: ");
2637 ret = bch2_inum_to_path(trans, start, &buf);
2641 if (fsck_err(trans, subvol_loop, "%s", buf.buf))
2642 ret = reattach_subvol(trans, s);
2646 bch2_trans_iter_exit(trans, &parent_iter);
2647 bch2_trans_iter_init(trans, &parent_iter,
2648 BTREE_ID_subvolumes, POS(0, parent), 0);
2649 k = bch2_btree_iter_peek_slot(trans, &parent_iter);
2654 if (fsck_err_on(k.k->type != KEY_TYPE_subvolume,
2655 trans, subvol_unreachable,
2656 "unreachable subvolume %s",
2657 (printbuf_reset(&buf),
2658 bch2_bkey_val_to_text(&buf, c, s.s_c),
2660 ret = reattach_subvol(trans, s);
2666 printbuf_exit(&buf);
2667 darray_exit(&subvol_path);
2668 bch2_trans_iter_exit(trans, &parent_iter);
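/*
 * check_subvol_path() above walks fs_path_parent pointers from each
 * subvolume towards BCACHEFS_ROOT_SUBVOL, reattaching any subvolume that is
 * part of a loop or whose parent no longer exists.
 */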
2672 int bch2_check_subvolume_structure(struct bch_fs *c)
2674 int ret = bch2_trans_run(c,
2675 for_each_btree_key_commit(trans, iter,
2676 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
2677 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
2678 check_subvol_path(trans, &iter, k)));
2683 static int bch2_bi_depth_renumber_one(struct btree_trans *trans,
2684 u64 inum, u32 snapshot,
2687 struct btree_iter iter;
2688 struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
2689 SPOS(0, inum, snapshot), 0);
2691 struct bch_inode_unpacked inode;
2692 int ret = bkey_err(k) ?:
2693 !bkey_is_inode(k.k) ? -BCH_ERR_ENOENT_inode
2694 : bch2_inode_unpack(k, &inode);
2698 if (inode.bi_depth != new_depth) {
2699 inode.bi_depth = new_depth;
2700 ret = __bch2_fsck_write_inode(trans, &inode) ?:
2701 bch2_trans_commit(trans, NULL, NULL, 0);
2704 bch2_trans_iter_exit(trans, &iter);
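/*
 * Rewrite bi_depth for every inode on @path, walking the path in reverse so
 * we start from the entry closest to the root found by check_path_loop().
 */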
2708 static int bch2_bi_depth_renumber(struct btree_trans *trans, darray_u64 *path,
2709 u32 snapshot, u32 new_bi_depth)
2711 u32 restart_count = trans->restart_count;
2714 darray_for_each_reverse(*path, i) {
2715 ret = nested_lockrestart_do(trans,
2716 bch2_bi_depth_renumber_one(trans, *i, snapshot, new_bi_depth));
2717 bch_err_fn(trans->c, ret);
2724 return ret ?: trans_was_restarted(trans, restart_count);
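/*
 * check_path_loop(): walk up from an inode via its bi_dir/bi_dir_offset
 * backpointer, pushing each inode number onto @path; if we ever see an inum
 * that is already on the path we have a directory loop, repaired by removing
 * the offending backpointer and reattaching the inode. bi_depth values seen
 * along the way are renumbered if they turn out to be inconsistent.
 */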
2727 static int check_path_loop(struct btree_trans *trans, struct bkey_s_c inode_k)
2729 struct bch_fs *c = trans->c;
2730 struct btree_iter inode_iter = {};
2731 darray_u64 path = {};
2732 struct printbuf buf = PRINTBUF;
2733 u32 snapshot = inode_k.k->p.snapshot;
2734 bool redo_bi_depth = false;
2735 u32 min_bi_depth = U32_MAX;
2738 struct bpos start = inode_k.k->p;
2740 struct bch_inode_unpacked inode;
2741 ret = bch2_inode_unpack(inode_k, &inode);
2746 * If we're running full fsck, check_dirents() will have already run,
2747 * and we shouldn't see any missing backpointers here - otherwise that's
2748 * handled separately, by check_unreachable_inodes
2750 while (!inode.bi_subvol &&
2751 bch2_inode_has_backpointer(&inode)) {
2752 struct btree_iter dirent_iter;
2753 struct bkey_s_c_dirent d;
2755 d = dirent_get_by_pos(trans, &dirent_iter,
2756 SPOS(inode.bi_dir, inode.bi_dir_offset, snapshot));
2757 ret = bkey_err(d.s_c);
2758 if (ret && !bch2_err_matches(ret, ENOENT))
2761 if (!ret && (ret = dirent_points_to_inode(c, d, &inode)))
2762 bch2_trans_iter_exit(trans, &dirent_iter);
2764 if (bch2_err_matches(ret, ENOENT)) {
2765 printbuf_reset(&buf);
2766 bch2_bkey_val_to_text(&buf, c, inode_k);
2767 bch_err(c, "unreachable inode in check_directory_structure: %s\n%s",
2768 bch2_err_str(ret), buf.buf);
2772 bch2_trans_iter_exit(trans, &dirent_iter);
2774 ret = darray_push(&path, inode.bi_inum);
2778 bch2_trans_iter_exit(trans, &inode_iter);
2779 inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes,
2780 SPOS(0, inode.bi_dir, snapshot), 0);
2782 struct bch_inode_unpacked parent_inode;
2783 ret = bkey_err(inode_k) ?:
2784 !bkey_is_inode(inode_k.k) ? -BCH_ERR_ENOENT_inode
2785 : bch2_inode_unpack(inode_k, &parent_inode);
2787 /* Should have been caught in dirents pass */
2788 bch_err_msg(c, ret, "error looking up parent directory");
2792 min_bi_depth = parent_inode.bi_depth;
2794 if (parent_inode.bi_depth < inode.bi_depth &&
2795 min_bi_depth < U16_MAX)
2798 inode = parent_inode;
2799 redo_bi_depth = true;
2801 if (darray_find(path, inode.bi_inum)) {
2802 printbuf_reset(&buf);
2803 prt_printf(&buf, "directory structure loop in snapshot %u: ",
2806 ret = bch2_inum_snapshot_to_path(trans, start.offset, start.snapshot, NULL, &buf);
2810 if (c->opts.verbose) {
2812 darray_for_each(path, i)
2813 prt_printf(&buf, "%llu ", *i);
2816 if (fsck_err(trans, dir_loop, "%s", buf.buf)) {
2817 ret = remove_backpointer(trans, &inode);
2818 bch_err_msg(c, ret, "removing dirent");
2822 ret = reattach_inode(trans, &inode);
2823 bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum);
2830 if (inode.bi_subvol)
2834 ret = bch2_bi_depth_renumber(trans, &path, snapshot, min_bi_depth);
2837 bch2_trans_iter_exit(trans, &inode_iter);
2839 printbuf_exit(&buf);
2845 * Check for loops in the directory structure: all other connectivity issues
2846 * have been fixed by prior passes
2848 int bch2_check_directory_structure(struct bch_fs *c)
2850 int ret = bch2_trans_run(c,
2851 for_each_btree_key_reverse_commit(trans, iter, BTREE_ID_inodes, POS_MIN,
2853 BTREE_ITER_prefetch|
2854 BTREE_ITER_all_snapshots, k,
2855 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
2856 if (!S_ISDIR(bkey_inode_mode(k)))
2859 if (bch2_inode_flags(k) & BCH_INODE_unlinked)
2862 check_path_loop(trans, k);
2869 struct nlink_table {
2880 static int add_nlink(struct bch_fs *c, struct nlink_table *t,
2881 u64 inum, u32 snapshot)
2883 if (t->nr == t->size) {
2884 size_t new_size = max_t(size_t, 128UL, t->size * 2);
2885 void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);
2888 bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
2890 return bch_err_throw(c, ENOMEM_fsck_add_nlink);
2894 memcpy(d, t->d, t->size * sizeof(t->d[0]));
2902 t->d[t->nr++] = (struct nlink) {
2904 .snapshot = snapshot,
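/*
 * add_nlink() grows the table by doubling, with a 128-entry floor, so n
 * insertions do O(n) total copying: reaching 1024 entries costs
 * 128 + 256 + 512 element copies, still under 1024. An illustrative sketch
 * of the same policy for a generic kvmalloc'd array (not part of this file):
 *
 *	if (t->nr == t->size) {
 *		size_t new_size = max_t(size_t, 128UL, t->size * 2);
 *		void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);
 *		if (!d)
 *			return -ENOMEM;
 *		memcpy(d, t->d, t->size * sizeof(t->d[0]));
 *		kvfree(t->d);
 *		t->d	= d;
 *		t->size	= new_size;
 *	}
 */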
2910 static int nlink_cmp(const void *_l, const void *_r)
2912 const struct nlink *l = _l;
2913 const struct nlink *r = _r;
2915 return cmp_int(l->inum, r->inum);
2918 static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
2919 struct nlink_table *links,
2920 u64 range_start, u64 range_end, u64 inum, u32 snapshot)
2922 struct nlink *link, key = {
2923 .inum = inum, .snapshot = U32_MAX,
2926 if (inum < range_start || inum >= range_end)
2929 link = __inline_bsearch(&key, links->d, links->nr,
2930 sizeof(links->d[0]), nlink_cmp);
2934 while (link > links->d && link[0].inum == link[-1].inum)
2937 for (; link < links->d + links->nr && link->inum == inum; link++)
2938 if (ref_visible(c, s, snapshot, link->snapshot)) {
2940 if (link->snapshot >= snapshot)
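/*
 * nlink_cmp() orders entries by inum only, so all snapshot versions of an
 * inode are adjacent in the sorted table but bsearch() may land on any one
 * of them; inc_link() therefore rewinds to the first entry of the run before
 * walking forward. The same idiom on a plain sorted u64 array (illustrative
 * sketch only; cmp_u64() and visit() are placeholders):
 *
 *	u64 *hit = bsearch(&key, tbl, nr, sizeof(*tbl), cmp_u64);
 *	if (hit) {
 *		while (hit > tbl && hit[-1] == key)
 *			hit--;				// rewind to leftmost match
 *		for (; hit < tbl + nr && *hit == key; hit++)
 *			visit(hit);			// every duplicate, in order
 *	}
 */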
2946 static int check_nlinks_find_hardlinks(struct bch_fs *c,
2947 struct nlink_table *t,
2948 u64 start, u64 *end)
2950 int ret = bch2_trans_run(c,
2951 for_each_btree_key(trans, iter, BTREE_ID_inodes,
2954 BTREE_ITER_prefetch|
2955 BTREE_ITER_all_snapshots, k, ({
2956 if (!bkey_is_inode(k.k))
2959 /* Should never fail, checked by bch2_inode_invalid: */
2960 struct bch_inode_unpacked u;
2961 _ret3 = bch2_inode_unpack(k, &u);
2966 * Backpointer and directory structure checks are sufficient for
2967 * directories, since they can't have hardlinks:
2969 if (S_ISDIR(u.bi_mode))
2973 * Previous passes ensured that bi_nlink is nonzero if
2974 * it had multiple hardlinks:
2979 ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
2981 *end = k.k->p.offset;
2993 static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links,
2994 u64 range_start, u64 range_end)
2996 struct snapshots_seen s;
2998 snapshots_seen_init(&s);
3000 int ret = bch2_trans_run(c,
3001 for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN,
3003 BTREE_ITER_prefetch|
3004 BTREE_ITER_all_snapshots, k, ({
3005 ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
3009 if (k.k->type == KEY_TYPE_dirent) {
3010 struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
3012 if (d.v->d_type != DT_DIR &&
3013 d.v->d_type != DT_SUBVOL)
3014 inc_link(c, &s, links, range_start, range_end,
3015 le64_to_cpu(d.v->d_inum), d.k->p.snapshot);
3020 snapshots_seen_exit(&s);
3026 static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter,
3028 struct nlink_table *links,
3029 size_t *idx, u64 range_end)
3031 struct bch_inode_unpacked u;
3032 struct nlink *link = &links->d[*idx];
3035 if (k.k->p.offset >= range_end)
3038 if (!bkey_is_inode(k.k))
3041 ret = bch2_inode_unpack(k, &u);
3045 if (S_ISDIR(u.bi_mode))
3051 while ((cmp_int(link->inum, k.k->p.offset) ?:
3052 cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
3053 BUG_ON(*idx == links->nr);
3054 link = &links->d[++*idx];
3057 if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count,
3058 trans, inode_wrong_nlink,
3059 "inode %llu type %s has wrong i_nlink (%u, should be %u)",
3060 u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
3061 bch2_inode_nlink_get(&u), link->count)) {
3062 bch2_inode_nlink_set(&u, link->count);
3063 ret = __bch2_fsck_write_inode(trans, &u);
3070 static int check_nlinks_update_hardlinks(struct bch_fs *c,
3071 struct nlink_table *links,
3072 u64 range_start, u64 range_end)
3076 int ret = bch2_trans_run(c,
3077 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
3078 POS(0, range_start),
3079 BTREE_ITER_intent|BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
3080 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
3081 check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)));
3083 bch_err(c, "error in fsck walking inodes: %s", bch2_err_str(ret));
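/*
 * bch2_check_nlinks() runs in chunks to bound memory: each iteration builds
 * an nlink_table for as many inode numbers as it can, records where it
 * stopped in next_iter_range_start, counts dirents pointing into that range,
 * then fixes up i_nlink for those inodes, repeating until the range start
 * reaches U64_MAX. In outline:
 *
 *	u64 start, next = 0;
 *	do {
 *		start = next;
 *		next  = U64_MAX;
 *		// find_hardlinks() may lower next if it stops early
 *		// walk_dirents() and update_hardlinks() cover [start, next)
 *	} while (next != U64_MAX);
 */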
3090 int bch2_check_nlinks(struct bch_fs *c)
3092 struct nlink_table links = { 0 };
3093 u64 this_iter_range_start, next_iter_range_start = 0;
3097 this_iter_range_start = next_iter_range_start;
3098 next_iter_range_start = U64_MAX;
3100 ret = check_nlinks_find_hardlinks(c, &links,
3101 this_iter_range_start,
3102 &next_iter_range_start);
3104 ret = check_nlinks_walk_dirents(c, &links,
3105 this_iter_range_start,
3106 next_iter_range_start);
3110 ret = check_nlinks_update_hardlinks(c, &links,
3111 this_iter_range_start,
3112 next_iter_range_start);
3117 } while (next_iter_range_start != U64_MAX);
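/*
 * fix_reflink_p_key(): rewrite any reflink pointer that still has nonzero
 * front_pad/back_pad (presumably clearing them - the pass below is gated on
 * bcachefs_metadata_version_reflink_p_fix, so it only runs on filesystems
 * from before that version).
 */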
3124 static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
3127 struct bkey_s_c_reflink_p p;
3128 struct bkey_i_reflink_p *u;
3130 if (k.k->type != KEY_TYPE_reflink_p)
3133 p = bkey_s_c_to_reflink_p(k);
3135 if (!p.v->front_pad && !p.v->back_pad)
3138 u = bch2_trans_kmalloc(trans, sizeof(*u));
3139 int ret = PTR_ERR_OR_ZERO(u);
3143 bkey_reassemble(&u->k_i, k);
3147 return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_norun);
3150 int bch2_fix_reflink_p(struct bch_fs *c)
3152 if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
3155 int ret = bch2_trans_run(c,
3156 for_each_btree_key_commit(trans, iter,
3157 BTREE_ID_extents, POS_MIN,
3158 BTREE_ITER_intent|BTREE_ITER_prefetch|
3159 BTREE_ITER_all_snapshots, k,
3160 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
3161 fix_reflink_p_key(trans, &iter, k)));
3166 #ifndef NO_BCACHEFS_CHARDEV
3168 struct fsck_thread {
3169 struct thread_with_stdio thr;
3171 struct bch_opts opts;
3174 static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr)
3176 struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr);
3180 static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio)
3182 struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
3183 struct bch_fs *c = thr->c;
3185 int ret = PTR_ERR_OR_ZERO(c);
3189 ret = bch2_fs_start(thr->c);
3193 if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
3194 bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name);
3197 if (test_bit(BCH_FS_error, &c->flags)) {
3198 bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name);
3206 static const struct thread_with_stdio_ops bch2_offline_fsck_ops = {
3207 .exit = bch2_fsck_thread_exit,
3208 .fn = bch2_fsck_offline_thread_fn,
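/*
 * Offline fsck ioctl: copy the device paths and mount options from
 * userspace, force read_only/nostart and disable error ratelimiting, open
 * the filesystem, and run bch2_fs_start() from a kernel thread whose output
 * is redirected back to the caller via thread_with_stdio.
 */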
3211 long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg)
3213 struct bch_ioctl_fsck_offline arg;
3214 struct fsck_thread *thr = NULL;
3215 darray_const_str devs = {};
3218 if (copy_from_user(&arg, user_arg, sizeof(arg)))
3224 if (!capable(CAP_SYS_ADMIN))
3227 for (size_t i = 0; i < arg.nr_devs; i++) {
3229 ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64));
3233 char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX);
3234 ret = PTR_ERR_OR_ZERO(dev_str);
3238 ret = darray_push(&devs, dev_str);
3245 thr = kzalloc(sizeof(*thr), GFP_KERNEL);
3251 thr->opts = bch2_opts_empty();
3254 char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
3255 ret = PTR_ERR_OR_ZERO(optstr) ?:
3256 bch2_parse_mount_opts(NULL, &thr->opts, NULL, optstr, false);
3257 if (!IS_ERR(optstr))
3264 opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio);
3265 opt_set(thr->opts, read_only, 1);
3266 opt_set(thr->opts, ratelimit_errors, 0);
3268 /* We need request_key() to be called before we punt to kthread: */
3269 opt_set(thr->opts, nostart, true);
3271 bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops);
3273 thr->c = bch2_fs_open(&devs, &thr->opts);
3275 if (!IS_ERR(thr->c) &&
3276 thr->c->opts.errors == BCH_ON_ERROR_panic)
3277 thr->c->opts.errors = BCH_ON_ERROR_ro;
3279 ret = __bch2_run_thread_with_stdio(&thr->thr);
3281 darray_for_each(devs, i)
3287 bch2_fsck_thread_exit(&thr->thr);
3288 pr_err("ret %s", bch2_err_str(ret));
3292 static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio)
3294 struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
3295 struct bch_fs *c = thr->c;
3297 c->stdio_filter = current;
3298 c->stdio = &thr->thr.stdio;
3301 * XXX: can we figure out a way to do this without mucking with c->opts?
3303 unsigned old_fix_errors = c->opts.fix_errors;
3304 if (opt_defined(thr->opts, fix_errors))
3305 c->opts.fix_errors = thr->opts.fix_errors;
3307 c->opts.fix_errors = FSCK_FIX_ask;
3309 c->opts.fsck = true;
3310 set_bit(BCH_FS_in_fsck, &c->flags);
3312 int ret = bch2_run_online_recovery_passes(c, ~0ULL);
3314 clear_bit(BCH_FS_in_fsck, &c->flags);
3318 c->stdio_filter = NULL;
3319 c->opts.fix_errors = old_fix_errors;
3321 up(&c->recovery.run_lock);
3326 static const struct thread_with_stdio_ops bch2_online_fsck_ops = {
3327 .exit = bch2_fsck_thread_exit,
3328 .fn = bch2_fsck_online_thread_fn,
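/*
 * Online fsck ioctl: requires CAP_SYS_ADMIN, takes the recovery run_lock so
 * it cannot race with mount-time recovery, temporarily overrides
 * c->opts.fix_errors (from the caller's options, or FSCK_FIX_ask by
 * default), and runs the online recovery passes with output redirected to
 * the caller.
 */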
3331 long bch2_ioctl_fsck_online(struct bch_fs *c, struct bch_ioctl_fsck_online arg)
3333 struct fsck_thread *thr = NULL;
3339 if (!capable(CAP_SYS_ADMIN))
3342 if (!bch2_ro_ref_tryget(c))
3345 if (down_trylock(&c->recovery.run_lock)) {
3350 thr = kzalloc(sizeof(*thr), GFP_KERNEL);
3357 thr->opts = bch2_opts_empty();
3360 char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);
3362 ret = PTR_ERR_OR_ZERO(optstr) ?:
3363 bch2_parse_mount_opts(c, &thr->opts, NULL, optstr, false);
3364 if (!IS_ERR(optstr))
3371 ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops);
3376 bch2_fsck_thread_exit(&thr->thr);
3377 up(&c->recovery.run_lock);
3383 #endif /* NO_BCACHEFS_CHARDEV */