Merge tag 'platform-drivers-x86-v6.9-3' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-block.git] / fs / bcachefs / fsck.c
CommitLineData
1c6fdbd8
KO
1// SPDX-License-Identifier: GPL-2.0
2
3#include "bcachefs.h"
07a1006a 4#include "bkey_buf.h"
88dfe193 5#include "btree_cache.h"
1c6fdbd8 6#include "btree_update.h"
e3dc75eb 7#include "buckets.h"
91d961ba 8#include "darray.h"
1c6fdbd8
KO
9#include "dirent.h"
10#include "error.h"
96385742 11#include "fs-common.h"
1c6fdbd8
KO
12#include "fsck.h"
13#include "inode.h"
14#include "keylist.h"
d2554263 15#include "recovery_passes.h"
8e877caa 16#include "snapshot.h"
1c6fdbd8
KO
17#include "super.h"
18#include "xattr.h"
19
fc51b041 20#include <linux/bsearch.h>
1c6fdbd8 21#include <linux/dcache.h> /* struct qstr */
1c6fdbd8 22
42590b53
KO
23/*
24 * XXX: this is handling transaction restarts without returning
25 * -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
26 */
ef1669ff
KO
27static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
28 u32 snapshot)
424eb881 29{
424eb881
KO
30 u64 sectors = 0;
31
44ddd8ad
KO
32 int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
33 SPOS(inum, 0, snapshot),
34 POS(inum, U64_MAX),
35 0, k, ({
424eb881
KO
36 if (bkey_extent_is_allocation(k.k))
37 sectors += k.k->size;
44ddd8ad
KO
38 0;
39 }));
94f651e2
KO
40
41 return ret ?: sectors;
424eb881
KO
42}
43
ef1669ff
KO
44static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
45 u32 snapshot)
46{
ef1669ff 47 u64 subdirs = 0;
ef1669ff 48
44ddd8ad 49 int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
3a860b5a
KO
50 SPOS(inum, 0, snapshot),
51 POS(inum, U64_MAX),
44ddd8ad
KO
52 0, k, ({
53 if (k.k->type == KEY_TYPE_dirent &&
54 bkey_s_c_to_dirent(k).v->d_type == DT_DIR)
ef1669ff 55 subdirs++;
44ddd8ad
KO
56 0;
57 }));
ef1669ff
KO
58
59 return ret ?: subdirs;
60}
61
c98d132e
KO
62static int subvol_lookup(struct btree_trans *trans, u32 subvol,
63 u32 *snapshot, u64 *inum)
81ed9ce3 64{
97996ddf 65 struct bch_subvolume s;
cc053290 66 int ret = bch2_subvolume_get(trans, subvol, false, 0, &s);
81ed9ce3 67
97996ddf
KO
68 *snapshot = le32_to_cpu(s.snapshot);
69 *inum = le64_to_cpu(s.inode);
81ed9ce3 70 return ret;
81ed9ce3
KO
71}
72
c27314b4
KO
73static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
74 struct bch_inode_unpacked *inode)
75{
76 struct btree_iter iter;
77 struct bkey_s_c k;
78 int ret;
79
80 bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
81 POS(0, inode_nr),
82 BTREE_ITER_ALL_SNAPSHOTS);
83 k = bch2_btree_iter_peek(&iter);
84 ret = bkey_err(k);
85 if (ret)
86 goto err;
87
e88a75eb 88 if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
e47a390a 89 ret = -BCH_ERR_ENOENT_inode;
c27314b4
KO
90 goto err;
91 }
92
3e52c222 93 ret = bch2_inode_unpack(k, inode);
c27314b4 94err:
d2a990d1 95 bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
c27314b4
KO
96 bch2_trans_iter_exit(trans, &iter);
97 return ret;
98}
99
c98d132e 100static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
23f25223
KO
101 struct bch_inode_unpacked *inode,
102 u32 *snapshot)
8a85b20c 103{
67e0dd8f 104 struct btree_iter iter;
8a85b20c
KO
105 struct bkey_s_c k;
106 int ret;
107
bcb79a51
KO
108 k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
109 SPOS(0, inode_nr, *snapshot), 0);
8a85b20c
KO
110 ret = bkey_err(k);
111 if (ret)
112 goto err;
113
3e52c222
KO
114 ret = bkey_is_inode(k.k)
115 ? bch2_inode_unpack(k, inode)
e47a390a 116 : -BCH_ERR_ENOENT_inode;
4db65027
KO
117 if (!ret)
118 *snapshot = iter.pos.snapshot;
8a85b20c 119err:
67e0dd8f 120 bch2_trans_iter_exit(trans, &iter);
8a85b20c
KO
121 return ret;
122}
123
d2fda304 124static int lookup_dirent_in_snapshot(struct btree_trans *trans,
ef1669ff
KO
125 struct bch_hash_info hash_info,
126 subvol_inum dir, struct qstr *name,
d2fda304 127 u64 *target, unsigned *type, u32 snapshot)
ef1669ff
KO
128{
129 struct btree_iter iter;
130 struct bkey_s_c_dirent d;
d2fda304
KO
131 int ret = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
132 &hash_info, dir, name, 0, snapshot);
ef1669ff
KO
133 if (ret)
134 return ret;
135
136 d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
137 *target = le64_to_cpu(d.v->d_inum);
138 *type = d.v->d_type;
139 bch2_trans_iter_exit(trans, &iter);
140 return 0;
141}
142
ae8bbb9f 143static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
1c6fdbd8 144{
0f238367 145 struct bch_fs *c = trans->c;
67e0dd8f 146 struct btree_iter iter;
1c6fdbd8
KO
147 struct bch_inode_unpacked dir_inode;
148 struct bch_hash_info dir_hash_info;
1c6fdbd8 149 int ret;
1c6fdbd8 150
c27314b4 151 ret = lookup_first_inode(trans, pos.inode, &dir_inode);
b1fd23df 152 if (ret)
e492e7b6 153 goto err;
1c6fdbd8
KO
154
155 dir_hash_info = bch2_hash_info_init(c, &dir_inode);
156
67e0dd8f 157 bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);
b1fd23df 158
8ce1db80
KO
159 ret = bch2_btree_iter_traverse(&iter) ?:
160 bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
161 &dir_hash_info, &iter,
162 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
67e0dd8f 163 bch2_trans_iter_exit(trans, &iter);
e492e7b6 164err:
d2a990d1 165 bch_err_fn(c, ret);
ae8bbb9f 166 return ret;
b1fd23df
KO
167}
168
58686a25 169/* Get lost+found, create if it doesn't exist: */
d296e7b1 170static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
cc053290
KO
171 struct bch_inode_unpacked *lostfound,
172 u64 reattaching_inum)
1c6fdbd8 173{
58686a25 174 struct bch_fs *c = trans->c;
58686a25 175 struct qstr lostfound_str = QSTR("lost+found");
ef1669ff
KO
176 u64 inum = 0;
177 unsigned d_type = 0;
58686a25
KO
178 int ret;
179
d296e7b1
KO
180 struct bch_snapshot_tree st;
181 ret = bch2_snapshot_tree_lookup(trans,
182 bch2_snapshot_tree(c, snapshot), &st);
ef1669ff
KO
183 if (ret)
184 return ret;
81ed9ce3 185
d296e7b1 186 subvol_inum root_inum = { .subvol = le32_to_cpu(st.master_subvol) };
d296e7b1 187
cc053290
KO
188 struct bch_subvolume subvol;
189 ret = bch2_subvolume_get(trans, le32_to_cpu(st.master_subvol),
190 false, 0, &subvol);
191 bch_err_msg(c, ret, "looking up root subvol %u for snapshot %u",
192 le32_to_cpu(st.master_subvol), snapshot);
285b181a 193 if (ret)
58686a25
KO
194 return ret;
195
cc053290
KO
196 if (!subvol.inode) {
197 struct btree_iter iter;
198 struct bkey_i_subvolume *subvol = bch2_bkey_get_mut_typed(trans, &iter,
199 BTREE_ID_subvolumes, POS(0, le32_to_cpu(st.master_subvol)),
200 0, subvolume);
201 ret = PTR_ERR_OR_ZERO(subvol);
202 if (ret)
203 return ret;
204
205 subvol->v.inode = cpu_to_le64(reattaching_inum);
206 bch2_trans_iter_exit(trans, &iter);
207 }
208
209 root_inum.inum = le64_to_cpu(subvol.inode);
210
d296e7b1
KO
211 struct bch_inode_unpacked root_inode;
212 struct bch_hash_info root_hash_info;
d2fda304
KO
213 u32 root_inode_snapshot = snapshot;
214 ret = lookup_inode(trans, root_inum.inum, &root_inode, &root_inode_snapshot);
cc053290
KO
215 bch_err_msg(c, ret, "looking up root inode %llu for subvol %u",
216 root_inum.inum, le32_to_cpu(st.master_subvol));
d296e7b1
KO
217 if (ret)
218 return ret;
219
220 root_hash_info = bch2_hash_info_init(c, &root_inode);
ef1669ff 221
d2fda304
KO
222 ret = lookup_dirent_in_snapshot(trans, root_hash_info, root_inum,
223 &lostfound_str, &inum, &d_type, snapshot);
d296e7b1 224 if (bch2_err_matches(ret, ENOENT))
58686a25 225 goto create_lostfound;
58686a25 226
d2a990d1 227 bch_err_fn(c, ret);
285b181a 228 if (ret)
ef1669ff 229 return ret;
ef1669ff
KO
230
231 if (d_type != DT_DIR) {
232 bch_err(c, "error looking up lost+found: not a directory");
40a53b92 233 return -BCH_ERR_ENOENT_not_directory;
ef1669ff
KO
234 }
235
285b181a 236 /*
067d228b 237 * The bch2_check_dirents pass has already run, dangling dirents
285b181a
KO
238 * shouldn't exist here:
239 */
d2fda304
KO
240 ret = lookup_inode(trans, inum, lostfound, &snapshot);
241 bch_err_msg(c, ret, "looking up lost+found %llu:%u in (root inode %llu, snapshot root %u)",
242 inum, snapshot, root_inum.inum, bch2_snapshot_root(c, snapshot));
243 return ret;
58686a25 244
58686a25 245create_lostfound:
d296e7b1
KO
246 /*
247 * XXX: we could have a nicer log message here if we had a nice way to
248 * walk backpointers to print a path
249 */
250 bch_notice(c, "creating lost+found in snapshot %u", le32_to_cpu(st.root_snapshot));
251
252 u64 now = bch2_current_time(c);
253 struct btree_iter lostfound_iter = { NULL };
254 u64 cpu = raw_smp_processor_id();
255
285b181a 256 bch2_inode_init_early(c, lostfound);
d296e7b1
KO
257 bch2_inode_init_late(lostfound, now, 0, 0, S_IFDIR|0700, 0, &root_inode);
258 lostfound->bi_dir = root_inode.bi_inum;
259
260 root_inode.bi_nlink++;
261
262 ret = bch2_inode_create(trans, &lostfound_iter, lostfound, snapshot, cpu);
263 if (ret)
264 goto err;
285b181a 265
d296e7b1
KO
266 bch2_btree_iter_set_snapshot(&lostfound_iter, snapshot);
267 ret = bch2_btree_iter_traverse(&lostfound_iter);
268 if (ret)
269 goto err;
270
271 ret = bch2_dirent_create_snapshot(trans,
56e23047 272 0, root_inode.bi_inum, snapshot, &root_hash_info,
d296e7b1
KO
273 mode_to_type(lostfound->bi_mode),
274 &lostfound_str,
275 lostfound->bi_inum,
276 &lostfound->bi_dir_offset,
277 BCH_HASH_SET_MUST_CREATE) ?:
278 bch2_inode_write_flags(trans, &lostfound_iter, lostfound,
279 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
280err:
d2a990d1 281 bch_err_msg(c, ret, "creating lost+found");
d296e7b1 282 bch2_trans_iter_exit(trans, &lostfound_iter);
285b181a 283 return ret;
58686a25
KO
284}
285
c98d132e 286static int reattach_inode(struct btree_trans *trans,
81ed9ce3 287 struct bch_inode_unpacked *inode,
ef1669ff 288 u32 inode_snapshot)
58686a25
KO
289{
290 struct bch_hash_info dir_hash;
291 struct bch_inode_unpacked lostfound;
1c6fdbd8
KO
292 char name_buf[20];
293 struct qstr name;
176cf4bf 294 u64 dir_offset = 0;
56e23047 295 u32 dirent_snapshot = inode_snapshot;
1c6fdbd8
KO
296 int ret;
297
56e23047
KO
298 if (inode->bi_subvol) {
299 inode->bi_parent_subvol = BCACHEFS_ROOT_SUBVOL;
300
301 u64 root_inum;
302 ret = subvol_lookup(trans, inode->bi_parent_subvol,
303 &dirent_snapshot, &root_inum);
304 if (ret)
305 return ret;
306
307 snprintf(name_buf, sizeof(name_buf), "subvol-%u", inode->bi_subvol);
308 } else {
309 snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
310 }
311
cc053290 312 ret = lookup_lostfound(trans, dirent_snapshot, &lostfound, inode->bi_inum);
176cf4bf 313 if (ret)
d3ff7fec 314 return ret;
176cf4bf 315
58686a25
KO
316 if (S_ISDIR(inode->bi_mode)) {
317 lostfound.bi_nlink++;
176cf4bf 318
69c8e6ce 319 ret = __bch2_fsck_write_inode(trans, &lostfound, U32_MAX);
176cf4bf 320 if (ret)
d3ff7fec 321 return ret;
176cf4bf
KO
322 }
323
58686a25 324 dir_hash = bch2_hash_info_init(trans->c, &lostfound);
176cf4bf 325
58686a25 326 name = (struct qstr) QSTR(name_buf);
176cf4bf 327
d296e7b1 328 ret = bch2_dirent_create_snapshot(trans,
56e23047
KO
329 inode->bi_parent_subvol, lostfound.bi_inum,
330 dirent_snapshot,
d296e7b1
KO
331 &dir_hash,
332 inode_d_type(inode),
56e23047
KO
333 &name,
334 inode->bi_subvol ?: inode->bi_inum,
335 &dir_offset,
d296e7b1 336 BCH_HASH_SET_MUST_CREATE);
285b181a
KO
337 if (ret)
338 return ret;
339
340 inode->bi_dir = lostfound.bi_inum;
341 inode->bi_dir_offset = dir_offset;
342
69c8e6ce 343 return __bch2_fsck_write_inode(trans, inode, inode_snapshot);
285b181a
KO
344}
345
d3ff7fec
KO
346static int remove_backpointer(struct btree_trans *trans,
347 struct bch_inode_unpacked *inode)
348{
67e0dd8f 349 struct btree_iter iter;
bcb79a51 350 struct bkey_s_c_dirent d;
d3ff7fec
KO
351 int ret;
352
bcb79a51
KO
353 d = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_dirents,
354 POS(inode->bi_dir, inode->bi_dir_offset), 0,
355 dirent);
356 ret = bkey_err(d) ?:
357 __remove_dirent(trans, d.k->p);
67e0dd8f 358 bch2_trans_iter_exit(trans, &iter);
d3ff7fec
KO
359 return ret;
360}
361
663db5a5
KO
362static int reattach_subvol(struct btree_trans *trans, struct bkey_s_c_subvolume s)
363{
364 struct bch_fs *c = trans->c;
365
366 struct bch_inode_unpacked inode;
367 int ret = bch2_inode_find_by_inum_trans(trans,
368 (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
369 &inode);
370 if (ret)
371 return ret;
372
373 ret = remove_backpointer(trans, &inode);
374 bch_err_msg(c, ret, "removing dirent");
375 if (ret)
376 return ret;
377
378 ret = reattach_inode(trans, &inode, le32_to_cpu(s.v->snapshot));
379 bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum);
380 return ret;
381}
382
cc053290
KO
383static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 subvolid, u64 inum)
384{
385 struct bch_fs *c = trans->c;
386
387 if (!bch2_snapshot_is_leaf(c, snapshotid)) {
388 bch_err(c, "need to reconstruct subvol, but have interior node snapshot");
389 return -BCH_ERR_fsck_repair_unimplemented;
390 }
391
392 /*
393 * If inum isn't set, that means we're being called from check_dirents,
394 * not check_inodes - the root of this subvolume doesn't exist or we
395 * would have found it there:
396 */
397 if (!inum) {
398 struct btree_iter inode_iter = {};
399 struct bch_inode_unpacked new_inode;
400 u64 cpu = raw_smp_processor_id();
401
402 bch2_inode_init_early(c, &new_inode);
403 bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, S_IFDIR|0755, 0, NULL);
404
405 new_inode.bi_subvol = subvolid;
406
407 int ret = bch2_inode_create(trans, &inode_iter, &new_inode, snapshotid, cpu) ?:
408 bch2_btree_iter_traverse(&inode_iter) ?:
409 bch2_inode_write(trans, &inode_iter, &new_inode);
410 bch2_trans_iter_exit(trans, &inode_iter);
411 if (ret)
412 return ret;
413
414 inum = new_inode.bi_inum;
415 }
416
417 bch_info(c, "reconstructing subvol %u with root inode %llu", subvolid, inum);
418
419 struct bkey_i_subvolume *new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
420 int ret = PTR_ERR_OR_ZERO(new_subvol);
421 if (ret)
422 return ret;
423
424 bkey_subvolume_init(&new_subvol->k_i);
425 new_subvol->k.p.offset = subvolid;
426 new_subvol->v.snapshot = cpu_to_le32(snapshotid);
427 new_subvol->v.inode = cpu_to_le64(inum);
428 ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &new_subvol->k_i, 0);
429 if (ret)
430 return ret;
431
432 struct btree_iter iter;
433 struct bkey_i_snapshot *s = bch2_bkey_get_mut_typed(trans, &iter,
434 BTREE_ID_snapshots, POS(0, snapshotid),
435 0, snapshot);
436 ret = PTR_ERR_OR_ZERO(s);
437 bch_err_msg(c, ret, "getting snapshot %u", snapshotid);
438 if (ret)
439 return ret;
440
441 u32 snapshot_tree = le32_to_cpu(s->v.tree);
442
443 s->v.subvol = cpu_to_le32(subvolid);
444 SET_BCH_SNAPSHOT_SUBVOL(&s->v, true);
445 bch2_trans_iter_exit(trans, &iter);
446
447 struct bkey_i_snapshot_tree *st = bch2_bkey_get_mut_typed(trans, &iter,
448 BTREE_ID_snapshot_trees, POS(0, snapshot_tree),
449 0, snapshot_tree);
450 ret = PTR_ERR_OR_ZERO(st);
451 bch_err_msg(c, ret, "getting snapshot tree %u", snapshot_tree);
452 if (ret)
453 return ret;
454
455 if (!st->v.master_subvol)
456 st->v.master_subvol = cpu_to_le32(subvolid);
457
458 bch2_trans_iter_exit(trans, &iter);
459 return 0;
460}
461
09d4c2ac
KO
462static int reconstruct_inode(struct btree_trans *trans, u32 snapshot, u64 inum, u64 size, unsigned mode)
463{
464 struct bch_fs *c = trans->c;
465 struct bch_inode_unpacked new_inode;
466
467 bch2_inode_init_early(c, &new_inode);
468 bch2_inode_init_late(&new_inode, bch2_current_time(c), 0, 0, mode|0755, 0, NULL);
469 new_inode.bi_size = size;
470 new_inode.bi_inum = inum;
471
472 return __bch2_fsck_write_inode(trans, &new_inode, snapshot);
473}
474
475static int reconstruct_reg_inode(struct btree_trans *trans, u32 snapshot, u64 inum)
476{
477 struct btree_iter iter = {};
478
479 bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, SPOS(inum, U64_MAX, snapshot), 0);
480 struct bkey_s_c k = bch2_btree_iter_peek_prev(&iter);
481 bch2_trans_iter_exit(trans, &iter);
482 int ret = bkey_err(k);
483 if (ret)
484 return ret;
485
486 return reconstruct_inode(trans, snapshot, inum, k.k->p.offset << 9, S_IFREG);
487}
488
49124d8a
KO
489struct snapshots_seen_entry {
490 u32 id;
491 u32 equiv;
492};
493
494struct snapshots_seen {
495 struct bpos pos;
496 DARRAY(struct snapshots_seen_entry) ids;
497};
498
499static inline void snapshots_seen_exit(struct snapshots_seen *s)
500{
501 darray_exit(&s->ids);
502}
503
504static inline void snapshots_seen_init(struct snapshots_seen *s)
505{
506 memset(s, 0, sizeof(*s));
507}
508
e2bd0617
KO
509static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s, u32 id)
510{
511 struct snapshots_seen_entry *i, n = {
512 .id = id,
513 .equiv = bch2_snapshot_equiv(c, id),
514 };
515 int ret = 0;
516
defd9e39 517 __darray_for_each(s->ids, i) {
e2bd0617
KO
518 if (i->id == id)
519 return 0;
520 if (i->id > id)
521 break;
522 }
523
524 ret = darray_insert_item(&s->ids, i - s->ids.data, n);
525 if (ret)
526 bch_err(c, "error reallocating snapshots_seen table (size %zu)",
527 s->ids.size);
528 return ret;
529}
530
49124d8a
KO
531static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
532 enum btree_id btree_id, struct bpos pos)
ef1669ff 533{
defd9e39 534 struct snapshots_seen_entry n = {
49124d8a
KO
535 .id = pos.snapshot,
536 .equiv = bch2_snapshot_equiv(c, pos.snapshot),
537 };
c7be3cb5 538 int ret = 0;
ef1669ff 539
e88a75eb 540 if (!bkey_eq(s->pos, pos))
91d961ba 541 s->ids.nr = 0;
49124d8a 542
ef1669ff 543 s->pos = pos;
6b20d746 544 s->pos.snapshot = n.equiv;
ef1669ff 545
6b20d746
KO
546 darray_for_each(s->ids, i) {
547 if (i->id == n.id)
49124d8a 548 return 0;
6b20d746
KO
549
550 /*
551 * We currently don't rigorously track for snapshot cleanup
552 * needing to be run, so it shouldn't be a fsck error yet:
553 */
554 if (i->equiv == n.equiv) {
555 bch_err(c, "snapshot deletion did not finish:\n"
556 " duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
88dfe193 557 bch2_btree_id_str(btree_id),
6b20d746
KO
558 pos.inode, pos.offset,
559 i->id, n.id, n.equiv);
3c471b65 560 set_bit(BCH_FS_need_delete_dead_snapshots, &c->flags);
ae2e13d7 561 return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots);
49124d8a 562 }
6b20d746 563 }
49124d8a
KO
564
565 ret = darray_push(&s->ids, n);
566 if (ret)
567 bch_err(c, "error reallocating snapshots_seen table (size %zu)",
568 s->ids.size);
569 return ret;
ef1669ff
KO
570}
571
572/**
573 * key_visible_in_snapshot - returns true if @id is a descendent of @ancestor,
574 * and @ancestor hasn't been overwritten in @seen
575 *
96dea3d5
KO
576 * @c: filesystem handle
577 * @seen: list of snapshot ids already seen at current position
578 * @id: descendent snapshot id
579 * @ancestor: ancestor snapshot id
580 *
581 * Returns: whether key in @ancestor snapshot is visible in @id snapshot
ef1669ff
KO
582 */
583static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
584 u32 id, u32 ancestor)
585{
586 ssize_t i;
587
464ee192
KO
588 EBUG_ON(id > ancestor);
589 EBUG_ON(!bch2_snapshot_is_equiv(c, id));
590 EBUG_ON(!bch2_snapshot_is_equiv(c, ancestor));
ef1669ff
KO
591
592 /* @ancestor should be the snapshot most recently added to @seen */
464ee192
KO
593 EBUG_ON(ancestor != seen->pos.snapshot);
594 EBUG_ON(ancestor != seen->ids.data[seen->ids.nr - 1].equiv);
ef1669ff
KO
595
596 if (id == ancestor)
597 return true;
598
599 if (!bch2_snapshot_is_ancestor(c, id, ancestor))
600 return false;
601
464ee192
KO
602 /*
603 * We know that @id is a descendant of @ancestor, we're checking if
604 * we've seen a key that overwrote @ancestor - i.e. also a descendent of
605 * @ascestor and with @id as a descendent.
606 *
607 * But we already know that we're scanning IDs between @id and @ancestor
608 * numerically, since snapshot ID lists are kept sorted, so if we find
609 * an id that's an ancestor of @id we're done:
610 */
611
91d961ba 612 for (i = seen->ids.nr - 2;
49124d8a 613 i >= 0 && seen->ids.data[i].equiv >= id;
ef1669ff 614 --i)
464ee192 615 if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i].equiv))
ef1669ff
KO
616 return false;
617
618 return true;
619}
620
621/**
622 * ref_visible - given a key with snapshot id @src that points to a key with
623 * snapshot id @dst, test whether there is some snapshot in which @dst is
624 * visible.
625 *
96dea3d5
KO
626 * @c: filesystem handle
627 * @s: list of snapshot IDs already seen at @src
628 * @src: snapshot ID of src key
629 * @dst: snapshot ID of dst key
630 * Returns: true if there is some snapshot in which @dst is visible
ef1669ff 631 *
96dea3d5 632 * Assumes we're visiting @src keys in natural key order
ef1669ff 633 */
96dea3d5
KO
634static bool ref_visible(struct bch_fs *c, struct snapshots_seen *s,
635 u32 src, u32 dst)
ef1669ff
KO
636{
637 return dst <= src
638 ? key_visible_in_snapshot(c, s, dst, src)
639 : bch2_snapshot_is_ancestor(c, src, dst);
640}
641
c58029ec
DH
642static int ref_visible2(struct bch_fs *c,
643 u32 src, struct snapshots_seen *src_seen,
644 u32 dst, struct snapshots_seen *dst_seen)
645{
646 src = bch2_snapshot_equiv(c, src);
647 dst = bch2_snapshot_equiv(c, dst);
648
649 if (dst > src) {
650 swap(dst, src);
651 swap(dst_seen, src_seen);
652 }
653 return key_visible_in_snapshot(c, src_seen, dst, src);
654}
655
49124d8a
KO
656#define for_each_visible_inode(_c, _s, _w, _snapshot, _i) \
657 for (_i = (_w)->inodes.data; _i < (_w)->inodes.data + (_w)->inodes.nr && \
658 (_i)->snapshot <= (_snapshot); _i++) \
ef1669ff
KO
659 if (key_visible_in_snapshot(_c, _s, _i->snapshot, _snapshot))
660
91d961ba
KO
661struct inode_walker_entry {
662 struct bch_inode_unpacked inode;
663 u32 snapshot;
f9f52bc4 664 bool seen_this_pos;
91d961ba
KO
665 u64 count;
666};
667
1c6fdbd8 668struct inode_walker {
ef1669ff 669 bool first_this_inode;
43b81a4e 670 bool recalculate_sums;
f9f52bc4 671 struct bpos last_pos;
ef1669ff 672
91d961ba 673 DARRAY(struct inode_walker_entry) inodes;
1c6fdbd8
KO
674};
675
ef1669ff
KO
676static void inode_walker_exit(struct inode_walker *w)
677{
91d961ba 678 darray_exit(&w->inodes);
ef1669ff
KO
679}
680
1c6fdbd8
KO
681static struct inode_walker inode_walker_init(void)
682{
ef1669ff
KO
683 return (struct inode_walker) { 0, };
684}
685
ef1669ff 686static int add_inode(struct bch_fs *c, struct inode_walker *w,
3e52c222 687 struct bkey_s_c inode)
ef1669ff
KO
688{
689 struct bch_inode_unpacked u;
ef1669ff
KO
690
691 BUG_ON(bch2_inode_unpack(inode, &u));
692
91d961ba 693 return darray_push(&w->inodes, ((struct inode_walker_entry) {
ef1669ff 694 .inode = u,
49124d8a 695 .snapshot = bch2_snapshot_equiv(c, inode.k->p.snapshot),
91d961ba 696 }));
1c6fdbd8
KO
697}
698
06dcca51
KO
699static int get_inodes_all_snapshots(struct btree_trans *trans,
700 struct inode_walker *w, u64 inum)
1c6fdbd8 701{
ef1669ff
KO
702 struct bch_fs *c = trans->c;
703 struct btree_iter iter;
704 struct bkey_s_c k;
ef1669ff 705 int ret;
1c6fdbd8 706
43b81a4e 707 w->recalculate_sums = false;
91d961ba 708 w->inodes.nr = 0;
ef1669ff 709
27b2df98
KO
710 for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
711 BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
06dcca51 712 if (k.k->p.offset != inum)
ef1669ff
KO
713 break;
714
3e52c222
KO
715 if (bkey_is_inode(k.k))
716 add_inode(c, w, k);
ef1669ff
KO
717 }
718 bch2_trans_iter_exit(trans, &iter);
719
720 if (ret)
721 return ret;
722
f9f52bc4 723 w->first_this_inode = true;
27b2df98 724 return 0;
06dcca51
KO
725}
726
727static struct inode_walker_entry *
971a1503 728lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w, struct bkey_s_c k)
06dcca51 729{
971a1503
KO
730 bool is_whiteout = k.k->type == KEY_TYPE_whiteout;
731 u32 snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
06dcca51 732
971a1503 733 struct inode_walker_entry *i;
defd9e39 734 __darray_for_each(w->inodes, i)
06dcca51 735 if (bch2_snapshot_is_ancestor(c, snapshot, i->snapshot))
ef1669ff 736 goto found;
06dcca51
KO
737
738 return NULL;
ef1669ff 739found:
06dcca51 740 BUG_ON(snapshot > i->snapshot);
ef1669ff 741
a57f4d61 742 if (snapshot != i->snapshot && !is_whiteout) {
06dcca51 743 struct inode_walker_entry new = *i;
49124d8a 744
06dcca51
KO
745 new.snapshot = snapshot;
746 new.count = 0;
49124d8a 747
971a1503
KO
748 struct printbuf buf = PRINTBUF;
749 bch2_bkey_val_to_text(&buf, c, k);
750
751 bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u\n"
752 "unexpected because we should always update the inode when we update a key in that inode\n"
753 "%s",
754 w->last_pos.inode, snapshot, i->snapshot, buf.buf);
755 printbuf_exit(&buf);
ef1669ff 756
06dcca51 757 while (i > w->inodes.data && i[-1].snapshot > snapshot)
ef1669ff
KO
758 --i;
759
971a1503
KO
760 size_t pos = i - w->inodes.data;
761 int ret = darray_insert_item(&w->inodes, pos, new);
ef1669ff 762 if (ret)
06dcca51 763 return ERR_PTR(ret);
20e6d9a8
KO
764
765 i = w->inodes.data + pos;
ef1669ff
KO
766 }
767
768 return i;
1c6fdbd8
KO
769}
770
06dcca51 771static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
971a1503
KO
772 struct inode_walker *w,
773 struct bkey_s_c k)
06dcca51 774{
971a1503
KO
775 if (w->last_pos.inode != k.k->p.inode) {
776 int ret = get_inodes_all_snapshots(trans, w, k.k->p.inode);
f9f52bc4
KO
777 if (ret)
778 return ERR_PTR(ret);
971a1503 779 } else if (bkey_cmp(w->last_pos, k.k->p)) {
f9f52bc4
KO
780 darray_for_each(w->inodes, i)
781 i->seen_this_pos = false;
f9f52bc4
KO
782 }
783
971a1503 784 w->last_pos = k.k->p;
06dcca51 785
971a1503 786 return lookup_inode_for_snapshot(trans->c, w, k);
06dcca51
KO
787}
788
ef1669ff
KO
789static int __get_visible_inodes(struct btree_trans *trans,
790 struct inode_walker *w,
791 struct snapshots_seen *s,
792 u64 inum)
793{
794 struct bch_fs *c = trans->c;
795 struct btree_iter iter;
796 struct bkey_s_c k;
797 int ret;
798
91d961ba 799 w->inodes.nr = 0;
ef1669ff 800
12043cf1 801 for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
ef1669ff 802 BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
49124d8a
KO
803 u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
804
ef1669ff
KO
805 if (k.k->p.offset != inum)
806 break;
807
49124d8a 808 if (!ref_visible(c, s, s->pos.snapshot, equiv))
ef1669ff
KO
809 continue;
810
49124d8a 811 if (bkey_is_inode(k.k))
3e52c222 812 add_inode(c, w, k);
49124d8a
KO
813
814 if (equiv >= s->pos.snapshot)
815 break;
ef1669ff
KO
816 }
817 bch2_trans_iter_exit(trans, &iter);
818
819 return ret;
820}
821
822static int check_key_has_snapshot(struct btree_trans *trans,
823 struct btree_iter *iter,
824 struct bkey_s_c k)
825{
826 struct bch_fs *c = trans->c;
fa8e94fa 827 struct printbuf buf = PRINTBUF;
ef1669ff
KO
828 int ret = 0;
829
49124d8a 830 if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
b65db750
KO
831 bkey_in_missing_snapshot,
832 "key in missing snapshot: %s",
833 (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
fa8e94fa 834 ret = bch2_btree_delete_at(trans, iter,
285b181a 835 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
ef1669ff 836fsck_err:
fa8e94fa 837 printbuf_exit(&buf);
ef1669ff 838 return ret;
914f2786
KO
839}
840
7ac2c55e
KO
841static int hash_redo_key(struct btree_trans *trans,
842 const struct bch_hash_desc desc,
843 struct bch_hash_info *hash_info,
844 struct btree_iter *k_iter, struct bkey_s_c k)
1c6fdbd8 845{
e3b4b48c 846 struct bkey_i *delete;
1c6fdbd8 847 struct bkey_i *tmp;
1c6fdbd8 848
e3b4b48c
KO
849 delete = bch2_trans_kmalloc(trans, sizeof(*delete));
850 if (IS_ERR(delete))
851 return PTR_ERR(delete);
852
dbda63bb 853 tmp = bch2_bkey_make_mut_noupdate(trans, k);
b1fd23df
KO
854 if (IS_ERR(tmp))
855 return PTR_ERR(tmp);
1c6fdbd8 856
e3b4b48c
KO
857 bkey_init(&delete->k);
858 delete->k.p = k_iter->pos;
8c3f6da9
KO
859 return bch2_btree_iter_traverse(k_iter) ?:
860 bch2_trans_update(trans, k_iter, delete, 0) ?:
23f25223 861 bch2_hash_set_in_snapshot(trans, desc, hash_info,
5877d887
KO
862 (subvol_inum) { 0, k.k->p.inode },
863 k.k->p.snapshot, tmp,
864 BCH_HASH_SET_MUST_CREATE,
865 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
3f0e297d 866 bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
1c6fdbd8
KO
867}
868
7ac2c55e
KO
869static int hash_check_key(struct btree_trans *trans,
870 const struct bch_hash_desc desc,
871 struct bch_hash_info *hash_info,
872 struct btree_iter *k_iter, struct bkey_s_c hash_k)
d69f41d6 873{
424eb881 874 struct bch_fs *c = trans->c;
67e0dd8f 875 struct btree_iter iter = { NULL };
fa8e94fa 876 struct printbuf buf = PRINTBUF;
7ac2c55e
KO
877 struct bkey_s_c k;
878 u64 hash;
d69f41d6
KO
879 int ret = 0;
880
7ac2c55e
KO
881 if (hash_k.k->type != desc.key_type)
882 return 0;
883
884 hash = desc.hash_bkey(hash_info, hash_k);
885
886 if (likely(hash == hash_k.k->p.offset))
d69f41d6
KO
887 return 0;
888
7ac2c55e
KO
889 if (hash_k.k->p.offset < hash)
890 goto bad_hash;
d69f41d6 891
d8f31407 892 for_each_btree_key_norestart(trans, iter, desc.btree_id,
419fc65f 893 SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
d8f31407 894 BTREE_ITER_SLOTS, k, ret) {
e88a75eb 895 if (bkey_eq(k.k->p, hash_k.k->p))
d69f41d6
KO
896 break;
897
7ac2c55e
KO
898 if (fsck_err_on(k.k->type == desc.key_type &&
899 !desc.cmp_bkey(k, hash_k), c,
b65db750 900 hash_table_key_duplicate,
d69f41d6 901 "duplicate hash table keys:\n%s",
fa8e94fa
KO
902 (printbuf_reset(&buf),
903 bch2_bkey_val_to_text(&buf, c, hash_k),
904 buf.buf))) {
285b181a 905 ret = bch2_hash_delete_at(trans, desc, hash_info, k_iter, 0) ?: 1;
d69f41d6
KO
906 break;
907 }
d69f41d6 908
7ac2c55e 909 if (bkey_deleted(k.k)) {
67e0dd8f 910 bch2_trans_iter_exit(trans, &iter);
7ac2c55e 911 goto bad_hash;
1c6fdbd8 912 }
7ac2c55e 913 }
fa8e94fa 914out:
67e0dd8f 915 bch2_trans_iter_exit(trans, &iter);
fa8e94fa 916 printbuf_exit(&buf);
1c6fdbd8 917 return ret;
7ac2c55e 918bad_hash:
b65db750
KO
919 if (fsck_err(c, hash_table_key_wrong_offset,
920 "hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
88dfe193 921 bch2_btree_id_str(desc.btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash,
fa8e94fa 922 (printbuf_reset(&buf),
1ed0a5d2
KO
923 bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
924 ret = hash_redo_key(trans, desc, hash_info, k_iter, hash_k);
d2a990d1 925 bch_err_fn(c, ret);
c8d5b714 926 if (ret)
1ed0a5d2 927 return ret;
1ed0a5d2 928 ret = -BCH_ERR_transaction_restart_nested;
741daa5b 929 }
741daa5b 930fsck_err:
fa8e94fa 931 goto out;
741daa5b
KO
932}
933
0b17618f
KO
934static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans,
935 struct btree_iter *iter,
936 struct bpos pos)
937{
938 return bch2_bkey_get_iter_typed(trans, iter, BTREE_ID_dirents, pos, 0, dirent);
939}
940
941static struct bkey_s_c_dirent inode_get_dirent(struct btree_trans *trans,
942 struct btree_iter *iter,
943 struct bch_inode_unpacked *inode,
944 u32 *snapshot)
945{
946 if (inode->bi_subvol) {
947 u64 inum;
948 int ret = subvol_lookup(trans, inode->bi_parent_subvol, snapshot, &inum);
949 if (ret)
950 return ((struct bkey_s_c_dirent) { .k = ERR_PTR(ret) });
951 }
952
953 return dirent_get_by_pos(trans, iter, SPOS(inode->bi_dir, inode->bi_dir_offset, *snapshot));
954}
955
956static bool inode_points_to_dirent(struct bch_inode_unpacked *inode,
957 struct bkey_s_c_dirent d)
958{
959 return inode->bi_dir == d.k->p.inode &&
960 inode->bi_dir_offset == d.k->p.offset;
961}
962
963static bool dirent_points_to_inode(struct bkey_s_c_dirent d,
964 struct bch_inode_unpacked *inode)
965{
966 return d.v->d_type == DT_SUBVOL
967 ? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol
968 : le64_to_cpu(d.v->d_inum) == inode->bi_inum;
969}
970
359d1bad
KO
971static int check_inode_deleted_list(struct btree_trans *trans, struct bpos p)
972{
973 struct btree_iter iter;
974 struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_deleted_inodes, p, 0);
52f3a72f 975 int ret = bkey_err(k) ?: k.k->type == KEY_TYPE_set;
359d1bad 976 bch2_trans_iter_exit(trans, &iter);
52f3a72f 977 return ret;
359d1bad
KO
978}
979
0b17618f
KO
980static int check_inode_dirent_inode(struct btree_trans *trans, struct bkey_s_c inode_k,
981 struct bch_inode_unpacked *inode,
982 u32 inode_snapshot, bool *write_inode)
983{
984 struct bch_fs *c = trans->c;
985 struct printbuf buf = PRINTBUF;
986
987 struct btree_iter dirent_iter = {};
988 struct bkey_s_c_dirent d = inode_get_dirent(trans, &dirent_iter, inode, &inode_snapshot);
989 int ret = bkey_err(d);
990 if (ret && !bch2_err_matches(ret, ENOENT))
991 return ret;
992
993 if (fsck_err_on(ret,
994 c, inode_points_to_missing_dirent,
995 "inode points to missing dirent\n%s",
996 (bch2_bkey_val_to_text(&buf, c, inode_k), buf.buf)) ||
997 fsck_err_on(!ret && !dirent_points_to_inode(d, inode),
998 c, inode_points_to_wrong_dirent,
999 "inode points to dirent that does not point back:\n%s",
1000 (bch2_bkey_val_to_text(&buf, c, inode_k),
1001 prt_newline(&buf),
1002 bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
1003 /*
1004 * We just clear the backpointer fields for now. If we find a
1005 * dirent that points to this inode in check_dirents(), we'll
1006 * update it then; then when we get to check_path() if the
1007 * backpointer is still 0 we'll reattach it.
1008 */
1009 inode->bi_dir = 0;
1010 inode->bi_dir_offset = 0;
1011 inode->bi_flags &= ~BCH_INODE_backptr_untrusted;
1012 *write_inode = true;
1013 }
1014
1015 ret = 0;
1016fsck_err:
1017 bch2_trans_iter_exit(trans, &dirent_iter);
1018 printbuf_exit(&buf);
1019 bch_err_fn(c, ret);
1020 return ret;
1021}
1022
5c16add5
KO
1023static int check_inode(struct btree_trans *trans,
1024 struct btree_iter *iter,
a1783320 1025 struct bkey_s_c k,
ef1669ff 1026 struct bch_inode_unpacked *prev,
49124d8a 1027 struct snapshots_seen *s,
285b181a 1028 bool full)
5c16add5
KO
1029{
1030 struct bch_fs *c = trans->c;
285b181a 1031 struct bch_inode_unpacked u;
5c16add5 1032 bool do_update = false;
285b181a
KO
1033 int ret;
1034
285b181a 1035 ret = check_key_has_snapshot(trans, iter, k);
e492e7b6
KO
1036 if (ret < 0)
1037 goto err;
285b181a 1038 if (ret)
e492e7b6 1039 return 0;
285b181a 1040
49124d8a
KO
1041 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
1042 if (ret)
1043 goto err;
1044
3e52c222 1045 if (!bkey_is_inode(k.k))
285b181a
KO
1046 return 0;
1047
3e52c222 1048 BUG_ON(bch2_inode_unpack(k, &u));
285b181a
KO
1049
1050 if (!full &&
103ffe9a
KO
1051 !(u.bi_flags & (BCH_INODE_i_size_dirty|
1052 BCH_INODE_i_sectors_dirty|
1053 BCH_INODE_unlinked)))
285b181a
KO
1054 return 0;
1055
285b181a
KO
1056 if (prev->bi_inum != u.bi_inum)
1057 *prev = u;
1058
1059 if (fsck_err_on(prev->bi_hash_seed != u.bi_hash_seed ||
b65db750
KO
1060 inode_d_type(prev) != inode_d_type(&u),
1061 c, inode_snapshot_mismatch,
ef1669ff
KO
1062 "inodes in different snapshots don't match")) {
1063 bch_err(c, "repair not implemented yet");
359d1bad 1064 return -BCH_ERR_fsck_repair_unimplemented;
ef1669ff 1065 }
5c16add5 1066
103ffe9a 1067 if ((u.bi_flags & (BCH_INODE_i_size_dirty|BCH_INODE_unlinked)) &&
a111901f
KO
1068 bch2_key_has_snapshot_overwrites(trans, BTREE_ID_inodes, k.k->p)) {
1069 struct bpos new_min_pos;
1070
1071 ret = bch2_propagate_key_to_snapshot_leaves(trans, iter->btree_id, k, &new_min_pos);
1072 if (ret)
1073 goto err;
1074
103ffe9a 1075 u.bi_flags &= ~BCH_INODE_i_size_dirty|BCH_INODE_unlinked;
a111901f 1076
69c8e6ce
KO
1077 ret = __bch2_fsck_write_inode(trans, &u, iter->pos.snapshot);
1078
d2a990d1
KO
1079 bch_err_msg(c, ret, "in fsck updating inode");
1080 if (ret)
a111901f 1081 return ret;
a111901f
KO
1082
1083 if (!bpos_eq(new_min_pos, POS_MIN))
1084 bch2_btree_iter_set_pos(iter, bpos_predecessor(new_min_pos));
1085 return 0;
1086 }
1087
074cbcda 1088 if (u.bi_flags & BCH_INODE_unlinked) {
359d1bad
KO
1089 ret = check_inode_deleted_list(trans, k.k->p);
1090 if (ret < 0)
1091 return ret;
1092
52f3a72f 1093 fsck_err_on(!ret, c, unlinked_inode_not_on_deleted_list,
359d1bad
KO
1094 "inode %llu:%u unlinked, but not on deleted list",
1095 u.bi_inum, k.k->p.snapshot);
1096 ret = 0;
1097 }
1098
103ffe9a 1099 if (u.bi_flags & BCH_INODE_unlinked &&
5c16add5 1100 (!c->sb.clean ||
b65db750
KO
1101 fsck_err(c, inode_unlinked_but_clean,
1102 "filesystem marked clean, but inode %llu unlinked",
5c16add5 1103 u.bi_inum))) {
7904c82c 1104 ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
d2a990d1 1105 bch_err_msg(c, ret, "in fsck deleting inode");
5c16add5
KO
1106 return ret;
1107 }
1108
103ffe9a 1109 if (u.bi_flags & BCH_INODE_i_size_dirty &&
5c16add5 1110 (!c->sb.clean ||
b65db750
KO
1111 fsck_err(c, inode_i_size_dirty_but_clean,
1112 "filesystem marked clean, but inode %llu has i_size dirty",
5c16add5
KO
1113 u.bi_inum))) {
1114 bch_verbose(c, "truncating inode %llu", u.bi_inum);
1115
5c16add5
KO
1116 /*
1117 * XXX: need to truncate partial blocks too here - or ideally
1118 * just switch units to bytes and that issue goes away
1119 */
1120 ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
ef1669ff
KO
1121 SPOS(u.bi_inum, round_up(u.bi_size, block_bytes(c)) >> 9,
1122 iter->pos.snapshot),
5c16add5 1123 POS(u.bi_inum, U64_MAX),
ef1669ff 1124 0, NULL);
d2a990d1 1125 bch_err_msg(c, ret, "in fsck truncating inode");
51fe0332 1126 if (ret)
5c16add5 1127 return ret;
5c16add5
KO
1128
1129 /*
1130 * We truncated without our normal sector accounting hook, just
1131 * make sure we recalculate it:
1132 */
103ffe9a 1133 u.bi_flags |= BCH_INODE_i_sectors_dirty;
5c16add5 1134
103ffe9a 1135 u.bi_flags &= ~BCH_INODE_i_size_dirty;
5c16add5
KO
1136 do_update = true;
1137 }
1138
103ffe9a 1139 if (u.bi_flags & BCH_INODE_i_sectors_dirty &&
5c16add5 1140 (!c->sb.clean ||
b65db750
KO
1141 fsck_err(c, inode_i_sectors_dirty_but_clean,
1142 "filesystem marked clean, but inode %llu has i_sectors dirty",
5c16add5
KO
1143 u.bi_inum))) {
1144 s64 sectors;
1145
1146 bch_verbose(c, "recounting sectors for inode %llu",
1147 u.bi_inum);
1148
ef1669ff 1149 sectors = bch2_count_inode_sectors(trans, u.bi_inum, iter->pos.snapshot);
5c16add5 1150 if (sectors < 0) {
d2a990d1 1151 bch_err_msg(c, sectors, "in fsck recounting inode sectors");
5c16add5
KO
1152 return sectors;
1153 }
1154
1155 u.bi_sectors = sectors;
103ffe9a 1156 u.bi_flags &= ~BCH_INODE_i_sectors_dirty;
5c16add5
KO
1157 do_update = true;
1158 }
1159
103ffe9a 1160 if (u.bi_flags & BCH_INODE_backptr_untrusted) {
d3ff7fec
KO
1161 u.bi_dir = 0;
1162 u.bi_dir_offset = 0;
103ffe9a 1163 u.bi_flags &= ~BCH_INODE_backptr_untrusted;
5c16add5
KO
1164 do_update = true;
1165 }
1166
0b17618f
KO
1167 if (u.bi_dir || u.bi_dir_offset) {
1168 ret = check_inode_dirent_inode(trans, k, &u, k.k->p.snapshot, &do_update);
1169 if (ret)
1170 goto err;
1171 }
1172
0b498a5a
KO
1173 if (fsck_err_on(u.bi_parent_subvol &&
1174 (u.bi_subvol == 0 ||
1175 u.bi_subvol == BCACHEFS_ROOT_SUBVOL),
f4e68c85 1176 c, inode_bi_parent_nonzero,
0b498a5a
KO
1177 "inode %llu:%u has subvol %u but nonzero parent subvol %u",
1178 u.bi_inum, k.k->p.snapshot, u.bi_subvol, u.bi_parent_subvol)) {
1179 u.bi_parent_subvol = 0;
1180 do_update = true;
1181 }
1182
f2b02d09
KO
1183 if (u.bi_subvol) {
1184 struct bch_subvolume s;
1185
1186 ret = bch2_subvolume_get(trans, u.bi_subvol, false, 0, &s);
1187 if (ret && !bch2_err_matches(ret, ENOENT))
1188 goto err;
1189
cc053290
KO
1190 if (ret && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
1191 ret = reconstruct_subvol(trans, k.k->p.snapshot, u.bi_subvol, u.bi_inum);
1192 goto do_update;
1193 }
1194
f2b02d09
KO
1195 if (fsck_err_on(ret,
1196 c, inode_bi_subvol_missing,
1197 "inode %llu:%u bi_subvol points to missing subvolume %u",
1198 u.bi_inum, k.k->p.snapshot, u.bi_subvol) ||
1199 fsck_err_on(le64_to_cpu(s.inode) != u.bi_inum ||
1200 !bch2_snapshot_is_ancestor(c, le32_to_cpu(s.snapshot),
1201 k.k->p.snapshot),
1202 c, inode_bi_subvol_wrong,
1203 "inode %llu:%u points to subvol %u, but subvol points to %llu:%u",
1204 u.bi_inum, k.k->p.snapshot, u.bi_subvol,
1205 le64_to_cpu(s.inode),
1206 le32_to_cpu(s.snapshot))) {
1207 u.bi_subvol = 0;
1208 u.bi_parent_subvol = 0;
1209 do_update = true;
1210 }
1211 }
cc053290 1212do_update:
5c16add5 1213 if (do_update) {
69c8e6ce 1214 ret = __bch2_fsck_write_inode(trans, &u, iter->pos.snapshot);
d2a990d1 1215 bch_err_msg(c, ret, "in fsck updating inode");
a190cbcf 1216 if (ret)
a111901f 1217 return ret;
5c16add5 1218 }
e492e7b6 1219err:
5c16add5 1220fsck_err:
d2a990d1 1221 bch_err_fn(c, ret);
5c16add5
KO
1222 return ret;
1223}
1224
067d228b 1225int bch2_check_inodes(struct bch_fs *c)
5c16add5 1226{
067d228b 1227 bool full = c->opts.fsck;
285b181a 1228 struct bch_inode_unpacked prev = { 0 };
49124d8a 1229 struct snapshots_seen s;
5c16add5 1230
49124d8a 1231 snapshots_seen_init(&s);
5c16add5 1232
4eb3877e
KO
1233 int ret = bch2_trans_run(c,
1234 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
1235 POS_MIN,
1236 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1237 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1238 check_inode(trans, &iter, k, &prev, &s, full)));
ef1669ff 1239
49124d8a 1240 snapshots_seen_exit(&s);
d2a990d1 1241 bch_err_fn(c, ret);
285b181a
KO
1242 return ret;
1243}
ef1669ff 1244
109ea419 1245static int check_i_sectors_notnested(struct btree_trans *trans, struct inode_walker *w)
ef1669ff
KO
1246{
1247 struct bch_fs *c = trans->c;
0763c552 1248 int ret = 0;
ef1669ff
KO
1249 s64 count2;
1250
91d961ba 1251 darray_for_each(w->inodes, i) {
ef1669ff
KO
1252 if (i->inode.bi_sectors == i->count)
1253 continue;
1254
f9f52bc4 1255 count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot);
ef1669ff 1256
43b81a4e 1257 if (w->recalculate_sums)
ef1669ff 1258 i->count = count2;
43b81a4e
KO
1259
1260 if (i->count != count2) {
fa14b504
KO
1261 bch_err_ratelimited(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
1262 w->last_pos.inode, i->snapshot, i->count, count2);
43b81a4e 1263 return -BCH_ERR_internal_fsck_err;
ef1669ff
KO
1264 }
1265
103ffe9a 1266 if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_sectors_dirty),
b65db750
KO
1267 c, inode_i_sectors_wrong,
1268 "inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
1269 w->last_pos.inode, i->snapshot,
1270 i->inode.bi_sectors, i->count)) {
1ed0a5d2 1271 i->inode.bi_sectors = i->count;
69c8e6ce 1272 ret = bch2_fsck_write_inode(trans, &i->inode, i->snapshot);
1ed0a5d2
KO
1273 if (ret)
1274 break;
1ed0a5d2 1275 }
ef1669ff
KO
1276 }
1277fsck_err:
d2a990d1 1278 bch_err_fn(c, ret);
109ea419
KO
1279 return ret;
1280}
1281
1282static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
1283{
1284 u32 restart_count = trans->restart_count;
1285 return check_i_sectors_notnested(trans, w) ?:
1286 trans_was_restarted(trans, restart_count);
ef1669ff
KO
1287}
1288
c58029ec
DH
1289struct extent_end {
1290 u32 snapshot;
1291 u64 offset;
1292 struct snapshots_seen seen;
1293};
1294
a397b8df
KO
1295struct extent_ends {
1296 struct bpos last_pos;
1297 DARRAY(struct extent_end) e;
1298};
1299
1300static void extent_ends_reset(struct extent_ends *extent_ends)
1301{
a397b8df
KO
1302 darray_for_each(extent_ends->e, i)
1303 snapshots_seen_exit(&i->seen);
a397b8df
KO
1304 extent_ends->e.nr = 0;
1305}
1306
1307static void extent_ends_exit(struct extent_ends *extent_ends)
1308{
1309 extent_ends_reset(extent_ends);
1310 darray_exit(&extent_ends->e);
1311}
1312
1313static void extent_ends_init(struct extent_ends *extent_ends)
1314{
1315 memset(extent_ends, 0, sizeof(*extent_ends));
1316}
1317
1318static int extent_ends_at(struct bch_fs *c,
1319 struct extent_ends *extent_ends,
1320 struct snapshots_seen *seen,
1321 struct bkey_s_c k)
1322{
1323 struct extent_end *i, n = (struct extent_end) {
1324 .offset = k.k->p.offset,
1325 .snapshot = k.k->p.snapshot,
1326 .seen = *seen,
1327 };
1328
1329 n.seen.ids.data = kmemdup(seen->ids.data,
1330 sizeof(seen->ids.data[0]) * seen->ids.size,
1331 GFP_KERNEL);
1332 if (!n.seen.ids.data)
1333 return -BCH_ERR_ENOMEM_fsck_extent_ends_at;
1334
defd9e39 1335 __darray_for_each(extent_ends->e, i) {
a397b8df
KO
1336 if (i->snapshot == k.k->p.snapshot) {
1337 snapshots_seen_exit(&i->seen);
1338 *i = n;
1339 return 0;
1340 }
1341
1342 if (i->snapshot >= k.k->p.snapshot)
1343 break;
1344 }
1345
1346 return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n);
1347}
c58029ec 1348
43b81a4e
KO
1349static int overlapping_extents_found(struct btree_trans *trans,
1350 enum btree_id btree,
e2bd0617
KO
1351 struct bpos pos1, struct snapshots_seen *pos1_seen,
1352 struct bkey pos2,
1353 bool *fixed,
1354 struct extent_end *extent_end)
454377d8 1355{
43b81a4e
KO
1356 struct bch_fs *c = trans->c;
1357 struct printbuf buf = PRINTBUF;
e2bd0617
KO
1358 struct btree_iter iter1, iter2 = { NULL };
1359 struct bkey_s_c k1, k2;
454377d8
KO
1360 int ret;
1361
43b81a4e
KO
1362 BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
1363
e2bd0617
KO
1364 bch2_trans_iter_init(trans, &iter1, btree, pos1,
1365 BTREE_ITER_ALL_SNAPSHOTS|
1366 BTREE_ITER_NOT_EXTENTS);
1367 k1 = bch2_btree_iter_peek_upto(&iter1, POS(pos1.inode, U64_MAX));
1368 ret = bkey_err(k1);
454377d8 1369 if (ret)
43b81a4e
KO
1370 goto err;
1371
1372 prt_str(&buf, "\n ");
e2bd0617 1373 bch2_bkey_val_to_text(&buf, c, k1);
43b81a4e 1374
e2bd0617
KO
1375 if (!bpos_eq(pos1, k1.k->p)) {
1376 prt_str(&buf, "\n wanted\n ");
1377 bch2_bpos_to_text(&buf, pos1);
1378 prt_str(&buf, "\n ");
1379 bch2_bkey_to_text(&buf, &pos2);
1380
1381 bch_err(c, "%s: error finding first overlapping extent when repairing, got%s",
43b81a4e
KO
1382 __func__, buf.buf);
1383 ret = -BCH_ERR_internal_fsck_err;
1384 goto err;
1385 }
1386
e2bd0617
KO
1387 bch2_trans_copy_iter(&iter2, &iter1);
1388
43b81a4e 1389 while (1) {
e2bd0617 1390 bch2_btree_iter_advance(&iter2);
43b81a4e 1391
e2bd0617
KO
1392 k2 = bch2_btree_iter_peek_upto(&iter2, POS(pos1.inode, U64_MAX));
1393 ret = bkey_err(k2);
43b81a4e
KO
1394 if (ret)
1395 goto err;
1396
e2bd0617 1397 if (bpos_ge(k2.k->p, pos2.p))
43b81a4e 1398 break;
43b81a4e
KO
1399 }
1400
1401 prt_str(&buf, "\n ");
e2bd0617 1402 bch2_bkey_val_to_text(&buf, c, k2);
43b81a4e 1403
e2bd0617
KO
1404 if (bpos_gt(k2.k->p, pos2.p) ||
1405 pos2.size != k2.k->size) {
43b81a4e
KO
1406 bch_err(c, "%s: error finding seconding overlapping extent when repairing%s",
1407 __func__, buf.buf);
1408 ret = -BCH_ERR_internal_fsck_err;
1409 goto err;
1410 }
1411
e2bd0617
KO
1412 prt_printf(&buf, "\n overwriting %s extent",
1413 pos1.snapshot >= pos2.p.snapshot ? "first" : "second");
1414
b65db750
KO
1415 if (fsck_err(c, extent_overlapping,
1416 "overlapping extents%s", buf.buf)) {
e2bd0617
KO
1417 struct btree_iter *old_iter = &iter1;
1418 struct disk_reservation res = { 0 };
454377d8 1419
e2bd0617
KO
1420 if (pos1.snapshot < pos2.p.snapshot) {
1421 old_iter = &iter2;
1422 swap(k1, k2);
1423 }
1424
6474b706 1425 trans->extra_disk_res += bch2_bkey_sectors_compressed(k2);
e2bd0617
KO
1426
1427 ret = bch2_trans_update_extent_overwrite(trans, old_iter,
1428 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
1429 k1, k2) ?:
3f0e297d 1430 bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc);
e2bd0617
KO
1431 bch2_disk_reservation_put(c, &res);
1432
1433 if (ret)
43b81a4e
KO
1434 goto err;
1435
1436 *fixed = true;
e2bd0617
KO
1437
1438 if (pos1.snapshot == pos2.p.snapshot) {
1439 /*
1440 * We overwrote the first extent, and did the overwrite
1441 * in the same snapshot:
1442 */
1443 extent_end->offset = bkey_start_offset(&pos2);
1444 } else if (pos1.snapshot > pos2.p.snapshot) {
1445 /*
1446 * We overwrote the first extent in pos2's snapshot:
1447 */
1448 ret = snapshots_seen_add_inorder(c, pos1_seen, pos2.p.snapshot);
1449 } else {
1450 /*
1451 * We overwrote the second extent - restart
1452 * check_extent() from the top:
1453 */
1454 ret = -BCH_ERR_transaction_restart_nested;
1455 }
43b81a4e
KO
1456 }
1457fsck_err:
1458err:
e2bd0617
KO
1459 bch2_trans_iter_exit(trans, &iter2);
1460 bch2_trans_iter_exit(trans, &iter1);
43b81a4e
KO
1461 printbuf_exit(&buf);
1462 return ret;
454377d8
KO
1463}
1464
c58029ec
DH
1465static int check_overlapping_extents(struct btree_trans *trans,
1466 struct snapshots_seen *seen,
a397b8df 1467 struct extent_ends *extent_ends,
c58029ec 1468 struct bkey_s_c k,
a397b8df 1469 u32 equiv,
e2bd0617
KO
1470 struct btree_iter *iter,
1471 bool *fixed)
c58029ec
DH
1472{
1473 struct bch_fs *c = trans->c;
c58029ec
DH
1474 int ret = 0;
1475
a397b8df
KO
1476 /* transaction restart, running again */
1477 if (bpos_eq(extent_ends->last_pos, k.k->p))
1478 return 0;
1479
1480 if (extent_ends->last_pos.inode != k.k->p.inode)
1481 extent_ends_reset(extent_ends);
1482
1483 darray_for_each(extent_ends->e, i) {
1484 if (i->offset <= bkey_start_offset(k.k))
c58029ec
DH
1485 continue;
1486
1487 if (!ref_visible2(c,
1488 k.k->p.snapshot, seen,
1489 i->snapshot, &i->seen))
1490 continue;
1491
a397b8df
KO
1492 ret = overlapping_extents_found(trans, iter->btree_id,
1493 SPOS(iter->pos.inode,
1494 i->offset,
1495 i->snapshot),
e2bd0617
KO
1496 &i->seen,
1497 *k.k, fixed, i);
a397b8df
KO
1498 if (ret)
1499 goto err;
c58029ec
DH
1500 }
1501
a397b8df
KO
1502 extent_ends->last_pos = k.k->p;
1503err:
e2bd0617 1504 return ret;
c58029ec
DH
1505}
1506
9db2f860
KO
1507static int check_extent_overbig(struct btree_trans *trans, struct btree_iter *iter,
1508 struct bkey_s_c k)
1509{
1510 struct bch_fs *c = trans->c;
1511 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
1512 struct bch_extent_crc_unpacked crc;
1513 const union bch_extent_entry *i;
1514 unsigned encoded_extent_max_sectors = c->opts.encoded_extent_max >> 9;
1515
1516 bkey_for_each_crc(k.k, ptrs, crc, i)
1517 if (crc_is_encoded(crc) &&
1518 crc.uncompressed_size > encoded_extent_max_sectors) {
1519 struct printbuf buf = PRINTBUF;
1520
1521 bch2_bkey_val_to_text(&buf, c, k);
1522 bch_err(c, "overbig encoded extent, please report this:\n %s", buf.buf);
1523 printbuf_exit(&buf);
1524 }
1525
1526 return 0;
1527}
1528
ef1669ff 1529static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
a1783320 1530 struct bkey_s_c k,
ef1669ff 1531 struct inode_walker *inode,
c58029ec 1532 struct snapshots_seen *s,
a397b8df 1533 struct extent_ends *extent_ends)
ef1669ff
KO
1534{
1535 struct bch_fs *c = trans->c;
ef1669ff 1536 struct inode_walker_entry *i;
fa8e94fa 1537 struct printbuf buf = PRINTBUF;
0d8f320d 1538 struct bpos equiv = k.k->p;
ef1669ff 1539 int ret = 0;
ef1669ff 1540
0d8f320d
KO
1541 equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
1542
ef1669ff 1543 ret = check_key_has_snapshot(trans, iter, k);
fa8e94fa
KO
1544 if (ret) {
1545 ret = ret < 0 ? ret : 0;
1546 goto out;
1547 }
ef1669ff 1548
f9f52bc4 1549 if (inode->last_pos.inode != k.k->p.inode) {
ef1669ff
KO
1550 ret = check_i_sectors(trans, inode);
1551 if (ret)
fa8e94fa 1552 goto err;
ef1669ff 1553 }
292dea86 1554
971a1503 1555 i = walk_inode(trans, inode, k);
a57f4d61
KO
1556 ret = PTR_ERR_OR_ZERO(i);
1557 if (ret)
1558 goto err;
1559
650eb16b 1560 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
c58029ec
DH
1561 if (ret)
1562 goto err;
ef1669ff 1563
650eb16b 1564 if (k.k->type != KEY_TYPE_whiteout) {
09d4c2ac
KO
1565 if (!i && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) {
1566 ret = reconstruct_reg_inode(trans, k.k->p.snapshot, k.k->p.inode) ?:
1567 bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
1568 if (ret)
1569 goto err;
1570
1571 inode->last_pos.inode--;
1572 ret = -BCH_ERR_transaction_restart_nested;
1573 goto err;
1574 }
1575
b65db750 1576 if (fsck_err_on(!i, c, extent_in_missing_inode,
650eb16b
KO
1577 "extent in missing inode:\n %s",
1578 (printbuf_reset(&buf),
1579 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
1580 goto delete;
1581
1582 if (fsck_err_on(i &&
1583 !S_ISREG(i->inode.bi_mode) &&
b65db750
KO
1584 !S_ISLNK(i->inode.bi_mode),
1585 c, extent_in_non_reg_inode,
650eb16b
KO
1586 "extent in non regular inode mode %o:\n %s",
1587 i->inode.bi_mode,
1588 (printbuf_reset(&buf),
1589 bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
1590 goto delete;
ef1669ff 1591
a397b8df 1592 ret = check_overlapping_extents(trans, s, extent_ends, k,
e2bd0617
KO
1593 equiv.snapshot, iter,
1594 &inode->recalculate_sums);
650eb16b 1595 if (ret)
e2bd0617 1596 goto err;
650eb16b 1597 }
ef1669ff 1598
0d06b4ec 1599 /*
a0076086
KO
1600 * Check inodes in reverse order, from oldest snapshots to newest,
1601 * starting from the inode that matches this extent's snapshot. If we
1602 * didn't have one, iterate over all inodes:
0d06b4ec 1603 */
a0076086
KO
1604 if (!i)
1605 i = inode->inodes.data + inode->inodes.nr - 1;
1606
1607 for (;
650eb16b 1608 inode->inodes.data && i >= inode->inodes.data;
0d06b4ec
KO
1609 --i) {
1610 if (i->snapshot > equiv.snapshot ||
1611 !key_visible_in_snapshot(c, s, i->snapshot, equiv.snapshot))
1612 continue;
1613
650eb16b 1614 if (k.k->type != KEY_TYPE_whiteout) {
103ffe9a 1615 if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_i_size_dirty) &&
650eb16b 1616 k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
b65db750
KO
1617 !bkey_extent_is_reservation(k),
1618 c, extent_past_end_of_inode,
650eb16b
KO
1619 "extent type past end of inode %llu:%u, i_size %llu\n %s",
1620 i->inode.bi_inum, i->snapshot, i->inode.bi_size,
1621 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1622 struct btree_iter iter2;
1623
1624 bch2_trans_copy_iter(&iter2, iter);
1625 bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
1626 ret = bch2_btree_iter_traverse(&iter2) ?:
1627 bch2_btree_delete_at(trans, &iter2,
1628 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1629 bch2_trans_iter_exit(trans, &iter2);
0d06b4ec
KO
1630 if (ret)
1631 goto err;
650eb16b 1632
650eb16b 1633 iter->k.type = KEY_TYPE_whiteout;
ef1669ff 1634 }
a0076086
KO
1635
1636 if (bkey_extent_is_allocation(k.k))
1637 i->count += k.k->size;
ef1669ff 1638 }
8a85b20c 1639
a0076086
KO
1640 i->seen_this_pos = true;
1641 }
eab3a3ce
KO
1642
1643 if (k.k->type != KEY_TYPE_whiteout) {
1644 ret = extent_ends_at(c, extent_ends, s, k);
1645 if (ret)
1646 goto err;
1647 }
fa8e94fa
KO
1648out:
1649err:
ef1669ff 1650fsck_err:
fa8e94fa 1651 printbuf_exit(&buf);
d2a990d1 1652 bch_err_fn(c, ret);
8a85b20c 1653 return ret;
0d8f320d
KO
1654delete:
1655 ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
1656 goto out;
8a85b20c
KO
1657}
1658
1c6fdbd8
KO
1659/*
1660 * Walk extents: verify that extents have a corresponding S_ISREG inode, and
1661 * that i_size an i_sectors are consistent
1662 */
067d228b 1663int bch2_check_extents(struct bch_fs *c)
1c6fdbd8
KO
1664{
1665 struct inode_walker w = inode_walker_init();
ef1669ff 1666 struct snapshots_seen s;
a397b8df 1667 struct extent_ends extent_ends;
e3dc75eb 1668 struct disk_reservation res = { 0 };
1c6fdbd8 1669
ef1669ff 1670 snapshots_seen_init(&s);
a397b8df 1671 extent_ends_init(&extent_ends);
424eb881 1672
4eb3877e
KO
1673 int ret = bch2_trans_run(c,
1674 for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
1675 POS(BCACHEFS_ROOT_INO, 0),
1676 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
1677 &res, NULL,
1678 BCH_TRANS_COMMIT_no_enospc, ({
1679 bch2_disk_reservation_put(c, &res);
1680 check_extent(trans, &iter, k, &w, &s, &extent_ends) ?:
1681 check_extent_overbig(trans, &iter, k);
1682 })) ?:
109ea419 1683 check_i_sectors_notnested(trans, &w));
c58029ec 1684
e3dc75eb 1685 bch2_disk_reservation_put(c, &res);
a397b8df 1686 extent_ends_exit(&extent_ends);
ef1669ff 1687 inode_walker_exit(&w);
ef1669ff
KO
1688 snapshots_seen_exit(&s);
1689
d2a990d1 1690 bch_err_fn(c, ret);
ef1669ff
KO
1691 return ret;
1692}
1693
9db2f860
KO
1694int bch2_check_indirect_extents(struct bch_fs *c)
1695{
9db2f860 1696 struct disk_reservation res = { 0 };
9db2f860 1697
4eb3877e
KO
1698 int ret = bch2_trans_run(c,
1699 for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
1700 POS_MIN,
1701 BTREE_ITER_PREFETCH, k,
1702 &res, NULL,
1703 BCH_TRANS_COMMIT_no_enospc, ({
1704 bch2_disk_reservation_put(c, &res);
1705 check_extent_overbig(trans, &iter, k);
1706 })));
9db2f860
KO
1707
1708 bch2_disk_reservation_put(c, &res);
9db2f860
KO
1709 bch_err_fn(c, ret);
1710 return ret;
1711}
1712
109ea419 1713static int check_subdir_count_notnested(struct btree_trans *trans, struct inode_walker *w)
ef1669ff
KO
1714{
1715 struct bch_fs *c = trans->c;
0763c552 1716 int ret = 0;
ef1669ff
KO
1717 s64 count2;
1718
91d961ba 1719 darray_for_each(w->inodes, i) {
ef1669ff
KO
1720 if (i->inode.bi_nlink == i->count)
1721 continue;
1722
f9f52bc4 1723 count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot);
9e343161
KO
1724 if (count2 < 0)
1725 return count2;
ef1669ff
KO
1726
1727 if (i->count != count2) {
fa14b504
KO
1728 bch_err_ratelimited(c, "fsck counted subdirectories wrong for inum %llu:%u: got %llu should be %llu",
1729 w->last_pos.inode, i->snapshot, i->count, count2);
ef1669ff
KO
1730 i->count = count2;
1731 if (i->inode.bi_nlink == i->count)
1732 continue;
1733 }
1734
b65db750
KO
1735 if (fsck_err_on(i->inode.bi_nlink != i->count,
1736 c, inode_dir_wrong_nlink,
ef1669ff 1737 "directory %llu:%u with wrong i_nlink: got %u, should be %llu",
f9f52bc4 1738 w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) {
ef1669ff 1739 i->inode.bi_nlink = i->count;
69c8e6ce 1740 ret = bch2_fsck_write_inode(trans, &i->inode, i->snapshot);
1741 if (ret)
1742 break;
1743 }
1744 }
1745fsck_err:
d2a990d1 1746 bch_err_fn(c, ret);
1747 return ret;
1748}
1749
1750static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
1751{
1752 u32 restart_count = trans->restart_count;
1753 return check_subdir_count_notnested(trans, w) ?:
1754 trans_was_restarted(trans, restart_count);
ef1669ff 1755}
abcecb49 1756
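/*
 * Check the relationship between a dirent and the inode it points to: the
 * inode's backpointer (bi_dir/bi_dir_offset) should point back at this
 * dirent. Repair missing or stale backpointers, delete duplicate links to
 * directories and subvolumes, and fix i_nlink on hardlinked files that
 * claim nlink 0:
 */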
0b17618f 1757static int check_dirent_inode_dirent(struct btree_trans *trans,
1758 struct btree_iter *iter,
1759 struct bkey_s_c_dirent d,
1760 struct bch_inode_unpacked *target,
1761 u32 target_snapshot)
1762{
1763 struct bch_fs *c = trans->c;
3d4998c2 1764 struct printbuf buf = PRINTBUF;
1765 int ret = 0;
1766
1767 if (inode_points_to_dirent(target, d))
1768 return 0;
1769
1770 if (!target->bi_dir &&
1771 !target->bi_dir_offset) {
1772 target->bi_dir = d.k->p.inode;
1773 target->bi_dir_offset = d.k->p.offset;
e539ebb8 1774 return __bch2_fsck_write_inode(trans, target, target_snapshot);
1775 }
1776
1777 struct btree_iter bp_iter = { NULL };
1778 struct bkey_s_c_dirent bp_dirent = dirent_get_by_pos(trans, &bp_iter,
1779 SPOS(target->bi_dir, target->bi_dir_offset, target_snapshot));
1780 ret = bkey_err(bp_dirent);
1781 if (ret && !bch2_err_matches(ret, ENOENT))
1782 goto err;
e3e464ac 1783
1784 bool backpointer_exists = !ret;
1785 ret = 0;
e3e464ac 1786
1787 if (fsck_err_on(!backpointer_exists,
1788 c, inode_wrong_backpointer,
1789 "inode %llu:%u has wrong backpointer:\n"
1790 "got %llu:%llu\n"
1791 "should be %llu:%llu",
1792 target->bi_inum, target_snapshot,
1793 target->bi_dir,
1794 target->bi_dir_offset,
1795 d.k->p.inode,
1796 d.k->p.offset)) {
1797 target->bi_dir = d.k->p.inode;
1798 target->bi_dir_offset = d.k->p.offset;
1799 ret = __bch2_fsck_write_inode(trans, target, target_snapshot);
1800 goto out;
1801 }
e3e464ac 1802
1803 bch2_bkey_val_to_text(&buf, c, d.s_c);
1804 prt_newline(&buf);
1805 if (backpointer_exists)
1806 bch2_bkey_val_to_text(&buf, c, bp_dirent.s_c);
1807
1808 if (fsck_err_on(backpointer_exists &&
1809 (S_ISDIR(target->bi_mode) ||
1810 target->bi_subvol),
1811 c, inode_dir_multiple_links,
1812 "%s %llu:%u with multiple links\n%s",
1813 S_ISDIR(target->bi_mode) ? "directory" : "subvolume",
1814 target->bi_inum, target_snapshot, buf.buf)) {
1815 ret = __remove_dirent(trans, d.k->p);
1816 goto out;
1817 }
1c6fdbd8 1818
	/*
	 * hardlinked file with nlink 0:
	 * We're just adjusting nlink here so check_nlinks() will pick
	 * it up; it ignores inodes with nlink 0
	 */
1824 if (fsck_err_on(backpointer_exists && !target->bi_nlink,
1825 c, inode_multiple_links_but_nlink_0,
1826 "inode %llu:%u type %s has multiple links but i_nlink 0\n%s",
1827 target->bi_inum, target_snapshot, bch2_d_types[d.v->d_type], buf.buf)) {
1828 target->bi_nlink++;
1829 target->bi_flags &= ~BCH_INODE_unlinked;
1830 ret = __bch2_fsck_write_inode(trans, target, target_snapshot);
1831 if (ret)
1832 goto err;
ef1669ff 1833 }
1834out:
1835err:
1836fsck_err:
1837 bch2_trans_iter_exit(trans, &bp_iter);
1838 printbuf_exit(&buf);
1839 bch_err_fn(c, ret);
1840 return ret;
1841}
1842
1843static int check_dirent_target(struct btree_trans *trans,
1844 struct btree_iter *iter,
1845 struct bkey_s_c_dirent d,
1846 struct bch_inode_unpacked *target,
1847 u32 target_snapshot)
1848{
1849 struct bch_fs *c = trans->c;
1850 struct bkey_i_dirent *n;
1851 struct printbuf buf = PRINTBUF;
1852 int ret = 0;
1853
0b17618f 1854 ret = check_dirent_inode_dirent(trans, iter, d, target, target_snapshot);
1855 if (ret)
1856 goto err;
1c6fdbd8 1857
1858 if (fsck_err_on(d.v->d_type != inode_d_type(target),
1859 c, dirent_d_type_wrong,
1860 "incorrect d_type: got %s, should be %s:\n%s",
1861 bch2_d_type_str(d.v->d_type),
1862 bch2_d_type_str(inode_d_type(target)),
1863 (printbuf_reset(&buf),
1864 bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
1865 n = bch2_trans_kmalloc(trans, bkey_bytes(d.k));
1866 ret = PTR_ERR_OR_ZERO(n);
1867 if (ret)
fa8e94fa 1868 goto err;
1869
1870 bkey_reassemble(&n->k_i, d.s_c);
285b181a 1871 n->v.d_type = inode_d_type(target);
1872 if (n->v.d_type == DT_SUBVOL) {
1873 n->v.d_parent_subvol = cpu_to_le32(target->bi_parent_subvol);
1874 n->v.d_child_subvol = cpu_to_le32(target->bi_subvol);
1875 } else {
1876 n->v.d_inum = cpu_to_le64(target->bi_inum);
1877 }
ef1669ff 1878
1879 ret = bch2_trans_update(trans, iter, &n->k_i, 0);
1880 if (ret)
fa8e94fa 1881 goto err;
4db65027 1882
285b181a 1883 d = dirent_i_to_s_c(n);
4db65027 1884 }
ef1669ff 1885err:
1c6fdbd8 1886fsck_err:
fa8e94fa 1887 printbuf_exit(&buf);
d2a990d1 1888 bch_err_fn(c, ret);
ef1669ff 1889 return ret;
1890}
1891
/* find a subvolume that's a descendant of @snapshot: */
1893static int find_snapshot_subvol(struct btree_trans *trans, u32 snapshot, u32 *subvolid)
1894{
1895 struct btree_iter iter;
1896 struct bkey_s_c k;
1897 int ret;
1898
1899 for_each_btree_key_norestart(trans, iter, BTREE_ID_subvolumes, POS_MIN, 0, k, ret) {
1900 if (k.k->type != KEY_TYPE_subvolume)
1901 continue;
1902
1903 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
1904 if (bch2_snapshot_is_ancestor(trans->c, le32_to_cpu(s.v->snapshot), snapshot)) {
1905 bch2_trans_iter_exit(trans, &iter);
1906 *subvolid = k.k->p.offset;
1907 goto found;
1908 }
1909 }
1910 if (!ret)
1911 ret = -ENOENT;
1912found:
1913 bch2_trans_iter_exit(trans, &iter);
1914 return ret;
1915}
1916
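/*
 * A dirent that points to a subvolume (DT_SUBVOL) names both a parent and a
 * child subvolume: verify that the parent subvolume exists and is visible
 * from the dirent's snapshot, that the child subvolume exists, and that the
 * child's fs_path_parent and its root inode's bi_parent_subvol point back at
 * the parent; then check the dirent against the subvolume root inode:
 */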
ea27001e
KO
1917static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *iter,
1918 struct bkey_s_c_dirent d)
11def188
KO
1919{
1920 struct bch_fs *c = trans->c;
b8628a25 1921 struct btree_iter subvol_iter = {};
11def188 1922 struct bch_inode_unpacked subvol_root;
f4e68c85 1923 u32 parent_subvol = le32_to_cpu(d.v->d_parent_subvol);
11def188 1924 u32 target_subvol = le32_to_cpu(d.v->d_child_subvol);
b8628a25 1925 u32 parent_snapshot;
cc053290 1926 u32 new_parent_subvol = 0;
b8628a25 1927 u64 parent_inum;
c60b7f80 1928 struct printbuf buf = PRINTBUF;
11def188
KO
1929 int ret = 0;
1930
c60b7f80
KO
1931 ret = subvol_lookup(trans, parent_subvol, &parent_snapshot, &parent_inum);
1932 if (ret && !bch2_err_matches(ret, ENOENT))
1933 return ret;
1934
cc053290
KO
1935 if (ret ||
1936 (!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot))) {
1937 int ret2 = find_snapshot_subvol(trans, d.k->p.snapshot, &new_parent_subvol);
1938 if (ret2 && !bch2_err_matches(ret, ENOENT))
1939 return ret2;
1940 }
1941
1942 if (ret &&
1943 !new_parent_subvol &&
1944 (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_subvolumes))) {
1945 /*
1946 * Couldn't find a subvol for dirent's snapshot - but we lost
1947 * subvols, so we need to reconstruct:
1948 */
1949 ret = reconstruct_subvol(trans, d.k->p.snapshot, parent_subvol, 0);
1950 if (ret)
1951 return ret;
1952
1953 parent_snapshot = d.k->p.snapshot;
1954 }
1955
c60b7f80
KO
1956 if (fsck_err_on(ret, c, dirent_to_missing_parent_subvol,
1957 "dirent parent_subvol points to missing subvolume\n%s",
1958 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)) ||
1959 fsck_err_on(!ret && !bch2_snapshot_is_ancestor(c, parent_snapshot, d.k->p.snapshot),
1960 c, dirent_not_visible_in_parent_subvol,
1961 "dirent not visible in parent_subvol (not an ancestor of subvol snap %u)\n%s",
1962 parent_snapshot,
1963 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
cc053290
KO
1964 if (!new_parent_subvol) {
1965 bch_err(c, "could not find a subvol for snapshot %u", d.k->p.snapshot);
1966 return -BCH_ERR_fsck_repair_unimplemented;
1967 }
c60b7f80
KO
1968
1969 struct bkey_i_dirent *new_dirent = bch2_bkey_make_mut_typed(trans, iter, &d.s_c, 0, dirent);
1970 ret = PTR_ERR_OR_ZERO(new_dirent);
1971 if (ret)
1972 goto err;
1973
1974 new_dirent->v.d_parent_subvol = cpu_to_le32(new_parent_subvol);
1975 }
1976
b8628a25
KO
1977 struct bkey_s_c_subvolume s =
1978 bch2_bkey_get_iter_typed(trans, &subvol_iter,
1979 BTREE_ID_subvolumes, POS(0, target_subvol),
1980 0, subvolume);
1981 ret = bkey_err(s.s_c);
11def188
KO
1982 if (ret && !bch2_err_matches(ret, ENOENT))
1983 return ret;
1984
f5d58d0c
KO
1985 if (ret) {
1986 if (fsck_err(c, dirent_to_missing_subvol,
1987 "dirent points to missing subvolume\n%s",
1988 (bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf)))
1989 return __remove_dirent(trans, d.k->p);
1990 ret = 0;
1991 goto out;
1992 }
11def188 1993
b8628a25
KO
1994 if (fsck_err_on(le32_to_cpu(s.v->fs_path_parent) != parent_subvol,
1995 c, subvol_fs_path_parent_wrong,
			"subvol with wrong fs_path_parent, should be %u\n%s",
1997 parent_subvol,
1998 (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
1999 struct bkey_i_subvolume *n =
2000 bch2_bkey_make_mut_typed(trans, &subvol_iter, &s.s_c, 0, subvolume);
2001 ret = PTR_ERR_OR_ZERO(n);
2002 if (ret)
2003 goto err;
2004
2005 n->v.fs_path_parent = cpu_to_le32(parent_subvol);
2006 }
2007
2008 u64 target_inum = le64_to_cpu(s.v->inode);
2009 u32 target_snapshot = le32_to_cpu(s.v->snapshot);
2010
2011 ret = lookup_inode(trans, target_inum, &subvol_root, &target_snapshot);
11def188 2012 if (ret && !bch2_err_matches(ret, ENOENT))
cc053290 2013 goto err;
11def188 2014
cc053290
KO
2015 if (ret) {
2016 bch_err(c, "subvol %u points to missing inode root %llu", target_subvol, target_inum);
2017 ret = -BCH_ERR_fsck_repair_unimplemented;
2018 ret = 0;
2019 goto err;
2020 }
2021
2022 if (fsck_err_on(!ret && parent_subvol != subvol_root.bi_parent_subvol,
f4e68c85
KO
2023 c, inode_bi_parent_wrong,
2024 "subvol root %llu has wrong bi_parent_subvol: got %u, should be %u",
2025 target_inum,
2026 subvol_root.bi_parent_subvol, parent_subvol)) {
2027 subvol_root.bi_parent_subvol = parent_subvol;
2028 ret = __bch2_fsck_write_inode(trans, &subvol_root, target_snapshot);
2029 if (ret)
cc053290 2030 goto err;
f4e68c85
KO
2031 }
2032
11def188
KO
2033 ret = check_dirent_target(trans, iter, d, &subvol_root,
2034 target_snapshot);
2035 if (ret)
cc053290 2036 goto err;
f5d58d0c 2037out:
c60b7f80 2038err:
11def188 2039fsck_err:
b8628a25 2040 bch2_trans_iter_exit(trans, &subvol_iter);
c60b7f80 2041 printbuf_exit(&buf);
11def188
KO
2042 return ret;
2043}
2044
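/*
 * Check a single dirent: verify that it lives in a valid snapshot and in an
 * existing directory inode, that it hashes correctly, and that it points to
 * a valid target (a subvolume for DT_SUBVOL, otherwise at least one visible
 * inode); also accumulate the per-directory subdirectory counts used by
 * check_subdir_count():
 */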
914f2786 2045static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
a1783320 2046 struct bkey_s_c k,
914f2786 2047 struct bch_hash_info *hash_info,
ef1669ff
KO
2048 struct inode_walker *dir,
2049 struct inode_walker *target,
2050 struct snapshots_seen *s)
1c6fdbd8 2051{
914f2786 2052 struct bch_fs *c = trans->c;
ef1669ff 2053 struct inode_walker_entry *i;
fa8e94fa 2054 struct printbuf buf = PRINTBUF;
49124d8a 2055 struct bpos equiv;
fa8e94fa 2056 int ret = 0;
d69f41d6 2057
ef1669ff 2058 ret = check_key_has_snapshot(trans, iter, k);
fa8e94fa
KO
2059 if (ret) {
2060 ret = ret < 0 ? ret : 0;
2061 goto out;
2062 }
1c6fdbd8 2063
49124d8a
KO
2064 equiv = k.k->p;
2065 equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
2066
2067 ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
914f2786 2068 if (ret)
fa8e94fa 2069 goto err;
8a85b20c 2070
ef1669ff 2071 if (k.k->type == KEY_TYPE_whiteout)
fa8e94fa 2072 goto out;
ef1669ff 2073
f9f52bc4 2074 if (dir->last_pos.inode != k.k->p.inode) {
ef1669ff
KO
2075 ret = check_subdir_count(trans, dir);
2076 if (ret)
fa8e94fa 2077 goto err;
ef1669ff
KO
2078 }
2079
07f383c7 2080 BUG_ON(!btree_iter_path(trans, iter)->should_be_locked);
292dea86 2081
971a1503 2082 i = walk_inode(trans, dir, k);
06dcca51 2083 ret = PTR_ERR_OR_ZERO(i);
ef1669ff 2084 if (ret < 0)
fa8e94fa 2085 goto err;
1c6fdbd8 2086
2a89a3e9 2087 if (dir->first_this_inode && dir->inodes.nr)
c8d5b714
KO
2088 *hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
2089 dir->first_this_inode = false;
2090
09d4c2ac
KO
2091 if (!i && (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_inodes))) {
2092 ret = reconstruct_inode(trans, k.k->p.snapshot, k.k->p.inode, 0, S_IFDIR) ?:
2093 bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
2094 if (ret)
2095 goto err;
2096
2097 dir->last_pos.inode--;
2098 ret = -BCH_ERR_transaction_restart_nested;
2099 goto err;
2100 }
2101
b65db750 2102 if (fsck_err_on(!i, c, dirent_in_missing_dir_inode,
			"dirent in nonexistent directory:\n%s",
fa8e94fa
KO
2104 (printbuf_reset(&buf),
2105 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
2106 ret = bch2_btree_delete_at(trans, iter,
285b181a 2107 BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
fa8e94fa
KO
2108 goto out;
2109 }
ef1669ff 2110
06dcca51 2111 if (!i)
fa8e94fa 2112 goto out;
ef1669ff 2113
b65db750
KO
2114 if (fsck_err_on(!S_ISDIR(i->inode.bi_mode),
2115 c, dirent_in_non_dir_inode,
285b181a
KO
2116 "dirent in non directory inode type %s:\n%s",
2117 bch2_d_type_str(inode_d_type(&i->inode)),
fa8e94fa
KO
2118 (printbuf_reset(&buf),
2119 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
2120 ret = bch2_btree_delete_at(trans, iter, 0);
2121 goto out;
2122 }
8a85b20c 2123
c8d5b714 2124 ret = hash_check_key(trans, bch2_dirent_hash_desc, hash_info, iter, k);
914f2786 2125 if (ret < 0)
fa8e94fa
KO
2126 goto err;
2127 if (ret) {
2128 /* dirent has been deleted */
2129 ret = 0;
2130 goto out;
2131 }
7ac2c55e 2132
914f2786 2133 if (k.k->type != KEY_TYPE_dirent)
fa8e94fa 2134 goto out;
1c6fdbd8 2135
09d4c2ac 2136 struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
1c6fdbd8 2137
4db65027 2138 if (d.v->d_type == DT_SUBVOL) {
ea27001e 2139 ret = check_dirent_to_subvol(trans, iter, d);
914f2786 2140 if (ret)
fa8e94fa 2141 goto err;
ef1669ff 2142 } else {
4db65027 2143 ret = __get_visible_inodes(trans, target, s, le64_to_cpu(d.v->d_inum));
ef1669ff 2144 if (ret)
fa8e94fa 2145 goto err;
1c6fdbd8 2146
b65db750
KO
2147 if (fsck_err_on(!target->inodes.nr,
2148 c, dirent_to_missing_inode,
49124d8a
KO
2149 "dirent points to missing inode: (equiv %u)\n%s",
2150 equiv.snapshot,
fa8e94fa
KO
2151 (printbuf_reset(&buf),
2152 bch2_bkey_val_to_text(&buf, c, k),
2153 buf.buf))) {
285b181a 2154 ret = __remove_dirent(trans, d.k->p);
ef1669ff 2155 if (ret)
fa8e94fa 2156 goto err;
1c6fdbd8
KO
2157 }
2158
91d961ba 2159 darray_for_each(target->inodes, i) {
ef1669ff
KO
2160 ret = check_dirent_target(trans, iter, d,
2161 &i->inode, i->snapshot);
2162 if (ret)
fa8e94fa 2163 goto err;
d3ff7fec
KO
2164 }
2165
11def188
KO
2166 if (d.v->d_type == DT_DIR)
2167 for_each_visible_inode(c, s, dir, equiv.snapshot, i)
2168 i->count++;
2169 }
fa8e94fa
KO
2170out:
2171err:
914f2786 2172fsck_err:
fa8e94fa 2173 printbuf_exit(&buf);
d2a990d1 2174 bch_err_fn(c, ret);
914f2786
KO
2175 return ret;
2176}

/*
 * Walk dirents: verify that they all have a corresponding S_ISDIR inode, and
 * validate d_type
 */
067d228b 2182int bch2_check_dirents(struct bch_fs *c)
914f2786 2183{
ef1669ff
KO
2184 struct inode_walker dir = inode_walker_init();
2185 struct inode_walker target = inode_walker_init();
2186 struct snapshots_seen s;
914f2786 2187 struct bch_hash_info hash_info;
1c6fdbd8 2188
ef1669ff 2189 snapshots_seen_init(&s);
1c6fdbd8 2190
4eb3877e
KO
2191 int ret = bch2_trans_run(c,
2192 for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
2193 POS(BCACHEFS_ROOT_INO, 0),
2194 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
2195 k,
2196 NULL, NULL,
2197 BCH_TRANS_COMMIT_no_enospc,
109ea419
KO
2198 check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s)) ?:
2199 check_subdir_count_notnested(trans, &dir));
914f2786 2200
ef1669ff
KO
2201 snapshots_seen_exit(&s);
2202 inode_walker_exit(&dir);
2203 inode_walker_exit(&target);
d2a990d1 2204 bch_err_fn(c, ret);
ef1669ff 2205 return ret;
1c6fdbd8
KO
2206}
2207
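/*
 * Check a single xattr: verify that it lives in a valid snapshot and that
 * the inode it belongs to exists, then verify its hash:
 */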
285b181a 2208static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
a1783320 2209 struct bkey_s_c k,
285b181a
KO
2210 struct bch_hash_info *hash_info,
2211 struct inode_walker *inode)
2212{
2213 struct bch_fs *c = trans->c;
06dcca51 2214 struct inode_walker_entry *i;
285b181a
KO
2215 int ret;
2216
285b181a 2217 ret = check_key_has_snapshot(trans, iter, k);
3ff34756 2218 if (ret < 0)
285b181a 2219 return ret;
3ff34756
KO
2220 if (ret)
2221 return 0;
285b181a 2222
971a1503 2223 i = walk_inode(trans, inode, k);
06dcca51
KO
2224 ret = PTR_ERR_OR_ZERO(i);
2225 if (ret)
285b181a
KO
2226 return ret;
2227
2a89a3e9 2228 if (inode->first_this_inode && inode->inodes.nr)
c8d5b714
KO
2229 *hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode);
2230 inode->first_this_inode = false;
2231
b65db750 2232 if (fsck_err_on(!i, c, xattr_in_missing_inode,
285b181a
KO
2233 "xattr for missing inode %llu",
2234 k.k->p.inode))
2235 return bch2_btree_delete_at(trans, iter, 0);
2236
06dcca51 2237 if (!i)
285b181a
KO
2238 return 0;
2239
285b181a
KO
2240 ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
2241fsck_err:
d2a990d1 2242 bch_err_fn(c, ret);
285b181a
KO
2243 return ret;
2244}
2245
/*
 * Walk xattrs: verify that they all have a corresponding inode
 */
067d228b 2249int bch2_check_xattrs(struct bch_fs *c)
1c6fdbd8 2250{
285b181a 2251 struct inode_walker inode = inode_walker_init();
7ac2c55e 2252 struct bch_hash_info hash_info;
1c6fdbd8
KO
2253 int ret = 0;
2254
6bd68ec2
KO
2255 ret = bch2_trans_run(c,
2256 for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
a1783320
KO
2257 POS(BCACHEFS_ROOT_INO, 0),
2258 BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
2259 k,
2260 NULL, NULL,
3f0e297d 2261 BCH_TRANS_COMMIT_no_enospc,
6bd68ec2 2262 check_xattr(trans, &iter, k, &hash_info, &inode)));
d2a990d1 2263 bch_err_fn(c, ret);
9a796fdb 2264 return ret;
1c6fdbd8
KO
2265}
2266
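/*
 * Check that the root subvolume and the root directory inode exist, creating
 * them if they're missing:
 */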
285b181a 2267static int check_root_trans(struct btree_trans *trans)
1c6fdbd8 2268{
285b181a 2269 struct bch_fs *c = trans->c;
ef1669ff 2270 struct bch_inode_unpacked root_inode;
d3ff7fec 2271 u32 snapshot;
ef1669ff 2272 u64 inum;
1c6fdbd8
KO
2273 int ret;
2274
c98d132e 2275 ret = subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
e47a390a 2276 if (ret && !bch2_err_matches(ret, ENOENT))
1c6fdbd8
KO
2277 return ret;
2278
b65db750
KO
2279 if (mustfix_fsck_err_on(ret, c, root_subvol_missing,
2280 "root subvol missing")) {
dcc1c045
KO
2281 struct bkey_i_subvolume *root_subvol =
2282 bch2_trans_kmalloc(trans, sizeof(*root_subvol));
2283 ret = PTR_ERR_OR_ZERO(root_subvol);
2284 if (ret)
2285 goto err;
1c6fdbd8 2286
ef1669ff
KO
2287 snapshot = U32_MAX;
2288 inum = BCACHEFS_ROOT_INO;
1c6fdbd8 2289
dcc1c045
KO
2290 bkey_subvolume_init(&root_subvol->k_i);
2291 root_subvol->k.p.offset = BCACHEFS_ROOT_SUBVOL;
2292 root_subvol->v.flags = 0;
2293 root_subvol->v.snapshot = cpu_to_le32(snapshot);
2294 root_subvol->v.inode = cpu_to_le64(inum);
2295 ret = bch2_btree_insert_trans(trans, BTREE_ID_subvolumes, &root_subvol->k_i, 0);
d2a990d1
KO
2296 bch_err_msg(c, ret, "writing root subvol");
2297 if (ret)
ef1669ff 2298 goto err;
ef1669ff
KO
2299 }
2300
c98d132e 2301 ret = lookup_inode(trans, BCACHEFS_ROOT_INO, &root_inode, &snapshot);
e47a390a 2302 if (ret && !bch2_err_matches(ret, ENOENT))
ef1669ff 2303 return ret;
1c6fdbd8 2304
b65db750
KO
2305 if (mustfix_fsck_err_on(ret, c, root_dir_missing,
2306 "root directory missing") ||
2307 mustfix_fsck_err_on(!S_ISDIR(root_inode.bi_mode),
2308 c, root_inode_not_dir,
ef1669ff
KO
2309 "root inode not a directory")) {
2310 bch2_inode_init(c, &root_inode, 0, 0, S_IFDIR|0755,
2311 0, NULL);
2312 root_inode.bi_inum = inum;
1c6fdbd8 2313
69c8e6ce 2314 ret = __bch2_fsck_write_inode(trans, &root_inode, snapshot);
d2a990d1 2315 bch_err_msg(c, ret, "writing root inode");
ef1669ff
KO
2316 }
2317err:
2318fsck_err:
ef1669ff 2319 return ret;
1c6fdbd8
KO
2320}
2321
/* Get root directory, create if it doesn't exist: */
int bch2_check_root(struct bch_fs *c)
{
	int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		check_root_trans(trans));
	bch_err_fn(c, ret);
	return ret;
}
2330
663db5a5
KO
2331typedef DARRAY(u32) darray_u32;
2332
2333static bool darray_u32_has(darray_u32 *d, u32 v)
2334{
2335 darray_for_each(*d, i)
2336 if (*i == v)
2337 return true;
2338 return false;
2339}
2340
2341/*
2342 * We've checked that inode backpointers point to valid dirents; here, it's
2343 * sufficient to check that the subvolume root has a dirent:
2344 */
2345static int subvol_has_dirent(struct btree_trans *trans, struct bkey_s_c_subvolume s)
2346{
2347 struct bch_inode_unpacked inode;
2348 int ret = bch2_inode_find_by_inum_trans(trans,
2349 (subvol_inum) { s.k->p.offset, le64_to_cpu(s.v->inode) },
2350 &inode);
2351 if (ret)
2352 return ret;
2353
2354 return inode.bi_dir != 0;
2355}
2356
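/*
 * Walk up the fs_path_parent chain from a subvolume towards the root
 * subvolume, reattaching subvolumes that have no dirent pointing at their
 * root inode and breaking any loops we find along the way:
 */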
2357static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k)
2358{
2359 struct bch_fs *c = trans->c;
2360 struct btree_iter parent_iter = {};
2361 darray_u32 subvol_path = {};
2362 struct printbuf buf = PRINTBUF;
2363 int ret = 0;
2364
2365 if (k.k->type != KEY_TYPE_subvolume)
2366 return 0;
2367
2368 while (k.k->p.offset != BCACHEFS_ROOT_SUBVOL) {
2369 ret = darray_push(&subvol_path, k.k->p.offset);
2370 if (ret)
2371 goto err;
2372
2373 struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
2374
2375 ret = subvol_has_dirent(trans, s);
2376 if (ret < 0)
2377 break;
2378
2379 if (fsck_err_on(!ret,
2380 c, subvol_unreachable,
2381 "unreachable subvolume %s",
2382 (bch2_bkey_val_to_text(&buf, c, s.s_c),
2383 buf.buf))) {
2384 ret = reattach_subvol(trans, s);
2385 break;
2386 }
2387
2388 u32 parent = le32_to_cpu(s.v->fs_path_parent);
2389
2390 if (darray_u32_has(&subvol_path, parent)) {
2391 if (fsck_err(c, subvol_loop, "subvolume loop"))
2392 ret = reattach_subvol(trans, s);
2393 break;
2394 }
2395
2396 bch2_trans_iter_exit(trans, &parent_iter);
2397 bch2_trans_iter_init(trans, &parent_iter,
2398 BTREE_ID_subvolumes, POS(0, parent), 0);
2399 k = bch2_btree_iter_peek_slot(&parent_iter);
2400 ret = bkey_err(k);
2401 if (ret)
2402 goto err;
2403
2404 if (fsck_err_on(k.k->type != KEY_TYPE_subvolume,
2405 c, subvol_unreachable,
2406 "unreachable subvolume %s",
2407 (bch2_bkey_val_to_text(&buf, c, s.s_c),
2408 buf.buf))) {
2409 ret = reattach_subvol(trans, s);
2410 break;
2411 }
2412 }
2413fsck_err:
2414err:
2415 printbuf_exit(&buf);
2416 darray_exit(&subvol_path);
2417 bch2_trans_iter_exit(trans, &parent_iter);
2418 return ret;
2419}
2420
2421int bch2_check_subvolume_structure(struct bch_fs *c)
2422{
2423 int ret = bch2_trans_run(c,
2424 for_each_btree_key_commit(trans, iter,
2425 BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
2426 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
2427 check_subvol_path(trans, &iter, k)));
2428 bch_err_fn(c, ret);
2429 return ret;
2430}
2431
91d961ba
KO
2432struct pathbuf_entry {
2433 u64 inum;
2434 u32 snapshot;
1c6fdbd8
KO
2435};
2436
91d961ba
KO
2437typedef DARRAY(struct pathbuf_entry) pathbuf;
2438
2439static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot)
6e0c886d 2440{
91d961ba 2441 darray_for_each(*p, i)
6e0c886d
KO
2442 if (i->inum == inum &&
2443 i->snapshot == snapshot)
2444 return true;
6e0c886d
KO
2445 return false;
2446}
2447
/*
 * Check that a given inode is reachable from its subvolume root - we already
 * verified subvolume connectivity:
 *
 * XXX: we should also be verifying that inodes are in the right subvolumes
 */
688a7694 2454static int check_path(struct btree_trans *trans, pathbuf *p, struct bkey_s_c inode_k)
1c6fdbd8 2455{
d3ff7fec 2456 struct bch_fs *c = trans->c;
3a136177 2457 struct btree_iter inode_iter = {};
688a7694 2458 struct bch_inode_unpacked inode;
3a136177 2459 struct printbuf buf = PRINTBUF;
688a7694 2460 u32 snapshot = bch2_snapshot_equiv(c, inode_k.k->p.snapshot);
1c6fdbd8
KO
2461 int ret = 0;
2462
d3ff7fec 2463 p->nr = 0;
424eb881 2464
688a7694
KO
2465 BUG_ON(bch2_inode_unpack(inode_k, &inode));
2466
74406f66 2467 while (!inode.bi_subvol) {
4db65027
KO
2468 struct btree_iter dirent_iter;
2469 struct bkey_s_c_dirent d;
6e0c886d
KO
2470 u32 parent_snapshot = snapshot;
2471
688a7694 2472 d = inode_get_dirent(trans, &dirent_iter, &inode, &parent_snapshot);
c98d132e 2473 ret = bkey_err(d.s_c);
e47a390a 2474 if (ret && !bch2_err_matches(ret, ENOENT))
d3ff7fec 2475 break;
1c6fdbd8 2476
688a7694 2477 if (!ret && !dirent_points_to_inode(d, &inode)) {
4db65027 2478 bch2_trans_iter_exit(trans, &dirent_iter);
e47a390a 2479 ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
4db65027
KO
2480 }
2481
e47a390a 2482 if (bch2_err_matches(ret, ENOENT)) {
56e23047 2483 ret = 0;
688a7694 2484 if (fsck_err(c, inode_unreachable,
3a136177
KO
2485 "unreachable inode\n%s",
2486 (printbuf_reset(&buf),
2487 bch2_bkey_val_to_text(&buf, c, inode_k),
2488 buf.buf)))
688a7694 2489 ret = reattach_inode(trans, &inode, snapshot);
3a136177 2490 goto out;
d3ff7fec 2491 }
4db65027
KO
2492
2493 bch2_trans_iter_exit(trans, &dirent_iter);
1c6fdbd8 2494
688a7694 2495 if (!S_ISDIR(inode.bi_mode))
d3ff7fec 2496 break;
1c6fdbd8 2497
663db5a5
KO
2498 ret = darray_push(p, ((struct pathbuf_entry) {
2499 .inum = inode.bi_inum,
2500 .snapshot = snapshot,
2501 }));
2502 if (ret)
d3ff7fec 2503 return ret;
1c6fdbd8 2504
6e0c886d
KO
2505 snapshot = parent_snapshot;
2506
3a136177
KO
2507 bch2_trans_iter_exit(trans, &inode_iter);
2508 inode_k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes,
2509 SPOS(0, inode.bi_dir, snapshot), 0);
2510 ret = bkey_err(inode_k) ?:
2511 !bkey_is_inode(inode_k.k) ? -BCH_ERR_ENOENT_inode
2512 : bch2_inode_unpack(inode_k, &inode);
6e0c886d
KO
2513 if (ret) {
2514 /* Should have been caught in dirents pass */
c98d132e
KO
2515 if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
2516 bch_err(c, "error looking up parent directory: %i", ret);
6e0c886d
KO
2517 break;
2518 }
2519
3a136177
KO
2520 snapshot = inode_k.k->p.snapshot;
2521
688a7694 2522 if (path_is_dup(p, inode.bi_inum, snapshot)) {
d3ff7fec 2523 /* XXX print path */
6e0c886d
KO
2524 bch_err(c, "directory structure loop");
2525
91d961ba 2526 darray_for_each(*p, i)
6e0c886d 2527 pr_err("%llu:%u", i->inum, i->snapshot);
688a7694 2528 pr_err("%llu:%u", inode.bi_inum, snapshot);
6e0c886d 2529
663db5a5
KO
2530 if (fsck_err(c, dir_loop, "directory structure loop")) {
2531 ret = remove_backpointer(trans, &inode);
c98d132e 2532 bch_err_msg(c, ret, "removing dirent");
663db5a5
KO
2533 if (ret)
2534 break;
1c6fdbd8 2535
663db5a5 2536 ret = reattach_inode(trans, &inode, snapshot);
688a7694 2537 bch_err_msg(c, ret, "reattaching inode %llu", inode.bi_inum);
663db5a5 2538 }
c98d132e 2539 break;
1c6fdbd8 2540 }
1c6fdbd8 2541 }
3a136177 2542out:
d3ff7fec 2543fsck_err:
3a136177
KO
2544 bch2_trans_iter_exit(trans, &inode_iter);
2545 printbuf_exit(&buf);
d2a990d1 2546 bch_err_fn(c, ret);
d3ff7fec
KO
2547 return ret;
2548}
1c6fdbd8 2549
/*
 * Check for unreachable inodes, as well as loops in the directory structure:
 * after bch2_check_dirents(), an inode whose backpointer doesn't exist is
 * unreachable:
 */
067d228b 2555int bch2_check_directory_structure(struct bch_fs *c)
d3ff7fec 2556{
91d961ba 2557 pathbuf path = { 0, };
d3ff7fec 2558 int ret;
1c6fdbd8 2559
c98d132e
KO
2560 ret = bch2_trans_run(c,
2561 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN,
2562 BTREE_ITER_INTENT|
2563 BTREE_ITER_PREFETCH|
2564 BTREE_ITER_ALL_SNAPSHOTS, k,
2565 NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
2566 if (!bkey_is_inode(k.k))
2567 continue;
1c6fdbd8 2568
688a7694 2569 if (bch2_inode_flags(k) & BCH_INODE_unlinked)
c98d132e 2570 continue;
ef1669ff 2571
688a7694 2572 check_path(trans, &path, k);
c98d132e 2573 })));
91d961ba 2574 darray_exit(&path);
27b2df98 2575
d2a990d1 2576 bch_err_fn(c, ret);
9a796fdb 2577 return ret;
1c6fdbd8
KO
2578}
2579
fc51b041
KO
2580struct nlink_table {
2581 size_t nr;
2582 size_t size;
1c6fdbd8 2583
fc51b041
KO
2584 struct nlink {
2585 u64 inum;
2586 u32 snapshot;
2587 u32 count;
2588 } *d;
2589};
1c6fdbd8 2590
f0f41a6d
KO
2591static int add_nlink(struct bch_fs *c, struct nlink_table *t,
2592 u64 inum, u32 snapshot)
1c6fdbd8 2593{
fc51b041
KO
2594 if (t->nr == t->size) {
2595 size_t new_size = max_t(size_t, 128UL, t->size * 2);
3e3e02e6
KO
2596 void *d = kvmalloc_array(new_size, sizeof(t->d[0]), GFP_KERNEL);
2597
fc51b041 2598 if (!d) {
f0f41a6d
KO
2599 bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
2600 new_size);
65d48e35 2601 return -BCH_ERR_ENOMEM_fsck_add_nlink;
fc51b041 2602 }
1c6fdbd8 2603
82355e28
KO
2604 if (t->d)
2605 memcpy(d, t->d, t->size * sizeof(t->d[0]));
fc51b041 2606 kvfree(t->d);
1c6fdbd8 2607
fc51b041
KO
2608 t->d = d;
2609 t->size = new_size;
2bb748a6
KO
2610 }
2611
fc51b041
KO
2612
2613 t->d[t->nr++] = (struct nlink) {
2614 .inum = inum,
2615 .snapshot = snapshot,
2616 };
2617
2618 return 0;
2619}
2620
2621static int nlink_cmp(const void *_l, const void *_r)
2622{
2623 const struct nlink *l = _l;
2624 const struct nlink *r = _r;
2625
db18ef1a 2626 return cmp_int(l->inum, r->inum);
fc51b041
KO
2627}
2628
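/*
 * Bump the link count for @inum, if it falls in the range covered by the
 * table: binary search for it, rewind to its first entry, then increment the
 * count of each snapshot version the dirent is visible in:
 */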
ef1669ff
KO
2629static void inc_link(struct bch_fs *c, struct snapshots_seen *s,
2630 struct nlink_table *links,
2631 u64 range_start, u64 range_end, u64 inum, u32 snapshot)
fc51b041
KO
2632{
2633 struct nlink *link, key = {
2634 .inum = inum, .snapshot = U32_MAX,
2635 };
2636
2637 if (inum < range_start || inum >= range_end)
1c6fdbd8 2638 return;
fc51b041
KO
2639
2640 link = __inline_bsearch(&key, links->d, links->nr,
2641 sizeof(links->d[0]), nlink_cmp);
ef1669ff
KO
2642 if (!link)
2643 return;
2644
2645 while (link > links->d && link[0].inum == link[-1].inum)
2646 --link;
2647
2648 for (; link < links->d + links->nr && link->inum == inum; link++)
2649 if (ref_visible(c, s, snapshot, link->snapshot)) {
2650 link->count++;
2651 if (link->snapshot >= snapshot)
2652 break;
2653 }
fc51b041
KO
2654}
2655
2656noinline_for_stack
2657static int check_nlinks_find_hardlinks(struct bch_fs *c,
2658 struct nlink_table *t,
2659 u64 start, u64 *end)
2660{
27b2df98 2661 int ret = bch2_trans_run(c,
5028b907
KO
2662 for_each_btree_key(trans, iter, BTREE_ID_inodes,
2663 POS(0, start),
2664 BTREE_ITER_INTENT|
2665 BTREE_ITER_PREFETCH|
2666 BTREE_ITER_ALL_SNAPSHOTS, k, ({
27b2df98
KO
2667 if (!bkey_is_inode(k.k))
2668 continue;
fc51b041 2669
27b2df98 2670 /* Should never fail, checked by bch2_inode_invalid: */
80eab7a7 2671 struct bch_inode_unpacked u;
27b2df98 2672 BUG_ON(bch2_inode_unpack(k, &u));
fc51b041 2673
27b2df98
KO
2674 /*
2675 * Backpointer and directory structure checks are sufficient for
2676 * directories, since they can't have hardlinks:
2677 */
2678 if (S_ISDIR(u.bi_mode))
2679 continue;
fc51b041 2680
27b2df98
KO
2681 if (!u.bi_nlink)
2682 continue;
fc51b041 2683
27b2df98
KO
2684 ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
2685 if (ret) {
2686 *end = k.k->p.offset;
2687 ret = 0;
2688 break;
2689 }
2690 0;
2691 })));
1c6fdbd8 2692
27b2df98 2693 bch_err_fn(c, ret);
fc51b041 2694 return ret;
1c6fdbd8
KO
2695}
2696
2697noinline_for_stack
fc51b041
KO
2698static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links,
2699 u64 range_start, u64 range_end)
1c6fdbd8 2700{
ef1669ff 2701 struct snapshots_seen s;
1c6fdbd8 2702
ef1669ff
KO
2703 snapshots_seen_init(&s);
2704
27b2df98 2705 int ret = bch2_trans_run(c,
5028b907
KO
2706 for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN,
2707 BTREE_ITER_INTENT|
2708 BTREE_ITER_PREFETCH|
2709 BTREE_ITER_ALL_SNAPSHOTS, k, ({
27b2df98
KO
2710 ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
2711 if (ret)
2712 break;
1c6fdbd8 2713
80eab7a7
KO
2714 if (k.k->type == KEY_TYPE_dirent) {
2715 struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
abcecb49 2716
27b2df98
KO
2717 if (d.v->d_type != DT_DIR &&
2718 d.v->d_type != DT_SUBVOL)
2719 inc_link(c, &s, links, range_start, range_end,
2720 le64_to_cpu(d.v->d_inum),
2721 bch2_snapshot_equiv(c, d.k->p.snapshot));
27b2df98
KO
2722 }
2723 0;
2724 })));
1c6fdbd8 2725
ef1669ff 2726 snapshots_seen_exit(&s);
27b2df98
KO
2727
2728 bch_err_fn(c, ret);
1c6fdbd8
KO
2729 return ret;
2730}
2731
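/*
 * For each inode in the current range, advance to its entry in the sorted
 * link table and fix i_nlink if it doesn't match the count we found by
 * walking dirents:
 */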
eace11a7
KO
2732static int check_nlinks_update_inode(struct btree_trans *trans, struct btree_iter *iter,
2733 struct bkey_s_c k,
2734 struct nlink_table *links,
2735 size_t *idx, u64 range_end)
2736{
2737 struct bch_fs *c = trans->c;
2738 struct bch_inode_unpacked u;
2739 struct nlink *link = &links->d[*idx];
2740 int ret = 0;
2741
2742 if (k.k->p.offset >= range_end)
2743 return 1;
2744
2745 if (!bkey_is_inode(k.k))
2746 return 0;
2747
2748 BUG_ON(bch2_inode_unpack(k, &u));
2749
73bd774d 2750 if (S_ISDIR(u.bi_mode))
eace11a7
KO
2751 return 0;
2752
2753 if (!u.bi_nlink)
2754 return 0;
2755
2756 while ((cmp_int(link->inum, k.k->p.offset) ?:
2757 cmp_int(link->snapshot, k.k->p.snapshot)) < 0) {
2758 BUG_ON(*idx == links->nr);
2759 link = &links->d[++*idx];
2760 }
2761
b65db750
KO
2762 if (fsck_err_on(bch2_inode_nlink_get(&u) != link->count,
2763 c, inode_wrong_nlink,
eace11a7
KO
2764 "inode %llu type %s has wrong i_nlink (%u, should be %u)",
2765 u.bi_inum, bch2_d_types[mode_to_type(u.bi_mode)],
2766 bch2_inode_nlink_get(&u), link->count)) {
2767 bch2_inode_nlink_set(&u, link->count);
69c8e6ce 2768 ret = __bch2_fsck_write_inode(trans, &u, k.k->p.snapshot);
eace11a7
KO
2769 }
2770fsck_err:
2771 return ret;
2772}
2773
1c6fdbd8 2774noinline_for_stack
fc51b041
KO
2775static int check_nlinks_update_hardlinks(struct bch_fs *c,
2776 struct nlink_table *links,
1c6fdbd8
KO
2777 u64 range_start, u64 range_end)
2778{
eace11a7 2779 size_t idx = 0;
1c6fdbd8 2780
80eab7a7 2781 int ret = bch2_trans_run(c,
6bd68ec2
KO
2782 for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
2783 POS(0, range_start),
2784 BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
3f0e297d 2785 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
6bd68ec2 2786 check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)));
eace11a7 2787 if (ret < 0) {
8b58623f 2788 bch_err(c, "error in fsck walking inodes: %s", bch2_err_str(ret));
eace11a7
KO
2789 return ret;
2790 }
1c6fdbd8 2791
eace11a7 2792 return 0;
1c6fdbd8
KO
2793}
2794
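/*
 * Hardlink counts are checked in three passes, over one range of inodes at a
 * time (in case the whole table doesn't fit in memory): collect candidate
 * inodes into the nlink table, walk dirents to count the links to each one,
 * then rewrite any inodes whose i_nlink disagrees with the count:
 */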
067d228b 2795int bch2_check_nlinks(struct bch_fs *c)
1c6fdbd8 2796{
fc51b041 2797 struct nlink_table links = { 0 };
1c6fdbd8
KO
2798 u64 this_iter_range_start, next_iter_range_start = 0;
2799 int ret = 0;
2800
1c6fdbd8
KO
2801 do {
2802 this_iter_range_start = next_iter_range_start;
2803 next_iter_range_start = U64_MAX;
2804
fc51b041
KO
2805 ret = check_nlinks_find_hardlinks(c, &links,
2806 this_iter_range_start,
2807 &next_iter_range_start);
2808
2809 ret = check_nlinks_walk_dirents(c, &links,
1c6fdbd8 2810 this_iter_range_start,
fc51b041 2811 next_iter_range_start);
1c6fdbd8
KO
2812 if (ret)
2813 break;
2814
fc51b041 2815 ret = check_nlinks_update_hardlinks(c, &links,
1c6fdbd8
KO
2816 this_iter_range_start,
2817 next_iter_range_start);
2818 if (ret)
2819 break;
2820
fc51b041 2821 links.nr = 0;
1c6fdbd8
KO
2822 } while (next_iter_range_start != U64_MAX);
2823
fc51b041 2824 kvfree(links.d);
d2a990d1 2825 bch_err_fn(c, ret);
1c6fdbd8
KO
2826 return ret;
2827}
2828
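/*
 * Clear the front_pad/back_pad fields of reflink pointers; this pass only
 * runs on filesystems from before bcachefs_metadata_version_reflink_p_fix
 * (see bch2_fix_reflink_p() below):
 */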
eace11a7
KO
2829static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
2830 struct bkey_s_c k)
bfe88863 2831{
bfe88863
KO
2832 struct bkey_s_c_reflink_p p;
2833 struct bkey_i_reflink_p *u;
bfe88863 2834
bfe88863
KO
2835 if (k.k->type != KEY_TYPE_reflink_p)
2836 return 0;
2837
2838 p = bkey_s_c_to_reflink_p(k);
2839
6d76aefe 2840 if (!p.v->front_pad && !p.v->back_pad)
bfe88863
KO
2841 return 0;
2842
2843 u = bch2_trans_kmalloc(trans, sizeof(*u));
cf904c8d 2844 int ret = PTR_ERR_OR_ZERO(u);
bfe88863
KO
2845 if (ret)
2846 return ret;
2847
2848 bkey_reassemble(&u->k_i, k);
6d76aefe
KO
2849 u->v.front_pad = 0;
2850 u->v.back_pad = 0;
bfe88863 2851
6b3d8b89 2852 return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_NORUN);
bfe88863
KO
2853}
2854
067d228b 2855int bch2_fix_reflink_p(struct bch_fs *c)
bfe88863 2856{
bfe88863
KO
2857 if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
2858 return 0;
2859
80eab7a7 2860 int ret = bch2_trans_run(c,
6bd68ec2 2861 for_each_btree_key_commit(trans, iter,
1bb3c2a9
KO
2862 BTREE_ID_extents, POS_MIN,
2863 BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
2864 BTREE_ITER_ALL_SNAPSHOTS, k,
3f0e297d 2865 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
6bd68ec2 2866 fix_reflink_p_key(trans, &iter, k)));
d2a990d1 2867 bch_err_fn(c, ret);
bfe88863
KO
2868 return ret;
2869}