1 // SPDX-License-Identifier: GPL-2.0
4 #include "alloc_background.h"
5 #include "backpointers.h"
7 #include "btree_cache.h"
8 #include "btree_update.h"
9 #include "btree_update_interior.h"
10 #include "btree_write_buffer.h"
/*
 * extent_matches_bp() - does @k contain a pointer corresponding to
 * backpointer @bp at @bucket?
 *
 * Each pointer in @k is converted back into a (bucket, backpointer) pair
 * with bch2_extent_ptr_to_bp() and compared against the pair the caller is
 * looking for.  (Some lines of this function are elided in this view.)
 */
16 static bool extent_matches_bp(struct bch_fs *c,
17 enum btree_id btree_id, unsigned level,
20 struct bch_backpointer bp)
22 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
23 const union bch_extent_entry *entry;
24 struct extent_ptr_decoded p;
26 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
28 struct bch_backpointer bp2;
33 bch2_extent_ptr_to_bp(c, btree_id, level, k, p, entry, &bucket2, &bp2);
/* whole-struct memcmp: every field of the regenerated bp must match */
34 if (bpos_eq(bucket, bucket2) &&
35 !memcmp(&bp, &bp2, sizeof(bp)))
/*
 * bkey validation for KEY_TYPE_backpointer: the key's btree position must
 * round-trip through bp_pos_to_bucket()/bucket_pos_to_bp() — otherwise the
 * backpointer sits at the wrong position.
 */
42 int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
43 enum bkey_invalid_flags flags,
46 struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
48 /* these will be caught by fsck */
49 if (!bch2_dev_exists2(c, bp.k->p.inode))
52 struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
55 bkey_fsck_err_on(!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset)),
57 backpointer_pos_wrong,
58 "backpointer at wrong pos");
/* Render a bch_backpointer value as human-readable text. */
63 void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
65 prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
66 bch2_btree_id_str(bp->btree_id),
/*
 * bucket_offset is fixed-point: the low MAX_EXTENT_COMPRESS_RATIO_SHIFT
 * bits are the fractional part, printed after the ':'
 */
68 (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
69 (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
71 bch2_bpos_to_text(out, bp->pos);
/*
 * Print a full backpointer key: the bucket it lives in (when the device
 * still exists) followed by the backpointer value itself.
 */
74 void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
76 if (bch2_dev_exists2(c, k.k->p.inode)) {
77 prt_str(out, "bucket=");
78 bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
82 bch2_backpointer_to_text(out, bkey_s_c_to_backpointer(k).v);
/*
 * Byte-swap a backpointer value when reading a filesystem of the opposite
 * endianness.  bucket_offset is a 40-bit field, hence swab40().
 */
85 void bch2_backpointer_swab(struct bkey_s k)
87 struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
89 bp.v->bucket_offset = swab40(bp.v->bucket_offset);
90 bp.v->bucket_len = swab32(bp.v->bucket_len);
91 bch2_bpos_swab(&bp.v->pos);
/*
 * Slow path for backpointer modification failures: on insert we found an
 * unexpected existing backpointer; on delete the backpointer we wanted to
 * remove was missing or didn't match.  Logs full details of what was
 * found vs. expected.
 */
94 static noinline int backpointer_mod_err(struct btree_trans *trans,
95 struct bch_backpointer bp,
97 struct bkey_s_c orig_k,
100 struct bch_fs *c = trans->c;
101 struct printbuf buf = PRINTBUF;
104 prt_printf(&buf, "existing backpointer found when inserting ");
105 bch2_backpointer_to_text(&buf, &bp);
107 printbuf_indent_add(&buf, 2);
109 prt_printf(&buf, "found ");
110 bch2_bkey_val_to_text(&buf, c, bp_k);
113 prt_printf(&buf, "for ");
114 bch2_bkey_val_to_text(&buf, c, orig_k);
116 bch_err(c, "%s", buf.buf);
/* delete case: only an error once the repair pass has already run */
117 } else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
118 prt_printf(&buf, "backpointer not found when deleting");
120 printbuf_indent_add(&buf, 2);
122 prt_printf(&buf, "searching for ");
123 bch2_backpointer_to_text(&buf, &bp);
126 prt_printf(&buf, "got ");
127 bch2_bkey_val_to_text(&buf, c, bp_k);
130 prt_printf(&buf, "for ");
131 bch2_bkey_val_to_text(&buf, c, orig_k);
133 bch_err(c, "%s", buf.buf);
/*
 * Past the check_extents_to_backpointers recovery pass this is a real
 * inconsistency: bch2_inconsistent_error() decides whether we must go
 * read-only
 */
138 if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
139 return bch2_inconsistent_error(c) ? BCH_ERR_erofs_unfixed_errors : 0;
/*
 * Insert or delete a backpointer for @orig_k directly in the backpointers
 * btree, bypassing the btree write buffer.  The existing key at the
 * computed position is checked against expectations; a mismatch is
 * reported via backpointer_mod_err().
 */
145 int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
147 struct bch_backpointer bp,
148 struct bkey_s_c orig_k,
151 struct btree_iter bp_iter;
153 struct bkey_i_backpointer *bp_k;
156 bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(struct bkey_i_backpointer));
157 ret = PTR_ERR_OR_ZERO(bp_k);
161 bkey_backpointer_init(&bp_k->k_i);
162 bp_k->k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
/*
 * NOTE(review): reinitializing as a deleted key (zero-size value) looks
 * like the !insert path — the surrounding conditional is elided here
 */
166 bp_k->k.type = KEY_TYPE_deleted;
167 set_bkey_val_u64s(&bp_k->k, 0);
170 k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
174 BTREE_ITER_WITH_UPDATES);
/* existing key must be a backpointer matching @bp exactly */
181 : (k.k->type != KEY_TYPE_backpointer ||
182 memcmp(bkey_s_c_to_backpointer(k).v, &bp, sizeof(bp)))) {
183 ret = backpointer_mod_err(trans, bp, k, orig_k, insert);
188 ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
190 bch2_trans_iter_exit(trans, &bp_iter);
195 * Find the next backpointer >= *bp_offset:
/*
 * Advance *bp_pos to the next backpointer for @bucket.  The bucket's alloc
 * key must be alloc_v4 with a generation matching @gen — otherwise the
 * bucket was reused and any backpointers are stale.
 */
197 int bch2_get_next_backpointer(struct btree_trans *trans,
198 struct bpos bucket, int gen,
200 struct bch_backpointer *bp,
203 struct bch_fs *c = trans->c;
/* backpointers for this bucket end where the next bucket's begin */
204 struct bpos bp_end_pos = bucket_pos_to_bp(c, bpos_nosnap_successor(bucket), 0);
205 struct btree_iter alloc_iter = { NULL }, bp_iter = { NULL };
209 if (bpos_ge(*bp_pos, bp_end_pos))
213 k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
214 bucket, BTREE_ITER_CACHED|iter_flags);
/* stale generation: bucket was freed and reallocated since */
219 if (k.k->type != KEY_TYPE_alloc_v4 ||
220 bkey_s_c_to_alloc_v4(k).v->gen != gen)
/* clamp the search start to the beginning of this bucket's range */
224 *bp_pos = bpos_max(*bp_pos, bucket_pos_to_bp(c, bucket, 0));
226 for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
227 *bp_pos, iter_flags, k, ret) {
228 if (bpos_ge(k.k->p, bp_end_pos))
232 *bp = *bkey_s_c_to_backpointer(k).v;
238 bch2_trans_iter_exit(trans, &bp_iter);
239 bch2_trans_iter_exit(trans, &alloc_iter);
/*
 * Report a backpointer whose target extent/btree node could not be found.
 * With the write buffer enabled this is expected (see comment below), so
 * we only complain when backpointers bypass the write buffer.
 */
243 static void backpointer_not_found(struct btree_trans *trans,
245 struct bch_backpointer bp,
248 struct bch_fs *c = trans->c;
249 struct printbuf buf = PRINTBUF;
250 struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
253 * If we're using the btree write buffer, the backpointer we were
254 * looking at may have already been deleted - failure to find what it
255 * pointed to is not an error:
257 if (likely(!bch2_backpointers_no_use_write_buffer))
260 prt_printf(&buf, "backpointer doesn't match %s it points to:\n ",
261 bp.level ? "btree node" : "extent");
262 prt_printf(&buf, "bucket: ");
263 bch2_bpos_to_text(&buf, bucket);
264 prt_printf(&buf, "\n ");
266 prt_printf(&buf, "backpointer pos: ");
267 bch2_bpos_to_text(&buf, bp_pos);
268 prt_printf(&buf, "\n ");
270 bch2_backpointer_to_text(&buf, &bp);
271 prt_printf(&buf, "\n ");
272 bch2_bkey_val_to_text(&buf, c, k);
/* during/after the repair pass just log; otherwise flag the transaction */
273 if (c->curr_recovery_pass >= BCH_RECOVERY_PASS_check_extents_to_backpointers)
274 bch_err_ratelimited(c, "%s", buf.buf);
276 bch2_trans_inconsistent(trans, "%s", buf.buf);
/*
 * bch2_backpointer_get_key() - look up the extent (level 0) or btree node
 * key (interior levels) that @bp points to.
 *
 * Returns bkey_s_c_null when the target no longer exists; @iter must be
 * exited by the caller when a key is returned.
 */
281 struct bkey_s_c bch2_backpointer_get_key(struct btree_trans *trans,
282 struct btree_iter *iter,
284 struct bch_backpointer bp,
287 if (likely(!bp.level)) {
288 struct bch_fs *c = trans->c;
289 struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
292 bch2_trans_node_iter_init(trans, iter,
297 k = bch2_btree_iter_peek_slot(iter);
299 bch2_trans_iter_exit(trans, iter);
/* verify the extent really still points into this bucket */
303 if (k.k && extent_matches_bp(c, bp.btree_id, bp.level, k, bucket, bp))
306 bch2_trans_iter_exit(trans, iter);
307 backpointer_not_found(trans, bp_pos, bp, k);
308 return bkey_s_c_null;
/* interior node: fetch the node itself and return its key */
310 struct btree *b = bch2_backpointer_get_node(trans, iter, bp_pos, bp);
312 if (IS_ERR_OR_NULL(b)) {
313 bch2_trans_iter_exit(trans, iter);
314 return IS_ERR(b) ? bkey_s_c_err(PTR_ERR(b)) : bkey_s_c_null;
316 return bkey_i_to_s_c(&b->key);
/*
 * bch2_backpointer_get_node() - look up the btree node an interior-level
 * backpointer refers to.
 *
 * The node is found by iterating at bp.level - 1 and peeking the parent
 * node; its key must still match @bp or the backpointer is stale.
 */
320 struct btree *bch2_backpointer_get_node(struct btree_trans *trans,
321 struct btree_iter *iter,
323 struct bch_backpointer bp)
325 struct bch_fs *c = trans->c;
326 struct bpos bucket = bp_pos_to_bucket(c, bp_pos);
331 bch2_trans_node_iter_init(trans, iter,
337 b = bch2_btree_iter_peek_node(iter);
338 if (IS_ERR_OR_NULL(b))
341 BUG_ON(b->c.level != bp.level - 1);
343 if (extent_matches_bp(c, bp.btree_id, bp.level,
344 bkey_i_to_s_c(&b->key),
/* node is being rewritten: backpointer points at an overwritten node */
348 if (btree_node_will_make_reachable(b)) {
349 b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
351 backpointer_not_found(trans, bp_pos, bp, bkey_i_to_s_c(&b->key));
355 bch2_trans_iter_exit(trans, iter);
/*
 * fsck helper: a backpointer must point into an existing device and its
 * bucket must have a live alloc_v4 key; otherwise the backpointer is
 * deleted (with user confirmation via fsck_err).
 */
359 static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_iter *bp_iter,
362 struct bch_fs *c = trans->c;
363 struct btree_iter alloc_iter = { NULL };
364 struct bkey_s_c alloc_k;
365 struct printbuf buf = PRINTBUF;
368 if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
369 backpointer_to_missing_device,
370 "backpointer for missing device:\n%s",
371 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
372 ret = bch2_btree_delete_at(trans, bp_iter, 0);
376 alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
377 bp_pos_to_bucket(c, k.k->p), 0);
378 ret = bkey_err(alloc_k);
382 if (fsck_err_on(alloc_k.k->type != KEY_TYPE_alloc_v4, c,
383 backpointer_to_missing_alloc,
384 "backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
385 alloc_iter.pos.inode, alloc_iter.pos.offset,
386 (bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
387 ret = bch2_btree_delete_at(trans, bp_iter, 0);
392 bch2_trans_iter_exit(trans, &alloc_iter);
397 /* verify that every backpointer has a corresponding alloc key */
/* Iterates the whole backpointers btree, committing per-key. */
398 int bch2_check_btree_backpointers(struct bch_fs *c)
400 int ret = bch2_trans_run(c,
401 for_each_btree_key_commit(trans, iter,
402 BTREE_ID_backpointers, POS_MIN, 0, k,
403 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
404 bch2_check_btree_backpointer(trans, &iter, k)));
/* Keys equal: same position, same total size, same value bytes. */
409 static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r)
411 return bpos_eq(l.k->p, r.k->p) &&
412 bkey_bytes(l.k) == bkey_bytes(r.k) &&
413 !memcmp(l.v, r.v, bkey_val_bytes(l.k));
/* Per-pass state for the extents -> backpointers fsck pass. */
416 struct extents_to_bp_state {
/* inclusive range of buckets whose backpointers are pinned this pass */
417 struct bpos bucket_start;
418 struct bpos bucket_end;
/* last extent we flushed the write buffer for — avoids repeat flushes */
419 struct bkey_buf last_flushed;
/*
 * Rewrite @extent with its pointer to device @dev removed, and update the
 * btree with the result.
 */
422 static int drop_dev_and_update(struct btree_trans *trans, enum btree_id btree,
423 struct bkey_s_c extent, unsigned dev)
425 struct bkey_i *n = bch2_bkey_make_mut_noupdate(trans, extent);
426 int ret = PTR_ERR_OR_ZERO(n);
430 bch2_bkey_drop_device(bkey_i_to_s(n), dev);
431 return bch2_btree_insert_trans(trans, btree, n, 0);
/*
 * Two extents claim the same space on @dev: read @extent's data back and
 * verify its checksum.  On checksum failure, @extent's pointer to @dev is
 * dropped and 1 is returned so the caller knows which duplicate lost.
 * Btree pointers and unchecksummed extents are skipped.
 */
434 static int check_extent_checksum(struct btree_trans *trans,
435 enum btree_id btree, struct bkey_s_c extent,
436 enum btree_id o_btree, struct bkey_s_c extent2, unsigned dev)
438 struct bch_fs *c = trans->c;
439 struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(extent);
440 const union bch_extent_entry *entry;
441 struct extent_ptr_decoded p;
442 struct printbuf buf = PRINTBUF;
443 void *data_buf = NULL;
444 struct bio *bio = NULL;
448 if (bkey_is_btree_ptr(extent.k))
/* find the pointer on the contested device */
451 bkey_for_each_ptr_decode(extent.k, ptrs, p, entry)
452 if (p.ptr.dev == dev)
/* no checksum: nothing we can verify */
456 if (!p.crc.csum_type)
/* sectors -> bytes */
459 bytes = p.crc.compressed_size << 9;
461 struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
462 if (!bch2_dev_get_ioref(ca, READ))
465 data_buf = kvmalloc(bytes, GFP_KERNEL);
471 bio = bio_alloc(ca->disk_sb.bdev, 1, REQ_OP_READ, GFP_KERNEL);
472 bio->bi_iter.bi_sector = p.ptr.offset;
473 bch2_bio_map(bio, data_buf, bytes);
474 ret = submit_bio_wait(bio);
478 prt_str(&buf, "extents pointing to same space, but first extent checksum bad:");
479 prt_printf(&buf, "\n %s ", bch2_btree_id_str(btree));
480 bch2_bkey_val_to_text(&buf, c, extent);
481 prt_printf(&buf, "\n %s ", bch2_btree_id_str(o_btree));
482 bch2_bkey_val_to_text(&buf, c, extent2);
484 struct nonce nonce = extent_nonce(extent.k->version, p.crc);
485 struct bch_csum csum = bch2_checksum(c, p.crc.csum_type, nonce, data_buf, bytes);
486 if (fsck_err_on(bch2_crc_cmp(csum, p.crc.csum),
487 c, dup_backpointer_to_bad_csum_extent,
/* "?: 1" signals "this extent was the bad one" on success */
489 ret = drop_dev_and_update(trans, btree, extent, dev) ?: 1;
495 percpu_ref_put(&ca->io_ref);
/*
 * Verify that the backpointer generated from extent @orig_k actually
 * exists in the backpointers btree.  A missing/mismatched entry may mean:
 * the write buffer hasn't been flushed yet (flush once and restart), the
 * slot is held by a backpointer to a *different* extent claiming the same
 * space (resolve the duplicate), or the backpointer genuinely needs to be
 * created.
 */
500 static int check_bp_exists(struct btree_trans *trans,
501 struct extents_to_bp_state *s,
503 struct bch_backpointer bp,
504 struct bkey_s_c orig_k)
506 struct bch_fs *c = trans->c;
507 struct btree_iter bp_iter = {};
508 struct btree_iter other_extent_iter = {};
509 struct printbuf buf = PRINTBUF;
510 struct bkey_s_c bp_k;
514 bch2_bkey_buf_init(&tmp);
516 if (!bch2_dev_bucket_exists(c, bucket)) {
517 prt_str(&buf, "extent for nonexistent device:bucket ");
518 bch2_bpos_to_text(&buf, bucket);
519 prt_str(&buf, "\n ");
520 bch2_bkey_val_to_text(&buf, c, orig_k);
521 bch_err(c, "%s", buf.buf);
522 return -BCH_ERR_fsck_repair_unimplemented;
/* bucket outside the range pinned for this pass: skip */
525 if (bpos_lt(bucket, s->bucket_start) ||
526 bpos_gt(bucket, s->bucket_end))
529 bp_k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
530 bucket_pos_to_bp(c, bucket, bp.bucket_offset),
532 ret = bkey_err(bp_k);
536 if (bp_k.k->type != KEY_TYPE_backpointer ||
537 memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
538 bch2_bkey_buf_reassemble(&tmp, c, orig_k);
/*
 * Only flush once per extent: if we already flushed for this exact
 * key and it's still missing, it's genuinely wrong
 */
540 if (!bkey_and_val_eq(orig_k, bkey_i_to_s_c(s->last_flushed.k))) {
542 bch2_trans_unlock(trans);
543 bch2_btree_interior_updates_flush(c);
546 ret = bch2_btree_write_buffer_flush_sync(trans);
550 bch2_bkey_buf_copy(&s->last_flushed, c, tmp.k);
/* restart the iteration now that the write buffer is flushed */
551 ret = -BCH_ERR_transaction_restart_write_buffer_flush;
555 goto check_existing_bp;
560 bch2_trans_iter_exit(trans, &other_extent_iter);
561 bch2_trans_iter_exit(trans, &bp_iter);
562 bch2_bkey_buf_exit(&tmp, c);
566 /* Do we have a backpointer for a different extent? */
567 if (bp_k.k->type != KEY_TYPE_backpointer)
570 struct bch_backpointer other_bp = *bkey_s_c_to_backpointer(bp_k).v;
572 struct bkey_s_c other_extent =
573 bch2_backpointer_get_key(trans, &other_extent_iter, bp_k.k->p, other_bp, 0);
574 ret = bkey_err(other_extent);
575 if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
/* same logical extent, two versions: keep the larger one */
583 if (bch2_extents_match(orig_k, other_extent)) {
584 printbuf_reset(&buf);
585 prt_printf(&buf, "duplicate versions of same extent, deleting smaller\n ");
586 bch2_bkey_val_to_text(&buf, c, orig_k);
587 prt_str(&buf, "\n ");
588 bch2_bkey_val_to_text(&buf, c, other_extent);
589 bch_err(c, "%s", buf.buf);
591 if (other_extent.k->size <= orig_k.k->size) {
592 ret = drop_dev_and_update(trans, other_bp.btree_id, other_extent, bucket.inode);
597 ret = drop_dev_and_update(trans, bp.btree_id, orig_k, bucket.inode);
/* unrelated extents on the same space: use checksums to pick a loser */
604 ret = check_extent_checksum(trans, other_bp.btree_id, other_extent, bp.btree_id, orig_k, bucket.inode);
612 ret = check_extent_checksum(trans, bp.btree_id, orig_k, other_bp.btree_id, other_extent, bucket.inode);
/* both checksums good: we don't know how to repair this yet */
620 printbuf_reset(&buf);
621 prt_printf(&buf, "duplicate extents pointing to same space on dev %llu\n ", bucket.inode);
622 bch2_bkey_val_to_text(&buf, c, orig_k);
623 prt_str(&buf, "\n ");
624 bch2_bkey_val_to_text(&buf, c, other_extent);
625 bch_err(c, "%s", buf.buf);
626 ret = -BCH_ERR_fsck_repair_unimplemented;
/* no conflicting entry: create the missing backpointer */
629 printbuf_reset(&buf);
630 prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
631 bch2_btree_id_str(bp.btree_id), bp.level);
632 bch2_bkey_val_to_text(&buf, c, orig_k);
633 prt_printf(&buf, "\n got: ");
634 bch2_bkey_val_to_text(&buf, c, bp_k);
636 struct bkey_i_backpointer n_bp_k;
637 bkey_backpointer_init(&n_bp_k.k_i);
638 n_bp_k.k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
640 prt_printf(&buf, "\n want: ");
641 bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&n_bp_k.k_i));
643 if (fsck_err(c, ptr_to_missing_backpointer, "%s", buf.buf))
644 ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);
/*
 * For every pointer in @k, compute the expected (bucket, backpointer) pair
 * and verify it exists via check_bp_exists().
 */
649 static int check_extent_to_backpointers(struct btree_trans *trans,
650 struct extents_to_bp_state *s,
651 enum btree_id btree, unsigned level,
654 struct bch_fs *c = trans->c;
655 struct bkey_ptrs_c ptrs;
656 const union bch_extent_entry *entry;
657 struct extent_ptr_decoded p;
660 ptrs = bch2_bkey_ptrs_c(k);
661 bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
662 struct bpos bucket_pos;
663 struct bch_backpointer bp;
668 bch2_extent_ptr_to_bp(c, btree, level, k, p, entry, &bucket_pos, &bp);
670 ret = check_bp_exists(trans, s, bucket_pos, bp, k);
/*
 * The btree root's own key isn't visited by normal key iteration, so check
 * its backpointers explicitly.  If the node we peek isn't the current root
 * (it changed underneath us) the iterator is torn down — NOTE(review): the
 * retry path is elided in this view.
 */
678 static int check_btree_root_to_backpointers(struct btree_trans *trans,
679 struct extents_to_bp_state *s,
680 enum btree_id btree_id,
683 struct bch_fs *c = trans->c;
684 struct btree_iter iter;
689 bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN,
690 0, bch2_btree_id_root(c, btree_id)->b->c.level, 0);
691 b = bch2_btree_iter_peek_node(&iter);
692 ret = PTR_ERR_OR_ZERO(b);
696 if (b != btree_node_root(c, b)) {
697 bch2_trans_iter_exit(trans, &iter);
/* the root's key lives at level + 1, as seen by its (nonexistent) parent */
703 k = bkey_i_to_s_c(&b->key);
704 ret = check_extent_to_backpointers(trans, s, btree_id, b->c.level + 1, k);
706 bch2_trans_iter_exit(trans, &iter);
/* Convert a backpointer to the (btree, pos) it points at. */
710 static inline struct bbpos bp_to_bbpos(struct bch_backpointer bp)
712 return (struct bbpos) {
713 .btree = bp.btree_id,
/*
 * Bytes of btree node cache we may pin, as a percentage of total RAM
 * (opts.fsck_memory_usage_percent).
 */
718 static u64 mem_may_pin_bytes(struct bch_fs *c)
723 u64 mem_bytes = i.totalram * i.mem_unit;
724 return div_u64(mem_bytes * c->opts.fsck_memory_usage_percent, 100);
/* How many btree nodes fit in the pinnable-memory budget. */
727 static size_t btree_nodes_fit_in_ram(struct bch_fs *c)
729 return div_u64(mem_may_pin_bytes(c), c->opts.btree_node_size);
/*
 * Determine how much of the btrees selected by @btree_leaf_mask /
 * @btree_interior_mask, starting at @start, fits in the memory-pin budget;
 * *end is set to where the budget ran out (BBPOS_MAX if everything fits).
 * The chosen range is recorded in the btree cache's pinned_nodes_* fields
 * so those nodes stay resident.
 */
732 static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
734 u64 btree_interior_mask,
735 struct bbpos start, struct bbpos *end)
737 struct bch_fs *c = trans->c;
738 s64 mem_may_pin = mem_may_pin_bytes(c);
/* pinning leaves implies pinning the interior nodes above them */
741 btree_interior_mask |= btree_leaf_mask;
743 c->btree_cache.pinned_nodes_leaf_mask = btree_leaf_mask;
744 c->btree_cache.pinned_nodes_interior_mask = btree_interior_mask;
745 c->btree_cache.pinned_nodes_start = start;
746 c->btree_cache.pinned_nodes_end = *end = BBPOS_MAX;
748 for (enum btree_id btree = start.btree;
749 btree < BTREE_ID_NR && !ret;
/* depth 0 walks leaves too; depth 1 walks interior nodes only */
751 unsigned depth = ((1U << btree) & btree_leaf_mask) ? 0 : 1;
752 struct btree_iter iter;
755 if (!((1U << btree) & btree_leaf_mask) &&
756 !((1U << btree) & btree_interior_mask))
759 __for_each_btree_node(trans, iter, btree,
760 btree == start.btree ? start.pos : POS_MIN,
761 0, depth, BTREE_ITER_PREFETCH, b, ret) {
762 mem_may_pin -= btree_buf_bytes(b);
763 if (mem_may_pin <= 0) {
764 c->btree_cache.pinned_nodes_end = *end =
765 BBPOS(btree, b->key.k.p);
766 bch2_trans_iter_exit(trans, &iter);
770 bch2_trans_iter_exit(trans, &iter);
/*
 * One pass of the extents -> backpointers check: for every btree, check
 * the root key, then walk every level down to the leaves (or interior-only
 * for btrees without data pointers), verifying each key's backpointers and
 * committing as we go.
 */
776 static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
777 struct extents_to_bp_state *s)
779 struct bch_fs *c = trans->c;
782 for (enum btree_id btree_id = 0;
783 btree_id < btree_id_nr_alive(c);
/* btrees without data pointers only need interior nodes checked */
785 int level, depth = btree_type_has_ptrs(btree_id) ? 0 : 1;
787 ret = commit_do(trans, NULL, NULL,
788 BCH_TRANS_COMMIT_no_enospc,
789 check_btree_root_to_backpointers(trans, s, btree_id, &level));
/* walk from the level below the root down to @depth */
793 while (level >= depth) {
794 struct btree_iter iter;
795 bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
797 BTREE_ITER_PREFETCH);
799 bch2_trans_begin(trans);
801 struct bkey_s_c k = bch2_btree_iter_peek(&iter);
805 check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
806 bch2_trans_commit(trans, NULL, NULL,
807 BCH_TRANS_COMMIT_no_enospc);
808 if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
814 if (bpos_eq(iter.pos, SPOS_MAX))
816 bch2_btree_iter_advance(&iter);
818 bch2_trans_iter_exit(trans, &iter);
/*
 * fsck entry point: verify every extent pointer has a backpointer.
 *
 * The backpointers btree is pinned in memory; when it doesn't all fit in
 * the allowed budget, the check runs in multiple passes over successive
 * bucket ranges.
 */
830 int bch2_check_extents_to_backpointers(struct bch_fs *c)
832 struct btree_trans *trans = bch2_trans_get(c);
833 struct extents_to_bp_state s = { .bucket_start = POS_MIN };
836 bch2_bkey_buf_init(&s.last_flushed);
837 bkey_init(&s.last_flushed.k->k);
/* pin as much of the backpointers btree as the memory budget allows */
841 ret = bch2_get_btree_in_memory_pos(trans,
842 BIT_ULL(BTREE_ID_backpointers),
843 BIT_ULL(BTREE_ID_backpointers),
844 BBPOS(BTREE_ID_backpointers, s.bucket_start), &end);
848 s.bucket_end = end.pos;
/* first pass didn't cover everything: warn that we'll need several */
850 if ( bpos_eq(s.bucket_start, POS_MIN) &&
851 !bpos_eq(s.bucket_end, SPOS_MAX))
852 bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
853 __func__, btree_nodes_fit_in_ram(c));
855 if (!bpos_eq(s.bucket_start, POS_MIN) ||
856 !bpos_eq(s.bucket_end, SPOS_MAX)) {
857 struct printbuf buf = PRINTBUF;
859 prt_str(&buf, "check_extents_to_backpointers(): ");
860 bch2_bpos_to_text(&buf, s.bucket_start);
862 bch2_bpos_to_text(&buf, s.bucket_end);
864 bch_verbose(c, "%s", buf.buf);
868 ret = bch2_check_extents_to_backpointers_pass(trans, &s);
869 if (ret || bpos_eq(s.bucket_end, SPOS_MAX))
872 s.bucket_start = bpos_successor(s.bucket_end);
874 bch2_trans_put(trans);
875 bch2_bkey_buf_exit(&s.last_flushed, c);
/* drop the node pins we set up above */
877 c->btree_cache.pinned_nodes_leaf_mask = 0;
878 c->btree_cache.pinned_nodes_interior_mask = 0;
/*
 * Verify one backpointer within [start, end] points at a live extent or
 * btree node.  If the target is missing, flush the write buffer once (in
 * case the backpointer itself was stale) and retry; if still missing,
 * delete the dangling backpointer.
 */
884 static int check_one_backpointer(struct btree_trans *trans,
887 struct bkey_s_c_backpointer bp,
888 struct bpos *last_flushed_pos)
890 struct bch_fs *c = trans->c;
891 struct btree_iter iter;
892 struct bbpos pos = bp_to_bbpos(*bp.v);
894 struct printbuf buf = PRINTBUF;
/* outside the range covered by this pass */
897 if (bbpos_cmp(pos, start) < 0 ||
898 bbpos_cmp(pos, end) > 0)
901 k = bch2_backpointer_get_key(trans, &iter, bp.k->p, *bp.v, 0);
903 if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
/* only flush once per backpointer position */
908 if (!k.k && !bpos_eq(*last_flushed_pos, bp.k->p)) {
909 *last_flushed_pos = bp.k->p;
910 ret = bch2_btree_write_buffer_flush_sync(trans) ?:
911 -BCH_ERR_transaction_restart_write_buffer_flush;
915 if (fsck_err_on(!k.k, c,
916 backpointer_to_missing_ptr,
917 "backpointer for missing %s\n %s",
918 bp.v->level ? "btree node" : "extent",
919 (bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
920 ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
925 bch2_trans_iter_exit(trans, &iter);
/*
 * One pass: walk the whole backpointers btree, checking each backpointer
 * in [start, end] and committing per key.
 */
930 static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
934 struct bpos last_flushed_pos = SPOS_MAX;
936 return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
937 POS_MIN, BTREE_ITER_PREFETCH, k,
938 NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
939 check_one_backpointer(trans, start, end,
940 bkey_s_c_to_backpointer(k),
/*
 * fsck entry point: verify every backpointer points at a live extent.
 *
 * Mirror of bch2_check_extents_to_backpointers(): the extents and reflink
 * btrees are pinned in memory; if they don't fit, the check runs in
 * multiple passes over successive (btree, pos) ranges.
 */
944 int bch2_check_backpointers_to_extents(struct bch_fs *c)
946 struct btree_trans *trans = bch2_trans_get(c);
947 struct bbpos start = (struct bbpos) { .btree = 0, .pos = POS_MIN, }, end;
951 ret = bch2_get_btree_in_memory_pos(trans,
952 (1U << BTREE_ID_extents)|
953 (1U << BTREE_ID_reflink),
/* budget exhausted before covering everything: multiple passes needed */
959 if (!bbpos_cmp(start, BBPOS_MIN) &&
960 bbpos_cmp(end, BBPOS_MAX))
961 bch_verbose(c, "%s(): extents do not fit in ram, running in multiple passes with %zu nodes per pass",
962 __func__, btree_nodes_fit_in_ram(c));
964 if (bbpos_cmp(start, BBPOS_MIN) ||
965 bbpos_cmp(end, BBPOS_MAX)) {
966 struct printbuf buf = PRINTBUF;
968 prt_str(&buf, "check_backpointers_to_extents(): ");
969 bch2_bbpos_to_text(&buf, start);
971 bch2_bbpos_to_text(&buf, end);
973 bch_verbose(c, "%s", buf.buf);
977 ret = bch2_check_backpointers_to_extents_pass(trans, start, end);
978 if (ret || !bbpos_cmp(end, BBPOS_MAX))
981 start = bbpos_successor(end);
983 bch2_trans_put(trans);
/* drop the node pins set up above */
985 c->btree_cache.pinned_nodes_leaf_mask = 0;
986 c->btree_cache.pinned_nodes_interior_mask = 0;