// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2017-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <djwong@kernel.org>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_icache.h"
#include "xfs_alloc.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc.h"
#include "xfs_ialloc_btree.h"
#include "xfs_refcount_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_log.h"
#include "xfs_trans_priv.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2_priv.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_exchmaps.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/health.h"

/* Common code for the metadata scrubbers. */

/*
 * Handling operational errors.
 *
 * The *_process_error() family of functions are used to process error return
 * codes from functions called as part of a scrub operation.
 *
 * If there's no error, we return true to tell the caller that it's ok
 * to move on to the next check in its list.
 *
 * For non-verifier errors (e.g. ENOMEM) we return false to tell the
 * caller that something bad happened, and we preserve *error so that
 * the caller can return the *error up the stack to userspace.
 *
 * Verifier errors (EFSBADCRC/EFSCORRUPTED) are recorded by setting
 * OFLAG_CORRUPT in sm_flags and the *error is cleared.  In other words,
 * we track verifier errors (and failed scrub checks) via OFLAG_CORRUPT,
 * not via return codes.  We return false to tell the caller that
 * something bad happened.  Since the error has been cleared, the caller
 * will (presumably) return that zero and scrubbing will move on to
 * the next entry point.
 *
 * ftrace can be used to record the precise metadata location and the
 * approximate code location of the failed operation.
 */
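
/*
 * Illustrative usage sketch (not part of the original file; the callee
 * xchk_example_read() is made up):
 *
 *	error = xchk_example_read(sc, agno, bno);
 *	if (!xchk_process_error(sc, agno, bno, &error))
 *		return error;	// zero if only corruption was recorded
 *	// ...continue with the next check...
 */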

/* Check for operational errors. */
static bool
__xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(
				sc->ip ? sc->ip : XFS_I(file_inode(sc->file)),
				sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly.  Set error to zero and do not continue.
		 */
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_op_error(sc, agno, bno, *error, ret_ip);
		break;
	}
	return false;
}

bool
xchk_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_xref_process_error(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	xfs_agblock_t		bno,
	int			*error)
{
	return __xchk_process_error(sc, agno, bno, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/* Check for operational errors for a file offset. */
static bool
__xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error,
	__u32			errflag,
	void			*ret_ip)
{
	switch (*error) {
	case 0:
		return true;
	case -EDEADLOCK:
	case -ECHRNG:
		/* Used to restart an op with deadlock avoidance. */
		trace_xchk_deadlock_retry(sc->ip, sc->sm, *error);
		break;
	case -ECANCELED:
		/*
		 * ECANCELED here means that the caller set one of the scrub
		 * outcome flags (corrupt, xfail, xcorrupt) and wants to exit
		 * quickly.  Set error to zero and do not continue.
		 */
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		*error = 0;
		break;
	case -EFSBADCRC:
	case -EFSCORRUPTED:
		/* Note the badness but don't abort. */
		sc->sm->sm_flags |= errflag;
		*error = 0;
		fallthrough;
	default:
		trace_xchk_file_op_error(sc, whichfork, offset, *error,
				ret_ip);
		break;
	}
	return false;
}

bool
xchk_fblock_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_CORRUPT, __return_address);
}

bool
xchk_fblock_xref_process_error(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset,
	int			*error)
{
	return __xchk_fblock_process_error(sc, whichfork, offset, error,
			XFS_SCRUB_OFLAG_XFAIL, __return_address);
}

/*
 * Handling scrub corruption/optimization/warning checks.
 *
 * The *_set_{corrupt,preen,warning}() family of functions are used to
 * record the presence of metadata that is incorrect (corrupt), could be
 * optimized somehow (preen), or should be flagged for administrative
 * review but is not incorrect (warn).
 *
 * ftrace can be used to record the precise metadata location and
 * approximate code location of the failed check.
 */
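
/*
 * Illustrative sketch (not part of the original file): a check that spots a
 * bad AGF field flags the buffer and keeps scanning; the flag, not a return
 * code, is what reaches userspace.  agf and agf_bp are hypothetical locals.
 *
 *	if (be32_to_cpu(agf->agf_seqno) != pag->pag_agno)
 *		xchk_block_set_corrupt(sc, agf_bp);
 */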

/* Record a block which could be optimized. */
void
xchk_block_set_preen(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_block_preen(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record an inode which could be optimized.  The trace data will
 * include the block given by bp if bp is given; otherwise it will use
 * the block location of the inode record itself.
 */
void
xchk_ino_set_preen(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_PREEN;
	trace_xchk_ino_preen(sc, ino, __return_address);
}

/* Record something being wrong with the filesystem primary superblock. */
void
xchk_set_corrupt(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fs_error(sc, 0, __return_address);
}

/* Record a corrupt block. */
void
xchk_block_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

#ifdef CONFIG_XFS_QUOTA
/* Record a corrupt quota counter. */
void
xchk_qcheck_set_corrupt(
	struct xfs_scrub	*sc,
	unsigned int		dqtype,
	xfs_dqid_t		id)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_qcheck_error(sc, dqtype, id, __return_address);
}
#endif

/* Record a corruption while cross-referencing. */
void
xchk_block_xref_set_corrupt(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), __return_address);
}

/*
 * Record a corrupt inode.  The trace data will include the block given
 * by bp if bp is given; otherwise it will use the block location of the
 * inode record itself.
 */
void
xchk_ino_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record a corruption while cross-referencing with an inode. */
void
xchk_ino_xref_set_corrupt(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_ino_error(sc, ino, __return_address);
}

/* Record corruption in a block indexed by a file fork. */
void
xchk_fblock_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/* Record a corruption while cross-referencing a fork block. */
void
xchk_fblock_xref_set_corrupt(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XCORRUPT;
	trace_xchk_fblock_error(sc, whichfork, offset, __return_address);
}

/*
 * Warn about inodes that need administrative review but are not
 * incorrect.
 */
void
xchk_ino_set_warning(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_ino_warning(sc, ino, __return_address);
}

/* Warn about a block indexed by a file fork that needs review. */
void
xchk_fblock_set_warning(
	struct xfs_scrub	*sc,
	int			whichfork,
	xfs_fileoff_t		offset)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_WARNING;
	trace_xchk_fblock_warning(sc, whichfork, offset, __return_address);
}

/* Signal an incomplete scrub. */
void
xchk_set_incomplete(
	struct xfs_scrub	*sc)
{
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_INCOMPLETE;
	trace_xchk_incomplete(sc, __return_address);
}

/*
 * rmap scrubbing -- compute the number of blocks with a given owner,
 * at least according to the reverse mapping data.
 */

struct xchk_rmap_ownedby_info {
	const struct xfs_owner_info	*oinfo;
	xfs_filblks_t			*blocks;
};

STATIC int
xchk_count_rmap_ownedby_irec(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xchk_rmap_ownedby_info	*sroi = priv;
	bool				irec_attr;
	bool				oinfo_attr;

	irec_attr = rec->rm_flags & XFS_RMAP_ATTR_FORK;
	oinfo_attr = sroi->oinfo->oi_flags & XFS_OWNER_INFO_ATTR_FORK;

	if (rec->rm_owner != sroi->oinfo->oi_owner)
		return 0;

	if (XFS_RMAP_NON_INODE_OWNER(rec->rm_owner) || irec_attr == oinfo_attr)
		(*sroi->blocks) += rec->rm_blockcount;

	return 0;
}

/*
 * Calculate the number of blocks the rmap thinks are owned by something.
 * The caller should pass us an rmapbt cursor.
 */
int
xchk_count_rmap_ownedby_ag(
	struct xfs_scrub		*sc,
	struct xfs_btree_cur		*cur,
	const struct xfs_owner_info	*oinfo,
	xfs_filblks_t			*blocks)
{
	struct xchk_rmap_ownedby_info	sroi = {
		.oinfo			= oinfo,
		.blocks			= blocks,
	};

	*blocks = 0;
	return xfs_rmap_query_all(cur, xchk_count_rmap_ownedby_irec,
			&sroi);
}
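
/*
 * Illustrative sketch (hypothetical caller, not part of the original file):
 * count the blocks the rmapbt attributes to the whole filesystem and
 * cross-check the result.
 *
 *	xfs_filblks_t	blocks;
 *
 *	error = xchk_count_rmap_ownedby_ag(sc, sc->sa.rmap_cur,
 *			&XFS_RMAP_OINFO_FS, &blocks);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.rmap_cur))
 *		return;
 */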

/*
 * These helpers facilitate locking an allocation group's header
 * buffers, setting up cursors for all btrees that are present, and
 * cleaning everything up once we're through.
 */

/* Decide if we want to return an AG header read failure. */
static bool
want_ag_read_header_failure(
	struct xfs_scrub	*sc,
	unsigned int		type)
{
	/* Return all AG header read failures when scanning btrees. */
	if (sc->sm->sm_type != XFS_SCRUB_TYPE_AGF &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGFL &&
	    sc->sm->sm_type != XFS_SCRUB_TYPE_AGI)
		return true;
	/*
	 * If we're scanning a given type of AG header, we only want to
	 * see read failures from that specific header.  We'd like the
	 * other headers to cross-check them, but this isn't required.
	 */
	if (sc->sm->sm_type == type)
		return true;
	return false;
}

/*
 * Grab the AG header buffers for the attached perag structure.
 *
 * The headers should be released by xchk_ag_free, but as a fail safe we attach
 * all the buffers we grab to the scrub transaction so they'll all be freed
 * when we cancel it.
 */
static inline int
xchk_perag_read_headers(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	int			error;

	error = xfs_ialloc_read_agi(sa->pag, sc->tp, 0, &sa->agi_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
		return error;

	error = xfs_alloc_read_agf(sa->pag, sc->tp, 0, &sa->agf_bp);
	if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGF))
		return error;

	return 0;
}

/*
 * Grab the AG headers for the attached perag structure and wait for pending
 * intents to drain.
 */
static int
xchk_perag_drain_and_lock(
	struct xfs_scrub	*sc)
{
	struct xchk_ag		*sa = &sc->sa;
	int			error = 0;

	ASSERT(sa->pag != NULL);
	ASSERT(sa->agi_bp == NULL);
	ASSERT(sa->agf_bp == NULL);

	do {
		if (xchk_should_terminate(sc, &error))
			return error;

		error = xchk_perag_read_headers(sc, sa);
		if (error)
			return error;

		/*
		 * If we've grabbed an inode for scrubbing then we assume that
		 * holding its ILOCK will suffice to coordinate with any intent
		 * chains involving this inode.
		 */
		if (sc->ip)
			return 0;

		/*
		 * Decide if this AG is quiet enough for all metadata to be
		 * consistent with each other.  XFS allows the AG header buffer
		 * locks to cycle across transaction rolls while processing
		 * chains of deferred ops, which means that there could be
		 * other threads in the middle of processing a chain of
		 * deferred ops.  For regular operations we are careful about
		 * ordering operations to prevent collisions between threads
		 * (which is why we don't need a per-AG lock), but scrub and
		 * repair have to serialize against chained operations.
		 *
		 * We just locked all the AG header buffers; now take a look
		 * to see if there are any intents in progress.  If there are,
		 * drop the AG headers and wait for the intents to drain.
		 * Since we hold all the AG header locks for the duration of
		 * the scrub, this is the only time we have to sample the
		 * intents counter; any threads increasing it after this point
		 * can't possibly be in the middle of a chain of AG metadata
		 * updates.
		 *
		 * Obviously, this should be slanted against scrub and in favor
		 * of runtime threads.
		 */
		if (!xfs_perag_intent_busy(sa->pag))
			return 0;

		if (sa->agf_bp) {
			xfs_trans_brelse(sc->tp, sa->agf_bp);
			sa->agf_bp = NULL;
		}

		if (sa->agi_bp) {
			xfs_trans_brelse(sc->tp, sa->agi_bp);
			sa->agi_bp = NULL;
		}

		if (!(sc->flags & XCHK_FSGATES_DRAIN))
			return -ECHRNG;
		error = xfs_perag_intent_drain(sa->pag);
		if (error == -ERESTARTSYS)
			error = -EINTR;
	} while (!error);

	return error;
}

/*
 * Grab the per-AG structure, grab all AG header buffers, and wait until there
 * aren't any pending intents.  Returns -ENOENT if we can't grab the perag
 * structure.
 */
int
xchk_ag_read_headers(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	ASSERT(!sa->pag);
	sa->pag = xfs_perag_get(mp, agno);
	if (!sa->pag)
		return -ENOENT;

	return xchk_perag_drain_and_lock(sc);
}

/* Release all the AG btree cursors. */
void
xchk_ag_btcur_free(
	struct xchk_ag		*sa)
{
	if (sa->refc_cur)
		xfs_btree_del_cursor(sa->refc_cur, XFS_BTREE_ERROR);
	if (sa->rmap_cur)
		xfs_btree_del_cursor(sa->rmap_cur, XFS_BTREE_ERROR);
	if (sa->fino_cur)
		xfs_btree_del_cursor(sa->fino_cur, XFS_BTREE_ERROR);
	if (sa->ino_cur)
		xfs_btree_del_cursor(sa->ino_cur, XFS_BTREE_ERROR);
	if (sa->cnt_cur)
		xfs_btree_del_cursor(sa->cnt_cur, XFS_BTREE_ERROR);
	if (sa->bno_cur)
		xfs_btree_del_cursor(sa->bno_cur, XFS_BTREE_ERROR);

	sa->refc_cur = NULL;
	sa->rmap_cur = NULL;
	sa->fino_cur = NULL;
	sa->ino_cur = NULL;
	sa->cnt_cur = NULL;
	sa->bno_cur = NULL;
}

/* Initialize all the btree cursors for an AG. */
void
xchk_ag_btcur_init(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	struct xfs_mount	*mp = sc->mp;

	if (sa->agf_bp) {
		/* Set up a bnobt cursor for cross-referencing. */
		sa->bno_cur = xfs_bnobt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->bno_cur,
				XFS_SCRUB_TYPE_BNOBT);

		/* Set up a cntbt cursor for cross-referencing. */
		sa->cnt_cur = xfs_cntbt_init_cursor(mp, sc->tp, sa->agf_bp,
				sa->pag);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->cnt_cur,
				XFS_SCRUB_TYPE_CNTBT);

		/* Set up a rmapbt cursor for cross-referencing. */
		if (xfs_has_rmapbt(mp)) {
			sa->rmap_cur = xfs_rmapbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->rmap_cur,
					XFS_SCRUB_TYPE_RMAPBT);
		}

		/* Set up a refcountbt cursor for cross-referencing. */
		if (xfs_has_reflink(mp)) {
			sa->refc_cur = xfs_refcountbt_init_cursor(mp, sc->tp,
					sa->agf_bp, sa->pag);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->refc_cur,
					XFS_SCRUB_TYPE_REFCNTBT);
		}
	}

	if (sa->agi_bp) {
		/* Set up an inobt cursor for cross-referencing. */
		sa->ino_cur = xfs_inobt_init_cursor(sa->pag, sc->tp,
				sa->agi_bp);
		xchk_ag_btree_del_cursor_if_sick(sc, &sa->ino_cur,
				XFS_SCRUB_TYPE_INOBT);

		/* Set up a finobt cursor for cross-referencing. */
		if (xfs_has_finobt(mp)) {
			sa->fino_cur = xfs_finobt_init_cursor(sa->pag, sc->tp,
					sa->agi_bp);
			xchk_ag_btree_del_cursor_if_sick(sc, &sa->fino_cur,
					XFS_SCRUB_TYPE_FINOBT);
		}
	}
}

/* Release the AG header context and btree cursors. */
void
xchk_ag_free(
	struct xfs_scrub	*sc,
	struct xchk_ag		*sa)
{
	xchk_ag_btcur_free(sa);
	xrep_reset_perag_resv(sc);
	if (sa->agf_bp) {
		xfs_trans_brelse(sc->tp, sa->agf_bp);
		sa->agf_bp = NULL;
	}
	if (sa->agi_bp) {
		xfs_trans_brelse(sc->tp, sa->agi_bp);
		sa->agi_bp = NULL;
	}
	if (sa->pag) {
		xfs_perag_put(sa->pag);
		sa->pag = NULL;
	}
}

/*
 * For scrub, grab the perag structure, the AGI, and the AGF headers, in that
 * order.  Locking order requires us to get the AGI before the AGF.  We use the
 * transaction to avoid deadlocking on crosslinked metadata buffers; either the
 * caller passes one in (bmap scrub) or we have to create a transaction
 * ourselves.  Returns ENOENT if the perag struct cannot be grabbed.
 */
int
xchk_ag_init(
	struct xfs_scrub	*sc,
	xfs_agnumber_t		agno,
	struct xchk_ag		*sa)
{
	int			error;

	error = xchk_ag_read_headers(sc, agno, sa);
	if (error)
		return error;

	xchk_ag_btcur_init(sc, sa);
	return 0;
}
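
/*
 * Illustrative sketch (hypothetical caller): an AG scrubber attaches the AG
 * named in the scrub request to sc->sa; scrub teardown releases everything
 * via xchk_ag_free().
 *
 *	error = xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
 *	if (!xchk_process_error(sc, sc->sm->sm_agno, 0, &error))
 *		return error;
 */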

/* Per-scrubber setup functions */

void
xchk_trans_cancel(
	struct xfs_scrub	*sc)
{
	xfs_trans_cancel(sc->tp);
	sc->tp = NULL;
}

int
xchk_trans_alloc_empty(
	struct xfs_scrub	*sc)
{
	return xfs_trans_alloc_empty(sc->mp, &sc->tp);
}

/*
 * Grab an empty transaction so that we can re-grab locked buffers if
 * one of our btrees turns out to be cyclic.
 *
 * If we're going to repair something, we need to ask for the largest possible
 * log reservation so that we can handle the worst case scenario for metadata
 * updates while rebuilding a metadata item.  We also need to reserve as many
 * blocks in the head transaction as we think we're going to need to rebuild
 * the metadata object.
 */
int
xchk_trans_alloc(
	struct xfs_scrub	*sc,
	uint			resblks)
{
	if (sc->sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR)
		return xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
				resblks, 0, 0, &sc->tp);

	return xchk_trans_alloc_empty(sc);
}

/* Set us up with a transaction and an empty context. */
int
xchk_setup_fs(
	struct xfs_scrub	*sc)
{
	uint			resblks;

	resblks = xrep_calc_ag_resblks(sc);
	return xchk_trans_alloc(sc, resblks);
}

/* Set us up with AG headers and btree cursors. */
int
xchk_setup_ag_btree(
	struct xfs_scrub	*sc,
	bool			force_log)
{
	struct xfs_mount	*mp = sc->mp;
	int			error;

	/*
	 * If the caller asks us to checkpoint the log, do so.  This
	 * expensive operation should be performed infrequently and only
	 * as a last resort.  Any caller that sets force_log should
	 * document why they need to do so.
	 */
	if (force_log) {
		error = xchk_checkpoint_log(mp);
		if (error)
			return error;
	}

	error = xchk_setup_fs(sc);
	if (error)
		return error;

	return xchk_ag_init(sc, sc->sm->sm_agno, &sc->sa);
}

/* Push everything out of the log onto disk. */
int
xchk_checkpoint_log(
	struct xfs_mount	*mp)
{
	int			error;

	error = xfs_log_force(mp, XFS_LOG_SYNC);
	if (error)
		return error;
	xfs_ail_push_all_sync(mp->m_ail);
	return 0;
}

/* Verify that an inode is allocated ondisk, then return its cached inode. */
int
xchk_iget(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_inode	**ipp)
{
	ASSERT(sc->tp != NULL);

	return xfs_iget(sc->mp, sc->tp, inum, XFS_IGET_UNTRUSTED, 0, ipp);
}

/*
 * Try to grab an inode in a manner that avoids races with physical inode
 * allocation.  If we can't, return the locked AGI buffer so that the caller
 * can single-step the loading process to see where things went wrong.
 * Callers must have a valid scrub transaction.
 *
 * If the iget succeeds, return 0, a NULL AGI, and the inode.
 *
 * If the iget fails, return the error, the locked AGI, and a NULL inode.  This
 * can include -EINVAL and -ENOENT for invalid inode numbers or inodes that are
 * no longer allocated; or any other corruption or runtime error.
 *
 * If the AGI read fails, return the error, a NULL AGI, and NULL inode.
 *
 * If a fatal signal is pending, return -EINTR, a NULL AGI, and a NULL inode.
 */
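
/*
 * Illustrative sketch (hypothetical caller): single-step a failed iget with
 * the AGI held so that nobody can allocate or free inodes while we decide
 * whether the failure means corruption.
 *
 *	error = xchk_iget_agi(sc, inum, &agi_bp, &ip);
 *	if (error == 0)
 *		return xchk_install_handle_inode(sc, ip);
 *	// on failure, agi_bp (if non-NULL) pins the inode btrees so the
 *	// caller can probe them, as xchk_iget_for_scrubbing() does below
 */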

int
xchk_iget_agi(
	struct xfs_scrub	*sc,
	xfs_ino_t		inum,
	struct xfs_buf		**agi_bpp,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_trans	*tp = sc->tp;
	struct xfs_perag	*pag;
	int			error;

	ASSERT(sc->tp != NULL);

again:
	*agi_bpp = NULL;
	*ipp = NULL;
	error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * Attach the AGI buffer to the scrub transaction to avoid deadlocks
	 * in the iget cache miss path.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
	error = xfs_ialloc_read_agi(pag, tp, 0, agi_bpp);
	xfs_perag_put(pag);
	if (error)
		return error;

	error = xfs_iget(mp, tp, inum,
			XFS_IGET_NORETRY | XFS_IGET_UNTRUSTED, 0, ipp);
	if (error == -EAGAIN) {
		/*
		 * The inode may be in core but temporarily unavailable and may
		 * require the AGI buffer before it can be returned.  Drop the
		 * AGI buffer and retry the lookup.
		 *
		 * Incore lookup will fail with EAGAIN on a cache hit if the
		 * inode is queued to the inactivation list.  The inactivation
		 * worker may remove the inode from the unlinked list and hence
		 * needs the AGI.
		 *
		 * Hence xchk_iget_agi() needs to drop the AGI lock on EAGAIN
		 * to allow inodegc to make progress and move the inode to
		 * IRECLAIMABLE state where xfs_iget will be able to return it
		 * again if it can lock the inode.
		 */
		xfs_trans_brelse(tp, *agi_bpp);
		delay(1);
		goto again;
	}
	if (error)
		return error;

	/* We got the inode, so we can release the AGI. */
	ASSERT(*ipp != NULL);
	xfs_trans_brelse(tp, *agi_bpp);
	*agi_bpp = NULL;

	return 0;
}

#ifdef CONFIG_XFS_QUOTA
/*
 * Try to attach dquots to this inode if we think we might want to repair it.
 * Callers must not hold any ILOCKs.  If the dquots are broken and cannot be
 * attached, a quotacheck will be scheduled.
 */
int
xchk_ino_dqattach(
	struct xfs_scrub	*sc)
{
	ASSERT(sc->tp != NULL);
	ASSERT(sc->ip != NULL);

	if (!xchk_could_repair(sc))
		return 0;

	return xrep_ino_dqattach(sc);
}
#endif

/* Install an inode that we opened by handle for scrubbing. */
int
xchk_install_handle_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (VFS_I(ip)->i_generation != sc->sm->sm_gen) {
		xchk_irele(sc, ip);
		return -ENOENT;
	}

	sc->ip = ip;
	return 0;
}

/*
 * Install an already-referenced inode for scrubbing.  Get our own reference to
 * the inode to make disposal simpler.  The inode must not be in I_FREEING or
 * I_WILL_FREE.
 */
int
xchk_install_live_inode(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (!igrab(VFS_I(ip))) {
		xchk_ino_set_corrupt(sc, ip->i_ino);
		return -EFSCORRUPTED;
	}

	sc->ip = ip;
	return 0;
}

/*
 * In preparation to scrub metadata structures that hang off of an inode,
 * grab either the inode referenced in the scrub control structure or the
 * inode passed in.  If the inumber does not reference an allocated inode
 * record, the function returns ENOENT to end the scrub early.  The inode
 * is not locked.
 */
int
xchk_iget_for_scrubbing(
	struct xfs_scrub	*sc)
{
	struct xfs_imap		imap;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag;
	struct xfs_buf		*agi_bp;
	struct xfs_inode	*ip_in = XFS_I(file_inode(sc->file));
	struct xfs_inode	*ip = NULL;
	xfs_agnumber_t		agno = XFS_INO_TO_AGNO(mp, sc->sm->sm_ino);
	int			error;

	ASSERT(sc->tp == NULL);

	/* We want to scan the inode we already had opened. */
	if (sc->sm->sm_ino == 0 || sc->sm->sm_ino == ip_in->i_ino)
		return xchk_install_live_inode(sc, ip_in);

	/* Reject internal metadata files and obviously bad inode numbers. */
	if (xfs_internal_inum(mp, sc->sm->sm_ino))
		return -ENOENT;
	if (!xfs_verify_ino(sc->mp, sc->sm->sm_ino))
		return -ENOENT;

	/* Try a safe untrusted iget. */
	error = xchk_iget_safe(sc, sc->sm->sm_ino, &ip);
	if (!error)
		return xchk_install_handle_inode(sc, ip);
	if (error == -ENOENT)
		return error;
	if (error != -EINVAL)
		goto out_error;

	/*
	 * EINVAL with IGET_UNTRUSTED probably means one of several things:
	 * userspace gave us an inode number that doesn't correspond to fs
	 * space; the inode btree lacks a record for this inode; or there is a
	 * record, and it says this inode is free.
	 *
	 * We want to look up this inode in the inobt to distinguish two
	 * scenarios: (1) the inobt says the inode is free, in which case
	 * there's nothing to do; and (2) the inobt says the inode is
	 * allocated, but loading it failed due to corruption.
	 *
	 * Allocate a transaction and grab the AGI to prevent inobt activity
	 * in this AG.  Retry the iget in case someone allocated a new inode
	 * after the first iget failed.
	 */
	error = xchk_trans_alloc(sc, 0);
	if (error)
		goto out_error;

	error = xchk_iget_agi(sc, sc->sm->sm_ino, &agi_bp, &ip);
	if (error == 0) {
		/* Actually got the inode, so install it. */
		xchk_trans_cancel(sc);
		return xchk_install_handle_inode(sc, ip);
	}
	if (error == -ENOENT)
		goto out_gone;
	if (error != -EINVAL)
		goto out_cancel;

	/* Ensure that we have protected against inode allocation/freeing. */
	if (agi_bp == NULL) {
		ASSERT(agi_bp != NULL);
		error = -ECANCELED;
		goto out_cancel;
	}

	/*
	 * Untrusted iget failed a second time.  Let's try an inobt lookup.
	 * If the inobt says the inode neither can exist inside the
	 * filesystem nor is allocated, return ENOENT to signal that the check
	 * should be skipped.
	 *
	 * If the lookup returns corruption, we'll mark this inode corrupt and
	 * exit to userspace.  There's little chance of fixing anything until
	 * the inobt is straightened out, but there's nothing we can do here.
	 *
	 * If the lookup encounters any other error, exit to userspace.
	 *
	 * If the lookup succeeds, something else must be very wrong in the fs
	 * such that setting up the incore inode failed in some strange way.
	 * Treat those as corruptions.
	 */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sc->sm->sm_ino));
	if (!pag) {
		error = -EFSCORRUPTED;
		goto out_cancel;
	}

	error = xfs_imap(pag, sc->tp, sc->sm->sm_ino, &imap,
			XFS_IGET_UNTRUSTED);
	xfs_perag_put(pag);
	if (error == -EINVAL || error == -ENOENT)
		goto out_gone;
	if (!error)
		error = -EFSCORRUPTED;

out_cancel:
	xchk_trans_cancel(sc);
out_error:
	trace_xchk_op_error(sc, agno, XFS_INO_TO_AGBNO(mp, sc->sm->sm_ino),
			error, __return_address);
	return error;
out_gone:
	/* The file is gone, so there's nothing to check. */
	xchk_trans_cancel(sc);
	return -ENOENT;
}

/* Release an inode, possibly dropping it in the process. */
void
xchk_irele(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	if (sc->tp) {
		/*
		 * If we are in a transaction, we /cannot/ drop the inode
		 * ourselves, because the VFS will trigger writeback, which
		 * can require a transaction.  Clear DONTCACHE to force the
		 * inode to the LRU, where someone else can take care of
		 * dropping it.
		 *
		 * Note that when we grabbed our reference to the inode, it
		 * could have had an active ref and DONTCACHE set if a sysadmin
		 * is trying to coerce a change in file access mode.  icache
		 * hits do not clear DONTCACHE, so we must do it here.
		 */
		spin_lock(&VFS_I(ip)->i_lock);
		VFS_I(ip)->i_state &= ~I_DONTCACHE;
		spin_unlock(&VFS_I(ip)->i_lock);
	} else if (atomic_read(&VFS_I(ip)->i_count) == 1) {
		/*
		 * If this is the last reference to the inode and the caller
		 * permits it, set DONTCACHE to avoid thrashing.
		 */
		d_mark_dontcache(VFS_I(ip));
	}

	xfs_irele(ip);
}

/*
 * Set us up to scrub metadata mapped by a file's fork.  Callers must not use
 * this to operate on user-accessible regular file data because the MMAPLOCK is
 * not taken.
 */
int
xchk_setup_inode_contents(
	struct xfs_scrub	*sc,
	unsigned int		resblks)
{
	int			error;

	error = xchk_iget_for_scrubbing(sc);
	if (error)
		return error;

	/* Lock the inode so the VFS cannot touch this file. */
	xchk_ilock(sc, XFS_IOLOCK_EXCL);

	error = xchk_trans_alloc(sc, resblks);
	if (error)
		goto out;

	error = xchk_ino_dqattach(sc);
	if (error)
		goto out;

	xchk_ilock(sc, XFS_ILOCK_EXCL);
out:
	/* scrub teardown will unlock and release the inode for us */
	return error;
}

void
xchk_ilock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	xfs_ilock(sc->ip, ilock_flags);
	sc->ilock_flags |= ilock_flags;
}

bool
xchk_ilock_nowait(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	if (xfs_ilock_nowait(sc->ip, ilock_flags)) {
		sc->ilock_flags |= ilock_flags;
		return true;
	}

	return false;
}

void
xchk_iunlock(
	struct xfs_scrub	*sc,
	unsigned int		ilock_flags)
{
	sc->ilock_flags &= ~ilock_flags;
	xfs_iunlock(sc->ip, ilock_flags);
}
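
/*
 * Illustrative sketch (not part of the original file): scrubbers take and
 * drop the ILOCK through these wrappers so that sc->ilock_flags always
 * mirrors what is held and teardown can release the remainder.
 *
 *	xchk_ilock(sc, XFS_ILOCK_EXCL);
 *	// ...examine the fork mappings...
 *	xchk_iunlock(sc, XFS_ILOCK_EXCL);
 */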

/*
 * Predicate that decides if we need to evaluate the cross-reference check.
 * If there was an error accessing the cross-reference btree, just delete
 * the cursor and skip the check.
 */
bool
xchk_should_check_xref(
	struct xfs_scrub	*sc,
	int			*error,
	struct xfs_btree_cur	**curpp)
{
	/* No point in xref if we already know we're corrupt. */
	if (xchk_skip_xref(sc->sm))
		return false;

	if (*error == 0)
		return true;

	if (curpp) {
		/* If we've already given up on xref, just bail out. */
		if (!*curpp)
			return false;

		/* xref error, delete cursor and bail out. */
		xfs_btree_del_cursor(*curpp, XFS_BTREE_ERROR);
		*curpp = NULL;
	}

	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_XFAIL;
	trace_xchk_xref_error(sc, *error, __return_address);

	/*
	 * Errors encountered during cross-referencing with another
	 * data structure should not cause this scrubber to abort.
	 */
	*error = 0;
	return false;
}
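
/*
 * Illustrative sketch (hypothetical xref helper): every cross-reference
 * lookup funnels its status through this predicate so that a broken
 * secondary btree records XFAIL instead of aborting the scrub.
 *
 *	error = xfs_alloc_has_records(sc->sa.bno_cur, agbno, len, &outcome);
 *	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
 *		return;
 *	if (outcome != XBTREE_RECPACKING_FULL)
 *		xchk_block_xref_set_corrupt(sc, sc->sa.agf_bp);
 */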

/* Run the structure verifiers on in-memory buffers to detect bad memory. */
void
xchk_buffer_recheck(
	struct xfs_scrub	*sc,
	struct xfs_buf		*bp)
{
	xfs_failaddr_t		fa;

	if (bp->b_ops == NULL) {
		xchk_block_set_corrupt(sc, bp);
		return;
	}
	if (bp->b_ops->verify_struct == NULL) {
		xchk_set_incomplete(sc);
		return;
	}
	fa = bp->b_ops->verify_struct(bp);
	if (!fa)
		return;
	sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT;
	trace_xchk_block_error(sc, xfs_buf_daddr(bp), fa);
}
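
/*
 * Illustrative sketch (hypothetical caller): after reading a metadata buffer
 * through the usual verifier path, re-run the structure verifier to catch
 * memory corruption in buffers that were already cached.
 *
 *	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp, daddr,
 *			XFS_FSB_TO_BB(mp, 1), 0, &bp, ops);
 *	if (!xchk_process_error(sc, agno, agbno, &error))
 *		return error;
 *	xchk_buffer_recheck(sc, bp);
 */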

static inline int
xchk_metadata_inode_subtype(
	struct xfs_scrub	*sc,
	unsigned int		scrub_type)
{
	struct xfs_scrub_subord	*sub;
	int			error;

	sub = xchk_scrub_create_subord(sc, scrub_type);
	error = sub->sc.ops->scrub(&sub->sc);
	xchk_scrub_free_subord(sub);
	return error;
}

/*
 * Scrub the attr/data forks of a metadata inode.  The metadata inode must be
 * pointed to by sc->ip and the ILOCK must be held.
 */
int
xchk_metadata_inode_forks(
	struct xfs_scrub	*sc)
{
	bool			shared;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return 0;

	/* Check the inode record. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_INODE);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Metadata inodes don't live on the rt device. */
	if (sc->ip->i_diflags & XFS_DIFLAG_REALTIME) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They should never participate in reflink. */
	if (xfs_is_reflink_inode(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* They also should never have extended attributes. */
	if (xfs_inode_hasattr(sc->ip)) {
		xchk_ino_set_corrupt(sc, sc->ip->i_ino);
		return 0;
	}

	/* Invoke the data fork scrubber. */
	error = xchk_metadata_inode_subtype(sc, XFS_SCRUB_TYPE_BMBTD);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Look for incorrect shared blocks. */
	if (xfs_has_reflink(sc->mp)) {
		error = xfs_reflink_inode_has_shared_extents(sc->tp, sc->ip,
				&shared);
		if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0,
				&error))
			return error;
		if (shared)
			xchk_ino_set_corrupt(sc, sc->ip->i_ino);
	}

	return 0;
}

/*
 * Enable filesystem hooks (i.e. runtime code patching) before starting a scrub
 * operation.  Callers must not hold any locks that intersect with the CPU
 * hotplug lock (e.g. writeback locks) because code patching must halt the CPUs
 * to change kernel code.
 */
void
xchk_fsgates_enable(
	struct xfs_scrub	*sc,
	unsigned int		scrub_fsgates)
{
	ASSERT(!(scrub_fsgates & ~XCHK_FSGATES_ALL));
	ASSERT(!(sc->flags & scrub_fsgates));

	trace_xchk_fsgates_enable(sc, scrub_fsgates);

	if (scrub_fsgates & XCHK_FSGATES_DRAIN)
		xfs_drain_wait_enable();

	if (scrub_fsgates & XCHK_FSGATES_QUOTA)
		xfs_dqtrx_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_DIRENTS)
		xfs_dir_hook_enable();

	if (scrub_fsgates & XCHK_FSGATES_RMAP)
		xfs_rmap_hook_enable();

	sc->flags |= scrub_fsgates;
}
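
/*
 * Illustrative sketch (hypothetical caller): a setup function whose scrubber
 * may have to wait for deferred-op chains to drain declares that dependency
 * before taking any locks, typically on the retry pass.
 *
 *	if (sc->flags & XCHK_TRY_HARDER)
 *		xchk_fsgates_enable(sc, XCHK_FSGATES_DRAIN);
 */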

/*
 * Decide if this is a cached inode that's also allocated.  The caller
 * must hold a reference to an AG and the AGI buffer lock to prevent inodes
 * from being allocated or freed.
 *
 * Look up an inode by number in the given file system.  If the inode number
 * is invalid, return -EINVAL.  If the inode is not in cache, return -ENODATA.
 * If the inode is being reclaimed, return -ENODATA because we know the inode
 * cache cannot be updating the ondisk metadata.
 *
 * Otherwise, the incore inode is the one we want, and it is either live,
 * somewhere in the inactivation machinery, or reclaimable.  The inode is
 * allocated if i_mode is nonzero.  In all three cases, the cached inode will
 * be more up to date than the ondisk inode buffer, so we must use the incore
 * inode instead.
 */
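
/*
 * Illustrative sketch (hypothetical caller; ondisk_free is a made-up flag):
 * an inobt scrubber that holds the AGI can cross-check an inode record
 * against the inode cache.
 *
 *	bool	inuse;
 *
 *	error = xchk_inode_is_allocated(sc, agino, &inuse);
 *	if (error == -ENODATA)
 *		return 0;	// not cached; rely on the ondisk buffer
 *	if (!error && ondisk_free && inuse)
 *		// the record says free but the cache says allocated
 *		xchk_block_set_corrupt(sc, sc->sa.agi_bp);
 */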

int
xchk_inode_is_allocated(
	struct xfs_scrub	*sc,
	xfs_agino_t		agino,
	bool			*inuse)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_perag	*pag = sc->sa.pag;
	xfs_ino_t		ino;
	struct xfs_inode	*ip;
	int			error;

	/* caller must hold perag reference */
	if (pag == NULL) {
		ASSERT(pag != NULL);
		return -EINVAL;
	}

	/* caller must have AGI buffer */
	if (sc->sa.agi_bp == NULL) {
		ASSERT(sc->sa.agi_bp != NULL);
		return -EINVAL;
	}

	/* reject inode numbers outside existing AGs */
	ino = XFS_AGINO_TO_INO(sc->mp, pag->pag_agno, agino);
	if (!xfs_verify_ino(mp, ino))
		return -EINVAL;

	error = -ENODATA;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
	if (!ip) {
		/* cache miss */
		goto out_rcu;
	}

	/*
	 * If the inode number doesn't match, the incore inode got reused
	 * during an RCU grace period and the radix tree hasn't been updated.
	 * This isn't the inode we want.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	trace_xchk_inode_is_allocated(ip);

	/*
	 * We have an incore inode that matches the inode we want, and the
	 * caller holds the perag structure and the AGI buffer.  Let's check
	 * our assumptions below:
	 */

	/*
	 * (1) If the incore inode is live (i.e. referenced from the dcache),
	 * it will not be INEW, nor will it be in the inactivation or reclaim
	 * machinery.  The ondisk inode had better be allocated.  This is the
	 * most trivial case.
	 */
	if (!(ip->i_flags & (XFS_NEED_INACTIVE | XFS_INEW | XFS_IRECLAIMABLE |
			     XFS_INACTIVATING))) {
		/* live inode */
		ASSERT(VFS_I(ip)->i_mode != 0);
		goto in_use;
	}

	/*
	 * If the incore inode is INEW, there are several possibilities:
	 *
	 * (2) For a file that is being created, note that we allocate the
	 * ondisk inode before allocating, initializing, and adding the incore
	 * inode to the radix tree.
	 *
	 * (3) If the incore inode is being recycled, the inode has to be
	 * allocated because we don't allow freed inodes to be recycled.
	 * Recycling doesn't touch i_mode.
	 */
	if (ip->i_flags & XFS_INEW) {
		/* created on disk already or recycling */
		ASSERT(VFS_I(ip)->i_mode != 0);
		goto in_use;
	}

	/*
	 * (4) If the inode is queued for inactivation (NEED_INACTIVE) but
	 * inactivation has not started (!INACTIVATING), it is still allocated.
	 */
	if ((ip->i_flags & XFS_NEED_INACTIVE) &&
	    !(ip->i_flags & XFS_INACTIVATING)) {
		/* definitely before difree */
		ASSERT(VFS_I(ip)->i_mode != 0);
		goto in_use;
	}

	/*
	 * If the incore inode is undergoing inactivation (INACTIVATING), there
	 * are two possibilities:
	 *
	 * (5) It is before the point where it would get freed ondisk, in which
	 * case i_mode is still nonzero.
	 *
	 * (6) It has already been freed, in which case i_mode is zero.
	 *
	 * We don't take the ILOCK here, but difree and dialloc update the AGI,
	 * and we've taken the AGI buffer lock, which prevents that from
	 * changing.
	 *
	 * (7) Inodes undergoing inactivation (INACTIVATING) or queued for
	 * reclaim (IRECLAIMABLE) could be allocated or free.  i_mode still
	 * reflects the ondisk state.
	 *
	 * (8) If the inode is in IFLUSHING, it's safe to query i_mode because
	 * the flush code uses i_mode to format the ondisk inode.
	 *
	 * (9) If the inode is in IRECLAIM and was reachable via the radix
	 * tree, it still has the same i_mode as it did before it entered
	 * reclaim.  The inode object is still alive because we hold the RCU
	 * read lock.
	 */

in_use:
	*inuse = VFS_I(ip)->i_mode != 0;
	error = 0;
out_skip:
	spin_unlock(&ip->i_flags_lock);
out_rcu:
	rcu_read_unlock();
	return error;
}
1448 spin_unlock(&ip->i_flags_lock);