/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_alloc.h"
#include "xfs_rtalloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_bmap_btree.h"
#include "xfs_rmap.h"
#include "xfs_rmap_btree.h"
#include "xfs_refcount.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/btree.h"
#include "scrub/trace.h"
48 /* Set us up with an inode's bmap. */
50 xfs_scrub_setup_inode_bmap(
51 struct xfs_scrub_context *sc,
54 struct xfs_mount *mp = sc->mp;
57 error = xfs_scrub_get_inode(sc, ip);
61 sc->ilock_flags = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;
62 xfs_ilock(sc->ip, sc->ilock_flags);
65 * We don't want any ephemeral data fork updates sitting around
66 * while we inspect block mappings, so wait for directio to finish
67 * and flush dirty data if we have delalloc reservations.
69 if (S_ISREG(VFS_I(sc->ip)->i_mode) &&
70 sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) {
71 inode_dio_wait(VFS_I(sc->ip));
72 error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping);
77 /* Got the inode, lock it and we're ready to go. */
78 error = xfs_scrub_trans_alloc(sc->sm, mp, &sc->tp);
81 sc->ilock_flags |= XFS_ILOCK_EXCL;
82 xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
85 /* scrub teardown will unlock and release the inode */
90 * Inode fork block mapping (BMBT) scrubber.
91 * More complex than the others because we have to scrub
92 * all the extents regardless of whether or not the fork
96 struct xfs_scrub_bmap_info {
97 struct xfs_scrub_context *sc;
98 xfs_fileoff_t lastoff;
104 /* Look for a corresponding rmap for this irec. */
106 xfs_scrub_bmap_get_rmap(
107 struct xfs_scrub_bmap_info *info,
108 struct xfs_bmbt_irec *irec,
111 struct xfs_rmap_irec *rmap)
113 xfs_fileoff_t offset;
114 unsigned int rflags = 0;
118 if (info->whichfork == XFS_ATTR_FORK)
119 rflags |= XFS_RMAP_ATTR_FORK;
122 * CoW staging extents are owned (on disk) by the refcountbt, so
123 * their rmaps do not have offsets.
125 if (info->whichfork == XFS_COW_FORK)
128 offset = irec->br_startoff;
131 * If the caller thinks this could be a shared bmbt extent (IOWs,
132 * any data fork extent of a reflink inode) then we have to use the
133 * range rmap lookup to make sure we get the correct owner/offset.
135 if (info->is_shared) {
136 error = xfs_rmap_lookup_le_range(info->sc->sa.rmap_cur, agbno,
137 owner, offset, rflags, rmap, &has_rmap);
138 if (!xfs_scrub_should_check_xref(info->sc, &error,
139 &info->sc->sa.rmap_cur))
145 * Otherwise, use the (faster) regular lookup.
147 error = xfs_rmap_lookup_le(info->sc->sa.rmap_cur, agbno, 0, owner,
148 offset, rflags, &has_rmap);
149 if (!xfs_scrub_should_check_xref(info->sc, &error,
150 &info->sc->sa.rmap_cur))
155 error = xfs_rmap_get_rec(info->sc->sa.rmap_cur, rmap, &has_rmap);
156 if (!xfs_scrub_should_check_xref(info->sc, &error,
157 &info->sc->sa.rmap_cur))
162 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
167 /* Make sure that we have rmapbt records for this extent. */
169 xfs_scrub_bmap_xref_rmap(
170 struct xfs_scrub_bmap_info *info,
171 struct xfs_bmbt_irec *irec,
174 struct xfs_rmap_irec rmap;
175 unsigned long long rmap_end;
178 if (!info->sc->sa.rmap_cur)
181 if (info->whichfork == XFS_COW_FORK)
182 owner = XFS_RMAP_OWN_COW;
184 owner = info->sc->ip->i_ino;
186 /* Find the rmap record for this irec. */
187 if (!xfs_scrub_bmap_get_rmap(info, irec, agbno, owner, &rmap))
190 /* Check the rmap. */
191 rmap_end = (unsigned long long)rmap.rm_startblock + rmap.rm_blockcount;
192 if (rmap.rm_startblock > agbno ||
193 agbno + irec->br_blockcount > rmap_end)
194 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
198 * Check the logical offsets if applicable. CoW staging extents
199 * don't track logical offsets since the mappings only exist in
202 if (info->whichfork != XFS_COW_FORK) {
203 rmap_end = (unsigned long long)rmap.rm_offset +
205 if (rmap.rm_offset > irec->br_startoff ||
206 irec->br_startoff + irec->br_blockcount > rmap_end)
207 xfs_scrub_fblock_xref_set_corrupt(info->sc,
208 info->whichfork, irec->br_startoff);
211 if (rmap.rm_owner != owner)
212 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
216 * Check for discrepancies between the unwritten flag in the irec and
217 * the rmap. Note that the (in-memory) CoW fork distinguishes between
218 * unwritten and written extents, but we don't track that in the rmap
219 * records because the blocks are owned (on-disk) by the refcountbt,
220 * which doesn't track unwritten state.
222 if (owner != XFS_RMAP_OWN_COW &&
223 irec->br_state == XFS_EXT_UNWRITTEN &&
224 !(rmap.rm_flags & XFS_RMAP_UNWRITTEN))
225 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
228 if (info->whichfork == XFS_ATTR_FORK &&
229 !(rmap.rm_flags & XFS_RMAP_ATTR_FORK))
230 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
232 if (rmap.rm_flags & XFS_RMAP_BMBT_BLOCK)
233 xfs_scrub_fblock_xref_set_corrupt(info->sc, info->whichfork,
237 /* Cross-reference a single rtdev extent record. */
239 xfs_scrub_bmap_rt_extent_xref(
240 struct xfs_scrub_bmap_info *info,
241 struct xfs_inode *ip,
242 struct xfs_btree_cur *cur,
243 struct xfs_bmbt_irec *irec)
245 if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
248 xfs_scrub_xref_is_used_rt_space(info->sc, irec->br_startblock,
249 irec->br_blockcount);
252 /* Cross-reference a single datadev extent record. */
254 xfs_scrub_bmap_extent_xref(
255 struct xfs_scrub_bmap_info *info,
256 struct xfs_inode *ip,
257 struct xfs_btree_cur *cur,
258 struct xfs_bmbt_irec *irec)
260 struct xfs_mount *mp = info->sc->mp;
266 if (info->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
269 agno = XFS_FSB_TO_AGNO(mp, irec->br_startblock);
270 agbno = XFS_FSB_TO_AGBNO(mp, irec->br_startblock);
271 len = irec->br_blockcount;
273 error = xfs_scrub_ag_init(info->sc, agno, &info->sc->sa);
274 if (!xfs_scrub_fblock_process_error(info->sc, info->whichfork,
275 irec->br_startoff, &error))
278 xfs_scrub_xref_is_used_space(info->sc, agbno, len);
279 xfs_scrub_xref_is_not_inode_chunk(info->sc, agbno, len);
280 xfs_scrub_bmap_xref_rmap(info, irec, agbno);
281 switch (info->whichfork) {
283 if (xfs_is_reflink_inode(info->sc->ip))
287 xfs_scrub_xref_is_not_shared(info->sc, agbno,
288 irec->br_blockcount);
291 xfs_scrub_xref_is_cow_staging(info->sc, agbno,
292 irec->br_blockcount);
296 xfs_scrub_ag_free(info->sc, &info->sc->sa);
299 /* Scrub a single extent record. */
301 xfs_scrub_bmap_extent(
302 struct xfs_inode *ip,
303 struct xfs_btree_cur *cur,
304 struct xfs_scrub_bmap_info *info,
305 struct xfs_bmbt_irec *irec)
307 struct xfs_mount *mp = info->sc->mp;
308 struct xfs_buf *bp = NULL;
313 xfs_btree_get_block(cur, 0, &bp);
316 * Check for out-of-order extents. This record could have come
317 * from the incore list, for which there is no ordering check.
319 if (irec->br_startoff < info->lastoff)
320 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
323 /* There should never be a "hole" extent in either extent list. */
324 if (irec->br_startblock == HOLESTARTBLOCK)
325 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
329 * Check for delalloc extents. We never iterate the ones in the
330 * in-core extent scan, and we should never see these in the bmbt.
332 if (isnullstartblock(irec->br_startblock))
333 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
336 /* Make sure the extent points to a valid place. */
337 if (irec->br_blockcount > MAXEXTLEN)
338 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
340 if (irec->br_startblock + irec->br_blockcount <= irec->br_startblock)
341 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
343 end = irec->br_startblock + irec->br_blockcount - 1;
345 (!xfs_verify_rtbno(mp, irec->br_startblock) ||
346 !xfs_verify_rtbno(mp, end)))
347 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
350 (!xfs_verify_fsbno(mp, irec->br_startblock) ||
351 !xfs_verify_fsbno(mp, end) ||
352 XFS_FSB_TO_AGNO(mp, irec->br_startblock) !=
353 XFS_FSB_TO_AGNO(mp, end)))
354 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
357 /* We don't allow unwritten extents on attr forks. */
358 if (irec->br_state == XFS_EXT_UNWRITTEN &&
359 info->whichfork == XFS_ATTR_FORK)
360 xfs_scrub_fblock_set_corrupt(info->sc, info->whichfork,
364 xfs_scrub_bmap_rt_extent_xref(info, ip, cur, irec);
366 xfs_scrub_bmap_extent_xref(info, ip, cur, irec);
368 info->lastoff = irec->br_startoff + irec->br_blockcount;
372 /* Scrub a bmbt record. */
374 xfs_scrub_bmapbt_rec(
375 struct xfs_scrub_btree *bs,
376 union xfs_btree_rec *rec)
378 struct xfs_bmbt_irec irec;
379 struct xfs_scrub_bmap_info *info = bs->private;
380 struct xfs_inode *ip = bs->cur->bc_private.b.ip;
381 struct xfs_buf *bp = NULL;
382 struct xfs_btree_block *block;
387 * Check the owners of the btree blocks up to the level below
388 * the root since the verifiers don't do that.
390 if (xfs_sb_version_hascrc(&bs->cur->bc_mp->m_sb) &&
391 bs->cur->bc_ptrs[0] == 1) {
392 for (i = 0; i < bs->cur->bc_nlevels - 1; i++) {
393 block = xfs_btree_get_block(bs->cur, i, &bp);
394 owner = be64_to_cpu(block->bb_u.l.bb_owner);
395 if (owner != ip->i_ino)
396 xfs_scrub_fblock_set_corrupt(bs->sc,
401 /* Set up the in-core record and scrub it. */
402 xfs_bmbt_disk_get_all(&rec->bmbt, &irec);
403 return xfs_scrub_bmap_extent(ip, bs->cur, info, &irec);
406 /* Scan the btree records. */
408 xfs_scrub_bmap_btree(
409 struct xfs_scrub_context *sc,
411 struct xfs_scrub_bmap_info *info)
413 struct xfs_owner_info oinfo;
414 struct xfs_mount *mp = sc->mp;
415 struct xfs_inode *ip = sc->ip;
416 struct xfs_btree_cur *cur;
419 cur = xfs_bmbt_init_cursor(mp, sc->tp, ip, whichfork);
420 xfs_rmap_ino_bmbt_owner(&oinfo, ip->i_ino, whichfork);
421 error = xfs_scrub_btree(sc, cur, xfs_scrub_bmapbt_rec, &oinfo, info);
422 xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR :
427 struct xfs_scrub_bmap_check_rmap_info {
428 struct xfs_scrub_context *sc;
430 struct xfs_iext_cursor icur;
433 /* Can we find bmaps that fit this rmap? */
435 xfs_scrub_bmap_check_rmap(
436 struct xfs_btree_cur *cur,
437 struct xfs_rmap_irec *rec,
440 struct xfs_bmbt_irec irec;
441 struct xfs_scrub_bmap_check_rmap_info *sbcri = priv;
442 struct xfs_ifork *ifp;
443 struct xfs_scrub_context *sc = sbcri->sc;
446 /* Is this even the right fork? */
447 if (rec->rm_owner != sc->ip->i_ino)
449 if ((sbcri->whichfork == XFS_ATTR_FORK) ^
450 !!(rec->rm_flags & XFS_RMAP_ATTR_FORK))
452 if (rec->rm_flags & XFS_RMAP_BMBT_BLOCK)
455 /* Now look up the bmbt record. */
456 ifp = XFS_IFORK_PTR(sc->ip, sbcri->whichfork);
458 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
462 have_map = xfs_iext_lookup_extent(sc->ip, ifp, rec->rm_offset,
463 &sbcri->icur, &irec);
465 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
468 * bmap extent record lengths are constrained to 2^21 blocks in length
469 * because of space constraints in the on-disk metadata structure.
470 * However, rmap extent record lengths are constrained only by AG
471 * length, so we have to loop through the bmbt to make sure that the
472 * entire rmap is covered by bmbt records.
475 if (irec.br_startoff != rec->rm_offset)
476 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
478 if (irec.br_startblock != XFS_AGB_TO_FSB(sc->mp,
479 cur->bc_private.a.agno, rec->rm_startblock))
480 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
482 if (irec.br_blockcount > rec->rm_blockcount)
483 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
485 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
487 rec->rm_startblock += irec.br_blockcount;
488 rec->rm_offset += irec.br_blockcount;
489 rec->rm_blockcount -= irec.br_blockcount;
490 if (rec->rm_blockcount == 0)
492 have_map = xfs_iext_next_extent(ifp, &sbcri->icur, &irec);
494 xfs_scrub_fblock_set_corrupt(sc, sbcri->whichfork,
499 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
500 return XFS_BTREE_QUERY_RANGE_ABORT;
504 /* Make sure each rmap has a corresponding bmbt entry. */
506 xfs_scrub_bmap_check_ag_rmaps(
507 struct xfs_scrub_context *sc,
511 struct xfs_scrub_bmap_check_rmap_info sbcri;
512 struct xfs_btree_cur *cur;
516 error = xfs_alloc_read_agf(sc->mp, sc->tp, agno, 0, &agf);
520 cur = xfs_rmapbt_init_cursor(sc->mp, sc->tp, agf, agno);
527 sbcri.whichfork = whichfork;
528 error = xfs_rmap_query_all(cur, xfs_scrub_bmap_check_rmap, &sbcri);
529 if (error == XFS_BTREE_QUERY_RANGE_ABORT)
532 xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
534 xfs_trans_brelse(sc->tp, agf);
538 /* Make sure each rmap has a corresponding bmbt entry. */
540 xfs_scrub_bmap_check_rmaps(
541 struct xfs_scrub_context *sc,
548 if (!xfs_sb_version_hasrmapbt(&sc->mp->m_sb) ||
549 whichfork == XFS_COW_FORK ||
550 (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
553 /* Don't support realtime rmap checks yet. */
554 if (XFS_IS_REALTIME_INODE(sc->ip) && whichfork == XFS_DATA_FORK)
558 * Only do this for complex maps that are in btree format, or for
559 * situations where we would seem to have a size but zero extents.
560 * The inode repair code can zap broken iforks, which means we have
561 * to flag this bmap as corrupt if there are rmaps that need to be
566 size = i_size_read(VFS_I(sc->ip));
569 size = XFS_IFORK_Q(sc->ip);
575 if (XFS_IFORK_FORMAT(sc->ip, whichfork) != XFS_DINODE_FMT_BTREE &&
576 (size == 0 || XFS_IFORK_NEXTENTS(sc->ip, whichfork) > 0))
579 for (agno = 0; agno < sc->mp->m_sb.sb_agcount; agno++) {
580 error = xfs_scrub_bmap_check_ag_rmaps(sc, whichfork, agno);
583 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
591 * Scrub an inode fork's block mappings.
593 * First we scan every record in every btree block, if applicable.
594 * Then we unconditionally scan the incore extent cache.
598 struct xfs_scrub_context *sc,
601 struct xfs_bmbt_irec irec;
602 struct xfs_scrub_bmap_info info = { NULL };
603 struct xfs_mount *mp = sc->mp;
604 struct xfs_inode *ip = sc->ip;
605 struct xfs_ifork *ifp;
606 xfs_fileoff_t endoff;
607 struct xfs_iext_cursor icur;
610 ifp = XFS_IFORK_PTR(ip, whichfork);
612 info.is_rt = whichfork == XFS_DATA_FORK && XFS_IS_REALTIME_INODE(ip);
613 info.whichfork = whichfork;
614 info.is_shared = whichfork == XFS_DATA_FORK && xfs_is_reflink_inode(ip);
619 /* Non-existent CoW forks are ignorable. */
622 /* No CoW forks on non-reflink inodes/filesystems. */
623 if (!xfs_is_reflink_inode(ip)) {
624 xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
631 if (!xfs_sb_version_hasattr(&mp->m_sb) &&
632 !xfs_sb_version_hasattr2(&mp->m_sb))
633 xfs_scrub_ino_set_corrupt(sc, sc->ip->i_ino);
636 ASSERT(whichfork == XFS_DATA_FORK);
640 /* Check the fork values */
641 switch (XFS_IFORK_FORMAT(ip, whichfork)) {
642 case XFS_DINODE_FMT_UUID:
643 case XFS_DINODE_FMT_DEV:
644 case XFS_DINODE_FMT_LOCAL:
645 /* No mappings to check. */
647 case XFS_DINODE_FMT_EXTENTS:
648 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
649 xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
653 case XFS_DINODE_FMT_BTREE:
654 if (whichfork == XFS_COW_FORK) {
655 xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
659 error = xfs_scrub_bmap_btree(sc, whichfork, &info);
664 xfs_scrub_fblock_set_corrupt(sc, whichfork, 0);
668 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
671 /* Now try to scrub the in-memory extent list. */
672 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
673 error = xfs_iread_extents(sc->tp, ip, whichfork);
674 if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
678 /* Find the offset of the last extent in the mapping. */
679 error = xfs_bmap_last_offset(ip, &endoff, whichfork);
680 if (!xfs_scrub_fblock_process_error(sc, whichfork, 0, &error))
683 /* Scrub extent records. */
685 ifp = XFS_IFORK_PTR(ip, whichfork);
686 for_each_xfs_iext(ifp, &icur, &irec) {
687 if (xfs_scrub_should_terminate(sc, &error))
689 if (isnullstartblock(irec.br_startblock))
691 if (irec.br_startoff >= endoff) {
692 xfs_scrub_fblock_set_corrupt(sc, whichfork,
696 error = xfs_scrub_bmap_extent(ip, NULL, &info, &irec);
702 error = xfs_scrub_bmap_check_rmaps(sc, whichfork);
703 if (!xfs_scrub_fblock_xref_process_error(sc, whichfork, 0, &error))
709 /* Scrub an inode's data fork. */
712 struct xfs_scrub_context *sc)
714 return xfs_scrub_bmap(sc, XFS_DATA_FORK);
717 /* Scrub an inode's attr fork. */
720 struct xfs_scrub_context *sc)
722 return xfs_scrub_bmap(sc, XFS_ATTR_FORK);
725 /* Scrub an inode's CoW fork. */
728 struct xfs_scrub_context *sc)
730 if (!xfs_is_reflink_inode(sc->ip))
733 return xfs_scrub_bmap(sc, XFS_COW_FORK);