xfs: check the uniqueness of the AGFL entries
fs/xfs/scrub/agheader.c
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/*
 * Set up scrub to check all the static metadata in each AG.
 * This means the SB, AGF, AGI, and AGFL headers.
 */
int
xfs_scrub_setup_ag_header(
        struct xfs_scrub_context        *sc,
        struct xfs_inode                *ip)
{
        struct xfs_mount                *mp = sc->mp;

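        /*
         * AG headers are addressed purely by AG number, so reject any
         * request that names an inode or generation, or an AG that is
         * beyond the end of the filesystem.
         */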
        if (sc->sm->sm_agno >= mp->m_sb.sb_agcount ||
            sc->sm->sm_ino || sc->sm->sm_gen)
                return -EINVAL;
        return xfs_scrub_setup_fs(sc, ip);
}

/* Walk all the blocks in the AGFL. */
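/*
 * The callback is invoked once for each active AGFL slot, in list order.
 * The walk stops early if the callback returns an error or if corruption
 * has been flagged (XFS_SCRUB_OFLAG_CORRUPT).
 */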
int
xfs_scrub_walk_agfl(
        struct xfs_scrub_context        *sc,
        int                             (*fn)(struct xfs_scrub_context *,
                                              xfs_agblock_t bno, void *),
        void                            *priv)
{
        struct xfs_agf                  *agf;
        __be32                          *agfl_bno;
        struct xfs_mount                *mp = sc->mp;
        unsigned int                    flfirst;
        unsigned int                    fllast;
        int                             i;
        int                             error;

        agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
        agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, sc->sa.agfl_bp);
        flfirst = be32_to_cpu(agf->agf_flfirst);
        fllast = be32_to_cpu(agf->agf_fllast);

        /* Nothing to walk in an empty AGFL. */
        if (agf->agf_flcount == cpu_to_be32(0))
                return 0;

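        /*
         * The AGFL is a circular buffer inside the fixed-size on-disk
         * array, so the active entries either form one contiguous run
         * (flfirst <= fllast) or wrap around the end of the array.
         */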
        /* first to last is a consecutive list. */
        if (fllast >= flfirst) {
                for (i = flfirst; i <= fllast; i++) {
                        error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
                        if (error)
                                return error;
                        if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                                return error;
                }

                return 0;
        }

        /* first to the end */
        for (i = flfirst; i < XFS_AGFL_SIZE(mp); i++) {
                error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
                if (error)
                        return error;
                if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        return error;
        }

        /* the start to last. */
        for (i = 0; i <= fllast; i++) {
                error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
                if (error)
                        return error;
                if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
                        return error;
        }

        return 0;
}

/* Superblock */

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xfs_scrub_superblock(
        struct xfs_scrub_context        *sc)
{
        struct xfs_mount                *mp = sc->mp;
        struct xfs_buf                  *bp;
        struct xfs_dsb                  *sb;
        xfs_agnumber_t                  agno;
        uint32_t                        v2_ok;
        __be32                          features_mask;
        int                             error;
        __be16                          vernum_mask;

        agno = sc->sm->sm_agno;
        if (agno == 0)
                return 0;

        error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
                  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
                  XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_sb_buf_ops);
        if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
                return error;

        sb = XFS_BUF_TO_SBP(bp);

        /*
         * Verify the geometries match.  Fields that are permanently
         * set by mkfs are checked; fields that can be updated later
         * (and are not propagated to backup superblocks) are preen
         * checked.
         */
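        /*
         * (A preen failure means the secondary superblock is merely
         * stale and can be rewritten opportunistically; a corruption
         * failure means it disagrees on geometry fixed at mkfs time.)
         */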
        if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
                xfs_scrub_block_set_corrupt(sc, bp);

        /* Check sb_versionnum bits that are set at mkfs time. */
        vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
                                  XFS_SB_VERSION_NUMBITS |
                                  XFS_SB_VERSION_ALIGNBIT |
                                  XFS_SB_VERSION_DALIGNBIT |
                                  XFS_SB_VERSION_SHAREDBIT |
                                  XFS_SB_VERSION_LOGV2BIT |
                                  XFS_SB_VERSION_SECTORBIT |
                                  XFS_SB_VERSION_EXTFLGBIT |
                                  XFS_SB_VERSION_DIRV2BIT);
        if ((sb->sb_versionnum & vernum_mask) !=
            (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
                xfs_scrub_block_set_corrupt(sc, bp);

        /* Check sb_versionnum bits that can be set after mkfs time. */
        vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
                                  XFS_SB_VERSION_NLINKBIT |
                                  XFS_SB_VERSION_QUOTABIT);
        if ((sb->sb_versionnum & vernum_mask) !=
            (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
                xfs_scrub_block_set_preen(sc, bp);

        /*
         * Skip the summary counters since we track them in memory anyway.
         * sb_icount, sb_ifree, sb_fdblocks, sb_frexents
         */

        if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
                xfs_scrub_block_set_preen(sc, bp);

        /*
         * Skip the quota flags since repair will force quotacheck.
         * sb_qflags
         */

        if (sb->sb_flags != mp->m_sb.sb_flags)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
                xfs_scrub_block_set_preen(sc, bp);

        if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
                xfs_scrub_block_set_corrupt(sc, bp);

        /* Do we see any invalid bits in sb_features2? */
        if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
                if (sb->sb_features2 != 0)
                        xfs_scrub_block_set_corrupt(sc, bp);
        } else {
                v2_ok = XFS_SB_VERSION2_OKBITS;
                if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
                        v2_ok |= XFS_SB_VERSION2_CRCBIT;

                if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
                        xfs_scrub_block_set_corrupt(sc, bp);

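                /*
                 * sb_bad_features2 is kept as a mirror of sb_features2 to
                 * cope with old kernels that wrote the field at the wrong
                 * offset; the two copies are expected to match.
                 */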
                if (sb->sb_features2 != sb->sb_bad_features2)
                        xfs_scrub_block_set_preen(sc, bp);
        }

        /* Check sb_features2 flags that are set at mkfs time. */
        features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
                                    XFS_SB_VERSION2_PROJID32BIT |
                                    XFS_SB_VERSION2_CRCBIT |
                                    XFS_SB_VERSION2_FTYPE);
        if ((sb->sb_features2 & features_mask) !=
            (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
                xfs_scrub_block_set_corrupt(sc, bp);

        /* Check sb_features2 flags that can be set after mkfs time. */
        features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
        if ((sb->sb_features2 & features_mask) !=
            (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
                xfs_scrub_block_set_corrupt(sc, bp);

        if (!xfs_sb_version_hascrc(&mp->m_sb)) {
                /* all v5 fields must be zero */
                if (memchr_inv(&sb->sb_features_compat, 0,
                                sizeof(struct xfs_dsb) -
                                offsetof(struct xfs_dsb, sb_features_compat)))
                        xfs_scrub_block_set_corrupt(sc, bp);
        } else {
                /* Check compat flags; all are set at mkfs time. */
                features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
                if ((sb->sb_features_compat & features_mask) !=
                    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
                        xfs_scrub_block_set_corrupt(sc, bp);

                /* Check ro compat flags; all are set at mkfs time. */
                features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
                                            XFS_SB_FEAT_RO_COMPAT_FINOBT |
                                            XFS_SB_FEAT_RO_COMPAT_RMAPBT |
                                            XFS_SB_FEAT_RO_COMPAT_REFLINK);
                if ((sb->sb_features_ro_compat & features_mask) !=
                    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
                     features_mask))
                        xfs_scrub_block_set_corrupt(sc, bp);

                /* Check incompat flags; all are set at mkfs time. */
                features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
                                            XFS_SB_FEAT_INCOMPAT_FTYPE |
                                            XFS_SB_FEAT_INCOMPAT_SPINODES |
                                            XFS_SB_FEAT_INCOMPAT_META_UUID);
                if ((sb->sb_features_incompat & features_mask) !=
                    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
                     features_mask))
                        xfs_scrub_block_set_corrupt(sc, bp);

                /* Check log incompat flags; all are set at mkfs time. */
                features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
                if ((sb->sb_features_log_incompat & features_mask) !=
                    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
                     features_mask))
                        xfs_scrub_block_set_corrupt(sc, bp);

                /* Don't care about sb_crc */

                if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
                        xfs_scrub_block_set_corrupt(sc, bp);

                if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
                        xfs_scrub_block_set_preen(sc, bp);

                /* Don't care about sb_lsn */
        }

        if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
                /* The metadata UUID must be the same for all supers */
                if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
                        xfs_scrub_block_set_corrupt(sc, bp);
        }

        /* Everything else must be zero. */
        if (memchr_inv(sb + 1, 0,
                        BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
                xfs_scrub_block_set_corrupt(sc, bp);

        return error;
}

/* AGF */

/* Scrub the AGF. */
int
xfs_scrub_agf(
        struct xfs_scrub_context        *sc)
{
        struct xfs_mount                *mp = sc->mp;
        struct xfs_agf                  *agf;
        xfs_agnumber_t                  agno;
        xfs_agblock_t                   agbno;
        xfs_agblock_t                   eoag;
        xfs_agblock_t                   agfl_first;
        xfs_agblock_t                   agfl_last;
        xfs_agblock_t                   agfl_count;
        xfs_agblock_t                   fl_count;
        int                             level;
        int                             error = 0;

        agno = sc->sa.agno = sc->sm->sm_agno;
        error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
                        &sc->sa.agf_bp, &sc->sa.agfl_bp);
        if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
                goto out;

        agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

        /* Check the AG length */
        eoag = be32_to_cpu(agf->agf_length);
        if (eoag != xfs_ag_block_count(mp, agno))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

        /* Check the AGF btree roots and levels */
        agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
        if (!xfs_verify_agbno(mp, agno, agbno))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

        agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
        if (!xfs_verify_agbno(mp, agno, agbno))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

        level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
        if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

        level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
        if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

        if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
                agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
                if (!xfs_verify_agbno(mp, agno, agbno))
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

                level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
                if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
        }

        if (xfs_sb_version_hasreflink(&mp->m_sb)) {
                agbno = be32_to_cpu(agf->agf_refcount_root);
                if (!xfs_verify_agbno(mp, agno, agbno))
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

                level = be32_to_cpu(agf->agf_refcount_level);
                if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
        }

        /* Check the AGFL counters */
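        /*
         * flcount should match the number of active slots implied by
         * flfirst and fllast, allowing for the list wrapping around the
         * end of the AGFL.  An empty list (flcount == 0) is exempt since
         * flfirst/fllast do not describe a meaningful range then.
         */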
        agfl_first = be32_to_cpu(agf->agf_flfirst);
        agfl_last = be32_to_cpu(agf->agf_fllast);
        agfl_count = be32_to_cpu(agf->agf_flcount);
        if (agfl_last > agfl_first)
                fl_count = agfl_last - agfl_first + 1;
        else
                fl_count = XFS_AGFL_SIZE(mp) - agfl_first + agfl_last + 1;
        if (agfl_count != 0 && fl_count != agfl_count)
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

out:
        return error;
}

/* AGFL */

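/*
 * In-memory context for the AGFL scrub: entries collects every block
 * number seen on the free list so they can be sorted and checked for
 * duplicates after the walk.
 */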
struct xfs_scrub_agfl_info {
        unsigned int                    sz_entries;
        unsigned int                    nr_entries;
        xfs_agblock_t                   *entries;
};

/* Scrub an AGFL block. */
STATIC int
xfs_scrub_agfl_block(
        struct xfs_scrub_context        *sc,
        xfs_agblock_t                   agbno,
        void                            *priv)
{
        struct xfs_mount                *mp = sc->mp;
        struct xfs_scrub_agfl_info      *sai = priv;
        xfs_agnumber_t                  agno = sc->sa.agno;

        if (xfs_verify_agbno(mp, agno, agbno) &&
            sai->nr_entries < sai->sz_entries)
                sai->entries[sai->nr_entries++] = agbno;
        else
                xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);

        return 0;
}

static int
xfs_scrub_agblock_cmp(
        const void              *pa,
        const void              *pb)
{
        const xfs_agblock_t     *a = pa;
        const xfs_agblock_t     *b = pb;

        return (int)*a - (int)*b;
}

/* Scrub the AGFL. */
int
xfs_scrub_agfl(
        struct xfs_scrub_context        *sc)
{
        struct xfs_scrub_agfl_info      sai = { 0 };
        struct xfs_agf                  *agf;
        xfs_agnumber_t                  agno;
        unsigned int                    agflcount;
        unsigned int                    i;
        int                             error;

        agno = sc->sa.agno = sc->sm->sm_agno;
        error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
                        &sc->sa.agf_bp, &sc->sa.agfl_bp);
        if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
                goto out;
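        /* We need the AGF to tell how many AGFL entries are in use. */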
        if (!sc->sa.agf_bp)
                return -EFSCORRUPTED;

        /* Allocate buffer to ensure uniqueness of AGFL entries. */
        agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
        agflcount = be32_to_cpu(agf->agf_flcount);
        if (agflcount > XFS_AGFL_SIZE(sc->mp)) {
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
                goto out;
        }
        sai.sz_entries = agflcount;
        sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS);
        if (!sai.entries) {
                error = -ENOMEM;
                goto out;
        }

        /* Check the blocks in the AGFL. */
        error = xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, &sai);
        if (error)
                goto out_free;

        if (agflcount != sai.nr_entries) {
                xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
                goto out_free;
        }

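        /*
         * Sorting puts duplicate block numbers next to each other, so a
         * single pass comparing adjacent entries catches any block that
         * appears on the free list more than once.
         */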
        /* Sort entries, check for duplicates. */
        sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
                        xfs_scrub_agblock_cmp, NULL);
        for (i = 1; i < sai.nr_entries; i++) {
                if (sai.entries[i] == sai.entries[i - 1]) {
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
                        break;
                }
        }

out_free:
        kmem_free(sai.entries);
out:
        return error;
}

/* AGI */

/* Scrub the AGI. */
int
xfs_scrub_agi(
        struct xfs_scrub_context        *sc)
{
        struct xfs_mount                *mp = sc->mp;
        struct xfs_agi                  *agi;
        xfs_agnumber_t                  agno;
        xfs_agblock_t                   agbno;
        xfs_agblock_t                   eoag;
        xfs_agino_t                     agino;
        xfs_agino_t                     first_agino;
        xfs_agino_t                     last_agino;
        xfs_agino_t                     icount;
        int                             i;
        int                             level;
        int                             error = 0;

        agno = sc->sa.agno = sc->sm->sm_agno;
        error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
                        &sc->sa.agf_bp, &sc->sa.agfl_bp);
        if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
                goto out;

        agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

        /* Check the AG length */
        eoag = be32_to_cpu(agi->agi_length);
        if (eoag != xfs_ag_block_count(mp, agno))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        /* Check btree roots and levels */
        agbno = be32_to_cpu(agi->agi_root);
        if (!xfs_verify_agbno(mp, agno, agbno))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        level = be32_to_cpu(agi->agi_level);
        if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
                agbno = be32_to_cpu(agi->agi_free_root);
                if (!xfs_verify_agbno(mp, agno, agbno))
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

                level = be32_to_cpu(agi->agi_free_level);
                if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
        }

        /* Check inode counters */
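        /*
         * agi_count cannot exceed the number of inodes the AG can hold
         * (the agino range), and cannot be smaller than the free count.
         */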
        xfs_ialloc_agino_range(mp, agno, &first_agino, &last_agino);
        icount = be32_to_cpu(agi->agi_count);
        if (icount > last_agino - first_agino + 1 ||
            icount < be32_to_cpu(agi->agi_freecount))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        /* Check inode pointers */
        agino = be32_to_cpu(agi->agi_newino);
        if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        agino = be32_to_cpu(agi->agi_dirino);
        if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

        /* Check unlinked inode buckets */
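        /*
         * Each bucket heads a list of unlinked-but-still-open inodes;
         * NULLAGINO marks an empty bucket, anything else must be a
         * valid inode number within this AG.
         */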
        for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
                agino = be32_to_cpu(agi->agi_unlinked[i]);
                if (agino == NULLAGINO)
                        continue;
                if (!xfs_verify_agino(mp, agno, agino))
                        xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
        }

        if (agi->agi_pad32 != cpu_to_be32(0))
                xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

out:
        return error;
}