xfs: ignore agfl read errors when not scrubbing agfl
fs/xfs/scrub/agheader.c
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_ialloc.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"

/* Walk all the blocks in the AGFL. */
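/*
 * The AGFL is an on-disk array of block numbers, managed as a circular
 * list: agf_flfirst and agf_fllast index the head and tail, so the walk
 * may have to wrap past the end of the array.  @fn is invoked once per
 * entry; a nonzero return value or a corruption flag ends the walk early.
 */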
int
xfs_scrub_walk_agfl(
	struct xfs_scrub_context	*sc,
	int				(*fn)(struct xfs_scrub_context *,
					      xfs_agblock_t bno, void *),
	void				*priv)
{
	struct xfs_agf			*agf;
	__be32				*agfl_bno;
	struct xfs_mount		*mp = sc->mp;
	unsigned int			flfirst;
	unsigned int			fllast;
	int				i;
	int				error;

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, sc->sa.agfl_bp);
	flfirst = be32_to_cpu(agf->agf_flfirst);
	fllast = be32_to_cpu(agf->agf_fllast);

	/* Nothing to walk in an empty AGFL. */
	if (agf->agf_flcount == cpu_to_be32(0))
		return 0;

	/* first to last is a consecutive list. */
	if (fllast >= flfirst) {
		for (i = flfirst; i <= fllast; i++) {
			error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
			if (error)
				return error;
			if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
				return error;
		}

		return 0;
	}

	/* The list wraps: walk from flfirst to the end of the array... */
	for (i = flfirst; i < XFS_AGFL_SIZE(mp); i++) {
		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return error;
	}

	/* ...then from the start of the array to fllast. */
	for (i = 0; i <= fllast; i++) {
		error = fn(sc, be32_to_cpu(agfl_bno[i]), priv);
		if (error)
			return error;
		if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			return error;
	}

	return 0;
}

/* Superblock */

/*
 * Scrub the filesystem superblock.
 *
 * Note: We do /not/ attempt to check AG 0's superblock.  Mount is
 * responsible for validating all the geometry information in sb 0, so
 * if the filesystem is capable of initiating online scrub, then clearly
 * sb 0 is ok and we can use its information to check everything else.
 */
int
xfs_scrub_superblock(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*bp;
	struct xfs_dsb			*sb;
	xfs_agnumber_t			agno;
	uint32_t			v2_ok;
	__be32				features_mask;
	int				error;
	__be16				vernum_mask;

	agno = sc->sm->sm_agno;
	if (agno == 0)
		return 0;

	error = xfs_trans_read_buf(mp, sc->tp, mp->m_ddev_targp,
		  XFS_AGB_TO_DADDR(mp, agno, XFS_SB_BLOCK(mp)),
		  XFS_FSS_TO_BB(mp, 1), 0, &bp, &xfs_sb_buf_ops);
	if (!xfs_scrub_process_error(sc, agno, XFS_SB_BLOCK(mp), &error))
		return error;

	sb = XFS_BUF_TO_SBP(bp);

	/*
	 * Verify the geometries match.  Fields that are permanently
	 * set by mkfs are checked; fields that can be updated later
	 * (and are not propagated to backup superblocks) are preen
	 * checked.
	 */
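	/*
	 * ("Corrupt" sets XFS_SCRUB_OFLAG_CORRUPT, telling userspace the
	 * metadata needs repair; "preen" sets XFS_SCRUB_OFLAG_PREEN,
	 * meaning the mismatch is harmless but could be tidied up.)
	 */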
	if (sb->sb_blocksize != cpu_to_be32(mp->m_sb.sb_blocksize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_dblocks != cpu_to_be64(mp->m_sb.sb_dblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rblocks != cpu_to_be64(mp->m_sb.sb_rblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rextents != cpu_to_be64(mp->m_sb.sb_rextents))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (!uuid_equal(&sb->sb_uuid, &mp->m_sb.sb_uuid))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_logstart != cpu_to_be64(mp->m_sb.sb_logstart))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rootino != cpu_to_be64(mp->m_sb.sb_rootino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rbmino != cpu_to_be64(mp->m_sb.sb_rbmino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rsumino != cpu_to_be64(mp->m_sb.sb_rsumino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_rextsize != cpu_to_be32(mp->m_sb.sb_rextsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agblocks != cpu_to_be32(mp->m_sb.sb_agblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agcount != cpu_to_be32(mp->m_sb.sb_agcount))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rbmblocks != cpu_to_be32(mp->m_sb.sb_rbmblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logblocks != cpu_to_be32(mp->m_sb.sb_logblocks))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that are set at mkfs time. */
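	/*
	 * The masks below are built with cpu_to_be16()/cpu_to_be32() so
	 * that the on-disk big-endian fields can be compared directly,
	 * without byteswapping each field for every check.
	 */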
	vernum_mask = cpu_to_be16(~XFS_SB_VERSION_OKBITS |
				  XFS_SB_VERSION_NUMBITS |
				  XFS_SB_VERSION_ALIGNBIT |
				  XFS_SB_VERSION_DALIGNBIT |
				  XFS_SB_VERSION_SHAREDBIT |
				  XFS_SB_VERSION_LOGV2BIT |
				  XFS_SB_VERSION_SECTORBIT |
				  XFS_SB_VERSION_EXTFLGBIT |
				  XFS_SB_VERSION_DIRV2BIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_versionnum bits that can be set after mkfs time. */
	vernum_mask = cpu_to_be16(XFS_SB_VERSION_ATTRBIT |
				  XFS_SB_VERSION_NLINKBIT |
				  XFS_SB_VERSION_QUOTABIT);
	if ((sb->sb_versionnum & vernum_mask) !=
	    (cpu_to_be16(mp->m_sb.sb_versionnum) & vernum_mask))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_sectsize != cpu_to_be16(mp->m_sb.sb_sectsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inodesize != cpu_to_be16(mp->m_sb.sb_inodesize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inopblock != cpu_to_be16(mp->m_sb.sb_inopblock))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (memcmp(sb->sb_fname, mp->m_sb.sb_fname, sizeof(sb->sb_fname)))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_blocklog != mp->m_sb.sb_blocklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_sectlog != mp->m_sb.sb_sectlog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inodelog != mp->m_sb.sb_inodelog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inopblog != mp->m_sb.sb_inopblog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_agblklog != mp->m_sb.sb_agblklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_rextslog != mp->m_sb.sb_rextslog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_imax_pct != mp->m_sb.sb_imax_pct)
		xfs_scrub_block_set_preen(sc, bp);

	/*
	 * Skip the summary counters since we track them in memory anyway.
	 * sb_icount, sb_ifree, sb_fdblocks, sb_frextents
	 */

	if (sb->sb_uquotino != cpu_to_be64(mp->m_sb.sb_uquotino))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_gquotino != cpu_to_be64(mp->m_sb.sb_gquotino))
		xfs_scrub_block_set_preen(sc, bp);

	/*
	 * Skip the quota flags since repair will force quotacheck.
	 * sb_qflags
	 */

	if (sb->sb_flags != mp->m_sb.sb_flags)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_shared_vn != mp->m_sb.sb_shared_vn)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_inoalignmt != cpu_to_be32(mp->m_sb.sb_inoalignmt))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_unit != cpu_to_be32(mp->m_sb.sb_unit))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_width != cpu_to_be32(mp->m_sb.sb_width))
		xfs_scrub_block_set_preen(sc, bp);

	if (sb->sb_dirblklog != mp->m_sb.sb_dirblklog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsectlog != mp->m_sb.sb_logsectlog)
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsectsize != cpu_to_be16(mp->m_sb.sb_logsectsize))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (sb->sb_logsunit != cpu_to_be32(mp->m_sb.sb_logsunit))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Do we see any invalid bits in sb_features2? */
	if (!xfs_sb_version_hasmorebits(&mp->m_sb)) {
		if (sb->sb_features2 != 0)
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		v2_ok = XFS_SB_VERSION2_OKBITS;
		if (XFS_SB_VERSION_NUM(&mp->m_sb) >= XFS_SB_VERSION_5)
			v2_ok |= XFS_SB_VERSION2_CRCBIT;

		if (!!(sb->sb_features2 & cpu_to_be32(~v2_ok)))
			xfs_scrub_block_set_corrupt(sc, bp);

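		/*
		 * sb_bad_features2 is a second copy of sb_features2, kept
		 * because an old bug could write sb_features2 at the wrong
		 * offset; the two copies are expected to stay in sync.
		 */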
		if (sb->sb_features2 != sb->sb_bad_features2)
			xfs_scrub_block_set_preen(sc, bp);
	}

	/* Check sb_features2 flags that are set at mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_LAZYSBCOUNTBIT |
				    XFS_SB_VERSION2_PROJID32BIT |
				    XFS_SB_VERSION2_CRCBIT |
				    XFS_SB_VERSION2_FTYPE);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	/* Check sb_features2 flags that can be set after mkfs time. */
	features_mask = cpu_to_be32(XFS_SB_VERSION2_ATTR2BIT);
	if ((sb->sb_features2 & features_mask) !=
	    (cpu_to_be32(mp->m_sb.sb_features2) & features_mask))
		xfs_scrub_block_set_corrupt(sc, bp);

	if (!xfs_sb_version_hascrc(&mp->m_sb)) {
		/* all v5 fields must be zero */
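		/*
		 * memchr_inv() returns the address of the first byte that
		 * differs from the given value, so a non-NULL result means
		 * at least one v5-only field is nonzero.
		 */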
		if (memchr_inv(&sb->sb_features_compat, 0,
				sizeof(struct xfs_dsb) -
				offsetof(struct xfs_dsb, sb_features_compat)))
			xfs_scrub_block_set_corrupt(sc, bp);
	} else {
		/* Check compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_COMPAT_UNKNOWN);
		if ((sb->sb_features_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_compat) & features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check ro compat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_RO_COMPAT_UNKNOWN |
					    XFS_SB_FEAT_RO_COMPAT_FINOBT |
					    XFS_SB_FEAT_RO_COMPAT_RMAPBT |
					    XFS_SB_FEAT_RO_COMPAT_REFLINK);
		if ((sb->sb_features_ro_compat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_ro_compat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_UNKNOWN |
					    XFS_SB_FEAT_INCOMPAT_FTYPE |
					    XFS_SB_FEAT_INCOMPAT_SPINODES |
					    XFS_SB_FEAT_INCOMPAT_META_UUID);
		if ((sb->sb_features_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Check log incompat flags; all are set at mkfs time. */
		features_mask = cpu_to_be32(XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN);
		if ((sb->sb_features_log_incompat & features_mask) !=
		    (cpu_to_be32(mp->m_sb.sb_features_log_incompat) &
		     features_mask))
			xfs_scrub_block_set_corrupt(sc, bp);

		/* Don't care about sb_crc */

		if (sb->sb_spino_align != cpu_to_be32(mp->m_sb.sb_spino_align))
			xfs_scrub_block_set_corrupt(sc, bp);

		if (sb->sb_pquotino != cpu_to_be64(mp->m_sb.sb_pquotino))
			xfs_scrub_block_set_preen(sc, bp);

		/* Don't care about sb_lsn */
	}

	if (xfs_sb_version_hasmetauuid(&mp->m_sb)) {
		/* The metadata UUID must be the same for all supers */
		if (!uuid_equal(&sb->sb_meta_uuid, &mp->m_sb.sb_meta_uuid))
			xfs_scrub_block_set_corrupt(sc, bp);
	}

	/* Everything else must be zero. */
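	/*
	 * sb + 1 points just past the on-disk superblock structure;
	 * everything from there to the end of the buffer must be zero.
	 */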
	if (memchr_inv(sb + 1, 0,
			BBTOB(bp->b_length) - sizeof(struct xfs_dsb)))
		xfs_scrub_block_set_corrupt(sc, bp);

	return error;
}

/* AGF */

/* Scrub the AGF. */
int
xfs_scrub_agf(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agblock_t			agfl_first;
	xfs_agblock_t			agfl_last;
	xfs_agblock_t			agfl_count;
	xfs_agblock_t			fl_count;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGF_BLOCK(sc->mp), &error))
		goto out;

	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);

	/* Check the AG length */
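	/*
	 * (xfs_ag_block_count() accounts for the last AG, which may be
	 * shorter than sb_agblocks.)
	 */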
	eoag = be32_to_cpu(agf->agf_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	/* Check the AGF btree roots and levels */
	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_roots[XFS_BTNUM_RMAP]);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_levels[XFS_BTNUM_RMAP]);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agbno = be32_to_cpu(agf->agf_refcount_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

		level = be32_to_cpu(agf->agf_refcount_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
	}

	/* Check the AGFL counters */
	agfl_first = be32_to_cpu(agf->agf_flfirst);
	agfl_last = be32_to_cpu(agf->agf_fllast);
	agfl_count = be32_to_cpu(agf->agf_flcount);
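	/*
	 * The free list is circular; if it wraps past the end of the AGFL
	 * block, the expected length is computed in two pieces.  For
	 * example (sizes here are illustrative only): with XFS_AGFL_SIZE
	 * of 118, agfl_first = 116 and agfl_last = 2 cover entries 116,
	 * 117, 0, 1, 2, i.e. 118 - 116 + 2 + 1 = 5 entries.
	 */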
	if (agfl_last > agfl_first)
		fl_count = agfl_last - agfl_first + 1;
	else
		fl_count = XFS_AGFL_SIZE(mp) - agfl_first + agfl_last + 1;
	if (agfl_count != 0 && fl_count != agfl_count)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);

out:
	return error;
}

/* AGFL */

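/* Scratch state for checking that the AGFL contains only unique blocks. */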
struct xfs_scrub_agfl_info {
	unsigned int			sz_entries;
	unsigned int			nr_entries;
	xfs_agblock_t			*entries;
};

/* Scrub an AGFL block. */
STATIC int
xfs_scrub_agfl_block(
	struct xfs_scrub_context	*sc,
	xfs_agblock_t			agbno,
	void				*priv)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_agfl_info	*sai = priv;
	xfs_agnumber_t			agno = sc->sa.agno;

	if (xfs_verify_agbno(mp, agno, agbno) &&
	    sai->nr_entries < sai->sz_entries)
		sai->entries[sai->nr_entries++] = agbno;
	else
		xfs_scrub_block_set_corrupt(sc, sc->sa.agfl_bp);

	return 0;
}

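/* Compare two AGFL block numbers; used by sort() to order the entries. */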
static int
xfs_scrub_agblock_cmp(
	const void		*pa,
	const void		*pb)
{
	const xfs_agblock_t	*a = pa;
	const xfs_agblock_t	*b = pb;

	return (int)*a - (int)*b;
}

/* Scrub the AGFL. */
int
xfs_scrub_agfl(
	struct xfs_scrub_context	*sc)
{
	struct xfs_scrub_agfl_info	sai = { 0 };
	struct xfs_agf			*agf;
	xfs_agnumber_t			agno;
	unsigned int			agflcount;
	unsigned int			i;
	int				error;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGFL_BLOCK(sc->mp), &error))
		goto out;
	if (!sc->sa.agf_bp)
		return -EFSCORRUPTED;

	/* Allocate buffer to ensure uniqueness of AGFL entries. */
	agf = XFS_BUF_TO_AGF(sc->sa.agf_bp);
	agflcount = be32_to_cpu(agf->agf_flcount);
	if (agflcount > XFS_AGFL_SIZE(sc->mp)) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out;
	}
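	/*
	 * agflcount is now known to be no larger than the AGFL, so the
	 * allocation is bounded.  KM_NOFS keeps memory reclaim from
	 * recursing into the filesystem while we hold AG header buffers.
	 */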
	sai.sz_entries = agflcount;
	sai.entries = kmem_zalloc(sizeof(xfs_agblock_t) * agflcount, KM_NOFS);
	if (!sai.entries) {
		error = -ENOMEM;
		goto out;
	}

	/* Check the blocks in the AGFL. */
	error = xfs_scrub_walk_agfl(sc, xfs_scrub_agfl_block, &sai);
	if (error)
		goto out_free;

	if (agflcount != sai.nr_entries) {
		xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
		goto out_free;
	}

	/* Sort entries, check for duplicates. */
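	/* Sorting makes any duplicate entries adjacent to each other. */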
	sort(sai.entries, sai.nr_entries, sizeof(sai.entries[0]),
			xfs_scrub_agblock_cmp, NULL);
	for (i = 1; i < sai.nr_entries; i++) {
		if (sai.entries[i] == sai.entries[i - 1]) {
			xfs_scrub_block_set_corrupt(sc, sc->sa.agf_bp);
			break;
		}
	}

out_free:
	kmem_free(sai.entries);
out:
	return error;
}

/* AGI */

/* Scrub the AGI. */
int
xfs_scrub_agi(
	struct xfs_scrub_context	*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_agi			*agi;
	xfs_agnumber_t			agno;
	xfs_agblock_t			agbno;
	xfs_agblock_t			eoag;
	xfs_agino_t			agino;
	xfs_agino_t			first_agino;
	xfs_agino_t			last_agino;
	xfs_agino_t			icount;
	int				i;
	int				level;
	int				error = 0;

	agno = sc->sa.agno = sc->sm->sm_agno;
	error = xfs_scrub_ag_read_headers(sc, agno, &sc->sa.agi_bp,
			&sc->sa.agf_bp, &sc->sa.agfl_bp);
	if (!xfs_scrub_process_error(sc, agno, XFS_AGI_BLOCK(sc->mp), &error))
		goto out;

	agi = XFS_BUF_TO_AGI(sc->sa.agi_bp);

	/* Check the AG length */
	eoag = be32_to_cpu(agi->agi_length);
	if (eoag != xfs_ag_block_count(mp, agno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check btree roots and levels */
	agbno = be32_to_cpu(agi->agi_root);
	if (!xfs_verify_agbno(mp, agno, agbno))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	level = be32_to_cpu(agi->agi_level);
	if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agbno = be32_to_cpu(agi->agi_free_root);
		if (!xfs_verify_agbno(mp, agno, agbno))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

		level = be32_to_cpu(agi->agi_free_level);
		if (level <= 0 || level > XFS_BTREE_MAXLEVELS)
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	/* Check inode counters */
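	/*
	 * agi_count can never exceed the number of inode numbers that fit
	 * in this AG, and the free count can never exceed the total count.
	 */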
	xfs_ialloc_agino_range(mp, agno, &first_agino, &last_agino);
	icount = be32_to_cpu(agi->agi_count);
	if (icount > last_agino - first_agino + 1 ||
	    icount < be32_to_cpu(agi->agi_freecount))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check inode pointers */
	agino = be32_to_cpu(agi->agi_newino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	agino = be32_to_cpu(agi->agi_dirino);
	if (agino != NULLAGINO && !xfs_verify_agino(mp, agno, agino))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

	/* Check unlinked inode buckets */
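	/*
	 * agi_unlinked is a hash table of singly-linked lists of inodes
	 * that have been unlinked but are still open; each bucket holds
	 * NULLAGINO or the head of one list, which must be a valid inode
	 * number in this AG.
	 */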
	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		agino = be32_to_cpu(agi->agi_unlinked[i]);
		if (agino == NULLAGINO)
			continue;
		if (!xfs_verify_agino(mp, agno, agino))
			xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);
	}

	if (agi->agi_pad32 != cpu_to_be32(0))
		xfs_scrub_block_set_corrupt(sc, sc->sa.agi_bp);

out:
	return error;
}