/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_btree.h"
#include "xfs_bit.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_inode_fork.h"
#include "xfs_alloc.h"
#include "xfs_bmap.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "xfs_dquot.h"
#include "xfs_dquot_item.h"
#include "scrub/xfs_scrub.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
45 /* Convert a scrub type code to a DQ flag, or return 0 if error. */
47 xfs_scrub_quota_to_dqtype(
48 struct xfs_scrub_context *sc)
50 switch (sc->sm->sm_type) {
51 case XFS_SCRUB_TYPE_UQUOTA:
53 case XFS_SCRUB_TYPE_GQUOTA:
55 case XFS_SCRUB_TYPE_PQUOTA:
62 /* Set us up to scrub a quota. */
64 xfs_scrub_setup_quota(
65 struct xfs_scrub_context *sc,
71 if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
74 dqtype = xfs_scrub_quota_to_dqtype(sc);
77 sc->has_quotaofflock = true;
78 mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
79 if (!xfs_this_quota_on(sc->mp, dqtype))
81 error = xfs_scrub_setup_fs(sc, ip);
84 sc->ip = xfs_quota_inode(sc->mp, dqtype);
85 xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
86 sc->ilock_flags = XFS_ILOCK_EXCL;
92 struct xfs_scrub_quota_info {
93 struct xfs_scrub_context *sc;
97 /* Scrub the fields in an individual quota item. */
100 struct xfs_dquot *dq,
104 struct xfs_scrub_quota_info *sqi = priv;
105 struct xfs_scrub_context *sc = sqi->sc;
106 struct xfs_mount *mp = sc->mp;
107 struct xfs_disk_dquot *d = &dq->q_core;
108 struct xfs_quotainfo *qi = mp->m_quotainfo;
109 xfs_fileoff_t offset;
110 unsigned long long bsoft;
111 unsigned long long isoft;
112 unsigned long long rsoft;
113 unsigned long long bhard;
114 unsigned long long ihard;
115 unsigned long long rhard;
116 unsigned long long bcount;
117 unsigned long long icount;
118 unsigned long long rcount;
120 xfs_dqid_t id = be32_to_cpu(d->d_id);
123 * Except for the root dquot, the actual dquot we got must either have
124 * the same or higher id as we saw before.
126 offset = id / qi->qi_dqperchunk;
127 if (id && id <= sqi->last_id)
128 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
132 /* Did we get the dquot type we wanted? */
133 if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
134 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
136 if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
137 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
139 /* Check the limits. */
140 bhard = be64_to_cpu(d->d_blk_hardlimit);
141 ihard = be64_to_cpu(d->d_ino_hardlimit);
142 rhard = be64_to_cpu(d->d_rtb_hardlimit);
144 bsoft = be64_to_cpu(d->d_blk_softlimit);
145 isoft = be64_to_cpu(d->d_ino_softlimit);
146 rsoft = be64_to_cpu(d->d_rtb_softlimit);
149 * Warn if the hard limits are larger than the fs.
150 * Administrators can do this, though in production this seems
151 * suspect, which is why we flag it for review.
153 * Complain about corruption if the soft limit is greater than
156 if (bhard > mp->m_sb.sb_dblocks)
157 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
159 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
161 if (ihard > mp->m_maxicount)
162 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
164 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
166 if (rhard > mp->m_sb.sb_rblocks)
167 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
169 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
171 /* Check the resource counts. */
172 bcount = be64_to_cpu(d->d_bcount);
173 icount = be64_to_cpu(d->d_icount);
174 rcount = be64_to_cpu(d->d_rtbcount);
175 fs_icount = percpu_counter_sum(&mp->m_icount);
178 * Check that usage doesn't exceed physical limits. However, on
179 * a reflink filesystem we're allowed to exceed physical space
180 * if there are no quota limits.
182 if (xfs_sb_version_hasreflink(&mp->m_sb)) {
183 if (mp->m_sb.sb_dblocks < bcount)
184 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK,
187 if (mp->m_sb.sb_dblocks < bcount)
188 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
191 if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
192 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);
195 * We can violate the hard limits if the admin suddenly sets a
196 * lower limit than the actual usage. However, we flag it for
199 if (id != 0 && bhard != 0 && bcount > bhard)
200 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
201 if (id != 0 && ihard != 0 && icount > ihard)
202 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
203 if (id != 0 && rhard != 0 && rcount > rhard)
204 xfs_scrub_fblock_set_warning(sc, XFS_DATA_FORK, offset);
209 /* Check the quota's data fork. */
211 xfs_scrub_quota_data_fork(
212 struct xfs_scrub_context *sc)
214 struct xfs_bmbt_irec irec = { 0 };
215 struct xfs_iext_cursor icur;
216 struct xfs_quotainfo *qi = sc->mp->m_quotainfo;
217 struct xfs_ifork *ifp;
218 xfs_fileoff_t max_dqid_off;
221 /* Invoke the fork scrubber. */
222 error = xfs_scrub_metadata_inode_forks(sc);
223 if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
226 /* Check for data fork problems that apply only to quota files. */
227 max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
228 ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
229 for_each_xfs_iext(ifp, &icur, &irec) {
230 if (xfs_scrub_should_terminate(sc, &error))
233 * delalloc extents or blocks mapped above the highest
234 * quota id shouldn't happen.
236 if (isnullstartblock(irec.br_startblock) ||
237 irec.br_startoff > max_dqid_off ||
238 irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
239 xfs_scrub_fblock_set_corrupt(sc, XFS_DATA_FORK,
248 /* Scrub all of a quota type's items. */
251 struct xfs_scrub_context *sc)
253 struct xfs_scrub_quota_info sqi;
254 struct xfs_mount *mp = sc->mp;
255 struct xfs_quotainfo *qi = mp->m_quotainfo;
259 dqtype = xfs_scrub_quota_to_dqtype(sc);
261 /* Look for problem extents. */
262 error = xfs_scrub_quota_data_fork(sc);
265 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
269 * Check all the quota items. Now that we've checked the quota inode
270 * data fork we have to drop ILOCK_EXCL to use the regular dquot
273 xfs_iunlock(sc->ip, sc->ilock_flags);
277 error = xfs_qm_dqiterate(mp, dqtype, xfs_scrub_quota_item, &sqi);
278 sc->ilock_flags = XFS_ILOCK_EXCL;
279 xfs_ilock(sc->ip, sc->ilock_flags);
280 if (!xfs_scrub_fblock_process_error(sc, XFS_DATA_FORK,
281 sqi.last_id * qi->qi_dqperchunk, &error))