// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"

#include <linux/iversion.h>
/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes.  These can correspond with incore inode
 * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals that are not related to tags; these must be < 0. */
	XFS_ICWALK_DQRELE	= -1,

	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
};

#define XFS_ICWALK_NULL_TAG	(-1U)

/* Compute the inode radix tree tag for this goal. */
static inline unsigned int
xfs_icwalk_tag(enum xfs_icwalk_goal goal)
{
	return goal < 0 ? XFS_ICWALK_NULL_TAG : goal;
}
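
/*
 * Illustrative examples (not called anywhere): a tag-backed goal maps
 * straight to its radix tree tag, while a negative goal has no tag:
 *
 *	xfs_icwalk_tag(XFS_ICWALK_BLOCKGC) == XFS_ICI_BLOCKGC_TAG
 *	xfs_icwalk_tag(XFS_ICWALK_DQRELE)  == XFS_ICWALK_NULL_TAG
 */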

static int xfs_icwalk(struct xfs_mount *mp,
		int (*execute)(struct xfs_inode *ip, void *args),
		void *args, enum xfs_icwalk_goal goal);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		int (*execute)(struct xfs_inode *ip, void *args),
		void *args, enum xfs_icwalk_goal goal);

/*
 * Private inode cache walk flags for struct xfs_eofblocks.  Must not
 * coincide with XFS_EOF_FLAGS_*.
 */
#define XFS_ICWALK_FLAG_DROP_UDQUOT	(1U << 31)
#define XFS_ICWALK_FLAG_DROP_GDQUOT	(1U << 30)
#define XFS_ICWALK_FLAG_DROP_PDQUOT	(1U << 29)

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_DROP_UDQUOT | \
					 XFS_ICWALK_FLAG_DROP_GDQUOT | \
					 XFS_ICWALK_FLAG_DROP_PDQUOT)

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	if (ip->i_afp) {
		xfs_idestroy_fork(ip->i_afp);
		kmem_cache_free(xfs_ifork_zone, ip->i_afp);
	}
	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_zone, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state.  The ip->i_flags_lock provides the barrier against
	 * lookup races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}
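
/*
 * Reader-side sketch (illustrative only): an RCU lookup that races with the
 * freeing above must recheck ip->i_ino under ip->i_flags_lock, which is
 * exactly what xfs_iget_cache_hit() does further down this file:
 *
 *	spin_lock(&ip->i_flags_lock);
 *	if (ip->i_ino != ino) {
 *		// inode was freed or recycled; back off and retry
 *		spin_unlock(&ip->i_flags_lock);
 *		return -EAGAIN;
 *	}
 */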

/*
 * Queue background inode reclaim work if there are reclaimable inodes and
 * there isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

static void
xfs_perag_set_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
xfs_inode_clear_reclaim_tag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}

static inline void
xfs_inew_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wq_entry);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure.  This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally.  Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	return error;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode.  If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		if (flags & XFS_IGET_INCORE) {
			error = -EAGAIN;
			goto out_error;
		}

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode. We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		ASSERT(!rwsem_is_locked(&inode->i_rwsem));
		error = xfs_reinit_inode(mp, inode);
		if (error) {
			bool	wake;
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);
			wake = !!__xfs_iflags_test(ip, XFS_INEW);
			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			if (wake)
				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;
		ip->i_sick = 0;
		ip->i_checked = 0;

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}
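
/*
 * Summary of the cache-hit outcomes above (for reference):
 *
 *	i_ino mismatch, or XFS_INEW/XFS_IRECLAIM set -> -EAGAIN, caller retries
 *	XFS_IRECLAIMABLE set			     -> recycle the VFS inode
 *	otherwise				     -> igrab() and reuse as-is
 */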

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
	    (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		VFS_I(ip)->i_generation = prandom_u32();
	} else {
		struct xfs_buf		*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.  The inode is looked up
 * in the cache held in each AG.  If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path.  Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
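
/*
 * Illustrative usage sketch (not a real caller): look up an inode with the
 * ILOCK held shared and release everything when done:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	// ... use ip ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	xfs_irele(ip);
 */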

/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, set *inuse to true if the inode is
 * allocated and false if it is not, and return 0.  For all other cases
 * (not in cache, being torn down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.  This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	xfs_irele(ip);
	return 0;
}

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum.  The batch size is a trade off between
 * lookup reduction and stack usage.  This is in the reclaim path, so we can't
 * be too greedy.
 *
 * XXX: This will be moved closer to xfs_icwalk* once we get rid of the
 * separate reclaim walk functions.
 */
#define XFS_LOOKUP_BATCH	32

#ifdef CONFIG_XFS_QUOTA
/* Decide if we want to grab this inode to drop its dquots. */
static bool
xfs_dqrele_igrab(
	struct xfs_inode	*ip)
{
	bool			ret = false;

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock;

	/*
	 * Skip inodes that are anywhere in the reclaim machinery because we
	 * drop dquots before tagging an inode for reclamation.
	 */
	if (ip->i_flags & (XFS_IRECLAIM | XFS_IRECLAIMABLE))
		goto out_unlock;

	/*
	 * The inode looks alive; try to grab a VFS reference so that it won't
	 * get destroyed.  If we got the reference, return true to say that
	 * we grabbed the inode.
	 *
	 * If we can't get the reference, then we know the inode had its VFS
	 * state torn down and hasn't yet entered the reclaim machinery.  Since
	 * we also know that dquots are detached from an inode before it enters
	 * reclaim, we can skip the inode.
	 */
	ret = igrab(VFS_I(ip)) != NULL;

out_unlock:
	spin_unlock(&ip->i_flags_lock);
	return ret;
}

/* Drop this inode's dquots. */
static int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	void			*priv)
{
	struct xfs_eofblocks	*eofb = priv;

	if (xfs_iflags_test(ip, XFS_INEW))
		xfs_inew_wait(ip);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_UDQUOT) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_GDQUOT) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_PDQUOT) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;
}

/*
 * Detach all dquots from incore inodes if we can.  The caller must already
 * have dropped the relevant XFS_[UGP]QUOTA_ACTIVE flags so that dquots will
 * not get reattached.
 */
int
xfs_dqrele_all_inodes(
	struct xfs_mount	*mp,
	unsigned int		qflags)
{
	struct xfs_eofblocks	eofb = { .eof_flags = 0 };

	if (qflags & XFS_UQUOTA_ACCT)
		eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_UDQUOT;
	if (qflags & XFS_GQUOTA_ACCT)
		eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_GDQUOT;
	if (qflags & XFS_PQUOTA_ACCT)
		eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_PDQUOT;

	return xfs_icwalk(mp, xfs_dqrele_inode, &eofb, XFS_ICWALK_DQRELE);
}
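
/*
 * Usage note (an assumption based on the quotaoff path): xfs_qm_scall_quotaoff
 * is the expected caller of xfs_dqrele_all_inodes(); it clears the
 * XFS_[UGP]QUOTA_ACTIVE flags first so that the walk above cannot race with
 * dquots being reattached to inodes.
 */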
#else
# define xfs_dqrele_igrab(ip)		(false)
#endif /* CONFIG_XFS_QUOTA */

/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait for the inode to be able to reclaim
 * it.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk would leak all
 * the unreclaimed inodes.
 *
 * This walk returns nothing; callers that want to block until all dirty
 * inodes are written back and reclaimed must push the AIL and loop until
 * the reclaim tag is clear, as xfs_reclaim_inodes() does.
 */
static void
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. if we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || !xfs_reclaim_inode_grab(ip))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (batch[i])
					xfs_reclaim_inode(batch[i], pag);
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;
			cond_resched();
		} while (nr_found && !done && *nr_to_scan > 0);

		if (done)
			first_index = 0;
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
		xfs_perag_put(pag);
	}
}

void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	int			nr_to_scan = INT_MAX;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_reclaim_inodes_ag(mp, &nr_to_scan);
	}
}

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, &nr_to_scan);
	return 0;
}
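
/*
 * Usage note (an assumption worth checking against xfs_super.c): the
 * superblock shrinker is the expected caller here, with
 * xfs_reclaim_inodes_nr() wired up to ->free_cached_objects() and
 * xfs_reclaim_inodes_count() below wired up to ->nr_cached_objects().
 */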

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC bool
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return false;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return false;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_projid != eofb->eof_prid)
		return false;

	return true;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC bool
xfs_inode_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return true;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return true;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    ip->i_projid == eofb->eof_prid)
		return true;

	return false;
}

/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @eofb?  The inode is eligible if @eofb is null or
 * if the predicate functions match.
 */
static bool
xfs_inode_matches_eofb(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	bool			match;

	if (!eofb)
		return true;

	if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
		match = xfs_inode_match_id_union(ip, eofb);
	else
		match = xfs_inode_match_id(ip, eofb);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
	    XFS_ISIZE(ip) < eofb->eof_min_file_size)
		return false;

	return true;
}
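
/*
 * Example filter (illustrative only; not used as-is anywhere): match inodes
 * owned by a given uid that are at least 1MiB in size, then run a scan:
 *
 *	struct xfs_eofblocks	eofb = {
 *		.eof_flags		= XFS_EOF_FLAGS_UID |
 *					  XFS_EOF_FLAGS_MINFILESIZE,
 *		.eof_uid		= uid,
 *		.eof_min_file_size	= SZ_1M,
 *	};
 *
 *	error = xfs_blockgc_free_space(mp, &eofb);
 */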

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);
	int		nr_to_scan = INT_MAX;

	xfs_reclaim_inodes_ag(mp, &nr_to_scan);
	xfs_reclaim_work_queue(mp);
}

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	void			*args,
	unsigned int		*lockflags)
{
	struct xfs_eofblocks	*eofb = args;
	bool			wait;

	wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);

	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
		return 0;

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_inode_matches_eofb(ip, eofb))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (xfs_can_free_eofblocks(ip, false))
		return xfs_free_eofblocks(ip);

	/* inode could be preallocated or append-only */
	trace_xfs_inode_free_eofblocks_invalid(ip);
	xfs_inode_clear_eofblocks_tag(ip);
	return 0;
}

/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_gc_workqueue,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}

static void
xfs_blockgc_set_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	int			tagged;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	tagged = radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_BLOCKGC_TAG);
	if (!tagged) {
		/* propagate the blockgc tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   XFS_ICI_BLOCKGC_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		xfs_blockgc_queue(pag);

		trace_xfs_perag_set_blockgc(ip->i_mount, pag->pag_agno, -1,
				_RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}

static void
xfs_blockgc_clear_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	bool			clear_tag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);

	if (!clear_tag)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			     XFS_ICI_BLOCKGC_TAG);
	if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) {
		/* clear the blockgc tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     XFS_ICI_BLOCKGC_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_blockgc(ip->i_mount, pag->pag_agno, -1,
				_RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}

/*
 * Set ourselves up to free CoW blocks from this file.  If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long. If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	void			*args,
	unsigned int		*lockflags)
{
	struct xfs_eofblocks	*eofb = args;
	bool			wait;
	int			ret = 0;

	wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);

	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
		return 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (!xfs_inode_matches_eofb(ip, eofb))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_MMAPLOCK_EXCL;

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
	return ret;
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}

#define for_each_perag_tag(mp, next_agno, pag, tag) \
	for ((next_agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
		(pag) != NULL; \
		(next_agno) = (pag)->pag_agno + 1, \
		xfs_perag_put(pag), \
		(pag) = xfs_perag_get_tag((mp), (next_agno), (tag)))


/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_start(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		xfs_blockgc_queue(pag);
}

/*
 * Decide if the given @ip is eligible for garbage collection of speculative
 * preallocations, and grab it if so.  Returns true if it's ready to go or
 * false if we should just ignore it.
 */
static bool
xfs_blockgc_igrab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}

/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
	struct xfs_inode	*ip,
	void			*args)
{
	unsigned int		lockflags = 0;
	int			error;

	error = xfs_inode_free_eofblocks(ip, args, &lockflags);
	if (error)
		goto unlock;

	error = xfs_inode_free_cowblocks(ip, args, &lockflags);
unlock:
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	return error;
}

/* Background worker that trims preallocated space. */
void
xfs_blockgc_worker(
	struct work_struct	*work)
{
	struct xfs_perag	*pag = container_of(to_delayed_work(work),
					struct xfs_perag, pag_blockgc_work);
	struct xfs_mount	*mp = pag->pag_mount;
	int			error;

	if (!sb_start_write_trylock(mp->m_super))
		return;
	error = xfs_icwalk_ag(pag, xfs_blockgc_scan_inode, NULL,
			XFS_ICWALK_BLOCKGC);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag->pag_agno, error);
	sb_end_write(mp->m_super);
	xfs_blockgc_queue(pag);
}

/*
 * Try to free space in the filesystem by purging eofblocks and cowblocks.
 */
int
xfs_blockgc_free_space(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	trace_xfs_blockgc_free_space(mp, eofb, _RET_IP_);

	return xfs_icwalk(mp, xfs_blockgc_scan_inode, eofb,
			XFS_ICWALK_BLOCKGC);
}

/*
 * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
 * quota caused an allocation failure, so we make a best effort by including
 * each quota under low free space conditions (less than 1% free space) in the
 * scan.
 *
 * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
 * (XFS_EOF_FLAGS_SYNC), the caller also must not hold any inode's IOLOCK or
 * MMAPLOCK.
 */
int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		eof_flags)
{
	struct xfs_eofblocks	eofb = {0};
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	/*
	 * Run a scan to free blocks using the union filter to cover all
	 * applicable quotas in a single scan.
	 */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION | eof_flags;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		eofb.eof_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		eofb.eof_flags |= XFS_EOF_FLAGS_UID;
		do_work = true;
	}

	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		eofb.eof_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		eofb.eof_flags |= XFS_EOF_FLAGS_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		eofb.eof_prid = pdqp->q_id;
		eofb.eof_flags |= XFS_EOF_FLAGS_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &eofb);
}

/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
	struct xfs_inode	*ip,
	unsigned int		eof_flags)
{
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), eof_flags);
}
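
/*
 * Usage note: callers are expected to invoke the two helpers above when an
 * allocation fails with -EDQUOT or -ENOSPC, then retry the operation once
 * the scan has had a chance to release speculative preallocations.
 */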

/* XFS Inode Cache Walking Code */

/*
 * Decide if we want to grab this inode in anticipation of doing work towards
 * the goal.  If selected, the VFS must hold a reference to this inode, which
 * will be released after processing.
 */
static inline bool
xfs_icwalk_igrab(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip)
{
	switch (goal) {
	case XFS_ICWALK_DQRELE:
		return xfs_dqrele_igrab(ip);
	case XFS_ICWALK_BLOCKGC:
		return xfs_blockgc_igrab(ip);
	default:
		return false;
	}
}

/*
 * For a given per-AG structure @pag, grab, @execute, and rele all incore
 * inodes selected by the walk @goal.
 */
static int
xfs_icwalk_ag(
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip, void *args),
	void			*args,
	enum xfs_icwalk_goal	goal)
{
	struct xfs_mount	*mp = pag->pag_mount;
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	bool			done;
	int			nr_found;

restart:
	done = false;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		unsigned int	tag = xfs_icwalk_tag(goal);
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == XFS_ICWALK_NULL_TAG)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. if we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || !xfs_icwalk_igrab(goal, ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], args);
			xfs_irele(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/* Fetch the next (possibly tagged) per-AG structure. */
static inline struct xfs_perag *
xfs_icwalk_get_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	enum xfs_icwalk_goal	goal)
{
	unsigned int		tag = xfs_icwalk_tag(goal);

	if (tag == XFS_ICWALK_NULL_TAG)
		return xfs_perag_get(mp, agno);
	return xfs_perag_get_tag(mp, agno, tag);
}

/*
 * Call the @execute function on all incore inodes selected by the walk @goal.
 */
static int
xfs_icwalk(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, void *args),
	void			*args,
	enum xfs_icwalk_goal	goal)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		agno = 0;

	while ((pag = xfs_icwalk_get_perag(mp, agno, goal))) {
		agno = pag->pag_agno + 1;
		error = xfs_icwalk_ag(pag, execute, args, goal);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
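
	/*
	 * Compile-time check (never executed at runtime): the private icwalk
	 * flags must not overlap the user-visible XFS_EOF_FLAGS_* namespace.
	 */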
	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_EOF_FLAGS_VALID);
}