// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"

#include <linux/iversion.h>
/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1
/*
 * The goal for walking incore inodes.  These can correspond with incore inode
 * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
};

#define XFS_ICWALK_NULL_TAG	(-1U)
/* Compute the inode radix tree tag for this goal. */
static inline unsigned int
xfs_icwalk_tag(enum xfs_icwalk_goal goal)
{
	return goal < 0 ? XFS_ICWALK_NULL_TAG : goal;
}
static int xfs_icwalk(struct xfs_mount *mp,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
/*
 * Private inode cache walk flags for struct xfs_icwalk.  Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)

#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
					 XFS_ICWALK_FLAG_UNION)
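
/*
 * Example (illustrative sketch, not part of the original source): a caller
 * that wants a bounded reclaim pass builds a private walk descriptor with
 * the scan-limit flag, much as xfs_reclaim_inodes_nr() does further down
 * in this file:
 *
 *	struct xfs_icwalk	icw = {
 *		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
 *		.icw_scan_limit	= 1024,
 *	};
 *
 *	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
 */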
/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);

	return ip;
}
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	if (ip->i_afp) {
		xfs_idestroy_fork(ip->i_afp);
		kmem_cache_free(xfs_ifork_zone, ip->i_afp);
	}
	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_zone, ip);
}
static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}
/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	if (!xfs_is_blockgc_enabled(mp))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}
/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable++;

	if (was_tagged)
		return;

	/* propagate the tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	/* start background work */
	switch (tag) {
	case XFS_ICI_RECLAIM_TAG:
		xfs_reclaim_work_queue(mp);
		break;
	case XFS_ICI_BLOCKGC_TAG:
		xfs_blockgc_queue(pag);
		break;
	}

	trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}
/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);

	/*
	 * Reclaim can signal (with a null agino) that it cleared its own tag
	 * by removing the inode from the radix tree.
	 */
	if (agino != NULLAGINO)
		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	else
		ASSERT(tag == XFS_ICI_RECLAIM_TAG);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable--;

	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	/* clear the tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}
static void
xfs_inew_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wq_entry);
}
/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	return error;
}
/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state.  Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			error;

	trace_xfs_iget_recycle(ip);

	/*
	 * We need to make it look like the inode is being reclaimed to prevent
	 * the actual reclaim workers from stomping over us while we recycle
	 * the inode.  We can't clear the radix tree tag yet as it requires
	 * pag_ici_lock to be held exclusive.
	 */
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	if (error) {
		bool	wake;

		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble.  Try to re-add it to the reclaim list.
		 */
		rcu_read_lock();
		spin_lock(&ip->i_flags_lock);
		wake = !!__xfs_iflags_test(ip, XFS_INEW);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		if (wake)
			wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		trace_xfs_iget_recycle_fail(ip);
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);

	return 0;
}
/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}
/* Make all pending inactivation work start immediately. */
static void
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
	}
}
/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * If we're racing with the inactivation worker we also want to wait.
	 * If we're creating a new file, it's possible that the worker
	 * previously marked the inode as free on disk but hasn't finished
	 * updating the incore state yet.  The AGI buffer will be dirty and
	 * locked to the icreate transaction, so a synchronous push of the
	 * inodegc workers would result in deadlock.  For a regular iget, the
	 * worker is running already, so we might as well wait.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	wait_on_inode to wait for these flags to be cleared
	 *	instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		goto out_inodegc_flush;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/* Skip inodes that have no vfs state. */
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	/* The inode fits the selection criteria; process it. */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		/* Drops i_flags_lock and RCU read lock. */
		error = xfs_iget_recycle(pag, ip);
		if (error)
			return error;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode))
			goto out_skip;

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_skip:
	trace_xfs_iget_skip(ip);
	XFS_STATS_INC(mp, xs_ig_frecycle);
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;

out_inodegc_flush:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	/*
	 * Do not wait for the workers, because the caller could hold an AGI
	 * buffer lock. We're just going to sleep in a loop anyway.
	 */
	if (xfs_is_inodegc_enabled(mp))
		xfs_inodegc_queue_all(mp);
	return -EAGAIN;
}
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
	    (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
		VFS_I(ip)->i_generation = prandom_u32();
	} else {
		struct xfs_buf		*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
/*
 * Look up an inode by number in the given file system.  The inode is looked up
 * in the cache held in each AG.  If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
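
/*
 * Example (illustrative sketch, not part of the original source): a typical
 * metadata-path caller pairs xfs_iget() with xfs_irele(), holding the ILOCK
 * only while it reads the inode:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... examine inode metadata under XFS_ILOCK_SHARED ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	xfs_irele(ip);
 */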
797 * "Is this a cached inode that's also allocated?"
799 * Look up an inode by number in the given file system. If the inode is
800 * in cache and isn't in purgatory, return 1 if the inode is allocated
801 * and 0 if it is not. For all other cases (not in cache, being torn
802 * down, etc.), return a negative error code.
804 * The caller has to prevent inode allocation and freeing activity,
805 * presumably by locking the AGI buffer. This is to ensure that an
806 * inode cannot transition from allocated to freed until the caller is
807 * ready to allow that. If the inode is in an intermediate state (new,
808 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
809 * inode is not in the cache, -ENOENT will be returned. The caller must
810 * deal with these scenarios appropriately.
812 * This is a specialized use case for the online scrubber; if you're
813 * reading this, you probably want xfs_iget.
816 xfs_icache_inode_is_allocated(
817 struct xfs_mount *mp,
818 struct xfs_trans *tp,
822 struct xfs_inode *ip;
825 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
829 *inuse = !!(VFS_I(ip)->i_mode);
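
/*
 * Example (hedged sketch, not from the original source): an online-scrub
 * style caller, already holding the AGI buffer locked, might probe the
 * cache like this and fall back to the inode btree on a miss:
 *
 *	bool	inuse;
 *	int	error;
 *
 *	error = xfs_icache_inode_is_allocated(mp, tp, ino, &inuse);
 *	if (error == -ENOENT || error == -EAGAIN)
 *		... consult the on-disk inode btree instead ...
 */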
/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_igrab(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	/* Don't reclaim a sick inode unless the caller asked for it. */
	if (ip->i_sick &&
	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}
/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait for the inode to be able to reclaim
 * it.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
	trace_xfs_inode_reclaiming(ip);

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}
/* Reclaim sick inodes if we're unmounting or the fs went down. */
static bool
xfs_want_reclaim_sick(
	struct xfs_mount	*mp)
{
	return (mp->m_flags & XFS_MOUNT_UNMOUNTING) ||
	       (mp->m_flags & XFS_MOUNT_NORECOVERY) ||
	       XFS_FORCED_SHUTDOWN(mp);
}

void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}
/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	unsigned long		nr_to_scan)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	return 0;
}
/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	long			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}
STATIC bool
xfs_icwalk_match_id(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid != icw->icw_prid)
		return false;

	return true;
}
/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC bool
xfs_icwalk_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid == icw->icw_prid)
		return true;

	return false;
}
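
/*
 * Example (illustrative sketch, not from the original source): with both ID
 * flags set, xfs_icwalk_match_id() selects inodes owned by the uid AND the
 * gid, while the union variant selects inodes owned by EITHER.  A union
 * filter is built the same way xfs_blockgc_free_dquots() does below:
 *
 *	struct xfs_icwalk	icw = { 0 };
 *
 *	icw.icw_flags = XFS_ICWALK_FLAG_UNION | XFS_ICWALK_FLAG_UID |
 *			XFS_ICWALK_FLAG_GID;
 *	icw.icw_uid = make_kuid(mp->m_super->s_user_ns, uid);
 *	icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gid);
 *	error = xfs_blockgc_free_space(mp, &icw);
 */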
/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @icw?  The inode is eligible if @icw is null or
 * if the predicate functions match.
 */
static bool
xfs_icwalk_match(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	bool			match;

	if (!icw)
		return true;

	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
		match = xfs_icwalk_match_id_union(ip, icw);
	else
		match = xfs_icwalk_match_id(ip, icw);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
	    XFS_ISIZE(ip) < icw->icw_min_file_size)
		return false;

	return true;
}
/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
	xfs_reclaim_work_queue(mp);
}
STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
		return 0;

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (xfs_can_free_eofblocks(ip, false))
		return xfs_free_eofblocks(ip);

	/* inode could be preallocated or append-only */
	trace_xfs_inode_free_eofblocks_invalid(ip);
	xfs_inode_clear_eofblocks_tag(ip);
	return 0;
}
static void
xfs_blockgc_set_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}
static void
xfs_blockgc_clear_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	bool			clear_tag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);

	if (!clear_tag)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}
/*
 * Set ourselves up to free CoW blocks from this file.  If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}
/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long.  If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;
	int			ret = 0;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
		return 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_MMAPLOCK_EXCL;

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
	return ret;
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}
/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (!xfs_clear_blockgc_enabled(mp))
		return;

	for_each_perag(mp, agno, pag)
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
	trace_xfs_blockgc_stop(mp, __return_address);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_start(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (xfs_set_blockgc_enabled(mp))
		return;

	trace_xfs_blockgc_start(mp, __return_address);
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		xfs_blockgc_queue(pag);
}
/* Don't try to run block gc on an inode that's in any of these states. */
#define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
					 XFS_NEED_INACTIVE | \
					 XFS_INACTIVATING | \
					 XFS_IRECLAIMABLE | \
					 XFS_IRECLAIM)
/*
 * Decide if the given @ip is eligible for garbage collection of speculative
 * preallocations, and grab it if so.  Returns true if it's ready to go or
 * false if we should just ignore it.
 */
static bool
xfs_blockgc_igrab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}
/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	unsigned int		lockflags = 0;
	int			error;

	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
	if (error)
		goto unlock;

	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
unlock:
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	xfs_irele(ip);
	return error;
}
/* Background worker that trims preallocated space. */
void
xfs_blockgc_worker(
	struct work_struct	*work)
{
	struct xfs_perag	*pag = container_of(to_delayed_work(work),
					struct xfs_perag, pag_blockgc_work);
	struct xfs_mount	*mp = pag->pag_mount;
	int			error;

	trace_xfs_blockgc_worker(mp, __return_address);

	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag->pag_agno, error);
	xfs_blockgc_queue(pag);
}
/*
 * Try to free space in the filesystem by purging inactive inodes, eofblocks
 * and cowblocks.
 */
int
xfs_blockgc_free_space(
	struct xfs_mount	*mp,
	struct xfs_icwalk	*icw)
{
	int			error;

	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);

	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
	if (error)
		return error;

	xfs_inodegc_flush(mp);
	return 0;
}
/*
 * Reclaim all the free space that we can by scheduling the background blockgc
 * and inodegc workers immediately and waiting for them all to clear.
 */
void
xfs_blockgc_flush_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	trace_xfs_blockgc_flush_all(mp, __return_address);

	/*
	 * For each blockgc worker, move its queue time up to now.  If it
	 * wasn't queued, it will not be requeued.  Then flush whatever's
	 * left.
	 */
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
				&pag->pag_blockgc_work, 0);

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		flush_delayed_work(&pag->pag_blockgc_work);

	xfs_inodegc_flush(mp);
}
/*
 * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
 * quota caused an allocation failure, so we make a best effort by including
 * each quota under low free space conditions (less than 1% free space) in the
 * scan.
 *
 * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
 * MMAPLOCK.
 */
int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		iwalk_flags)
{
	struct xfs_icwalk	icw = {0};
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	/*
	 * Run a scan to free blocks using the union filter to cover all
	 * applicable quotas in a single scan.
	 */
	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
		do_work = true;
	}

	if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		icw.icw_prid = pdqp->q_id;
		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &icw);
}

/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
	struct xfs_inode	*ip,
	unsigned int		iwalk_flags)
{
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
}
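
/*
 * Example (hedged sketch, not from the original source): allocation paths
 * can use this as a last-ditch space recovery step, retrying once after a
 * scan has had a chance to free speculative preallocations:
 *
 *	error = xfs_blockgc_free_quota(ip, 0);
 *	if (!error)
 *		... retry the allocation that returned -EDQUOT/-ENOSPC ...
 */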
/* XFS Inode Cache Walking Code */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32
/*
 * Decide if we want to grab this inode in anticipation of doing work towards
 * the goal.
 */
static inline bool
xfs_icwalk_igrab(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		return xfs_blockgc_igrab(ip);
	case XFS_ICWALK_RECLAIM:
		return xfs_reclaim_igrab(ip, icw);
	default:
		return false;
	}
}
/*
 * Process an inode.  Each processing function must handle any state changes
 * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
 */
static inline int
xfs_icwalk_process_inode(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	struct xfs_icwalk	*icw)
{
	int			error = 0;

	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		error = xfs_blockgc_scan_inode(ip, icw);
		break;
	case XFS_ICWALK_RECLAIM:
		xfs_reclaim_inode(ip, pag);
		break;
	}
	return error;
}
/*
 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
 * process them in some manner.
 */
static int
xfs_icwalk_ag(
	struct xfs_perag	*pag,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_mount	*mp = pag->pag_mount;
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	bool			done;
	int			nr_found;

restart:
	done = false;
	skipped = 0;
	if (goal == XFS_ICWALK_RECLAIM)
		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
	else
		first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		unsigned int	tag = xfs_icwalk_tag(goal);
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == XFS_ICWALK_NULL_TAG)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			done = true;
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. if we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || !xfs_icwalk_igrab(goal, ip, icw))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = xfs_icwalk_process_inode(goal, batch[i], pag,
					icw);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
			if (icw->icw_scan_limit <= 0)
				break;
		}
	} while (nr_found && !done);

	if (goal == XFS_ICWALK_RECLAIM) {
		if (done)
			first_index = 0;
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
	}

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
/* Fetch the next (possibly tagged) per-AG structure. */
static inline struct xfs_perag *
xfs_icwalk_get_perag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno,
	enum xfs_icwalk_goal	goal)
{
	unsigned int		tag = xfs_icwalk_tag(goal);

	if (tag == XFS_ICWALK_NULL_TAG)
		return xfs_perag_get(mp, agno);
	return xfs_perag_get_tag(mp, agno, tag);
}
/* Walk all incore inodes to achieve a given goal. */
static int
xfs_icwalk(
	struct xfs_mount	*mp,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		agno = 0;

	while ((pag = xfs_icwalk_get_perag(mp, agno, goal))) {
		agno = pag->pag_agno + 1;
		error = xfs_icwalk_ag(pag, goal, icw);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
}
#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif
/* Schedule the inode for reclaim. */
static void
xfs_inodegc_set_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	if (!XFS_FORCED_SHUTDOWN(mp) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	trace_xfs_inode_set_reclaimable(ip);
	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
	ip->i_flags |= XFS_IRECLAIMABLE;
	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}
/*
 * Free all speculative preallocations and possibly even the inode itself.
 * This is the last chance to make changes to an otherwise unreferenced file
 * before incore reclamation happens.
 */
static void
xfs_inodegc_inactivate(
	struct xfs_inode	*ip)
{
	trace_xfs_inode_inactivating(ip);
	xfs_inactive(ip);
	xfs_inodegc_set_reclaimable(ip);
}
void
xfs_inodegc_worker(
	struct work_struct	*work)
{
	struct xfs_inodegc	*gc = container_of(work, struct xfs_inodegc,
							work);
	struct llist_node	*node = llist_del_all(&gc->list);
	struct xfs_inode	*ip, *n;

	WRITE_ONCE(gc->items, 0);

	if (!node)
		return;

	ip = llist_entry(node, struct xfs_inode, i_gclist);
	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));

	WRITE_ONCE(gc->shrinker_hits, 0);
	llist_for_each_entry_safe(ip, n, node, i_gclist) {
		xfs_iflags_set(ip, XFS_INACTIVATING);
		xfs_inodegc_inactivate(ip);
	}
}
/*
 * Force all currently queued inode inactivation work to run immediately, and
 * wait for the work to finish. Two pass - queue all the work first pass, wait
 * for it in a second pass.
 */
void
xfs_inodegc_flush(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	if (!xfs_is_inodegc_enabled(mp))
		return;

	trace_xfs_inodegc_flush(mp, __return_address);

	xfs_inodegc_queue_all(mp);

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		flush_work(&gc->work);
	}
}
/*
 * Flush all the pending work and then disable the inode inactivation background
 * workers and wait for them to stop.
 */
void
xfs_inodegc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	if (!xfs_clear_inodegc_enabled(mp))
		return;

	xfs_inodegc_queue_all(mp);

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		cancel_work_sync(&gc->work);
	}
	trace_xfs_inodegc_stop(mp, __return_address);
}
/*
 * Enable the inode inactivation background workers and schedule deferred inode
 * inactivation work if there is any.
 */
void
xfs_inodegc_start(
	struct xfs_mount	*mp)
{
	if (xfs_set_inodegc_enabled(mp))
		return;

	trace_xfs_inodegc_start(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}
#ifdef CONFIG_XFS_RT
static inline bool
xfs_inodegc_want_queue_rt_file(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint64_t		freertx;

	if (!XFS_IS_REALTIME_INODE(ip))
		return false;

	freertx = READ_ONCE(mp->m_sb.sb_frextents);
	return freertx < mp->m_low_rtexts[XFS_LOWSP_5_PCNT];
}
#else
# define xfs_inodegc_want_queue_rt_file(ip)	(false)
#endif /* CONFIG_XFS_RT */
/*
 * Schedule the inactivation worker when:
 *
 *  - We've accumulated more than one inode cluster buffer's worth of inodes.
 *  - There is less than 5% free space left.
 *  - Any of the quotas for this inode are near an enforcement limit.
 */
static inline bool
xfs_inodegc_want_queue_work(
	struct xfs_inode	*ip,
	unsigned int		items)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (items > mp->m_ino_geo.inodes_per_cluster)
		return true;

	if (__percpu_counter_compare(&mp->m_fdblocks,
				mp->m_low_space[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	if (xfs_inodegc_want_queue_rt_file(ip))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
		return true;

	return false;
}
/*
 * Upper bound on the number of inodes in each AG that can be queued for
 * inactivation at any given time, to avoid monopolizing the workqueue.
 */
#define XFS_INODEGC_MAX_BACKLOG	(4 * XFS_INODES_PER_CHUNK)

/*
 * Make the frontend wait for inactivations when:
 *
 *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
 *  - The queue depth exceeds the maximum allowable percpu backlog.
 *
 * Note: If the current thread is running a transaction, we don't ever want to
 * wait for other transactions because that could introduce a deadlock.
 */
static inline bool
xfs_inodegc_want_flush_work(
	struct xfs_inode	*ip,
	unsigned int		items,
	unsigned int		shrinker_hits)
{
	if (current->journal_info)
		return false;

	if (shrinker_hits > 0)
		return true;

	if (items > XFS_INODEGC_MAX_BACKLOG)
		return true;

	return false;
}
/*
 * Queue a background inactivation worker if there are inodes that need to be
 * inactivated and higher level xfs code hasn't disabled the background
 * workers.
 */
static void
xfs_inodegc_queue(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inodegc	*gc;
	int			items;
	unsigned int		shrinker_hits;

	trace_xfs_inode_set_need_inactive(ip);
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_NEED_INACTIVE;
	spin_unlock(&ip->i_flags_lock);

	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add(&ip->i_gclist, &gc->list);
	items = READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, items + 1);
	shrinker_hits = READ_ONCE(gc->shrinker_hits);
	put_cpu_ptr(gc);

	if (!xfs_is_inodegc_enabled(mp))
		return;

	if (xfs_inodegc_want_queue_work(ip, items)) {
		trace_xfs_inodegc_queue(mp, __return_address);
		queue_work(mp->m_inodegc_wq, &gc->work);
	}

	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
		trace_xfs_inodegc_throttle(mp, __return_address);
		flush_work(&gc->work);
	}
}
/*
 * Fold the dead CPU inodegc queue into the current CPUs queue.
 */
void
xfs_inodegc_cpu_dead(
	struct xfs_mount	*mp,
	unsigned int		dead_cpu)
{
	struct xfs_inodegc	*dead_gc, *gc;
	struct llist_node	*first, *last;
	unsigned int		count = 0;

	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
	cancel_work_sync(&dead_gc->work);

	if (llist_empty(&dead_gc->list))
		return;

	first = dead_gc->list.first;
	last = first;
	while (last->next) {
		last = last->next;
		count++;
	}
	dead_gc->list.first = NULL;
	dead_gc->items = 0;

	/* Add pending work to current CPU */
	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add_batch(first, last, &gc->list);
	count += READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, count);
	put_cpu_ptr(gc);

	if (xfs_is_inodegc_enabled(mp)) {
		trace_xfs_inodegc_queue(mp, __return_address);
		queue_work(mp->m_inodegc_wq, &gc->work);
	}
}
/*
 * We set the inode flag atomically with the radix tree tag. Once we get tag
 * lookups on the radix tree, this inode flag can go away.
 *
 * We always use background reclaim here because even if the inode is clean, it
 * still may be under IO and hence we have to wait for IO completion to occur
 * before we can reclaim the inode. The background reclaim path handles this
 * more efficiently than we can here, so simply let background reclaim tear down
 * all inodes.
 */
void
xfs_inode_mark_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			need_inactive;

	XFS_STATS_INC(mp, vn_reclaim);

	/*
	 * We should never get here with any of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));

	need_inactive = xfs_inode_needs_inactive(ip);
	if (need_inactive) {
		xfs_inodegc_queue(ip);
		return;
	}

	/* Going straight to reclaim, so drop the dquots. */
	xfs_qm_dqdetach(ip);
	xfs_inodegc_set_reclaimable(ip);
}
/*
 * Register a phony shrinker so that we can run background inodegc sooner when
 * there's memory pressure.  Inactivation does not itself free any memory but
 * it does make inodes reclaimable, which eventually frees memory.
 *
 * The count function, seek value, and batch value are crafted to trigger the
 * scan function during the second round of scanning.  Hopefully this means
 * that we reclaimed enough memory that initiating metadata transactions won't
 * make things worse.
 */
#define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
#define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
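
/*
 * Worked example (approximate, assuming DEF_PRIORITY == 12): the count
 * callback reports 1UL << 12 == 4096 objects whenever any percpu queue is
 * non-empty, and the batch size is 4096 / 2 + 1 == 2049.  The shrinker core
 * scans roughly count >> priority objects per pass and defers the remainder,
 * so the accumulated deferral only crosses the batch threshold after an
 * initial pass, which is what makes the scan callback fire "during the
 * second round" as described above.
 */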
static unsigned long
xfs_inodegc_shrinker_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						   m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;

	if (!xfs_is_inodegc_enabled(mp))
		return 0;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			return XFS_INODEGC_SHRINKER_COUNT;
	}

	return 0;
}
static unsigned long
xfs_inodegc_shrinker_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						   m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			no_items = true;

	if (!xfs_is_inodegc_enabled(mp))
		return SHRINK_STOP;

	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			unsigned int	h = READ_ONCE(gc->shrinker_hits);

			WRITE_ONCE(gc->shrinker_hits, h + 1);
			queue_work_on(cpu, mp->m_inodegc_wq, &gc->work);
			no_items = false;
		}
	}

	/*
	 * If there are no inodes to inactivate, we don't want the shrinker
	 * to think there's deferred work to call us back about.
	 */
	if (no_items)
		return LONG_MAX;

	return SHRINK_STOP;
}
/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
int
xfs_inodegc_register_shrinker(
	struct xfs_mount	*mp)
{
	struct shrinker		*shrink = &mp->m_inodegc_shrinker;

	shrink->count_objects = xfs_inodegc_shrinker_count;
	shrink->scan_objects = xfs_inodegc_shrinker_scan;
	shrink->seeks = 0;
	shrink->flags = SHRINKER_NONSLAB;
	shrink->batch = XFS_INODEGC_SHRINKER_BATCH;

	return register_shrinker(shrink);
}