1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_trans.h"
16 #include "xfs_trans_priv.h"
17 #include "xfs_inode_item.h"
18 #include "xfs_quota.h"
19 #include "xfs_trace.h"
20 #include "xfs_icache.h"
21 #include "xfs_bmap_util.h"
22 #include "xfs_dquot_item.h"
23 #include "xfs_dquot.h"
24 #include "xfs_reflink.h"
25 #include "xfs_ialloc.h"
27 #include <linux/iversion.h>
30 * Allocate and initialise an xfs_inode.
40 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
41 * and return NULL here on ENOMEM.
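 * __GFP_NOFAIL makes the slab allocation below retry internally rather
 * than return NULL, so no allocation failure check follows it.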
43 ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL);
45 if (inode_init_always(mp->m_super, VFS_I(ip))) {
46 kmem_cache_free(xfs_inode_zone, ip);
50 /* VFS doesn't initialise i_mode! */
51 VFS_I(ip)->i_mode = 0;
53 XFS_STATS_INC(mp, vn_active);
54 ASSERT(atomic_read(&ip->i_pincount) == 0);
55 ASSERT(ip->i_ino == 0);
57 /* initialise the xfs inode */
60 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
63 memset(&ip->i_df, 0, sizeof(ip->i_df));
65 ip->i_delayed_blks = 0;
66 ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
71 INIT_WORK(&ip->i_ioend_work, xfs_end_io);
72 INIT_LIST_HEAD(&ip->i_ioend_list);
73 spin_lock_init(&ip->i_ioend_lock);
79 xfs_inode_free_callback(
80 struct rcu_head *head)
82 struct inode *inode = container_of(head, struct inode, i_rcu);
83 struct xfs_inode *ip = XFS_I(inode);
85 switch (VFS_I(ip)->i_mode & S_IFMT) {
89 xfs_idestroy_fork(&ip->i_df);
94 xfs_idestroy_fork(ip->i_afp);
95 kmem_cache_free(xfs_ifork_zone, ip->i_afp);
98 xfs_idestroy_fork(ip->i_cowfp);
99 kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
102 ASSERT(!test_bit(XFS_LI_IN_AIL,
103 &ip->i_itemp->ili_item.li_flags));
104 xfs_inode_item_destroy(ip);
108 kmem_cache_free(xfs_inode_zone, ip);
113 struct xfs_inode *ip)
115 /* asserts to verify all state is correct here */
116 ASSERT(atomic_read(&ip->i_pincount) == 0);
117 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
118 XFS_STATS_DEC(ip->i_mount, vn_active);
120 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
125 struct xfs_inode *ip)
127 ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
130 * Because we use RCU freeing we need to ensure the inode always
131 * appears to be reclaimed with an invalid inode number when in the
132 * free state. The ip->i_flags_lock provides the barrier against lookup
135 spin_lock(&ip->i_flags_lock);
136 ip->i_flags = XFS_IRECLAIM;
138 spin_unlock(&ip->i_flags_lock);
140 __xfs_inode_free(ip);
144 * Queue background inode reclaim work if there are reclaimable inodes and there
145 * isn't reclaim work already scheduled or in progress.
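 *
 * The delay used below is one sixth of the xfs_syncd_centisecs interval;
 * multiplying the centisecond tunable by 10 converts it to milliseconds
 * for msecs_to_jiffies().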
148 xfs_reclaim_work_queue(
149 struct xfs_mount *mp)
153 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
154 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
155 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
161 xfs_perag_set_reclaim_tag(
162 struct xfs_perag *pag)
164 struct xfs_mount *mp = pag->pag_mount;
166 lockdep_assert_held(&pag->pag_ici_lock);
167 if (pag->pag_ici_reclaimable++)
170 /* propagate the reclaim tag up into the perag radix tree */
171 spin_lock(&mp->m_perag_lock);
172 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
173 XFS_ICI_RECLAIM_TAG);
174 spin_unlock(&mp->m_perag_lock);
176 /* schedule periodic background inode reclaim */
177 xfs_reclaim_work_queue(mp);
179 trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
183 xfs_perag_clear_reclaim_tag(
184 struct xfs_perag *pag)
186 struct xfs_mount *mp = pag->pag_mount;
188 lockdep_assert_held(&pag->pag_ici_lock);
189 if (--pag->pag_ici_reclaimable)
192 /* clear the reclaim tag from the perag radix tree */
193 spin_lock(&mp->m_perag_lock);
194 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
195 XFS_ICI_RECLAIM_TAG);
196 spin_unlock(&mp->m_perag_lock);
197 trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
202 * We set the inode flag atomically with the radix tree tag.
203 * Once we get tag lookups on the radix tree, this inode flag
207 xfs_inode_set_reclaim_tag(
208 struct xfs_inode *ip)
210 struct xfs_mount *mp = ip->i_mount;
211 struct xfs_perag *pag;
213 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
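 /*
  * Lock order: pag->pag_ici_lock is taken first, with ip->i_flags_lock
  * nested inside it, matching the lockdep assertion in
  * xfs_perag_set_reclaim_tag().
  */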
214 spin_lock(&pag->pag_ici_lock);
215 spin_lock(&ip->i_flags_lock);
217 radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
218 XFS_ICI_RECLAIM_TAG);
219 xfs_perag_set_reclaim_tag(pag);
220 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
222 spin_unlock(&ip->i_flags_lock);
223 spin_unlock(&pag->pag_ici_lock);
228 xfs_inode_clear_reclaim_tag(
229 struct xfs_perag *pag,
232 radix_tree_tag_clear(&pag->pag_ici_root,
233 XFS_INO_TO_AGINO(pag->pag_mount, ino),
234 XFS_ICI_RECLAIM_TAG);
235 xfs_perag_clear_reclaim_tag(pag);
240 struct xfs_inode *ip)
242 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
243 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
246 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
247 if (!xfs_iflags_test(ip, XFS_INEW))
251 finish_wait(wq, &wait.wq_entry);
255 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
256 * part of the structure. This is made more complex by the fact we store
257 * information about the on-disk values in the VFS inode and so we can't just
258 * overwrite the values unconditionally. Hence we save the parameters we
259 * need to retain across reinitialisation, and rewrite them into the VFS inode
260 * after reinitialisation even if it fails.
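 * The fields saved below are the link count, generation, i_version, mode,
 * device number and owner ids; they are written back once
 * inode_init_always() returns.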
264 struct xfs_mount *mp,
268 uint32_t nlink = inode->i_nlink;
269 uint32_t generation = inode->i_generation;
270 uint64_t version = inode_peek_iversion(inode);
271 umode_t mode = inode->i_mode;
272 dev_t dev = inode->i_rdev;
273 kuid_t uid = inode->i_uid;
274 kgid_t gid = inode->i_gid;
276 error = inode_init_always(mp->m_super, inode);
278 set_nlink(inode, nlink);
279 inode->i_generation = generation;
280 inode_set_iversion_queried(inode, version);
281 inode->i_mode = mode;
289 * If we are allocating a new inode, then check what was returned is
290 * actually a free, empty inode. If we are not allocating an inode,
291 * then check we didn't find a free inode.
294 * 0 if the inode free state matches the lookup context
295 * -ENOENT if the inode is free and we are not allocating
296 * -EFSCORRUPTED if there is any state mismatch at all
299 xfs_iget_check_free_state(
300 struct xfs_inode *ip,
303 if (flags & XFS_IGET_CREATE) {
304 /* should be a free inode */
305 if (VFS_I(ip)->i_mode != 0) {
306 xfs_warn(ip->i_mount,
307 "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
308 ip->i_ino, VFS_I(ip)->i_mode);
309 return -EFSCORRUPTED;
312 if (ip->i_nblocks != 0) {
313 xfs_warn(ip->i_mount,
314 "Corruption detected! Free inode 0x%llx has blocks allocated!",
316 return -EFSCORRUPTED;
321 /* should be an allocated inode */
322 if (VFS_I(ip)->i_mode == 0)
329 * Check the validity of the inode we just found in the cache
333 struct xfs_perag *pag,
334 struct xfs_inode *ip,
337 int lock_flags) __releases(RCU)
339 struct inode *inode = VFS_I(ip);
340 struct xfs_mount *mp = ip->i_mount;
344 * check for re-use of an inode within an RCU grace period due to the
345 * radix tree nodes not being updated yet. We monitor for this by
346 * setting the inode number to zero before freeing the inode structure.
347 * If the inode has been reallocated and set up, then the inode number
348 * will not match, so check for that, too.
350 spin_lock(&ip->i_flags_lock);
351 if (ip->i_ino != ino) {
352 trace_xfs_iget_skip(ip);
353 XFS_STATS_INC(mp, xs_ig_frecycle);
360 * If we are racing with another cache hit that is currently
361 * instantiating this inode or currently recycling it out of
362 * reclaimable state, wait for the initialisation to complete
365 * XXX(hch): eventually we should do something equivalent to
366 * wait_on_inode to wait for these flags to be cleared
367 * instead of polling for it.
369 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
370 trace_xfs_iget_skip(ip);
371 XFS_STATS_INC(mp, xs_ig_frecycle);
377 * Check the inode free state is valid. This also detects lookup
378 * racing with unlinks.
380 error = xfs_iget_check_free_state(ip, flags);
385 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
386 * Need to carefully get it back into usable state.
388 if (ip->i_flags & XFS_IRECLAIMABLE) {
389 trace_xfs_iget_reclaim(ip);
391 if (flags & XFS_IGET_INCORE) {
397 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
398 * from stomping over us while we recycle the inode. We can't
399 * clear the radix tree reclaimable tag yet as it requires
400 * pag_ici_lock to be held exclusive.
402 ip->i_flags |= XFS_IRECLAIM;
404 spin_unlock(&ip->i_flags_lock);
407 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
408 error = xfs_reinit_inode(mp, inode);
412 * Re-initializing the inode failed, and we are in deep
413 * trouble. Try to re-add it to the reclaim list.
416 spin_lock(&ip->i_flags_lock);
417 wake = !!__xfs_iflags_test(ip, XFS_INEW);
418 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
420 wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
421 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
422 trace_xfs_iget_reclaim_fail(ip);
426 spin_lock(&pag->pag_ici_lock);
427 spin_lock(&ip->i_flags_lock);
430 * Clear the per-lifetime state in the inode as we are now
431 * effectively a new inode and need to return to the initial
432 * state before reuse occurs.
434 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
435 ip->i_flags |= XFS_INEW;
436 xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
437 inode->i_state = I_NEW;
441 spin_unlock(&ip->i_flags_lock);
442 spin_unlock(&pag->pag_ici_lock);
444 /* If the VFS inode is being torn down, pause and try again. */
446 trace_xfs_iget_skip(ip);
451 /* We've got a live one. */
452 spin_unlock(&ip->i_flags_lock);
454 trace_xfs_iget_hit(ip);
458 xfs_ilock(ip, lock_flags);
460 if (!(flags & XFS_IGET_INCORE))
461 xfs_iflags_clear(ip, XFS_ISTALE);
462 XFS_STATS_INC(mp, xs_ig_found);
467 spin_unlock(&ip->i_flags_lock);
475 struct xfs_mount *mp,
476 struct xfs_perag *pag,
479 struct xfs_inode **ipp,
483 struct xfs_inode *ip;
485 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
488 ip = xfs_inode_alloc(mp, ino);
492 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
497 * For version 5 superblocks, if we are initialising a new inode and we
498 * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can
499 * simply build the new inode core with a random generation number.
501 * For version 4 (and older) superblocks, log recovery is dependent on
502 * the i_flushiter field being initialised from the current on-disk
503 * value and hence we must also read the inode off disk even when
504 * initializing new inodes.
506 if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
507 (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
508 VFS_I(ip)->i_generation = prandom_u32();
512 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
516 error = xfs_inode_from_disk(ip,
517 xfs_buf_offset(bp, ip->i_imap.im_boffset));
519 xfs_buf_set_ref(bp, XFS_INO_REF);
520 xfs_trans_brelse(tp, bp);
526 trace_xfs_iget_miss(ip);
529 * Check the inode free state is valid. This also detects lookup
530 * racing with unlinks.
532 error = xfs_iget_check_free_state(ip, flags);
537 * Preload the radix tree so we can insert safely under the
538 * write spinlock. Note that we cannot sleep inside the preload
539 * region. Since we can be called from transaction context, don't
540 * recurse into the file system.
542 if (radix_tree_preload(GFP_NOFS)) {
548 * Because the inode hasn't been added to the radix-tree yet it can't
549 * be found by another thread, so we can do the non-sleeping lock here.
552 if (!xfs_ilock_nowait(ip, lock_flags))
557 * These values must be set before inserting the inode into the radix
558 * tree as the moment it is inserted a concurrent lookup (allowed by the
559 * RCU locking mechanism) can find it and that lookup must see that this
560 * is an inode currently under construction (i.e. that XFS_INEW is set).
561 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
562 * memory barrier that ensures this detection works correctly at lookup
566 if (flags & XFS_IGET_DONTCACHE)
567 d_mark_dontcache(VFS_I(ip));
571 xfs_iflags_set(ip, iflags);
573 /* insert the new inode */
574 spin_lock(&pag->pag_ici_lock);
575 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
576 if (unlikely(error)) {
577 WARN_ON(error != -EEXIST);
578 XFS_STATS_INC(mp, xs_ig_dup);
580 goto out_preload_end;
582 spin_unlock(&pag->pag_ici_lock);
583 radix_tree_preload_end();
589 spin_unlock(&pag->pag_ici_lock);
590 radix_tree_preload_end();
592 xfs_iunlock(ip, lock_flags);
594 __destroy_inode(VFS_I(ip));
600 * Look up an inode by number in the given file system. The inode is looked up
601 * in the cache held in each AG. If the inode is found in the cache, initialise
602 * the vfs inode if necessary.
604 * If it is not in core, read it in from the file system's device, add it to the
605 * cache and initialise the vfs inode.
607 * The inode is locked according to the value of the lock_flags parameter.
608 * Inode lookup is only done during metadata operations and not as part of the
609 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
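 *
 * A minimal caller sketch (illustrative only, not taken from this file):
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);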
613 struct xfs_mount *mp,
614 struct xfs_trans *tp,
618 struct xfs_inode **ipp)
620 struct xfs_inode *ip;
621 struct xfs_perag *pag;
625 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
627 /* reject inode numbers outside existing AGs */
628 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
631 XFS_STATS_INC(mp, xs_ig_attempts);
633 /* get the perag structure and ensure that it's inode capable */
634 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
635 agino = XFS_INO_TO_AGINO(mp, ino);
640 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
643 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
645 goto out_error_or_again;
648 if (flags & XFS_IGET_INCORE) {
650 goto out_error_or_again;
652 XFS_STATS_INC(mp, xs_ig_missed);
654 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
657 goto out_error_or_again;
664 * If we have a real type for an on-disk inode, we can set up the inode
665 * now. If it's a new inode being created, xfs_ialloc will handle it.
667 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
668 xfs_setup_existing_inode(ip);
672 if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
681 * "Is this a cached inode that's also allocated?"
683 * Look up an inode by number in the given file system. If the inode is
684 * in cache and isn't in purgatory, return 1 if the inode is allocated
685 * and 0 if it is not. For all other cases (not in cache, being torn
686 * down, etc.), return a negative error code.
688 * The caller has to prevent inode allocation and freeing activity,
689 * presumably by locking the AGI buffer. This is to ensure that an
690 * inode cannot transition from allocated to freed until the caller is
691 * ready to allow that. If the inode is in an intermediate state (new,
692 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
693 * inode is not in the cache, -ENOENT will be returned. The caller must
694 * deal with these scenarios appropriately.
696 * This is a specialized use case for the online scrubber; if you're
697 * reading this, you probably want xfs_iget.
700 xfs_icache_inode_is_allocated(
701 struct xfs_mount *mp,
702 struct xfs_trans *tp,
706 struct xfs_inode *ip;
709 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
713 *inuse = !!(VFS_I(ip)->i_mode);
719 * The inode lookup is done in batches to keep the amount of lock traffic and
720 * radix tree lookups to a minimum. The batch size is a trade off between
721 * lookup reduction and stack usage. This is in the reclaim path, so we can't
724 #define XFS_LOOKUP_BATCH 32
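/* A batch of 32 inode pointers is a 256 byte on-stack array on 64-bit. */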
727 * Decide if the given @ip is eligible to be a part of the inode walk, and
728 * grab it if so. Returns true if it's ready to go or false if we should just
732 xfs_inode_walk_ag_grab(
733 struct xfs_inode *ip,
736 struct inode *inode = VFS_I(ip);
737 bool newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT);
739 ASSERT(rcu_read_lock_held());
741 /* Check for stale RCU freed inode */
742 spin_lock(&ip->i_flags_lock);
744 goto out_unlock_noent;
746 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
747 if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
748 __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
749 goto out_unlock_noent;
750 spin_unlock(&ip->i_flags_lock);
752 /* nothing to sync during shutdown */
753 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
756 /* If we can't grab the inode, it must be on its way to reclaim. */
764 spin_unlock(&ip->i_flags_lock);
769 * For a given per-AG structure @pag, grab, @execute, and rele all incore
770 * inodes with the given radix tree @tag.
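 *
 * The walk looks up at most XFS_LOOKUP_BATCH inodes per radix tree pass,
 * grabs each candidate while still under the RCU read lock, then drops that
 * lock before calling @execute on the grabbed inodes.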
774 struct xfs_perag *pag,
776 int (*execute)(struct xfs_inode *ip, void *args),
780 struct xfs_mount *mp = pag->pag_mount;
781 uint32_t first_index;
793 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
799 if (tag == XFS_ICI_NO_TAG)
800 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
801 (void **)batch, first_index,
804 nr_found = radix_tree_gang_lookup_tag(
806 (void **) batch, first_index,
807 XFS_LOOKUP_BATCH, tag);
815 * Grab the inodes before we drop the lock. If we found
816 * nothing, nr_found == 0 and the loop will be skipped.
818 for (i = 0; i < nr_found; i++) {
819 struct xfs_inode *ip = batch[i];
821 if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
825 * Update the index for the next lookup. Catch
826 * overflows into the next AG range which can occur if
827 * we have inodes in the last block of the AG and we
828 * are currently pointing to the last inode.
830 * Because we may see inodes that are from the wrong AG
831 * due to RCU freeing and reallocation, only update the
832 * index if it lies in this AG. It was a race that led
833 * us to see this inode, so another lookup from the
834 * same index will not find it again.
836 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
838 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
839 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
843 /* unlock now that we've grabbed the inodes. */
846 for (i = 0; i < nr_found; i++) {
849 if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) &&
850 xfs_iflags_test(batch[i], XFS_INEW))
851 xfs_inew_wait(batch[i]);
852 error = execute(batch[i], args);
854 if (error == -EAGAIN) {
858 if (error && last_error != -EFSCORRUPTED)
862 /* bail out if the filesystem is corrupted. */
863 if (error == -EFSCORRUPTED)
868 } while (nr_found && !done);
877 /* Fetch the next (possibly tagged) per-AG structure. */
878 static inline struct xfs_perag *
879 xfs_inode_walk_get_perag(
880 struct xfs_mount *mp,
884 if (tag == XFS_ICI_NO_TAG)
885 return xfs_perag_get(mp, agno);
886 return xfs_perag_get_tag(mp, agno, tag);
890 * Call the @execute function on all incore inodes matching the radix tree
895 struct xfs_mount *mp,
897 int (*execute)(struct xfs_inode *ip, void *args),
901 struct xfs_perag *pag;
907 while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) {
908 ag = pag->pag_agno + 1;
909 error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag);
913 if (error == -EFSCORRUPTED)
921 * Grab the inode for reclaim exclusively.
923 * We have found this inode via a lookup under RCU, so the inode may have
924 * already been freed, or it may be in the process of being recycled by
925 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
926 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
927 * will not be set. Hence we need to check for both these flag conditions to
928 * avoid inodes that are no longer reclaim candidates.
930 * Note: checking for other state flags here, under the i_flags_lock or not, is
931 * racy and should be avoided. Those races should be resolved only after we have
932 * ensured that we are able to reclaim this inode and the world can see that we
933 * are going to reclaim it.
935 * Return true if we grabbed it, false otherwise.
938 xfs_reclaim_inode_grab(
939 struct xfs_inode *ip)
941 ASSERT(rcu_read_lock_held());
943 spin_lock(&ip->i_flags_lock);
944 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
945 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
946 /* not a reclaim candidate. */
947 spin_unlock(&ip->i_flags_lock);
950 __xfs_iflags_set(ip, XFS_IRECLAIM);
951 spin_unlock(&ip->i_flags_lock);
956 * Inode reclaim is non-blocking, so the default action if progress cannot be
957 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
958 * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about
959 * blocking anymore and hence we can wait for the inode to be able to reclaim
962 * We do no IO here - if callers require inodes to be cleaned they must push the
963 * AIL first to trigger writeback of dirty inodes. This enables writeback to be
964 * done in the background in a non-blocking manner, and enables memory reclaim
965 * to make progress without blocking.
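 *
 * The requeue cases handled below are: failing the non-blocking ILOCK
 * attempt, finding the inode already being flushed, finding it still pinned
 * in the log, or finding it still dirty.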
969 struct xfs_inode *ip,
970 struct xfs_perag *pag)
972 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
974 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
976 if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
979 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
981 xfs_iflush_abort(ip);
984 if (xfs_ipincount(ip))
985 goto out_clear_flush;
986 if (!xfs_inode_clean(ip))
987 goto out_clear_flush;
989 xfs_iflags_clear(ip, XFS_IFLUSHING);
993 * Because we use RCU freeing we need to ensure the inode always appears
994 * to be reclaimed with an invalid inode number when in the free state.
995 * We do this as early as possible under the ILOCK so that
996 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
997 * detect races with us here. By doing this, we guarantee that once
998 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
999 * it will see either a valid inode that will serialise correctly, or it
1000 * will see an invalid inode that it can skip.
1002 spin_lock(&ip->i_flags_lock);
1003 ip->i_flags = XFS_IRECLAIM;
1005 spin_unlock(&ip->i_flags_lock);
1007 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1009 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1011 * Remove the inode from the per-AG radix tree.
1013 * Because radix_tree_delete won't complain even if the item was never
1014 * added to the tree, assert that it's been there before to catch
1015 * problems with the inode lifetime early on.
1017 spin_lock(&pag->pag_ici_lock);
1018 if (!radix_tree_delete(&pag->pag_ici_root,
1019 XFS_INO_TO_AGINO(ip->i_mount, ino)))
1021 xfs_perag_clear_reclaim_tag(pag);
1022 spin_unlock(&pag->pag_ici_lock);
1025 * Here we do an (almost) spurious inode lock in order to coordinate
1026 * with inode cache radix tree lookups. This is because the lookup
1027 * can reference the inodes in the cache without taking references.
1029 * We make that OK here by ensuring that we wait until the inode is
1030 * unlocked after the lookup before we go ahead and free it.
1032 xfs_ilock(ip, XFS_ILOCK_EXCL);
1033 xfs_qm_dqdetach(ip);
1034 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1035 ASSERT(xfs_inode_clean(ip));
1037 __xfs_inode_free(ip);
1041 xfs_iflags_clear(ip, XFS_IFLUSHING);
1043 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1045 xfs_iflags_clear(ip, XFS_IRECLAIM);
1049 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1050 * corrupted, we still want to try to reclaim all the inodes. If we don't,
1051 * then a shutdown during a filesystem unmount reclaim walk can leak all the
1052 * unreclaimed inodes.
1054 * Returns non-zero if any AGs or inodes were skipped in the reclaim pass
1055 * so that callers that want to block until all dirty inodes are written back
1056 * and reclaimed can sanely loop.
1059 xfs_reclaim_inodes_ag(
1060 struct xfs_mount *mp,
1063 struct xfs_perag *pag;
1064 xfs_agnumber_t ag = 0;
1066 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1067 unsigned long first_index = 0;
1071 ag = pag->pag_agno + 1;
1073 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1075 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1079 nr_found = radix_tree_gang_lookup_tag(
1081 (void **)batch, first_index,
1083 XFS_ICI_RECLAIM_TAG);
1091 * Grab the inodes before we drop the lock. If we found
1092 * nothing, nr_found == 0 and the loop will be skipped.
1094 for (i = 0; i < nr_found; i++) {
1095 struct xfs_inode *ip = batch[i];
1097 if (done || !xfs_reclaim_inode_grab(ip))
1101 * Update the index for the next lookup. Catch
1102 * overflows into the next AG range which can
1103 * occur if we have inodes in the last block of
1104 * the AG and we are currently pointing to the
1107 * Because we may see inodes that are from the
1108 * wrong AG due to RCU freeing and
1109 * reallocation, only update the index if it
1110 * lies in this AG. It was a race that led us
1111 * to see this inode, so another lookup from
1112 * the same index will not find it again.
1114 if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1117 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1118 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1122 /* unlock now that we've grabbed the inodes. */
1125 for (i = 0; i < nr_found; i++) {
1127 xfs_reclaim_inode(batch[i], pag);
1130 *nr_to_scan -= XFS_LOOKUP_BATCH;
1132 } while (nr_found && !done && *nr_to_scan > 0);
1136 WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1143 struct xfs_mount *mp)
1145 int nr_to_scan = INT_MAX;
1147 while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
1148 xfs_ail_push_all_sync(mp->m_ail);
1149 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1154 * The shrinker infrastructure determines how many inodes we should scan for
1155 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
1156 * push the AIL here. We also want to proactively free up memory if we can to
1157 * minimise the amount of work memory reclaim has to do, so we kick the
1158 * background reclaim if it isn't already scheduled.
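 *
 * The scan itself is synchronous: xfs_reclaim_inodes_ag() runs directly
 * below with the shrinker-supplied scan count.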
1161 xfs_reclaim_inodes_nr(
1162 struct xfs_mount *mp,
1165 /* kick background reclaimer and push the AIL */
1166 xfs_reclaim_work_queue(mp);
1167 xfs_ail_push_all(mp->m_ail);
1169 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1174 * Return the number of reclaimable inodes in the filesystem for
1175 * the shrinker to determine how much to reclaim.
1178 xfs_reclaim_inodes_count(
1179 struct xfs_mount *mp)
1181 struct xfs_perag *pag;
1182 xfs_agnumber_t ag = 0;
1183 int reclaimable = 0;
1185 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1186 ag = pag->pag_agno + 1;
1187 reclaimable += pag->pag_ici_reclaimable;
1195 struct xfs_inode *ip,
1196 struct xfs_eofblocks *eofb)
1198 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1199 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1202 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1203 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1206 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1207 ip->i_projid != eofb->eof_prid)
1214 * A union-based inode filtering algorithm. Process the inode if any of the
1215 * criteria match. This is for global/internal scans only.
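 *
 * For example, with both XFS_EOF_FLAGS_UID and XFS_EOF_FLAGS_GID set, the
 * default filter only matches inodes owned by that uid and gid, while the
 * union filter matches inodes owned by either of them.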
1218 xfs_inode_match_id_union(
1219 struct xfs_inode *ip,
1220 struct xfs_eofblocks *eofb)
1222 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1223 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1226 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1227 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1230 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1231 ip->i_projid == eofb->eof_prid)
1238 * Is this inode @ip eligible for eof/cow block reclamation, given some
1239 * filtering parameters @eofb? The inode is eligible if @eofb is null or
1240 * if the predicate functions match.
1243 xfs_inode_matches_eofb(
1244 struct xfs_inode *ip,
1245 struct xfs_eofblocks *eofb)
1252 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1253 match = xfs_inode_match_id_union(ip, eofb);
1255 match = xfs_inode_match_id(ip, eofb);
1259 /* skip the inode if the file size is too small */
1260 if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
1261 XFS_ISIZE(ip) < eofb->eof_min_file_size)
1268 * This is a fast pass over the inode cache to try to get reclaim moving on as
1269 * many inodes as possible in a short period of time. It kicks itself every few
1270 * seconds, as well as being kicked by the inode cache shrinker when memory
1275 struct work_struct *work)
1277 struct xfs_mount *mp = container_of(to_delayed_work(work),
1278 struct xfs_mount, m_reclaim_work);
1279 int nr_to_scan = INT_MAX;
1281 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1282 xfs_reclaim_work_queue(mp);
1286 xfs_inode_free_eofblocks(
1287 struct xfs_inode *ip,
1289 unsigned int *lockflags)
1291 struct xfs_eofblocks *eofb = args;
1294 wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
1296 if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1300 * If the mapping is dirty, the operation can block and wait for some
1301 * time. Unless we are waiting, skip it.
1303 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1306 if (!xfs_inode_matches_eofb(ip, eofb))
1310 * If the caller is waiting, return -EAGAIN to keep the background
1311 * scanner moving and revisit the inode in a subsequent pass.
1313 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1318 *lockflags |= XFS_IOLOCK_EXCL;
1320 if (xfs_can_free_eofblocks(ip, false))
1321 return xfs_free_eofblocks(ip);
1323 /* inode could be preallocated or append-only */
1324 trace_xfs_inode_free_eofblocks_invalid(ip);
1325 xfs_inode_clear_eofblocks_tag(ip);
1330 * Background scanning to trim preallocated space. This is queued based on the
1331 * 'speculative_prealloc_lifetime' tunable (5m by default).
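 *
 * xfs_blockgc_secs holds that interval in seconds, so the delay below is
 * simply the tunable converted to milliseconds.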
1335 struct xfs_perag *pag)
1338 if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
1339 queue_delayed_work(pag->pag_mount->m_gc_workqueue,
1340 &pag->pag_blockgc_work,
1341 msecs_to_jiffies(xfs_blockgc_secs * 1000));
1346 xfs_blockgc_set_iflag(
1347 struct xfs_inode *ip,
1348 unsigned long iflag)
1350 struct xfs_mount *mp = ip->i_mount;
1351 struct xfs_perag *pag;
1354 ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1357 * Don't bother locking the AG and looking up in the radix trees
1358 * if we already know that we have the tag set.
1360 if (ip->i_flags & iflag)
1362 spin_lock(&ip->i_flags_lock);
1363 ip->i_flags |= iflag;
1364 spin_unlock(&ip->i_flags_lock);
1366 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1367 spin_lock(&pag->pag_ici_lock);
1369 tagged = radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG);
1370 radix_tree_tag_set(&pag->pag_ici_root,
1371 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1372 XFS_ICI_BLOCKGC_TAG);
1374 /* propagate the blockgc tag up into the perag radix tree */
1375 spin_lock(&ip->i_mount->m_perag_lock);
1376 radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1377 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1378 XFS_ICI_BLOCKGC_TAG);
1379 spin_unlock(&ip->i_mount->m_perag_lock);
1381 /* kick off background trimming */
1382 xfs_blockgc_queue(pag);
1384 trace_xfs_perag_set_blockgc(ip->i_mount, pag->pag_agno, -1,
1388 spin_unlock(&pag->pag_ici_lock);
1393 xfs_inode_set_eofblocks_tag(
1396 trace_xfs_inode_set_eofblocks_tag(ip);
1397 return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
1401 xfs_blockgc_clear_iflag(
1402 struct xfs_inode *ip,
1403 unsigned long iflag)
1405 struct xfs_mount *mp = ip->i_mount;
1406 struct xfs_perag *pag;
1409 ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1411 spin_lock(&ip->i_flags_lock);
1412 ip->i_flags &= ~iflag;
1413 clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
1414 spin_unlock(&ip->i_flags_lock);
1419 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1420 spin_lock(&pag->pag_ici_lock);
1422 radix_tree_tag_clear(&pag->pag_ici_root,
1423 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1424 XFS_ICI_BLOCKGC_TAG);
1425 if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) {
1426 /* clear the blockgc tag from the perag radix tree */
1427 spin_lock(&ip->i_mount->m_perag_lock);
1428 radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1429 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1430 XFS_ICI_BLOCKGC_TAG);
1431 spin_unlock(&ip->i_mount->m_perag_lock);
1432 trace_xfs_perag_clear_blockgc(ip->i_mount, pag->pag_agno, -1,
1436 spin_unlock(&pag->pag_ici_lock);
1441 xfs_inode_clear_eofblocks_tag(
1444 trace_xfs_inode_clear_eofblocks_tag(ip);
1445 return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
1449 * Set ourselves up to free CoW blocks from this file. If it's already clean
1450 * then we can bail out quickly, but otherwise we must back off if the file
1451 * is undergoing some kind of write.
1454 xfs_prep_free_cowblocks(
1455 struct xfs_inode *ip)
1458 * Just clear the tag if we have an empty cow fork or none at all. It's
1459 * possible the inode was fully unshared since it was originally tagged.
1461 if (!xfs_inode_has_cow_data(ip)) {
1462 trace_xfs_inode_free_cowblocks_invalid(ip);
1463 xfs_inode_clear_cowblocks_tag(ip);
1468 * If the mapping is dirty or under writeback, we cannot touch the
1469 * CoW fork. Leave it alone if we're in the midst of a directio.
1471 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1472 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1473 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1474 atomic_read(&VFS_I(ip)->i_dio_count))
1481 * Automatic CoW Reservation Freeing
1483 * These functions automatically garbage collect leftover CoW reservations
1484 * that were made on behalf of a cowextsize hint when we start to run out
1485 * of quota or when the reservations sit around for too long. If the file
1486 * has dirty pages or is undergoing writeback, its CoW reservations will
1489 * The actual garbage collection piggybacks off the same code that runs
1490 * the speculative EOF preallocation garbage collector.
1493 xfs_inode_free_cowblocks(
1494 struct xfs_inode *ip,
1496 unsigned int *lockflags)
1498 struct xfs_eofblocks *eofb = args;
1502 wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
1504 if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1507 if (!xfs_prep_free_cowblocks(ip))
1510 if (!xfs_inode_matches_eofb(ip, eofb))
1514 * If the caller is waiting, return -EAGAIN to keep the background
1515 * scanner moving and revisit the inode in a subsequent pass.
1517 if (!(*lockflags & XFS_IOLOCK_EXCL) &&
1518 !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1523 *lockflags |= XFS_IOLOCK_EXCL;
1525 if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1530 *lockflags |= XFS_MMAPLOCK_EXCL;
1533 * Check again, nobody else should be able to dirty blocks or change
1534 * the reflink iflag now that we have the first two locks held.
1536 if (xfs_prep_free_cowblocks(ip))
1537 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1542 xfs_inode_set_cowblocks_tag(
1545 trace_xfs_inode_set_cowblocks_tag(ip);
1546 return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
1550 xfs_inode_clear_cowblocks_tag(
1553 trace_xfs_inode_clear_cowblocks_tag(ip);
1554 return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
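
/*
 * Iterate all per-AG structures that currently carry @tag, taking a
 * reference on each pag and dropping it when advancing to the next AG.
 */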
1557 #define for_each_perag_tag(mp, next_agno, pag, tag) \
1558 for ((next_agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \
1560 (next_agno) = (pag)->pag_agno + 1, \
1561 xfs_perag_put(pag), \
1562 (pag) = xfs_perag_get_tag((mp), (next_agno), (tag)))
1565 /* Disable post-EOF and CoW block auto-reclamation. */
1568 struct xfs_mount *mp)
1570 struct xfs_perag *pag;
1571 xfs_agnumber_t agno;
1573 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1574 cancel_delayed_work_sync(&pag->pag_blockgc_work);
1577 /* Enable post-EOF and CoW block auto-reclamation. */
1580 struct xfs_mount *mp)
1582 struct xfs_perag *pag;
1583 xfs_agnumber_t agno;
1585 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1586 xfs_blockgc_queue(pag);
1589 /* Scan one incore inode for block preallocations that we can remove. */
1591 xfs_blockgc_scan_inode(
1592 struct xfs_inode *ip,
1595 unsigned int lockflags = 0;
1598 error = xfs_inode_free_eofblocks(ip, args, &lockflags);
1602 error = xfs_inode_free_cowblocks(ip, args, &lockflags);
1605 xfs_iunlock(ip, lockflags);
1609 /* Background worker that trims preallocated space. */
1612 struct work_struct *work)
1614 struct xfs_perag *pag = container_of(to_delayed_work(work),
1615 struct xfs_perag, pag_blockgc_work);
1616 struct xfs_mount *mp = pag->pag_mount;
1619 if (!sb_start_write_trylock(mp->m_super))
1621 error = xfs_inode_walk_ag(pag, 0, xfs_blockgc_scan_inode, NULL,
1622 XFS_ICI_BLOCKGC_TAG);
1624 xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1625 pag->pag_agno, error);
1626 sb_end_write(mp->m_super);
1627 xfs_blockgc_queue(pag);
1631 * Try to free space in the filesystem by purging eofblocks and cowblocks.
1634 xfs_blockgc_free_space(
1635 struct xfs_mount *mp,
1636 struct xfs_eofblocks *eofb)
1638 trace_xfs_blockgc_free_space(mp, eofb, _RET_IP_);
1640 return xfs_inode_walk(mp, 0, xfs_blockgc_scan_inode, eofb,
1641 XFS_ICI_BLOCKGC_TAG);
1645 * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which
1646 * quota caused an allocation failure, so we make a best effort by including
1647 * each quota under low free space conditions (less than 1% free space) in the
1650 * Callers must not hold any inode's ILOCK. If requesting a synchronous scan
1651 * (XFS_EOF_FLAGS_SYNC), the caller also must not hold any inode's IOLOCK or
1655 xfs_blockgc_free_dquots(
1656 struct xfs_mount *mp,
1657 struct xfs_dquot *udqp,
1658 struct xfs_dquot *gdqp,
1659 struct xfs_dquot *pdqp,
1660 unsigned int eof_flags)
1662 struct xfs_eofblocks eofb = {0};
1663 bool do_work = false;
1665 if (!udqp && !gdqp && !pdqp)
1669 * Run a scan to free blocks using the union filter to cover all
1670 * applicable quotas in a single scan.
1672 eofb.eof_flags = XFS_EOF_FLAGS_UNION | eof_flags;
1674 if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1675 eofb.eof_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1676 eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1680 if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1681 eofb.eof_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1682 eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1686 if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1687 eofb.eof_prid = pdqp->q_id;
1688 eofb.eof_flags |= XFS_EOF_FLAGS_PRID;
1695 return xfs_blockgc_free_space(mp, &eofb);
1698 /* Run cow/eofblocks scans on the quotas attached to the inode. */
1700 xfs_blockgc_free_quota(
1701 struct xfs_inode *ip,
1702 unsigned int eof_flags)
1704 return xfs_blockgc_free_dquots(ip->i_mount,
1705 xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1706 xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
1707 xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), eof_flags);