// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

#include <linux/iversion.h>

/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes. These can correspond with incore inode
 * radix tree tags when convenient. Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
};

static int xfs_icwalk(struct xfs_mount *mp,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);

/*
 * Private inode cache walk flags for struct xfs_icwalk.  Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)

#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
					 XFS_ICWALK_FLAG_UNION)

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode or i_state! */
	VFS_I(ip)->i_mode = 0;
	VFS_I(ip)->i_state = 0;
	mapping_set_large_folios(VFS_I(ip)->i_mapping);

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_cowfp = NULL;
	memset(&ip->i_af, 0, sizeof(ip->i_af));
	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_df.if_present = 1;
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);

	return ip;
}

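/*
 * RCU callback that performs the final teardown of an inode: free whatever
 * forks are still attached, destroy the inode log item, and return the
 * structure to the inode cache.
 */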
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	if (ip->i_af.if_present) {
		xfs_idestroy_fork(&ip->i_af);
		xfs_ifork_zap_attr(ip);
	}
	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_cache, ip);
}

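/*
 * Final stage of inode freeing: assert that the inode is idle, then hand it
 * to RCU so that concurrent lockless lookups remain safe until the grace
 * period expires.
 */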
static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

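/*
 * Mark the inode as freed under the i_flags_lock and hand it to RCU for the
 * final teardown.
 */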
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue background inode reclaim work if there are reclaimable inodes and
 * there isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	if (!xfs_is_blockgc_enabled(mp))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}

/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable++;

	if (was_tagged)
		return;

	/* propagate the tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	/* start background work */
	switch (tag) {
	case XFS_ICI_RECLAIM_TAG:
		xfs_reclaim_work_queue(mp);
		break;
	case XFS_ICI_BLOCKGC_TAG:
		xfs_blockgc_queue(pag);
		break;
	}

	trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}

/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);

	/*
	 * Reclaim can signal (with a null agino) that it cleared its own tag
	 * by removing the inode from the radix tree.
	 */
	if (agino != NULLAGINO)
		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	else
		ASSERT(tag == XFS_ICI_RECLAIM_TAG);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable--;

	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	/* clear the tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	mapping_set_large_folios(inode->i_mapping);
	return error;
}

/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state.  Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			error;

	trace_xfs_iget_recycle(ip);

	/*
	 * We need to make it look like the inode is being reclaimed to prevent
	 * the actual reclaim workers from stomping over us while we recycle
	 * the inode.  We can't clear the radix tree tag yet as it requires
	 * pag_ici_lock to be held exclusive.
	 */
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	if (error) {
		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble.  Try to re-add it to the reclaim list.
		 */
		rcu_read_lock();
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		trace_xfs_iget_recycle_fail(ip);
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);

	return 0;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/* Make all pending inactivation work start immediately. */
static void
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
	}
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * If we're racing with the inactivation worker we also want to wait.
	 * If we're creating a new file, it's possible that the worker
	 * previously marked the inode as free on disk but hasn't finished
	 * updating the incore state yet.  The AGI buffer will be dirty and
	 * locked to the icreate transaction, so a synchronous push of the
	 * inodegc workers would result in deadlock.  For a regular iget, the
	 * worker is running already, so we might as well wait.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		goto out_inodegc_flush;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/* Skip inodes that have no vfs state. */
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	/* The inode fits the selection criteria; process it. */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		/* Drops i_flags_lock and RCU read lock. */
		error = xfs_iget_recycle(pag, ip);
		if (error)
			return error;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode))
			goto out_skip;

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_skip:
	trace_xfs_iget_skip(ip);
	XFS_STATS_INC(mp, xs_ig_frecycle);
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;

out_inodegc_flush:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	/*
	 * Do not wait for the workers, because the caller could hold an AGI
	 * buffer lock. We're just going to sleep in a loop anyway.
	 */
	if (xfs_is_inodegc_enabled(mp))
		xfs_inodegc_queue_all(mp);
	return -EAGAIN;
}

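/*
 * Allocate and initialise a new incore inode, read the on-disk inode if
 * necessary, and insert the result into the per-AG radix tree where
 * concurrent lookups can find it.
 */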
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_has_v3inodes(mp) &&
	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
		VFS_I(ip)->i_generation = prandom_u32();
	} else {
		struct xfs_buf	*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.  The inode is looked up
 * in the cache held in each AG.  If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path.  Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.  If it's a new inode being created, xfs_init_new_inode will
	 * handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, return 1 if the inode is allocated
 * and 0 if it is not.  For all other cases (not in cache, being torn
 * down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.  This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	xfs_irele(ip);
	return 0;
}

/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_igrab(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	/* Don't reclaim a sick inode unless the caller asked for it. */
	if (ip->i_sick &&
	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait for the inode to be able to reclaim
 * it.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	/*
	 * Check for log shutdown because aborting the inode can move the log
	 * tail and corrupt in memory state. This is fine if the log is shut
	 * down, but if the log is still active and only the mount is shut down
	 * then the in-memory log tail movement caused by the abort can be
	 * incorrectly propagated to disk.
	 */
	if (xlog_is_shutdown(ip->i_mount->m_log)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_shutdown_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
	trace_xfs_inode_reclaiming(ip);

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}

/* Reclaim sick inodes if we're unmounting or the fs went down. */
static inline bool
xfs_want_reclaim_sick(
	struct xfs_mount	*mp)
{
	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
	       xfs_is_shutdown(mp);
}

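/*
 * Reclaim all reclaimable inodes, synchronously pushing the AIL between
 * passes so that dirty inodes can be written back and become reclaimable.
 */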
void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	unsigned long		nr_to_scan)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	return 0;
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	long			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

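/*
 * An intersection-based inode filtering algorithm: skip the inode unless
 * every id criterion requested in @icw matches.
 */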
STATIC bool
xfs_icwalk_match_id(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid != icw->icw_prid)
		return false;

	return true;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC bool
xfs_icwalk_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid == icw->icw_prid)
		return true;

	return false;
}

/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @icw?  The inode is eligible if @icw is null or
 * if the predicate functions match.
 */
static bool
xfs_icwalk_match(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	bool			match;

	if (!icw)
		return true;

	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
		match = xfs_icwalk_match_id_union(ip, icw);
	else
		match = xfs_icwalk_match_id(ip, icw);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
	    XFS_ISIZE(ip) < icw->icw_min_file_size)
		return false;

	return true;
}

1105
4d0bab3a
DC
1106/*
1107 * This is a fast pass over the inode cache to try to get reclaim moving on as
1108 * many inodes as possible in a short period of time. It kicks itself every few
1109 * seconds, as well as being kicked by the inode cache shrinker when memory
02511a5a 1110 * goes low.
4d0bab3a
DC
1111 */
1112void
1113xfs_reclaim_worker(
1114 struct work_struct *work)
1115{
1116 struct xfs_mount *mp = container_of(to_delayed_work(work),
1117 struct xfs_mount, m_reclaim_work);
4d0bab3a 1118
f1bc5c56 1119 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
4d0bab3a
DC
1120 xfs_reclaim_work_queue(mp);
1121}
1122
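/*
 * Try to free post-EOF speculative preallocations for an inode, taking the
 * IOLOCK if the inode passes the @icw filtering criteria.  Any lock taken
 * here is reported back through @lockflags for the caller to drop.
 */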
STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
		return 0;

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (xfs_can_free_eofblocks(ip, false))
		return xfs_free_eofblocks(ip);

	/* inode could be preallocated or append-only */
	trace_xfs_inode_free_eofblocks_invalid(ip);
	xfs_inode_clear_eofblocks_tag(ip);
	return 0;
}

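/*
 * Set a speculative preallocation iflag on the inode and tag the inode in
 * the per-AG tree so that background blockgc can find it.
 */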
static void
xfs_blockgc_set_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}

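/*
 * Clear a speculative preallocation iflag and, if no such flags remain set,
 * untag the inode in the per-AG tree so blockgc skips it.
 */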
static void
xfs_blockgc_clear_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	bool			clear_tag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);

	if (!clear_tag)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}

/*
 * Set ourselves up to free CoW blocks from this file.  If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long. If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;
	int			ret = 0;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
		return 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_MMAPLOCK_EXCL;

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
	return ret;
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}

/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (!xfs_clear_blockgc_enabled(mp))
		return;

	for_each_perag(mp, agno, pag)
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
	trace_xfs_blockgc_stop(mp, __return_address);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_start(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (xfs_set_blockgc_enabled(mp))
		return;

	trace_xfs_blockgc_start(mp, __return_address);
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		xfs_blockgc_queue(pag);
}

/* Don't try to run block gc on an inode that's in any of these states. */
#define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
					 XFS_NEED_INACTIVE | \
					 XFS_INACTIVATING | \
					 XFS_IRECLAIMABLE | \
					 XFS_IRECLAIM)
/*
 * Decide if the given @ip is eligible for garbage collection of speculative
 * preallocations, and grab it if so.  Returns true if it's ready to go or
 * false if we should just ignore it.
 */
static bool
xfs_blockgc_igrab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (xfs_is_shutdown(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}

/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	unsigned int		lockflags = 0;
	int			error;

	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
	if (error)
		goto unlock;

	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
unlock:
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	xfs_irele(ip);
	return error;
}

/* Background worker that trims preallocated space. */
void
xfs_blockgc_worker(
	struct work_struct	*work)
{
	struct xfs_perag	*pag = container_of(to_delayed_work(work),
					struct xfs_perag, pag_blockgc_work);
	struct xfs_mount	*mp = pag->pag_mount;
	int			error;

	trace_xfs_blockgc_worker(mp, __return_address);

	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag->pag_agno, error);
	xfs_blockgc_queue(pag);
}

/*
 * Try to free space in the filesystem by purging inactive inodes, eofblocks
 * and cowblocks.
 */
int
xfs_blockgc_free_space(
	struct xfs_mount	*mp,
	struct xfs_icwalk	*icw)
{
	int			error;

	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);

	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
	if (error)
		return error;

	xfs_inodegc_flush(mp);
	return 0;
}

/*
 * Reclaim all the free space that we can by scheduling the background blockgc
 * and inodegc workers immediately and waiting for them all to clear.
 */
void
xfs_blockgc_flush_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	trace_xfs_blockgc_flush_all(mp, __return_address);

	/*
	 * For each blockgc worker, move its queue time up to now.  If it
	 * wasn't queued, it will not be requeued.  Then flush whatever's
	 * left.
	 */
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
				&pag->pag_blockgc_work, 0);

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		flush_delayed_work(&pag->pag_blockgc_work);

	xfs_inodegc_flush(mp);
}

/*
 * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
 * quota caused an allocation failure, so we make a best effort by including
 * each quota under low free space conditions (less than 1% free space) in the
 * scan.
 *
 * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
 * MMAPLOCK.
 */
int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		iwalk_flags)
{
	struct xfs_icwalk	icw = {0};
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	/*
	 * Run a scan to free blocks using the union filter to cover all
	 * applicable quotas in a single scan.
	 */
	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
		do_work = true;
	}

	if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		icw.icw_prid = pdqp->q_id;
		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &icw);
}

/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
	struct xfs_inode	*ip,
	unsigned int		iwalk_flags)
{
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
}

/* XFS Inode Cache Walking Code */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32


/*
 * Decide if we want to grab this inode in anticipation of doing work towards
 * the goal.
 */
static inline bool
xfs_icwalk_igrab(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		return xfs_blockgc_igrab(ip);
	case XFS_ICWALK_RECLAIM:
		return xfs_reclaim_igrab(ip, icw);
	default:
		return false;
	}
}

/*
 * Process an inode.  Each processing function must handle any state changes
 * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
 */
static inline int
xfs_icwalk_process_inode(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	struct xfs_icwalk	*icw)
{
	int			error = 0;

	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		error = xfs_blockgc_scan_inode(ip, icw);
		break;
	case XFS_ICWALK_RECLAIM:
		xfs_reclaim_inode(ip, pag);
		break;
	}
	return error;
}

df600197 1635/*
f427cf5c
DW
1636 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1637 * process them in some manner.
df600197
DW
1638 */
1639static int
c1115c0c 1640xfs_icwalk_ag(
df600197 1641 struct xfs_perag *pag,
f427cf5c 1642 enum xfs_icwalk_goal goal,
b26b2bf1 1643 struct xfs_icwalk *icw)
df600197
DW
1644{
1645 struct xfs_mount *mp = pag->pag_mount;
1646 uint32_t first_index;
1647 int last_error = 0;
1648 int skipped;
1649 bool done;
1650 int nr_found;
1651
1652restart:
1653 done = false;
1654 skipped = 0;
f1bc5c56
DW
1655 if (goal == XFS_ICWALK_RECLAIM)
1656 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1657 else
1658 first_index = 0;
df600197
DW
1659 nr_found = 0;
1660 do {
1661 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1662 int error = 0;
1663 int i;
1664
1665 rcu_read_lock();
1666
a437b9b4
CH
1667 nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1668 (void **) batch, first_index,
1669 XFS_LOOKUP_BATCH, goal);
df600197 1670 if (!nr_found) {
f1bc5c56 1671 done = true;
df600197
DW
1672 rcu_read_unlock();
1673 break;
1674 }
1675
1676 /*
1677 * Grab the inodes before we drop the lock. if we found
1678 * nothing, nr == 0 and the loop will be skipped.
1679 */
1680 for (i = 0; i < nr_found; i++) {
1681 struct xfs_inode *ip = batch[i];
1682
b26b2bf1 1683 if (done || !xfs_icwalk_igrab(goal, ip, icw))
df600197
DW
1684 batch[i] = NULL;
1685
1686 /*
1687 * Update the index for the next lookup. Catch
1688 * overflows into the next AG range which can occur if
1689 * we have inodes in the last block of the AG and we
1690 * are currently pointing to the last inode.
1691 *
1692 * Because we may see inodes that are from the wrong AG
1693 * due to RCU freeing and reallocation, only update the
1694 * index if it lies in this AG. It was a race that lead
1695 * us to see this inode, so another lookup from the
1696 * same index will not find it again.
1697 */
1698 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1699 continue;
1700 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1701 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1702 done = true;
1703 }
1704
1705 /* unlock now we've grabbed the inodes. */
1706 rcu_read_unlock();
1707
1708 for (i = 0; i < nr_found; i++) {
1709 if (!batch[i])
1710 continue;
f1bc5c56 1711 error = xfs_icwalk_process_inode(goal, batch[i], pag,
b26b2bf1 1712 icw);
df600197
DW
1713 if (error == -EAGAIN) {
1714 skipped++;
1715 continue;
1716 }
1717 if (error && last_error != -EFSCORRUPTED)
1718 last_error = error;
1719 }
1720
1721 /* bail out if the filesystem is corrupted. */
1722 if (error == -EFSCORRUPTED)
1723 break;
1724
1725 cond_resched();
1726
b26b2bf1
DW
1727 if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1728 icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1729 if (icw->icw_scan_limit <= 0)
f1bc5c56
DW
1730 break;
1731 }
df600197
DW
1732 } while (nr_found && !done);
1733
f1bc5c56
DW
1734 if (goal == XFS_ICWALK_RECLAIM) {
1735 if (done)
1736 first_index = 0;
1737 WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1738 }
1739
df600197
DW
1740 if (skipped) {
1741 delay(1);
1742 goto restart;
1743 }
1744 return last_error;
1745}
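/*
 * Editor's sketch (not part of this file): the batched-lookup idiom that
 * xfs_icwalk_ag() uses above, reduced to its essentials. Only the RCU calls
 * and radix_tree_gang_lookup_tag() are real kernel API; every example_* name
 * is hypothetical. Note the real walk also pins each inode
 * (xfs_icwalk_igrab) before dropping rcu_read_lock(); this sketch elides
 * that and all error handling.
 */
static void
example_walk_tagged(
	struct radix_tree_root	*root,
	unsigned int		tag)
{
	void			*batch[XFS_LOOKUP_BATCH];
	unsigned long		first_index = 0;
	unsigned int		nr_found, i;

	do {
		rcu_read_lock();
		nr_found = radix_tree_gang_lookup_tag(root, batch,
				first_index, XFS_LOOKUP_BATCH, tag);
		if (nr_found) {
			/* advance the cursor past the last entry we saw */
			first_index = example_index_of(batch[nr_found - 1]) + 1;
		}
		rcu_read_unlock();

		/* process the batch outside the RCU critical section */
		for (i = 0; i < nr_found; i++)
			example_process(batch[i]);
	} while (nr_found);
}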
1746
f427cf5c 1747/* Walk all incore inodes to achieve a given goal. */
df600197 1748static int
c1115c0c 1749xfs_icwalk(
df600197 1750 struct xfs_mount *mp,
f427cf5c 1751 enum xfs_icwalk_goal goal,
b26b2bf1 1752 struct xfs_icwalk *icw)
df600197
DW
1753{
1754 struct xfs_perag *pag;
1755 int error = 0;
1756 int last_error = 0;
a437b9b4 1757 xfs_agnumber_t agno;
df600197 1758
a437b9b4 1759 for_each_perag_tag(mp, agno, pag, goal) {
b26b2bf1 1760 error = xfs_icwalk_ag(pag, goal, icw);
df600197
DW
1761 if (error) {
1762 last_error = error;
a437b9b4
CH
1763 if (error == -EFSCORRUPTED) {
1764 xfs_perag_put(pag);
df600197 1765 break;
a437b9b4 1766 }
df600197
DW
1767 }
1768 }
1769 return last_error;
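	/*
	 * Editor's note: BUILD_BUG_ON() is a compile-time assertion and emits
	 * no object code, so sitting after the return is harmless; it only
	 * verifies that the private walk flags cannot collide with the
	 * user-visible XFS_ICWALK_FLAGS_VALID set.
	 */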
2d53f66b 1770 BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
df600197 1771}
c6c2066d
DW
1772
1773#ifdef DEBUG
1774static void
1775xfs_check_delalloc(
1776 struct xfs_inode *ip,
1777 int whichfork)
1778{
732436ef 1779 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
c6c2066d
DW
1780 struct xfs_bmbt_irec got;
1781 struct xfs_iext_cursor icur;
1782
1783 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1784 return;
1785 do {
1786 if (isnullstartblock(got.br_startblock)) {
1787 xfs_warn(ip->i_mount,
1788 "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
1789 ip->i_ino,
1790 whichfork == XFS_DATA_FORK ? "data" : "cow",
1791 got.br_startoff, got.br_blockcount);
1792 }
1793 } while (xfs_iext_next_extent(ifp, &icur, &got));
1794}
1795#else
1796#define xfs_check_delalloc(ip, whichfork) do { } while (0)
1797#endif
1798
ab23a776
DC
1799/* Schedule the inode for reclaim. */
1800static void
1801xfs_inodegc_set_reclaimable(
c6c2066d
DW
1802 struct xfs_inode *ip)
1803{
1804 struct xfs_mount *mp = ip->i_mount;
1805 struct xfs_perag *pag;
c6c2066d 1806
75c8c50f 1807 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
c6c2066d
DW
1808 xfs_check_delalloc(ip, XFS_DATA_FORK);
1809 xfs_check_delalloc(ip, XFS_COW_FORK);
1810 ASSERT(0);
1811 }
1812
c6c2066d
DW
1813 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1814 spin_lock(&pag->pag_ici_lock);
1815 spin_lock(&ip->i_flags_lock);
1816
ab23a776
DC
1817 trace_xfs_inode_set_reclaimable(ip);
1818 ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1819 ip->i_flags |= XFS_IRECLAIMABLE;
c6c2066d
DW
1820 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1821 XFS_ICI_RECLAIM_TAG);
c6c2066d
DW
1822
1823 spin_unlock(&ip->i_flags_lock);
1824 spin_unlock(&pag->pag_ici_lock);
1825 xfs_perag_put(pag);
1826}
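/*
 * Editor's note: both pag_ici_lock and i_flags_lock are held above so that
 * the XFS_IRECLAIMABLE flag and the XFS_ICI_RECLAIM_TAG radix tree tag
 * change together; a concurrent icwalk never sees one without the other.
 */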
ab23a776
DC
1827
1828/*
1829 * Free all speculative preallocations and possibly even the inode itself.
1830 * This is the last chance to make changes to an otherwise unreferenced file
1831 * before incore reclamation happens.
1832 */
1833static void
1834xfs_inodegc_inactivate(
1835 struct xfs_inode *ip)
1836{
1837 trace_xfs_inode_inactivating(ip);
1838 xfs_inactive(ip);
1839 xfs_inodegc_set_reclaimable(ip);
1840}
1841
1842void
1843xfs_inodegc_worker(
1844 struct work_struct *work)
1845{
7cf2b0f9
DC
1846 struct xfs_inodegc *gc = container_of(to_delayed_work(work),
1847 struct xfs_inodegc, work);
ab23a776
DC
1848 struct llist_node *node = llist_del_all(&gc->list);
1849 struct xfs_inode *ip, *n;
1850
1851 WRITE_ONCE(gc->items, 0);
1852
1853 if (!node)
1854 return;
1855
1856 ip = llist_entry(node, struct xfs_inode, i_gclist);
40b1de00 1857 trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
ab23a776 1858
40b1de00 1859 WRITE_ONCE(gc->shrinker_hits, 0);
ab23a776
DC
1860 llist_for_each_entry_safe(ip, n, node, i_gclist) {
1861 xfs_iflags_set(ip, XFS_INACTIVATING);
1862 xfs_inodegc_inactivate(ip);
1863 }
1864}
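/*
 * Editor's sketch (illustrative, hypothetical example_* names): the
 * lock-free handoff the worker above relies on. Producers llist_add() onto
 * a per-CPU list; the consumer detaches everything in one atomic
 * llist_del_all() and walks it with the _safe iterator, which tolerates
 * entries being freed as they are visited.
 */
struct example_item {
	struct llist_node	node;
};

static void
example_producer(
	struct llist_head	*q,
	struct example_item	*it)
{
	llist_add(&it->node, q);		/* lock-free push */
}

static void
example_consumer(
	struct llist_head	*q)
{
	struct llist_node	*first = llist_del_all(q);
	struct example_item	*it, *n;

	llist_for_each_entry_safe(it, n, first, node)
		kfree(it);			/* safe: next pointer saved */
}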
1865
1866/*
5e672cd6
DC
1867 * Expedite all pending inodegc work to run immediately. This does not wait for
1868 * completion of the work.
ab23a776
DC
1869 */
1870void
5e672cd6 1871xfs_inodegc_push(
ab23a776
DC
1872 struct xfs_mount *mp)
1873{
ab23a776
DC
1874 if (!xfs_is_inodegc_enabled(mp))
1875 return;
5e672cd6
DC
1876 trace_xfs_inodegc_push(mp, __return_address);
1877 xfs_inodegc_queue_all(mp);
1878}
ab23a776 1879
5e672cd6
DC
1880/*
1881 * Force all currently queued inode inactivation work to run immediately and
1882 * wait for the work to finish.
1883 */
1884void
1885xfs_inodegc_flush(
1886 struct xfs_mount *mp)
1887{
1888 xfs_inodegc_push(mp);
ab23a776 1889 trace_xfs_inodegc_flush(mp, __return_address);
6191cf3a 1890 flush_workqueue(mp->m_inodegc_wq);
ab23a776
DC
1891}
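/*
 * Editor's sketch (hypothetical caller): choosing between push and flush.
 * Use xfs_inodegc_push() on latency-sensitive paths that only want queued
 * work expedited; use xfs_inodegc_flush() when the caller must observe the
 * results, e.g. freed blocks showing up in the free-space counters.
 */
static inline void
example_observe_inactivation(
	struct xfs_mount	*mp)
{
	xfs_inodegc_flush(mp);	/* blocks until queued inactivations finish */
	/* ... safe to sample space counters here ... */
}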
1892
1893/*
 1894 * Flush all pending work, then disable the inode inactivation background
 1895 * workers and wait for them to stop.
1896 */
1897void
1898xfs_inodegc_stop(
1899 struct xfs_mount *mp)
1900{
ab23a776
DC
1901 if (!xfs_clear_inodegc_enabled(mp))
1902 return;
1903
1904 xfs_inodegc_queue_all(mp);
6191cf3a 1905 drain_workqueue(mp->m_inodegc_wq);
ab23a776 1906
ab23a776
DC
1907 trace_xfs_inodegc_stop(mp, __return_address);
1908}
1909
1910/*
1911 * Enable the inode inactivation background workers and schedule deferred inode
1912 * inactivation work if there is any.
1913 */
1914void
1915xfs_inodegc_start(
1916 struct xfs_mount *mp)
1917{
1918 if (xfs_set_inodegc_enabled(mp))
1919 return;
1920
1921 trace_xfs_inodegc_start(mp, __return_address);
1922 xfs_inodegc_queue_all(mp);
1923}
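/*
 * Editor's sketch (hypothetical helper): stop/start are meant to bracket
 * quiesce points such as freeze-style paths, so that no background
 * inactivation runs while the filesystem must remain unchanged.
 */
static inline void
example_quiesced_section(
	struct xfs_mount	*mp)
{
	xfs_inodegc_stop(mp);	/* drain the queues, disable the workers */
	/* ... filesystem quiesced: no inactivation work can run ... */
	xfs_inodegc_start(mp);	/* re-enable and requeue deferred work */
}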
1924
65f03d86
DW
1925#ifdef CONFIG_XFS_RT
1926static inline bool
1927xfs_inodegc_want_queue_rt_file(
1928 struct xfs_inode *ip)
1929{
1930 struct xfs_mount *mp = ip->i_mount;
65f03d86
DW
1931
1932 if (!XFS_IS_REALTIME_INODE(ip))
1933 return false;
1934
2229276c
DW
1935 if (__percpu_counter_compare(&mp->m_frextents,
1936 mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
1937 XFS_FDBLOCKS_BATCH) < 0)
1938 return true;
1939
1940 return false;
65f03d86
DW
1941}
1942#else
1943# define xfs_inodegc_want_queue_rt_file(ip) (false)
1944#endif /* CONFIG_XFS_RT */
1945
ab23a776
DC
1946/*
1947 * Schedule the inactivation worker when:
1948 *
1949 * - We've accumulated more than one inode cluster buffer's worth of inodes.
7d6f07d2 1950 * - There is less than 5% free space left.
 * - This is a realtime file and the realtime volume is running low on free
 *   rt extents.
108523b8 1951 * - Any of the quotas for this inode are near an enforcement limit.
ab23a776
DC
1952 */
1953static inline bool
1954xfs_inodegc_want_queue_work(
1955 struct xfs_inode *ip,
1956 unsigned int items)
1957{
1958 struct xfs_mount *mp = ip->i_mount;
1959
1960 if (items > mp->m_ino_geo.inodes_per_cluster)
1961 return true;
1962
7d6f07d2
DW
1963 if (__percpu_counter_compare(&mp->m_fdblocks,
1964 mp->m_low_space[XFS_LOWSP_5_PCNT],
1965 XFS_FDBLOCKS_BATCH) < 0)
1966 return true;
1967
65f03d86
DW
1968 if (xfs_inodegc_want_queue_rt_file(ip))
1969 return true;
1970
108523b8
DW
1971 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
1972 return true;
1973
1974 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
1975 return true;
1976
1977 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
1978 return true;
1979
ab23a776
DC
1980 return false;
1981}
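/*
 * Editor's note on the low-space checks above: __percpu_counter_compare(fbc,
 * rhs, batch) returns a sign like memcmp(). It compares the cheap
 * approximate counter value first and only falls back to an exact
 * percpu_counter_sum() when the approximation is within
 * batch * num_online_cpus() of @rhs, so the common case stays O(1).
 */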
1982
1983/*
1984 * Upper bound on the number of inodes in each AG that can be queued for
1985 * inactivation at any given time, to avoid monopolizing the workqueue.
1986 */
1987#define XFS_INODEGC_MAX_BACKLOG (4 * XFS_INODES_PER_CHUNK)
1988
1989/*
1990 * Make the frontend wait for inactivations when:
1991 *
40b1de00 1992 * - Memory shrinkers queued the inactivation worker and it hasn't finished.
ab23a776
DC
1993 * - The queue depth exceeds the maximum allowable percpu backlog.
1994 *
1995 * Note: If the current thread is running a transaction, we don't ever want to
1996 * wait for other transactions because that could introduce a deadlock.
1997 */
1998static inline bool
1999xfs_inodegc_want_flush_work(
2000 struct xfs_inode *ip,
40b1de00
DW
2001 unsigned int items,
2002 unsigned int shrinker_hits)
ab23a776
DC
2003{
2004 if (current->journal_info)
2005 return false;
2006
40b1de00
DW
2007 if (shrinker_hits > 0)
2008 return true;
2009
ab23a776
DC
2010 if (items > XFS_INODEGC_MAX_BACKLOG)
2011 return true;
2012
2013 return false;
2014}
2015
2016/*
2017 * Queue a background inactivation worker if there are inodes that need to be
2018 * inactivated and higher level xfs code hasn't disabled the background
2019 * workers.
2020 */
2021static void
2022xfs_inodegc_queue(
2023 struct xfs_inode *ip)
2024{
2025 struct xfs_mount *mp = ip->i_mount;
2026 struct xfs_inodegc *gc;
2027 int items;
40b1de00 2028 unsigned int shrinker_hits;
7cf2b0f9 2029 unsigned long queue_delay = 1;
ab23a776
DC
2030
2031 trace_xfs_inode_set_need_inactive(ip);
2032 spin_lock(&ip->i_flags_lock);
2033 ip->i_flags |= XFS_NEED_INACTIVE;
2034 spin_unlock(&ip->i_flags_lock);
2035
2036 gc = get_cpu_ptr(mp->m_inodegc);
2037 llist_add(&ip->i_gclist, &gc->list);
2038 items = READ_ONCE(gc->items);
2039 WRITE_ONCE(gc->items, items + 1);
40b1de00 2040 shrinker_hits = READ_ONCE(gc->shrinker_hits);
ab23a776 2041
7cf2b0f9
DC
2042 /*
 2043 * We queue the work while pinned to the current CPU (get_cpu_ptr above)
 2044 * so that the work is scheduled to run on this CPU.
2045 */
2046 if (!xfs_is_inodegc_enabled(mp)) {
2047 put_cpu_ptr(gc);
ab23a776 2048 return;
ab23a776
DC
2049 }
2050
7cf2b0f9
DC
2051 if (xfs_inodegc_want_queue_work(ip, items))
2052 queue_delay = 0;
2053
2054 trace_xfs_inodegc_queue(mp, __return_address);
2055 mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
2056 put_cpu_ptr(gc);
2057
40b1de00 2058 if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
ab23a776 2059 trace_xfs_inodegc_throttle(mp, __return_address);
7cf2b0f9 2060 flush_delayed_work(&gc->work);
ab23a776
DC
2061 }
2062}
2063
2064/*
2065 * Fold the dead CPU inodegc queue into the current CPUs queue.
2066 */
2067void
2068xfs_inodegc_cpu_dead(
2069 struct xfs_mount *mp,
2070 unsigned int dead_cpu)
2071{
2072 struct xfs_inodegc *dead_gc, *gc;
2073 struct llist_node *first, *last;
2074 unsigned int count = 0;
2075
2076 dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
7cf2b0f9 2077 cancel_delayed_work_sync(&dead_gc->work);
ab23a776
DC
2078
2079 if (llist_empty(&dead_gc->list))
2080 return;
2081
2082 first = dead_gc->list.first;
2083 last = first;
2084 while (last->next) {
2085 last = last->next;
2086 count++;
2087 }
2088 dead_gc->list.first = NULL;
2089 dead_gc->items = 0;
2090
2091 /* Add pending work to current CPU */
2092 gc = get_cpu_ptr(mp->m_inodegc);
2093 llist_add_batch(first, last, &gc->list);
2094 count += READ_ONCE(gc->items);
2095 WRITE_ONCE(gc->items, count);
ab23a776
DC
2096
2097 if (xfs_is_inodegc_enabled(mp)) {
2098 trace_xfs_inodegc_queue(mp, __return_address);
7cf2b0f9 2099 mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
ab23a776 2100 }
7cf2b0f9 2101 put_cpu_ptr(gc);
ab23a776
DC
2102}
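/*
 * Editor's note on the splice above: an llist has no O(1) length or tail
 * pointer, so the dead CPU's list is walked once to find @last and count the
 * entries, then grafted onto this CPU's queue with a single
 * llist_add_batch(first, last, &gc->list).
 */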
2103
2104/*
2105 * We set the inode flag atomically with the radix tree tag. Once we get tag
2106 * lookups on the radix tree, this inode flag can go away.
2107 *
2108 * We always use background reclaim here because even if the inode is clean, it
 2109 * still may be under IO and hence we have to wait for IO completion to occur
2110 * before we can reclaim the inode. The background reclaim path handles this
2111 * more efficiently than we can here, so simply let background reclaim tear down
2112 * all inodes.
2113 */
2114void
2115xfs_inode_mark_reclaimable(
2116 struct xfs_inode *ip)
2117{
2118 struct xfs_mount *mp = ip->i_mount;
2119 bool need_inactive;
2120
2121 XFS_STATS_INC(mp, vn_reclaim);
2122
2123 /*
2124 * We should never get here with any of the reclaim flags already set.
2125 */
2126 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2127
2128 need_inactive = xfs_inode_needs_inactive(ip);
2129 if (need_inactive) {
2130 xfs_inodegc_queue(ip);
2131 return;
2132 }
2133
2134 /* Going straight to reclaim, so drop the dquots. */
2135 xfs_qm_dqdetach(ip);
2136 xfs_inodegc_set_reclaimable(ip);
2137}
40b1de00
DW
2138
2139/*
2140 * Register a phony shrinker so that we can run background inodegc sooner when
2141 * there's memory pressure. Inactivation does not itself free any memory but
2142 * it does make inodes reclaimable, which eventually frees memory.
2143 *
2144 * The count function, seek value, and batch value are crafted to trigger the
2145 * scan function during the second round of scanning. Hopefully this means
2146 * that we reclaimed enough memory that initiating metadata transactions won't
2147 * make things worse.
2148 */
2149#define XFS_INODEGC_SHRINKER_COUNT (1UL << DEF_PRIORITY)
2150#define XFS_INODEGC_SHRINKER_BATCH ((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
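/*
 * Editor's note, working through the numbers (vmscan details as of this
 * kernel vintage): DEF_PRIORITY is 12, so _COUNT is 4096 and _BATCH is 2049.
 * With ->seeks == 0, do_shrink_slab() uses a scan delta of freeable / 2 =
 * 2048 per round, which falls short of the batch size, so the first pass
 * defers; the second pass accumulates 4096 >= 2049 and ->scan_objects
 * finally runs, i.e. the "second round" promised above.
 */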
2151
2152static unsigned long
2153xfs_inodegc_shrinker_count(
2154 struct shrinker *shrink,
2155 struct shrink_control *sc)
2156{
2157 struct xfs_mount *mp = container_of(shrink, struct xfs_mount,
2158 m_inodegc_shrinker);
2159 struct xfs_inodegc *gc;
2160 int cpu;
2161
2162 if (!xfs_is_inodegc_enabled(mp))
2163 return 0;
2164
2165 for_each_online_cpu(cpu) {
2166 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2167 if (!llist_empty(&gc->list))
2168 return XFS_INODEGC_SHRINKER_COUNT;
2169 }
2170
2171 return 0;
2172}
2173
2174static unsigned long
2175xfs_inodegc_shrinker_scan(
2176 struct shrinker *shrink,
2177 struct shrink_control *sc)
2178{
2179 struct xfs_mount *mp = container_of(shrink, struct xfs_mount,
2180 m_inodegc_shrinker);
2181 struct xfs_inodegc *gc;
2182 int cpu;
2183 bool no_items = true;
2184
2185 if (!xfs_is_inodegc_enabled(mp))
2186 return SHRINK_STOP;
2187
2188 trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
2189
2190 for_each_online_cpu(cpu) {
2191 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2192 if (!llist_empty(&gc->list)) {
2193 unsigned int h = READ_ONCE(gc->shrinker_hits);
2194
2195 WRITE_ONCE(gc->shrinker_hits, h + 1);
7cf2b0f9 2196 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
40b1de00
DW
2197 no_items = false;
2198 }
2199 }
2200
2201 /*
2202 * If there are no inodes to inactivate, we don't want the shrinker
2203 * to think there's deferred work to call us back about.
2204 */
2205 if (no_items)
2206 return LONG_MAX;
2207
2208 return SHRINK_STOP;
2209}
2210
2211/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
2212int
2213xfs_inodegc_register_shrinker(
2214 struct xfs_mount *mp)
2215{
2216 struct shrinker *shrink = &mp->m_inodegc_shrinker;
2217
2218 shrink->count_objects = xfs_inodegc_shrinker_count;
2219 shrink->scan_objects = xfs_inodegc_shrinker_scan;
2220 shrink->seeks = 0;
2221 shrink->flags = SHRINKER_NONSLAB;
2222 shrink->batch = XFS_INODEGC_SHRINKER_BATCH;
2223
2224 return register_shrinker(shrink);
2225}
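/*
 * Editor's note (usage sketch): register_shrinker() allocates internal state
 * and can fail with -ENOMEM, which is why the error is returned for the
 * mount path to handle; the matching teardown is
 * unregister_shrinker(&mp->m_inodegc_shrinker) when the mount goes away.
 */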