// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

#include <linux/iversion.h>

/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes. These can correspond with incore inode
 * radix tree tags when convenient. Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC = XFS_ICI_BLOCKGC_TAG,
	XFS_ICWALK_RECLAIM = XFS_ICI_RECLAIM_TAG,
};

static int xfs_icwalk(struct xfs_mount *mp,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);

/*
 * Private inode cache walk flags for struct xfs_icwalk. Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)

#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
					 XFS_ICWALK_FLAG_UNION)

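/*
 * Illustration only: a caller-side sketch of how these flag namespaces
 * compose. The private flags above deliberately occupy high bits so they
 * can never collide with the public XFS_ICWALK_FLAGS_VALID flags (such as
 * XFS_ICWALK_FLAG_SYNC) that callers pass in via xfs_icache.h. A bounded,
 * synchronous walk request would be built roughly like this:
 *
 *	struct xfs_icwalk	icw = {
 *		.icw_flags	= XFS_ICWALK_FLAG_SYNC |
 *				  XFS_ICWALK_FLAG_SCAN_LIMIT,
 *		.icw_scan_limit	= 1024,
 *	};
 *
 *	error = xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
 */
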
/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode or i_state! */
	VFS_I(ip)->i_mode = 0;
	VFS_I(ip)->i_state = 0;
	mapping_set_large_folios(VFS_I(ip)->i_mapping);

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_cowfp = NULL;
	memset(&ip->i_af, 0, sizeof(ip->i_af));
	ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);
	ip->i_next_unlinked = NULLAGINO;
	ip->i_prev_unlinked = NULLAGINO;

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	xfs_ifork_zap_attr(ip);

	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_cache, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

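/*
 * Illustrative sketch (not a helper in this file): the lookup side that the
 * zeroed inode number above defends against does, in essence:
 *
 *	rcu_read_lock();
 *	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 *	spin_lock(&ip->i_flags_lock);
 *	if (ip->i_ino != ino)
 *		goto out_skip;	/- freed or reused inode, try again -/
 *
 * See xfs_iget_cache_hit() below for the real, complete version of this
 * check.
 */
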
/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * Background scanning to trim preallocated space. This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	if (!xfs_is_blockgc_enabled(mp))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}

/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable++;

	if (was_tagged)
		return;

	/* propagate the tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	/* start background work */
	switch (tag) {
	case XFS_ICI_RECLAIM_TAG:
		xfs_reclaim_work_queue(mp);
		break;
	case XFS_ICI_BLOCKGC_TAG:
		xfs_blockgc_queue(pag);
		break;
	}

	trace_xfs_perag_set_inode_tag(pag, _RET_IP_);
}

/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);

	/*
	 * Reclaim can signal (with a null agino) that it cleared its own tag
	 * by removing the inode from the radix tree.
	 */
	if (agino != NULLAGINO)
		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	else
		ASSERT(tag == XFS_ICI_RECLAIM_TAG);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable--;

	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	/* clear the tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	trace_xfs_perag_clear_inode_tag(pag, _RET_IP_);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	mapping_set_large_folios(inode->i_mapping);
	return error;
}

/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state. Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			error;

	trace_xfs_iget_recycle(ip);

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		return -EAGAIN;

	/*
	 * We need to make it look like the inode is being reclaimed to prevent
	 * the actual reclaim workers from stomping over us while we recycle
	 * the inode. We can't clear the radix tree tag yet as it requires
	 * pag_ici_lock to be held exclusive.
	 */
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	if (error) {
		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble. Try to re-add it to the reclaim list.
		 */
		rcu_read_lock();
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		trace_xfs_iget_recycle_fail(ip);
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);

	return 0;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/* Make all pending inactivation work start immediately. */
static bool
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			ret = false;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
			ret = true;
		}
	}

	return ret;
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * If we're racing with the inactivation worker we also want to wait.
	 * If we're creating a new file, it's possible that the worker
	 * previously marked the inode as free on disk but hasn't finished
	 * updating the incore state yet. The AGI buffer will be dirty and
	 * locked to the icreate transaction, so a synchronous push of the
	 * inodegc workers would result in deadlock. For a regular iget, the
	 * worker is running already, so we might as well wait.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	wait_on_inode to wait for these flags to be cleared
	 *	instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		goto out_inodegc_flush;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/* Skip inodes that have no vfs state. */
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	/* The inode fits the selection criteria; process it. */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		/* Drops i_flags_lock and RCU read lock. */
		error = xfs_iget_recycle(pag, ip);
		if (error == -EAGAIN)
			goto out_skip;
		if (error)
			return error;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode))
			goto out_skip;

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_skip:
	trace_xfs_iget_skip(ip);
	XFS_STATS_INC(mp, xs_ig_frecycle);
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;

out_inodegc_flush:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	/*
	 * Do not wait for the workers, because the caller could hold an AGI
	 * buffer lock. We're just going to sleep in a loop anyway.
	 */
	if (xfs_is_inodegc_enabled(mp))
		xfs_inodegc_queue_all(mp);
	return -EAGAIN;
}

static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_has_v3inodes(mp) &&
	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
		VFS_I(ip)->i_generation = get_random_u32();
	} else {
		struct xfs_buf	*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system. The inode is looked up
 * in the cache held in each AG. If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now. If it's a new inode being created, xfs_init_new_inode will
	 * handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) &&
	    error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
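
/*
 * Example (a sketch only, error handling trimmed): a metadata operation
 * typically looks an inode up, works on it with the ILOCK held, and drops
 * its reference with xfs_irele() when done:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);
 */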

/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system. If the inode is
 * in cache and isn't in purgatory, set *inuse to true if the inode is
 * allocated and false if it is not. For all other cases (not in cache,
 * being torn down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer. This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that. If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned. The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	xfs_irele(ip);
	return 0;
}

/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_igrab(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	/* Don't reclaim a sick inode unless the caller asked for it. */
	if (ip->i_sick &&
	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can just wait until we are able to reclaim
 * the inode.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes. This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	/*
	 * Check for log shutdown because aborting the inode can move the log
	 * tail and corrupt in memory state. This is fine if the log is shut
	 * down, but if the log is still active and only the mount is shut down
	 * then the in-memory log tail movement caused by the abort can be
	 * incorrectly propagated to disk.
	 */
	if (xlog_is_shutdown(ip->i_mount->m_log)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_shutdown_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
	trace_xfs_inode_reclaiming(ip);

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	spin_unlock(&ip->i_flags_lock);

	ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups. This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}

/* Reclaim sick inodes if we're unmounting or the fs went down. */
static inline bool
xfs_want_reclaim_sick(
	struct xfs_mount	*mp)
{
	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
	       xfs_is_shutdown(mp);
}

void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	unsigned long		nr_to_scan)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	return 0;
}

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	long			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}
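
/*
 * The two functions above pair up to form the superblock shrinker: the VFS
 * asks xfs_reclaim_inodes_count() how large the reclaimable pool is, then
 * feeds a portion of that back into xfs_reclaim_inodes_nr(). A simplified
 * sketch of the wiring (the real callbacks live in xfs_super.c):
 *
 *	static long
 *	xfs_fs_nr_cached_objects(struct super_block *sb,
 *			struct shrink_control *sc)
 *	{
 *		return xfs_reclaim_inodes_count(XFS_M(sb));
 *	}
 *
 *	static long
 *	xfs_fs_free_cached_objects(struct super_block *sb,
 *			struct shrink_control *sc)
 *	{
 *		return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
 *	}
 */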

STATIC bool
xfs_icwalk_match_id(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid != icw->icw_prid)
		return false;

	return true;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC bool
xfs_icwalk_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid == icw->icw_prid)
		return true;

	return false;
}

/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @icw? The inode is eligible if @icw is null or
 * if the predicate functions match.
 */
static bool
xfs_icwalk_match(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	bool			match;

	if (!icw)
		return true;

	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
		match = xfs_icwalk_match_id_union(ip, icw);
	else
		match = xfs_icwalk_match_id(ip, icw);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
	    XFS_ISIZE(ip) < icw->icw_min_file_size)
		return false;

	return true;
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low.
 */
void
xfs_reclaim_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
	xfs_reclaim_work_queue(mp);
}

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
		return 0;

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (xfs_can_free_eofblocks(ip, false))
		return xfs_free_eofblocks(ip);

	/* inode could be preallocated or append-only */
	trace_xfs_inode_free_eofblocks_invalid(ip);
	xfs_inode_clear_eofblocks_tag(ip);
	return 0;
}

static void
xfs_blockgc_set_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}

static void
xfs_blockgc_clear_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	bool			clear_tag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);

	if (!clear_tag)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}
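
/*
 * For context, an illustrative producer-side sketch: the buffered write
 * path tags the inode right after it reserves speculative delalloc blocks
 * beyond EOF (see the delalloc reservation code in xfs_iomap.c), along the
 * lines of:
 *
 *	if (whichfork == XFS_DATA_FORK && prealloc)
 *		xfs_inode_set_eofblocks_tag(ip);
 *
 * Once tagged, the AG's blockgc worker will eventually visit the inode via
 * xfs_blockgc_worker() below and trim the preallocation.
 */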

/*
 * Set ourselves up to free CoW blocks from this file. If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork. Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long. If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;
	int			ret = 0;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
		return 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_MMAPLOCK_EXCL;

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
	return ret;
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}

/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (!xfs_clear_blockgc_enabled(mp))
		return;

	for_each_perag(mp, agno, pag)
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
	trace_xfs_blockgc_stop(mp, __return_address);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_start(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (xfs_set_blockgc_enabled(mp))
		return;

	trace_xfs_blockgc_start(mp, __return_address);
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		xfs_blockgc_queue(pag);
}

/* Don't try to run block gc on an inode that's in any of these states. */
#define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
					 XFS_NEED_INACTIVE | \
					 XFS_INACTIVATING | \
					 XFS_IRECLAIMABLE | \
					 XFS_IRECLAIM)
/*
 * Decide if the given @ip is eligible for garbage collection of speculative
 * preallocations, and grab it if so. Returns true if it's ready to go or
 * false if we should just ignore it.
 */
static bool
xfs_blockgc_igrab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (xfs_is_shutdown(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}

/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	unsigned int		lockflags = 0;
	int			error;

	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
	if (error)
		goto unlock;

	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
unlock:
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	xfs_irele(ip);
	return error;
}

/* Background worker that trims preallocated space. */
void
xfs_blockgc_worker(
	struct work_struct	*work)
{
	struct xfs_perag	*pag = container_of(to_delayed_work(work),
					struct xfs_perag, pag_blockgc_work);
	struct xfs_mount	*mp = pag->pag_mount;
	int			error;

	trace_xfs_blockgc_worker(mp, __return_address);

	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag->pag_agno, error);
	xfs_blockgc_queue(pag);
}

/*
 * Try to free space in the filesystem by purging inactive inodes, eofblocks
 * and cowblocks.
 */
int
xfs_blockgc_free_space(
	struct xfs_mount	*mp,
	struct xfs_icwalk	*icw)
{
	int			error;

	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);

	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
	if (error)
		return error;

	xfs_inodegc_flush(mp);
	return 0;
}

/*
 * Reclaim all the free space that we can by scheduling the background blockgc
 * and inodegc workers immediately and waiting for them all to clear.
 */
void
xfs_blockgc_flush_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	trace_xfs_blockgc_flush_all(mp, __return_address);

	/*
	 * For each blockgc worker, move its queue time up to now. If it
	 * wasn't queued, it will not be requeued. Then flush whatever's
	 * left.
	 */
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
				&pag->pag_blockgc_work, 0);

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		flush_delayed_work(&pag->pag_blockgc_work);

	xfs_inodegc_flush(mp);
}
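
/*
 * Usage sketch (assumed caller shape, not a definitive call site): space
 * allocation paths can use the flush above as a last resort before failing
 * with ENOSPC, e.g. a transaction reservation retry loop along the lines
 * of what xfs_trans_alloc() does:
 *
 *	error = xfs_trans_reserve(...);
 *	if (error == -ENOSPC) {
 *		xfs_blockgc_flush_all(mp);	/- free speculative space -/
 *		error = xfs_trans_reserve(...);	/- then retry once -/
 *	}
 */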

/*
 * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which
 * quota caused an allocation failure, so we make a best effort by including
 * each quota under low free space conditions (less than 1% free space) in the
 * scan.
 *
 * Callers must not hold any inode's ILOCK. If requesting a synchronous scan
 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
 * MMAPLOCK.
 */
int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		iwalk_flags)
{
	struct xfs_icwalk	icw = {0};
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	/*
	 * Run a scan to free blocks using the union filter to cover all
	 * applicable quotas in a single scan.
	 */
	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
		do_work = true;
	}

	if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		icw.icw_prid = pdqp->q_id;
		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &icw);
}

/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
	struct xfs_inode	*ip,
	unsigned int		iwalk_flags)
{
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
}
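
/*
 * Usage sketch (assumed caller shape; the buffered write path in xfs_file.c
 * follows this pattern): a write that trips a quota limit can kick a
 * synchronous scan against just the quotas attached to the file, then retry
 * once:
 *
 *	ret = iomap_file_buffered_write(...);
 *	if (ret == -EDQUOT && !cleared_space) {
 *		cleared_space = true;
 *		xfs_blockgc_free_quota(ip, XFS_ICWALK_FLAG_SYNC);
 *		goto write_retry;
 *	}
 */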

/* XFS Inode Cache Walking Code */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

/*
 * Decide if we want to grab this inode in anticipation of doing work towards
 * the goal.
 */
static inline bool
xfs_icwalk_igrab(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		return xfs_blockgc_igrab(ip);
	case XFS_ICWALK_RECLAIM:
		return xfs_reclaim_igrab(ip, icw);
	default:
		return false;
	}
}

/*
 * Process an inode. Each processing function must handle any state changes
 * made by the icwalk igrab function. Return -EAGAIN to skip an inode.
 */
static inline int
xfs_icwalk_process_inode(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	struct xfs_icwalk	*icw)
{
	int			error = 0;

	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		error = xfs_blockgc_scan_inode(ip, icw);
		break;
	case XFS_ICWALK_RECLAIM:
		xfs_reclaim_inode(ip, pag);
		break;
	}
	return error;
}

df600197 1647/*
f427cf5c
DW
1648 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1649 * process them in some manner.
df600197
DW
1650 */
1651static int
c1115c0c 1652xfs_icwalk_ag(
df600197 1653 struct xfs_perag *pag,
f427cf5c 1654 enum xfs_icwalk_goal goal,
b26b2bf1 1655 struct xfs_icwalk *icw)
df600197
DW
1656{
1657 struct xfs_mount *mp = pag->pag_mount;
1658 uint32_t first_index;
1659 int last_error = 0;
1660 int skipped;
1661 bool done;
1662 int nr_found;
1663
1664restart:
1665 done = false;
1666 skipped = 0;
f1bc5c56
DW
1667 if (goal == XFS_ICWALK_RECLAIM)
1668 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1669 else
1670 first_index = 0;
df600197
DW
1671 nr_found = 0;
1672 do {
1673 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1674 int error = 0;
1675 int i;
1676
1677 rcu_read_lock();
1678
a437b9b4
CH
1679 nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1680 (void **) batch, first_index,
1681 XFS_LOOKUP_BATCH, goal);
df600197 1682 if (!nr_found) {
f1bc5c56 1683 done = true;
df600197
DW
1684 rcu_read_unlock();
1685 break;
1686 }
1687
1688 /*
1689 * Grab the inodes before we drop the lock. if we found
1690 * nothing, nr == 0 and the loop will be skipped.
1691 */
1692 for (i = 0; i < nr_found; i++) {
1693 struct xfs_inode *ip = batch[i];
1694
b26b2bf1 1695 if (done || !xfs_icwalk_igrab(goal, ip, icw))
df600197
DW
1696 batch[i] = NULL;
1697
1698 /*
1699 * Update the index for the next lookup. Catch
1700 * overflows into the next AG range which can occur if
1701 * we have inodes in the last block of the AG and we
1702 * are currently pointing to the last inode.
1703 *
1704 * Because we may see inodes that are from the wrong AG
1705 * due to RCU freeing and reallocation, only update the
1706 * index if it lies in this AG. It was a race that lead
1707 * us to see this inode, so another lookup from the
1708 * same index will not find it again.
1709 */
1710 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1711 continue;
1712 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1713 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1714 done = true;
1715 }

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = xfs_icwalk_process_inode(goal, batch[i], pag,
					icw);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
			if (icw->icw_scan_limit <= 0)
				break;
		}
	} while (nr_found && !done);

	if (goal == XFS_ICWALK_RECLAIM) {
		if (done)
			first_index = 0;
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
	}

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
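
/*
 * Illustrative sketch (assumed caller, not part of this excerpt): a walk can
 * be bounded by filling out the private scan limit before invoking
 * xfs_icwalk(), which is how a shrinker-style caller would cap how many
 * batches the loop above processes:
 *
 *	struct xfs_icwalk	icw = {
 *		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
 *		.icw_scan_limit	= nr_to_scan,
 *	};
 *
 *	error = xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
 */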

/* Walk all incore inodes to achieve a given goal. */
static int
xfs_icwalk(
	struct xfs_mount	*mp,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		agno;

	for_each_perag_tag(mp, agno, pag, goal) {
		error = xfs_icwalk_ag(pag, goal, icw);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED) {
				xfs_perag_rele(pag);
				break;
			}
		}
	}
	return last_error;
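	/*
	 * Compile-time assertion only; it sits after the return because it
	 * generates no object code.  It trips the build if the private walk
	 * flags ever collide with the caller-visible XFS_ICWALK_FLAGS_VALID
	 * namespace.
	 */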
	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
}

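/*
 * Descriptive note: by the time an inode is headed for reclaim, every delayed
 * allocation should have been written back or punched out, so the debug-only
 * check below warns (and asserts) if a delalloc extent survived.
 */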
#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/* Schedule the inode for reclaim. */
static void
xfs_inodegc_set_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	trace_xfs_inode_set_reclaimable(ip);
	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
	ip->i_flags |= XFS_IRECLAIMABLE;
	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

/*
 * Free all speculative preallocations and possibly even the inode itself.
 * This is the last chance to make changes to an otherwise unreferenced file
 * before incore reclamation happens.
 */
static void
xfs_inodegc_inactivate(
	struct xfs_inode	*ip)
{
	trace_xfs_inode_inactivating(ip);
	xfs_inactive(ip);
	xfs_inodegc_set_reclaimable(ip);
}

void
xfs_inodegc_worker(
	struct work_struct	*work)
{
	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
						struct xfs_inodegc, work);
	struct llist_node	*node = llist_del_all(&gc->list);
	struct xfs_inode	*ip, *n;
	unsigned int		nofs_flag;

	ASSERT(gc->cpu == smp_processor_id());

	WRITE_ONCE(gc->items, 0);

	if (!node)
		return;

	/*
	 * We can allocate memory here while doing writeback on behalf of
	 * memory reclaim.  To avoid memory allocation deadlocks set the
	 * task-wide nofs context for the following operations.
	 */
	nofs_flag = memalloc_nofs_save();

	ip = llist_entry(node, struct xfs_inode, i_gclist);
	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));

	WRITE_ONCE(gc->shrinker_hits, 0);
	llist_for_each_entry_safe(ip, n, node, i_gclist) {
		xfs_iflags_set(ip, XFS_INACTIVATING);
		xfs_inodegc_inactivate(ip);
	}

	memalloc_nofs_restore(nofs_flag);
}

/*
 * Expedite all pending inodegc work to run immediately.  This does not wait
 * for completion of the work.
 */
void
xfs_inodegc_push(
	struct xfs_mount	*mp)
{
	if (!xfs_is_inodegc_enabled(mp))
		return;
	trace_xfs_inodegc_push(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}

/*
 * Force all currently queued inode inactivation work to run immediately and
 * wait for the work to finish.
 */
void
xfs_inodegc_flush(
	struct xfs_mount	*mp)
{
	xfs_inodegc_push(mp);
	trace_xfs_inodegc_flush(mp, __return_address);
	flush_workqueue(mp->m_inodegc_wq);
}
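
/*
 * Usage note (an inference, not stated elsewhere in this excerpt):
 * xfs_inodegc_push() suits callers that only need queued inactivations to
 * start soon, while xfs_inodegc_flush() is for callers that must observe
 * the results, such as unmount, and so must pay the wait.
 */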

/*
 * Flush all the pending work and then disable the inode inactivation
 * background workers and wait for them to stop.  Caller must hold
 * sb->s_umount to coordinate changes in the inodegc_enabled state.
 */
void
xfs_inodegc_stop(
	struct xfs_mount	*mp)
{
	bool			rerun;

	if (!xfs_clear_inodegc_enabled(mp))
		return;

	/*
	 * Drain all pending inodegc work, including inodes that could be
	 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
	 * threads that sample the inodegc state just prior to us clearing it.
	 * The inodegc flag state prevents new threads from queuing more
	 * inodes, so we queue pending work items and flush the workqueue
	 * until all inodegc lists are empty.  IOWs, we cannot use
	 * drain_workqueue here because it does not allow other unserialized
	 * mechanisms to reschedule inodegc work while this draining is in
	 * progress.
	 */
	xfs_inodegc_queue_all(mp);
	do {
		flush_workqueue(mp->m_inodegc_wq);
		rerun = xfs_inodegc_queue_all(mp);
	} while (rerun);

	trace_xfs_inodegc_stop(mp, __return_address);
}

/*
 * Enable the inode inactivation background workers and schedule deferred
 * inode inactivation work if there is any.  Caller must hold sb->s_umount to
 * coordinate changes in the inodegc_enabled state.
 */
void
xfs_inodegc_start(
	struct xfs_mount	*mp)
{
	if (xfs_set_inodegc_enabled(mp))
		return;

	trace_xfs_inodegc_start(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}

#ifdef CONFIG_XFS_RT
static inline bool
xfs_inodegc_want_queue_rt_file(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_REALTIME_INODE(ip))
		return false;

	if (__percpu_counter_compare(&mp->m_frextents,
				mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	return false;
}
#else
# define xfs_inodegc_want_queue_rt_file(ip)	(false)
#endif /* CONFIG_XFS_RT */

/*
 * Schedule the inactivation worker when:
 *
 *  - We've accumulated more than one inode cluster buffer's worth of inodes.
 *  - There is less than 5% free space left.
 *  - Any of the quotas for this inode are near an enforcement limit.
 */
static inline bool
xfs_inodegc_want_queue_work(
	struct xfs_inode	*ip,
	unsigned int		items)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (items > mp->m_ino_geo.inodes_per_cluster)
		return true;

	if (__percpu_counter_compare(&mp->m_fdblocks,
				mp->m_low_space[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	if (xfs_inodegc_want_queue_rt_file(ip))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
		return true;

	return false;
}

/*
 * Upper bound on the number of inodes in each AG that can be queued for
 * inactivation at any given time, to avoid monopolizing the workqueue.
 */
#define XFS_INODEGC_MAX_BACKLOG	(4 * XFS_INODES_PER_CHUNK)

/*
 * Make the frontend wait for inactivations when:
 *
 *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
 *  - The queue depth exceeds the maximum allowable percpu backlog.
 *
 * Note: If the current thread is running a transaction, we don't ever want to
 * wait for other transactions because that could introduce a deadlock.
 */
static inline bool
xfs_inodegc_want_flush_work(
	struct xfs_inode	*ip,
	unsigned int		items,
	unsigned int		shrinker_hits)
{
	if (current->journal_info)
		return false;

	if (shrinker_hits > 0)
		return true;

	if (items > XFS_INODEGC_MAX_BACKLOG)
		return true;

	return false;
}

/*
 * Queue a background inactivation worker if there are inodes that need to be
 * inactivated and higher level xfs code hasn't disabled the background
 * workers.
 */
static void
xfs_inodegc_queue(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inodegc	*gc;
	int			items;
	unsigned int		shrinker_hits;
	unsigned long		queue_delay = 1;

	trace_xfs_inode_set_need_inactive(ip);
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_NEED_INACTIVE;
	spin_unlock(&ip->i_flags_lock);

	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add(&ip->i_gclist, &gc->list);
	items = READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, items + 1);
	shrinker_hits = READ_ONCE(gc->shrinker_hits);

	/*
	 * We queue the work while holding the current CPU so that the work
	 * is scheduled to run on this CPU.
	 */
	if (!xfs_is_inodegc_enabled(mp)) {
		put_cpu_ptr(gc);
		return;
	}

	if (xfs_inodegc_want_queue_work(ip, items))
		queue_delay = 0;

	trace_xfs_inodegc_queue(mp, __return_address);
	mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
			queue_delay);
	put_cpu_ptr(gc);

	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
		trace_xfs_inodegc_throttle(mp, __return_address);
		flush_delayed_work(&gc->work);
	}
}
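
/*
 * Design note (an inference from the code above, not an authoritative claim):
 * the default one-jiffy queue_delay lets short bursts of evictions batch up
 * on the per-cpu list, while xfs_inodegc_want_queue_work() collapses the
 * delay to zero once backlog or low-space thresholds make prompt
 * inactivation more valuable than batching.
 */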

/*
 * Fold the dead CPU's inodegc queue into the current CPU's queue.
 */
void
xfs_inodegc_cpu_dead(
	struct xfs_mount	*mp,
	unsigned int		dead_cpu)
{
	struct xfs_inodegc	*dead_gc, *gc;
	struct llist_node	*first, *last;
	unsigned int		count = 0;

	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
	cancel_delayed_work_sync(&dead_gc->work);

	if (llist_empty(&dead_gc->list))
		return;

	first = dead_gc->list.first;
	last = first;
	while (last->next) {
		last = last->next;
		count++;
	}
	dead_gc->list.first = NULL;
	dead_gc->items = 0;

	/* Add pending work to current CPU */
	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add_batch(first, last, &gc->list);
	count += READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, count);

	if (xfs_is_inodegc_enabled(mp)) {
		trace_xfs_inodegc_queue(mp, __return_address);
		mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
				0);
	}
	put_cpu_ptr(gc);
}

/*
 * We set the inode flag atomically with the radix tree tag.  Once we get tag
 * lookups on the radix tree, this inode flag can go away.
 *
 * We always use background reclaim here because even if the inode is clean,
 * it still may be under IO and hence we have to wait for IO completion to
 * occur before we can reclaim the inode.  The background reclaim path handles
 * this more efficiently than we can here, so simply let background reclaim
 * tear down all inodes.
 */
void
xfs_inode_mark_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			need_inactive;

	XFS_STATS_INC(mp, vn_reclaim);

	/*
	 * We should never get here with any of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));

	need_inactive = xfs_inode_needs_inactive(ip);
	if (need_inactive) {
		xfs_inodegc_queue(ip);
		return;
	}

	/* Going straight to reclaim, so drop the dquots. */
	xfs_qm_dqdetach(ip);
	xfs_inodegc_set_reclaimable(ip);
}

/*
 * Register a phony shrinker so that we can run background inodegc sooner when
 * there's memory pressure.  Inactivation does not itself free any memory but
 * it does make inodes reclaimable, which eventually frees memory.
 *
 * The count function, seek value, and batch value are crafted to trigger the
 * scan function during the second round of scanning.  Hopefully this means
 * that we reclaimed enough memory that initiating metadata transactions won't
 * make things worse.
 */
#define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
#define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)

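/*
 * Worked example (assuming DEF_PRIORITY == 12 and vmscan's seeks == 0 rule
 * of delta = freeable / 2 per round): COUNT is 4096, so each round adds 2048
 * to the pending scan total.  BATCH is 2049, so round one (2048) falls just
 * short and round two (4096) invokes the scan function, as the comment above
 * intends.
 */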
static unsigned long
xfs_inodegc_shrinker_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;

	if (!xfs_is_inodegc_enabled(mp))
		return 0;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			return XFS_INODEGC_SHRINKER_COUNT;
	}

	return 0;
}

static unsigned long
xfs_inodegc_shrinker_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			no_items = true;

	if (!xfs_is_inodegc_enabled(mp))
		return SHRINK_STOP;

	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			unsigned int	h = READ_ONCE(gc->shrinker_hits);

			WRITE_ONCE(gc->shrinker_hits, h + 1);
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
			no_items = false;
		}
	}

	/*
	 * If there are no inodes to inactivate, we don't want the shrinker
	 * to think there's deferred work to call us back about.
	 */
	if (no_items)
		return LONG_MAX;

	return SHRINK_STOP;
}

/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
int
xfs_inodegc_register_shrinker(
	struct xfs_mount	*mp)
{
	struct shrinker		*shrink = &mp->m_inodegc_shrinker;

	shrink->count_objects = xfs_inodegc_shrinker_count;
	shrink->scan_objects = xfs_inodegc_shrinker_scan;
	shrink->seeks = 0;
	shrink->flags = SHRINKER_NONSLAB;
	shrink->batch = XFS_INODEGC_SHRINKER_BATCH;

	return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
}
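
/*
 * Teardown note (assumed counterpart, defined outside this excerpt): the
 * mount path that calls xfs_inodegc_register_shrinker() is expected to pair
 * it with unregister_shrinker(&mp->m_inodegc_shrinker) when the filesystem
 * is torn down, as with any registered shrinker.
 */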