// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_ialloc.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

#include <linux/iversion.h>

/* Radix tree tags for incore inode tree. */

/* inode is to be reclaimed */
#define XFS_ICI_RECLAIM_TAG	0
/* Inode has speculative preallocations (posteof or cow) to clean. */
#define XFS_ICI_BLOCKGC_TAG	1

/*
 * The goal for walking incore inodes.  These can correspond with incore inode
 * radix tree tags when convenient.  Avoid existing XFS_IWALK namespace.
 */
enum xfs_icwalk_goal {
	/* Goals directly associated with tagged inodes. */
	XFS_ICWALK_BLOCKGC	= XFS_ICI_BLOCKGC_TAG,
	XFS_ICWALK_RECLAIM	= XFS_ICI_RECLAIM_TAG,
};

static int xfs_icwalk(struct xfs_mount *mp,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
static int xfs_icwalk_ag(struct xfs_perag *pag,
		enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
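
/*
 * Because each goal value aliases its radix tree tag, the walk code can pass
 * a goal straight to radix_tree_gang_lookup_tag() with no translation step.
 * A sketch of the idea (this is what xfs_icwalk_ag() below does):
 *
 *	nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
 *			(void **)batch, first_index,
 *			XFS_LOOKUP_BATCH, goal);
 *
 * which only works because XFS_ICWALK_BLOCKGC == XFS_ICI_BLOCKGC_TAG and
 * XFS_ICWALK_RECLAIM == XFS_ICI_RECLAIM_TAG.
 */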

/*
 * Private inode cache walk flags for struct xfs_icwalk.  Must not
 * coincide with XFS_ICWALK_FLAGS_VALID.
 */

/* Stop scanning after icw_scan_limit inodes. */
#define XFS_ICWALK_FLAG_SCAN_LIMIT	(1U << 28)

#define XFS_ICWALK_FLAG_RECLAIM_SICK	(1U << 27)
#define XFS_ICWALK_FLAG_UNION		(1U << 26) /* union filter algorithm */

#define XFS_ICWALK_PRIVATE_FLAGS	(XFS_ICWALK_FLAG_SCAN_LIMIT | \
					 XFS_ICWALK_FLAG_RECLAIM_SICK | \
					 XFS_ICWALK_FLAG_UNION)

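/*
 * Keeping the private flags in the high bits lets them share icw_flags with
 * the externally supplied flags (XFS_ICWALK_FLAGS_VALID, see xfs_icache.h).
 * The disjointness of the two sets is enforced at compile time by the check
 * at the bottom of xfs_icwalk():
 *
 *	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
 */
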
/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
	 * and return NULL here on ENOMEM.
	 */
	ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);

	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_cache_free(xfs_inode_cache, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode or i_state! */
	VFS_I(ip)->i_mode = 0;
	VFS_I(ip)->i_state = 0;
	mapping_set_large_folios(VFS_I(ip)->i_mapping);

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(ip->i_ino == 0);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	memset(&ip->i_df, 0, sizeof(ip->i_df));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
	ip->i_nblocks = 0;
	ip->i_forkoff = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	INIT_WORK(&ip->i_ioend_work, xfs_end_io);
	INIT_LIST_HEAD(&ip->i_ioend_list);
	spin_lock_init(&ip->i_ioend_lock);

	return ip;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(&ip->i_df);
		break;
	}

	if (ip->i_afp) {
		xfs_idestroy_fork(ip->i_afp);
		kmem_cache_free(xfs_ifork_cache, ip->i_afp);
	}
	if (ip->i_cowfp) {
		xfs_idestroy_fork(ip->i_cowfp);
		kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
	}
	if (ip->i_itemp) {
		ASSERT(!test_bit(XFS_LI_IN_AIL,
				 &ip->i_itemp->ili_item.li_flags));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_cache_free(xfs_inode_cache, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state.  The ip->i_flags_lock provides the barrier against
	 * lookup races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

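/*
 * The zeroed i_ino published above is what RCU-protected lookups key off.
 * A lookup racing with this free observes, in sketch form (this mirrors the
 * check at the top of xfs_iget_cache_hit() below):
 *
 *	rcu_read_lock();
 *	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 *	spin_lock(&ip->i_flags_lock);
 *	if (ip->i_ino != ino)
 *		goto out_skip;
 *
 * where out_skip returns -EAGAIN and the caller retries, so a freed (or
 * recycled-for-another-number) inode is never returned.
 */
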
/*
 * Queue background inode reclaim work if there are reclaimable inodes and there
 * isn't reclaim work already scheduled or in progress.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount	*mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}

/*
 * Background scanning to trim preallocated space.  This is queued based on the
 * 'speculative_prealloc_lifetime' tunable (5m by default).
 */
static inline void
xfs_blockgc_queue(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	if (!xfs_is_blockgc_enabled(mp))
		return;

	rcu_read_lock();
	if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
		queue_delayed_work(pag->pag_mount->m_blockgc_wq,
				   &pag->pag_blockgc_work,
				   msecs_to_jiffies(xfs_blockgc_secs * 1000));
	rcu_read_unlock();
}

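/*
 * With the default 'speculative_prealloc_lifetime' of 300 seconds, the delay
 * computed above is simply (assuming the tunable is unchanged):
 *
 *	msecs_to_jiffies(xfs_blockgc_secs * 1000)
 *		== msecs_to_jiffies(300 * 1000)		(i.e. five minutes)
 */
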
/* Set a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_set_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;
	bool			was_tagged;

	lockdep_assert_held(&pag->pag_ici_lock);

	was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root, agino, tag);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable++;

	if (was_tagged)
		return;

	/* propagate the tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	/* start background work */
	switch (tag) {
	case XFS_ICI_RECLAIM_TAG:
		xfs_reclaim_work_queue(mp);
		break;
	case XFS_ICI_BLOCKGC_TAG:
		xfs_blockgc_queue(pag);
		break;
	}

	trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}

/* Clear a tag on both the AG incore inode tree and the AG radix tree. */
static void
xfs_perag_clear_inode_tag(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	unsigned int		tag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);

	/*
	 * Reclaim can signal (with a null agino) that it cleared its own tag
	 * by removing the inode from the radix tree.
	 */
	if (agino != NULLAGINO)
		radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
	else
		ASSERT(tag == XFS_ICI_RECLAIM_TAG);

	if (tag == XFS_ICI_RECLAIM_TAG)
		pag->pag_ici_reclaimable--;

	if (radix_tree_tagged(&pag->pag_ici_root, tag))
		return;

	/* clear the tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
	spin_unlock(&mp->m_perag_lock);

	trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
}

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure.  This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally.  Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int			error;
	uint32_t		nlink = inode->i_nlink;
	uint32_t		generation = inode->i_generation;
	uint64_t		version = inode_peek_iversion(inode);
	umode_t			mode = inode->i_mode;
	dev_t			dev = inode->i_rdev;
	kuid_t			uid = inode->i_uid;
	kgid_t			gid = inode->i_gid;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode_set_iversion_queried(inode, version);
	inode->i_mode = mode;
	inode->i_rdev = dev;
	inode->i_uid = uid;
	inode->i_gid = gid;
	mapping_set_large_folios(inode->i_mapping);
	return error;
}

/*
 * Carefully nudge an inode whose VFS state has been torn down back into a
 * usable state.  Drops the i_flags_lock and the rcu read lock.
 */
static int
xfs_iget_recycle(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip) __releases(&ip->i_flags_lock)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct inode		*inode = VFS_I(ip);
	int			error;

	trace_xfs_iget_recycle(ip);

	/*
	 * We need to make it look like the inode is being reclaimed to prevent
	 * the actual reclaim workers from stomping over us while we recycle
	 * the inode.  We can't clear the radix tree tag yet as it requires
	 * pag_ici_lock to be held exclusive.
	 */
	ip->i_flags |= XFS_IRECLAIM;

	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();

	ASSERT(!rwsem_is_locked(&inode->i_rwsem));
	error = xfs_reinit_inode(mp, inode);
	if (error) {
		/*
		 * Re-initializing the inode failed, and we are in deep
		 * trouble.  Try to re-add it to the reclaim list.
		 */
		rcu_read_lock();
		spin_lock(&ip->i_flags_lock);
		ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
		ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		trace_xfs_iget_recycle_fail(ip);
		return error;
	}

	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	/*
	 * Clear the per-lifetime state in the inode as we are now effectively
	 * a new inode and need to return to the initial state before reuse
	 * occurs.
	 */
	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
	ip->i_flags |= XFS_INEW;
	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);
	inode->i_state = I_NEW;
	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);

	return 0;
}

/*
 * If we are allocating a new inode, then check what was returned is
 * actually a free, empty inode. If we are not allocating an inode,
 * then check we didn't find a free inode.
 *
 * Returns:
 *	0		if the inode free state matches the lookup context
 *	-ENOENT		if the inode is free and we are not allocating
 *	-EFSCORRUPTED	if there is any state mismatch at all
 */
static int
xfs_iget_check_free_state(
	struct xfs_inode	*ip,
	int			flags)
{
	if (flags & XFS_IGET_CREATE) {
		/* should be a free inode */
		if (VFS_I(ip)->i_mode != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
				ip->i_ino, VFS_I(ip)->i_mode);
			return -EFSCORRUPTED;
		}

		if (ip->i_nblocks != 0) {
			xfs_warn(ip->i_mount,
"Corruption detected! Free inode 0x%llx has blocks allocated!",
				ip->i_ino);
			return -EFSCORRUPTED;
		}
		return 0;
	}

	/* should be an allocated inode */
	if (VFS_I(ip)->i_mode == 0)
		return -ENOENT;

	return 0;
}

/* Make all pending inactivation work start immediately. */
static void
xfs_inodegc_queue_all(
	struct xfs_mount	*mp)
{
	struct xfs_inodegc	*gc;
	int			cpu;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
	}
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino)
		goto out_skip;

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * If we're racing with the inactivation worker we also want to wait.
	 * If we're creating a new file, it's possible that the worker
	 * previously marked the inode as free on disk but hasn't finished
	 * updating the incore state yet.  The AGI buffer will be dirty and
	 * locked to the icreate transaction, so a synchronous push of the
	 * inodegc workers would result in deadlock.  For a regular iget, the
	 * worker is running already, so we might as well wait.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	wait_on_inode to wait for these flags to be cleared
	 *	instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
		goto out_skip;

	if (ip->i_flags & XFS_NEED_INACTIVE) {
		/* Unlinked inodes cannot be re-grabbed. */
		if (VFS_I(ip)->i_nlink == 0) {
			error = -ENOENT;
			goto out_error;
		}
		goto out_inodegc_flush;
	}

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_error;

	/* Skip inodes that have no vfs state. */
	if ((flags & XFS_IGET_INCORE) &&
	    (ip->i_flags & XFS_IRECLAIMABLE))
		goto out_skip;

	/* The inode fits the selection criteria; process it. */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		/* Drops i_flags_lock and RCU read lock. */
		error = xfs_iget_recycle(pag, ip);
		if (error)
			return error;
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode))
			goto out_skip;

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	if (!(flags & XFS_IGET_INCORE))
		xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_skip:
	trace_xfs_iget_skip(ip);
	XFS_STATS_INC(mp, xs_ig_frecycle);
	error = -EAGAIN;
out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;

out_inodegc_flush:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	/*
	 * Do not wait for the workers, because the caller could hold an AGI
	 * buffer lock.  We're just going to sleep in a loop anyway.
	 */
	if (xfs_is_inodegc_enabled(mp))
		xfs_inodegc_queue_all(mp);
	return -EAGAIN;
}

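/*
 * The -EAGAIN produced by the out_skip and out_inodegc_flush paths above is
 * not visible to xfs_iget() callers: xfs_iget() itself absorbs it with a
 * short sleep and retry, essentially:
 *
 *	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
 *		delay(1);
 *		goto again;
 *	}
 *
 * so lookups simply poll until a transient state (XFS_INEW, XFS_IRECLAIM,
 * XFS_INACTIVATING) clears.
 */
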
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
	if (error)
		goto out_destroy;

	/*
	 * For version 5 superblocks, if we are initialising a new inode and we
	 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
	 * simply build the new inode core with a random generation number.
	 *
	 * For version 4 (and older) superblocks, log recovery is dependent on
	 * the i_flushiter field being initialised from the current on-disk
	 * value and hence we must also read the inode off disk even when
	 * initializing new inodes.
	 */
	if (xfs_has_v3inodes(mp) &&
	    (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
		VFS_I(ip)->i_generation = prandom_u32();
	} else {
		struct xfs_buf	*bp;

		error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
		if (error)
			goto out_destroy;

		error = xfs_inode_from_disk(ip,
				xfs_buf_offset(bp, ip->i_imap.im_boffset));
		if (!error)
			xfs_buf_set_ref(bp, XFS_INO_REF);
		xfs_trans_brelse(tp, bp);

		if (error)
			goto out_destroy;
	}

	trace_xfs_iget_miss(ip);

	/*
	 * Check the inode free state is valid. This also detects lookup
	 * racing with unlinks.
	 */
	error = xfs_iget_check_free_state(ip, flags);
	if (error)
		goto out_destroy;

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		d_mark_dontcache(VFS_I(ip));
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.  The inode is looked up
 * in the cache held in each AG.  If the inode is found in the cache, initialise
 * the vfs inode if necessary.
 *
 * If it is not in core, read it in from the file system's device, add it to the
 * cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * Inode lookup is only done during metadata operations and not as part of the
 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
 */
int
xfs_iget(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	struct xfs_inode	**ipp)
{
	struct xfs_inode	*ip;
	struct xfs_perag	*pag;
	xfs_agino_t		agino;
	int			error;

	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		if (flags & XFS_IGET_INCORE) {
			error = -ENODATA;
			goto out_error_or_again;
		}
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can set up the inode
	 * now.  If it's a new inode being created, xfs_init_new_inode will
	 * handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}

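/*
 * A typical metadata-path caller looks roughly like the sketch below (the
 * surrounding transaction and the inode number are assumed to exist):
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	...operate on ip with the ILOCK held...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);
 *
 * Only ILOCK flags may be passed; the ASSERT above rejects IOLOCK flags.
 */
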
/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, return 1 if the inode is allocated
 * and 0 if it is not.  For all other cases (not in cache, being torn
 * down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.  This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	xfs_irele(ip);
	return 0;
}

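/*
 * Sketch of the intended (scrub-only) calling convention, assuming the
 * caller already holds the AGI buffer locked so the inode cannot change
 * allocation state underneath us:
 *
 *	bool	inuse;
 *	int	error;
 *
 *	error = xfs_icache_inode_is_allocated(mp, tp, ino, &inuse);
 *	if (error)
 *		...not cached, or in transition: decide from the inobt...
 *	else
 *		...cross-check 'inuse' against the inobt record...
 */
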
/*
 * Grab the inode for reclaim exclusively.
 *
 * We have found this inode via a lookup under RCU, so the inode may have
 * already been freed, or it may be in the process of being recycled by
 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
 * will not be set. Hence we need to check for both these flag conditions to
 * avoid inodes that are no longer reclaim candidates.
 *
 * Note: checking for other state flags here, under the i_flags_lock or not, is
 * racy and should be avoided. Those races should be resolved only after we have
 * ensured that we are able to reclaim this inode and the world can see that we
 * are going to reclaim it.
 *
 * Return true if we grabbed it, false otherwise.
 */
static bool
xfs_reclaim_igrab(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	/* Don't reclaim a sick inode unless the caller asked for it. */
	if (ip->i_sick &&
	    (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
		spin_unlock(&ip->i_flags_lock);
		return false;
	}

	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return true;
}

/*
 * Inode reclaim is non-blocking, so the default action if progress cannot be
 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
 * XFS_IRECLAIM flag.  If we are in a shutdown state, we don't care about
 * blocking anymore and hence we can wait until the inode can be reclaimed.
 *
 * We do no IO here - if callers require inodes to be cleaned they must push the
 * AIL first to trigger writeback of dirty inodes.  This enables writeback to be
 * done in the background in a non-blocking manner, and enables memory reclaim
 * to make progress without blocking.
 */
static void
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */

	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
		goto out;
	if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
		goto out_iunlock;

	/*
	 * Check for log shutdown because aborting the inode can move the log
	 * tail and corrupt in memory state. This is fine if the log is shut
	 * down, but if the log is still active and only the mount is shut down
	 * then the in-memory log tail movement caused by the abort can be
	 * incorrectly propagated to disk.
	 */
	if (xlog_is_shutdown(ip->i_mount->m_log)) {
		xfs_iunpin_wait(ip);
		xfs_iflush_shutdown_abort(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip))
		goto out_clear_flush;
	if (!xfs_inode_clean(ip))
		goto out_clear_flush;

	xfs_iflags_clear(ip, XFS_IFLUSHING);
reclaim:
	trace_xfs_inode_reclaiming(ip);

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	ip->i_sick = 0;
	ip->i_checked = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree assert that it's been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	ASSERT(xfs_inode_clean(ip));

	__xfs_inode_free(ip);
	return;

out_clear_flush:
	xfs_iflags_clear(ip, XFS_IFLUSHING);
out_iunlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
}

/* Reclaim sick inodes if we're unmounting or the fs went down. */
static inline bool
xfs_want_reclaim_sick(
	struct xfs_mount	*mp)
{
	return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
	       xfs_is_shutdown(mp);
}

void
xfs_reclaim_inodes(
	struct xfs_mount	*mp)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= 0,
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		xfs_ail_push_all_sync(mp->m_ail);
		xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	}
}

/*
 * The shrinker infrastructure determines how many inodes we should scan for
 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
 * push the AIL here. We also want to proactively free up memory if we can to
 * minimise the amount of work memory reclaim has to do so we kick the
 * background reclaim if it isn't already scheduled.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	unsigned long		nr_to_scan)
{
	struct xfs_icwalk	icw = {
		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= min_t(unsigned long, LONG_MAX, nr_to_scan),
	};

	if (xfs_want_reclaim_sick(mp))
		icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;

	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
	return 0;
}

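/*
 * These two functions back the XFS inode shrinker: roughly speaking,
 * ->nr_cached_objects maps to xfs_reclaim_inodes_count() and
 * ->free_cached_objects to xfs_reclaim_inodes_nr().  A sketch of the glue
 * (see xfs_super.c for the real wiring):
 *
 *	static long
 *	xfs_fs_nr_cached_objects(
 *		struct super_block	*sb,
 *		struct shrink_control	*sc)
 *	{
 *		return xfs_reclaim_inodes_count(XFS_M(sb));
 *	}
 *
 * xfs_reclaim_inodes_nr() returning 0 means the scan itself claims no
 * progress; freed inodes are accounted through the VFS counters instead.
 */
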
/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
long
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	long			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC bool
xfs_icwalk_match_id(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return false;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid != icw->icw_prid)
		return false;

	return true;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC bool
xfs_icwalk_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
		return true;

	if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
	    ip->i_projid == icw->icw_prid)
		return true;

	return false;
}

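/*
 * The two matchers above differ only in how multiple criteria combine: the
 * plain matcher is a conjunction (every filter that is set must match), the
 * union matcher a disjunction (any single match suffices).  For instance,
 * given:
 *
 *	icw.icw_flags = XFS_ICWALK_FLAG_UID | XFS_ICWALK_FLAG_GID;
 *
 * the plain matcher selects inodes owned by icw_uid AND icw_gid, while
 * adding XFS_ICWALK_FLAG_UNION selects inodes matching either identity,
 * which is what the low-quota-space scans further down rely on.
 */
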
/*
 * Is this inode @ip eligible for eof/cow block reclamation, given some
 * filtering parameters @icw?  The inode is eligible if @icw is null or
 * if the predicate functions match.
 */
static bool
xfs_icwalk_match(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	bool			match;

	if (!icw)
		return true;

	if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
		match = xfs_icwalk_match_id_union(ip, icw);
	else
		match = xfs_icwalk_match_id(ip, icw);
	if (!match)
		return false;

	/* skip the inode if the file size is too small */
	if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
	    XFS_ISIZE(ip) < icw->icw_min_file_size)
		return false;

	return true;
}

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time.  It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low.
 */
void
xfs_reclaim_worker(
	struct work_struct	*work)
{
	struct xfs_mount	*mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
	xfs_reclaim_work_queue(mp);
}

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
		return 0;

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (xfs_can_free_eofblocks(ip, false))
		return xfs_free_eofblocks(ip);

	/* inode could be preallocated or append-only */
	trace_xfs_inode_free_eofblocks_invalid(ip);
	xfs_inode_clear_eofblocks_tag(ip);
	return 0;
}

static void
xfs_blockgc_set_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & iflag)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= iflag;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
}

static void
xfs_blockgc_clear_iflag(
	struct xfs_inode	*ip,
	unsigned long		iflag)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;
	bool			clear_tag;

	ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~iflag;
	clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
	spin_unlock(&ip->i_flags_lock);

	if (!clear_tag)
		return;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_BLOCKGC_TAG);

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
}

/*
 * Set ourselves up to free CoW blocks from this file.  If it's already clean
 * then we can bail out quickly, but otherwise we must back off if the file
 * is undergoing some kind of write.
 */
static bool
xfs_prep_free_cowblocks(
	struct xfs_inode	*ip)
{
	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_inode_has_cow_data(ip)) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return false;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return false;

	return true;
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long.  If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw,
	unsigned int		*lockflags)
{
	bool			wait;
	int			ret = 0;

	wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);

	if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
		return 0;

	if (!xfs_prep_free_cowblocks(ip))
		return 0;

	if (!xfs_icwalk_match(ip, icw))
		return 0;

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!(*lockflags & XFS_IOLOCK_EXCL) &&
	    !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_IOLOCK_EXCL;

	if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
		if (wait)
			return -EAGAIN;
		return 0;
	}
	*lockflags |= XFS_MMAPLOCK_EXCL;

	/*
	 * Check again, nobody else should be able to dirty blocks or change
	 * the reflink iflag now that we have the first two locks held.
	 */
	if (xfs_prep_free_cowblocks(ip))
		ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
	return ret;
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
}

/* Disable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_stop(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (!xfs_clear_blockgc_enabled(mp))
		return;

	for_each_perag(mp, agno, pag)
		cancel_delayed_work_sync(&pag->pag_blockgc_work);
	trace_xfs_blockgc_stop(mp, __return_address);
}

/* Enable post-EOF and CoW block auto-reclamation. */
void
xfs_blockgc_start(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	if (xfs_set_blockgc_enabled(mp))
		return;

	trace_xfs_blockgc_start(mp, __return_address);
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		xfs_blockgc_queue(pag);
}

/* Don't try to run block gc on an inode that's in any of these states. */
#define XFS_BLOCKGC_NOGRAB_IFLAGS	(XFS_INEW | \
					 XFS_NEED_INACTIVE | \
					 XFS_INACTIVATING | \
					 XFS_IRECLAIMABLE | \
					 XFS_IRECLAIM)
/*
 * Decide if the given @ip is eligible for garbage collection of speculative
 * preallocations, and grab it if so.  Returns true if it's ready to go or
 * false if we should just ignore it.
 */
static bool
xfs_blockgc_igrab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/* Check for stale RCU freed inode */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (xfs_is_shutdown(ip->i_mount))
		return false;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return false;

	/* inode is valid */
	return true;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return false;
}

/* Scan one incore inode for block preallocations that we can remove. */
static int
xfs_blockgc_scan_inode(
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	unsigned int		lockflags = 0;
	int			error;

	error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
	if (error)
		goto unlock;

	error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
unlock:
	if (lockflags)
		xfs_iunlock(ip, lockflags);
	xfs_irele(ip);
	return error;
}

/* Background worker that trims preallocated space. */
void
xfs_blockgc_worker(
	struct work_struct	*work)
{
	struct xfs_perag	*pag = container_of(to_delayed_work(work),
					struct xfs_perag, pag_blockgc_work);
	struct xfs_mount	*mp = pag->pag_mount;
	int			error;

	trace_xfs_blockgc_worker(mp, __return_address);

	error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
	if (error)
		xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
				pag->pag_agno, error);
	xfs_blockgc_queue(pag);
}

/*
 * Try to free space in the filesystem by purging inactive inodes, eofblocks
 * and cowblocks.
 */
int
xfs_blockgc_free_space(
	struct xfs_mount	*mp,
	struct xfs_icwalk	*icw)
{
	int			error;

	trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);

	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
	if (error)
		return error;

	xfs_inodegc_flush(mp);
	return 0;
}

/*
 * Reclaim all the free space that we can by scheduling the background blockgc
 * and inodegc workers immediately and waiting for them all to clear.
 */
void
xfs_blockgc_flush_all(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno;

	trace_xfs_blockgc_flush_all(mp, __return_address);

	/*
	 * For each blockgc worker, move its queue time up to now.  If it
	 * wasn't queued, it will not be requeued.  Then flush whatever's
	 * left.
	 */
	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		mod_delayed_work(pag->pag_mount->m_blockgc_wq,
				&pag->pag_blockgc_work, 0);

	for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
		flush_delayed_work(&pag->pag_blockgc_work);

	xfs_inodegc_flush(mp);
}

/*
 * Run cow/eofblocks scans on the supplied dquots.  We don't know exactly which
 * quota caused an allocation failure, so we make a best effort by including
 * each quota under low free space conditions (less than 1% free space) in the
 * scan.
 *
 * Callers must not hold any inode's ILOCK.  If requesting a synchronous scan
 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
 * MMAPLOCK.
 */
int
xfs_blockgc_free_dquots(
	struct xfs_mount	*mp,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	unsigned int		iwalk_flags)
{
	struct xfs_icwalk	icw = {0};
	bool			do_work = false;

	if (!udqp && !gdqp && !pdqp)
		return 0;

	/*
	 * Run a scan to free blocks using the union filter to cover all
	 * applicable quotas in a single scan.
	 */
	icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;

	if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
		icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_UID;
		do_work = true;
	}

	if (XFS_IS_UQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
		icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
		icw.icw_flags |= XFS_ICWALK_FLAG_GID;
		do_work = true;
	}

	if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
		icw.icw_prid = pdqp->q_id;
		icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
		do_work = true;
	}

	if (!do_work)
		return 0;

	return xfs_blockgc_free_space(mp, &icw);
}

/* Run cow/eofblocks scans on the quotas attached to the inode. */
int
xfs_blockgc_free_quota(
	struct xfs_inode	*ip,
	unsigned int		iwalk_flags)
{
	return xfs_blockgc_free_dquots(ip->i_mount,
			xfs_inode_dquot(ip, XFS_DQTYPE_USER),
			xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
			xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
}

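/*
 * Illustrative caller pattern, sketching how the transaction code reacts to
 * a quota reservation failure (see the xfs_trans_alloc_* helpers for the
 * real thing; the variable names here follow that code):
 *
 *	error = xfs_trans_reserve_quota_nblks(tp, ip, dblocks, rblocks, force);
 *	if (error == -EDQUOT || error == -ENOSPC) {
 *		xfs_trans_cancel(tp);
 *		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *		xfs_blockgc_free_quota(ip, 0);
 *		...retry the transaction allocation once...
 *	}
 */
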
/* XFS Inode Cache Walking Code */

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32


/*
 * Decide if we want to grab this inode in anticipation of doing work towards
 * the goal.
 */
static inline bool
xfs_icwalk_igrab(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_icwalk	*icw)
{
	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		return xfs_blockgc_igrab(ip);
	case XFS_ICWALK_RECLAIM:
		return xfs_reclaim_igrab(ip, icw);
	default:
		return false;
	}
}

/*
 * Process an inode.  Each processing function must handle any state changes
 * made by the icwalk igrab function.  Return -EAGAIN to skip an inode.
 */
static inline int
xfs_icwalk_process_inode(
	enum xfs_icwalk_goal	goal,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	struct xfs_icwalk	*icw)
{
	int			error = 0;

	switch (goal) {
	case XFS_ICWALK_BLOCKGC:
		error = xfs_blockgc_scan_inode(ip, icw);
		break;
	case XFS_ICWALK_RECLAIM:
		xfs_reclaim_inode(ip, pag);
		break;
	}
	return error;
}

/*
 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
 * process them in some manner.
 */
static int
xfs_icwalk_ag(
	struct xfs_perag	*pag,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_mount	*mp = pag->pag_mount;
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	bool			done;
	int			nr_found;

restart:
	done = false;
	skipped = 0;
	if (goal == XFS_ICWALK_RECLAIM)
		first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
	else
		first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **) batch, first_index,
				XFS_LOOKUP_BATCH, goal);
		if (!nr_found) {
			done = true;
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock.  If we found
		 * nothing, nr_found == 0 and the loop below will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || !xfs_icwalk_igrab(goal, ip, icw))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup.  Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG.  It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = true;
		}

		/* Unlock now that we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = xfs_icwalk_process_inode(goal, batch[i], pag,
					icw);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* Bail out if the filesystem is corrupted. */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

		if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
			icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
			if (icw->icw_scan_limit <= 0)
				break;
		}
	} while (nr_found && !done);

	if (goal == XFS_ICWALK_RECLAIM) {
		if (done)
			first_index = 0;
		WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
	}

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
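
/*
 * Worked example for the first_index overflow check above, using made-up
 * geometry purely for illustration (assume 8 bits of AG-relative inode
 * number, so XFS_INO_TO_AGINO() masks off everything above bit 7):
 *
 *	ip->i_ino     = 0x3ff	(AG 3, agino 0xff: the last inode in the AG)
 *	ip->i_ino + 1 = 0x400	(the +1 carries into the AG number bits)
 *	XFS_INO_TO_AGINO(mp, 0x400) = 0
 *
 * The new first_index (0) is less than the old agino (0xff), so the wrap is
 * detected and done is set.  Without this check the walk would restart at
 * index 0 and revisit the same AG's inodes forever.
 */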

/* Walk all incore inodes to achieve a given goal. */
static int
xfs_icwalk(
	struct xfs_mount	*mp,
	enum xfs_icwalk_goal	goal,
	struct xfs_icwalk	*icw)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		agno;

	/*
	 * Compile-time assertion that the walk-private flags stay disjoint
	 * from the flags callers are allowed to pass in; this generates no
	 * object code.
	 */
	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);

	for_each_perag_tag(mp, agno, pag, goal) {
		error = xfs_icwalk_ag(pag, goal, icw);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED) {
				xfs_perag_put(pag);
				break;
			}
		}
	}
	return last_error;
}
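
/*
 * Illustrative sketch of driving a walk (not a real caller; the icw setup
 * shown is an assumption for demonstration).  A blockgc pass that should
 * give up after two lookup batches might be built like this:
 *
 *	struct xfs_icwalk	icw = {
 *		.icw_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
 *		.icw_scan_limit	= 2 * XFS_LOOKUP_BATCH,
 *	};
 *
 *	error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, &icw);
 *
 * In-tree callers follow the same shape; the reclaim path, for instance,
 * caps its work by deriving icw_scan_limit from the shrinker's scan count.
 */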

#ifdef DEBUG
static void
xfs_check_delalloc(
	struct xfs_inode	*ip,
	int			whichfork)
{
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, whichfork);
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;

	if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
		return;
	do {
		if (isnullstartblock(got.br_startblock)) {
			xfs_warn(ip->i_mount,
	"ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
				ip->i_ino,
				whichfork == XFS_DATA_FORK ? "data" : "cow",
				got.br_startoff, got.br_blockcount);
		}
	} while (xfs_iext_next_extent(ifp, &icur, &got));
}
#else
#define xfs_check_delalloc(ip, whichfork)	do { } while (0)
#endif

/* Schedule the inode for reclaim. */
static void
xfs_inodegc_set_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
		xfs_check_delalloc(ip, XFS_DATA_FORK);
		xfs_check_delalloc(ip, XFS_COW_FORK);
		ASSERT(0);
	}

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	trace_xfs_inode_set_reclaimable(ip);
	ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
	ip->i_flags |= XFS_IRECLAIMABLE;
	xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			XFS_ICI_RECLAIM_TAG);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

/*
 * Free all speculative preallocations and possibly even the inode itself.
 * This is the last chance to make changes to an otherwise unreferenced file
 * before incore reclamation happens.
 */
static void
xfs_inodegc_inactivate(
	struct xfs_inode	*ip)
{
	trace_xfs_inode_inactivating(ip);
	xfs_inactive(ip);
	xfs_inodegc_set_reclaimable(ip);
}

void
xfs_inodegc_worker(
	struct work_struct	*work)
{
	struct xfs_inodegc	*gc = container_of(to_delayed_work(work),
						struct xfs_inodegc, work);
	struct llist_node	*node = llist_del_all(&gc->list);
	struct xfs_inode	*ip, *n;

	WRITE_ONCE(gc->items, 0);

	if (!node)
		return;

	ip = llist_entry(node, struct xfs_inode, i_gclist);
	trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));

	WRITE_ONCE(gc->shrinker_hits, 0);
	llist_for_each_entry_safe(ip, n, node, i_gclist) {
		xfs_iflags_set(ip, XFS_INACTIVATING);
		xfs_inodegc_inactivate(ip);
	}
}
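
/*
 * The list handling above relies on the lock-free llist contract: producers
 * push entries with llist_add() and the worker claims the whole chain in one
 * atomic exchange via llist_del_all(), so neither side takes a lock.  A
 * minimal sketch of the pattern, using the names from this file:
 *
 *	producer:
 *		llist_add(&ip->i_gclist, &gc->list);
 *
 *	consumer:
 *		node = llist_del_all(&gc->list);
 *		llist_for_each_entry_safe(ip, n, node, i_gclist)
 *			xfs_inodegc_inactivate(ip);
 *
 * Entries come back in reverse insertion order, which is fine here because
 * inactivation order does not matter.
 */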

/*
 * Expedite all pending inodegc work to run immediately.  This does not wait
 * for completion of the work.
 */
void
xfs_inodegc_push(
	struct xfs_mount	*mp)
{
	if (!xfs_is_inodegc_enabled(mp))
		return;
	trace_xfs_inodegc_push(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}

/*
 * Force all currently queued inode inactivation work to run immediately and
 * wait for the work to finish.
 */
void
xfs_inodegc_flush(
	struct xfs_mount	*mp)
{
	xfs_inodegc_push(mp);
	trace_xfs_inodegc_flush(mp, __return_address);
	flush_workqueue(mp->m_inodegc_wq);
}
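
/*
 * Choosing between the two (illustrative; the caller sites named here are
 * examples, not a complete list): a path that only wants to nudge the
 * backlog along, such as space usage reporting, can afford the non-blocking
 * form, while a path that must observe the freed resources has to pay for
 * the drain:
 *
 *	xfs_inodegc_push(mp);	kick all queues, return immediately
 *	xfs_inodegc_flush(mp);	kick all queues, wait for the workqueue
 */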

/*
 * Flush all the pending work and then disable the inode inactivation
 * background workers and wait for them to stop.
 */
void
xfs_inodegc_stop(
	struct xfs_mount	*mp)
{
	if (!xfs_clear_inodegc_enabled(mp))
		return;

	xfs_inodegc_queue_all(mp);
	drain_workqueue(mp->m_inodegc_wq);

	trace_xfs_inodegc_stop(mp, __return_address);
}

/*
 * Enable the inode inactivation background workers and schedule deferred
 * inode inactivation work if there is any.
 */
void
xfs_inodegc_start(
	struct xfs_mount	*mp)
{
	if (xfs_set_inodegc_enabled(mp))
		return;

	trace_xfs_inodegc_start(mp, __return_address);
	xfs_inodegc_queue_all(mp);
}
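
/*
 * Stop/start bracket periods when no new inactivation work may run.  An
 * illustrative pairing (the freeze-style quiesce paths use this shape):
 *
 *	xfs_inodegc_stop(mp);	quiesce: drain and disable the workers
 *	...filesystem is frozen...
 *	xfs_inodegc_start(mp);	thaw: re-enable and requeue deferred work
 *
 * The enabled-state test-and-set in each function makes the pair idempotent.
 */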

#ifdef CONFIG_XFS_RT
static inline bool
xfs_inodegc_want_queue_rt_file(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_REALTIME_INODE(ip))
		return false;

	if (__percpu_counter_compare(&mp->m_frextents,
				mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	return false;
}
#else
# define xfs_inodegc_want_queue_rt_file(ip)	(false)
#endif /* CONFIG_XFS_RT */

/*
 * Schedule the inactivation worker when:
 *
 *  - We've accumulated more than one inode cluster buffer's worth of inodes.
 *  - There is less than 5% free space left.
 *  - Any of the quotas for this inode are near an enforcement limit.
 */
static inline bool
xfs_inodegc_want_queue_work(
	struct xfs_inode	*ip,
	unsigned int		items)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (items > mp->m_ino_geo.inodes_per_cluster)
		return true;

	if (__percpu_counter_compare(&mp->m_fdblocks,
				mp->m_low_space[XFS_LOWSP_5_PCNT],
				XFS_FDBLOCKS_BATCH) < 0)
		return true;

	if (xfs_inodegc_want_queue_rt_file(ip))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
		return true;

	if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
		return true;

	return false;
}
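
/*
 * Worked example for the first threshold above, with made-up geometry: with
 * 512-byte inodes and 32KiB inode cluster buffers, inodes_per_cluster is 64,
 * so the 65th inode queued on a CPU makes this return true and the caller
 * schedules the worker without the usual batching delay.
 */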

/*
 * Upper bound on the number of inodes on each per-cpu inodegc queue at any
 * given time, to avoid monopolizing the workqueue.
 */
#define XFS_INODEGC_MAX_BACKLOG		(4 * XFS_INODES_PER_CHUNK)

/*
 * Make the frontend wait for inactivations when:
 *
 *  - Memory shrinkers queued the inactivation worker and it hasn't finished.
 *  - The queue depth exceeds the maximum allowable percpu backlog.
 *
 * Note: If the current thread is running a transaction, we don't ever want to
 * wait for other transactions because that could introduce a deadlock.
 */
static inline bool
xfs_inodegc_want_flush_work(
	struct xfs_inode	*ip,
	unsigned int		items,
	unsigned int		shrinker_hits)
{
	if (current->journal_info)
		return false;

	if (shrinker_hits > 0)
		return true;

	if (items > XFS_INODEGC_MAX_BACKLOG)
		return true;

	return false;
}
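
/*
 * Worked numbers for the backlog cap: XFS_INODES_PER_CHUNK is 64, so
 * XFS_INODEGC_MAX_BACKLOG is 256.  A non-transaction caller that finds more
 * than 256 inodes already pending on its CPU queue is made to flush and
 * wait, bounding how far the frontend can outrun the background workers.
 */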

/*
 * Queue a background inactivation worker if there are inodes that need to be
 * inactivated and higher level xfs code hasn't disabled the background
 * workers.
 */
static void
xfs_inodegc_queue(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inodegc	*gc;
	int			items;
	unsigned int		shrinker_hits;
	unsigned long		queue_delay = 1;

	trace_xfs_inode_set_need_inactive(ip);
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_NEED_INACTIVE;
	spin_unlock(&ip->i_flags_lock);

	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add(&ip->i_gclist, &gc->list);
	items = READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, items + 1);
	shrinker_hits = READ_ONCE(gc->shrinker_hits);

	/*
	 * We queue the work while holding the current CPU so that the work
	 * is scheduled to run on this CPU.
	 */
	if (!xfs_is_inodegc_enabled(mp)) {
		put_cpu_ptr(gc);
		return;
	}

	if (xfs_inodegc_want_queue_work(ip, items))
		queue_delay = 0;

	trace_xfs_inodegc_queue(mp, __return_address);
	mod_delayed_work(mp->m_inodegc_wq, &gc->work, queue_delay);
	put_cpu_ptr(gc);

	if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
		trace_xfs_inodegc_throttle(mp, __return_address);
		flush_delayed_work(&gc->work);
	}
}
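
/*
 * Note on the delay values above: a one-jiffy delay lets several queueing
 * operations on the same CPU coalesce into a single worker run, while a zero
 * delay asks the workqueue to run the worker as soon as possible.  Because
 * mod_delayed_work() re-arms an already pending timer, a later urgent queue
 * attempt shortens the wait chosen by an earlier relaxed one:
 *
 *	mod_delayed_work(mp->m_inodegc_wq, &gc->work, 1);	relaxed
 *	mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);	now urgent
 */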

/*
 * Fold the dead CPU's inodegc queue into the current CPU's queue.
 */
void
xfs_inodegc_cpu_dead(
	struct xfs_mount	*mp,
	unsigned int		dead_cpu)
{
	struct xfs_inodegc	*dead_gc, *gc;
	struct llist_node	*first, *last;
	unsigned int		count = 0;

	dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
	cancel_delayed_work_sync(&dead_gc->work);

	if (llist_empty(&dead_gc->list))
		return;

	first = dead_gc->list.first;
	last = first;
	while (last->next) {
		last = last->next;
		count++;
	}
	dead_gc->list.first = NULL;
	dead_gc->items = 0;

	/* Add the pending work to the current CPU. */
	gc = get_cpu_ptr(mp->m_inodegc);
	llist_add_batch(first, last, &gc->list);
	count += READ_ONCE(gc->items);
	WRITE_ONCE(gc->items, count);

	if (xfs_is_inodegc_enabled(mp)) {
		trace_xfs_inodegc_queue(mp, __return_address);
		mod_delayed_work(mp->m_inodegc_wq, &gc->work, 0);
	}
	put_cpu_ptr(gc);
}

/*
 * We set the inode flag atomically with the radix tree tag.  Once we get tag
 * lookups on the radix tree, this inode flag can go away.
 *
 * We always use background reclaim here because even if the inode is clean,
 * it may still be under IO and hence we have to wait for IO completion before
 * we can reclaim the inode.  The background reclaim path handles this more
 * efficiently than we can here, so simply let background reclaim tear down
 * all inodes.
 */
void
xfs_inode_mark_reclaimable(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	bool			need_inactive;

	XFS_STATS_INC(mp, vn_reclaim);

	/*
	 * We should never get here with any of the reclaim flags already set.
	 */
	ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));

	need_inactive = xfs_inode_needs_inactive(ip);
	if (need_inactive) {
		xfs_inodegc_queue(ip);
		return;
	}

	/* Going straight to reclaim, so drop the dquots. */
	xfs_qm_dqdetach(ip);
	xfs_inodegc_set_reclaimable(ip);
}

/*
 * Register a phony shrinker so that we can run background inodegc sooner when
 * there's memory pressure.  Inactivation does not itself free any memory but
 * it does make inodes reclaimable, which eventually frees memory.
 *
 * The count function, seek value, and batch value are crafted to trigger the
 * scan function during the second round of scanning.  Hopefully this means
 * that we reclaimed enough memory that initiating metadata transactions won't
 * make things worse.
 */
#define XFS_INODEGC_SHRINKER_COUNT	(1UL << DEF_PRIORITY)
#define XFS_INODEGC_SHRINKER_BATCH	((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
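
/*
 * How those constants conspire (a sketch that assumes vmscan's handling of
 * seeks == 0 shrinkers, which accrue freeable / 2 objects of scan work per
 * reclaim round): DEF_PRIORITY is 12, so COUNT is 4096 and BATCH is 2049.
 *
 *	round 1: pending work = 4096 / 2 = 2048 < 2049, no scan call
 *	round 2: pending work = 2048 + 2048 = 4096 >= 2049, scan runs
 *
 * which is how "trigger the scan function during the second round" falls
 * out of the arithmetic.
 */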

static unsigned long
xfs_inodegc_shrinker_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						   m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;

	if (!xfs_is_inodegc_enabled(mp))
		return 0;

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list))
			return XFS_INODEGC_SHRINKER_COUNT;
	}

	return 0;
}

static unsigned long
xfs_inodegc_shrinker_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp = container_of(shrink, struct xfs_mount,
						   m_inodegc_shrinker);
	struct xfs_inodegc	*gc;
	int			cpu;
	bool			no_items = true;

	if (!xfs_is_inodegc_enabled(mp))
		return SHRINK_STOP;

	trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);

	for_each_online_cpu(cpu) {
		gc = per_cpu_ptr(mp->m_inodegc, cpu);
		if (!llist_empty(&gc->list)) {
			unsigned int	h = READ_ONCE(gc->shrinker_hits);

			WRITE_ONCE(gc->shrinker_hits, h + 1);
			mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
			no_items = false;
		}
	}

	/*
	 * If there are no inodes to inactivate, we don't want the shrinker
	 * to think there's deferred work to call us back about.
	 */
	if (no_items)
		return LONG_MAX;

	return SHRINK_STOP;
}

/* Register a shrinker so we can accelerate inodegc and throttle queuing. */
int
xfs_inodegc_register_shrinker(
	struct xfs_mount	*mp)
{
	struct shrinker		*shrink = &mp->m_inodegc_shrinker;

	shrink->count_objects = xfs_inodegc_shrinker_count;
	shrink->scan_objects = xfs_inodegc_shrinker_scan;
	shrink->seeks = 0;
	shrink->flags = SHRINKER_NONSLAB;
	shrink->batch = XFS_INODEGC_SHRINKER_BATCH;

	return register_shrinker(shrink);
}
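
/*
 * Illustrative mount-time wiring (a sketch; the exact call sites live in the
 * mount and unmount paths, not here): registration happens once per mount,
 * and the shrinker must be torn down with unregister_shrinker() before the
 * percpu inodegc structures are freed.
 *
 *	error = xfs_inodegc_register_shrinker(mp);
 *	if (error)
 *		return error;
 *	...
 *	unregister_shrinker(&mp->m_inodegc_shrinker);
 */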