Commit | Line | Data |
---|---|---|
0b61f8a4 | 1 | // SPDX-License-Identifier: GPL-2.0 |
fe4fa4b8 DC |
2 | /* |
3 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. | |
4 | * All Rights Reserved. | |
fe4fa4b8 DC |
5 | */ |
6 | #include "xfs.h" | |
7 | #include "xfs_fs.h" | |
5467b34b | 8 | #include "xfs_shared.h" |
6ca1c906 | 9 | #include "xfs_format.h" |
239880ef DC |
10 | #include "xfs_log_format.h" |
11 | #include "xfs_trans_resv.h" | |
fe4fa4b8 | 12 | #include "xfs_mount.h" |
fe4fa4b8 | 13 | #include "xfs_inode.h" |
239880ef DC |
14 | #include "xfs_trans.h" |
15 | #include "xfs_trans_priv.h" | |
fe4fa4b8 | 16 | #include "xfs_inode_item.h" |
7d095257 | 17 | #include "xfs_quota.h" |
0b1b213f | 18 | #include "xfs_trace.h" |
6d8b79cf | 19 | #include "xfs_icache.h" |
c24b5dfa | 20 | #include "xfs_bmap_util.h" |
dc06f398 BF |
21 | #include "xfs_dquot_item.h" |
22 | #include "xfs_dquot.h" | |
83104d44 | 23 | #include "xfs_reflink.h" |
bb8a66af | 24 | #include "xfs_ialloc.h" |
9bbafc71 | 25 | #include "xfs_ag.h" |
01728b44 | 26 | #include "xfs_log_priv.h" |
baf44fa5 | 27 | #include "xfs_health.h" |
dcf60691 DW |
28 | #include "xfs_da_format.h" |
29 | #include "xfs_dir2.h" | |
30 | #include "xfs_metafile.h" | |
fe4fa4b8 | 31 | |
f0e28280 | 32 | #include <linux/iversion.h> |
a167b17e | 33 | |
c809d7e9 DW |
34 | /* Radix tree tags for incore inode tree. */ |
35 | ||
36 | /* inode is to be reclaimed */ | |
37 | #define XFS_ICI_RECLAIM_TAG 0 | |
38 | /* Inode has speculative preallocations (posteof or cow) to clean. */ | |
39 | #define XFS_ICI_BLOCKGC_TAG 1 | |
40 | ||
41 | /* | |
42 | * The goal for walking incore inodes. These can correspond with incore inode | |
43 | * radix tree tags when convenient. Avoid existing XFS_IWALK namespace. | |
44 | */ | |
45 | enum xfs_icwalk_goal { | |
c809d7e9 DW |
46 | /* Goals directly associated with tagged inodes. */ |
47 | XFS_ICWALK_BLOCKGC = XFS_ICI_BLOCKGC_TAG, | |
f1bc5c56 | 48 | XFS_ICWALK_RECLAIM = XFS_ICI_RECLAIM_TAG, |
c809d7e9 DW |
49 | }; |
50 | ||
7fdff526 | 51 | static int xfs_icwalk(struct xfs_mount *mp, |
b26b2bf1 | 52 | enum xfs_icwalk_goal goal, struct xfs_icwalk *icw); |
7fdff526 | 53 | static int xfs_icwalk_ag(struct xfs_perag *pag, |
b26b2bf1 | 54 | enum xfs_icwalk_goal goal, struct xfs_icwalk *icw); |
df600197 | 55 | |
1ad2cfe0 | 56 | /* |
b26b2bf1 DW |
57 | * Private inode cache walk flags for struct xfs_icwalk. Must not |
58 | * coincide with XFS_ICWALK_FLAGS_VALID. | |
1ad2cfe0 | 59 | */ |
1ad2cfe0 | 60 | |
f1bc5c56 DW |
61 | /* Stop scanning after icw_scan_limit inodes. */ |
62 | #define XFS_ICWALK_FLAG_SCAN_LIMIT (1U << 28) | |
63 | ||
9492750a | 64 | #define XFS_ICWALK_FLAG_RECLAIM_SICK (1U << 27) |
2d53f66b | 65 | #define XFS_ICWALK_FLAG_UNION (1U << 26) /* union filter algorithm */ |
9492750a | 66 | |
777eb1fa | 67 | #define XFS_ICWALK_PRIVATE_FLAGS (XFS_ICWALK_FLAG_SCAN_LIMIT | \ |
2d53f66b DW |
68 | XFS_ICWALK_FLAG_RECLAIM_SICK | \ |
69 | XFS_ICWALK_FLAG_UNION) | |
1ad2cfe0 | 70 | |
32fa4059 CH |
71 | /* Marks for the perag xarray */ |
72 | #define XFS_PERAG_RECLAIM_MARK XA_MARK_0 | |
73 | #define XFS_PERAG_BLOCKGC_MARK XA_MARK_1 | |
74 | ||
75 | static inline xa_mark_t ici_tag_to_mark(unsigned int tag) | |
76 | { | |
77 | if (tag == XFS_ICI_RECLAIM_TAG) | |
78 | return XFS_PERAG_RECLAIM_MARK; | |
79 | ASSERT(tag == XFS_ICI_BLOCKGC_TAG); | |
80 | return XFS_PERAG_BLOCKGC_MARK; | |
81 | } | |
82 | ||
33479e05 DC |
83 | /* |
84 | * Allocate and initialise an xfs_inode. | |
85 | */ | |
638f4416 | 86 | struct xfs_inode * |
33479e05 DC |
87 | xfs_inode_alloc( |
88 | struct xfs_mount *mp, | |
89 | xfs_ino_t ino) | |
90 | { | |
91 | struct xfs_inode *ip; | |
92 | ||
93 | /* | |
3050bd0b CM |
94 | * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL |
95 | * and return NULL here on ENOMEM. | |
33479e05 | 96 | */ |
fd60b288 | 97 | ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL); |
3050bd0b | 98 | |
33479e05 | 99 | if (inode_init_always(mp->m_super, VFS_I(ip))) { |
182696fb | 100 | kmem_cache_free(xfs_inode_cache, ip); |
33479e05 DC |
101 | return NULL; |
102 | } | |
103 | ||
e9dae2fb | 104 | /* VFS doesn't initialise i_mode! */ |
c19b3b05 | 105 | VFS_I(ip)->i_mode = 0; |
7df7c204 PR |
106 | mapping_set_folio_min_order(VFS_I(ip)->i_mapping, |
107 | M_IGEO(mp)->min_folio_order); | |
c19b3b05 | 108 | |
ff6d6af2 | 109 | XFS_STATS_INC(mp, vn_active); |
33479e05 | 110 | ASSERT(atomic_read(&ip->i_pincount) == 0); |
33479e05 DC |
111 | ASSERT(ip->i_ino == 0); |
112 | ||
33479e05 DC |
113 | /* initialise the xfs inode */ |
114 | ip->i_ino = ino; | |
115 | ip->i_mount = mp; | |
116 | memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); | |
3993baeb | 117 | ip->i_cowfp = NULL; |
2ed5b09b DW |
118 | memset(&ip->i_af, 0, sizeof(ip->i_af)); |
119 | ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS; | |
3ba738df | 120 | memset(&ip->i_df, 0, sizeof(ip->i_df)); |
33479e05 DC |
121 | ip->i_flags = 0; |
122 | ip->i_delayed_blks = 0; | |
3e09ab8f | 123 | ip->i_diflags2 = mp->m_ino_geo.new_diflags2; |
6e73a545 | 124 | ip->i_nblocks = 0; |
7821ea30 | 125 | ip->i_forkoff = 0; |
6772c1f1 DW |
126 | ip->i_sick = 0; |
127 | ip->i_checked = 0; | |
cb357bf3 DW |
128 | INIT_WORK(&ip->i_ioend_work, xfs_end_io); |
129 | INIT_LIST_HEAD(&ip->i_ioend_list); | |
130 | spin_lock_init(&ip->i_ioend_lock); | |
2fd26cc0 | 131 | ip->i_next_unlinked = NULLAGINO; |
f12b9668 | 132 | ip->i_prev_unlinked = 0; |
33479e05 DC |
133 | |
134 | return ip; | |
135 | } | |
136 | ||
137 | STATIC void | |
138 | xfs_inode_free_callback( | |
139 | struct rcu_head *head) | |
140 | { | |
141 | struct inode *inode = container_of(head, struct inode, i_rcu); | |
142 | struct xfs_inode *ip = XFS_I(inode); | |
143 | ||
c19b3b05 | 144 | switch (VFS_I(ip)->i_mode & S_IFMT) { |
33479e05 DC |
145 | case S_IFREG: |
146 | case S_IFDIR: | |
147 | case S_IFLNK: | |
ef838512 | 148 | xfs_idestroy_fork(&ip->i_df); |
33479e05 DC |
149 | break; |
150 | } | |
151 | ||
e45d7cb2 DW |
152 | xfs_ifork_zap_attr(ip); |
153 | ||
ef838512 CH |
154 | if (ip->i_cowfp) { |
155 | xfs_idestroy_fork(ip->i_cowfp); | |
182696fb | 156 | kmem_cache_free(xfs_ifork_cache, ip->i_cowfp); |
ef838512 | 157 | } |
33479e05 | 158 | if (ip->i_itemp) { |
22525c17 DC |
159 | ASSERT(!test_bit(XFS_LI_IN_AIL, |
160 | &ip->i_itemp->ili_item.li_flags)); | |
33479e05 DC |
161 | xfs_inode_item_destroy(ip); |
162 | ip->i_itemp = NULL; | |
163 | } | |
164 | ||
182696fb | 165 | kmem_cache_free(xfs_inode_cache, ip); |
1f2dcfe8 DC |
166 | } |
167 | ||
8a17d7dd DC |
168 | static void |
169 | __xfs_inode_free( | |
170 | struct xfs_inode *ip) | |
171 | { | |
172 | /* asserts to verify all state is correct here */ | |
173 | ASSERT(atomic_read(&ip->i_pincount) == 0); | |
48d55e2a | 174 | ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list)); |
8a17d7dd DC |
175 | XFS_STATS_DEC(ip->i_mount, vn_active); |
176 | ||
177 | call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); | |
178 | } | |
179 | ||
1f2dcfe8 DC |
180 | void |
181 | xfs_inode_free( | |
182 | struct xfs_inode *ip) | |
183 | { | |
718ecc50 | 184 | ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING)); |
98efe8af | 185 | |
33479e05 DC |
186 | /* |
187 | * Because we use RCU freeing we need to ensure the inode always | |
188 | * appears to be reclaimed with an invalid inode number when in the | |
189 | * free state. The ip->i_flags_lock provides the barrier against lookup | |
190 | * races. | |
191 | */ | |
192 | spin_lock(&ip->i_flags_lock); | |
193 | ip->i_flags = XFS_IRECLAIM; | |
194 | ip->i_ino = 0; | |
195 | spin_unlock(&ip->i_flags_lock); | |
196 | ||
8a17d7dd | 197 | __xfs_inode_free(ip); |
33479e05 DC |
198 | } |
199 | ||
ad438c40 | 200 | /* |
02511a5a DC |
201 | * Queue background inode reclaim work if there are reclaimable inodes and there |
202 | * isn't reclaim work already scheduled or in progress. | |
ad438c40 DC |
203 | */ |
204 | static void | |
205 | xfs_reclaim_work_queue( | |
206 | struct xfs_mount *mp) | |
207 | { | |
208 | ||
209 | rcu_read_lock(); | |
e9c4d8bf | 210 | if (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) { |
ad438c40 DC |
211 | queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, |
212 | msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); | |
213 | } | |
214 | rcu_read_unlock(); | |
215 | } | |
216 | ||
c076ae7a DW |
217 | /* |
218 | * Background scanning to trim preallocated space. This is queued based on the | |
219 | * 'speculative_prealloc_lifetime' tunable (5m by default). | |
220 | */ | |
221 | static inline void | |
222 | xfs_blockgc_queue( | |
ad438c40 | 223 | struct xfs_perag *pag) |
c076ae7a | 224 | { |
e9c4d8bf | 225 | struct xfs_mount *mp = pag_mount(pag); |
6f649091 DW |
226 | |
227 | if (!xfs_is_blockgc_enabled(mp)) | |
228 | return; | |
229 | ||
c076ae7a DW |
230 | rcu_read_lock(); |
231 | if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) | |
e9c4d8bf | 232 | queue_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work, |
a877cd2a | 233 | secs_to_jiffies(xfs_blockgc_secs)); |
c076ae7a DW |
234 | rcu_read_unlock(); |
235 | } | |
236 | ||
237 | /* Set a tag on both the AG incore inode tree and the AG radix tree. */ | |
238 | static void | |
239 | xfs_perag_set_inode_tag( | |
240 | struct xfs_perag *pag, | |
241 | xfs_agino_t agino, | |
242 | unsigned int tag) | |
ad438c40 | 243 | { |
c076ae7a | 244 | bool was_tagged; |
ad438c40 | 245 | |
95989c46 | 246 | lockdep_assert_held(&pag->pag_ici_lock); |
c076ae7a DW |
247 | |
248 | was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag); | |
249 | radix_tree_tag_set(&pag->pag_ici_root, agino, tag); | |
250 | ||
251 | if (tag == XFS_ICI_RECLAIM_TAG) | |
252 | pag->pag_ici_reclaimable++; | |
253 | ||
254 | if (was_tagged) | |
ad438c40 DC |
255 | return; |
256 | ||
e9c4d8bf CH |
257 | /* propagate the tag up into the pag xarray tree */ |
258 | xfs_group_set_mark(pag_group(pag), ici_tag_to_mark(tag)); | |
ad438c40 | 259 | |
c076ae7a DW |
260 | /* start background work */ |
261 | switch (tag) { | |
262 | case XFS_ICI_RECLAIM_TAG: | |
e9c4d8bf | 263 | xfs_reclaim_work_queue(pag_mount(pag)); |
c076ae7a DW |
264 | break; |
265 | case XFS_ICI_BLOCKGC_TAG: | |
266 | xfs_blockgc_queue(pag); | |
267 | break; | |
268 | } | |
ad438c40 | 269 | |
368e2d09 | 270 | trace_xfs_perag_set_inode_tag(pag, _RET_IP_); |
ad438c40 DC |
271 | } |
272 | ||
c076ae7a | 273 | /* Clear a tag on both the AG incore inode tree and the AG radix tree. */ |
ad438c40 | 274 | static void |
c076ae7a DW |
275 | xfs_perag_clear_inode_tag( |
276 | struct xfs_perag *pag, | |
277 | xfs_agino_t agino, | |
278 | unsigned int tag) | |
ad438c40 | 279 | { |
95989c46 | 280 | lockdep_assert_held(&pag->pag_ici_lock); |
c076ae7a DW |
281 | |
282 | /* | |
283 | * Reclaim can signal (with a null agino) that it cleared its own tag | |
284 | * by removing the inode from the radix tree. | |
285 | */ | |
286 | if (agino != NULLAGINO) | |
287 | radix_tree_tag_clear(&pag->pag_ici_root, agino, tag); | |
288 | else | |
289 | ASSERT(tag == XFS_ICI_RECLAIM_TAG); | |
290 | ||
291 | if (tag == XFS_ICI_RECLAIM_TAG) | |
292 | pag->pag_ici_reclaimable--; | |
293 | ||
294 | if (radix_tree_tagged(&pag->pag_ici_root, tag)) | |
ad438c40 DC |
295 | return; |
296 | ||
e9c4d8bf CH |
297 | /* clear the tag from the pag xarray */ |
298 | xfs_group_clear_mark(pag_group(pag), ici_tag_to_mark(tag)); | |
368e2d09 | 299 | trace_xfs_perag_clear_inode_tag(pag, _RET_IP_); |
c076ae7a | 300 | } |
ad438c40 | 301 | |
f48f0a8e | 302 | /* |
f9ffd095 | 303 | * Find the next AG after @pag, or the first AG if @pag is NULL. |
f48f0a8e CH |
304 | */ |
305 | static struct xfs_perag * | |
f9ffd095 | 306 | xfs_perag_grab_next_tag( |
f48f0a8e | 307 | struct xfs_mount *mp, |
f9ffd095 | 308 | struct xfs_perag *pag, |
f48f0a8e CH |
309 | int tag) |
310 | { | |
e9c4d8bf CH |
311 | return to_perag(xfs_group_grab_next_mark(mp, |
312 | pag ? pag_group(pag) : NULL, | |
313 | ici_tag_to_mark(tag), XG_TYPE_AG)); | |
f48f0a8e CH |
314 | } |
315 | ||
50997470 DC |
316 | /* |
317 | * When we recycle a reclaimable inode, we need to re-initialise the VFS inode | |
318 | * part of the structure. This is made more complex by the fact we store | |
319 | * information about the on-disk values in the VFS inode and so we can't just | |
83e06f21 | 320 | * overwrite the values unconditionally. Hence we save the parameters we |
50997470 | 321 | * need to retain across reinitialisation, and rewrite them into the VFS inode |
83e06f21 | 322 | * after reinitialisation even if it fails. |
50997470 DC |
323 | */ |
324 | static int | |
325 | xfs_reinit_inode( | |
326 | struct xfs_mount *mp, | |
327 | struct inode *inode) | |
328 | { | |
ff7bebeb DW |
329 | int error; |
330 | uint32_t nlink = inode->i_nlink; | |
331 | uint32_t generation = inode->i_generation; | |
332 | uint64_t version = inode_peek_iversion(inode); | |
333 | umode_t mode = inode->i_mode; | |
334 | dev_t dev = inode->i_rdev; | |
335 | kuid_t uid = inode->i_uid; | |
336 | kgid_t gid = inode->i_gid; | |
ddd4cd48 | 337 | unsigned long state = inode->i_state; |
50997470 DC |
338 | |
339 | error = inode_init_always(mp->m_super, inode); | |
340 | ||
54d7b5c1 | 341 | set_nlink(inode, nlink); |
9e9a2674 | 342 | inode->i_generation = generation; |
f0e28280 | 343 | inode_set_iversion_queried(inode, version); |
c19b3b05 | 344 | inode->i_mode = mode; |
acd1d715 | 345 | inode->i_rdev = dev; |
3d8f2821 CH |
346 | inode->i_uid = uid; |
347 | inode->i_gid = gid; | |
ddd4cd48 | 348 | inode->i_state = state; |
7df7c204 PR |
349 | mapping_set_folio_min_order(inode->i_mapping, |
350 | M_IGEO(mp)->min_folio_order); | |
50997470 DC |
351 | return error; |
352 | } | |
353 | ||
ff7bebeb DW |
354 | /* |
355 | * Carefully nudge an inode whose VFS state has been torn down back into a | |
356 | * usable state. Drops the i_flags_lock and the rcu read lock. | |
357 | */ | |
358 | static int | |
359 | xfs_iget_recycle( | |
360 | struct xfs_perag *pag, | |
361 | struct xfs_inode *ip) __releases(&ip->i_flags_lock) | |
362 | { | |
363 | struct xfs_mount *mp = ip->i_mount; | |
364 | struct inode *inode = VFS_I(ip); | |
365 | int error; | |
366 | ||
367 | trace_xfs_iget_recycle(ip); | |
368 | ||
28b4b059 LL |
369 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) |
370 | return -EAGAIN; | |
371 | ||
ff7bebeb DW |
372 | /* |
373 | * We need to make it look like the inode is being reclaimed to prevent | |
374 | * the actual reclaim workers from stomping over us while we recycle | |
375 | * the inode. We can't clear the radix tree tag yet as it requires | |
376 | * pag_ici_lock to be held exclusive. | |
377 | */ | |
378 | ip->i_flags |= XFS_IRECLAIM; | |
379 | ||
380 | spin_unlock(&ip->i_flags_lock); | |
381 | rcu_read_unlock(); | |
382 | ||
383 | ASSERT(!rwsem_is_locked(&inode->i_rwsem)); | |
384 | error = xfs_reinit_inode(mp, inode); | |
28b4b059 | 385 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
ff7bebeb | 386 | if (error) { |
ff7bebeb DW |
387 | /* |
388 | * Re-initializing the inode failed, and we are in deep | |
389 | * trouble. Try to re-add it to the reclaim list. | |
390 | */ | |
391 | rcu_read_lock(); | |
392 | spin_lock(&ip->i_flags_lock); | |
ff7bebeb | 393 | ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); |
ff7bebeb DW |
394 | ASSERT(ip->i_flags & XFS_IRECLAIMABLE); |
395 | spin_unlock(&ip->i_flags_lock); | |
396 | rcu_read_unlock(); | |
397 | ||
398 | trace_xfs_iget_recycle_fail(ip); | |
399 | return error; | |
400 | } | |
401 | ||
402 | spin_lock(&pag->pag_ici_lock); | |
403 | spin_lock(&ip->i_flags_lock); | |
404 | ||
405 | /* | |
406 | * Clear the per-lifetime state in the inode as we are now effectively | |
407 | * a new inode and need to return to the initial state before reuse | |
408 | * occurs. | |
409 | */ | |
410 | ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; | |
411 | ip->i_flags |= XFS_INEW; | |
412 | xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), | |
413 | XFS_ICI_RECLAIM_TAG); | |
414 | inode->i_state = I_NEW; | |
415 | spin_unlock(&ip->i_flags_lock); | |
416 | spin_unlock(&pag->pag_ici_lock); | |
417 | ||
418 | return 0; | |
419 | } | |
420 | ||
afca6c5b DC |
421 | /* |
422 | * If we are allocating a new inode, then check what was returned is | |
423 | * actually a free, empty inode. If we are not allocating an inode, | |
424 | * then check we didn't find a free inode. | |
425 | * | |
426 | * Returns: | |
427 | * 0 if the inode free state matches the lookup context | |
428 | * -ENOENT if the inode is free and we are not allocating | |
429 | * -EFSCORRUPTED if there is any state mismatch at all | |
430 | */ | |
431 | static int | |
432 | xfs_iget_check_free_state( | |
433 | struct xfs_inode *ip, | |
434 | int flags) | |
435 | { | |
436 | if (flags & XFS_IGET_CREATE) { | |
437 | /* should be a free inode */ | |
438 | if (VFS_I(ip)->i_mode != 0) { | |
439 | xfs_warn(ip->i_mount, | |
440 | "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)", | |
441 | ip->i_ino, VFS_I(ip)->i_mode); | |
baf44fa5 DW |
442 | xfs_agno_mark_sick(ip->i_mount, |
443 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | |
444 | XFS_SICK_AG_INOBT); | |
afca6c5b DC |
445 | return -EFSCORRUPTED; |
446 | } | |
447 | ||
6e73a545 | 448 | if (ip->i_nblocks != 0) { |
afca6c5b DC |
449 | xfs_warn(ip->i_mount, |
450 | "Corruption detected! Free inode 0x%llx has blocks allocated!", | |
451 | ip->i_ino); | |
baf44fa5 DW |
452 | xfs_agno_mark_sick(ip->i_mount, |
453 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | |
454 | XFS_SICK_AG_INOBT); | |
afca6c5b DC |
455 | return -EFSCORRUPTED; |
456 | } | |
457 | return 0; | |
458 | } | |
459 | ||
460 | /* should be an allocated inode */ | |
461 | if (VFS_I(ip)->i_mode == 0) | |
462 | return -ENOENT; | |
463 | ||
464 | return 0; | |
465 | } | |
466 | ||
ab23a776 | 467 | /* Make all pending inactivation work start immediately. */ |
2254a739 | 468 | static bool |
ab23a776 DC |
469 | xfs_inodegc_queue_all( |
470 | struct xfs_mount *mp) | |
471 | { | |
472 | struct xfs_inodegc *gc; | |
473 | int cpu; | |
2254a739 | 474 | bool ret = false; |
ab23a776 | 475 | |
62334fab | 476 | for_each_cpu(cpu, &mp->m_inodegc_cpumask) { |
ab23a776 | 477 | gc = per_cpu_ptr(mp->m_inodegc, cpu); |
2254a739 | 478 | if (!llist_empty(&gc->list)) { |
7cf2b0f9 | 479 | mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0); |
2254a739 DW |
480 | ret = true; |
481 | } | |
ab23a776 | 482 | } |
2254a739 DW |
483 | |
484 | return ret; | |
ab23a776 DC |
485 | } |
486 | ||
d4d12c02 DC |
487 | /* Wait for all queued work and collect errors */ |
488 | static int | |
489 | xfs_inodegc_wait_all( | |
490 | struct xfs_mount *mp) | |
491 | { | |
492 | int cpu; | |
493 | int error = 0; | |
494 | ||
495 | flush_workqueue(mp->m_inodegc_wq); | |
62334fab | 496 | for_each_cpu(cpu, &mp->m_inodegc_cpumask) { |
d4d12c02 DC |
497 | struct xfs_inodegc *gc; |
498 | ||
499 | gc = per_cpu_ptr(mp->m_inodegc, cpu); | |
500 | if (gc->error && !error) | |
501 | error = gc->error; | |
502 | gc->error = 0; | |
503 | } | |
504 | ||
505 | return error; | |
506 | } | |
507 | ||
33479e05 DC |
508 | /* |
509 | * Check the validity of the inode we just found it the cache | |
510 | */ | |
511 | static int | |
512 | xfs_iget_cache_hit( | |
513 | struct xfs_perag *pag, | |
514 | struct xfs_inode *ip, | |
515 | xfs_ino_t ino, | |
516 | int flags, | |
517 | int lock_flags) __releases(RCU) | |
518 | { | |
519 | struct inode *inode = VFS_I(ip); | |
520 | struct xfs_mount *mp = ip->i_mount; | |
521 | int error; | |
522 | ||
523 | /* | |
524 | * check for re-use of an inode within an RCU grace period due to the | |
525 | * radix tree nodes not being updated yet. We monitor for this by | |
526 | * setting the inode number to zero before freeing the inode structure. | |
527 | * If the inode has been reallocated and set up, then the inode number | |
528 | * will not match, so check for that, too. | |
529 | */ | |
530 | spin_lock(&ip->i_flags_lock); | |
77b4d286 DW |
531 | if (ip->i_ino != ino) |
532 | goto out_skip; | |
33479e05 DC |
533 | |
534 | /* | |
535 | * If we are racing with another cache hit that is currently | |
536 | * instantiating this inode or currently recycling it out of | |
ff7bebeb | 537 | * reclaimable state, wait for the initialisation to complete |
33479e05 DC |
538 | * before continuing. |
539 | * | |
ab23a776 DC |
540 | * If we're racing with the inactivation worker we also want to wait. |
541 | * If we're creating a new file, it's possible that the worker | |
542 | * previously marked the inode as free on disk but hasn't finished | |
543 | * updating the incore state yet. The AGI buffer will be dirty and | |
544 | * locked to the icreate transaction, so a synchronous push of the | |
545 | * inodegc workers would result in deadlock. For a regular iget, the | |
546 | * worker is running already, so we might as well wait. | |
547 | * | |
33479e05 DC |
548 | * XXX(hch): eventually we should do something equivalent to |
549 | * wait_on_inode to wait for these flags to be cleared | |
550 | * instead of polling for it. | |
551 | */ | |
ab23a776 | 552 | if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING)) |
77b4d286 | 553 | goto out_skip; |
33479e05 | 554 | |
ab23a776 DC |
555 | if (ip->i_flags & XFS_NEED_INACTIVE) { |
556 | /* Unlinked inodes cannot be re-grabbed. */ | |
557 | if (VFS_I(ip)->i_nlink == 0) { | |
558 | error = -ENOENT; | |
559 | goto out_error; | |
560 | } | |
561 | goto out_inodegc_flush; | |
562 | } | |
563 | ||
33479e05 | 564 | /* |
afca6c5b DC |
565 | * Check the inode free state is valid. This also detects lookup |
566 | * racing with unlinks. | |
33479e05 | 567 | */ |
afca6c5b DC |
568 | error = xfs_iget_check_free_state(ip, flags); |
569 | if (error) | |
33479e05 | 570 | goto out_error; |
33479e05 | 571 | |
77b4d286 DW |
572 | /* Skip inodes that have no vfs state. */ |
573 | if ((flags & XFS_IGET_INCORE) && | |
574 | (ip->i_flags & XFS_IRECLAIMABLE)) | |
575 | goto out_skip; | |
378f681c | 576 | |
77b4d286 DW |
577 | /* The inode fits the selection criteria; process it. */ |
578 | if (ip->i_flags & XFS_IRECLAIMABLE) { | |
ff7bebeb DW |
579 | /* Drops i_flags_lock and RCU read lock. */ |
580 | error = xfs_iget_recycle(pag, ip); | |
28b4b059 LL |
581 | if (error == -EAGAIN) |
582 | goto out_skip; | |
ff7bebeb DW |
583 | if (error) |
584 | return error; | |
33479e05 DC |
585 | } else { |
586 | /* If the VFS inode is being torn down, pause and try again. */ | |
77b4d286 DW |
587 | if (!igrab(inode)) |
588 | goto out_skip; | |
33479e05 DC |
589 | |
590 | /* We've got a live one. */ | |
591 | spin_unlock(&ip->i_flags_lock); | |
592 | rcu_read_unlock(); | |
593 | trace_xfs_iget_hit(ip); | |
594 | } | |
595 | ||
596 | if (lock_flags != 0) | |
597 | xfs_ilock(ip, lock_flags); | |
598 | ||
378f681c | 599 | if (!(flags & XFS_IGET_INCORE)) |
dae2f8ed | 600 | xfs_iflags_clear(ip, XFS_ISTALE); |
ff6d6af2 | 601 | XFS_STATS_INC(mp, xs_ig_found); |
33479e05 DC |
602 | |
603 | return 0; | |
604 | ||
77b4d286 DW |
605 | out_skip: |
606 | trace_xfs_iget_skip(ip); | |
607 | XFS_STATS_INC(mp, xs_ig_frecycle); | |
608 | error = -EAGAIN; | |
33479e05 DC |
609 | out_error: |
610 | spin_unlock(&ip->i_flags_lock); | |
611 | rcu_read_unlock(); | |
612 | return error; | |
ab23a776 DC |
613 | |
614 | out_inodegc_flush: | |
615 | spin_unlock(&ip->i_flags_lock); | |
616 | rcu_read_unlock(); | |
617 | /* | |
618 | * Do not wait for the workers, because the caller could hold an AGI | |
619 | * buffer lock. We're just going to sleep in a loop anyway. | |
620 | */ | |
621 | if (xfs_is_inodegc_enabled(mp)) | |
622 | xfs_inodegc_queue_all(mp); | |
623 | return -EAGAIN; | |
33479e05 DC |
624 | } |
625 | ||
33479e05 DC |
626 | static int |
627 | xfs_iget_cache_miss( | |
628 | struct xfs_mount *mp, | |
629 | struct xfs_perag *pag, | |
630 | xfs_trans_t *tp, | |
631 | xfs_ino_t ino, | |
632 | struct xfs_inode **ipp, | |
633 | int flags, | |
634 | int lock_flags) | |
635 | { | |
636 | struct xfs_inode *ip; | |
637 | int error; | |
638 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); | |
33479e05 DC |
639 | |
640 | ip = xfs_inode_alloc(mp, ino); | |
641 | if (!ip) | |
2451337d | 642 | return -ENOMEM; |
33479e05 | 643 | |
498f0adb | 644 | error = xfs_imap(pag, tp, ip->i_ino, &ip->i_imap, flags); |
33479e05 DC |
645 | if (error) |
646 | goto out_destroy; | |
647 | ||
bb8a66af CH |
648 | /* |
649 | * For version 5 superblocks, if we are initialising a new inode and we | |
0560f31a | 650 | * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can |
bb8a66af CH |
651 | * simply build the new inode core with a random generation number. |
652 | * | |
653 | * For version 4 (and older) superblocks, log recovery is dependent on | |
965e0a1a | 654 | * the i_flushiter field being initialised from the current on-disk |
bb8a66af CH |
655 | * value and hence we must also read the inode off disk even when |
656 | * initializing new inodes. | |
657 | */ | |
38c26bfd | 658 | if (xfs_has_v3inodes(mp) && |
0560f31a | 659 | (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) { |
a251c17a | 660 | VFS_I(ip)->i_generation = get_random_u32(); |
bb8a66af | 661 | } else { |
bb8a66af CH |
662 | struct xfs_buf *bp; |
663 | ||
af9dcdde | 664 | error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp); |
bb8a66af CH |
665 | if (error) |
666 | goto out_destroy; | |
667 | ||
af9dcdde CH |
668 | error = xfs_inode_from_disk(ip, |
669 | xfs_buf_offset(bp, ip->i_imap.im_boffset)); | |
bb8a66af CH |
670 | if (!error) |
671 | xfs_buf_set_ref(bp, XFS_INO_REF); | |
baf44fa5 DW |
672 | else |
673 | xfs_inode_mark_sick(ip, XFS_SICK_INO_CORE); | |
bb8a66af CH |
674 | xfs_trans_brelse(tp, bp); |
675 | ||
676 | if (error) | |
677 | goto out_destroy; | |
678 | } | |
679 | ||
33479e05 DC |
680 | trace_xfs_iget_miss(ip); |
681 | ||
ee457001 | 682 | /* |
afca6c5b DC |
683 | * Check the inode free state is valid. This also detects lookup |
684 | * racing with unlinks. | |
ee457001 | 685 | */ |
afca6c5b DC |
686 | error = xfs_iget_check_free_state(ip, flags); |
687 | if (error) | |
33479e05 | 688 | goto out_destroy; |
33479e05 DC |
689 | |
690 | /* | |
691 | * Preload the radix tree so we can insert safely under the | |
692 | * write spinlock. Note that we cannot sleep inside the preload | |
94a69db2 | 693 | * region. |
33479e05 | 694 | */ |
94a69db2 | 695 | if (radix_tree_preload(GFP_KERNEL | __GFP_NOLOCKDEP)) { |
2451337d | 696 | error = -EAGAIN; |
33479e05 DC |
697 | goto out_destroy; |
698 | } | |
699 | ||
700 | /* | |
701 | * Because the inode hasn't been added to the radix-tree yet it can't | |
702 | * be found by another thread, so we can do the non-sleeping lock here. | |
703 | */ | |
704 | if (lock_flags) { | |
705 | if (!xfs_ilock_nowait(ip, lock_flags)) | |
706 | BUG(); | |
707 | } | |
708 | ||
709 | /* | |
710 | * These values must be set before inserting the inode into the radix | |
711 | * tree as the moment it is inserted a concurrent lookup (allowed by the | |
712 | * RCU locking mechanism) can find it and that lookup must see that this | |
713 | * is an inode currently under construction (i.e. that XFS_INEW is set). | |
714 | * The ip->i_flags_lock that protects the XFS_INEW flag forms the | |
715 | * memory barrier that ensures this detection works correctly at lookup | |
716 | * time. | |
717 | */ | |
33479e05 | 718 | if (flags & XFS_IGET_DONTCACHE) |
2c567af4 | 719 | d_mark_dontcache(VFS_I(ip)); |
113a5683 CS |
720 | ip->i_udquot = NULL; |
721 | ip->i_gdquot = NULL; | |
92f8ff73 | 722 | ip->i_pdquot = NULL; |
1a3f1afb | 723 | xfs_iflags_set(ip, XFS_INEW); |
33479e05 DC |
724 | |
725 | /* insert the new inode */ | |
726 | spin_lock(&pag->pag_ici_lock); | |
727 | error = radix_tree_insert(&pag->pag_ici_root, agino, ip); | |
728 | if (unlikely(error)) { | |
729 | WARN_ON(error != -EEXIST); | |
ff6d6af2 | 730 | XFS_STATS_INC(mp, xs_ig_dup); |
2451337d | 731 | error = -EAGAIN; |
33479e05 DC |
732 | goto out_preload_end; |
733 | } | |
734 | spin_unlock(&pag->pag_ici_lock); | |
735 | radix_tree_preload_end(); | |
736 | ||
737 | *ipp = ip; | |
738 | return 0; | |
739 | ||
740 | out_preload_end: | |
741 | spin_unlock(&pag->pag_ici_lock); | |
742 | radix_tree_preload_end(); | |
743 | if (lock_flags) | |
744 | xfs_iunlock(ip, lock_flags); | |
745 | out_destroy: | |
746 | __destroy_inode(VFS_I(ip)); | |
747 | xfs_inode_free(ip); | |
748 | return error; | |
749 | } | |
750 | ||
751 | /* | |
02511a5a DC |
752 | * Look up an inode by number in the given file system. The inode is looked up |
753 | * in the cache held in each AG. If the inode is found in the cache, initialise | |
754 | * the vfs inode if necessary. | |
33479e05 | 755 | * |
02511a5a DC |
756 | * If it is not in core, read it in from the file system's device, add it to the |
757 | * cache and initialise the vfs inode. | |
33479e05 DC |
758 | * |
759 | * The inode is locked according to the value of the lock_flags parameter. | |
02511a5a DC |
760 | * Inode lookup is only done during metadata operations and not as part of the |
761 | * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup. | |
33479e05 DC |
762 | */ |
763 | int | |
764 | xfs_iget( | |
02511a5a DC |
765 | struct xfs_mount *mp, |
766 | struct xfs_trans *tp, | |
767 | xfs_ino_t ino, | |
768 | uint flags, | |
769 | uint lock_flags, | |
770 | struct xfs_inode **ipp) | |
33479e05 | 771 | { |
02511a5a DC |
772 | struct xfs_inode *ip; |
773 | struct xfs_perag *pag; | |
774 | xfs_agino_t agino; | |
775 | int error; | |
33479e05 | 776 | |
33479e05 DC |
777 | ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0); |
778 | ||
779 | /* reject inode numbers outside existing AGs */ | |
05aba195 | 780 | if (!xfs_verify_ino(mp, ino)) |
2451337d | 781 | return -EINVAL; |
33479e05 | 782 | |
ff6d6af2 | 783 | XFS_STATS_INC(mp, xs_ig_attempts); |
8774cf8b | 784 | |
33479e05 DC |
785 | /* get the perag structure and ensure that it's inode capable */ |
786 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); | |
787 | agino = XFS_INO_TO_AGINO(mp, ino); | |
788 | ||
789 | again: | |
790 | error = 0; | |
791 | rcu_read_lock(); | |
792 | ip = radix_tree_lookup(&pag->pag_ici_root, agino); | |
793 | ||
794 | if (ip) { | |
795 | error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); | |
796 | if (error) | |
797 | goto out_error_or_again; | |
798 | } else { | |
799 | rcu_read_unlock(); | |
378f681c | 800 | if (flags & XFS_IGET_INCORE) { |
ed438b47 | 801 | error = -ENODATA; |
378f681c DW |
802 | goto out_error_or_again; |
803 | } | |
ff6d6af2 | 804 | XFS_STATS_INC(mp, xs_ig_missed); |
33479e05 DC |
805 | |
806 | error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, | |
807 | flags, lock_flags); | |
808 | if (error) | |
809 | goto out_error_or_again; | |
810 | } | |
811 | xfs_perag_put(pag); | |
812 | ||
813 | *ipp = ip; | |
814 | ||
815 | /* | |
58c90473 | 816 | * If we have a real type for an on-disk inode, we can setup the inode |
132c460e YX |
817 | * now. If it's a new inode being created, xfs_init_new_inode will |
818 | * handle it. | |
33479e05 | 819 | */ |
c19b3b05 | 820 | if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0) |
58c90473 | 821 | xfs_setup_existing_inode(ip); |
33479e05 DC |
822 | return 0; |
823 | ||
824 | out_error_or_again: | |
302436c2 DW |
825 | if (!(flags & (XFS_IGET_INCORE | XFS_IGET_NORETRY)) && |
826 | error == -EAGAIN) { | |
33479e05 DC |
827 | delay(1); |
828 | goto again; | |
829 | } | |
830 | xfs_perag_put(pag); | |
831 | return error; | |
832 | } | |
833 | ||
dcf60691 DW |
834 | /* |
835 | * Get a metadata inode. | |
836 | * | |
7297fd0b DW |
837 | * The metafile type must match the file mode exactly, and for files in the |
838 | * metadata directory tree, it must match the inode's metatype exactly. | |
dcf60691 DW |
839 | */ |
840 | int | |
841 | xfs_trans_metafile_iget( | |
842 | struct xfs_trans *tp, | |
843 | xfs_ino_t ino, | |
844 | enum xfs_metafile_type metafile_type, | |
845 | struct xfs_inode **ipp) | |
846 | { | |
847 | struct xfs_mount *mp = tp->t_mountp; | |
848 | struct xfs_inode *ip; | |
849 | umode_t mode; | |
850 | int error; | |
851 | ||
852 | error = xfs_iget(mp, tp, ino, 0, 0, &ip); | |
5d9b54a4 | 853 | if (error == -EFSCORRUPTED || error == -EINVAL) |
dcf60691 DW |
854 | goto whine; |
855 | if (error) | |
856 | return error; | |
857 | ||
858 | if (VFS_I(ip)->i_nlink == 0) | |
859 | goto bad_rele; | |
860 | ||
861 | if (metafile_type == XFS_METAFILE_DIR) | |
862 | mode = S_IFDIR; | |
863 | else | |
864 | mode = S_IFREG; | |
865 | if (inode_wrong_type(VFS_I(ip), mode)) | |
866 | goto bad_rele; | |
7297fd0b DW |
867 | if (xfs_has_metadir(mp)) { |
868 | if (!xfs_is_metadir_inode(ip)) | |
869 | goto bad_rele; | |
870 | if (metafile_type != ip->i_metatype) | |
871 | goto bad_rele; | |
872 | } | |
dcf60691 DW |
873 | |
874 | *ipp = ip; | |
875 | return 0; | |
876 | bad_rele: | |
877 | xfs_irele(ip); | |
878 | whine: | |
7297fd0b DW |
879 | xfs_err(mp, "metadata inode 0x%llx type %u is corrupt", ino, |
880 | metafile_type); | |
be42fc13 | 881 | xfs_fs_mark_sick(mp, XFS_SICK_FS_METADIR); |
dcf60691 DW |
882 | return -EFSCORRUPTED; |
883 | } | |
884 | ||
885 | /* Grab a metadata file if the caller doesn't already have a transaction. */ | |
886 | int | |
887 | xfs_metafile_iget( | |
888 | struct xfs_mount *mp, | |
889 | xfs_ino_t ino, | |
890 | enum xfs_metafile_type metafile_type, | |
891 | struct xfs_inode **ipp) | |
892 | { | |
893 | struct xfs_trans *tp; | |
894 | int error; | |
895 | ||
896 | error = xfs_trans_alloc_empty(mp, &tp); | |
897 | if (error) | |
898 | return error; | |
899 | ||
900 | error = xfs_trans_metafile_iget(tp, ino, metafile_type, ipp); | |
901 | xfs_trans_cancel(tp); | |
902 | return error; | |
903 | } | |
904 | ||
e3a20c0b DC |
905 | /* |
906 | * Grab the inode for reclaim exclusively. | |
50718b8d DC |
907 | * |
908 | * We have found this inode via a lookup under RCU, so the inode may have | |
909 | * already been freed, or it may be in the process of being recycled by | |
910 | * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode | |
911 | * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE | |
912 | * will not be set. Hence we need to check for both these flag conditions to | |
913 | * avoid inodes that are no longer reclaim candidates. | |
914 | * | |
915 | * Note: checking for other state flags here, under the i_flags_lock or not, is | |
916 | * racy and should be avoided. Those races should be resolved only after we have | |
917 | * ensured that we are able to reclaim this inode and the world can see that we | |
918 | * are going to reclaim it. | |
919 | * | |
920 | * Return true if we grabbed it, false otherwise. | |
e3a20c0b | 921 | */ |
50718b8d | 922 | static bool |
f1bc5c56 | 923 | xfs_reclaim_igrab( |
9492750a | 924 | struct xfs_inode *ip, |
b26b2bf1 | 925 | struct xfs_icwalk *icw) |
e3a20c0b | 926 | { |
1a3e8f3d DC |
927 | ASSERT(rcu_read_lock_held()); |
928 | ||
e3a20c0b | 929 | spin_lock(&ip->i_flags_lock); |
1a3e8f3d DC |
930 | if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || |
931 | __xfs_iflags_test(ip, XFS_IRECLAIM)) { | |
932 | /* not a reclaim candidate. */ | |
e3a20c0b | 933 | spin_unlock(&ip->i_flags_lock); |
50718b8d | 934 | return false; |
e3a20c0b | 935 | } |
9492750a DW |
936 | |
937 | /* Don't reclaim a sick inode unless the caller asked for it. */ | |
938 | if (ip->i_sick && | |
b26b2bf1 | 939 | (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) { |
9492750a DW |
940 | spin_unlock(&ip->i_flags_lock); |
941 | return false; | |
942 | } | |
943 | ||
e3a20c0b DC |
944 | __xfs_iflags_set(ip, XFS_IRECLAIM); |
945 | spin_unlock(&ip->i_flags_lock); | |
50718b8d | 946 | return true; |
e3a20c0b DC |
947 | } |
948 | ||
777df5af | 949 | /* |
02511a5a DC |
950 | * Inode reclaim is non-blocking, so the default action if progress cannot be |
951 | * made is to "requeue" the inode for reclaim by unlocking it and clearing the | |
952 | * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about | |
953 | * blocking anymore and hence we can wait for the inode to be able to reclaim | |
954 | * it. | |
777df5af | 955 | * |
02511a5a DC |
956 | * We do no IO here - if callers require inodes to be cleaned they must push the |
957 | * AIL first to trigger writeback of dirty inodes. This enables writeback to be | |
958 | * done in the background in a non-blocking manner, and enables memory reclaim | |
959 | * to make progress without blocking. | |
777df5af | 960 | */ |
4d0bab3a | 961 | static void |
c8e20be0 | 962 | xfs_reclaim_inode( |
75f3cb13 | 963 | struct xfs_inode *ip, |
50718b8d | 964 | struct xfs_perag *pag) |
fce08f2f | 965 | { |
8a17d7dd | 966 | xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ |
777df5af | 967 | |
9552e14d | 968 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) |
617825fe | 969 | goto out; |
718ecc50 | 970 | if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING)) |
9552e14d | 971 | goto out_iunlock; |
7a3be02b | 972 | |
01728b44 DC |
973 | /* |
974 | * Check for log shutdown because aborting the inode can move the log | |
975 | * tail and corrupt in memory state. This is fine if the log is shut | |
976 | * down, but if the log is still active and only the mount is shut down | |
977 | * then the in-memory log tail movement caused by the abort can be | |
978 | * incorrectly propagated to disk. | |
979 | */ | |
980 | if (xlog_is_shutdown(ip->i_mount->m_log)) { | |
777df5af | 981 | xfs_iunpin_wait(ip); |
09234a63 DC |
982 | /* |
983 | * Avoid a ABBA deadlock on the inode cluster buffer vs | |
984 | * concurrent xfs_ifree_cluster() trying to mark the inode | |
985 | * stale. We don't need the inode locked to run the flush abort | |
986 | * code, but the flush abort needs to lock the cluster buffer. | |
987 | */ | |
988 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
d2d7c047 | 989 | xfs_iflush_shutdown_abort(ip); |
09234a63 | 990 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
777df5af DC |
991 | goto reclaim; |
992 | } | |
617825fe | 993 | if (xfs_ipincount(ip)) |
718ecc50 | 994 | goto out_clear_flush; |
617825fe | 995 | if (!xfs_inode_clean(ip)) |
718ecc50 | 996 | goto out_clear_flush; |
8a48088f | 997 | |
718ecc50 | 998 | xfs_iflags_clear(ip, XFS_IFLUSHING); |
777df5af | 999 | reclaim: |
ab23a776 | 1000 | trace_xfs_inode_reclaiming(ip); |
98efe8af | 1001 | |
8a17d7dd DC |
1002 | /* |
1003 | * Because we use RCU freeing we need to ensure the inode always appears | |
1004 | * to be reclaimed with an invalid inode number when in the free state. | |
98efe8af | 1005 | * We do this as early as possible under the ILOCK so that |
f2e9ad21 OS |
1006 | * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to |
1007 | * detect races with us here. By doing this, we guarantee that once | |
1008 | * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that | |
1009 | * it will see either a valid inode that will serialise correctly, or it | |
1010 | * will see an invalid inode that it can skip. | |
8a17d7dd DC |
1011 | */ |
1012 | spin_lock(&ip->i_flags_lock); | |
1013 | ip->i_flags = XFS_IRECLAIM; | |
1014 | ip->i_ino = 0; | |
255794c7 DW |
1015 | ip->i_sick = 0; |
1016 | ip->i_checked = 0; | |
8a17d7dd DC |
1017 | spin_unlock(&ip->i_flags_lock); |
1018 | ||
fad743d7 | 1019 | ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL); |
c8e20be0 | 1020 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
2f11feab | 1021 | |
ff6d6af2 | 1022 | XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); |
2f11feab DC |
1023 | /* |
1024 | * Remove the inode from the per-AG radix tree. | |
1025 | * | |
1026 | * Because radix_tree_delete won't complain even if the item was never | |
1027 | * added to the tree assert that it's been there before to catch | |
1028 | * problems with the inode life time early on. | |
1029 | */ | |
1a427ab0 | 1030 | spin_lock(&pag->pag_ici_lock); |
2f11feab | 1031 | if (!radix_tree_delete(&pag->pag_ici_root, |
8a17d7dd | 1032 | XFS_INO_TO_AGINO(ip->i_mount, ino))) |
2f11feab | 1033 | ASSERT(0); |
c076ae7a | 1034 | xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG); |
1a427ab0 | 1035 | spin_unlock(&pag->pag_ici_lock); |
2f11feab DC |
1036 | |
1037 | /* | |
1038 | * Here we do an (almost) spurious inode lock in order to coordinate | |
1039 | * with inode cache radix tree lookups. This is because the lookup | |
1040 | * can reference the inodes in the cache without taking references. | |
1041 | * | |
1042 | * We make that OK here by ensuring that we wait until the inode is | |
ad637a10 | 1043 | * unlocked after the lookup before we go ahead and free it. |
2f11feab | 1044 | */ |
ad637a10 | 1045 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
3ea06d73 | 1046 | ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot); |
ad637a10 | 1047 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
96355d5a | 1048 | ASSERT(xfs_inode_clean(ip)); |
2f11feab | 1049 | |
8a17d7dd | 1050 | __xfs_inode_free(ip); |
4d0bab3a | 1051 | return; |
8a48088f | 1052 | |
718ecc50 DC |
1053 | out_clear_flush: |
1054 | xfs_iflags_clear(ip, XFS_IFLUSHING); | |
9552e14d | 1055 | out_iunlock: |
8a48088f | 1056 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
9552e14d | 1057 | out: |
617825fe | 1058 | xfs_iflags_clear(ip, XFS_IRECLAIM); |
7a3be02b DC |
1059 | } |
1060 | ||
9492750a DW |
1061 | /* Reclaim sick inodes if we're unmounting or the fs went down. */ |
1062 | static inline bool | |
1063 | xfs_want_reclaim_sick( | |
1064 | struct xfs_mount *mp) | |
1065 | { | |
2e973b2c | 1066 | return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) || |
75c8c50f | 1067 | xfs_is_shutdown(mp); |
9492750a DW |
1068 | } |
1069 | ||
4d0bab3a | 1070 | void |
7a3be02b | 1071 | xfs_reclaim_inodes( |
4d0bab3a | 1072 | struct xfs_mount *mp) |
7a3be02b | 1073 | { |
b26b2bf1 DW |
1074 | struct xfs_icwalk icw = { |
1075 | .icw_flags = 0, | |
9492750a DW |
1076 | }; |
1077 | ||
1078 | if (xfs_want_reclaim_sick(mp)) | |
b26b2bf1 | 1079 | icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK; |
9492750a | 1080 | |
e9c4d8bf | 1081 | while (xfs_group_marked(mp, XG_TYPE_AG, XFS_PERAG_RECLAIM_MARK)) { |
617825fe | 1082 | xfs_ail_push_all_sync(mp->m_ail); |
b26b2bf1 | 1083 | xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw); |
0f4ec0f1 | 1084 | } |
9bf729c0 DC |
1085 | } |
1086 | ||
1087 | /* | |
02511a5a DC |
1088 | * The shrinker infrastructure determines how many inodes we should scan for |
1089 | * reclaim. We want as many clean inodes ready to reclaim as possible, so we | |
1090 | * push the AIL here. We also want to proactively free up memory if we can to | |
1091 | * minimise the amount of work memory reclaim has to do so we kick the | |
1092 | * background reclaim if it isn't already scheduled. | |
9bf729c0 | 1093 | */ |
0a234c6d | 1094 | long |
8daaa831 DC |
1095 | xfs_reclaim_inodes_nr( |
1096 | struct xfs_mount *mp, | |
10be350b | 1097 | unsigned long nr_to_scan) |
9bf729c0 | 1098 | { |
b26b2bf1 DW |
1099 | struct xfs_icwalk icw = { |
1100 | .icw_flags = XFS_ICWALK_FLAG_SCAN_LIMIT, | |
10be350b | 1101 | .icw_scan_limit = min_t(unsigned long, LONG_MAX, nr_to_scan), |
f1bc5c56 DW |
1102 | }; |
1103 | ||
9492750a | 1104 | if (xfs_want_reclaim_sick(mp)) |
b26b2bf1 | 1105 | icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK; |
9492750a | 1106 | |
8daaa831 | 1107 | /* kick background reclaimer and push the AIL */ |
5889608d | 1108 | xfs_reclaim_work_queue(mp); |
8daaa831 | 1109 | xfs_ail_push_all(mp->m_ail); |
a7b339f1 | 1110 | |
b26b2bf1 | 1111 | xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw); |
617825fe | 1112 | return 0; |
8daaa831 | 1113 | } |
9bf729c0 | 1114 | |
8daaa831 DC |
1115 | /* |
1116 | * Return the number of reclaimable inodes in the filesystem for | |
1117 | * the shrinker to determine how much to reclaim. | |
1118 | */ | |
10be350b | 1119 | long |
8daaa831 DC |
1120 | xfs_reclaim_inodes_count( |
1121 | struct xfs_mount *mp) | |
1122 | { | |
e9c4d8bf | 1123 | XA_STATE (xas, &mp->m_groups[XG_TYPE_AG].xa, 0); |
10be350b | 1124 | long reclaimable = 0; |
866cf1dd | 1125 | struct xfs_perag *pag; |
9bf729c0 | 1126 | |
866cf1dd CH |
1127 | rcu_read_lock(); |
1128 | xas_for_each_marked(&xas, pag, ULONG_MAX, XFS_PERAG_RECLAIM_MARK) { | |
1129 | trace_xfs_reclaim_inodes_count(pag, _THIS_IP_); | |
70e60ce7 | 1130 | reclaimable += pag->pag_ici_reclaimable; |
866cf1dd CH |
1131 | } |
1132 | rcu_read_unlock(); | |
1133 | ||
9bf729c0 DC |
1134 | return reclaimable; |
1135 | } | |
1136 | ||
39b1cfd7 | 1137 | STATIC bool |
b26b2bf1 | 1138 | xfs_icwalk_match_id( |
3e3f9f58 | 1139 | struct xfs_inode *ip, |
b26b2bf1 | 1140 | struct xfs_icwalk *icw) |
3e3f9f58 | 1141 | { |
b26b2bf1 DW |
1142 | if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) && |
1143 | !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid)) | |
39b1cfd7 | 1144 | return false; |
3e3f9f58 | 1145 | |
b26b2bf1 DW |
1146 | if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) && |
1147 | !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid)) | |
39b1cfd7 | 1148 | return false; |
1b556048 | 1149 | |
b26b2bf1 DW |
1150 | if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) && |
1151 | ip->i_projid != icw->icw_prid) | |
39b1cfd7 | 1152 | return false; |
1b556048 | 1153 | |
39b1cfd7 | 1154 | return true; |
3e3f9f58 BF |
1155 | } |
1156 | ||
f4526397 BF |
1157 | /* |
1158 | * A union-based inode filtering algorithm. Process the inode if any of the | |
1159 | * criteria match. This is for global/internal scans only. | |
1160 | */ | |
39b1cfd7 | 1161 | STATIC bool |
b26b2bf1 | 1162 | xfs_icwalk_match_id_union( |
f4526397 | 1163 | struct xfs_inode *ip, |
b26b2bf1 | 1164 | struct xfs_icwalk *icw) |
f4526397 | 1165 | { |
b26b2bf1 DW |
1166 | if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) && |
1167 | uid_eq(VFS_I(ip)->i_uid, icw->icw_uid)) | |
39b1cfd7 | 1168 | return true; |
f4526397 | 1169 | |
b26b2bf1 DW |
1170 | if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) && |
1171 | gid_eq(VFS_I(ip)->i_gid, icw->icw_gid)) | |
39b1cfd7 | 1172 | return true; |
f4526397 | 1173 | |
b26b2bf1 DW |
1174 | if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) && |
1175 | ip->i_projid == icw->icw_prid) | |
39b1cfd7 | 1176 | return true; |
f4526397 | 1177 | |
39b1cfd7 | 1178 | return false; |
f4526397 BF |
1179 | } |
1180 | ||
a91bf992 DW |
1181 | /* |
1182 | * Is this inode @ip eligible for eof/cow block reclamation, given some | |
b26b2bf1 | 1183 | * filtering parameters @icw? The inode is eligible if @icw is null or |
a91bf992 DW |
1184 | * if the predicate functions match. |
1185 | */ | |
1186 | static bool | |
b26b2bf1 | 1187 | xfs_icwalk_match( |
a91bf992 | 1188 | struct xfs_inode *ip, |
b26b2bf1 | 1189 | struct xfs_icwalk *icw) |
a91bf992 | 1190 | { |
39b1cfd7 | 1191 | bool match; |
a91bf992 | 1192 | |
b26b2bf1 | 1193 | if (!icw) |
a91bf992 DW |
1194 | return true; |
1195 | ||
b26b2bf1 DW |
1196 | if (icw->icw_flags & XFS_ICWALK_FLAG_UNION) |
1197 | match = xfs_icwalk_match_id_union(ip, icw); | |
a91bf992 | 1198 | else |
b26b2bf1 | 1199 | match = xfs_icwalk_match_id(ip, icw); |
a91bf992 DW |
1200 | if (!match) |
1201 | return false; | |
1202 | ||
1203 | /* skip the inode if the file size is too small */ | |
b26b2bf1 DW |
1204 | if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) && |
1205 | XFS_ISIZE(ip) < icw->icw_min_file_size) | |
a91bf992 DW |
1206 | return false; |
1207 | ||
1208 | return true; | |
1209 | } | |
1210 | ||
4d0bab3a DC |
1211 | /* |
1212 | * This is a fast pass over the inode cache to try to get reclaim moving on as | |
1213 | * many inodes as possible in a short period of time. It kicks itself every few | |
1214 | * seconds, as well as being kicked by the inode cache shrinker when memory | |
02511a5a | 1215 | * goes low. |
4d0bab3a DC |
1216 | */ |
1217 | void | |
1218 | xfs_reclaim_worker( | |
1219 | struct work_struct *work) | |
1220 | { | |
1221 | struct xfs_mount *mp = container_of(to_delayed_work(work), | |
1222 | struct xfs_mount, m_reclaim_work); | |
4d0bab3a | 1223 | |
f1bc5c56 | 1224 | xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL); |
4d0bab3a DC |
1225 | xfs_reclaim_work_queue(mp); |
1226 | } | |
1227 | ||
41176a68 BF |
1228 | STATIC int |
1229 | xfs_inode_free_eofblocks( | |
1230 | struct xfs_inode *ip, | |
b26b2bf1 | 1231 | struct xfs_icwalk *icw, |
0fa4a10a | 1232 | unsigned int *lockflags) |
41176a68 | 1233 | { |
390600f8 | 1234 | bool wait; |
390600f8 | 1235 | |
b26b2bf1 | 1236 | wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC); |
5400da7d | 1237 | |
ce2d3bbe DW |
1238 | if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS)) |
1239 | return 0; | |
1240 | ||
41176a68 BF |
1241 | /* |
1242 | * If the mapping is dirty the operation can block and wait for some | |
1243 | * time. Unless we are waiting, skip it. | |
1244 | */ | |
390600f8 | 1245 | if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) |
41176a68 BF |
1246 | return 0; |
1247 | ||
b26b2bf1 | 1248 | if (!xfs_icwalk_match(ip, icw)) |
a91bf992 | 1249 | return 0; |
3e3f9f58 | 1250 | |
a36b9261 BF |
1251 | /* |
1252 | * If the caller is waiting, return -EAGAIN to keep the background | |
1253 | * scanner moving and revisit the inode in a subsequent pass. | |
1254 | */ | |
c3155097 | 1255 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { |
390600f8 DW |
1256 | if (wait) |
1257 | return -EAGAIN; | |
1258 | return 0; | |
a36b9261 | 1259 | } |
0fa4a10a | 1260 | *lockflags |= XFS_IOLOCK_EXCL; |
390600f8 | 1261 | |
610b2916 | 1262 | if (xfs_can_free_eofblocks(ip)) |
2b156ff8 DW |
1263 | return xfs_free_eofblocks(ip); |
1264 | ||
9372dce0 | 1265 | /* inode could be preallocated */ |
2b156ff8 DW |
1266 | trace_xfs_inode_free_eofblocks_invalid(ip); |
1267 | xfs_inode_clear_eofblocks_tag(ip); | |
1268 | return 0; | |
41176a68 BF |
1269 | } |
1270 | ||
83104d44 | 1271 | static void |
ce2d3bbe DW |
1272 | xfs_blockgc_set_iflag( |
1273 | struct xfs_inode *ip, | |
ce2d3bbe | 1274 | unsigned long iflag) |
27b52867 | 1275 | { |
ce2d3bbe DW |
1276 | struct xfs_mount *mp = ip->i_mount; |
1277 | struct xfs_perag *pag; | |
ce2d3bbe DW |
1278 | |
1279 | ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0); | |
27b52867 | 1280 | |
85a6e764 CH |
1281 | /* |
1282 | * Don't bother locking the AG and looking up in the radix trees | |
1283 | * if we already know that we have the tag set. | |
1284 | */ | |
ce2d3bbe | 1285 | if (ip->i_flags & iflag) |
85a6e764 CH |
1286 | return; |
1287 | spin_lock(&ip->i_flags_lock); | |
ce2d3bbe | 1288 | ip->i_flags |= iflag; |
85a6e764 CH |
1289 | spin_unlock(&ip->i_flags_lock); |
1290 | ||
27b52867 BF |
1291 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
1292 | spin_lock(&pag->pag_ici_lock); | |
27b52867 | 1293 | |
c076ae7a DW |
1294 | xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), |
1295 | XFS_ICI_BLOCKGC_TAG); | |
27b52867 BF |
1296 | |
1297 | spin_unlock(&pag->pag_ici_lock); | |
1298 | xfs_perag_put(pag); | |
1299 | } | |
1300 | ||
1301 | void | |
83104d44 | 1302 | xfs_inode_set_eofblocks_tag( |
27b52867 | 1303 | xfs_inode_t *ip) |
83104d44 DW |
1304 | { |
1305 | trace_xfs_inode_set_eofblocks_tag(ip); | |
9669f51d | 1306 | return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS); |
83104d44 DW |
1307 | } |
1308 | ||
1309 | static void | |
ce2d3bbe DW |
1310 | xfs_blockgc_clear_iflag( |
1311 | struct xfs_inode *ip, | |
1312 | unsigned long iflag) | |
27b52867 | 1313 | { |
ce2d3bbe DW |
1314 | struct xfs_mount *mp = ip->i_mount; |
1315 | struct xfs_perag *pag; | |
1316 | bool clear_tag; | |
1317 | ||
1318 | ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0); | |
27b52867 | 1319 | |
85a6e764 | 1320 | spin_lock(&ip->i_flags_lock); |
ce2d3bbe DW |
1321 | ip->i_flags &= ~iflag; |
1322 | clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0; | |
85a6e764 CH |
1323 | spin_unlock(&ip->i_flags_lock); |
1324 | ||
ce2d3bbe DW |
1325 | if (!clear_tag) |
1326 | return; | |
1327 | ||
27b52867 BF |
1328 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
1329 | spin_lock(&pag->pag_ici_lock); | |
27b52867 | 1330 | |
c076ae7a DW |
1331 | xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), |
1332 | XFS_ICI_BLOCKGC_TAG); | |
27b52867 BF |
1333 | |
1334 | spin_unlock(&pag->pag_ici_lock); | |
1335 | xfs_perag_put(pag); | |
1336 | } | |
1337 | ||
83104d44 DW |
1338 | void |
1339 | xfs_inode_clear_eofblocks_tag( | |
1340 | xfs_inode_t *ip) | |
1341 | { | |
1342 | trace_xfs_inode_clear_eofblocks_tag(ip); | |
ce2d3bbe | 1343 | return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS); |
83104d44 DW |
1344 | } |
1345 | ||
1346 | /* | |
90a71daa | 1347 | * Prepare to free COW fork blocks from an inode. |
83104d44 | 1348 | */ |
be78ff0e DW |
1349 | static bool |
1350 | xfs_prep_free_cowblocks( | |
90a71daa BF |
1351 | struct xfs_inode *ip, |
1352 | struct xfs_icwalk *icw) | |
83104d44 | 1353 | { |
90a71daa BF |
1354 | bool sync; |
1355 | ||
1356 | sync = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC); | |
1357 | ||
39937234 BF |
1358 | /* |
1359 | * Just clear the tag if we have an empty cow fork or none at all. It's | |
1360 | * possible the inode was fully unshared since it was originally tagged. | |
1361 | */ | |
51d62690 | 1362 | if (!xfs_inode_has_cow_data(ip)) { |
83104d44 DW |
1363 | trace_xfs_inode_free_cowblocks_invalid(ip); |
1364 | xfs_inode_clear_cowblocks_tag(ip); | |
be78ff0e | 1365 | return false; |
83104d44 DW |
1366 | } |
1367 | ||
1368 | /* | |
90a71daa BF |
1369 | * A cowblocks trim of an inode can have a significant effect on |
1370 | * fragmentation even when a reasonable COW extent size hint is set. | |
1371 | * Therefore, we prefer to not process cowblocks unless they are clean | |
1372 | * and idle. We can never process a cowblocks inode that is dirty or has | |
1373 | * in-flight I/O under any circumstances, because outstanding writeback | |
1374 | * or dio expects targeted COW fork blocks exist through write | |
1375 | * completion where they can be remapped into the data fork. | |
1376 | * | |
1377 | * Therefore, the heuristic used here is to never process inodes | |
1378 | * currently opened for write from background (i.e. non-sync) scans. For | |
1379 | * sync scans, use the pagecache/dio state of the inode to ensure we | |
1380 | * never free COW fork blocks out from under pending I/O. | |
83104d44 | 1381 | */ |
90a71daa BF |
1382 | if (!sync && inode_is_open_for_write(VFS_I(ip))) |
1383 | return false; | |
4390f019 | 1384 | return xfs_can_free_cowblocks(ip); |
be78ff0e DW |
1385 | } |
1386 | ||
1387 | /* | |
1388 | * Automatic CoW Reservation Freeing | |
1389 | * | |
1390 | * These functions automatically garbage collect leftover CoW reservations | |
1391 | * that were made on behalf of a cowextsize hint when we start to run out | |
1392 | * of quota or when the reservations sit around for too long. If the file | |
1393 | * has dirty pages or is undergoing writeback, its CoW reservations will | |
1394 | * be retained. | |
1395 | * | |
1396 | * The actual garbage collection piggybacks off the same code that runs | |
1397 | * the speculative EOF preallocation garbage collector. | |
1398 | */ | |
1399 | STATIC int | |
1400 | xfs_inode_free_cowblocks( | |
1401 | struct xfs_inode *ip, | |
b26b2bf1 | 1402 | struct xfs_icwalk *icw, |
0fa4a10a | 1403 | unsigned int *lockflags) |
be78ff0e | 1404 | { |
f41a0716 | 1405 | bool wait; |
be78ff0e DW |
1406 | int ret = 0; |
1407 | ||
b26b2bf1 | 1408 | wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC); |
f41a0716 | 1409 | |
ce2d3bbe DW |
1410 | if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS)) |
1411 | return 0; | |
1412 | ||
90a71daa | 1413 | if (!xfs_prep_free_cowblocks(ip, icw)) |
83104d44 DW |
1414 | return 0; |
1415 | ||
b26b2bf1 | 1416 | if (!xfs_icwalk_match(ip, icw)) |
a91bf992 | 1417 | return 0; |
83104d44 | 1418 | |
f41a0716 DW |
1419 | /* |
1420 | * If the caller is waiting, return -EAGAIN to keep the background | |
1421 | * scanner moving and revisit the inode in a subsequent pass. | |
1422 | */ | |
0fa4a10a DW |
1423 | if (!(*lockflags & XFS_IOLOCK_EXCL) && |
1424 | !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { | |
f41a0716 DW |
1425 | if (wait) |
1426 | return -EAGAIN; | |
1427 | return 0; | |
1428 | } | |
0fa4a10a DW |
1429 | *lockflags |= XFS_IOLOCK_EXCL; |
1430 | ||
f41a0716 DW |
1431 | if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) { |
1432 | if (wait) | |
0fa4a10a DW |
1433 | return -EAGAIN; |
1434 | return 0; | |
f41a0716 | 1435 | } |
0fa4a10a | 1436 | *lockflags |= XFS_MMAPLOCK_EXCL; |
83104d44 | 1437 | |
be78ff0e DW |
1438 | /* |
1439 | * Check again, nobody else should be able to dirty blocks or change | |
1440 | * the reflink iflag now that we have the first two locks held. | |
1441 | */ | |
90a71daa | 1442 | if (xfs_prep_free_cowblocks(ip, icw)) |
be78ff0e | 1443 | ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false); |
83104d44 DW |
1444 | return ret; |
1445 | } | |
1446 | ||
83104d44 DW |
1447 | void |
1448 | xfs_inode_set_cowblocks_tag( | |
1449 | xfs_inode_t *ip) | |
1450 | { | |
7b7381f0 | 1451 | trace_xfs_inode_set_cowblocks_tag(ip); |
9669f51d | 1452 | return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS); |
83104d44 DW |
1453 | } |
1454 | ||
1455 | void | |
1456 | xfs_inode_clear_cowblocks_tag( | |
1457 | xfs_inode_t *ip) | |
1458 | { | |
7b7381f0 | 1459 | trace_xfs_inode_clear_cowblocks_tag(ip); |
ce2d3bbe | 1460 | return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS); |
83104d44 | 1461 | } |
d6b636eb DW |
1462 | |
1463 | /* Disable post-EOF and CoW block auto-reclamation. */ | |
1464 | void | |
c9a6526f | 1465 | xfs_blockgc_stop( |
d6b636eb DW |
1466 | struct xfs_mount *mp) |
1467 | { | |
86437e6a | 1468 | struct xfs_perag *pag = NULL; |
894ecacf | 1469 | |
6f649091 DW |
1470 | if (!xfs_clear_blockgc_enabled(mp)) |
1471 | return; | |
1472 | ||
86437e6a | 1473 | while ((pag = xfs_perag_next(mp, pag))) |
894ecacf | 1474 | cancel_delayed_work_sync(&pag->pag_blockgc_work); |
6f649091 | 1475 | trace_xfs_blockgc_stop(mp, __return_address); |
d6b636eb DW |
1476 | } |
1477 | ||
1478 | /* Enable post-EOF and CoW block auto-reclamation. */ | |
1479 | void | |
c9a6526f | 1480 | xfs_blockgc_start( |
d6b636eb DW |
1481 | struct xfs_mount *mp) |
1482 | { | |
f9ffd095 | 1483 | struct xfs_perag *pag = NULL; |
894ecacf | 1484 | |
6f649091 DW |
1485 | if (xfs_set_blockgc_enabled(mp)) |
1486 | return; | |
1487 | ||
1488 | trace_xfs_blockgc_start(mp, __return_address); | |
f9ffd095 | 1489 | while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG))) |
894ecacf | 1490 | xfs_blockgc_queue(pag); |
d6b636eb | 1491 | } |
3d4feec0 | 1492 | |
d20d5edc DW |
1493 | /* Don't try to run block gc on an inode that's in any of these states. */ |
1494 | #define XFS_BLOCKGC_NOGRAB_IFLAGS (XFS_INEW | \ | |
ab23a776 DC |
1495 | XFS_NEED_INACTIVE | \ |
1496 | XFS_INACTIVATING | \ | |
d20d5edc DW |
1497 | XFS_IRECLAIMABLE | \ |
1498 | XFS_IRECLAIM) | |
df600197 | 1499 | /* |
b9baaef4 DW |
1500 | * Decide if the given @ip is eligible for garbage collection of speculative |
1501 | * preallocations, and grab it if so. Returns true if it's ready to go or | |
1502 | * false if we should just ignore it. | |
df600197 DW |
1503 | */ |
1504 | static bool | |
b9baaef4 | 1505 | xfs_blockgc_igrab( |
7fdff526 | 1506 | struct xfs_inode *ip) |
df600197 DW |
1507 | { |
1508 | struct inode *inode = VFS_I(ip); | |
df600197 DW |
1509 | |
1510 | ASSERT(rcu_read_lock_held()); | |
1511 | ||
1512 | /* Check for stale RCU freed inode */ | |
1513 | spin_lock(&ip->i_flags_lock); | |
1514 | if (!ip->i_ino) | |
1515 | goto out_unlock_noent; | |
1516 | ||
d20d5edc | 1517 | if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS) |
df600197 DW |
1518 | goto out_unlock_noent; |
1519 | spin_unlock(&ip->i_flags_lock); | |
1520 | ||
1521 | /* nothing to sync during shutdown */ | |
75c8c50f | 1522 | if (xfs_is_shutdown(ip->i_mount)) |
df600197 DW |
1523 | return false; |
1524 | ||
1525 | /* If we can't grab the inode, it must be on its way to reclaim. */ | |
1526 | if (!igrab(inode)) | |
1527 | return false; | |
1528 | ||
1529 | /* inode is valid */ | |
1530 | return true; | |
1531 | ||
1532 | out_unlock_noent: | |
1533 | spin_unlock(&ip->i_flags_lock); | |
1534 | return false; | |
1535 | } | |
1536 | ||
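/*
 * Lock handling note: xfs_inode_free_eofblocks() and xfs_inode_free_cowblocks()
 * record any IOLOCK/MMAPLOCK they acquire in *lockflags rather than unlocking
 * before they return, so the scan helper below can run both trims under a
 * single lock cycle and drop everything with one xfs_iunlock() call.
 */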
41956753 DW |
1537 | /* Scan one incore inode for block preallocations that we can remove. */ |
1538 | static int | |
1539 | xfs_blockgc_scan_inode( | |
1540 | struct xfs_inode *ip, | |
b26b2bf1 | 1541 | struct xfs_icwalk *icw) |
85c5b270 | 1542 | { |
0fa4a10a | 1543 | unsigned int lockflags = 0; |
85c5b270 DW |
1544 | int error; |
1545 | ||
b26b2bf1 | 1546 | error = xfs_inode_free_eofblocks(ip, icw, &lockflags); |
85c5b270 | 1547 | if (error) |
0fa4a10a | 1548 | goto unlock; |
85c5b270 | 1549 | |
b26b2bf1 | 1550 | error = xfs_inode_free_cowblocks(ip, icw, &lockflags); |
0fa4a10a DW |
1551 | unlock: |
1552 | if (lockflags) | |
1553 | xfs_iunlock(ip, lockflags); | |
594ab00b | 1554 | xfs_irele(ip); |
0fa4a10a | 1555 | return error; |
85c5b270 DW |
1556 | } |
1557 | ||
9669f51d DW |
1558 | /* Background worker that trims preallocated space. */ |
1559 | void | |
1560 | xfs_blockgc_worker( | |
1561 | struct work_struct *work) | |
1562 | { | |
894ecacf DW |
1563 | struct xfs_perag *pag = container_of(to_delayed_work(work), |
1564 | struct xfs_perag, pag_blockgc_work); | |
e9c4d8bf | 1565 | struct xfs_mount *mp = pag_mount(pag); |
9669f51d DW |
1566 | int error; |
1567 | ||
6f649091 DW |
1568 | trace_xfs_blockgc_worker(mp, __return_address); |
1569 | ||
f427cf5c | 1570 | error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL); |
9669f51d | 1571 | if (error) |
894ecacf | 1572 | xfs_info(mp, "AG %u preallocation gc worker failed, err=%d", |
e9c4d8bf | 1573 | pag_agno(pag), error); |
894ecacf | 1574 | xfs_blockgc_queue(pag); |
9669f51d DW |
1575 | } |
1576 | ||
85c5b270 | 1577 | /* |
2eb66502 DW |
1578 | * Try to free space in the filesystem by purging inactive inodes, eofblocks |
1579 | * and cowblocks. | |
85c5b270 DW |
1580 | */ |
1581 | int | |
1582 | xfs_blockgc_free_space( | |
1583 | struct xfs_mount *mp, | |
b26b2bf1 | 1584 | struct xfs_icwalk *icw) |
85c5b270 | 1585 | { |
2eb66502 DW |
1586 | int error; |
1587 | ||
b26b2bf1 | 1588 | trace_xfs_blockgc_free_space(mp, icw, _RET_IP_); |
85c5b270 | 1589 | |
2eb66502 DW |
1590 | error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw); |
1591 | if (error) | |
1592 | return error; | |
1593 | ||
d4d12c02 | 1594 | return xfs_inodegc_flush(mp); |
85c5b270 DW |
1595 | } |
1596 | ||
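/*
 * A minimal usage sketch (illustrative only, not an actual caller in this
 * file): a synchronous scan that trims preallocations belonging to a single
 * user could be requested like so:
 *
 *	struct xfs_icwalk	icw = {
 *		.icw_flags	= XFS_ICWALK_FLAG_SYNC | XFS_ICWALK_FLAG_UID,
 *		.icw_uid	= make_kuid(mp->m_super->s_user_ns, uid),
 *	};
 *
 *	error = xfs_blockgc_free_space(mp, &icw);
 *
 * The sync flag makes the per-inode helpers return -EAGAIN when they cannot
 * get the locks, so the walk revisits those inodes instead of skipping them.
 */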
e8d04c2a DW |
1597 | /* |
1598 | * Reclaim all the free space that we can by scheduling the background blockgc | |
1599 | * and inodegc workers immediately and waiting for them all to clear. | |
1600 | */ | |
d4d12c02 | 1601 | int |
e8d04c2a DW |
1602 | xfs_blockgc_flush_all( |
1603 | struct xfs_mount *mp) | |
1604 | { | |
f9ffd095 | 1605 | struct xfs_perag *pag = NULL; |
e8d04c2a DW |
1606 | |
1607 | trace_xfs_blockgc_flush_all(mp, __return_address); | |
1608 | ||
1609 | /* | |
f9ffd095 CH |
1610 | * For each blockgc worker, move its queue time up to now. If it wasn't |
1611 | * queued, it will not be requeued. Then flush whatever is left. | |
e8d04c2a | 1612 | */ |
f9ffd095 | 1613 | while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG))) |
e9c4d8bf | 1614 | mod_delayed_work(mp->m_blockgc_wq, &pag->pag_blockgc_work, 0); |
e8d04c2a | 1615 | |
f9ffd095 | 1616 | while ((pag = xfs_perag_grab_next_tag(mp, pag, XFS_ICI_BLOCKGC_TAG))) |
e8d04c2a DW |
1617 | flush_delayed_work(&pag->pag_blockgc_work); |
1618 | ||
d4d12c02 | 1619 | return xfs_inodegc_flush(mp); |
e8d04c2a DW |
1620 | } |
1621 | ||
3d4feec0 | 1622 | /* |
c237dd7c DW |
1623 | * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which |
1624 | * quota caused an allocation failure, so we make a best effort by including | |
1625 | * each quota under low free space conditions (less than 1% free space) in the | |
1626 | * scan. | |
111068f8 DW |
1627 | * |
1628 | * Callers must not hold any inode's ILOCK. If requesting a synchronous scan | |
2d53f66b | 1629 | * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or |
111068f8 | 1630 | * MMAPLOCK. |
3d4feec0 | 1631 | */ |
111068f8 | 1632 | int |
c237dd7c DW |
1633 | xfs_blockgc_free_dquots( |
1634 | struct xfs_mount *mp, | |
1635 | struct xfs_dquot *udqp, | |
1636 | struct xfs_dquot *gdqp, | |
1637 | struct xfs_dquot *pdqp, | |
2d53f66b | 1638 | unsigned int iwalk_flags) |
3d4feec0 | 1639 | { |
b26b2bf1 | 1640 | struct xfs_icwalk icw = {0}; |
3d4feec0 DW |
1641 | bool do_work = false; |
1642 | ||
c237dd7c DW |
1643 | if (!udqp && !gdqp && !pdqp) |
1644 | return 0; | |
1645 | ||
3d4feec0 | 1646 | /* |
111068f8 DW |
1647 | * Run a scan to free blocks using the union filter to cover all |
1648 | * applicable quotas in a single scan. | |
3d4feec0 | 1649 | */ |
b26b2bf1 | 1650 | icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags; |
3d4feec0 | 1651 | |
c237dd7c | 1652 | if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) { |
b26b2bf1 DW |
1653 | icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id); |
1654 | icw.icw_flags |= XFS_ICWALK_FLAG_UID; | |
c237dd7c | 1655 | do_work = true; |
3d4feec0 DW |
1656 | } |
1657 | ||
c237dd7c | 1658 | if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
b26b2bf1 DW |
1659 | icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id); |
1660 | icw.icw_flags |= XFS_ICWALK_FLAG_GID; | |
c237dd7c | 1661 | do_work = true; |
3d4feec0 DW |
1662 | } |
1663 | ||
c237dd7c | 1664 | if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) { |
b26b2bf1 DW |
1665 | icw.icw_prid = pdqp->q_id; |
1666 | icw.icw_flags |= XFS_ICWALK_FLAG_PRID; | |
c237dd7c | 1667 | do_work = true; |
3d4feec0 DW |
1668 | } |
1669 | ||
1670 | if (!do_work) | |
111068f8 | 1671 | return 0; |
3d4feec0 | 1672 | |
b26b2bf1 | 1673 | return xfs_blockgc_free_space(mp, &icw); |
c237dd7c DW |
1674 | } |
1675 | ||
1676 | /* Run cow/eofblocks scans on the quotas attached to the inode. */ | |
1677 | int | |
1678 | xfs_blockgc_free_quota( | |
1679 | struct xfs_inode *ip, | |
2d53f66b | 1680 | unsigned int iwalk_flags) |
c237dd7c DW |
1681 | { |
1682 | return xfs_blockgc_free_dquots(ip->i_mount, | |
1683 | xfs_inode_dquot(ip, XFS_DQTYPE_USER), | |
1684 | xfs_inode_dquot(ip, XFS_DQTYPE_GROUP), | |
2d53f66b | 1685 | xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags); |
3d4feec0 | 1686 | } |
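/*
 * Typical caller pattern (simplified sketch of the transaction allocation
 * helpers): a quota reservation failure is retried exactly once after kicking
 * blockgc for the quotas attached to the inode:
 *
 *	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
 *		xfs_trans_cancel(tp);
 *		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *		xfs_blockgc_free_quota(ip, 0);
 *		retried = true;
 *		goto retry;
 *	}
 */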
df600197 DW |
1687 | |
1688 | /* XFS Inode Cache Walking Code */ | |
1689 | ||
f1bc5c56 DW |
1690 | /* |
1691 | * The inode lookup is done in batches to keep the amount of lock traffic and | |
1692 | * radix tree lookups to a minimum. The batch size is a trade off between | |
1693 | * lookup reduction and stack usage. This is in the reclaim path, so we can't | |
1694 | * be too greedy. | |
1695 | */ | |
1696 | #define XFS_LOOKUP_BATCH 32 | |
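/*
 * With a batch of 32, the on-stack lookup array in xfs_icwalk_ag() holds 32
 * pointers, i.e. 256 bytes on a 64-bit build.
 */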
1697 | ||
1698 | ||
b9baaef4 DW |
1699 | /* |
1700 | * Decide if we want to grab this inode in anticipation of doing work towards | |
594ab00b | 1701 | * the goal. |
b9baaef4 DW |
1702 | */ |
1703 | static inline bool | |
1704 | xfs_icwalk_igrab( | |
1705 | enum xfs_icwalk_goal goal, | |
9492750a | 1706 | struct xfs_inode *ip, |
b26b2bf1 | 1707 | struct xfs_icwalk *icw) |
b9baaef4 DW |
1708 | { |
1709 | switch (goal) { | |
b9baaef4 | 1710 | case XFS_ICWALK_BLOCKGC: |
7fdff526 | 1711 | return xfs_blockgc_igrab(ip); |
f1bc5c56 | 1712 | case XFS_ICWALK_RECLAIM: |
b26b2bf1 | 1713 | return xfs_reclaim_igrab(ip, icw); |
b9baaef4 DW |
1714 | default: |
1715 | return false; | |
1716 | } | |
1717 | } | |
1718 | ||
594ab00b DW |
1719 | /* |
1720 | * Process an inode. Each processing function must handle any state changes | |
1721 | * made by the icwalk igrab function. Return -EAGAIN to skip an inode. | |
1722 | */ | |
f427cf5c DW |
1723 | static inline int |
1724 | xfs_icwalk_process_inode( | |
1725 | enum xfs_icwalk_goal goal, | |
1726 | struct xfs_inode *ip, | |
f1bc5c56 | 1727 | struct xfs_perag *pag, |
b26b2bf1 | 1728 | struct xfs_icwalk *icw) |
f427cf5c | 1729 | { |
594ab00b | 1730 | int error = 0; |
f427cf5c DW |
1731 | |
1732 | switch (goal) { | |
f427cf5c | 1733 | case XFS_ICWALK_BLOCKGC: |
b26b2bf1 | 1734 | error = xfs_blockgc_scan_inode(ip, icw); |
f427cf5c | 1735 | break; |
f1bc5c56 DW |
1736 | case XFS_ICWALK_RECLAIM: |
1737 | xfs_reclaim_inode(ip, pag); | |
1738 | break; | |
f427cf5c | 1739 | } |
f427cf5c DW |
1740 | return error; |
1741 | } | |
1742 | ||
df600197 | 1743 | /* |
f427cf5c DW |
1744 | * For a given per-AG structure @pag and a goal, grab qualifying inodes and |
1745 | * process them in some manner. | |
df600197 DW |
1746 | */ |
1747 | static int | |
c1115c0c | 1748 | xfs_icwalk_ag( |
df600197 | 1749 | struct xfs_perag *pag, |
f427cf5c | 1750 | enum xfs_icwalk_goal goal, |
b26b2bf1 | 1751 | struct xfs_icwalk *icw) |
df600197 | 1752 | { |
e9c4d8bf | 1753 | struct xfs_mount *mp = pag_mount(pag); |
df600197 DW |
1754 | uint32_t first_index; |
1755 | int last_error = 0; | |
1756 | int skipped; | |
1757 | bool done; | |
1758 | int nr_found; | |
1759 | ||
1760 | restart: | |
1761 | done = false; | |
1762 | skipped = 0; | |
f1bc5c56 DW |
1763 | if (goal == XFS_ICWALK_RECLAIM) |
1764 | first_index = READ_ONCE(pag->pag_ici_reclaim_cursor); | |
1765 | else | |
1766 | first_index = 0; | |
df600197 DW |
1767 | nr_found = 0; |
1768 | do { | |
1769 | struct xfs_inode *batch[XFS_LOOKUP_BATCH]; | |
1770 | int error = 0; | |
1771 | int i; | |
1772 | ||
1773 | rcu_read_lock(); | |
1774 | ||
a437b9b4 CH |
1775 | nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root, |
1776 | (void **) batch, first_index, | |
1777 | XFS_LOOKUP_BATCH, goal); | |
df600197 | 1778 | if (!nr_found) { |
f1bc5c56 | 1779 | done = true; |
df600197 DW |
1780 | rcu_read_unlock(); |
1781 | break; | |
1782 | } | |
1783 | ||
1784 | /* | |
1785 | * Grab the inodes before we drop the lock. If we found | |
1786 | * nothing, nr_found == 0 and the loop will be skipped. | |
1787 | */ | |
1788 | for (i = 0; i < nr_found; i++) { | |
1789 | struct xfs_inode *ip = batch[i]; | |
1790 | ||
b26b2bf1 | 1791 | if (done || !xfs_icwalk_igrab(goal, ip, icw)) |
df600197 DW |
1792 | batch[i] = NULL; |
1793 | ||
1794 | /* | |
1795 | * Update the index for the next lookup. Catch | |
1796 | * overflows into the next AG range which can occur if | |
1797 | * we have inodes in the last block of the AG and we | |
1798 | * are currently pointing to the last inode. | |
1799 | * | |
1800 | * Because we may see inodes that are from the wrong AG | |
1801 | * due to RCU freeing and reallocation, only update the | |
1802 | * index if it lies in this AG. It was a race that led | |
1803 | * us to see this inode, so another lookup from the | |
1804 | * same index will not find it again. | |
1805 | */ | |
e9c4d8bf | 1806 | if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag_agno(pag)) |
df600197 DW |
1807 | continue; |
1808 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | |
1809 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | |
1810 | done = true; | |
1811 | } | |
1812 | ||
1813 | /* unlock now we've grabbed the inodes. */ | |
1814 | rcu_read_unlock(); | |
1815 | ||
1816 | for (i = 0; i < nr_found; i++) { | |
1817 | if (!batch[i]) | |
1818 | continue; | |
f1bc5c56 | 1819 | error = xfs_icwalk_process_inode(goal, batch[i], pag, |
b26b2bf1 | 1820 | icw); |
df600197 DW |
1821 | if (error == -EAGAIN) { |
1822 | skipped++; | |
1823 | continue; | |
1824 | } | |
1825 | if (error && last_error != -EFSCORRUPTED) | |
1826 | last_error = error; | |
1827 | } | |
1828 | ||
1829 | /* bail out if the filesystem is corrupted. */ | |
1830 | if (error == -EFSCORRUPTED) | |
1831 | break; | |
1832 | ||
1833 | cond_resched(); | |
1834 | ||
b26b2bf1 DW |
1835 | if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) { |
1836 | icw->icw_scan_limit -= XFS_LOOKUP_BATCH; | |
1837 | if (icw->icw_scan_limit <= 0) | |
f1bc5c56 DW |
1838 | break; |
1839 | } | |
df600197 DW |
1840 | } while (nr_found && !done); |
1841 | ||
f1bc5c56 DW |
1842 | if (goal == XFS_ICWALK_RECLAIM) { |
1843 | if (done) | |
1844 | first_index = 0; | |
1845 | WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index); | |
1846 | } | |
1847 | ||
df600197 DW |
1848 | if (skipped) { |
1849 | delay(1); | |
1850 | goto restart; | |
1851 | } | |
1852 | return last_error; | |
1853 | } | |
1854 | ||
f427cf5c | 1855 | /* Walk all incore inodes to achieve a given goal. */ |
df600197 | 1856 | static int |
c1115c0c | 1857 | xfs_icwalk( |
df600197 | 1858 | struct xfs_mount *mp, |
f427cf5c | 1859 | enum xfs_icwalk_goal goal, |
b26b2bf1 | 1860 | struct xfs_icwalk *icw) |
df600197 | 1861 | { |
f9ffd095 | 1862 | struct xfs_perag *pag = NULL; |
df600197 DW |
1863 | int error = 0; |
1864 | int last_error = 0; | |
df600197 | 1865 | |
f9ffd095 | 1866 | while ((pag = xfs_perag_grab_next_tag(mp, pag, goal))) { |
b26b2bf1 | 1867 | error = xfs_icwalk_ag(pag, goal, icw); |
df600197 DW |
1868 | if (error) { |
1869 | last_error = error; | |
a437b9b4 | 1870 | if (error == -EFSCORRUPTED) { |
c4d5660a | 1871 | xfs_perag_rele(pag); |
df600197 | 1872 | break; |
a437b9b4 | 1873 | } |
df600197 DW |
1874 | } |
1875 | } | |
1876 | return last_error; | |
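/*
 * Compile-time check only: BUILD_BUG_ON() emits no runtime code, so placing
 * it after the return statement is harmless; it simply keeps the assertion
 * that the private walk flags don't collide with the caller-visible ones
 * next to the code that uses them.
 */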
2d53f66b | 1877 | BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID); |
df600197 | 1878 | } |
c6c2066d DW |
1879 | |
1880 | #ifdef DEBUG | |
1881 | static void | |
1882 | xfs_check_delalloc( | |
1883 | struct xfs_inode *ip, | |
1884 | int whichfork) | |
1885 | { | |
732436ef | 1886 | struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork); |
c6c2066d DW |
1887 | struct xfs_bmbt_irec got; |
1888 | struct xfs_iext_cursor icur; | |
1889 | ||
1890 | if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got)) | |
1891 | return; | |
1892 | do { | |
1893 | if (isnullstartblock(got.br_startblock)) { | |
1894 | xfs_warn(ip->i_mount, | |
1895 | "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]", | |
1896 | ip->i_ino, | |
1897 | whichfork == XFS_DATA_FORK ? "data" : "cow", | |
1898 | got.br_startoff, got.br_blockcount); | |
1899 | } | |
1900 | } while (xfs_iext_next_extent(ifp, &icur, &got)); | |
1901 | } | |
1902 | #else | |
1903 | #define xfs_check_delalloc(ip, whichfork) do { } while (0) | |
1904 | #endif | |
1905 | ||
ab23a776 DC |
1906 | /* Schedule the inode for reclaim. */ |
1907 | static void | |
1908 | xfs_inodegc_set_reclaimable( | |
c6c2066d DW |
1909 | struct xfs_inode *ip) |
1910 | { | |
1911 | struct xfs_mount *mp = ip->i_mount; | |
1912 | struct xfs_perag *pag; | |
c6c2066d | 1913 | |
75c8c50f | 1914 | if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) { |
c6c2066d DW |
1915 | xfs_check_delalloc(ip, XFS_DATA_FORK); |
1916 | xfs_check_delalloc(ip, XFS_COW_FORK); | |
1917 | ASSERT(0); | |
1918 | } | |
1919 | ||
c6c2066d DW |
1920 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
1921 | spin_lock(&pag->pag_ici_lock); | |
1922 | spin_lock(&ip->i_flags_lock); | |
1923 | ||
ab23a776 DC |
1924 | trace_xfs_inode_set_reclaimable(ip); |
1925 | ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING); | |
1926 | ip->i_flags |= XFS_IRECLAIMABLE; | |
c6c2066d DW |
1927 | xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), |
1928 | XFS_ICI_RECLAIM_TAG); | |
c6c2066d DW |
1929 | |
1930 | spin_unlock(&ip->i_flags_lock); | |
1931 | spin_unlock(&pag->pag_ici_lock); | |
1932 | xfs_perag_put(pag); | |
1933 | } | |
ab23a776 DC |
1934 | |
1935 | /* | |
1936 | * Free all speculative preallocations and possibly even the inode itself. | |
1937 | * This is the last chance to make changes to an otherwise unreferenced file | |
1938 | * before incore reclamation happens. | |
1939 | */ | |
d4d12c02 | 1940 | static int |
ab23a776 DC |
1941 | xfs_inodegc_inactivate( |
1942 | struct xfs_inode *ip) | |
1943 | { | |
d4d12c02 DC |
1944 | int error; |
1945 | ||
ab23a776 | 1946 | trace_xfs_inode_inactivating(ip); |
d4d12c02 | 1947 | error = xfs_inactive(ip); |
ab23a776 | 1948 | xfs_inodegc_set_reclaimable(ip); |
d4d12c02 DC |
1949 | return error; |
1950 | ||
ab23a776 DC |
1951 | } |
1952 | ||
1953 | void | |
1954 | xfs_inodegc_worker( | |
1955 | struct work_struct *work) | |
1956 | { | |
7cf2b0f9 DC |
1957 | struct xfs_inodegc *gc = container_of(to_delayed_work(work), |
1958 | struct xfs_inodegc, work); | |
ab23a776 DC |
1959 | struct llist_node *node = llist_del_all(&gc->list); |
1960 | struct xfs_inode *ip, *n; | |
62334fab | 1961 | struct xfs_mount *mp = gc->mp; |
4da11251 | 1962 | unsigned int nofs_flag; |
ab23a776 | 1963 | |
62334fab DW |
1964 | /* |
1965 | * Clear the cpu mask bit and ensure that we have seen the latest | |
1966 | * update of the gc structure associated with this CPU. This matches | |
1967 | * with the release semantics used when setting the cpumask bit in | |
1968 | * xfs_inodegc_queue. | |
1969 | */ | |
1970 | cpumask_clear_cpu(gc->cpu, &mp->m_inodegc_cpumask); | |
1971 | smp_mb__after_atomic(); | |
b37c4c83 | 1972 | |
ab23a776 DC |
1973 | WRITE_ONCE(gc->items, 0); |
1974 | ||
1975 | if (!node) | |
1976 | return; | |
1977 | ||
4da11251 WG |
1978 | /* |
1979 | * We can allocate memory here while doing writeback on behalf of | |
1980 | * memory reclaim. To avoid memory allocation deadlocks set the | |
1981 | * task-wide nofs context for the following operations. | |
1982 | */ | |
1983 | nofs_flag = memalloc_nofs_save(); | |
1984 | ||
ab23a776 | 1985 | ip = llist_entry(node, struct xfs_inode, i_gclist); |
62334fab | 1986 | trace_xfs_inodegc_worker(mp, READ_ONCE(gc->shrinker_hits)); |
ab23a776 | 1987 | |
40b1de00 | 1988 | WRITE_ONCE(gc->shrinker_hits, 0); |
ab23a776 | 1989 | llist_for_each_entry_safe(ip, n, node, i_gclist) { |
d4d12c02 DC |
1990 | int error; |
1991 | ||
ab23a776 | 1992 | xfs_iflags_set(ip, XFS_INACTIVATING); |
d4d12c02 DC |
1993 | error = xfs_inodegc_inactivate(ip); |
1994 | if (error && !gc->error) | |
1995 | gc->error = error; | |
ab23a776 | 1996 | } |
4da11251 WG |
1997 | |
1998 | memalloc_nofs_restore(nofs_flag); | |
ab23a776 DC |
1999 | } |
2000 | ||
2001 | /* | |
5e672cd6 DC |
2002 | * Expedite all pending inodegc work to run immediately. This does not wait for |
2003 | * completion of the work. | |
ab23a776 DC |
2004 | */ |
2005 | void | |
5e672cd6 | 2006 | xfs_inodegc_push( |
ab23a776 DC |
2007 | struct xfs_mount *mp) |
2008 | { | |
ab23a776 DC |
2009 | if (!xfs_is_inodegc_enabled(mp)) |
2010 | return; | |
5e672cd6 DC |
2011 | trace_xfs_inodegc_push(mp, __return_address); |
2012 | xfs_inodegc_queue_all(mp); | |
2013 | } | |
ab23a776 | 2014 | |
5e672cd6 DC |
2015 | /* |
2016 | * Force all currently queued inode inactivation work to run immediately and | |
2017 | * wait for the work to finish. | |
2018 | */ | |
d4d12c02 | 2019 | int |
5e672cd6 DC |
2020 | xfs_inodegc_flush( |
2021 | struct xfs_mount *mp) | |
2022 | { | |
2023 | xfs_inodegc_push(mp); | |
ab23a776 | 2024 | trace_xfs_inodegc_flush(mp, __return_address); |
d4d12c02 | 2025 | return xfs_inodegc_wait_all(mp); |
ab23a776 DC |
2026 | } |
2027 | ||
2028 | /* | |
2029 | * Flush all the pending work and then disable the inode inactivation background | |
2254a739 DW |
2030 | * workers and wait for them to stop. Caller must hold sb->s_umount to |
2031 | * coordinate changes in the inodegc_enabled state. | |
ab23a776 DC |
2032 | */ |
2033 | void | |
2034 | xfs_inodegc_stop( | |
2035 | struct xfs_mount *mp) | |
2036 | { | |
2254a739 DW |
2037 | bool rerun; |
2038 | ||
ab23a776 DC |
2039 | if (!xfs_clear_inodegc_enabled(mp)) |
2040 | return; | |
2041 | ||
2254a739 DW |
2042 | /* |
2043 | * Drain all pending inodegc work, including inodes that could be | |
2044 | * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan | |
2045 | * threads that sample the inodegc state just prior to us clearing it. | |
2046 | * The inodegc flag state prevents new threads from queuing more | |
2047 | * inodes, so we queue pending work items and flush the workqueue until | |
2048 | * all inodegc lists are empty. IOWs, we cannot use drain_workqueue | |
2049 | * here because it does not allow other unserialized mechanisms to | |
2050 | * reschedule inodegc work while this draining is in progress. | |
2051 | */ | |
ab23a776 | 2052 | xfs_inodegc_queue_all(mp); |
2254a739 DW |
2053 | do { |
2054 | flush_workqueue(mp->m_inodegc_wq); | |
2055 | rerun = xfs_inodegc_queue_all(mp); | |
2056 | } while (rerun); | |
ab23a776 | 2057 | |
ab23a776 DC |
2058 | trace_xfs_inodegc_stop(mp, __return_address); |
2059 | } | |
2060 | ||
2061 | /* | |
2062 | * Enable the inode inactivation background workers and schedule deferred inode | |
2254a739 DW |
2063 | * inactivation work if there is any. Caller must hold sb->s_umount to |
2064 | * coordinate changes in the inodegc_enabled state. | |
ab23a776 DC |
2065 | */ |
2066 | void | |
2067 | xfs_inodegc_start( | |
2068 | struct xfs_mount *mp) | |
2069 | { | |
2070 | if (xfs_set_inodegc_enabled(mp)) | |
2071 | return; | |
2072 | ||
2073 | trace_xfs_inodegc_start(mp, __return_address); | |
2074 | xfs_inodegc_queue_all(mp); | |
2075 | } | |
2076 | ||
65f03d86 DW |
2077 | #ifdef CONFIG_XFS_RT |
2078 | static inline bool | |
2079 | xfs_inodegc_want_queue_rt_file( | |
2080 | struct xfs_inode *ip) | |
2081 | { | |
2082 | struct xfs_mount *mp = ip->i_mount; | |
65f03d86 | 2083 | |
0cb53d77 | 2084 | if (!XFS_IS_REALTIME_INODE(ip) || xfs_has_zoned(mp)) |
65f03d86 DW |
2085 | return false; |
2086 | ||
712bae96 | 2087 | if (xfs_compare_freecounter(mp, XC_FREE_RTEXTENTS, |
2229276c DW |
2088 | mp->m_low_rtexts[XFS_LOWSP_5_PCNT], |
2089 | XFS_FDBLOCKS_BATCH) < 0) | |
2090 | return true; | |
2091 | ||
2092 | return false; | |
65f03d86 DW |
2093 | } |
2094 | #else | |
2095 | # define xfs_inodegc_want_queue_rt_file(ip) (false) | |
2096 | #endif /* CONFIG_XFS_RT */ | |
2097 | ||
ab23a776 DC |
2098 | /* |
2099 | * Schedule the inactivation worker when: | |
2100 | * | |
2101 | * - We've accumulated more than one inode cluster buffer's worth of inodes. | |
7d6f07d2 | 2102 | * - There is less than 5% free space left. |
108523b8 | 2103 | * - Any of the quotas for this inode are near an enforcement limit. |
ab23a776 DC |
2104 | */ |
2105 | static inline bool | |
2106 | xfs_inodegc_want_queue_work( | |
2107 | struct xfs_inode *ip, | |
2108 | unsigned int items) | |
2109 | { | |
2110 | struct xfs_mount *mp = ip->i_mount; | |
2111 | ||
2112 | if (items > mp->m_ino_geo.inodes_per_cluster) | |
2113 | return true; | |
2114 | ||
712bae96 | 2115 | if (xfs_compare_freecounter(mp, XC_FREE_BLOCKS, |
7d6f07d2 DW |
2116 | mp->m_low_space[XFS_LOWSP_5_PCNT], |
2117 | XFS_FDBLOCKS_BATCH) < 0) | |
2118 | return true; | |
2119 | ||
65f03d86 DW |
2120 | if (xfs_inodegc_want_queue_rt_file(ip)) |
2121 | return true; | |
2122 | ||
108523b8 DW |
2123 | if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER)) |
2124 | return true; | |
2125 | ||
2126 | if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP)) | |
2127 | return true; | |
2128 | ||
2129 | if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ)) | |
2130 | return true; | |
2131 | ||
ab23a776 DC |
2132 | return false; |
2133 | } | |
2134 | ||
2135 | /* | |
2136 | * Upper bound on the number of inodes in each per-cpu inodegc list that can be | |
2137 | * queued for inactivation at any given time, to avoid monopolizing the workqueue. | |
2138 | */ | |
2139 | #define XFS_INODEGC_MAX_BACKLOG (4 * XFS_INODES_PER_CHUNK) | |
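/*
 * XFS_INODES_PER_CHUNK is 64, so this works out to 256 inodes in a per-cpu
 * queue before xfs_inodegc_want_flush_work() starts making callers wait.
 */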
2140 | ||
2141 | /* | |
2142 | * Make the frontend wait for inactivations when: | |
2143 | * | |
40b1de00 | 2144 | * - Memory shrinkers queued the inactivation worker and it hasn't finished. |
ab23a776 DC |
2145 | * - The queue depth exceeds the maximum allowable percpu backlog. |
2146 | * | |
f2e812c1 DC |
2147 | * Note: If we are in a NOFS context here (e.g. current thread is running a |
2148 | * transaction) then we don't want to block here, as making inodegc progress may | |
2149 | * require filesystem resources that we currently hold, and that could result in a | |
2150 | * deadlock. Hence we skip out of here if we are in a scoped NOFS context. | |
ab23a776 DC |
2151 | */ |
2152 | static inline bool | |
2153 | xfs_inodegc_want_flush_work( | |
2154 | struct xfs_inode *ip, | |
40b1de00 DW |
2155 | unsigned int items, |
2156 | unsigned int shrinker_hits) | |
ab23a776 | 2157 | { |
f2e812c1 | 2158 | if (current->flags & PF_MEMALLOC_NOFS) |
ab23a776 DC |
2159 | return false; |
2160 | ||
40b1de00 DW |
2161 | if (shrinker_hits > 0) |
2162 | return true; | |
2163 | ||
ab23a776 DC |
2164 | if (items > XFS_INODEGC_MAX_BACKLOG) |
2165 | return true; | |
2166 | ||
2167 | return false; | |
2168 | } | |
2169 | ||
2170 | /* | |
2171 | * Queue a background inactivation worker if there are inodes that need to be | |
2172 | * inactivated and higher level xfs code hasn't disabled the background | |
2173 | * workers. | |
2174 | */ | |
2175 | static void | |
2176 | xfs_inodegc_queue( | |
2177 | struct xfs_inode *ip) | |
2178 | { | |
2179 | struct xfs_mount *mp = ip->i_mount; | |
2180 | struct xfs_inodegc *gc; | |
2181 | int items; | |
40b1de00 | 2182 | unsigned int shrinker_hits; |
62334fab | 2183 | unsigned int cpu_nr; |
7cf2b0f9 | 2184 | unsigned long queue_delay = 1; |
ab23a776 DC |
2185 | |
2186 | trace_xfs_inode_set_need_inactive(ip); | |
2187 | spin_lock(&ip->i_flags_lock); | |
2188 | ip->i_flags |= XFS_NEED_INACTIVE; | |
2189 | spin_unlock(&ip->i_flags_lock); | |
2190 | ||
62334fab DW |
2191 | cpu_nr = get_cpu(); |
2192 | gc = this_cpu_ptr(mp->m_inodegc); | |
ab23a776 DC |
2193 | llist_add(&ip->i_gclist, &gc->list); |
2194 | items = READ_ONCE(gc->items); | |
2195 | WRITE_ONCE(gc->items, items + 1); | |
40b1de00 | 2196 | shrinker_hits = READ_ONCE(gc->shrinker_hits); |
ab23a776 | 2197 | |
62334fab DW |
2198 | /* |
2199 | * Ensure the list add is always seen by anyone who finds the cpumask | |
2200 | * bit set. This effectively gives the cpumask bit set operation | |
2201 | * release ordering semantics. | |
2202 | */ | |
2203 | smp_mb__before_atomic(); | |
2204 | if (!cpumask_test_cpu(cpu_nr, &mp->m_inodegc_cpumask)) | |
2205 | cpumask_test_and_set_cpu(cpu_nr, &mp->m_inodegc_cpumask); | |
2206 | ||
7cf2b0f9 DC |
2207 | /* |
2208 | * We queue the work while holding the current CPU so that the work | |
2209 | * is scheduled to run on this CPU. | |
2210 | */ | |
2211 | if (!xfs_is_inodegc_enabled(mp)) { | |
62334fab | 2212 | put_cpu(); |
ab23a776 | 2213 | return; |
ab23a776 DC |
2214 | } |
2215 | ||
7cf2b0f9 DC |
2216 | if (xfs_inodegc_want_queue_work(ip, items)) |
2217 | queue_delay = 0; | |
2218 | ||
2219 | trace_xfs_inodegc_queue(mp, __return_address); | |
03e0add8 DW |
2220 | mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work, |
2221 | queue_delay); | |
62334fab | 2222 | put_cpu(); |
7cf2b0f9 | 2223 | |
40b1de00 | 2224 | if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) { |
ab23a776 | 2225 | trace_xfs_inodegc_throttle(mp, __return_address); |
7cf2b0f9 | 2226 | flush_delayed_work(&gc->work); |
ab23a776 DC |
2227 | } |
2228 | } | |
2229 | ||
ab23a776 DC |
2230 | /* |
2231 | * We set the inode flag atomically with the radix tree tag. Once we get tag | |
2232 | * lookups on the radix tree, this inode flag can go away. | |
2233 | * | |
2234 | * We always use background reclaim here because even if the inode is clean, it | |
2235 | * still may be under IO and hence we have to wait for IO completion to occur | |
2236 | * before we can reclaim the inode. The background reclaim path handles this | |
2237 | * more efficiently than we can here, so simply let background reclaim tear down | |
2238 | * all inodes. | |
2239 | */ | |
2240 | void | |
2241 | xfs_inode_mark_reclaimable( | |
2242 | struct xfs_inode *ip) | |
2243 | { | |
2244 | struct xfs_mount *mp = ip->i_mount; | |
2245 | bool need_inactive; | |
2246 | ||
2247 | XFS_STATS_INC(mp, vn_reclaim); | |
2248 | ||
2249 | /* | |
2250 | * We should never get here with any of the reclaim flags already set. | |
2251 | */ | |
2252 | ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS)); | |
2253 | ||
2254 | need_inactive = xfs_inode_needs_inactive(ip); | |
2255 | if (need_inactive) { | |
2256 | xfs_inodegc_queue(ip); | |
2257 | return; | |
2258 | } | |
2259 | ||
2260 | /* Going straight to reclaim, so drop the dquots. */ | |
2261 | xfs_qm_dqdetach(ip); | |
2262 | xfs_inodegc_set_reclaimable(ip); | |
2263 | } | |
40b1de00 DW |
2264 | |
2265 | /* | |
2266 | * Register a phony shrinker so that we can run background inodegc sooner when | |
2267 | * there's memory pressure. Inactivation does not itself free any memory but | |
2268 | * it does make inodes reclaimable, which eventually frees memory. | |
2269 | * | |
2270 | * The count function, seek value, and batch value are crafted to trigger the | |
2271 | * scan function during the second round of scanning. Hopefully this means | |
2272 | * that we reclaimed enough memory that initiating metadata transactions won't | |
2273 | * make things worse. | |
2274 | */ | |
2275 | #define XFS_INODEGC_SHRINKER_COUNT (1UL << DEF_PRIORITY) | |
2276 | #define XFS_INODEGC_SHRINKER_BATCH ((XFS_INODEGC_SHRINKER_COUNT / 2) + 1) | |
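/*
 * With DEF_PRIORITY at its usual value of 12, COUNT evaluates to 4096 and
 * BATCH to 2049, i.e. just over half of COUNT.
 */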
2277 | ||
2278 | static unsigned long | |
2279 | xfs_inodegc_shrinker_count( | |
2280 | struct shrinker *shrink, | |
2281 | struct shrink_control *sc) | |
2282 | { | |
1a86a53d | 2283 | struct xfs_mount *mp = shrink->private_data; |
40b1de00 DW |
2284 | struct xfs_inodegc *gc; |
2285 | int cpu; | |
2286 | ||
2287 | if (!xfs_is_inodegc_enabled(mp)) | |
2288 | return 0; | |
2289 | ||
62334fab | 2290 | for_each_cpu(cpu, &mp->m_inodegc_cpumask) { |
40b1de00 DW |
2291 | gc = per_cpu_ptr(mp->m_inodegc, cpu); |
2292 | if (!llist_empty(&gc->list)) | |
2293 | return XFS_INODEGC_SHRINKER_COUNT; | |
2294 | } | |
2295 | ||
2296 | return 0; | |
2297 | } | |
2298 | ||
2299 | static unsigned long | |
2300 | xfs_inodegc_shrinker_scan( | |
2301 | struct shrinker *shrink, | |
2302 | struct shrink_control *sc) | |
2303 | { | |
1a86a53d | 2304 | struct xfs_mount *mp = shrink->private_data; |
40b1de00 DW |
2305 | struct xfs_inodegc *gc; |
2306 | int cpu; | |
2307 | bool no_items = true; | |
2308 | ||
2309 | if (!xfs_is_inodegc_enabled(mp)) | |
2310 | return SHRINK_STOP; | |
2311 | ||
2312 | trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address); | |
2313 | ||
62334fab | 2314 | for_each_cpu(cpu, &mp->m_inodegc_cpumask) { |
40b1de00 DW |
2315 | gc = per_cpu_ptr(mp->m_inodegc, cpu); |
2316 | if (!llist_empty(&gc->list)) { | |
2317 | unsigned int h = READ_ONCE(gc->shrinker_hits); | |
2318 | ||
2319 | WRITE_ONCE(gc->shrinker_hits, h + 1); | |
7cf2b0f9 | 2320 | mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0); |
40b1de00 DW |
2321 | no_items = false; |
2322 | } | |
2323 | } | |
2324 | ||
2325 | /* | |
2326 | * If there are no inodes to inactivate, we don't want the shrinker | |
2327 | * to think there's deferred work to call us back about. | |
2328 | */ | |
2329 | if (no_items) | |
2330 | return LONG_MAX; | |
2331 | ||
2332 | return SHRINK_STOP; | |
2333 | } | |
2334 | ||
2335 | /* Register a shrinker so we can accelerate inodegc and throttle queuing. */ | |
2336 | int | |
2337 | xfs_inodegc_register_shrinker( | |
2338 | struct xfs_mount *mp) | |
2339 | { | |
1a86a53d QZ |
2340 | mp->m_inodegc_shrinker = shrinker_alloc(SHRINKER_NONSLAB, |
2341 | "xfs-inodegc:%s", | |
2342 | mp->m_super->s_id); | |
2343 | if (!mp->m_inodegc_shrinker) | |
2344 | return -ENOMEM; | |
2345 | ||
2346 | mp->m_inodegc_shrinker->count_objects = xfs_inodegc_shrinker_count; | |
2347 | mp->m_inodegc_shrinker->scan_objects = xfs_inodegc_shrinker_scan; | |
2348 | mp->m_inodegc_shrinker->seeks = 0; | |
2349 | mp->m_inodegc_shrinker->batch = XFS_INODEGC_SHRINKER_BATCH; | |
2350 | mp->m_inodegc_shrinker->private_data = mp; | |
40b1de00 | 2351 | |
1a86a53d | 2352 | shrinker_register(mp->m_inodegc_shrinker); |
40b1de00 | 2353 | |
1a86a53d | 2354 | return 0; |
40b1de00 | 2355 | } |