Commit | Line | Data |
---|---|---|
0b61f8a4 | 1 | // SPDX-License-Identifier: GPL-2.0 |
fe4fa4b8 DC | 2 | /* |
fe4fa4b8 DC | 3 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. |
fe4fa4b8 DC | 4 | * All Rights Reserved. |
fe4fa4b8 DC | 5 | */ |
fe4fa4b8 DC | 6 | #include "xfs.h" |
fe4fa4b8 DC | 7 | #include "xfs_fs.h" |
5467b34b | 8 | #include "xfs_shared.h" |
6ca1c906 | 9 | #include "xfs_format.h" |
239880ef DC | 10 | #include "xfs_log_format.h" |
239880ef DC | 11 | #include "xfs_trans_resv.h" |
fe4fa4b8 | 12 | #include "xfs_sb.h" |
fe4fa4b8 | 13 | #include "xfs_mount.h" |
fe4fa4b8 | 14 | #include "xfs_inode.h" |
239880ef DC | 15 | #include "xfs_trans.h" |
239880ef DC | 16 | #include "xfs_trans_priv.h" |
fe4fa4b8 | 17 | #include "xfs_inode_item.h" |
7d095257 | 18 | #include "xfs_quota.h" |
0b1b213f | 19 | #include "xfs_trace.h" |
6d8b79cf | 20 | #include "xfs_icache.h" |
c24b5dfa | 21 | #include "xfs_bmap_util.h" |
dc06f398 BF | 22 | #include "xfs_dquot_item.h" |
dc06f398 BF | 23 | #include "xfs_dquot.h" |
83104d44 | 24 | #include "xfs_reflink.h" |
bb8a66af | 25 | #include "xfs_ialloc.h" |
fe4fa4b8 | 26 | |
f0e28280 | 27 | #include <linux/iversion.h> |
a167b17e | 28 | |
c809d7e9 DW |
29 | /* Radix tree tags for incore inode tree. */ |
30 | ||
31 | /* Inode is to be reclaimed. */ | |
32 | #define XFS_ICI_RECLAIM_TAG 0 | |
33 | /* Inode has speculative preallocations (posteof or cow) to clean. */ | |
34 | #define XFS_ICI_BLOCKGC_TAG 1 | |
35 | ||
36 | /* | |
37 | * The goal for walking incore inodes. These can correspond with incore inode | |
38 | * radix tree tags when convenient. Avoid existing XFS_IWALK namespace. | |
39 | */ | |
40 | enum xfs_icwalk_goal { | |
41 | /* Goals that are not related to tags; these must be < 0. */ | |
42 | XFS_ICWALK_DQRELE = -1, | |
43 | ||
44 | /* Goals directly associated with tagged inodes. */ | |
45 | XFS_ICWALK_BLOCKGC = XFS_ICI_BLOCKGC_TAG, | |
f1bc5c56 | 46 | XFS_ICWALK_RECLAIM = XFS_ICI_RECLAIM_TAG, |
c809d7e9 DW |
47 | }; |
48 | ||
49 | #define XFS_ICWALK_NULL_TAG (-1U) | |
50 | ||
51 | /* Compute the inode radix tree tag for this goal. */ | |
52 | static inline unsigned int | |
53 | xfs_icwalk_tag(enum xfs_icwalk_goal goal) | |
54 | { | |
55 | return goal < 0 ? XFS_ICWALK_NULL_TAG : goal; | |
56 | } | |
57 | ||
7fdff526 | 58 | static int xfs_icwalk(struct xfs_mount *mp, |
9d5ee837 | 59 | enum xfs_icwalk_goal goal, struct xfs_eofblocks *eofb); |
7fdff526 | 60 | static int xfs_icwalk_ag(struct xfs_perag *pag, |
9d5ee837 | 61 | enum xfs_icwalk_goal goal, struct xfs_eofblocks *eofb); |
df600197 | 62 | |
1ad2cfe0 DW |
63 | /* |
64 | * Private inode cache walk flags for struct xfs_eofblocks. Must not coincide | |
65 | * with XFS_EOF_FLAGS_*. | |
66 | */ | |
67 | #define XFS_ICWALK_FLAG_DROP_UDQUOT (1U << 31) | |
68 | #define XFS_ICWALK_FLAG_DROP_GDQUOT (1U << 30) | |
69 | #define XFS_ICWALK_FLAG_DROP_PDQUOT (1U << 29) | |
70 | ||
f1bc5c56 DW |
71 | /* Stop scanning after icw_scan_limit inodes. */ |
72 | #define XFS_ICWALK_FLAG_SCAN_LIMIT (1U << 28) | |
73 | ||
1ad2cfe0 DW |
74 | #define XFS_ICWALK_PRIVATE_FLAGS (XFS_ICWALK_FLAG_DROP_UDQUOT | \ |
75 | XFS_ICWALK_FLAG_DROP_GDQUOT | \ | |
f1bc5c56 DW |
76 | XFS_ICWALK_FLAG_DROP_PDQUOT | \ |
77 | XFS_ICWALK_FLAG_SCAN_LIMIT) | |
1ad2cfe0 | 78 | |
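The three DROP_*DQUOT bits and SCAN_LIMIT are packed into the top of the 32-bit eof_flags word precisely so they cannot collide with the user-visible XFS_EOF_FLAGS_* values mentioned above. A compile-time guard along the following lines would catch an accidental overlap; this is only a sketch, and it assumes XFS_EOF_FLAGS_VALID (the mask of user-visible flags in xfs_fs.h) is the right name for that mask.

```c
/*
 * Sketch: fail the build if a private inode-walk flag ever overlaps one of
 * the user-visible XFS_EOF_FLAGS_* bits (assumes XFS_EOF_FLAGS_VALID is the
 * mask of those bits).
 */
static inline void xfs_icwalk_flags_selfcheck(void)
{
	BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_EOF_FLAGS_VALID);
}
```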
33479e05 DC |
79 | /* |
80 | * Allocate and initialise an xfs_inode. | |
81 | */ | |
638f4416 | 82 | struct xfs_inode * |
33479e05 DC |
83 | xfs_inode_alloc( |
84 | struct xfs_mount *mp, | |
85 | xfs_ino_t ino) | |
86 | { | |
87 | struct xfs_inode *ip; | |
88 | ||
89 | /* | |
3050bd0b CM |
90 | * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL |
91 | * and return NULL here on ENOMEM. | |
33479e05 | 92 | */ |
3050bd0b CM |
93 | ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL); |
94 | ||
33479e05 | 95 | if (inode_init_always(mp->m_super, VFS_I(ip))) { |
377bcd5f | 96 | kmem_cache_free(xfs_inode_zone, ip); |
33479e05 DC |
97 | return NULL; |
98 | } | |
99 | ||
c19b3b05 DC |
100 | /* VFS doesn't initialise i_mode! */ |
101 | VFS_I(ip)->i_mode = 0; | |
102 | ||
ff6d6af2 | 103 | XFS_STATS_INC(mp, vn_active); |
33479e05 | 104 | ASSERT(atomic_read(&ip->i_pincount) == 0); |
33479e05 DC |
105 | ASSERT(ip->i_ino == 0); |
106 | ||
33479e05 DC |
107 | /* initialise the xfs inode */ |
108 | ip->i_ino = ino; | |
109 | ip->i_mount = mp; | |
110 | memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); | |
111 | ip->i_afp = NULL; | |
3993baeb | 112 | ip->i_cowfp = NULL; |
3ba738df | 113 | memset(&ip->i_df, 0, sizeof(ip->i_df)); |
33479e05 DC |
114 | ip->i_flags = 0; |
115 | ip->i_delayed_blks = 0; | |
3e09ab8f | 116 | ip->i_diflags2 = mp->m_ino_geo.new_diflags2; |
6e73a545 | 117 | ip->i_nblocks = 0; |
7821ea30 | 118 | ip->i_forkoff = 0; |
6772c1f1 DW |
119 | ip->i_sick = 0; |
120 | ip->i_checked = 0; | |
cb357bf3 DW |
121 | INIT_WORK(&ip->i_ioend_work, xfs_end_io); |
122 | INIT_LIST_HEAD(&ip->i_ioend_list); | |
123 | spin_lock_init(&ip->i_ioend_lock); | |
33479e05 DC |
124 | |
125 | return ip; | |
126 | } | |
127 | ||
128 | STATIC void | |
129 | xfs_inode_free_callback( | |
130 | struct rcu_head *head) | |
131 | { | |
132 | struct inode *inode = container_of(head, struct inode, i_rcu); | |
133 | struct xfs_inode *ip = XFS_I(inode); | |
134 | ||
c19b3b05 | 135 | switch (VFS_I(ip)->i_mode & S_IFMT) { |
33479e05 DC |
136 | case S_IFREG: |
137 | case S_IFDIR: | |
138 | case S_IFLNK: | |
ef838512 | 139 | xfs_idestroy_fork(&ip->i_df); |
33479e05 DC |
140 | break; |
141 | } | |
142 | ||
ef838512 CH |
143 | if (ip->i_afp) { |
144 | xfs_idestroy_fork(ip->i_afp); | |
145 | kmem_cache_free(xfs_ifork_zone, ip->i_afp); | |
146 | } | |
147 | if (ip->i_cowfp) { | |
148 | xfs_idestroy_fork(ip->i_cowfp); | |
149 | kmem_cache_free(xfs_ifork_zone, ip->i_cowfp); | |
150 | } | |
33479e05 | 151 | if (ip->i_itemp) { |
22525c17 DC |
152 | ASSERT(!test_bit(XFS_LI_IN_AIL, |
153 | &ip->i_itemp->ili_item.li_flags)); | |
33479e05 DC |
154 | xfs_inode_item_destroy(ip); |
155 | ip->i_itemp = NULL; | |
156 | } | |
157 | ||
377bcd5f | 158 | kmem_cache_free(xfs_inode_zone, ip); |
1f2dcfe8 DC |
159 | } |
160 | ||
8a17d7dd DC |
161 | static void |
162 | __xfs_inode_free( | |
163 | struct xfs_inode *ip) | |
164 | { | |
165 | /* asserts to verify all state is correct here */ | |
166 | ASSERT(atomic_read(&ip->i_pincount) == 0); | |
48d55e2a | 167 | ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list)); |
8a17d7dd DC |
168 | XFS_STATS_DEC(ip->i_mount, vn_active); |
169 | ||
170 | call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); | |
171 | } | |
172 | ||
1f2dcfe8 DC |
173 | void |
174 | xfs_inode_free( | |
175 | struct xfs_inode *ip) | |
176 | { | |
718ecc50 | 177 | ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING)); |
98efe8af | 178 | |
33479e05 DC |
179 | /* |
180 | * Because we use RCU freeing we need to ensure the inode always | |
181 | * appears to be reclaimed with an invalid inode number when in the | |
182 | * free state. The ip->i_flags_lock provides the barrier against lookup | |
183 | * races. | |
184 | */ | |
185 | spin_lock(&ip->i_flags_lock); | |
186 | ip->i_flags = XFS_IRECLAIM; | |
187 | ip->i_ino = 0; | |
188 | spin_unlock(&ip->i_flags_lock); | |
189 | ||
8a17d7dd | 190 | __xfs_inode_free(ip); |
33479e05 DC |
191 | } |
192 | ||
ad438c40 | 193 | /* |
02511a5a DC |
194 | * Queue background inode reclaim work if there are reclaimable inodes and there |
195 | * isn't reclaim work already scheduled or in progress. | |
ad438c40 DC |
196 | */ |
197 | static void | |
198 | xfs_reclaim_work_queue( | |
199 | struct xfs_mount *mp) | |
200 | { | |
201 | ||
202 | rcu_read_lock(); | |
203 | if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { | |
204 | queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, | |
205 | msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); | |
206 | } | |
207 | rcu_read_unlock(); | |
208 | } | |
209 | ||
c076ae7a DW |
210 | /* |
211 | * Background scanning to trim preallocated space. This is queued based on the | |
212 | * 'speculative_prealloc_lifetime' tunable (5m by default). | |
213 | */ | |
214 | static inline void | |
215 | xfs_blockgc_queue( | |
ad438c40 | 216 | struct xfs_perag *pag) |
c076ae7a DW |
217 | { |
218 | rcu_read_lock(); | |
219 | if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) | |
220 | queue_delayed_work(pag->pag_mount->m_gc_workqueue, | |
221 | &pag->pag_blockgc_work, | |
222 | msecs_to_jiffies(xfs_blockgc_secs * 1000)); | |
223 | rcu_read_unlock(); | |
224 | } | |
225 | ||
226 | /* Set a tag on both the AG incore inode tree and the AG radix tree. */ | |
227 | static void | |
228 | xfs_perag_set_inode_tag( | |
229 | struct xfs_perag *pag, | |
230 | xfs_agino_t agino, | |
231 | unsigned int tag) | |
ad438c40 DC |
232 | { |
233 | struct xfs_mount *mp = pag->pag_mount; | |
c076ae7a | 234 | bool was_tagged; |
ad438c40 | 235 | |
95989c46 | 236 | lockdep_assert_held(&pag->pag_ici_lock); |
c076ae7a DW |
237 | |
238 | was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag); | |
239 | radix_tree_tag_set(&pag->pag_ici_root, agino, tag); | |
240 | ||
241 | if (tag == XFS_ICI_RECLAIM_TAG) | |
242 | pag->pag_ici_reclaimable++; | |
243 | ||
244 | if (was_tagged) | |
ad438c40 DC |
245 | return; |
246 | ||
c076ae7a | 247 | /* propagate the tag up into the perag radix tree */ |
ad438c40 | 248 | spin_lock(&mp->m_perag_lock); |
c076ae7a | 249 | radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag); |
ad438c40 DC |
250 | spin_unlock(&mp->m_perag_lock); |
251 | ||
c076ae7a DW |
252 | /* start background work */ |
253 | switch (tag) { | |
254 | case XFS_ICI_RECLAIM_TAG: | |
255 | xfs_reclaim_work_queue(mp); | |
256 | break; | |
257 | case XFS_ICI_BLOCKGC_TAG: | |
258 | xfs_blockgc_queue(pag); | |
259 | break; | |
260 | } | |
ad438c40 | 261 | |
c076ae7a | 262 | trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_); |
ad438c40 DC |
263 | } |
264 | ||
c076ae7a | 265 | /* Clear a tag on both the AG incore inode tree and the AG radix tree. */ |
ad438c40 | 266 | static void |
c076ae7a DW |
267 | xfs_perag_clear_inode_tag( |
268 | struct xfs_perag *pag, | |
269 | xfs_agino_t agino, | |
270 | unsigned int tag) | |
ad438c40 DC |
271 | { |
272 | struct xfs_mount *mp = pag->pag_mount; | |
273 | ||
95989c46 | 274 | lockdep_assert_held(&pag->pag_ici_lock); |
c076ae7a DW |
275 | |
276 | /* | |
277 | * Reclaim can signal (with a null agino) that it cleared its own tag | |
278 | * by removing the inode from the radix tree. | |
279 | */ | |
280 | if (agino != NULLAGINO) | |
281 | radix_tree_tag_clear(&pag->pag_ici_root, agino, tag); | |
282 | else | |
283 | ASSERT(tag == XFS_ICI_RECLAIM_TAG); | |
284 | ||
285 | if (tag == XFS_ICI_RECLAIM_TAG) | |
286 | pag->pag_ici_reclaimable--; | |
287 | ||
288 | if (radix_tree_tagged(&pag->pag_ici_root, tag)) | |
ad438c40 DC |
289 | return; |
290 | ||
c076ae7a | 291 | /* clear the tag from the perag radix tree */ |
ad438c40 | 292 | spin_lock(&mp->m_perag_lock); |
c076ae7a | 293 | radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag); |
ad438c40 | 294 | spin_unlock(&mp->m_perag_lock); |
ad438c40 | 295 | |
c076ae7a DW |
296 | trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_); |
297 | } | |
ad438c40 DC |
298 | |
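The point of mirroring the per-inode tag up into the per-mount m_perag_tree is that a scanner can first find only the AGs that contain tagged inodes, and then gang-look-up the tagged inodes inside each AG's pag_ici_root. The sketch below shows that consumer side of the scheme; it is illustrative only (the real walker is xfs_icwalk_ag(), declared near the top of this file but not shown in this excerpt), and example_walk_tagged is a made-up name.

```c
/*
 * Sketch: visit every AG that has at least one inode carrying 'tag', then
 * batch-look-up the tagged inodes in that AG's incore inode radix tree.
 */
static void example_walk_tagged(struct xfs_mount *mp, unsigned int tag)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		agno = 0;

	while ((pag = xfs_perag_get_tag(mp, agno, tag))) {
		struct xfs_inode	*batch[32];
		int			nr_found, i;

		agno = pag->pag_agno + 1;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **)batch, 0, ARRAY_SIZE(batch), tag);
		for (i = 0; i < nr_found; i++) {
			/* igrab-style validity checks and per-inode work */
		}
		rcu_read_unlock();

		xfs_perag_put(pag);
	}
}
```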
299 | /* | |
300 | * We set the inode flag atomically with the radix tree tag. | |
301 | * Once we get tag lookups on the radix tree, this inode flag | |
302 | * can go away. | |
303 | */ | |
304 | void | |
c076ae7a | 305 | xfs_inode_mark_reclaimable( |
ad438c40 DC |
306 | struct xfs_inode *ip) |
307 | { | |
308 | struct xfs_mount *mp = ip->i_mount; | |
309 | struct xfs_perag *pag; | |
310 | ||
311 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); | |
312 | spin_lock(&pag->pag_ici_lock); | |
313 | spin_lock(&ip->i_flags_lock); | |
314 | ||
c076ae7a DW |
315 | xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), |
316 | XFS_ICI_RECLAIM_TAG); | |
ad438c40 DC |
317 | __xfs_iflags_set(ip, XFS_IRECLAIMABLE); |
318 | ||
319 | spin_unlock(&ip->i_flags_lock); | |
320 | spin_unlock(&pag->pag_ici_lock); | |
321 | xfs_perag_put(pag); | |
322 | } | |
323 | ||
7fdff526 | 324 | static inline void |
ae2c4ac2 BF |
325 | xfs_inew_wait( |
326 | struct xfs_inode *ip) | |
327 | { | |
328 | wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT); | |
329 | DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT); | |
330 | ||
331 | do { | |
21417136 | 332 | prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); |
ae2c4ac2 BF |
333 | if (!xfs_iflags_test(ip, XFS_INEW)) |
334 | break; | |
335 | schedule(); | |
336 | } while (true); | |
21417136 | 337 | finish_wait(wq, &wait.wq_entry); |
ae2c4ac2 BF |
338 | } |
339 | ||
50997470 DC |
340 | /* |
341 | * When we recycle a reclaimable inode, we need to re-initialise the VFS inode | |
342 | * part of the structure. This is made more complex by the fact that we store |
343 | * information about the on-disk values in the VFS inode and so we can't just | |
83e06f21 | 344 | * overwrite the values unconditionally. Hence we save the parameters we |
50997470 | 345 | * need to retain across reinitialisation, and rewrite them into the VFS inode |
83e06f21 | 346 | * after reinitialisation even if it fails. |
50997470 DC |
347 | */ |
348 | static int | |
349 | xfs_reinit_inode( | |
350 | struct xfs_mount *mp, | |
351 | struct inode *inode) | |
352 | { | |
353 | int error; | |
54d7b5c1 | 354 | uint32_t nlink = inode->i_nlink; |
9e9a2674 | 355 | uint32_t generation = inode->i_generation; |
f0e28280 | 356 | uint64_t version = inode_peek_iversion(inode); |
c19b3b05 | 357 | umode_t mode = inode->i_mode; |
acd1d715 | 358 | dev_t dev = inode->i_rdev; |
3d8f2821 CH |
359 | kuid_t uid = inode->i_uid; |
360 | kgid_t gid = inode->i_gid; | |
50997470 DC |
361 | |
362 | error = inode_init_always(mp->m_super, inode); | |
363 | ||
54d7b5c1 | 364 | set_nlink(inode, nlink); |
9e9a2674 | 365 | inode->i_generation = generation; |
f0e28280 | 366 | inode_set_iversion_queried(inode, version); |
c19b3b05 | 367 | inode->i_mode = mode; |
acd1d715 | 368 | inode->i_rdev = dev; |
3d8f2821 CH |
369 | inode->i_uid = uid; |
370 | inode->i_gid = gid; | |
50997470 DC |
371 | return error; |
372 | } | |
373 | ||
afca6c5b DC |
374 | /* |
375 | * If we are allocating a new inode, then check that what was returned is |
376 | * actually a free, empty inode. If we are not allocating an inode, |
377 | * then check that we didn't find a free inode. |
378 | * | |
379 | * Returns: | |
380 | * 0 if the inode free state matches the lookup context | |
381 | * -ENOENT if the inode is free and we are not allocating | |
382 | * -EFSCORRUPTED if there is any state mismatch at all | |
383 | */ | |
384 | static int | |
385 | xfs_iget_check_free_state( | |
386 | struct xfs_inode *ip, | |
387 | int flags) | |
388 | { | |
389 | if (flags & XFS_IGET_CREATE) { | |
390 | /* should be a free inode */ | |
391 | if (VFS_I(ip)->i_mode != 0) { | |
392 | xfs_warn(ip->i_mount, | |
393 | "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)", | |
394 | ip->i_ino, VFS_I(ip)->i_mode); | |
395 | return -EFSCORRUPTED; | |
396 | } | |
397 | ||
6e73a545 | 398 | if (ip->i_nblocks != 0) { |
afca6c5b DC |
399 | xfs_warn(ip->i_mount, |
400 | "Corruption detected! Free inode 0x%llx has blocks allocated!", | |
401 | ip->i_ino); | |
402 | return -EFSCORRUPTED; | |
403 | } | |
404 | return 0; | |
405 | } | |
406 | ||
407 | /* should be an allocated inode */ | |
408 | if (VFS_I(ip)->i_mode == 0) | |
409 | return -ENOENT; | |
410 | ||
411 | return 0; | |
412 | } | |
413 | ||
33479e05 DC |
414 | /* |
415 | * Check the validity of the inode we just found in the cache |
416 | */ | |
417 | static int | |
418 | xfs_iget_cache_hit( | |
419 | struct xfs_perag *pag, | |
420 | struct xfs_inode *ip, | |
421 | xfs_ino_t ino, | |
422 | int flags, | |
423 | int lock_flags) __releases(RCU) | |
424 | { | |
425 | struct inode *inode = VFS_I(ip); | |
426 | struct xfs_mount *mp = ip->i_mount; | |
427 | int error; | |
428 | ||
429 | /* | |
430 | * check for re-use of an inode within an RCU grace period due to the | |
431 | * radix tree nodes not being updated yet. We monitor for this by | |
432 | * setting the inode number to zero before freeing the inode structure. | |
433 | * If the inode has been reallocated and set up, then the inode number | |
434 | * will not match, so check for that, too. | |
435 | */ | |
436 | spin_lock(&ip->i_flags_lock); | |
437 | if (ip->i_ino != ino) { | |
438 | trace_xfs_iget_skip(ip); | |
ff6d6af2 | 439 | XFS_STATS_INC(mp, xs_ig_frecycle); |
2451337d | 440 | error = -EAGAIN; |
33479e05 DC |
441 | goto out_error; |
442 | } | |
443 | ||
444 | ||
445 | /* | |
446 | * If we are racing with another cache hit that is currently | |
447 | * instantiating this inode or currently recycling it out of | |
448 | * reclaimable state, wait for the initialisation to complete |
449 | * before continuing. | |
450 | * | |
451 | * XXX(hch): eventually we should do something equivalent to | |
452 | * wait_on_inode to wait for these flags to be cleared | |
453 | * instead of polling for it. | |
454 | */ | |
455 | if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { | |
456 | trace_xfs_iget_skip(ip); | |
ff6d6af2 | 457 | XFS_STATS_INC(mp, xs_ig_frecycle); |
2451337d | 458 | error = -EAGAIN; |
33479e05 DC |
459 | goto out_error; |
460 | } | |
461 | ||
462 | /* | |
afca6c5b DC |
463 | * Check the inode free state is valid. This also detects lookup |
464 | * racing with unlinks. | |
33479e05 | 465 | */ |
afca6c5b DC |
466 | error = xfs_iget_check_free_state(ip, flags); |
467 | if (error) | |
33479e05 | 468 | goto out_error; |
33479e05 DC |
469 | |
470 | /* | |
471 | * If IRECLAIMABLE is set, we've torn down the VFS inode already. | |
472 | * Need to carefully get it back into a usable state. |
473 | */ | |
474 | if (ip->i_flags & XFS_IRECLAIMABLE) { | |
475 | trace_xfs_iget_reclaim(ip); | |
476 | ||
378f681c DW |
477 | if (flags & XFS_IGET_INCORE) { |
478 | error = -EAGAIN; | |
479 | goto out_error; | |
480 | } | |
481 | ||
33479e05 DC |
482 | /* |
483 | * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode | |
484 | * from stomping over us while we recycle the inode. We can't | |
485 | * clear the radix tree reclaimable tag yet as it requires | |
486 | * pag_ici_lock to be held exclusive. | |
487 | */ | |
488 | ip->i_flags |= XFS_IRECLAIM; | |
489 | ||
490 | spin_unlock(&ip->i_flags_lock); | |
491 | rcu_read_unlock(); | |
492 | ||
d45344d6 | 493 | ASSERT(!rwsem_is_locked(&inode->i_rwsem)); |
50997470 | 494 | error = xfs_reinit_inode(mp, inode); |
33479e05 | 495 | if (error) { |
756baca2 | 496 | bool wake; |
33479e05 DC |
497 | /* |
498 | * Re-initializing the inode failed, and we are in deep | |
499 | * trouble. Try to re-add it to the reclaim list. | |
500 | */ | |
501 | rcu_read_lock(); | |
502 | spin_lock(&ip->i_flags_lock); | |
756baca2 | 503 | wake = !!__xfs_iflags_test(ip, XFS_INEW); |
33479e05 | 504 | ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); |
756baca2 BF |
505 | if (wake) |
506 | wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); | |
33479e05 DC |
507 | ASSERT(ip->i_flags & XFS_IRECLAIMABLE); |
508 | trace_xfs_iget_reclaim_fail(ip); | |
509 | goto out_error; | |
510 | } | |
511 | ||
512 | spin_lock(&pag->pag_ici_lock); | |
513 | spin_lock(&ip->i_flags_lock); | |
514 | ||
515 | /* | |
516 | * Clear the per-lifetime state in the inode as we are now | |
517 | * effectively a new inode and need to return to the initial | |
518 | * state before reuse occurs. | |
519 | */ | |
520 | ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; | |
521 | ip->i_flags |= XFS_INEW; | |
c076ae7a DW |
522 | xfs_perag_clear_inode_tag(pag, |
523 | XFS_INO_TO_AGINO(pag->pag_mount, ino), | |
524 | XFS_ICI_RECLAIM_TAG); | |
33479e05 | 525 | inode->i_state = I_NEW; |
6772c1f1 DW |
526 | ip->i_sick = 0; |
527 | ip->i_checked = 0; | |
33479e05 | 528 | |
33479e05 DC |
529 | spin_unlock(&ip->i_flags_lock); |
530 | spin_unlock(&pag->pag_ici_lock); | |
531 | } else { | |
532 | /* If the VFS inode is being torn down, pause and try again. */ | |
533 | if (!igrab(inode)) { | |
534 | trace_xfs_iget_skip(ip); | |
2451337d | 535 | error = -EAGAIN; |
33479e05 DC |
536 | goto out_error; |
537 | } | |
538 | ||
539 | /* We've got a live one. */ | |
540 | spin_unlock(&ip->i_flags_lock); | |
541 | rcu_read_unlock(); | |
542 | trace_xfs_iget_hit(ip); | |
543 | } | |
544 | ||
545 | if (lock_flags != 0) | |
546 | xfs_ilock(ip, lock_flags); | |
547 | ||
378f681c | 548 | if (!(flags & XFS_IGET_INCORE)) |
dae2f8ed | 549 | xfs_iflags_clear(ip, XFS_ISTALE); |
ff6d6af2 | 550 | XFS_STATS_INC(mp, xs_ig_found); |
33479e05 DC |
551 | |
552 | return 0; | |
553 | ||
554 | out_error: | |
555 | spin_unlock(&ip->i_flags_lock); | |
556 | rcu_read_unlock(); | |
557 | return error; | |
558 | } | |
559 | ||
560 | ||
561 | static int | |
562 | xfs_iget_cache_miss( | |
563 | struct xfs_mount *mp, | |
564 | struct xfs_perag *pag, | |
565 | xfs_trans_t *tp, | |
566 | xfs_ino_t ino, | |
567 | struct xfs_inode **ipp, | |
568 | int flags, | |
569 | int lock_flags) | |
570 | { | |
571 | struct xfs_inode *ip; | |
572 | int error; | |
573 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); | |
574 | int iflags; | |
575 | ||
576 | ip = xfs_inode_alloc(mp, ino); | |
577 | if (!ip) | |
2451337d | 578 | return -ENOMEM; |
33479e05 | 579 | |
bb8a66af | 580 | error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags); |
33479e05 DC |
581 | if (error) |
582 | goto out_destroy; | |
583 | ||
bb8a66af CH |
584 | /* |
585 | * For version 5 superblocks, if we are initialising a new inode and we | |
586 | * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can | |
587 | * simply build the new inode core with a random generation number. | |
588 | * | |
589 | * For version 4 (and older) superblocks, log recovery is dependent on | |
965e0a1a | 590 | * the i_flushiter field being initialised from the current on-disk |
bb8a66af CH |
591 | * value and hence we must also read the inode off disk even when |
592 | * initializing new inodes. | |
593 | */ | |
594 | if (xfs_sb_version_has_v3inode(&mp->m_sb) && | |
595 | (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) { | |
596 | VFS_I(ip)->i_generation = prandom_u32(); | |
597 | } else { | |
bb8a66af CH |
598 | struct xfs_buf *bp; |
599 | ||
af9dcdde | 600 | error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp); |
bb8a66af CH |
601 | if (error) |
602 | goto out_destroy; | |
603 | ||
af9dcdde CH |
604 | error = xfs_inode_from_disk(ip, |
605 | xfs_buf_offset(bp, ip->i_imap.im_boffset)); | |
bb8a66af CH |
606 | if (!error) |
607 | xfs_buf_set_ref(bp, XFS_INO_REF); | |
608 | xfs_trans_brelse(tp, bp); | |
609 | ||
610 | if (error) | |
611 | goto out_destroy; | |
612 | } | |
613 | ||
33479e05 DC |
614 | trace_xfs_iget_miss(ip); |
615 | ||
ee457001 | 616 | /* |
afca6c5b DC |
617 | * Check the inode free state is valid. This also detects lookup |
618 | * racing with unlinks. | |
ee457001 | 619 | */ |
afca6c5b DC |
620 | error = xfs_iget_check_free_state(ip, flags); |
621 | if (error) | |
33479e05 | 622 | goto out_destroy; |
33479e05 DC |
623 | |
624 | /* | |
625 | * Preload the radix tree so we can insert safely under the | |
626 | * write spinlock. Note that we cannot sleep inside the preload | |
627 | * region. Since we can be called from transaction context, don't | |
628 | * recurse into the file system. | |
629 | */ | |
630 | if (radix_tree_preload(GFP_NOFS)) { | |
2451337d | 631 | error = -EAGAIN; |
33479e05 DC |
632 | goto out_destroy; |
633 | } | |
634 | ||
635 | /* | |
636 | * Because the inode hasn't been added to the radix-tree yet it can't | |
637 | * be found by another thread, so we can do the non-sleeping lock here. | |
638 | */ | |
639 | if (lock_flags) { | |
640 | if (!xfs_ilock_nowait(ip, lock_flags)) | |
641 | BUG(); | |
642 | } | |
643 | ||
644 | /* | |
645 | * These values must be set before inserting the inode into the radix | |
646 | * tree because, the moment it is inserted, a concurrent lookup (allowed by |
647 | * the RCU locking mechanism) can find it, and that lookup must see that this |
648 | * is an inode currently under construction (i.e. that XFS_INEW is set). | |
649 | * The ip->i_flags_lock that protects the XFS_INEW flag forms the | |
650 | * memory barrier that ensures this detection works correctly at lookup | |
651 | * time. | |
652 | */ | |
653 | iflags = XFS_INEW; | |
654 | if (flags & XFS_IGET_DONTCACHE) | |
2c567af4 | 655 | d_mark_dontcache(VFS_I(ip)); |
113a5683 CS |
656 | ip->i_udquot = NULL; |
657 | ip->i_gdquot = NULL; | |
92f8ff73 | 658 | ip->i_pdquot = NULL; |
33479e05 DC |
659 | xfs_iflags_set(ip, iflags); |
660 | ||
661 | /* insert the new inode */ | |
662 | spin_lock(&pag->pag_ici_lock); | |
663 | error = radix_tree_insert(&pag->pag_ici_root, agino, ip); | |
664 | if (unlikely(error)) { | |
665 | WARN_ON(error != -EEXIST); | |
ff6d6af2 | 666 | XFS_STATS_INC(mp, xs_ig_dup); |
2451337d | 667 | error = -EAGAIN; |
33479e05 DC |
668 | goto out_preload_end; |
669 | } | |
670 | spin_unlock(&pag->pag_ici_lock); | |
671 | radix_tree_preload_end(); | |
672 | ||
673 | *ipp = ip; | |
674 | return 0; | |
675 | ||
676 | out_preload_end: | |
677 | spin_unlock(&pag->pag_ici_lock); | |
678 | radix_tree_preload_end(); | |
679 | if (lock_flags) | |
680 | xfs_iunlock(ip, lock_flags); | |
681 | out_destroy: | |
682 | __destroy_inode(VFS_I(ip)); | |
683 | xfs_inode_free(ip); | |
684 | return error; | |
685 | } | |
686 | ||
687 | /* | |
02511a5a DC |
688 | * Look up an inode by number in the given file system. The inode is looked up |
689 | * in the cache held in each AG. If the inode is found in the cache, initialise | |
690 | * the vfs inode if necessary. | |
33479e05 | 691 | * |
02511a5a DC |
692 | * If it is not in core, read it in from the file system's device, add it to the |
693 | * cache and initialise the vfs inode. | |
33479e05 DC |
694 | * |
695 | * The inode is locked according to the value of the lock_flags parameter. | |
02511a5a DC |
696 | * Inode lookup is only done during metadata operations and not as part of the |
697 | * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup. | |
33479e05 DC |
698 | */ |
699 | int | |
700 | xfs_iget( | |
02511a5a DC |
701 | struct xfs_mount *mp, |
702 | struct xfs_trans *tp, | |
703 | xfs_ino_t ino, | |
704 | uint flags, | |
705 | uint lock_flags, | |
706 | struct xfs_inode **ipp) | |
33479e05 | 707 | { |
02511a5a DC |
708 | struct xfs_inode *ip; |
709 | struct xfs_perag *pag; | |
710 | xfs_agino_t agino; | |
711 | int error; | |
33479e05 | 712 | |
33479e05 DC |
713 | ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0); |
714 | ||
715 | /* reject inode numbers outside existing AGs */ | |
716 | if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) | |
2451337d | 717 | return -EINVAL; |
33479e05 | 718 | |
ff6d6af2 | 719 | XFS_STATS_INC(mp, xs_ig_attempts); |
8774cf8b | 720 | |
33479e05 DC |
721 | /* get the perag structure and ensure that it's inode capable */ |
722 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); | |
723 | agino = XFS_INO_TO_AGINO(mp, ino); | |
724 | ||
725 | again: | |
726 | error = 0; | |
727 | rcu_read_lock(); | |
728 | ip = radix_tree_lookup(&pag->pag_ici_root, agino); | |
729 | ||
730 | if (ip) { | |
731 | error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); | |
732 | if (error) | |
733 | goto out_error_or_again; | |
734 | } else { | |
735 | rcu_read_unlock(); | |
378f681c | 736 | if (flags & XFS_IGET_INCORE) { |
ed438b47 | 737 | error = -ENODATA; |
378f681c DW |
738 | goto out_error_or_again; |
739 | } | |
ff6d6af2 | 740 | XFS_STATS_INC(mp, xs_ig_missed); |
33479e05 DC |
741 | |
742 | error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, | |
743 | flags, lock_flags); | |
744 | if (error) | |
745 | goto out_error_or_again; | |
746 | } | |
747 | xfs_perag_put(pag); | |
748 | ||
749 | *ipp = ip; | |
750 | ||
751 | /* | |
58c90473 | 752 | * If we have a real type for an on-disk inode, we can set up the inode |
33479e05 DC |
753 | * now. If it's a new inode being created, xfs_ialloc will handle it. |
754 | */ | |
c19b3b05 | 755 | if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0) |
58c90473 | 756 | xfs_setup_existing_inode(ip); |
33479e05 DC |
757 | return 0; |
758 | ||
759 | out_error_or_again: | |
378f681c | 760 | if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) { |
33479e05 DC |
761 | delay(1); |
762 | goto again; | |
763 | } | |
764 | xfs_perag_put(pag); | |
765 | return error; | |
766 | } | |
767 | ||
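For orientation, a metadata-path caller of xfs_iget() typically looks like the sketch below: take a reference and the ILOCK through the lookup, do the work, then unlock and drop the reference with xfs_irele(). This is a minimal illustration (example_use_inode is a made-up name and error handling is reduced to the bare minimum), not a copy of any particular caller.

```c
/* Sketch: look up an inode for a metadata operation and release it again. */
static int example_use_inode(struct xfs_mount *mp, xfs_ino_t ino)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;

	/* ...examine the inode while holding the shared ILOCK... */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	xfs_irele(ip);		/* drop the reference the lookup took */
	return 0;
}
```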
378f681c DW |
768 | /* |
769 | * "Is this a cached inode that's also allocated?" | |
770 | * | |
771 | * Look up an inode by number in the given file system. If the inode is | |
772 | * in cache and isn't in purgatory, return 1 if the inode is allocated | |
773 | * and 0 if it is not. For all other cases (not in cache, being torn | |
774 | * down, etc.), return a negative error code. | |
775 | * | |
776 | * The caller has to prevent inode allocation and freeing activity, | |
777 | * presumably by locking the AGI buffer. This is to ensure that an | |
778 | * inode cannot transition from allocated to freed until the caller is | |
779 | * ready to allow that. If the inode is in an intermediate state (new, | |
780 | * reclaimable, or being reclaimed), -EAGAIN will be returned; if the | |
781 | * inode is not in the cache, -ENOENT will be returned. The caller must | |
782 | * deal with these scenarios appropriately. | |
783 | * | |
784 | * This is a specialized use case for the online scrubber; if you're | |
785 | * reading this, you probably want xfs_iget. | |
786 | */ | |
787 | int | |
788 | xfs_icache_inode_is_allocated( | |
789 | struct xfs_mount *mp, | |
790 | struct xfs_trans *tp, | |
791 | xfs_ino_t ino, | |
792 | bool *inuse) | |
793 | { | |
794 | struct xfs_inode *ip; | |
795 | int error; | |
796 | ||
797 | error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip); | |
798 | if (error) | |
799 | return error; | |
800 | ||
801 | *inuse = !!(VFS_I(ip)->i_mode); | |
44a8736b | 802 | xfs_irele(ip); |
378f681c DW |
803 | return 0; |
804 | } | |
805 | ||
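The calling convention described in the comment above can be illustrated with a short scrub-side sketch: the caller is assumed to already hold the AGI buffer locked so the allocation state cannot change underneath it, and the -ENOENT/-EAGAIN results are mapped onto "fall back to the on-disk record". The helper name below is made up.

```c
/*
 * Sketch: ask the inode cache whether @ino is allocated.  The caller is
 * assumed to hold the AGI buffer for @ino's AG locked already.
 */
static int example_check_cached_inode(struct xfs_mount *mp, xfs_ino_t ino)
{
	bool	inuse;
	int	error;

	error = xfs_icache_inode_is_allocated(mp, NULL, ino, &inuse);
	if (error == -ENOENT || error == -EAGAIN)
		return 0;	/* not cached or in flux; use the inobt record */
	if (error)
		return error;

	/* ...compare 'inuse' against the on-disk inode btree record... */
	return 0;
}
```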
1ad2cfe0 | 806 | #ifdef CONFIG_XFS_QUOTA |
b9baaef4 DW |
807 | /* Decide if we want to grab this inode to drop its dquots. */ |
808 | static bool | |
809 | xfs_dqrele_igrab( | |
810 | struct xfs_inode *ip) | |
811 | { | |
812 | bool ret = false; | |
813 | ||
814 | ASSERT(rcu_read_lock_held()); | |
815 | ||
816 | /* Check for stale RCU freed inode */ | |
817 | spin_lock(&ip->i_flags_lock); | |
818 | if (!ip->i_ino) | |
819 | goto out_unlock; | |
820 | ||
821 | /* | |
822 | * Skip inodes that are anywhere in the reclaim machinery because we | |
823 | * drop dquots before tagging an inode for reclamation. | |
824 | */ | |
825 | if (ip->i_flags & (XFS_IRECLAIM | XFS_IRECLAIMABLE)) | |
826 | goto out_unlock; | |
827 | ||
828 | /* | |
829 | * The inode looks alive; try to grab a VFS reference so that it won't | |
830 | * get destroyed. If we got the reference, return true to say that | |
831 | * we grabbed the inode. | |
832 | * | |
833 | * If we can't get the reference, then we know the inode had its VFS | |
834 | * state torn down and hasn't yet entered the reclaim machinery. Since | |
835 | * we also know that dquots are detached from an inode before it enters | |
836 | * reclaim, we can skip the inode. | |
837 | */ | |
838 | ret = igrab(VFS_I(ip)) != NULL; | |
839 | ||
840 | out_unlock: | |
841 | spin_unlock(&ip->i_flags_lock); | |
842 | return ret; | |
843 | } | |
844 | ||
1ad2cfe0 | 845 | /* Drop this inode's dquots. */ |
594ab00b | 846 | static void |
1ad2cfe0 DW |
847 | xfs_dqrele_inode( |
848 | struct xfs_inode *ip, | |
9d5ee837 | 849 | struct xfs_eofblocks *eofb) |
1ad2cfe0 | 850 | { |
9d2793ce DW |
851 | if (xfs_iflags_test(ip, XFS_INEW)) |
852 | xfs_inew_wait(ip); | |
853 | ||
1ad2cfe0 DW |
854 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
855 | if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_UDQUOT) { | |
856 | xfs_qm_dqrele(ip->i_udquot); | |
857 | ip->i_udquot = NULL; | |
858 | } | |
859 | if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_GDQUOT) { | |
860 | xfs_qm_dqrele(ip->i_gdquot); | |
861 | ip->i_gdquot = NULL; | |
862 | } | |
863 | if (eofb->eof_flags & XFS_ICWALK_FLAG_DROP_PDQUOT) { | |
864 | xfs_qm_dqrele(ip->i_pdquot); | |
865 | ip->i_pdquot = NULL; | |
866 | } | |
867 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
594ab00b | 868 | xfs_irele(ip); |
1ad2cfe0 DW |
869 | } |
870 | ||
871 | /* | |
872 | * Detach all dquots from incore inodes if we can. The caller must already | |
873 | * have dropped the relevant XFS_[UGP]QUOTA_ACTIVE flags so that dquots will | |
874 | * not get reattached. | |
875 | */ | |
876 | int | |
877 | xfs_dqrele_all_inodes( | |
878 | struct xfs_mount *mp, | |
879 | unsigned int qflags) | |
880 | { | |
881 | struct xfs_eofblocks eofb = { .eof_flags = 0 }; | |
882 | ||
883 | if (qflags & XFS_UQUOTA_ACCT) | |
884 | eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_UDQUOT; | |
885 | if (qflags & XFS_GQUOTA_ACCT) | |
886 | eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_GDQUOT; | |
887 | if (qflags & XFS_PQUOTA_ACCT) | |
888 | eofb.eof_flags |= XFS_ICWALK_FLAG_DROP_PDQUOT; | |
889 | ||
f427cf5c | 890 | return xfs_icwalk(mp, XFS_ICWALK_DQRELE, &eofb); |
5662d38c | 891 | } |
b9baaef4 DW |
892 | #else |
893 | # define xfs_dqrele_igrab(ip) (false) | |
594ab00b | 894 | # define xfs_dqrele_inode(ip, priv) ((void)0) |
1ad2cfe0 | 895 | #endif /* CONFIG_XFS_QUOTA */ |
5662d38c | 896 | |
e3a20c0b DC |
897 | /* |
898 | * Grab the inode for reclaim exclusively. | |
50718b8d DC |
899 | * |
900 | * We have found this inode via a lookup under RCU, so the inode may have | |
901 | * already been freed, or it may be in the process of being recycled by | |
902 | * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode | |
903 | * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE | |
904 | * will not be set. Hence we need to check for both these flag conditions to | |
905 | * avoid inodes that are no longer reclaim candidates. | |
906 | * | |
907 | * Note: checking for other state flags here, under the i_flags_lock or not, is | |
908 | * racy and should be avoided. Those races should be resolved only after we have | |
909 | * ensured that we are able to reclaim this inode and the world can see that we | |
910 | * are going to reclaim it. | |
911 | * | |
912 | * Return true if we grabbed it, false otherwise. | |
e3a20c0b | 913 | */ |
50718b8d | 914 | static bool |
f1bc5c56 | 915 | xfs_reclaim_igrab( |
50718b8d | 916 | struct xfs_inode *ip) |
e3a20c0b | 917 | { |
1a3e8f3d DC |
918 | ASSERT(rcu_read_lock_held()); |
919 | ||
e3a20c0b | 920 | spin_lock(&ip->i_flags_lock); |
1a3e8f3d DC |
921 | if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || |
922 | __xfs_iflags_test(ip, XFS_IRECLAIM)) { | |
923 | /* not a reclaim candidate. */ | |
e3a20c0b | 924 | spin_unlock(&ip->i_flags_lock); |
50718b8d | 925 | return false; |
e3a20c0b DC |
926 | } |
927 | __xfs_iflags_set(ip, XFS_IRECLAIM); | |
928 | spin_unlock(&ip->i_flags_lock); | |
50718b8d | 929 | return true; |
e3a20c0b DC |
930 | } |
931 | ||
777df5af | 932 | /* |
02511a5a DC |
933 | * Inode reclaim is non-blocking, so the default action if progress cannot be |
934 | * made is to "requeue" the inode for reclaim by unlocking it and clearing the | |
935 | * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about | |
936 | * blocking anymore and hence we can block waiting for the inode so that |
937 | * we can reclaim it. |
777df5af | 938 | * |
02511a5a DC |
939 | * We do no IO here - if callers require inodes to be cleaned they must push the |
940 | * AIL first to trigger writeback of dirty inodes. This enables writeback to be | |
941 | * done in the background in a non-blocking manner, and enables memory reclaim | |
942 | * to make progress without blocking. | |
777df5af | 943 | */ |
4d0bab3a | 944 | static void |
c8e20be0 | 945 | xfs_reclaim_inode( |
75f3cb13 | 946 | struct xfs_inode *ip, |
50718b8d | 947 | struct xfs_perag *pag) |
fce08f2f | 948 | { |
8a17d7dd | 949 | xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ |
777df5af | 950 | |
9552e14d | 951 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) |
617825fe | 952 | goto out; |
718ecc50 | 953 | if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING)) |
9552e14d | 954 | goto out_iunlock; |
7a3be02b | 955 | |
777df5af DC |
956 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
957 | xfs_iunpin_wait(ip); | |
88fc1879 | 958 | xfs_iflush_abort(ip); |
777df5af DC |
959 | goto reclaim; |
960 | } | |
617825fe | 961 | if (xfs_ipincount(ip)) |
718ecc50 | 962 | goto out_clear_flush; |
617825fe | 963 | if (!xfs_inode_clean(ip)) |
718ecc50 | 964 | goto out_clear_flush; |
8a48088f | 965 | |
718ecc50 | 966 | xfs_iflags_clear(ip, XFS_IFLUSHING); |
777df5af | 967 | reclaim: |
98efe8af | 968 | |
8a17d7dd DC |
969 | /* |
970 | * Because we use RCU freeing we need to ensure the inode always appears | |
971 | * to be reclaimed with an invalid inode number when in the free state. | |
98efe8af | 972 | * We do this as early as possible under the ILOCK so that |
f2e9ad21 OS |
973 | * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to |
974 | * detect races with us here. By doing this, we guarantee that once | |
975 | * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that | |
976 | * it will see either a valid inode that will serialise correctly, or it | |
977 | * will see an invalid inode that it can skip. | |
8a17d7dd DC |
978 | */ |
979 | spin_lock(&ip->i_flags_lock); | |
980 | ip->i_flags = XFS_IRECLAIM; | |
981 | ip->i_ino = 0; | |
982 | spin_unlock(&ip->i_flags_lock); | |
983 | ||
c8e20be0 | 984 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
2f11feab | 985 | |
ff6d6af2 | 986 | XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); |
2f11feab DC |
987 | /* |
988 | * Remove the inode from the per-AG radix tree. | |
989 | * | |
990 | * Because radix_tree_delete won't complain even if the item was never | |
991 | * added to the tree, assert that it was there before to catch |
992 | * problems with the inode lifetime early on. |
993 | */ | |
1a427ab0 | 994 | spin_lock(&pag->pag_ici_lock); |
2f11feab | 995 | if (!radix_tree_delete(&pag->pag_ici_root, |
8a17d7dd | 996 | XFS_INO_TO_AGINO(ip->i_mount, ino))) |
2f11feab | 997 | ASSERT(0); |
c076ae7a | 998 | xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG); |
1a427ab0 | 999 | spin_unlock(&pag->pag_ici_lock); |
2f11feab DC |
1000 | |
1001 | /* | |
1002 | * Here we do an (almost) spurious inode lock in order to coordinate | |
1003 | * with inode cache radix tree lookups. This is because the lookup | |
1004 | * can reference the inodes in the cache without taking references. | |
1005 | * | |
1006 | * We make that OK here by ensuring that we wait until the inode is | |
ad637a10 | 1007 | * unlocked after the lookup before we go ahead and free it. |
2f11feab | 1008 | */ |
ad637a10 | 1009 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
3ea06d73 | 1010 | ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot); |
ad637a10 | 1011 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
96355d5a | 1012 | ASSERT(xfs_inode_clean(ip)); |
2f11feab | 1013 | |
8a17d7dd | 1014 | __xfs_inode_free(ip); |
4d0bab3a | 1015 | return; |
8a48088f | 1016 | |
718ecc50 DC |
1017 | out_clear_flush: |
1018 | xfs_iflags_clear(ip, XFS_IFLUSHING); | |
9552e14d | 1019 | out_iunlock: |
8a48088f | 1020 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
9552e14d | 1021 | out: |
617825fe | 1022 | xfs_iflags_clear(ip, XFS_IRECLAIM); |
7a3be02b DC |
1023 | } |
1024 | ||
4d0bab3a | 1025 | void |
7a3be02b | 1026 | xfs_reclaim_inodes( |
4d0bab3a | 1027 | struct xfs_mount *mp) |
7a3be02b | 1028 | { |
4d0bab3a | 1029 | while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { |
617825fe | 1030 | xfs_ail_push_all_sync(mp->m_ail); |
f1bc5c56 | 1031 | xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL); |
0f4ec0f1 | 1032 | } |
9bf729c0 DC |
1033 | } |
1034 | ||
1035 | /* | |
02511a5a DC |
1036 | * The shrinker infrastructure determines how many inodes we should scan for |
1037 | * reclaim. We want as many clean inodes ready to reclaim as possible, so we | |
1038 | * push the AIL here. We also want to proactively free up memory if we can to | |
1039 | * minimise the amount of work memory reclaim has to do, so we kick the |
1040 | * background reclaim if it isn't already scheduled. | |
9bf729c0 | 1041 | */ |
0a234c6d | 1042 | long |
8daaa831 DC |
1043 | xfs_reclaim_inodes_nr( |
1044 | struct xfs_mount *mp, | |
1045 | int nr_to_scan) | |
9bf729c0 | 1046 | { |
f1bc5c56 DW |
1047 | struct xfs_eofblocks eofb = { |
1048 | .eof_flags = XFS_ICWALK_FLAG_SCAN_LIMIT, | |
1049 | .icw_scan_limit = nr_to_scan, | |
1050 | }; | |
1051 | ||
8daaa831 | 1052 | /* kick background reclaimer and push the AIL */ |
5889608d | 1053 | xfs_reclaim_work_queue(mp); |
8daaa831 | 1054 | xfs_ail_push_all(mp->m_ail); |
a7b339f1 | 1055 | |
f1bc5c56 | 1056 | xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &eofb); |
617825fe | 1057 | return 0; |
8daaa831 | 1058 | } |
9bf729c0 | 1059 | |
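xfs_reclaim_inodes_count() and xfs_reclaim_inodes_nr() are the two halves of the VFS shrinker contract: report how many inodes could be reclaimed, then scan up to nr_to_scan of them. In xfs_super.c they are wired up through the super_operations nr_cached_objects/free_cached_objects hooks; the sketch below only shows the shape of that wiring, with made-up function names.

```c
/* Sketch: hook the reclaim count/scan pair into the superblock shrinker. */
static long example_nr_cached_objects(struct super_block *sb,
		struct shrink_control *sc)
{
	return xfs_reclaim_inodes_count(XFS_M(sb));
}

static long example_free_cached_objects(struct super_block *sb,
		struct shrink_control *sc)
{
	return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
}
```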
8daaa831 DC |
1060 | /* |
1061 | * Return the number of reclaimable inodes in the filesystem for | |
1062 | * the shrinker to determine how much to reclaim. | |
1063 | */ | |
1064 | int | |
1065 | xfs_reclaim_inodes_count( | |
1066 | struct xfs_mount *mp) | |
1067 | { | |
1068 | struct xfs_perag *pag; | |
1069 | xfs_agnumber_t ag = 0; | |
1070 | int reclaimable = 0; | |
9bf729c0 | 1071 | |
65d0f205 DC |
1072 | while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { |
1073 | ag = pag->pag_agno + 1; | |
70e60ce7 DC |
1074 | reclaimable += pag->pag_ici_reclaimable; |
1075 | xfs_perag_put(pag); | |
9bf729c0 | 1076 | } |
9bf729c0 DC |
1077 | return reclaimable; |
1078 | } | |
1079 | ||
39b1cfd7 | 1080 | STATIC bool |
3e3f9f58 BF |
1081 | xfs_inode_match_id( |
1082 | struct xfs_inode *ip, | |
1083 | struct xfs_eofblocks *eofb) | |
1084 | { | |
b9fe5052 DE |
1085 | if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && |
1086 | !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) | |
39b1cfd7 | 1087 | return false; |
3e3f9f58 | 1088 | |
b9fe5052 DE |
1089 | if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && |
1090 | !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) | |
39b1cfd7 | 1091 | return false; |
1b556048 | 1092 | |
b9fe5052 | 1093 | if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && |
ceaf603c | 1094 | ip->i_projid != eofb->eof_prid) |
39b1cfd7 | 1095 | return false; |
1b556048 | 1096 | |
39b1cfd7 | 1097 | return true; |
3e3f9f58 BF |
1098 | } |
1099 | ||
f4526397 BF |
1100 | /* |
1101 | * A union-based inode filtering algorithm. Process the inode if any of the | |
1102 | * criteria match. This is for global/internal scans only. | |
1103 | */ | |
39b1cfd7 | 1104 | STATIC bool |
f4526397 BF |
1105 | xfs_inode_match_id_union( |
1106 | struct xfs_inode *ip, | |
1107 | struct xfs_eofblocks *eofb) | |
1108 | { | |
1109 | if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && | |
1110 | uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) | |
39b1cfd7 | 1111 | return true; |
f4526397 BF |
1112 | |
1113 | if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && | |
1114 | gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) | |
39b1cfd7 | 1115 | return true; |
f4526397 BF |
1116 | |
1117 | if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && | |
ceaf603c | 1118 | ip->i_projid == eofb->eof_prid) |
39b1cfd7 | 1119 | return true; |
f4526397 | 1120 | |
39b1cfd7 | 1121 | return false; |
f4526397 BF |
1122 | } |
1123 | ||
a91bf992 DW |
1124 | /* |
1125 | * Is this inode @ip eligible for eof/cow block reclamation, given some | |
1126 | * filtering parameters @eofb? The inode is eligible if @eofb is null or | |
1127 | * if the predicate functions match. | |
1128 | */ | |
1129 | static bool | |
1130 | xfs_inode_matches_eofb( | |
1131 | struct xfs_inode *ip, | |
1132 | struct xfs_eofblocks *eofb) | |
1133 | { | |
39b1cfd7 | 1134 | bool match; |
a91bf992 DW |
1135 | |
1136 | if (!eofb) | |
1137 | return true; | |
1138 | ||
1139 | if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) | |
1140 | match = xfs_inode_match_id_union(ip, eofb); | |
1141 | else | |
1142 | match = xfs_inode_match_id(ip, eofb); | |
1143 | if (!match) | |
1144 | return false; | |
1145 | ||
1146 | /* skip the inode if the file size is too small */ | |
1147 | if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) && | |
1148 | XFS_ISIZE(ip) < eofb->eof_min_file_size) | |
1149 | return false; | |
1150 | ||
1151 | return true; | |
1152 | } | |
1153 | ||
4d0bab3a DC |
1154 | /* |
1155 | * This is a fast pass over the inode cache to try to get reclaim moving on as | |
1156 | * many inodes as possible in a short period of time. It kicks itself every few | |
1157 | * seconds, as well as being kicked by the inode cache shrinker when memory | |
02511a5a | 1158 | * goes low. |
4d0bab3a DC |
1159 | */ |
1160 | void | |
1161 | xfs_reclaim_worker( | |
1162 | struct work_struct *work) | |
1163 | { | |
1164 | struct xfs_mount *mp = container_of(to_delayed_work(work), | |
1165 | struct xfs_mount, m_reclaim_work); | |
4d0bab3a | 1166 | |
f1bc5c56 | 1167 | xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL); |
4d0bab3a DC |
1168 | xfs_reclaim_work_queue(mp); |
1169 | } | |
1170 | ||
41176a68 BF |
1171 | STATIC int |
1172 | xfs_inode_free_eofblocks( | |
1173 | struct xfs_inode *ip, | |
9d5ee837 | 1174 | struct xfs_eofblocks *eofb, |
0fa4a10a | 1175 | unsigned int *lockflags) |
41176a68 | 1176 | { |
390600f8 | 1177 | bool wait; |
390600f8 DW |
1178 | |
1179 | wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC); | |
5400da7d | 1180 | |
ce2d3bbe DW |
1181 | if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS)) |
1182 | return 0; | |
1183 | ||
41176a68 BF |
1184 | /* |
1185 | * If the mapping is dirty the operation can block and wait for some | |
1186 | * time. Unless we are waiting, skip it. | |
1187 | */ | |
390600f8 | 1188 | if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) |
41176a68 BF |
1189 | return 0; |
1190 | ||
a91bf992 DW |
1191 | if (!xfs_inode_matches_eofb(ip, eofb)) |
1192 | return 0; | |
3e3f9f58 | 1193 | |
a36b9261 BF |
1194 | /* |
1195 | * If the caller is waiting, return -EAGAIN to keep the background | |
1196 | * scanner moving and revisit the inode in a subsequent pass. | |
1197 | */ | |
c3155097 | 1198 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { |
390600f8 DW |
1199 | if (wait) |
1200 | return -EAGAIN; | |
1201 | return 0; | |
a36b9261 | 1202 | } |
0fa4a10a | 1203 | *lockflags |= XFS_IOLOCK_EXCL; |
390600f8 | 1204 | |
2b156ff8 DW |
1205 | if (xfs_can_free_eofblocks(ip, false)) |
1206 | return xfs_free_eofblocks(ip); | |
1207 | ||
1208 | /* inode could be preallocated or append-only */ | |
1209 | trace_xfs_inode_free_eofblocks_invalid(ip); | |
1210 | xfs_inode_clear_eofblocks_tag(ip); | |
1211 | return 0; | |
41176a68 BF |
1212 | } |
1213 | ||
83104d44 | 1214 | static void |
ce2d3bbe DW |
1215 | xfs_blockgc_set_iflag( |
1216 | struct xfs_inode *ip, | |
ce2d3bbe | 1217 | unsigned long iflag) |
27b52867 | 1218 | { |
ce2d3bbe DW |
1219 | struct xfs_mount *mp = ip->i_mount; |
1220 | struct xfs_perag *pag; | |
ce2d3bbe DW |
1221 | |
1222 | ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0); | |
27b52867 | 1223 | |
85a6e764 CH |
1224 | /* |
1225 | * Don't bother locking the AG and looking up in the radix trees | |
1226 | * if we already know that we have the tag set. | |
1227 | */ | |
ce2d3bbe | 1228 | if (ip->i_flags & iflag) |
85a6e764 CH |
1229 | return; |
1230 | spin_lock(&ip->i_flags_lock); | |
ce2d3bbe | 1231 | ip->i_flags |= iflag; |
85a6e764 CH |
1232 | spin_unlock(&ip->i_flags_lock); |
1233 | ||
27b52867 BF |
1234 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
1235 | spin_lock(&pag->pag_ici_lock); | |
27b52867 | 1236 | |
c076ae7a DW |
1237 | xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), |
1238 | XFS_ICI_BLOCKGC_TAG); | |
27b52867 BF |
1239 | |
1240 | spin_unlock(&pag->pag_ici_lock); | |
1241 | xfs_perag_put(pag); | |
1242 | } | |
1243 | ||
1244 | void | |
83104d44 | 1245 | xfs_inode_set_eofblocks_tag( |
27b52867 | 1246 | xfs_inode_t *ip) |
83104d44 DW |
1247 | { |
1248 | trace_xfs_inode_set_eofblocks_tag(ip); | |
9669f51d | 1249 | return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS); |
83104d44 DW |
1250 | } |
1251 | ||
1252 | static void | |
ce2d3bbe DW |
1253 | xfs_blockgc_clear_iflag( |
1254 | struct xfs_inode *ip, | |
1255 | unsigned long iflag) | |
27b52867 | 1256 | { |
ce2d3bbe DW |
1257 | struct xfs_mount *mp = ip->i_mount; |
1258 | struct xfs_perag *pag; | |
1259 | bool clear_tag; | |
1260 | ||
1261 | ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0); | |
27b52867 | 1262 | |
85a6e764 | 1263 | spin_lock(&ip->i_flags_lock); |
ce2d3bbe DW |
1264 | ip->i_flags &= ~iflag; |
1265 | clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0; | |
85a6e764 CH |
1266 | spin_unlock(&ip->i_flags_lock); |
1267 | ||
ce2d3bbe DW |
1268 | if (!clear_tag) |
1269 | return; | |
1270 | ||
27b52867 BF |
1271 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
1272 | spin_lock(&pag->pag_ici_lock); | |
27b52867 | 1273 | |
c076ae7a DW |
1274 | xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino), |
1275 | XFS_ICI_BLOCKGC_TAG); | |
27b52867 BF |
1276 | |
1277 | spin_unlock(&pag->pag_ici_lock); | |
1278 | xfs_perag_put(pag); | |
1279 | } | |
1280 | ||
83104d44 DW |
1281 | void |
1282 | xfs_inode_clear_eofblocks_tag( | |
1283 | xfs_inode_t *ip) | |
1284 | { | |
1285 | trace_xfs_inode_clear_eofblocks_tag(ip); | |
ce2d3bbe | 1286 | return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS); |
83104d44 DW |
1287 | } |
1288 | ||
1289 | /* | |
be78ff0e DW |
1290 | * Set ourselves up to free CoW blocks from this file. If it's already clean |
1291 | * then we can bail out quickly, but otherwise we must back off if the file | |
1292 | * is undergoing some kind of write. | |
83104d44 | 1293 | */ |
be78ff0e DW |
1294 | static bool |
1295 | xfs_prep_free_cowblocks( | |
51d62690 | 1296 | struct xfs_inode *ip) |
83104d44 | 1297 | { |
39937234 BF |
1298 | /* |
1299 | * Just clear the tag if we have an empty cow fork or none at all. It's | |
1300 | * possible the inode was fully unshared since it was originally tagged. | |
1301 | */ | |
51d62690 | 1302 | if (!xfs_inode_has_cow_data(ip)) { |
83104d44 DW |
1303 | trace_xfs_inode_free_cowblocks_invalid(ip); |
1304 | xfs_inode_clear_cowblocks_tag(ip); | |
be78ff0e | 1305 | return false; |
83104d44 DW |
1306 | } |
1307 | ||
1308 | /* | |
1309 | * If the mapping is dirty or under writeback we cannot touch the | |
1310 | * CoW fork. Leave it alone if we're in the midst of a directio. | |
1311 | */ | |
a1b7a4de CH |
1312 | if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) || |
1313 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) || | |
83104d44 DW |
1314 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) || |
1315 | atomic_read(&VFS_I(ip)->i_dio_count)) | |
be78ff0e DW |
1316 | return false; |
1317 | ||
1318 | return true; | |
1319 | } | |
1320 | ||
1321 | /* | |
1322 | * Automatic CoW Reservation Freeing | |
1323 | * | |
1324 | * These functions automatically garbage collect leftover CoW reservations | |
1325 | * that were made on behalf of a cowextsize hint when we start to run out | |
1326 | * of quota or when the reservations sit around for too long. If the file | |
1327 | * has dirty pages or is undergoing writeback, its CoW reservations will | |
1328 | * be retained. | |
1329 | * | |
1330 | * The actual garbage collection piggybacks off the same code that runs | |
1331 | * the speculative EOF preallocation garbage collector. | |
1332 | */ | |
1333 | STATIC int | |
1334 | xfs_inode_free_cowblocks( | |
1335 | struct xfs_inode *ip, | |
9d5ee837 | 1336 | struct xfs_eofblocks *eofb, |
0fa4a10a | 1337 | unsigned int *lockflags) |
be78ff0e | 1338 | { |
f41a0716 | 1339 | bool wait; |
be78ff0e DW |
1340 | int ret = 0; |
1341 | ||
f41a0716 DW |
1342 | wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC); |
1343 | ||
ce2d3bbe DW |
1344 | if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS)) |
1345 | return 0; | |
1346 | ||
51d62690 | 1347 | if (!xfs_prep_free_cowblocks(ip)) |
83104d44 DW |
1348 | return 0; |
1349 | ||
a91bf992 DW |
1350 | if (!xfs_inode_matches_eofb(ip, eofb)) |
1351 | return 0; | |
83104d44 | 1352 | |
f41a0716 DW |
1353 | /* |
1354 | * If the caller is waiting, return -EAGAIN to keep the background | |
1355 | * scanner moving and revisit the inode in a subsequent pass. | |
1356 | */ | |
0fa4a10a DW |
1357 | if (!(*lockflags & XFS_IOLOCK_EXCL) && |
1358 | !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { | |
f41a0716 DW |
1359 | if (wait) |
1360 | return -EAGAIN; | |
1361 | return 0; | |
1362 | } | |
0fa4a10a DW |
1363 | *lockflags |= XFS_IOLOCK_EXCL; |
1364 | ||
f41a0716 DW |
1365 | if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) { |
1366 | if (wait) | |
0fa4a10a DW |
1367 | return -EAGAIN; |
1368 | return 0; | |
f41a0716 | 1369 | } |
0fa4a10a | 1370 | *lockflags |= XFS_MMAPLOCK_EXCL; |
83104d44 | 1371 | |
be78ff0e DW |
1372 | /* |
1373 | * Check again, nobody else should be able to dirty blocks or change | |
1374 | * the reflink iflag now that we have the first two locks held. | |
1375 | */ | |
51d62690 | 1376 | if (xfs_prep_free_cowblocks(ip)) |
be78ff0e | 1377 | ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false); |
83104d44 DW |
1378 | return ret; |
1379 | } | |
1380 | ||
83104d44 DW |
1381 | void |
1382 | xfs_inode_set_cowblocks_tag( | |
1383 | xfs_inode_t *ip) | |
1384 | { | |
7b7381f0 | 1385 | trace_xfs_inode_set_cowblocks_tag(ip); |
9669f51d | 1386 | return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS); |
83104d44 DW |
1387 | } |
1388 | ||
1389 | void | |
1390 | xfs_inode_clear_cowblocks_tag( | |
1391 | xfs_inode_t *ip) | |
1392 | { | |
7b7381f0 | 1393 | trace_xfs_inode_clear_cowblocks_tag(ip); |
ce2d3bbe | 1394 | return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS); |
83104d44 | 1395 | } |
d6b636eb | 1396 | |
894ecacf DW |
1397 | #define for_each_perag_tag(mp, next_agno, pag, tag) \ |
1398 | for ((next_agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \ | |
1399 | (pag) != NULL; \ | |
1400 | (next_agno) = (pag)->pag_agno + 1, \ | |
1401 | xfs_perag_put(pag), \ | |
1402 | (pag) = xfs_perag_get_tag((mp), (next_agno), (tag))) | |
1403 | ||
1404 | ||
d6b636eb DW |
1405 | /* Disable post-EOF and CoW block auto-reclamation. */ |
1406 | void | |
c9a6526f | 1407 | xfs_blockgc_stop( |
d6b636eb DW |
1408 | struct xfs_mount *mp) |
1409 | { | |
894ecacf DW |
1410 | struct xfs_perag *pag; |
1411 | xfs_agnumber_t agno; | |
1412 | ||
1413 | for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) | |
1414 | cancel_delayed_work_sync(&pag->pag_blockgc_work); | |
d6b636eb DW |
1415 | } |
1416 | ||
1417 | /* Enable post-EOF and CoW block auto-reclamation. */ | |
1418 | void | |
c9a6526f | 1419 | xfs_blockgc_start( |
d6b636eb DW |
1420 | struct xfs_mount *mp) |
1421 | { | |
894ecacf DW |
1422 | struct xfs_perag *pag; |
1423 | xfs_agnumber_t agno; | |
1424 | ||
1425 | for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) | |
1426 | xfs_blockgc_queue(pag); | |
d6b636eb | 1427 | } |
3d4feec0 | 1428 | |
d20d5edc DW |
1429 | /* Don't try to run block gc on an inode that's in any of these states. */ |
1430 | #define XFS_BLOCKGC_NOGRAB_IFLAGS (XFS_INEW | \ | |
1431 | XFS_IRECLAIMABLE | \ | |
1432 | XFS_IRECLAIM) | |
df600197 | 1433 | /* |
b9baaef4 DW |
1434 | * Decide if the given @ip is eligible for garbage collection of speculative |
1435 | * preallocations, and grab it if so. Returns true if it's ready to go or | |
1436 | * false if we should just ignore it. | |
df600197 DW |
1437 | */ |
1438 | static bool | |
b9baaef4 | 1439 | xfs_blockgc_igrab( |
7fdff526 | 1440 | struct xfs_inode *ip) |
df600197 DW |
1441 | { |
1442 | struct inode *inode = VFS_I(ip); | |
df600197 DW |
1443 | |
1444 | ASSERT(rcu_read_lock_held()); | |
1445 | ||
1446 | /* Check for stale RCU freed inode */ | |
1447 | spin_lock(&ip->i_flags_lock); | |
1448 | if (!ip->i_ino) | |
1449 | goto out_unlock_noent; | |
1450 | ||
d20d5edc | 1451 | if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS) |
df600197 DW |
1452 | goto out_unlock_noent; |
1453 | spin_unlock(&ip->i_flags_lock); | |
1454 | ||
1455 | /* nothing to sync during shutdown */ | |
1456 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | |
1457 | return false; | |
1458 | ||
1459 | /* If we can't grab the inode, it must be on its way to reclaim. */ |
1460 | if (!igrab(inode)) | |
1461 | return false; | |
1462 | ||
1463 | /* inode is valid */ | |
1464 | return true; | |
1465 | ||
1466 | out_unlock_noent: | |
1467 | spin_unlock(&ip->i_flags_lock); | |
1468 | return false; | |
1469 | } | |
1470 | ||
41956753 DW |
1471 | /* Scan one incore inode for block preallocations that we can remove. */ |
1472 | static int | |
1473 | xfs_blockgc_scan_inode( | |
1474 | struct xfs_inode *ip, | |
9d5ee837 | 1475 | struct xfs_eofblocks *eofb) |
85c5b270 | 1476 | { |
0fa4a10a | 1477 | unsigned int lockflags = 0; |
85c5b270 DW |
1478 | int error; |
1479 | ||
9d5ee837 | 1480 | error = xfs_inode_free_eofblocks(ip, eofb, &lockflags); |
85c5b270 | 1481 | if (error) |
0fa4a10a | 1482 | goto unlock; |
85c5b270 | 1483 | |
9d5ee837 | 1484 | error = xfs_inode_free_cowblocks(ip, eofb, &lockflags); |
0fa4a10a DW |
1485 | unlock: |
1486 | if (lockflags) | |
1487 | xfs_iunlock(ip, lockflags); | |
594ab00b | 1488 | xfs_irele(ip); |
0fa4a10a | 1489 | return error; |
85c5b270 DW |
1490 | } |
1491 | ||
9669f51d DW |
1492 | /* Background worker that trims preallocated space. */ |
1493 | void | |
1494 | xfs_blockgc_worker( | |
1495 | struct work_struct *work) | |
1496 | { | |
894ecacf DW |
1497 | struct xfs_perag *pag = container_of(to_delayed_work(work), |
1498 | struct xfs_perag, pag_blockgc_work); | |
1499 | struct xfs_mount *mp = pag->pag_mount; | |
9669f51d DW |
1500 | int error; |
1501 | ||
1502 | if (!sb_start_write_trylock(mp->m_super)) | |
1503 | return; | |
f427cf5c | 1504 | error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL); |
9669f51d | 1505 | if (error) |
894ecacf DW |
1506 | xfs_info(mp, "AG %u preallocation gc worker failed, err=%d", |
1507 | pag->pag_agno, error); | |
9669f51d | 1508 | sb_end_write(mp->m_super); |
894ecacf | 1509 | xfs_blockgc_queue(pag); |
9669f51d DW |
1510 | } |
1511 | ||
85c5b270 DW |
1512 | /* |
1513 | * Try to free space in the filesystem by purging eofblocks and cowblocks. | |
1514 | */ | |
1515 | int | |
1516 | xfs_blockgc_free_space( | |
1517 | struct xfs_mount *mp, | |
1518 | struct xfs_eofblocks *eofb) | |
1519 | { | |
1520 | trace_xfs_blockgc_free_space(mp, eofb, _RET_IP_); | |
1521 | ||
f427cf5c | 1522 | return xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, eofb); |
85c5b270 DW |
1523 | } |
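A hedged caller example (a sketch, not copied from the source): a low-free-space path that needs the scan to complete before returning can pass a filter with only XFS_EOF_FLAGS_SYNC set.

/* Sketch: synchronously trim speculative preallocations filesystem-wide. */
static int example_flush_speculative_space(struct xfs_mount *mp)
{
	struct xfs_eofblocks	eofb = {
		.eof_flags	= XFS_EOF_FLAGS_SYNC,
	};

	return xfs_blockgc_free_space(mp, &eofb);
}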
1524 | ||
3d4feec0 | 1525 | /* |
c237dd7c DW |
1526 | * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which |
1527 | * quota caused an allocation failure, so we make a best effort by including | |
1528 | * each quota under low free space conditions (less than 1% free space) in the | |
1529 | * scan. | |
111068f8 DW |
1530 | * |
1531 | * Callers must not hold any inode's ILOCK. If requesting a synchronous scan | |
1532 | * (XFS_EOF_FLAGS_SYNC), the caller also must not hold any inode's IOLOCK or | |
1533 | * MMAPLOCK. | |
3d4feec0 | 1534 | */ |
111068f8 | 1535 | int |
c237dd7c DW |
1536 | xfs_blockgc_free_dquots( |
1537 | struct xfs_mount *mp, | |
1538 | struct xfs_dquot *udqp, | |
1539 | struct xfs_dquot *gdqp, | |
1540 | struct xfs_dquot *pdqp, | |
111068f8 | 1541 | unsigned int eof_flags) |
3d4feec0 DW |
1542 | { |
1543 | struct xfs_eofblocks eofb = {0}; | |
3d4feec0 DW |
1544 | bool do_work = false; |
1545 | ||
c237dd7c DW |
1546 | if (!udqp && !gdqp && !pdqp) |
1547 | return 0; | |
1548 | ||
3d4feec0 | 1549 | /* |
111068f8 DW |
1550 | * Run a scan to free blocks using the union filter to cover all |
1551 | * applicable quotas in a single scan. | |
3d4feec0 | 1552 | */ |
111068f8 | 1553 | eofb.eof_flags = XFS_EOF_FLAGS_UNION | eof_flags; |
3d4feec0 | 1554 | |
c237dd7c DW |
1555 | if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) { |
1556 | eofb.eof_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id); | |
1557 | eofb.eof_flags |= XFS_EOF_FLAGS_UID; | |
1558 | do_work = true; | |
3d4feec0 DW |
1559 | } |
1560 | ||
c237dd7c DW |
1561 | if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1562 | eofb.eof_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id); | |
1563 | eofb.eof_flags |= XFS_EOF_FLAGS_GID; | |
1564 | do_work = true; | |
3d4feec0 DW |
1565 | } |
1566 | ||
c237dd7c DW |
1567 | if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) { |
1568 | eofb.eof_prid = pdqp->q_id; | |
1569 | eofb.eof_flags |= XFS_EOF_FLAGS_PRID; | |
1570 | do_work = true; | |
3d4feec0 DW |
1571 | } |
1572 | ||
1573 | if (!do_work) | |
111068f8 | 1574 | return 0; |
3d4feec0 | 1575 | |
85c5b270 | 1576 | return xfs_blockgc_free_space(mp, &eofb); |
c237dd7c DW |
1577 | } |
1578 | ||
1579 | /* Run cow/eofblocks scans on the quotas attached to the inode. */ | |
1580 | int | |
1581 | xfs_blockgc_free_quota( | |
1582 | struct xfs_inode *ip, | |
1583 | unsigned int eof_flags) | |
1584 | { | |
1585 | return xfs_blockgc_free_dquots(ip->i_mount, | |
1586 | xfs_inode_dquot(ip, XFS_DQTYPE_USER), | |
1587 | xfs_inode_dquot(ip, XFS_DQTYPE_GROUP), | |
1588 | xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), eof_flags); | |
3d4feec0 | 1589 | } |
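A sketch of the retry pattern the locking comment above anticipates (example_reserve_quota() is a hypothetical stand-in for a real quota reservation, not an existing function): on -EDQUOT or -ENOSPC, run the scan against the inode's attached dquots once, then retry the reservation.

/* Hypothetical caller sketch -- example_reserve_quota() does not exist. */
static int example_reserve_with_retry(struct xfs_inode *ip)
{
	bool	retried = false;
	int	error;

retry:
	error = example_reserve_quota(ip);
	if ((error == -EDQUOT || error == -ENOSPC) && !retried) {
		/* must not hold the ILOCK across this call; see comment above */
		error = xfs_blockgc_free_quota(ip, 0);
		if (error)
			return error;
		retried = true;
		goto retry;
	}
	return error;
}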
df600197 DW |
1590 | |
1591 | /* XFS Inode Cache Walking Code */ | |
1592 | ||
f1bc5c56 DW |
1593 | /* |
1594 | * The inode lookup is done in batches to keep the amount of lock traffic and | |
1595 | * radix tree lookups to a minimum. The batch size is a trade-off between
1596 | * lookup reduction and stack usage. This is in the reclaim path, so we can't | |
1597 | * be too greedy. | |
1598 | */ | |
1599 | #define XFS_LOOKUP_BATCH 32 | |
1600 | ||
1601 | ||
b9baaef4 DW |
1602 | /* |
1603 | * Decide if we want to grab this inode in anticipation of doing work towards | |
594ab00b | 1604 | * the goal. |
b9baaef4 DW |
1605 | */ |
1606 | static inline bool | |
1607 | xfs_icwalk_igrab( | |
1608 | enum xfs_icwalk_goal goal, | |
7fdff526 | 1609 | struct xfs_inode *ip) |
b9baaef4 DW |
1610 | { |
1611 | switch (goal) { | |
1612 | case XFS_ICWALK_DQRELE: | |
1613 | return xfs_dqrele_igrab(ip); | |
1614 | case XFS_ICWALK_BLOCKGC: | |
7fdff526 | 1615 | return xfs_blockgc_igrab(ip); |
f1bc5c56 DW |
1616 | case XFS_ICWALK_RECLAIM: |
1617 | return xfs_reclaim_igrab(ip); | |
b9baaef4 DW |
1618 | default: |
1619 | return false; | |
1620 | } | |
1621 | } | |
1622 | ||
594ab00b DW |
1623 | /* |
1624 | * Process an inode. Each processing function must handle any state changes | |
1625 | * made by the icwalk igrab function. Return -EAGAIN to skip an inode. | |
1626 | */ | |
f427cf5c DW |
1627 | static inline int |
1628 | xfs_icwalk_process_inode( | |
1629 | enum xfs_icwalk_goal goal, | |
1630 | struct xfs_inode *ip, | |
f1bc5c56 | 1631 | struct xfs_perag *pag, |
9d5ee837 | 1632 | struct xfs_eofblocks *eofb) |
f427cf5c | 1633 | { |
594ab00b | 1634 | int error = 0; |
f427cf5c DW |
1635 | |
1636 | switch (goal) { | |
1637 | case XFS_ICWALK_DQRELE: | |
9d5ee837 | 1638 | xfs_dqrele_inode(ip, eofb); |
f427cf5c DW |
1639 | break; |
1640 | case XFS_ICWALK_BLOCKGC: | |
9d5ee837 | 1641 | error = xfs_blockgc_scan_inode(ip, eofb); |
f427cf5c | 1642 | break; |
f1bc5c56 DW |
1643 | case XFS_ICWALK_RECLAIM: |
1644 | xfs_reclaim_inode(ip, pag); | |
1645 | break; | |
f427cf5c | 1646 | } |
f427cf5c DW |
1647 | return error; |
1648 | } | |
1649 | ||
df600197 | 1650 | /* |
f427cf5c DW |
1651 | * For a given per-AG structure @pag and a goal, grab qualifying inodes and |
1652 | * process them in some manner. | |
df600197 DW |
1653 | */ |
1654 | static int | |
c1115c0c | 1655 | xfs_icwalk_ag( |
df600197 | 1656 | struct xfs_perag *pag, |
f427cf5c | 1657 | enum xfs_icwalk_goal goal, |
9d5ee837 | 1658 | struct xfs_eofblocks *eofb) |
df600197 DW |
1659 | { |
1660 | struct xfs_mount *mp = pag->pag_mount; | |
1661 | uint32_t first_index; | |
1662 | int last_error = 0; | |
1663 | int skipped; | |
1664 | bool done; | |
1665 | int nr_found; | |
1666 | ||
1667 | restart: | |
1668 | done = false; | |
1669 | skipped = 0; | |
f1bc5c56 DW |
1670 | if (goal == XFS_ICWALK_RECLAIM) |
1671 | first_index = READ_ONCE(pag->pag_ici_reclaim_cursor); | |
1672 | else | |
1673 | first_index = 0; | |
df600197 DW |
1674 | nr_found = 0; |
1675 | do { | |
1676 | struct xfs_inode *batch[XFS_LOOKUP_BATCH]; | |
c809d7e9 | 1677 | unsigned int tag = xfs_icwalk_tag(goal); |
df600197 DW |
1678 | int error = 0; |
1679 | int i; | |
1680 | ||
1681 | rcu_read_lock(); | |
1682 | ||
c809d7e9 | 1683 | if (tag == XFS_ICWALK_NULL_TAG) |
df600197 DW |
1684 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, |
1685 | (void **)batch, first_index, | |
1686 | XFS_LOOKUP_BATCH); | |
1687 | else | |
1688 | nr_found = radix_tree_gang_lookup_tag( | |
1689 | &pag->pag_ici_root, | |
1690 | (void **) batch, first_index, | |
1691 | XFS_LOOKUP_BATCH, tag); | |
1692 | ||
1693 | if (!nr_found) { | |
f1bc5c56 | 1694 | done = true; |
df600197 DW |
1695 | rcu_read_unlock(); |
1696 | break; | |
1697 | } | |
1698 | ||
1699 | /* | |
1700 | * Grab the inodes before we drop the lock. If we found
1701 | * nothing, nr == 0 and the loop will be skipped. | |
1702 | */ | |
1703 | for (i = 0; i < nr_found; i++) { | |
1704 | struct xfs_inode *ip = batch[i]; | |
1705 | ||
7fdff526 | 1706 | if (done || !xfs_icwalk_igrab(goal, ip)) |
df600197 DW |
1707 | batch[i] = NULL; |
1708 | ||
1709 | /* | |
1710 | * Update the index for the next lookup. Catch | |
1711 | * overflows into the next AG range which can occur if | |
1712 | * we have inodes in the last block of the AG and we | |
1713 | * are currently pointing to the last inode. | |
1714 | * | |
1715 | * Because we may see inodes that are from the wrong AG | |
1716 | * due to RCU freeing and reallocation, only update the | |
1717 | * index if it lies in this AG. It was a race that led
1718 | * us to see this inode, so another lookup from the | |
1719 | * same index will not find it again. | |
1720 | */ | |
1721 | if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) | |
1722 | continue; | |
1723 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); | |
1724 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | |
1725 | done = true; | |
1726 | } | |
1727 | ||
1728 | /* unlock now that we've grabbed the inodes. */
1729 | rcu_read_unlock(); | |
1730 | ||
1731 | for (i = 0; i < nr_found; i++) { | |
1732 | if (!batch[i]) | |
1733 | continue; | |
f1bc5c56 DW |
1734 | error = xfs_icwalk_process_inode(goal, batch[i], pag, |
1735 | eofb); | |
df600197 DW |
1736 | if (error == -EAGAIN) { |
1737 | skipped++; | |
1738 | continue; | |
1739 | } | |
1740 | if (error && last_error != -EFSCORRUPTED) | |
1741 | last_error = error; | |
1742 | } | |
1743 | ||
1744 | /* bail out if the filesystem is corrupted. */ | |
1745 | if (error == -EFSCORRUPTED) | |
1746 | break; | |
1747 | ||
1748 | cond_resched(); | |
1749 | ||
f1bc5c56 DW |
1750 | if (eofb && (eofb->eof_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) { |
1751 | eofb->icw_scan_limit -= XFS_LOOKUP_BATCH; | |
1752 | if (eofb->icw_scan_limit <= 0) | |
1753 | break; | |
1754 | } | |
df600197 DW |
1755 | } while (nr_found && !done); |
1756 | ||
f1bc5c56 DW |
1757 | if (goal == XFS_ICWALK_RECLAIM) { |
1758 | if (done) | |
1759 | first_index = 0; | |
1760 | WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index); | |
1761 | } | |
1762 | ||
df600197 DW |
1763 | if (skipped) { |
1764 | delay(1); | |
1765 | goto restart; | |
1766 | } | |
1767 | return last_error; | |
1768 | } | |
1769 | ||
1770 | /* Fetch the next (possibly tagged) per-AG structure. */ | |
1771 | static inline struct xfs_perag * | |
c1115c0c | 1772 | xfs_icwalk_get_perag( |
df600197 DW |
1773 | struct xfs_mount *mp, |
1774 | xfs_agnumber_t agno, | |
c809d7e9 | 1775 | enum xfs_icwalk_goal goal) |
df600197 | 1776 | { |
c809d7e9 DW |
1777 | unsigned int tag = xfs_icwalk_tag(goal); |
1778 | ||
1779 | if (tag == XFS_ICWALK_NULL_TAG) | |
df600197 DW |
1780 | return xfs_perag_get(mp, agno); |
1781 | return xfs_perag_get_tag(mp, agno, tag); | |
1782 | } | |
1783 | ||
f427cf5c | 1784 | /* Walk all incore inodes to achieve a given goal. */ |
df600197 | 1785 | static int |
c1115c0c | 1786 | xfs_icwalk( |
df600197 | 1787 | struct xfs_mount *mp, |
f427cf5c | 1788 | enum xfs_icwalk_goal goal, |
9d5ee837 | 1789 | struct xfs_eofblocks *eofb) |
df600197 DW |
1790 | { |
1791 | struct xfs_perag *pag; | |
1792 | int error = 0; | |
1793 | int last_error = 0; | |
1794 | xfs_agnumber_t agno = 0; | |
1795 | ||
c809d7e9 | 1796 | while ((pag = xfs_icwalk_get_perag(mp, agno, goal))) { |
df600197 | 1797 | agno = pag->pag_agno + 1; |
9d5ee837 | 1798 | error = xfs_icwalk_ag(pag, goal, eofb); |
df600197 DW |
1799 | xfs_perag_put(pag); |
1800 | if (error) { | |
1801 | last_error = error; | |
1802 | if (error == -EFSCORRUPTED) | |
1803 | break; | |
1804 | } | |
1805 | } | |
1806 | return last_error; | |
1807 | BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_EOF_FLAGS_VALID); | |
1808 | } |
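Finally, a hedged illustration of the scan-limit hook handled in xfs_icwalk_ag() above (a sketch; the budget is presumably set by the reclaim path elsewhere in this file, which is not shown here): a bounded walk sets the private XFS_ICWALK_FLAG_SCAN_LIMIT flag plus a budget that the walk decrements one batch at a time.

/* Sketch: bound a reclaim walk to roughly nr_to_scan inodes. */
static void example_reclaim_some(struct xfs_mount *mp, int nr_to_scan)
{
	struct xfs_eofblocks	eofb = {
		.eof_flags	= XFS_ICWALK_FLAG_SCAN_LIMIT,
		.icw_scan_limit	= nr_to_scan,
	};

	xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &eofb);
}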