Commit | Line | Data |
---|---|---|
0b61f8a4 | 1 | // SPDX-License-Identifier: GPL-2.0 |
fe4fa4b8 DC |
2 | /* |
3 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. | |
4 | * All Rights Reserved. | |
fe4fa4b8 DC |
5 | */ |
6 | #include "xfs.h" | |
7 | #include "xfs_fs.h" | |
5467b34b | 8 | #include "xfs_shared.h" |
6ca1c906 | 9 | #include "xfs_format.h" |
239880ef DC |
10 | #include "xfs_log_format.h" |
11 | #include "xfs_trans_resv.h" | |
fe4fa4b8 | 12 | #include "xfs_sb.h" |
fe4fa4b8 | 13 | #include "xfs_mount.h" |
fe4fa4b8 | 14 | #include "xfs_inode.h" |
239880ef DC |
15 | #include "xfs_trans.h" |
16 | #include "xfs_trans_priv.h" | |
fe4fa4b8 | 17 | #include "xfs_inode_item.h" |
7d095257 | 18 | #include "xfs_quota.h" |
0b1b213f | 19 | #include "xfs_trace.h" |
6d8b79cf | 20 | #include "xfs_icache.h" |
c24b5dfa | 21 | #include "xfs_bmap_util.h" |
dc06f398 BF |
22 | #include "xfs_dquot_item.h" |
23 | #include "xfs_dquot.h" | |
83104d44 | 24 | #include "xfs_reflink.h" |
fe4fa4b8 | 25 | |
f0e28280 | 26 | #include <linux/iversion.h> |
a167b17e | 27 | |
33479e05 DC |
28 | /* |
29 | * Allocate and initialise an xfs_inode. | |
30 | */ | |
638f4416 | 31 | struct xfs_inode * |
33479e05 DC |
32 | xfs_inode_alloc( |
33 | struct xfs_mount *mp, | |
34 | xfs_ino_t ino) | |
35 | { | |
36 | struct xfs_inode *ip; | |
37 | ||
38 | /* | |
39 | * If this allocation didn't occur inside a transaction, we could use | |
40 | * KM_MAYFAIL and return NULL here on ENOMEM. The code is set | |
41 | * up to do this anyway. | |
42 | */ | |
707e0dda | 43 | ip = kmem_zone_alloc(xfs_inode_zone, 0); |
33479e05 DC |
44 | if (!ip) |
45 | return NULL; | |
46 | if (inode_init_always(mp->m_super, VFS_I(ip))) { | |
377bcd5f | 47 | kmem_cache_free(xfs_inode_zone, ip); |
33479e05 DC |
48 | return NULL; |
49 | } | |
50 | ||
c19b3b05 DC |
51 | /* VFS doesn't initialise i_mode! */ |
52 | VFS_I(ip)->i_mode = 0; | |
53 | ||
ff6d6af2 | 54 | XFS_STATS_INC(mp, vn_active); |
33479e05 | 55 | ASSERT(atomic_read(&ip->i_pincount) == 0); |
33479e05 DC |
56 | ASSERT(!xfs_isiflocked(ip)); |
57 | ASSERT(ip->i_ino == 0); | |
58 | ||
33479e05 DC |
59 | /* initialise the xfs inode */ |
60 | ip->i_ino = ino; | |
61 | ip->i_mount = mp; | |
62 | memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); | |
63 | ip->i_afp = NULL; | |
3993baeb DW |
64 | ip->i_cowfp = NULL; |
65 | ip->i_cnextents = 0; | |
66 | ip->i_cformat = XFS_DINODE_FMT_EXTENTS; | |
3ba738df | 67 | memset(&ip->i_df, 0, sizeof(ip->i_df)); |
33479e05 DC |
68 | ip->i_flags = 0; |
69 | ip->i_delayed_blks = 0; | |
f8d55aa0 | 70 | memset(&ip->i_d, 0, sizeof(ip->i_d)); |
6772c1f1 DW |
71 | ip->i_sick = 0; |
72 | ip->i_checked = 0; | |
cb357bf3 DW |
73 | INIT_WORK(&ip->i_ioend_work, xfs_end_io); |
74 | INIT_LIST_HEAD(&ip->i_ioend_list); | |
75 | spin_lock_init(&ip->i_ioend_lock); | |
33479e05 DC |
76 | |
77 | return ip; | |
78 | } | |
79 | ||
80 | STATIC void | |
81 | xfs_inode_free_callback( | |
82 | struct rcu_head *head) | |
83 | { | |
84 | struct inode *inode = container_of(head, struct inode, i_rcu); | |
85 | struct xfs_inode *ip = XFS_I(inode); | |
86 | ||
c19b3b05 | 87 | switch (VFS_I(ip)->i_mode & S_IFMT) { |
33479e05 DC |
88 | case S_IFREG: |
89 | case S_IFDIR: | |
90 | case S_IFLNK: | |
91 | xfs_idestroy_fork(ip, XFS_DATA_FORK); | |
92 | break; | |
93 | } | |
94 | ||
95 | if (ip->i_afp) | |
96 | xfs_idestroy_fork(ip, XFS_ATTR_FORK); | |
3993baeb DW |
97 | if (ip->i_cowfp) |
98 | xfs_idestroy_fork(ip, XFS_COW_FORK); | |
33479e05 DC |
99 | |
100 | if (ip->i_itemp) { | |
22525c17 DC |
101 | ASSERT(!test_bit(XFS_LI_IN_AIL, |
102 | &ip->i_itemp->ili_item.li_flags)); | |
33479e05 DC |
103 | xfs_inode_item_destroy(ip); |
104 | ip->i_itemp = NULL; | |
105 | } | |
106 | ||
377bcd5f | 107 | kmem_cache_free(xfs_inode_zone, ip); |
1f2dcfe8 DC |
108 | } |
109 | ||
8a17d7dd DC |
110 | static void |
111 | __xfs_inode_free( | |
112 | struct xfs_inode *ip) | |
113 | { | |
114 | /* asserts to verify all state is correct here */ | |
115 | ASSERT(atomic_read(&ip->i_pincount) == 0); | |
8a17d7dd DC |
116 | XFS_STATS_DEC(ip->i_mount, vn_active); |
117 | ||
118 | call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); | |
119 | } | |
120 | ||
1f2dcfe8 DC |
121 | void |
122 | xfs_inode_free( | |
123 | struct xfs_inode *ip) | |
124 | { | |
98efe8af BF |
125 | ASSERT(!xfs_isiflocked(ip)); |
126 | ||
33479e05 DC |
127 | /* |
128 | * Because we use RCU freeing we need to ensure the inode always | |
129 | * appears to be reclaimed with an invalid inode number when in the | |
130 | * free state. The ip->i_flags_lock provides the barrier against lookup | |
131 | * races. | |
132 | */ | |
133 | spin_lock(&ip->i_flags_lock); | |
134 | ip->i_flags = XFS_IRECLAIM; | |
135 | ip->i_ino = 0; | |
136 | spin_unlock(&ip->i_flags_lock); | |
137 | ||
8a17d7dd | 138 | __xfs_inode_free(ip); |
33479e05 DC |
139 | } |
140 | ||
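The free path above only works because every lookup re-checks ip->i_ino under ip->i_flags_lock after finding the inode in an RCU radix-tree walk. A minimal userspace sketch of that handshake, assuming nothing beyond what the comment describes (a pthread mutex stands in for the spinlock; `fake_inode`, `free_side` and `lookup_side` are illustrative names, not XFS APIs):

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IRECLAIM 0x1            /* stand-in for XFS_IRECLAIM */

struct fake_inode {
	pthread_mutex_t flags_lock; /* models ip->i_flags_lock */
	unsigned int flags;
	uint64_t ino;
};

/* Free side: invalidate the identity before the memory is recycled. */
static void free_side(struct fake_inode *ip)
{
	pthread_mutex_lock(&ip->flags_lock);
	ip->flags = IRECLAIM;
	ip->ino = 0;		/* lookups now see an invalid inode number */
	pthread_mutex_unlock(&ip->flags_lock);
	/* the real code would call_rcu() the structure here */
}

/* Lookup side: the tree may still return a stale pointer; re-check. */
static bool lookup_side(struct fake_inode *ip, uint64_t wanted)
{
	bool ok;

	pthread_mutex_lock(&ip->flags_lock);
	ok = (ip->ino == wanted) && !(ip->flags & IRECLAIM);
	pthread_mutex_unlock(&ip->flags_lock);
	return ok;		/* false => caller retries the lookup */
}

int main(void)
{
	struct fake_inode ip = {
		.flags_lock = PTHREAD_MUTEX_INITIALIZER, .ino = 42,
	};

	printf("before free: %d\n", lookup_side(&ip, 42));
	free_side(&ip);
	printf("after free:  %d\n", lookup_side(&ip, 42));
	return 0;
}
```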
ad438c40 DC |
141 | /* |
142 | * Queue a new inode reclaim pass if there are reclaimable inodes and there | |
143 | * isn't a reclaim pass already in progress. By default it runs every 5s based | |
144 | * on the xfs periodic sync default of 30s. Perhaps this should have its own |
145 | * tunable, but that can be done if this method proves to be ineffective or too | |
146 | * aggressive. | |
147 | */ | |
148 | static void | |
149 | xfs_reclaim_work_queue( | |
150 | struct xfs_mount *mp) | |
151 | { | |
152 | ||
153 | rcu_read_lock(); | |
154 | if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { | |
155 | queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, | |
156 | msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); | |
157 | } | |
158 | rcu_read_unlock(); | |
159 | } | |
160 | ||
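The requeue delay above is derived from the periodic sync interval: with the default of 30s (3000 centiseconds, per the comment), `centisecs / 6 * 10` yields one sixth of the interval in milliseconds, i.e. the 5s reclaim cadence. A quick arithmetic check; the macro and helper names here are ours, only the formula comes from the source:

```c
#include <stdio.h>

/* assumed default: 30 seconds expressed in centiseconds */
#define SYNCD_CENTISECS_DEFAULT 3000

/* one sixth of the sync interval, converted to milliseconds */
static unsigned int reclaim_delay_ms(unsigned int centisecs)
{
	return centisecs / 6 * 10;
}

int main(void)
{
	printf("%u ms\n", reclaim_delay_ms(SYNCD_CENTISECS_DEFAULT)); /* 5000 */
	return 0;
}
```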
161 | /* | |
162 | * This is a fast pass over the inode cache to try to get reclaim moving on as | |
163 | * many inodes as possible in a short period of time. It kicks itself every few | |
164 | * seconds, as well as being kicked by the inode cache shrinker when memory | |
165 | * goes low. It scans as quickly as possible avoiding locked inodes or those | |
166 | * already being flushed, and once done schedules a future pass. | |
167 | */ | |
168 | void | |
169 | xfs_reclaim_worker( | |
170 | struct work_struct *work) | |
171 | { | |
172 | struct xfs_mount *mp = container_of(to_delayed_work(work), | |
173 | struct xfs_mount, m_reclaim_work); | |
174 | ||
175 | xfs_reclaim_inodes(mp, SYNC_TRYLOCK); | |
176 | xfs_reclaim_work_queue(mp); | |
177 | } | |
178 | ||
179 | static void | |
180 | xfs_perag_set_reclaim_tag( | |
181 | struct xfs_perag *pag) | |
182 | { | |
183 | struct xfs_mount *mp = pag->pag_mount; | |
184 | ||
95989c46 | 185 | lockdep_assert_held(&pag->pag_ici_lock); |
ad438c40 DC |
186 | if (pag->pag_ici_reclaimable++) |
187 | return; | |
188 | ||
189 | /* propagate the reclaim tag up into the perag radix tree */ | |
190 | spin_lock(&mp->m_perag_lock); | |
191 | radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, | |
192 | XFS_ICI_RECLAIM_TAG); | |
193 | spin_unlock(&mp->m_perag_lock); | |
194 | ||
195 | /* schedule periodic background inode reclaim */ | |
196 | xfs_reclaim_work_queue(mp); | |
197 | ||
198 | trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_); | |
199 | } | |
200 | ||
201 | static void | |
202 | xfs_perag_clear_reclaim_tag( | |
203 | struct xfs_perag *pag) | |
204 | { | |
205 | struct xfs_mount *mp = pag->pag_mount; | |
206 | ||
95989c46 | 207 | lockdep_assert_held(&pag->pag_ici_lock); |
ad438c40 DC |
208 | if (--pag->pag_ici_reclaimable) |
209 | return; | |
210 | ||
211 | /* clear the reclaim tag from the perag radix tree */ | |
212 | spin_lock(&mp->m_perag_lock); | |
213 | radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, | |
214 | XFS_ICI_RECLAIM_TAG); | |
215 | spin_unlock(&mp->m_perag_lock); | |
216 | trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_); | |
217 | } | |
218 | ||
219 | ||
220 | /* | |
221 | * We set the inode flag atomically with the radix tree tag. | |
222 | * Once we get tag lookups on the radix tree, this inode flag | |
223 | * can go away. | |
224 | */ | |
225 | void | |
226 | xfs_inode_set_reclaim_tag( | |
227 | struct xfs_inode *ip) | |
228 | { | |
229 | struct xfs_mount *mp = ip->i_mount; | |
230 | struct xfs_perag *pag; | |
231 | ||
232 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); | |
233 | spin_lock(&pag->pag_ici_lock); | |
234 | spin_lock(&ip->i_flags_lock); | |
235 | ||
236 | radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino), | |
237 | XFS_ICI_RECLAIM_TAG); | |
238 | xfs_perag_set_reclaim_tag(pag); | |
239 | __xfs_iflags_set(ip, XFS_IRECLAIMABLE); | |
240 | ||
241 | spin_unlock(&ip->i_flags_lock); | |
242 | spin_unlock(&pag->pag_ici_lock); | |
243 | xfs_perag_put(pag); | |
244 | } | |
245 | ||
246 | STATIC void | |
247 | xfs_inode_clear_reclaim_tag( | |
248 | struct xfs_perag *pag, | |
249 | xfs_ino_t ino) | |
250 | { | |
251 | radix_tree_tag_clear(&pag->pag_ici_root, | |
252 | XFS_INO_TO_AGINO(pag->pag_mount, ino), | |
253 | XFS_ICI_RECLAIM_TAG); | |
254 | xfs_perag_clear_reclaim_tag(pag); | |
255 | } | |
256 | ||
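The two helpers above only touch the mount-wide perag radix tree on the 0-to-1 and 1-to-0 transitions of pag_ici_reclaimable, which keeps m_perag_lock traffic to a minimum. A small model of just that transition logic (plain ints and a bool stand in for the counter and the AG-level tag; all names are illustrative):

```c
#include <stdbool.h>
#include <stdio.h>

struct fake_pag {
	int reclaimable;	/* models pag->pag_ici_reclaimable */
	bool perag_tagged;	/* models XFS_ICI_RECLAIM_TAG on the AG */
};

static void set_reclaim_tag(struct fake_pag *pag)
{
	if (pag->reclaimable++)		/* already non-zero: tag already set */
		return;
	pag->perag_tagged = true;	/* 0 -> 1: propagate the tag upwards */
}

static void clear_reclaim_tag(struct fake_pag *pag)
{
	if (--pag->reclaimable)		/* still non-zero: keep the tag */
		return;
	pag->perag_tagged = false;	/* 1 -> 0: drop the tag */
}

int main(void)
{
	struct fake_pag pag = { 0 };

	set_reclaim_tag(&pag);		/* tags the AG */
	set_reclaim_tag(&pag);		/* no perag update needed */
	clear_reclaim_tag(&pag);
	printf("count=%d tagged=%d\n", pag.reclaimable, pag.perag_tagged);
	clear_reclaim_tag(&pag);
	printf("count=%d tagged=%d\n", pag.reclaimable, pag.perag_tagged);
	return 0;
}
```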
ae2c4ac2 BF |
257 | static void |
258 | xfs_inew_wait( | |
259 | struct xfs_inode *ip) | |
260 | { | |
261 | wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT); | |
262 | DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT); | |
263 | ||
264 | do { | |
21417136 | 265 | prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); |
ae2c4ac2 BF |
266 | if (!xfs_iflags_test(ip, XFS_INEW)) |
267 | break; | |
268 | schedule(); | |
269 | } while (true); | |
21417136 | 270 | finish_wait(wq, &wait.wq_entry); |
ae2c4ac2 BF |
271 | } |
272 | ||
50997470 DC |
273 | /* |
274 | * When we recycle a reclaimable inode, we need to re-initialise the VFS inode | |
275 | * part of the structure. This is made more complex by the fact we store | |
276 | * information about the on-disk values in the VFS inode and so we can't just | |
83e06f21 | 277 | * overwrite the values unconditionally. Hence we save the parameters we |
50997470 | 278 | * need to retain across reinitialisation, and rewrite them into the VFS inode |
83e06f21 | 279 | * after reinitialisation even if it fails. |
50997470 DC |
280 | */ |
281 | static int | |
282 | xfs_reinit_inode( | |
283 | struct xfs_mount *mp, | |
284 | struct inode *inode) | |
285 | { | |
286 | int error; | |
54d7b5c1 | 287 | uint32_t nlink = inode->i_nlink; |
9e9a2674 | 288 | uint32_t generation = inode->i_generation; |
f0e28280 | 289 | uint64_t version = inode_peek_iversion(inode); |
c19b3b05 | 290 | umode_t mode = inode->i_mode; |
acd1d715 | 291 | dev_t dev = inode->i_rdev; |
3d8f2821 CH |
292 | kuid_t uid = inode->i_uid; |
293 | kgid_t gid = inode->i_gid; | |
50997470 DC |
294 | |
295 | error = inode_init_always(mp->m_super, inode); | |
296 | ||
54d7b5c1 | 297 | set_nlink(inode, nlink); |
9e9a2674 | 298 | inode->i_generation = generation; |
f0e28280 | 299 | inode_set_iversion_queried(inode, version); |
c19b3b05 | 300 | inode->i_mode = mode; |
acd1d715 | 301 | inode->i_rdev = dev; |
3d8f2821 CH |
302 | inode->i_uid = uid; |
303 | inode->i_gid = gid; | |
50997470 DC |
304 | return error; |
305 | } | |
306 | ||
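xfs_reinit_inode() above follows a save/reinit/restore pattern: the fields that mirror on-disk state are captured first and written back unconditionally, even when the reinitialisation fails. A stripped-down illustration of that shape under an assumed generic struct (none of these field names are the VFS inode's):

```c
#include <stdio.h>
#include <string.h>

struct obj {
	unsigned int nlink;
	unsigned int generation;
	unsigned int mode;
	char scratch[32];	/* state that reinit is allowed to clobber */
};

static int reinit_obj(struct obj *o)
{
	memset(o, 0, sizeof(*o));	/* wipes everything, like a fresh init */
	return 0;			/* may fail in the real code */
}

/* Preserve identity fields across a full reinitialisation. */
static int reinit_preserving(struct obj *o)
{
	unsigned int nlink = o->nlink;
	unsigned int generation = o->generation;
	unsigned int mode = o->mode;
	int error;

	error = reinit_obj(o);

	/* restored even if reinit_obj() failed, mirroring xfs_reinit_inode */
	o->nlink = nlink;
	o->generation = generation;
	o->mode = mode;
	return error;
}

int main(void)
{
	struct obj o = { .nlink = 2, .generation = 7, .mode = 0100644 };

	reinit_preserving(&o);
	printf("nlink=%u gen=%u mode=%o\n", o.nlink, o.generation, o.mode);
	return 0;
}
```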
afca6c5b DC |
307 | /* |
308 | * If we are allocating a new inode, then check what was returned is | |
309 | * actually a free, empty inode. If we are not allocating an inode, | |
310 | * then check we didn't find a free inode. | |
311 | * | |
312 | * Returns: | |
313 | * 0 if the inode free state matches the lookup context | |
314 | * -ENOENT if the inode is free and we are not allocating | |
315 | * -EFSCORRUPTED if there is any state mismatch at all | |
316 | */ | |
317 | static int | |
318 | xfs_iget_check_free_state( | |
319 | struct xfs_inode *ip, | |
320 | int flags) | |
321 | { | |
322 | if (flags & XFS_IGET_CREATE) { | |
323 | /* should be a free inode */ | |
324 | if (VFS_I(ip)->i_mode != 0) { | |
325 | xfs_warn(ip->i_mount, | |
326 | "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)", | |
327 | ip->i_ino, VFS_I(ip)->i_mode); | |
328 | return -EFSCORRUPTED; | |
329 | } | |
330 | ||
331 | if (ip->i_d.di_nblocks != 0) { | |
332 | xfs_warn(ip->i_mount, | |
333 | "Corruption detected! Free inode 0x%llx has blocks allocated!", | |
334 | ip->i_ino); | |
335 | return -EFSCORRUPTED; | |
336 | } | |
337 | return 0; | |
338 | } | |
339 | ||
340 | /* should be an allocated inode */ | |
341 | if (VFS_I(ip)->i_mode == 0) | |
342 | return -ENOENT; | |
343 | ||
344 | return 0; | |
345 | } | |
346 | ||
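xfs_iget_check_free_state() encodes a small decision matrix: the lookup context (allocating via XFS_IGET_CREATE or not) versus what the cached inode looks like on disk. A compact restatement of that matrix as a standalone function; the error macros below spell out assumed numeric values (EFSCORRUPTED maps to EUCLEAN, 117, on common Linux arches) purely so the sketch is self-contained:

```c
#include <stdbool.h>
#include <stdio.h>

#define ENOENT_ERR	 2
#define EFSCORRUPTED_ERR 117	/* assumed: Linux EFSCORRUPTED == EUCLEAN */

/*
 * creating:   lookup is part of inode allocation (XFS_IGET_CREATE)
 * is_free:    the cached inode looks free on disk (i_mode == 0)
 * has_blocks: the cached inode still owns data blocks
 */
static int check_free_state(bool creating, bool is_free, bool has_blocks)
{
	if (creating) {
		if (!is_free || has_blocks)
			return -EFSCORRUPTED_ERR; /* allocated a non-free inode */
		return 0;
	}
	if (is_free)
		return -ENOENT_ERR;	/* normal lookup raced with an unlink */
	return 0;
}

int main(void)
{
	printf("%d\n", check_free_state(true, true, false));	/* 0 */
	printf("%d\n", check_free_state(true, false, false));	/* -117 */
	printf("%d\n", check_free_state(false, true, false));	/* -2 */
	return 0;
}
```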
33479e05 DC |
347 | /* |
348 | * Check the validity of the inode we just found in the cache |
349 | */ | |
350 | static int | |
351 | xfs_iget_cache_hit( | |
352 | struct xfs_perag *pag, | |
353 | struct xfs_inode *ip, | |
354 | xfs_ino_t ino, | |
355 | int flags, | |
356 | int lock_flags) __releases(RCU) | |
357 | { | |
358 | struct inode *inode = VFS_I(ip); | |
359 | struct xfs_mount *mp = ip->i_mount; | |
360 | int error; | |
361 | ||
362 | /* | |
363 | * check for re-use of an inode within an RCU grace period due to the | |
364 | * radix tree nodes not being updated yet. We monitor for this by | |
365 | * setting the inode number to zero before freeing the inode structure. | |
366 | * If the inode has been reallocated and set up, then the inode number | |
367 | * will not match, so check for that, too. | |
368 | */ | |
369 | spin_lock(&ip->i_flags_lock); | |
370 | if (ip->i_ino != ino) { | |
371 | trace_xfs_iget_skip(ip); | |
ff6d6af2 | 372 | XFS_STATS_INC(mp, xs_ig_frecycle); |
2451337d | 373 | error = -EAGAIN; |
33479e05 DC |
374 | goto out_error; |
375 | } | |
376 | ||
377 | ||
378 | /* | |
379 | * If we are racing with another cache hit that is currently | |
380 | * instantiating this inode or currently recycling it out of | |
381 | * reclaimable state, wait for the initialisation to complete | |
382 | * before continuing. | |
383 | * | |
384 | * XXX(hch): eventually we should do something equivalent to | |
385 | * wait_on_inode to wait for these flags to be cleared | |
386 | * instead of polling for it. | |
387 | */ | |
388 | if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { | |
389 | trace_xfs_iget_skip(ip); | |
ff6d6af2 | 390 | XFS_STATS_INC(mp, xs_ig_frecycle); |
2451337d | 391 | error = -EAGAIN; |
33479e05 DC |
392 | goto out_error; |
393 | } | |
394 | ||
395 | /* | |
afca6c5b DC |
396 | * Check the inode free state is valid. This also detects lookup |
397 | * racing with unlinks. | |
33479e05 | 398 | */ |
afca6c5b DC |
399 | error = xfs_iget_check_free_state(ip, flags); |
400 | if (error) | |
33479e05 | 401 | goto out_error; |
33479e05 DC |
402 | |
403 | /* | |
404 | * If IRECLAIMABLE is set, we've torn down the VFS inode already. | |
405 | * Need to carefully get it back into useable state. | |
406 | */ | |
407 | if (ip->i_flags & XFS_IRECLAIMABLE) { | |
408 | trace_xfs_iget_reclaim(ip); | |
409 | ||
378f681c DW |
410 | if (flags & XFS_IGET_INCORE) { |
411 | error = -EAGAIN; | |
412 | goto out_error; | |
413 | } | |
414 | ||
33479e05 DC |
415 | /* |
416 | * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode | |
417 | * from stomping over us while we recycle the inode. We can't | |
418 | * clear the radix tree reclaimable tag yet as it requires | |
419 | * pag_ici_lock to be held exclusive. | |
420 | */ | |
421 | ip->i_flags |= XFS_IRECLAIM; | |
422 | ||
423 | spin_unlock(&ip->i_flags_lock); | |
424 | rcu_read_unlock(); | |
425 | ||
50997470 | 426 | error = xfs_reinit_inode(mp, inode); |
33479e05 | 427 | if (error) { |
756baca2 | 428 | bool wake; |
33479e05 DC |
429 | /* |
430 | * Re-initializing the inode failed, and we are in deep | |
431 | * trouble. Try to re-add it to the reclaim list. | |
432 | */ | |
433 | rcu_read_lock(); | |
434 | spin_lock(&ip->i_flags_lock); | |
756baca2 | 435 | wake = !!__xfs_iflags_test(ip, XFS_INEW); |
33479e05 | 436 | ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); |
756baca2 BF |
437 | if (wake) |
438 | wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); | |
33479e05 DC |
439 | ASSERT(ip->i_flags & XFS_IRECLAIMABLE); |
440 | trace_xfs_iget_reclaim_fail(ip); | |
441 | goto out_error; | |
442 | } | |
443 | ||
444 | spin_lock(&pag->pag_ici_lock); | |
445 | spin_lock(&ip->i_flags_lock); | |
446 | ||
447 | /* | |
448 | * Clear the per-lifetime state in the inode as we are now | |
449 | * effectively a new inode and need to return to the initial | |
450 | * state before reuse occurs. | |
451 | */ | |
452 | ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; | |
453 | ip->i_flags |= XFS_INEW; | |
545c0889 | 454 | xfs_inode_clear_reclaim_tag(pag, ip->i_ino); |
33479e05 | 455 | inode->i_state = I_NEW; |
6772c1f1 DW |
456 | ip->i_sick = 0; |
457 | ip->i_checked = 0; | |
33479e05 | 458 | |
65523218 CH |
459 | ASSERT(!rwsem_is_locked(&inode->i_rwsem)); |
460 | init_rwsem(&inode->i_rwsem); | |
33479e05 DC |
461 | |
462 | spin_unlock(&ip->i_flags_lock); | |
463 | spin_unlock(&pag->pag_ici_lock); | |
464 | } else { | |
465 | /* If the VFS inode is being torn down, pause and try again. */ | |
466 | if (!igrab(inode)) { | |
467 | trace_xfs_iget_skip(ip); | |
2451337d | 468 | error = -EAGAIN; |
33479e05 DC |
469 | goto out_error; |
470 | } | |
471 | ||
472 | /* We've got a live one. */ | |
473 | spin_unlock(&ip->i_flags_lock); | |
474 | rcu_read_unlock(); | |
475 | trace_xfs_iget_hit(ip); | |
476 | } | |
477 | ||
478 | if (lock_flags != 0) | |
479 | xfs_ilock(ip, lock_flags); | |
480 | ||
378f681c DW |
481 | if (!(flags & XFS_IGET_INCORE)) |
482 | xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE); | |
ff6d6af2 | 483 | XFS_STATS_INC(mp, xs_ig_found); |
33479e05 DC |
484 | |
485 | return 0; | |
486 | ||
487 | out_error: | |
488 | spin_unlock(&ip->i_flags_lock); | |
489 | rcu_read_unlock(); | |
490 | return error; | |
491 | } | |
492 | ||
493 | ||
494 | static int | |
495 | xfs_iget_cache_miss( | |
496 | struct xfs_mount *mp, | |
497 | struct xfs_perag *pag, | |
498 | xfs_trans_t *tp, | |
499 | xfs_ino_t ino, | |
500 | struct xfs_inode **ipp, | |
501 | int flags, | |
502 | int lock_flags) | |
503 | { | |
504 | struct xfs_inode *ip; | |
505 | int error; | |
506 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); | |
507 | int iflags; | |
508 | ||
509 | ip = xfs_inode_alloc(mp, ino); | |
510 | if (!ip) | |
2451337d | 511 | return -ENOMEM; |
33479e05 DC |
512 | |
513 | error = xfs_iread(mp, tp, ip, flags); | |
514 | if (error) | |
515 | goto out_destroy; | |
516 | ||
9cfb9b47 DW |
517 | if (!xfs_inode_verify_forks(ip)) { |
518 | error = -EFSCORRUPTED; | |
519 | goto out_destroy; | |
520 | } | |
521 | ||
33479e05 DC |
522 | trace_xfs_iget_miss(ip); |
523 | ||
ee457001 DC |
524 | |
525 | /* | |
afca6c5b DC |
526 | * Check the inode free state is valid. This also detects lookup |
527 | * racing with unlinks. | |
ee457001 | 528 | */ |
afca6c5b DC |
529 | error = xfs_iget_check_free_state(ip, flags); |
530 | if (error) | |
33479e05 | 531 | goto out_destroy; |
33479e05 DC |
532 | |
533 | /* | |
534 | * Preload the radix tree so we can insert safely under the | |
535 | * write spinlock. Note that we cannot sleep inside the preload | |
536 | * region. Since we can be called from transaction context, don't | |
537 | * recurse into the file system. | |
538 | */ | |
539 | if (radix_tree_preload(GFP_NOFS)) { | |
2451337d | 540 | error = -EAGAIN; |
33479e05 DC |
541 | goto out_destroy; |
542 | } | |
543 | ||
544 | /* | |
545 | * Because the inode hasn't been added to the radix-tree yet it can't | |
546 | * be found by another thread, so we can do the non-sleeping lock here. | |
547 | */ | |
548 | if (lock_flags) { | |
549 | if (!xfs_ilock_nowait(ip, lock_flags)) | |
550 | BUG(); | |
551 | } | |
552 | ||
553 | /* | |
554 | * These values must be set before inserting the inode into the radix | |
555 | * tree as the moment it is inserted a concurrent lookup (allowed by the | |
556 | * RCU locking mechanism) can find it and that lookup must see that this | |
557 | * is an inode currently under construction (i.e. that XFS_INEW is set). | |
558 | * The ip->i_flags_lock that protects the XFS_INEW flag forms the | |
559 | * memory barrier that ensures this detection works correctly at lookup | |
560 | * time. | |
561 | */ | |
562 | iflags = XFS_INEW; | |
563 | if (flags & XFS_IGET_DONTCACHE) | |
564 | iflags |= XFS_IDONTCACHE; | |
113a5683 CS |
565 | ip->i_udquot = NULL; |
566 | ip->i_gdquot = NULL; | |
92f8ff73 | 567 | ip->i_pdquot = NULL; |
33479e05 DC |
568 | xfs_iflags_set(ip, iflags); |
569 | ||
570 | /* insert the new inode */ | |
571 | spin_lock(&pag->pag_ici_lock); | |
572 | error = radix_tree_insert(&pag->pag_ici_root, agino, ip); | |
573 | if (unlikely(error)) { | |
574 | WARN_ON(error != -EEXIST); | |
ff6d6af2 | 575 | XFS_STATS_INC(mp, xs_ig_dup); |
2451337d | 576 | error = -EAGAIN; |
33479e05 DC |
577 | goto out_preload_end; |
578 | } | |
579 | spin_unlock(&pag->pag_ici_lock); | |
580 | radix_tree_preload_end(); | |
581 | ||
582 | *ipp = ip; | |
583 | return 0; | |
584 | ||
585 | out_preload_end: | |
586 | spin_unlock(&pag->pag_ici_lock); | |
587 | radix_tree_preload_end(); | |
588 | if (lock_flags) | |
589 | xfs_iunlock(ip, lock_flags); | |
590 | out_destroy: | |
591 | __destroy_inode(VFS_I(ip)); | |
592 | xfs_inode_free(ip); | |
593 | return error; | |
594 | } | |
595 | ||
596 | /* | |
597 | * Look up an inode by number in the given file system. | |
598 | * The inode is looked up in the cache held in each AG. | |
599 | * If the inode is found in the cache, initialise the vfs inode | |
600 | * if necessary. | |
601 | * | |
602 | * If it is not in core, read it in from the file system's device, | |
603 | * add it to the cache and initialise the vfs inode. | |
604 | * | |
605 | * The inode is locked according to the value of the lock_flags parameter. | |
606 | * This flag parameter indicates how and if the inode's IO lock and inode lock | |
607 | * should be taken. | |
608 | * | |
609 | * mp -- the mount point structure for the current file system. It points | |
610 | * to the inode hash table. | |
611 | * tp -- a pointer to the current transaction if there is one. This is | |
612 | * simply passed through to the xfs_iread() call. | |
613 | * ino -- the number of the inode desired. This is the unique identifier | |
614 | * within the file system for the inode being requested. | |
615 | * lock_flags -- flags indicating how to lock the inode. See the comment | |
616 | * for xfs_ilock() for a list of valid values. | |
617 | */ | |
618 | int | |
619 | xfs_iget( | |
620 | xfs_mount_t *mp, | |
621 | xfs_trans_t *tp, | |
622 | xfs_ino_t ino, | |
623 | uint flags, | |
624 | uint lock_flags, | |
625 | xfs_inode_t **ipp) | |
626 | { | |
627 | xfs_inode_t *ip; | |
628 | int error; | |
629 | xfs_perag_t *pag; | |
630 | xfs_agino_t agino; | |
631 | ||
632 | /* | |
633 | * xfs_reclaim_inode() uses the ILOCK to ensure an inode | |
634 | * doesn't get freed while it's being referenced during a | |
635 | * radix tree traversal here. It assumes this function | |
636 | * acquires only the ILOCK (and therefore it has no need to | |
637 | * involve the IOLOCK in this synchronization). | |
638 | */ | |
639 | ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0); | |
640 | ||
641 | /* reject inode numbers outside existing AGs */ | |
642 | if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) | |
2451337d | 643 | return -EINVAL; |
33479e05 | 644 | |
ff6d6af2 | 645 | XFS_STATS_INC(mp, xs_ig_attempts); |
8774cf8b | 646 | |
33479e05 DC |
647 | /* get the perag structure and ensure that it's inode capable */ |
648 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); | |
649 | agino = XFS_INO_TO_AGINO(mp, ino); | |
650 | ||
651 | again: | |
652 | error = 0; | |
653 | rcu_read_lock(); | |
654 | ip = radix_tree_lookup(&pag->pag_ici_root, agino); | |
655 | ||
656 | if (ip) { | |
657 | error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); | |
658 | if (error) | |
659 | goto out_error_or_again; | |
660 | } else { | |
661 | rcu_read_unlock(); | |
378f681c | 662 | if (flags & XFS_IGET_INCORE) { |
ed438b47 | 663 | error = -ENODATA; |
378f681c DW |
664 | goto out_error_or_again; |
665 | } | |
ff6d6af2 | 666 | XFS_STATS_INC(mp, xs_ig_missed); |
33479e05 DC |
667 | |
668 | error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, | |
669 | flags, lock_flags); | |
670 | if (error) | |
671 | goto out_error_or_again; | |
672 | } | |
673 | xfs_perag_put(pag); | |
674 | ||
675 | *ipp = ip; | |
676 | ||
677 | /* | |
58c90473 | 678 | * If we have a real type for an on-disk inode, we can setup the inode |
33479e05 DC |
679 | * now. If it's a new inode being created, xfs_ialloc will handle it. |
680 | */ | |
c19b3b05 | 681 | if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0) |
58c90473 | 682 | xfs_setup_existing_inode(ip); |
33479e05 DC |
683 | return 0; |
684 | ||
685 | out_error_or_again: | |
378f681c | 686 | if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) { |
33479e05 DC |
687 | delay(1); |
688 | goto again; | |
689 | } | |
690 | xfs_perag_put(pag); | |
691 | return error; | |
692 | } | |
693 | ||
378f681c DW |
694 | /* |
695 | * "Is this a cached inode that's also allocated?" | |
696 | * | |
697 | * Look up an inode by number in the given file system. If the inode is | |
698 | * in cache and isn't in purgatory, return 1 if the inode is allocated | |
699 | * and 0 if it is not. For all other cases (not in cache, being torn | |
700 | * down, etc.), return a negative error code. | |
701 | * | |
702 | * The caller has to prevent inode allocation and freeing activity, | |
703 | * presumably by locking the AGI buffer. This is to ensure that an | |
704 | * inode cannot transition from allocated to freed until the caller is | |
705 | * ready to allow that. If the inode is in an intermediate state (new, | |
706 | * reclaimable, or being reclaimed), -EAGAIN will be returned; if the | |
707 | * inode is not in the cache, -ENOENT will be returned. The caller must | |
708 | * deal with these scenarios appropriately. | |
709 | * | |
710 | * This is a specialized use case for the online scrubber; if you're | |
711 | * reading this, you probably want xfs_iget. | |
712 | */ | |
713 | int | |
714 | xfs_icache_inode_is_allocated( | |
715 | struct xfs_mount *mp, | |
716 | struct xfs_trans *tp, | |
717 | xfs_ino_t ino, | |
718 | bool *inuse) | |
719 | { | |
720 | struct xfs_inode *ip; | |
721 | int error; | |
722 | ||
723 | error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip); | |
724 | if (error) | |
725 | return error; | |
726 | ||
727 | *inuse = !!(VFS_I(ip)->i_mode); | |
44a8736b | 728 | xfs_irele(ip); |
378f681c DW |
729 | return 0; |
730 | } | |
731 | ||
78ae5256 DC |
732 | /* |
733 | * The inode lookup is done in batches to keep the amount of lock traffic and | |
734 | * radix tree lookups to a minimum. The batch size is a trade off between | |
735 | * lookup reduction and stack usage. This is in the reclaim path, so we can't | |
736 | * be too greedy. | |
737 | */ | |
738 | #define XFS_LOOKUP_BATCH 32 | |
739 | ||
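The batching described above follows a fixed shape: take the RCU read lock, gang-look-up up to XFS_LOOKUP_BATCH entries, grab a reference to each candidate while still under RCU, drop the lock, and only then do the real per-inode work. A simplified userspace rendering of that loop shape, assuming an array scan in place of the radix-tree gang lookup (all names are illustrative):

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define LOOKUP_BATCH 32

struct item { int id; bool grabbed; };

/* Stand-in for radix_tree_gang_lookup(): copy up to 'max' items. */
static int gang_lookup(struct item *pool, size_t pool_len, size_t first,
		       struct item **batch, int max)
{
	int nr = 0;

	for (size_t i = first; i < pool_len && nr < max; i++)
		batch[nr++] = &pool[i];
	return nr;
}

static void walk(struct item *pool, size_t pool_len)
{
	size_t first = 0;
	int nr_found;

	do {
		struct item *batch[LOOKUP_BATCH];

		/* rcu_read_lock() would go here */
		nr_found = gang_lookup(pool, pool_len, first, batch,
				       LOOKUP_BATCH);
		for (int i = 0; i < nr_found; i++) {
			batch[i]->grabbed = true;	/* igrab() analogue */
			first = batch[i]->id + 1;	/* advance the cursor */
		}
		/* rcu_read_unlock() would go here */

		for (int i = 0; i < nr_found; i++)	/* work outside the lock */
			printf("processing %d\n", batch[i]->id);
	} while (nr_found);
}

int main(void)
{
	struct item pool[70];

	for (int i = 0; i < 70; i++)
		pool[i] = (struct item){ .id = i };
	walk(pool, 70);
	return 0;
}
```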
e13de955 DC |
740 | STATIC int |
741 | xfs_inode_ag_walk_grab( | |
ae2c4ac2 BF |
742 | struct xfs_inode *ip, |
743 | int flags) | |
e13de955 DC |
744 | { |
745 | struct inode *inode = VFS_I(ip); | |
ae2c4ac2 | 746 | bool newinos = !!(flags & XFS_AGITER_INEW_WAIT); |
e13de955 | 747 | |
1a3e8f3d DC |
748 | ASSERT(rcu_read_lock_held()); |
749 | ||
750 | /* | |
751 | * check for stale RCU freed inode | |
752 | * | |
753 | * If the inode has been reallocated, it doesn't matter if it's not in | |
754 | * the AG we are walking - we are walking for writeback, so if it | |
755 | * passes all the "valid inode" checks and is dirty, then we'll write | |
756 | * it back anyway. If it has been reallocated and is still being | |
757 | * initialised, the XFS_INEW check below will catch it. | |
758 | */ | |
759 | spin_lock(&ip->i_flags_lock); | |
760 | if (!ip->i_ino) | |
761 | goto out_unlock_noent; | |
762 | ||
763 | /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ | |
ae2c4ac2 BF |
764 | if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) || |
765 | __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM)) | |
1a3e8f3d DC |
766 | goto out_unlock_noent; |
767 | spin_unlock(&ip->i_flags_lock); | |
768 | ||
e13de955 DC |
769 | /* nothing to sync during shutdown */ |
770 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | |
2451337d | 771 | return -EFSCORRUPTED; |
e13de955 | 772 | |
e13de955 DC |
773 | /* If we can't grab the inode, it must be on its way to reclaim. */ |
774 | if (!igrab(inode)) | |
2451337d | 775 | return -ENOENT; |
e13de955 | 776 | |
e13de955 DC |
777 | /* inode is valid */ |
778 | return 0; | |
1a3e8f3d DC |
779 | |
780 | out_unlock_noent: | |
781 | spin_unlock(&ip->i_flags_lock); | |
2451337d | 782 | return -ENOENT; |
e13de955 DC |
783 | } |
784 | ||
75f3cb13 DC |
785 | STATIC int |
786 | xfs_inode_ag_walk( | |
787 | struct xfs_mount *mp, | |
5017e97d | 788 | struct xfs_perag *pag, |
e0094008 | 789 | int (*execute)(struct xfs_inode *ip, int flags, |
a454f742 BF |
790 | void *args), |
791 | int flags, | |
792 | void *args, | |
ae2c4ac2 BF |
793 | int tag, |
794 | int iter_flags) | |
75f3cb13 | 795 | { |
75f3cb13 DC |
796 | uint32_t first_index; |
797 | int last_error = 0; | |
798 | int skipped; | |
65d0f205 | 799 | int done; |
78ae5256 | 800 | int nr_found; |
75f3cb13 DC |
801 | |
802 | restart: | |
65d0f205 | 803 | done = 0; |
75f3cb13 DC |
804 | skipped = 0; |
805 | first_index = 0; | |
78ae5256 | 806 | nr_found = 0; |
75f3cb13 | 807 | do { |
78ae5256 | 808 | struct xfs_inode *batch[XFS_LOOKUP_BATCH]; |
75f3cb13 | 809 | int error = 0; |
78ae5256 | 810 | int i; |
75f3cb13 | 811 | |
1a3e8f3d | 812 | rcu_read_lock(); |
a454f742 BF |
813 | |
814 | if (tag == -1) | |
815 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, | |
78ae5256 DC |
816 | (void **)batch, first_index, |
817 | XFS_LOOKUP_BATCH); | |
a454f742 BF |
818 | else |
819 | nr_found = radix_tree_gang_lookup_tag( | |
820 | &pag->pag_ici_root, | |
821 | (void **) batch, first_index, | |
822 | XFS_LOOKUP_BATCH, tag); | |
823 | ||
65d0f205 | 824 | if (!nr_found) { |
1a3e8f3d | 825 | rcu_read_unlock(); |
75f3cb13 | 826 | break; |
c8e20be0 | 827 | } |
75f3cb13 | 828 | |
65d0f205 | 829 | /* |
78ae5256 DC |
830 | * Grab the inodes before we drop the lock. If we found | |
831 | * nothing, nr == 0 and the loop will be skipped. | |
65d0f205 | 832 | */ |
78ae5256 DC |
833 | for (i = 0; i < nr_found; i++) { |
834 | struct xfs_inode *ip = batch[i]; | |
835 | ||
ae2c4ac2 | 836 | if (done || xfs_inode_ag_walk_grab(ip, iter_flags)) |
78ae5256 DC |
837 | batch[i] = NULL; |
838 | ||
839 | /* | |
1a3e8f3d DC |
840 | * Update the index for the next lookup. Catch |
841 | * overflows into the next AG range which can occur if | |
842 | * we have inodes in the last block of the AG and we | |
843 | * are currently pointing to the last inode. | |
844 | * | |
845 | * Because we may see inodes that are from the wrong AG | |
846 | * due to RCU freeing and reallocation, only update the | |
847 | * index if it lies in this AG. It was a race that led | |
848 | * us to see this inode, so another lookup from the | |
849 | * same index will not find it again. | |
78ae5256 | 850 | */ |
1a3e8f3d DC |
851 | if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) |
852 | continue; | |
78ae5256 DC |
853 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); |
854 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | |
855 | done = 1; | |
e13de955 | 856 | } |
78ae5256 DC |
857 | |
858 | /* unlock now we've grabbed the inodes. */ | |
1a3e8f3d | 859 | rcu_read_unlock(); |
e13de955 | 860 | |
78ae5256 DC |
861 | for (i = 0; i < nr_found; i++) { |
862 | if (!batch[i]) | |
863 | continue; | |
ae2c4ac2 BF |
864 | if ((iter_flags & XFS_AGITER_INEW_WAIT) && |
865 | xfs_iflags_test(batch[i], XFS_INEW)) | |
866 | xfs_inew_wait(batch[i]); | |
e0094008 | 867 | error = execute(batch[i], flags, args); |
44a8736b | 868 | xfs_irele(batch[i]); |
2451337d | 869 | if (error == -EAGAIN) { |
78ae5256 DC |
870 | skipped++; |
871 | continue; | |
872 | } | |
2451337d | 873 | if (error && last_error != -EFSCORRUPTED) |
78ae5256 | 874 | last_error = error; |
75f3cb13 | 875 | } |
c8e20be0 DC |
876 | |
877 | /* bail out if the filesystem is corrupted. */ | |
2451337d | 878 | if (error == -EFSCORRUPTED) |
75f3cb13 DC |
879 | break; |
880 | ||
8daaa831 DC |
881 | cond_resched(); |
882 | ||
78ae5256 | 883 | } while (nr_found && !done); |
75f3cb13 DC |
884 | |
885 | if (skipped) { | |
886 | delay(1); | |
887 | goto restart; | |
888 | } | |
75f3cb13 DC |
889 | return last_error; |
890 | } | |
891 | ||
579b62fa BF |
892 | /* |
893 | * Background scanning to trim post-EOF preallocated space. This is queued | |
b9fe5052 | 894 | * based on the 'speculative_prealloc_lifetime' tunable (5m by default). |
579b62fa | 895 | */ |
fa5a4f57 | 896 | void |
579b62fa BF |
897 | xfs_queue_eofblocks( |
898 | struct xfs_mount *mp) | |
899 | { | |
900 | rcu_read_lock(); | |
901 | if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG)) | |
902 | queue_delayed_work(mp->m_eofblocks_workqueue, | |
903 | &mp->m_eofblocks_work, | |
904 | msecs_to_jiffies(xfs_eofb_secs * 1000)); | |
905 | rcu_read_unlock(); | |
906 | } | |
907 | ||
908 | void | |
909 | xfs_eofblocks_worker( | |
910 | struct work_struct *work) | |
911 | { | |
912 | struct xfs_mount *mp = container_of(to_delayed_work(work), | |
913 | struct xfs_mount, m_eofblocks_work); | |
4b674b9a BF |
914 | |
915 | if (!sb_start_write_trylock(mp->m_super)) | |
916 | return; | |
579b62fa | 917 | xfs_icache_free_eofblocks(mp, NULL); |
4b674b9a BF |
918 | sb_end_write(mp->m_super); |
919 | ||
579b62fa BF |
920 | xfs_queue_eofblocks(mp); |
921 | } | |
922 | ||
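xfs_eofblocks_worker() (and the CoW equivalent further down) brackets its scan with sb_start_write_trylock()/sb_end_write() so a frozen filesystem simply skips the pass rather than blocking the workqueue. The shape of that guard, modelled with a plain boolean; nothing here is the real VFS freeze API:

```c
#include <stdbool.h>
#include <stdio.h>

static bool fs_frozen;

/* Models sb_start_write_trylock(): refuse to start work while frozen. */
static bool start_write_trylock(void)
{
	return !fs_frozen;
}

static void end_write(void) { /* models sb_end_write() */ }

static void background_worker(void)
{
	if (!start_write_trylock())
		return;		/* frozen: skip this pass, requeue later */
	printf("scanning...\n");
	end_write();
}

int main(void)
{
	background_worker();	/* runs */
	fs_frozen = true;
	background_worker();	/* skipped */
	return 0;
}
```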
83104d44 DW |
923 | /* |
924 | * Background scanning to trim preallocated CoW space. This is queued | |
925 | * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default). | |
926 | * (We'll just piggyback on the post-EOF prealloc space workqueue.) | |
927 | */ | |
10ddf64e | 928 | void |
83104d44 DW |
929 | xfs_queue_cowblocks( |
930 | struct xfs_mount *mp) | |
931 | { | |
932 | rcu_read_lock(); | |
933 | if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG)) | |
934 | queue_delayed_work(mp->m_eofblocks_workqueue, | |
935 | &mp->m_cowblocks_work, | |
936 | msecs_to_jiffies(xfs_cowb_secs * 1000)); | |
937 | rcu_read_unlock(); | |
938 | } | |
939 | ||
940 | void | |
941 | xfs_cowblocks_worker( | |
942 | struct work_struct *work) | |
943 | { | |
944 | struct xfs_mount *mp = container_of(to_delayed_work(work), | |
945 | struct xfs_mount, m_cowblocks_work); | |
4b674b9a BF |
946 | |
947 | if (!sb_start_write_trylock(mp->m_super)) | |
948 | return; | |
83104d44 | 949 | xfs_icache_free_cowblocks(mp, NULL); |
4b674b9a BF |
950 | sb_end_write(mp->m_super); |
951 | ||
83104d44 DW |
952 | xfs_queue_cowblocks(mp); |
953 | } | |
954 | ||
fe588ed3 | 955 | int |
ae2c4ac2 | 956 | xfs_inode_ag_iterator_flags( |
75f3cb13 | 957 | struct xfs_mount *mp, |
e0094008 | 958 | int (*execute)(struct xfs_inode *ip, int flags, |
a454f742 BF |
959 | void *args), |
960 | int flags, | |
ae2c4ac2 BF |
961 | void *args, |
962 | int iter_flags) | |
75f3cb13 | 963 | { |
16fd5367 | 964 | struct xfs_perag *pag; |
75f3cb13 DC |
965 | int error = 0; |
966 | int last_error = 0; | |
967 | xfs_agnumber_t ag; | |
968 | ||
16fd5367 | 969 | ag = 0; |
65d0f205 DC |
970 | while ((pag = xfs_perag_get(mp, ag))) { |
971 | ag = pag->pag_agno + 1; | |
ae2c4ac2 BF |
972 | error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1, |
973 | iter_flags); | |
a454f742 BF |
974 | xfs_perag_put(pag); |
975 | if (error) { | |
976 | last_error = error; | |
2451337d | 977 | if (error == -EFSCORRUPTED) |
a454f742 BF |
978 | break; |
979 | } | |
980 | } | |
b474c7ae | 981 | return last_error; |
a454f742 BF |
982 | } |
983 | ||
ae2c4ac2 BF |
984 | int |
985 | xfs_inode_ag_iterator( | |
986 | struct xfs_mount *mp, | |
987 | int (*execute)(struct xfs_inode *ip, int flags, | |
988 | void *args), | |
989 | int flags, | |
990 | void *args) | |
991 | { | |
992 | return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0); | |
993 | } | |
994 | ||
a454f742 BF |
995 | int |
996 | xfs_inode_ag_iterator_tag( | |
997 | struct xfs_mount *mp, | |
e0094008 | 998 | int (*execute)(struct xfs_inode *ip, int flags, |
a454f742 BF |
999 | void *args), |
1000 | int flags, | |
1001 | void *args, | |
1002 | int tag) | |
1003 | { | |
1004 | struct xfs_perag *pag; | |
1005 | int error = 0; | |
1006 | int last_error = 0; | |
1007 | xfs_agnumber_t ag; | |
1008 | ||
1009 | ag = 0; | |
1010 | while ((pag = xfs_perag_get_tag(mp, ag, tag))) { | |
1011 | ag = pag->pag_agno + 1; | |
ae2c4ac2 BF |
1012 | error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag, |
1013 | 0); | |
5017e97d | 1014 | xfs_perag_put(pag); |
75f3cb13 DC |
1015 | if (error) { |
1016 | last_error = error; | |
2451337d | 1017 | if (error == -EFSCORRUPTED) |
75f3cb13 DC |
1018 | break; |
1019 | } | |
1020 | } | |
b474c7ae | 1021 | return last_error; |
75f3cb13 DC |
1022 | } |
1023 | ||
e3a20c0b DC |
1024 | /* |
1025 | * Grab the inode for reclaim exclusively. | |
1026 | * Return 0 if we grabbed it, non-zero otherwise. | |
1027 | */ | |
1028 | STATIC int | |
1029 | xfs_reclaim_inode_grab( | |
1030 | struct xfs_inode *ip, | |
1031 | int flags) | |
1032 | { | |
1a3e8f3d DC |
1033 | ASSERT(rcu_read_lock_held()); |
1034 | ||
1035 | /* quick check for stale RCU freed inode */ | |
1036 | if (!ip->i_ino) | |
1037 | return 1; | |
e3a20c0b DC |
1038 | |
1039 | /* | |
474fce06 CH |
1040 | * If we are asked for non-blocking operation, do unlocked checks to |
1041 | * see if the inode already is being flushed or in reclaim to avoid | |
1042 | * lock traffic. | |
e3a20c0b DC |
1043 | */ |
1044 | if ((flags & SYNC_TRYLOCK) && | |
474fce06 | 1045 | __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM)) |
e3a20c0b | 1046 | return 1; |
e3a20c0b DC |
1047 | |
1048 | /* | |
1049 | * The radix tree lock here protects a thread in xfs_iget from racing | |
1050 | * with us starting reclaim on the inode. Once we have the | |
1051 | * XFS_IRECLAIM flag set it will not touch us. | |
1a3e8f3d DC |
1052 | * |
1053 | * Due to RCU lookup, we may find inodes that have been freed and only | |
1054 | * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that | |
1055 | * aren't candidates for reclaim at all, so we must check that | |
1056 | * XFS_IRECLAIMABLE is set first before proceeding to reclaim. | |
e3a20c0b DC |
1057 | */ |
1058 | spin_lock(&ip->i_flags_lock); | |
1a3e8f3d DC |
1059 | if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || |
1060 | __xfs_iflags_test(ip, XFS_IRECLAIM)) { | |
1061 | /* not a reclaim candidate. */ | |
e3a20c0b DC |
1062 | spin_unlock(&ip->i_flags_lock); |
1063 | return 1; | |
1064 | } | |
1065 | __xfs_iflags_set(ip, XFS_IRECLAIM); | |
1066 | spin_unlock(&ip->i_flags_lock); | |
1067 | return 0; | |
1068 | } | |
1069 | ||
777df5af | 1070 | /* |
8a48088f CH |
1071 | * Inodes in different states need to be treated differently. The following |
1072 | * table lists the inode states and the reclaim actions necessary: | |
777df5af DC |
1073 | * |
1074 | * inode state iflush ret required action | |
1075 | * --------------- ---------- --------------- | |
1076 | * bad - reclaim | |
1077 | * shutdown EIO unpin and reclaim | |
1078 | * clean, unpinned 0 reclaim | |
1079 | * stale, unpinned 0 reclaim | |
c854363e DC |
1080 | * clean, pinned(*) 0 requeue |
1081 | * stale, pinned EAGAIN requeue | |
8a48088f CH |
1082 | * dirty, async - requeue |
1083 | * dirty, sync 0 reclaim | |
777df5af DC |
1084 | * |
1085 | * (*) dgc: I don't think the clean, pinned state is possible but it gets | |
1086 | * handled anyway given the order of checks implemented. | |
1087 | * | |
c854363e DC |
1088 | * Also, because we get the flush lock first, we know that any inode that has |
1089 | * been flushed delwri has had the flush completed by the time we check that | |
8a48088f | 1090 | * the inode is clean. |
c854363e | 1091 | * |
8a48088f CH |
1092 | * Note that because the inode is flushed delayed write by AIL pushing, the |
1093 | * flush lock may already be held here and waiting on it can result in very | |
1094 | * long latencies. Hence for sync reclaims, where we wait on the flush lock, | |
1095 | * the caller should push the AIL first before trying to reclaim inodes to | |
1096 | * minimise the amount of time spent waiting. For background reclaim, we only |
1097 | * bother to reclaim clean inodes anyway. | |
c854363e | 1098 | * |
777df5af DC |
1099 | * Hence the order of actions after gaining the locks should be: |
1100 | * bad => reclaim | |
1101 | * shutdown => unpin and reclaim | |
8a48088f | 1102 | * pinned, async => requeue |
c854363e | 1103 | * pinned, sync => unpin |
777df5af DC |
1104 | * stale => reclaim |
1105 | * clean => reclaim | |
8a48088f | 1106 | * dirty, async => requeue |
c854363e | 1107 | * dirty, sync => flush, wait and reclaim |
777df5af | 1108 | */ |
75f3cb13 | 1109 | STATIC int |
c8e20be0 | 1110 | xfs_reclaim_inode( |
75f3cb13 DC |
1111 | struct xfs_inode *ip, |
1112 | struct xfs_perag *pag, | |
c8e20be0 | 1113 | int sync_mode) |
fce08f2f | 1114 | { |
4c46819a | 1115 | struct xfs_buf *bp = NULL; |
8a17d7dd | 1116 | xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ |
4c46819a | 1117 | int error; |
777df5af | 1118 | |
1bfd8d04 DC |
1119 | restart: |
1120 | error = 0; | |
c8e20be0 | 1121 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
c854363e DC |
1122 | if (!xfs_iflock_nowait(ip)) { |
1123 | if (!(sync_mode & SYNC_WAIT)) | |
1124 | goto out; | |
1125 | xfs_iflock(ip); | |
1126 | } | |
7a3be02b | 1127 | |
777df5af DC |
1128 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
1129 | xfs_iunpin_wait(ip); | |
98efe8af | 1130 | /* xfs_iflush_abort() drops the flush lock */ |
04913fdd | 1131 | xfs_iflush_abort(ip, false); |
777df5af DC |
1132 | goto reclaim; |
1133 | } | |
c854363e | 1134 | if (xfs_ipincount(ip)) { |
8a48088f CH |
1135 | if (!(sync_mode & SYNC_WAIT)) |
1136 | goto out_ifunlock; | |
777df5af | 1137 | xfs_iunpin_wait(ip); |
c854363e | 1138 | } |
98efe8af BF |
1139 | if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) { |
1140 | xfs_ifunlock(ip); | |
777df5af | 1141 | goto reclaim; |
98efe8af | 1142 | } |
777df5af | 1143 | |
8a48088f CH |
1144 | /* |
1145 | * Never flush out dirty data during non-blocking reclaim, as it would | |
1146 | * just contend with AIL pushing trying to do the same job. | |
1147 | */ | |
1148 | if (!(sync_mode & SYNC_WAIT)) | |
1149 | goto out_ifunlock; | |
1150 | ||
1bfd8d04 DC |
1151 | /* |
1152 | * Now we have an inode that needs flushing. | |
1153 | * | |
4c46819a | 1154 | * Note that xfs_iflush will never block on the inode buffer lock, as |
1bfd8d04 | 1155 | * xfs_ifree_cluster() can lock the inode buffer before it locks the |
4c46819a | 1156 | * ip->i_lock, and we are doing the exact opposite here. As a result, |
475ee413 CH |
1157 | * doing a blocking xfs_imap_to_bp() to get the cluster buffer would |
1158 | * result in an ABBA deadlock with xfs_ifree_cluster(). | |
1bfd8d04 DC |
1159 | * |
1160 | * As xfs_ifree_cluster() must gather all inodes that are active in the |
1161 | * cache to mark them stale, if we hit this case we don't actually want | |
1162 | * to do IO here - we want the inode marked stale so we can simply | |
4c46819a CH |
1163 | * reclaim it. Hence if we get an EAGAIN error here, just unlock the |
1164 | * inode, back off and try again. Hopefully the next pass through will | |
1165 | * see the stale flag set on the inode. | |
1bfd8d04 | 1166 | */ |
4c46819a | 1167 | error = xfs_iflush(ip, &bp); |
2451337d | 1168 | if (error == -EAGAIN) { |
8a48088f CH |
1169 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
1170 | /* backoff longer than in xfs_ifree_cluster */ | |
1171 | delay(2); | |
1172 | goto restart; | |
c854363e | 1173 | } |
c854363e | 1174 | |
4c46819a CH |
1175 | if (!error) { |
1176 | error = xfs_bwrite(bp); | |
1177 | xfs_buf_relse(bp); | |
1178 | } | |
1179 | ||
777df5af | 1180 | reclaim: |
98efe8af BF |
1181 | ASSERT(!xfs_isiflocked(ip)); |
1182 | ||
8a17d7dd DC |
1183 | /* |
1184 | * Because we use RCU freeing we need to ensure the inode always appears | |
1185 | * to be reclaimed with an invalid inode number when in the free state. | |
98efe8af | 1186 | * We do this as early as possible under the ILOCK so that |
f2e9ad21 OS |
1187 | * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to |
1188 | * detect races with us here. By doing this, we guarantee that once | |
1189 | * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that | |
1190 | * it will see either a valid inode that will serialise correctly, or it | |
1191 | * will see an invalid inode that it can skip. | |
8a17d7dd DC |
1192 | */ |
1193 | spin_lock(&ip->i_flags_lock); | |
1194 | ip->i_flags = XFS_IRECLAIM; | |
1195 | ip->i_ino = 0; | |
1196 | spin_unlock(&ip->i_flags_lock); | |
1197 | ||
c8e20be0 | 1198 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
2f11feab | 1199 | |
ff6d6af2 | 1200 | XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); |
2f11feab DC |
1201 | /* |
1202 | * Remove the inode from the per-AG radix tree. | |
1203 | * | |
1204 | * Because radix_tree_delete won't complain even if the item was never | |
1205 | * added to the tree assert that it's been there before to catch | |
1206 | * problems with the inode life time early on. | |
1207 | */ | |
1a427ab0 | 1208 | spin_lock(&pag->pag_ici_lock); |
2f11feab | 1209 | if (!radix_tree_delete(&pag->pag_ici_root, |
8a17d7dd | 1210 | XFS_INO_TO_AGINO(ip->i_mount, ino))) |
2f11feab | 1211 | ASSERT(0); |
545c0889 | 1212 | xfs_perag_clear_reclaim_tag(pag); |
1a427ab0 | 1213 | spin_unlock(&pag->pag_ici_lock); |
2f11feab DC |
1214 | |
1215 | /* | |
1216 | * Here we do an (almost) spurious inode lock in order to coordinate | |
1217 | * with inode cache radix tree lookups. This is because the lookup | |
1218 | * can reference the inodes in the cache without taking references. | |
1219 | * | |
1220 | * We make that OK here by ensuring that we wait until the inode is | |
ad637a10 | 1221 | * unlocked after the lookup before we go ahead and free it. |
2f11feab | 1222 | */ |
ad637a10 | 1223 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
2f11feab | 1224 | xfs_qm_dqdetach(ip); |
ad637a10 | 1225 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
2f11feab | 1226 | |
8a17d7dd | 1227 | __xfs_inode_free(ip); |
ad637a10 | 1228 | return error; |
8a48088f CH |
1229 | |
1230 | out_ifunlock: | |
1231 | xfs_ifunlock(ip); | |
1232 | out: | |
1233 | xfs_iflags_clear(ip, XFS_IRECLAIM); | |
1234 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
1235 | /* | |
2451337d | 1236 | * We could return -EAGAIN here to make reclaim rescan the inode tree in |
8a48088f | 1237 | * a short while. However, this just burns CPU time scanning the tree |
5889608d DC |
1238 | * waiting for IO to complete and the reclaim work never goes back to |
1239 | * the idle state. Instead, return 0 to let the next scheduled | |
1240 | * background reclaim attempt to reclaim the inode again. | |
8a48088f CH |
1241 | */ |
1242 | return 0; | |
7a3be02b DC |
1243 | } |
1244 | ||
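The state table in the comment above xfs_reclaim_inode() maps cleanly onto a decision function. A sketch that encodes the same ordering of checks (shutdown, pinned, stale/clean, dirty) with a plain enum; this is a restatement of the table under assumed field names, not the kernel code path:

```c
#include <stdbool.h>
#include <stdio.h>

enum reclaim_action { RECLAIM, REQUEUE, UNPIN_THEN_RECLAIM, FLUSH_THEN_RECLAIM };

struct inode_state {
	bool shutdown;	/* filesystem is shut down */
	bool pinned;	/* inode pinned in the log */
	bool stale;	/* inode cluster already freed */
	bool clean;	/* no dirty state to flush */
	bool sync;	/* caller passed SYNC_WAIT */
};

static enum reclaim_action decide(const struct inode_state *s)
{
	if (s->shutdown)
		return UNPIN_THEN_RECLAIM;
	if (s->pinned && !s->sync)
		return REQUEUE;			/* pinned, async => requeue */
	/* pinned, sync => unpin first, then fall through */
	if (s->stale || s->clean)
		return RECLAIM;			/* stale or clean => reclaim */
	if (!s->sync)
		return REQUEUE;			/* dirty, async => requeue */
	return FLUSH_THEN_RECLAIM;		/* dirty, sync => flush + wait */
}

int main(void)
{
	struct inode_state dirty_async = { .sync = false };
	struct inode_state dirty_sync  = { .sync = true };

	printf("%d %d\n", decide(&dirty_async), decide(&dirty_sync));
	return 0;
}
```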
65d0f205 DC |
1245 | /* |
1246 | * Walk the AGs and reclaim the inodes in them. Even if the filesystem is | |
1247 | * corrupted, we still want to try to reclaim all the inodes. If we don't, | |
1248 | * then a shutdown during the filesystem unmount reclaim walk will leak all the |
1249 | * unreclaimed inodes. | |
1250 | */ | |
33479e05 | 1251 | STATIC int |
65d0f205 DC |
1252 | xfs_reclaim_inodes_ag( |
1253 | struct xfs_mount *mp, | |
1254 | int flags, | |
1255 | int *nr_to_scan) | |
1256 | { | |
1257 | struct xfs_perag *pag; | |
1258 | int error = 0; | |
1259 | int last_error = 0; | |
1260 | xfs_agnumber_t ag; | |
69b491c2 DC |
1261 | int trylock = flags & SYNC_TRYLOCK; |
1262 | int skipped; | |
65d0f205 | 1263 | |
69b491c2 | 1264 | restart: |
65d0f205 | 1265 | ag = 0; |
69b491c2 | 1266 | skipped = 0; |
65d0f205 DC |
1267 | while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { |
1268 | unsigned long first_index = 0; | |
1269 | int done = 0; | |
e3a20c0b | 1270 | int nr_found = 0; |
65d0f205 DC |
1271 | |
1272 | ag = pag->pag_agno + 1; | |
1273 | ||
69b491c2 DC |
1274 | if (trylock) { |
1275 | if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { | |
1276 | skipped++; | |
f83282a8 | 1277 | xfs_perag_put(pag); |
69b491c2 DC |
1278 | continue; |
1279 | } | |
1280 | first_index = pag->pag_ici_reclaim_cursor; | |
1281 | } else | |
1282 | mutex_lock(&pag->pag_ici_reclaim_lock); | |
1283 | ||
65d0f205 | 1284 | do { |
e3a20c0b DC |
1285 | struct xfs_inode *batch[XFS_LOOKUP_BATCH]; |
1286 | int i; | |
65d0f205 | 1287 | |
1a3e8f3d | 1288 | rcu_read_lock(); |
e3a20c0b DC |
1289 | nr_found = radix_tree_gang_lookup_tag( |
1290 | &pag->pag_ici_root, | |
1291 | (void **)batch, first_index, | |
1292 | XFS_LOOKUP_BATCH, | |
65d0f205 DC |
1293 | XFS_ICI_RECLAIM_TAG); |
1294 | if (!nr_found) { | |
b2232219 | 1295 | done = 1; |
1a3e8f3d | 1296 | rcu_read_unlock(); |
65d0f205 DC |
1297 | break; |
1298 | } | |
1299 | ||
1300 | /* | |
e3a20c0b DC |
1301 | * Grab the inodes before we drop the lock. If we found |
1302 | * nothing, nr == 0 and the loop will be skipped. | |
65d0f205 | 1303 | */ |
e3a20c0b DC |
1304 | for (i = 0; i < nr_found; i++) { |
1305 | struct xfs_inode *ip = batch[i]; | |
1306 | ||
1307 | if (done || xfs_reclaim_inode_grab(ip, flags)) | |
1308 | batch[i] = NULL; | |
1309 | ||
1310 | /* | |
1311 | * Update the index for the next lookup. Catch | |
1312 | * overflows into the next AG range which can | |
1313 | * occur if we have inodes in the last block of | |
1314 | * the AG and we are currently pointing to the | |
1315 | * last inode. | |
1a3e8f3d DC |
1316 | * |
1317 | * Because we may see inodes that are from the | |
1318 | * wrong AG due to RCU freeing and | |
1319 | * reallocation, only update the index if it | |
1320 | * lies in this AG. It was a race that led us |
1321 | * to see this inode, so another lookup from | |
1322 | * the same index will not find it again. | |
e3a20c0b | 1323 | */ |
1a3e8f3d DC |
1324 | if (XFS_INO_TO_AGNO(mp, ip->i_ino) != |
1325 | pag->pag_agno) | |
1326 | continue; | |
e3a20c0b DC |
1327 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); |
1328 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | |
1329 | done = 1; | |
1330 | } | |
65d0f205 | 1331 | |
e3a20c0b | 1332 | /* unlock now we've grabbed the inodes. */ |
1a3e8f3d | 1333 | rcu_read_unlock(); |
e3a20c0b DC |
1334 | |
1335 | for (i = 0; i < nr_found; i++) { | |
1336 | if (!batch[i]) | |
1337 | continue; | |
1338 | error = xfs_reclaim_inode(batch[i], pag, flags); | |
2451337d | 1339 | if (error && last_error != -EFSCORRUPTED) |
e3a20c0b DC |
1340 | last_error = error; |
1341 | } | |
1342 | ||
1343 | *nr_to_scan -= XFS_LOOKUP_BATCH; | |
65d0f205 | 1344 | |
8daaa831 DC |
1345 | cond_resched(); |
1346 | ||
e3a20c0b | 1347 | } while (nr_found && !done && *nr_to_scan > 0); |
65d0f205 | 1348 | |
69b491c2 DC |
1349 | if (trylock && !done) |
1350 | pag->pag_ici_reclaim_cursor = first_index; | |
1351 | else | |
1352 | pag->pag_ici_reclaim_cursor = 0; | |
1353 | mutex_unlock(&pag->pag_ici_reclaim_lock); | |
65d0f205 DC |
1354 | xfs_perag_put(pag); |
1355 | } | |
69b491c2 DC |
1356 | |
1357 | /* | |
1358 | * If we skipped any AG, and we still have scan count remaining, do |
1359 | * another pass this time using blocking reclaim semantics (i.e. |
1360 | * waiting on the reclaim locks and ignoring the reclaim cursors). This |
1361 | * ensures that when we get more reclaimers than AGs we block rather |
1362 | * than spin trying to execute reclaim. | |
1363 | */ | |
8daaa831 | 1364 | if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) { |
69b491c2 DC |
1365 | trylock = 0; |
1366 | goto restart; | |
1367 | } | |
b474c7ae | 1368 | return last_error; |
65d0f205 DC |
1369 | } |
1370 | ||
7a3be02b DC |
1371 | int |
1372 | xfs_reclaim_inodes( | |
1373 | xfs_mount_t *mp, | |
7a3be02b DC |
1374 | int mode) |
1375 | { | |
65d0f205 DC |
1376 | int nr_to_scan = INT_MAX; |
1377 | ||
1378 | return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan); | |
9bf729c0 DC |
1379 | } |
1380 | ||
1381 | /* | |
8daaa831 | 1382 | * Scan a certain number of inodes for reclaim. |
a7b339f1 DC |
1383 | * |
1384 | * When called we make sure that there is a background (fast) inode reclaim in | |
8daaa831 | 1385 | * progress, while we will throttle the speed of reclaim by doing synchronous |
a7b339f1 DC |
1386 | * reclaim of inodes. That means if we come across dirty inodes, we wait for |
1387 | * them to be cleaned, which we hope will not be very long due to the | |
1388 | * background walker having already kicked the IO off on those dirty inodes. | |
9bf729c0 | 1389 | */ |
0a234c6d | 1390 | long |
8daaa831 DC |
1391 | xfs_reclaim_inodes_nr( |
1392 | struct xfs_mount *mp, | |
1393 | int nr_to_scan) | |
9bf729c0 | 1394 | { |
8daaa831 | 1395 | /* kick background reclaimer and push the AIL */ |
5889608d | 1396 | xfs_reclaim_work_queue(mp); |
8daaa831 | 1397 | xfs_ail_push_all(mp->m_ail); |
a7b339f1 | 1398 | |
0a234c6d | 1399 | return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan); |
8daaa831 | 1400 | } |
9bf729c0 | 1401 | |
8daaa831 DC |
1402 | /* |
1403 | * Return the number of reclaimable inodes in the filesystem for | |
1404 | * the shrinker to determine how much to reclaim. | |
1405 | */ | |
1406 | int | |
1407 | xfs_reclaim_inodes_count( | |
1408 | struct xfs_mount *mp) | |
1409 | { | |
1410 | struct xfs_perag *pag; | |
1411 | xfs_agnumber_t ag = 0; | |
1412 | int reclaimable = 0; | |
9bf729c0 | 1413 | |
65d0f205 DC |
1414 | while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { |
1415 | ag = pag->pag_agno + 1; | |
70e60ce7 DC |
1416 | reclaimable += pag->pag_ici_reclaimable; |
1417 | xfs_perag_put(pag); | |
9bf729c0 | 1418 | } |
9bf729c0 DC |
1419 | return reclaimable; |
1420 | } | |
1421 | ||
3e3f9f58 BF |
1422 | STATIC int |
1423 | xfs_inode_match_id( | |
1424 | struct xfs_inode *ip, | |
1425 | struct xfs_eofblocks *eofb) | |
1426 | { | |
b9fe5052 DE |
1427 | if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && |
1428 | !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) | |
1b556048 | 1429 | return 0; |
3e3f9f58 | 1430 | |
b9fe5052 DE |
1431 | if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && |
1432 | !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) | |
1b556048 BF |
1433 | return 0; |
1434 | ||
b9fe5052 | 1435 | if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && |
de7a866f | 1436 | ip->i_d.di_projid != eofb->eof_prid) |
1b556048 BF |
1437 | return 0; |
1438 | ||
1439 | return 1; | |
3e3f9f58 BF |
1440 | } |
1441 | ||
f4526397 BF |
1442 | /* |
1443 | * A union-based inode filtering algorithm. Process the inode if any of the | |
1444 | * criteria match. This is for global/internal scans only. | |
1445 | */ | |
1446 | STATIC int | |
1447 | xfs_inode_match_id_union( | |
1448 | struct xfs_inode *ip, | |
1449 | struct xfs_eofblocks *eofb) | |
1450 | { | |
1451 | if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && | |
1452 | uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) | |
1453 | return 1; | |
1454 | ||
1455 | if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && | |
1456 | gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) | |
1457 | return 1; | |
1458 | ||
1459 | if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && | |
de7a866f | 1460 | ip->i_d.di_projid == eofb->eof_prid) |
f4526397 BF |
1461 | return 1; |
1462 | ||
1463 | return 0; | |
1464 | } | |
1465 | ||
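xfs_inode_match_id() requires every requested criterion to match (an intersection), while xfs_inode_match_id_union() accepts an inode as soon as any of them matches. The difference in a few lines, using assumed bit flags and plain numeric ids purely for illustration:

```c
#include <stdbool.h>
#include <stdio.h>

#define MATCH_UID 0x1
#define MATCH_GID 0x2

struct filter { unsigned int flags; unsigned int uid, gid; };
struct node   { unsigned int uid, gid; };

/* Intersection: every requested criterion must match. */
static bool match_all(const struct node *n, const struct filter *f)
{
	if ((f->flags & MATCH_UID) && n->uid != f->uid)
		return false;
	if ((f->flags & MATCH_GID) && n->gid != f->gid)
		return false;
	return true;
}

/* Union: any requested criterion matching is enough. */
static bool match_any(const struct node *n, const struct filter *f)
{
	if ((f->flags & MATCH_UID) && n->uid == f->uid)
		return true;
	if ((f->flags & MATCH_GID) && n->gid == f->gid)
		return true;
	return false;
}

int main(void)
{
	struct filter f = { .flags = MATCH_UID | MATCH_GID, .uid = 1000, .gid = 50 };
	struct node n = { .uid = 1000, .gid = 999 };

	printf("all=%d any=%d\n", match_all(&n, &f), match_any(&n, &f));
	return 0;
}
```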
41176a68 BF |
1466 | STATIC int |
1467 | xfs_inode_free_eofblocks( | |
1468 | struct xfs_inode *ip, | |
41176a68 BF |
1469 | int flags, |
1470 | void *args) | |
1471 | { | |
a36b9261 | 1472 | int ret = 0; |
3e3f9f58 | 1473 | struct xfs_eofblocks *eofb = args; |
f4526397 | 1474 | int match; |
5400da7d | 1475 | |
41176a68 BF |
1476 | if (!xfs_can_free_eofblocks(ip, false)) { |
1477 | /* inode could be preallocated or append-only */ | |
1478 | trace_xfs_inode_free_eofblocks_invalid(ip); | |
1479 | xfs_inode_clear_eofblocks_tag(ip); | |
1480 | return 0; | |
1481 | } | |
1482 | ||
1483 | /* | |
1484 | * If the mapping is dirty the operation can block and wait for some | |
1485 | * time. Unless we are waiting, skip it. | |
1486 | */ | |
1487 | if (!(flags & SYNC_WAIT) && | |
1488 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) | |
1489 | return 0; | |
1490 | ||
00ca79a0 | 1491 | if (eofb) { |
f4526397 BF |
1492 | if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) |
1493 | match = xfs_inode_match_id_union(ip, eofb); | |
1494 | else | |
1495 | match = xfs_inode_match_id(ip, eofb); | |
1496 | if (!match) | |
00ca79a0 BF |
1497 | return 0; |
1498 | ||
1499 | /* skip the inode if the file size is too small */ | |
1500 | if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && | |
1501 | XFS_ISIZE(ip) < eofb->eof_min_file_size) | |
1502 | return 0; | |
1503 | } | |
3e3f9f58 | 1504 | |
a36b9261 BF |
1505 | /* |
1506 | * If the caller is waiting, return -EAGAIN to keep the background | |
1507 | * scanner moving and revisit the inode in a subsequent pass. | |
1508 | */ | |
c3155097 | 1509 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { |
a36b9261 BF |
1510 | if (flags & SYNC_WAIT) |
1511 | ret = -EAGAIN; | |
1512 | return ret; | |
1513 | } | |
1514 | ret = xfs_free_eofblocks(ip); | |
c3155097 | 1515 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); |
41176a68 BF |
1516 | |
1517 | return ret; | |
1518 | } | |
1519 | ||
83104d44 DW |
1520 | static int |
1521 | __xfs_icache_free_eofblocks( | |
41176a68 | 1522 | struct xfs_mount *mp, |
83104d44 DW |
1523 | struct xfs_eofblocks *eofb, |
1524 | int (*execute)(struct xfs_inode *ip, int flags, | |
1525 | void *args), | |
1526 | int tag) | |
41176a68 | 1527 | { |
8ca149de BF |
1528 | int flags = SYNC_TRYLOCK; |
1529 | ||
1530 | if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC)) | |
1531 | flags = SYNC_WAIT; | |
1532 | ||
83104d44 DW |
1533 | return xfs_inode_ag_iterator_tag(mp, execute, flags, |
1534 | eofb, tag); | |
1535 | } | |
1536 | ||
1537 | int | |
1538 | xfs_icache_free_eofblocks( | |
1539 | struct xfs_mount *mp, | |
1540 | struct xfs_eofblocks *eofb) | |
1541 | { | |
1542 | return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks, | |
1543 | XFS_ICI_EOFBLOCKS_TAG); | |
41176a68 BF |
1544 | } |
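A sketch of how a caller might drive this scan. The struct xfs_eofblocks fields follow the usage above; the wrapper function itself is hypothetical and assumes the XFS headers already included at the top of this file:

	/* Hypothetical example: trim post-EOF blocks of one user's larger files. */
	static int
	example_trim_user_eofblocks(
		struct xfs_mount	*mp,
		kuid_t			uid)
	{
		struct xfs_eofblocks	eofb = { 0 };

		eofb.eof_flags = XFS_EOF_FLAGS_UID |		/* match the owner */
				 XFS_EOF_FLAGS_MINFILESIZE |	/* skip small files */
				 XFS_EOF_FLAGS_SYNC;		/* wait for locks */
		eofb.eof_uid = uid;
		eofb.eof_min_file_size = 1024 * 1024;		/* 1 MiB threshold */

		return xfs_icache_free_eofblocks(mp, &eofb);
	}

This is roughly the shape of what a filtered, user-requested eofblocks scan looks like once the filter has been translated into a struct xfs_eofblocks.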
1545 | ||
dc06f398 BF |
1546 | /* |
1547 | * Run eofblocks scans on the quotas applicable to the inode. For inodes with | |
1548 | * multiple quotas, we don't know exactly which quota caused an allocation | |
1549 | * failure. We make a best effort by including each quota under low free space | |
1550 | * conditions (less than 1% free space) in the scan. | |
1551 | */ | |
83104d44 DW |
1552 | static int |
1553 | __xfs_inode_free_quota_eofblocks( | |
1554 | struct xfs_inode *ip, | |
1555 | int (*execute)(struct xfs_mount *mp, | |
1556 | struct xfs_eofblocks *eofb)) | |
dc06f398 BF |
1557 | { |
1558 | int scan = 0; | |
1559 | struct xfs_eofblocks eofb = {0}; | |
1560 | struct xfs_dquot *dq; | |
1561 | ||
dc06f398 | 1562 | /* |
c3155097 | 1563 | * Run a sync scan to increase effectiveness and use the union filter to |
dc06f398 BF |
1564 | * cover all applicable quotas in a single scan. |
1565 | */ | |
dc06f398 BF |
1566 | eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC; |
1567 | ||
1568 | if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) { | |
1569 | dq = xfs_inode_dquot(ip, XFS_DQ_USER); | |
1570 | if (dq && xfs_dquot_lowsp(dq)) { | |
1571 | eofb.eof_uid = VFS_I(ip)->i_uid; | |
1572 | eofb.eof_flags |= XFS_EOF_FLAGS_UID; | |
1573 | scan = 1; | |
1574 | } | |
1575 | } | |
1576 | ||
1577 | if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) { | |
1578 | dq = xfs_inode_dquot(ip, XFS_DQ_GROUP); | |
1579 | if (dq && xfs_dquot_lowsp(dq)) { | |
1580 | eofb.eof_gid = VFS_I(ip)->i_gid; | |
1581 | eofb.eof_flags |= XFS_EOF_FLAGS_GID; | |
1582 | scan = 1; | |
1583 | } | |
1584 | } | |
1585 | ||
1586 | if (scan) | |
83104d44 | 1587 | execute(ip->i_mount, &eofb); |
dc06f398 BF |
1588 | |
1589 | return scan; | |
1590 | } | |
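Concretely (illustration, not source): if both the user and group dquots backing the inode are below the low-space thresholds, the filter handed to execute() ends up as below, i.e. a synchronous, union-filtered scan over everything owned by that user or that group:

	/*
	 * eofb.eof_flags == XFS_EOF_FLAGS_UNION | XFS_EOF_FLAGS_SYNC |
	 *		     XFS_EOF_FLAGS_UID | XFS_EOF_FLAGS_GID
	 * eofb.eof_uid   == VFS_I(ip)->i_uid
	 * eofb.eof_gid   == VFS_I(ip)->i_gid
	 */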
1591 | ||
83104d44 DW |
1592 | int |
1593 | xfs_inode_free_quota_eofblocks( | |
1594 | struct xfs_inode *ip) | |
1595 | { | |
1596 | return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks); | |
1597 | } | |
1598 | ||
91aae6be DW |
1599 | static inline unsigned long |
1600 | xfs_iflag_for_tag( | |
1601 | int tag) | |
1602 | { | |
1603 | switch (tag) { | |
1604 | case XFS_ICI_EOFBLOCKS_TAG: | |
1605 | return XFS_IEOFBLOCKS; | |
1606 | case XFS_ICI_COWBLOCKS_TAG: | |
1607 | return XFS_ICOWBLOCKS; | |
1608 | default: | |
1609 | ASSERT(0); | |
1610 | return 0; | |
1611 | } | |
1612 | } | |
1613 | ||
83104d44 | 1614 | static void |
91aae6be | 1615 | __xfs_inode_set_blocks_tag( |
83104d44 DW |
1616 | xfs_inode_t *ip, |
1617 | void (*execute)(struct xfs_mount *mp), | |
1618 | void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, | |
1619 | int error, unsigned long caller_ip), | |
1620 | int tag) | |
27b52867 BF |
1621 | { |
1622 | struct xfs_mount *mp = ip->i_mount; | |
1623 | struct xfs_perag *pag; | |
1624 | int tagged; | |
1625 | ||
85a6e764 CH |
1626 | /* |
1627 | * Don't bother locking the AG and looking up in the radix trees | |
1628 | * if we already know that we have the tag set. | |
1629 | */ | |
91aae6be | 1630 | if (ip->i_flags & xfs_iflag_for_tag(tag)) |
85a6e764 CH |
1631 | return; |
1632 | spin_lock(&ip->i_flags_lock); | |
91aae6be | 1633 | ip->i_flags |= xfs_iflag_for_tag(tag); |
85a6e764 CH |
1634 | spin_unlock(&ip->i_flags_lock); |
1635 | ||
27b52867 BF |
1636 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
1637 | spin_lock(&pag->pag_ici_lock); | |
27b52867 | 1638 | |
83104d44 | 1639 | tagged = radix_tree_tagged(&pag->pag_ici_root, tag); |
27b52867 | 1640 | radix_tree_tag_set(&pag->pag_ici_root, |
83104d44 | 1641 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag); |
27b52867 BF |
1642 | if (!tagged) { |
1643 | /* propagate the blocks tag up into the perag radix tree */ | |
1644 | spin_lock(&ip->i_mount->m_perag_lock); | |
1645 | radix_tree_tag_set(&ip->i_mount->m_perag_tree, | |
1646 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | |
83104d44 | 1647 | tag); |
27b52867 | 1648 | spin_unlock(&ip->i_mount->m_perag_lock); |
579b62fa BF |
1649 | |
1650 | /* kick off background trimming */ | |
83104d44 | 1651 | execute(ip->i_mount); |
27b52867 | 1652 | |
83104d44 | 1653 | set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_); |
27b52867 BF |
1654 | } |
1655 | ||
1656 | spin_unlock(&pag->pag_ici_lock); | |
1657 | xfs_perag_put(pag); | |
1658 | } | |
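The double tagging above (the per-AG inode radix tree plus the per-mount m_perag_tree) is what keeps the background scans cheap: a scanner walks only AGs whose perag entry carries the tag and ignores the rest. A rough sketch of the consuming side, in the same style as the reclaimable-inode count at the top of this section; the wrapper is illustrative only:

	/* Sketch only: visit each AG whose perag radix-tree entry carries @tag. */
	static void
	example_walk_tagged_ags(
		struct xfs_mount	*mp,
		int			tag)
	{
		struct xfs_perag	*pag;
		xfs_agnumber_t		agno = 0;

		while ((pag = xfs_perag_get_tag(mp, agno, tag))) {
			agno = pag->pag_agno + 1;
			/* ... gang-lookup tagged inodes in pag->pag_ici_root ... */
			xfs_perag_put(pag);
		}
	}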
1659 | ||
1660 | void | |
83104d44 | 1661 | xfs_inode_set_eofblocks_tag( |
27b52867 | 1662 | xfs_inode_t *ip) |
83104d44 DW |
1663 | { |
1664 | trace_xfs_inode_set_eofblocks_tag(ip); | |
91aae6be | 1665 | return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks, |
83104d44 DW |
1666 | trace_xfs_perag_set_eofblocks, |
1667 | XFS_ICI_EOFBLOCKS_TAG); | |
1668 | } | |
1669 | ||
1670 | static void | |
91aae6be | 1671 | __xfs_inode_clear_blocks_tag( |
83104d44 DW |
1672 | xfs_inode_t *ip, |
1673 | void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, | |
1674 | int error, unsigned long caller_ip), | |
1675 | int tag) | |
27b52867 BF |
1676 | { |
1677 | struct xfs_mount *mp = ip->i_mount; | |
1678 | struct xfs_perag *pag; | |
1679 | ||
85a6e764 | 1680 | spin_lock(&ip->i_flags_lock); |
91aae6be | 1681 | ip->i_flags &= ~xfs_iflag_for_tag(tag); |
85a6e764 CH |
1682 | spin_unlock(&ip->i_flags_lock); |
1683 | ||
27b52867 BF |
1684 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
1685 | spin_lock(&pag->pag_ici_lock); | |
27b52867 BF |
1686 | |
1687 | radix_tree_tag_clear(&pag->pag_ici_root, | |
83104d44 DW |
1688 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag); |
1689 | if (!radix_tree_tagged(&pag->pag_ici_root, tag)) { | |
27b52867 BF |
1690 | /* clear the blocks tag from the perag radix tree */ | |
1691 | spin_lock(&ip->i_mount->m_perag_lock); | |
1692 | radix_tree_tag_clear(&ip->i_mount->m_perag_tree, | |
1693 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | |
83104d44 | 1694 | tag); |
27b52867 | 1695 | spin_unlock(&ip->i_mount->m_perag_lock); |
83104d44 | 1696 | clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_); |
27b52867 BF |
1697 | } |
1698 | ||
1699 | spin_unlock(&pag->pag_ici_lock); | |
1700 | xfs_perag_put(pag); | |
1701 | } | |
1702 | ||
83104d44 DW |
1703 | void |
1704 | xfs_inode_clear_eofblocks_tag( | |
1705 | xfs_inode_t *ip) | |
1706 | { | |
1707 | trace_xfs_inode_clear_eofblocks_tag(ip); | |
91aae6be | 1708 | return __xfs_inode_clear_blocks_tag(ip, |
83104d44 DW |
1709 | trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG); |
1710 | } | |
1711 | ||
1712 | /* | |
be78ff0e DW |
1713 | * Set ourselves up to free CoW blocks from this file. If it's already clean |
1714 | * then we can bail out quickly, but otherwise we must back off if the file | |
1715 | * is undergoing some kind of write. | |
83104d44 | 1716 | */ |
be78ff0e DW |
1717 | static bool |
1718 | xfs_prep_free_cowblocks( | |
51d62690 | 1719 | struct xfs_inode *ip) |
83104d44 | 1720 | { |
39937234 BF |
1721 | /* |
1722 | * Just clear the tag if we have an empty cow fork or none at all. It's | |
1723 | * possible the inode was fully unshared since it was originally tagged. | |
1724 | */ | |
51d62690 | 1725 | if (!xfs_inode_has_cow_data(ip)) { |
83104d44 DW |
1726 | trace_xfs_inode_free_cowblocks_invalid(ip); |
1727 | xfs_inode_clear_cowblocks_tag(ip); | |
be78ff0e | 1728 | return false; |
83104d44 DW |
1729 | } |
1730 | ||
1731 | /* | |
1732 | * If the mapping is dirty or under writeback we cannot touch the | |
1733 | * CoW fork. Leave it alone if we're in the midst of a directio. | |
1734 | */ | |
a1b7a4de CH |
1735 | if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) || |
1736 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) || | |
83104d44 DW |
1737 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) || |
1738 | atomic_read(&VFS_I(ip)->i_dio_count)) | |
be78ff0e DW |
1739 | return false; |
1740 | ||
1741 | return true; | |
1742 | } | |
1743 | ||
1744 | /* | |
1745 | * Automatic CoW Reservation Freeing | |
1746 | * | |
1747 | * These functions automatically garbage collect leftover CoW reservations | |
1748 | * that were made on behalf of a cowextsize hint when we start to run out | |
1749 | * of quota or when the reservations sit around for too long. If the file | |
1750 | * has dirty pages or is undergoing writeback, its CoW reservations will | |
1751 | * be retained. | |
1752 | * | |
1753 | * The actual garbage collection piggybacks off the same code that runs | |
1754 | * the speculative EOF preallocation garbage collector. | |
1755 | */ | |
1756 | STATIC int | |
1757 | xfs_inode_free_cowblocks( | |
1758 | struct xfs_inode *ip, | |
1759 | int flags, | |
1760 | void *args) | |
1761 | { | |
1762 | struct xfs_eofblocks *eofb = args; | |
be78ff0e DW |
1763 | int match; |
1764 | int ret = 0; | |
1765 | ||
51d62690 | 1766 | if (!xfs_prep_free_cowblocks(ip)) |
83104d44 DW |
1767 | return 0; |
1768 | ||
1769 | if (eofb) { | |
1770 | if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) | |
1771 | match = xfs_inode_match_id_union(ip, eofb); | |
1772 | else | |
1773 | match = xfs_inode_match_id(ip, eofb); | |
1774 | if (!match) | |
1775 | return 0; | |
1776 | ||
1777 | /* skip the inode if the file size is too small */ | |
1778 | if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && | |
1779 | XFS_ISIZE(ip) < eofb->eof_min_file_size) | |
1780 | return 0; | |
83104d44 DW |
1781 | } |
1782 | ||
1783 | /* Free the CoW blocks */ | |
c3155097 BF |
1784 | xfs_ilock(ip, XFS_IOLOCK_EXCL); |
1785 | xfs_ilock(ip, XFS_MMAPLOCK_EXCL); | |
83104d44 | 1786 | |
be78ff0e DW |
1787 | /* |
1788 | * Check again, nobody else should be able to dirty blocks or change | |
1789 | * the reflink iflag now that we have the first two locks held. | |
1790 | */ | |
51d62690 | 1791 | if (xfs_prep_free_cowblocks(ip)) |
be78ff0e | 1792 | ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false); |
83104d44 | 1793 | |
c3155097 BF |
1794 | xfs_iunlock(ip, XFS_MMAPLOCK_EXCL); |
1795 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | |
83104d44 DW |
1796 | |
1797 | return ret; | |
1798 | } | |
1799 | ||
1800 | int | |
1801 | xfs_icache_free_cowblocks( | |
1802 | struct xfs_mount *mp, | |
1803 | struct xfs_eofblocks *eofb) | |
1804 | { | |
1805 | return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks, | |
1806 | XFS_ICI_COWBLOCKS_TAG); | |
1807 | } | |
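The "piggybacking" described in the comment block above is visible here: the CoW scan reuses the same AG iterator, filtering, and trylock/sync behaviour as the post-EOF scan, differing only in the radix-tree tag and the per-inode callback. A hypothetical caller-side wrapper, mirroring the earlier eofblocks example:

	/* Hypothetical example: cancel stale CoW reservations for one project. */
	static int
	example_trim_project_cowblocks(
		struct xfs_mount	*mp,
		prid_t			prid)
	{
		struct xfs_eofblocks	eofb = { 0 };

		eofb.eof_flags = XFS_EOF_FLAGS_PRID | XFS_EOF_FLAGS_SYNC;
		eofb.eof_prid = prid;

		return xfs_icache_free_cowblocks(mp, &eofb);
	}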
1808 | ||
1809 | int | |
1810 | xfs_inode_free_quota_cowblocks( | |
1811 | struct xfs_inode *ip) | |
1812 | { | |
1813 | return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks); | |
1814 | } | |
1815 | ||
1816 | void | |
1817 | xfs_inode_set_cowblocks_tag( | |
1818 | xfs_inode_t *ip) | |
1819 | { | |
7b7381f0 | 1820 | trace_xfs_inode_set_cowblocks_tag(ip); |
91aae6be | 1821 | return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks, |
7b7381f0 | 1822 | trace_xfs_perag_set_cowblocks, |
83104d44 DW |
1823 | XFS_ICI_COWBLOCKS_TAG); |
1824 | } | |
1825 | ||
1826 | void | |
1827 | xfs_inode_clear_cowblocks_tag( | |
1828 | xfs_inode_t *ip) | |
1829 | { | |
7b7381f0 | 1830 | trace_xfs_inode_clear_cowblocks_tag(ip); |
91aae6be | 1831 | return __xfs_inode_clear_blocks_tag(ip, |
7b7381f0 | 1832 | trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG); |
83104d44 | 1833 | } |
d6b636eb DW |
1834 | |
1835 | /* Disable post-EOF and CoW block auto-reclamation. */ | |
1836 | void | |
ed30dcbd | 1837 | xfs_stop_block_reaping( |
d6b636eb DW |
1838 | struct xfs_mount *mp) |
1839 | { | |
1840 | cancel_delayed_work_sync(&mp->m_eofblocks_work); | |
1841 | cancel_delayed_work_sync(&mp->m_cowblocks_work); | |
1842 | } | |
1843 | ||
1844 | /* Enable post-EOF and CoW block auto-reclamation. */ | |
1845 | void | |
ed30dcbd | 1846 | xfs_start_block_reaping( |
d6b636eb DW |
1847 | struct xfs_mount *mp) |
1848 | { | |
1849 | xfs_queue_eofblocks(mp); | |
1850 | xfs_queue_cowblocks(mp); | |
1851 | } |
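xfs_queue_eofblocks() and xfs_queue_cowblocks() are defined earlier in this file; broadly, each re-arms the delayed work item cancelled in xfs_stop_block_reaping() so the periodic scans resume. A simplified sketch of that shape only; the workqueue member and interval shown here are assumptions, not a quote of the real helper:

	/* Sketch only: re-arm the periodic post-EOF scan if any AG is tagged. */
	static void
	example_queue_eofblocks(
		struct xfs_mount	*mp)
	{
		rcu_read_lock();
		if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
			queue_delayed_work(mp->m_eofblocks_workqueue,	  /* assumed member */
					   &mp->m_eofblocks_work,
					   msecs_to_jiffies(300 * 1000)); /* assumed interval */
		rcu_read_unlock();
	}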