Commit | Line | Data |
---|---|---|
0b61f8a4 | 1 | // SPDX-License-Identifier: GPL-2.0 |
fe4fa4b8 DC |
2 | /* |
3 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. | |
4 | * All Rights Reserved. | |
fe4fa4b8 DC |
5 | */ |
6 | #include "xfs.h" | |
7 | #include "xfs_fs.h" | |
5467b34b | 8 | #include "xfs_shared.h" |
6ca1c906 | 9 | #include "xfs_format.h" |
239880ef DC |
10 | #include "xfs_log_format.h" |
11 | #include "xfs_trans_resv.h" | |
fe4fa4b8 | 12 | #include "xfs_sb.h" |
fe4fa4b8 | 13 | #include "xfs_mount.h" |
fe4fa4b8 | 14 | #include "xfs_inode.h" |
239880ef DC |
15 | #include "xfs_trans.h" |
16 | #include "xfs_trans_priv.h" | |
fe4fa4b8 | 17 | #include "xfs_inode_item.h" |
7d095257 | 18 | #include "xfs_quota.h" |
0b1b213f | 19 | #include "xfs_trace.h" |
6d8b79cf | 20 | #include "xfs_icache.h" |
c24b5dfa | 21 | #include "xfs_bmap_util.h" |
dc06f398 BF |
22 | #include "xfs_dquot_item.h" |
23 | #include "xfs_dquot.h" | |
83104d44 | 24 | #include "xfs_reflink.h" |
bb8a66af | 25 | #include "xfs_ialloc.h" |
fe4fa4b8 | 26 | |
f0e28280 | 27 | #include <linux/iversion.h> |
a167b17e | 28 | |
33479e05 DC |
29 | /* |
30 | * Allocate and initialise an xfs_inode. | |
31 | */ | |
638f4416 | 32 | struct xfs_inode * |
33479e05 DC |
33 | xfs_inode_alloc( |
34 | struct xfs_mount *mp, | |
35 | xfs_ino_t ino) | |
36 | { | |
37 | struct xfs_inode *ip; | |
38 | ||
39 | /* | |
3050bd0b CM |
40 | * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL |
41 | * and return NULL here on ENOMEM. | |
33479e05 | 42 | */ |
3050bd0b CM |
43 | ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL); |
44 | ||
33479e05 | 45 | if (inode_init_always(mp->m_super, VFS_I(ip))) { |
377bcd5f | 46 | kmem_cache_free(xfs_inode_zone, ip); |
33479e05 DC |
47 | return NULL; |
48 | } | |
49 | ||
c19b3b05 DC |
50 | /* VFS doesn't initialise i_mode! */ |
51 | VFS_I(ip)->i_mode = 0; | |
52 | ||
ff6d6af2 | 53 | XFS_STATS_INC(mp, vn_active); |
33479e05 | 54 | ASSERT(atomic_read(&ip->i_pincount) == 0); |
33479e05 DC |
55 | ASSERT(ip->i_ino == 0); |
56 | ||
33479e05 DC |
57 | /* initialise the xfs inode */ |
58 | ip->i_ino = ino; | |
59 | ip->i_mount = mp; | |
60 | memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); | |
61 | ip->i_afp = NULL; | |
3993baeb | 62 | ip->i_cowfp = NULL; |
3ba738df | 63 | memset(&ip->i_df, 0, sizeof(ip->i_df)); |
33479e05 DC |
64 | ip->i_flags = 0; |
65 | ip->i_delayed_blks = 0; | |
f8d55aa0 | 66 | memset(&ip->i_d, 0, sizeof(ip->i_d)); |
6772c1f1 DW |
67 | ip->i_sick = 0; |
68 | ip->i_checked = 0; | |
cb357bf3 DW |
69 | INIT_WORK(&ip->i_ioend_work, xfs_end_io); |
70 | INIT_LIST_HEAD(&ip->i_ioend_list); | |
71 | spin_lock_init(&ip->i_ioend_lock); | |
33479e05 DC |
72 | |
73 | return ip; | |
74 | } | |
75 | ||
76 | STATIC void | |
77 | xfs_inode_free_callback( | |
78 | struct rcu_head *head) | |
79 | { | |
80 | struct inode *inode = container_of(head, struct inode, i_rcu); | |
81 | struct xfs_inode *ip = XFS_I(inode); | |
82 | ||
c19b3b05 | 83 | switch (VFS_I(ip)->i_mode & S_IFMT) { |
33479e05 DC |
84 | case S_IFREG: |
85 | case S_IFDIR: | |
86 | case S_IFLNK: | |
ef838512 | 87 | xfs_idestroy_fork(&ip->i_df); |
33479e05 DC |
88 | break; |
89 | } | |
90 | ||
ef838512 CH |
91 | if (ip->i_afp) { |
92 | xfs_idestroy_fork(ip->i_afp); | |
93 | kmem_cache_free(xfs_ifork_zone, ip->i_afp); | |
94 | } | |
95 | if (ip->i_cowfp) { | |
96 | xfs_idestroy_fork(ip->i_cowfp); | |
97 | kmem_cache_free(xfs_ifork_zone, ip->i_cowfp); | |
98 | } | |
33479e05 | 99 | if (ip->i_itemp) { |
22525c17 DC |
100 | ASSERT(!test_bit(XFS_LI_IN_AIL, |
101 | &ip->i_itemp->ili_item.li_flags)); | |
33479e05 DC |
102 | xfs_inode_item_destroy(ip); |
103 | ip->i_itemp = NULL; | |
104 | } | |
105 | ||
377bcd5f | 106 | kmem_cache_free(xfs_inode_zone, ip); |
1f2dcfe8 DC |
107 | } |
108 | ||
8a17d7dd DC |
109 | static void |
110 | __xfs_inode_free( | |
111 | struct xfs_inode *ip) | |
112 | { | |
113 | /* asserts to verify all state is correct here */ | |
114 | ASSERT(atomic_read(&ip->i_pincount) == 0); | |
48d55e2a | 115 | ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list)); |
8a17d7dd DC |
116 | XFS_STATS_DEC(ip->i_mount, vn_active); |
117 | ||
118 | call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); | |
119 | } | |
120 | ||
1f2dcfe8 DC |
121 | void |
122 | xfs_inode_free( | |
123 | struct xfs_inode *ip) | |
124 | { | |
718ecc50 | 125 | ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING)); |
98efe8af | 126 | |
33479e05 DC |
127 | /* |
128 | * Because we use RCU freeing we need to ensure the inode always | |
129 | * appears to be reclaimed with an invalid inode number when in the | |
130 | * free state. The ip->i_flags_lock provides the barrier against lookup | |
131 | * races. | |
132 | */ | |
133 | spin_lock(&ip->i_flags_lock); | |
134 | ip->i_flags = XFS_IRECLAIM; | |
135 | ip->i_ino = 0; | |
136 | spin_unlock(&ip->i_flags_lock); | |
137 | ||
8a17d7dd | 138 | __xfs_inode_free(ip); |
33479e05 DC |
139 | } |
140 | ||
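/*
 * Editor's sketch (illustrative, not part of this file): the i_flags_lock
 * barrier above pairs with the RCU lookup side. A lookup racing with
 * xfs_inode_free() re-checks the inode number under the same lock, roughly:
 *
 *	rcu_read_lock();
 *	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 *	spin_lock(&ip->i_flags_lock);
 *	if (ip->i_ino != ino)
 *		// inode was freed/recycled in this RCU grace period: skip it
 *	spin_unlock(&ip->i_flags_lock);
 *	rcu_read_unlock();
 *
 * which is exactly the check xfs_iget()/xfs_iget_cache_hit() perform below.
 */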
ad438c40 | 141 | /* |
02511a5a DC |
142 | * Queue background inode reclaim work if there are reclaimable inodes and there |
143 | * isn't reclaim work already scheduled or in progress. | |
ad438c40 DC |
144 | */ |
145 | static void | |
146 | xfs_reclaim_work_queue( | |
147 | struct xfs_mount *mp) | |
148 | { | |
149 | ||
150 | rcu_read_lock(); | |
151 | if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { | |
152 | queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, | |
153 | msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); | |
154 | } | |
155 | rcu_read_unlock(); | |
156 | } | |
157 | ||
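/*
 * Editor's note (illustrative arithmetic, not part of this file): the delay
 * above is one sixth of the xfssyncd period, converted from centiseconds to
 * milliseconds. Assuming the default xfs_syncd_centisecs value of 3000
 * (30 seconds), background reclaim is re-queued roughly every
 * 3000 / 6 * 10 = 5000 ms, i.e. every 5 seconds.
 */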
ad438c40 DC |
158 | static void |
159 | xfs_perag_set_reclaim_tag( | |
160 | struct xfs_perag *pag) | |
161 | { | |
162 | struct xfs_mount *mp = pag->pag_mount; | |
163 | ||
95989c46 | 164 | lockdep_assert_held(&pag->pag_ici_lock); |
ad438c40 DC |
165 | if (pag->pag_ici_reclaimable++) |
166 | return; | |
167 | ||
168 | /* propagate the reclaim tag up into the perag radix tree */ | |
169 | spin_lock(&mp->m_perag_lock); | |
170 | radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, | |
171 | XFS_ICI_RECLAIM_TAG); | |
172 | spin_unlock(&mp->m_perag_lock); | |
173 | ||
174 | /* schedule periodic background inode reclaim */ | |
175 | xfs_reclaim_work_queue(mp); | |
176 | ||
177 | trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_); | |
178 | } | |
179 | ||
180 | static void | |
181 | xfs_perag_clear_reclaim_tag( | |
182 | struct xfs_perag *pag) | |
183 | { | |
184 | struct xfs_mount *mp = pag->pag_mount; | |
185 | ||
95989c46 | 186 | lockdep_assert_held(&pag->pag_ici_lock); |
ad438c40 DC |
187 | if (--pag->pag_ici_reclaimable) |
188 | return; | |
189 | ||
190 | /* clear the reclaim tag from the perag radix tree */ | |
191 | spin_lock(&mp->m_perag_lock); | |
192 | radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, | |
193 | XFS_ICI_RECLAIM_TAG); | |
194 | spin_unlock(&mp->m_perag_lock); | |
195 | trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_); | |
196 | } | |
197 | ||
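/*
 * Editor's summary (derived from the two helpers above, not part of this
 * file): reclaim tagging is two-level. The per-AG tree (pag->pag_ici_root)
 * tags individual inodes with XFS_ICI_RECLAIM_TAG, while the per-mount tree
 * (mp->m_perag_tree) tags whole AGs that contain at least one such inode.
 * pag->pag_ici_reclaimable counts the tagged inodes, so the AG-level tag is
 * only set on the 0 -> 1 transition and only cleared on the 1 -> 0 one.
 */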
198 | ||
199 | /* | |
200 | * We set the inode flag atomically with the radix tree tag. | |
201 | * Once we get tag lookups on the radix tree, this inode flag | |
202 | * can go away. | |
203 | */ | |
204 | void | |
205 | xfs_inode_set_reclaim_tag( | |
206 | struct xfs_inode *ip) | |
207 | { | |
208 | struct xfs_mount *mp = ip->i_mount; | |
209 | struct xfs_perag *pag; | |
210 | ||
211 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); | |
212 | spin_lock(&pag->pag_ici_lock); | |
213 | spin_lock(&ip->i_flags_lock); | |
214 | ||
215 | radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino), | |
216 | XFS_ICI_RECLAIM_TAG); | |
217 | xfs_perag_set_reclaim_tag(pag); | |
218 | __xfs_iflags_set(ip, XFS_IRECLAIMABLE); | |
219 | ||
220 | spin_unlock(&ip->i_flags_lock); | |
221 | spin_unlock(&pag->pag_ici_lock); | |
222 | xfs_perag_put(pag); | |
223 | } | |
224 | ||
225 | STATIC void | |
226 | xfs_inode_clear_reclaim_tag( | |
227 | struct xfs_perag *pag, | |
228 | xfs_ino_t ino) | |
229 | { | |
230 | radix_tree_tag_clear(&pag->pag_ici_root, | |
231 | XFS_INO_TO_AGINO(pag->pag_mount, ino), | |
232 | XFS_ICI_RECLAIM_TAG); | |
233 | xfs_perag_clear_reclaim_tag(pag); | |
234 | } | |
235 | ||
ae2c4ac2 BF |
236 | static void |
237 | xfs_inew_wait( | |
238 | struct xfs_inode *ip) | |
239 | { | |
240 | wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT); | |
241 | DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT); | |
242 | ||
243 | do { | |
21417136 | 244 | prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); |
ae2c4ac2 BF |
245 | if (!xfs_iflags_test(ip, XFS_INEW)) |
246 | break; | |
247 | schedule(); | |
248 | } while (true); | |
21417136 | 249 | finish_wait(wq, &wait.wq_entry); |
ae2c4ac2 BF |
250 | } |
251 | ||
50997470 DC |
252 | /* |
253 | * When we recycle a reclaimable inode, we need to re-initialise the VFS inode | |
254 | * part of the structure. This is made more complex by the fact we store | |
255 | * information about the on-disk values in the VFS inode and so we can't just | |
83e06f21 | 256 | * overwrite the values unconditionally. Hence we save the parameters we |
50997470 | 257 | * need to retain across reinitialisation, and rewrite them into the VFS inode |
83e06f21 | 258 | * after reinitialisation even if it fails. |
50997470 DC |
259 | */ |
260 | static int | |
261 | xfs_reinit_inode( | |
262 | struct xfs_mount *mp, | |
263 | struct inode *inode) | |
264 | { | |
265 | int error; | |
54d7b5c1 | 266 | uint32_t nlink = inode->i_nlink; |
9e9a2674 | 267 | uint32_t generation = inode->i_generation; |
f0e28280 | 268 | uint64_t version = inode_peek_iversion(inode); |
c19b3b05 | 269 | umode_t mode = inode->i_mode; |
acd1d715 | 270 | dev_t dev = inode->i_rdev; |
3d8f2821 CH |
271 | kuid_t uid = inode->i_uid; |
272 | kgid_t gid = inode->i_gid; | |
50997470 DC |
273 | |
274 | error = inode_init_always(mp->m_super, inode); | |
275 | ||
54d7b5c1 | 276 | set_nlink(inode, nlink); |
9e9a2674 | 277 | inode->i_generation = generation; |
f0e28280 | 278 | inode_set_iversion_queried(inode, version); |
c19b3b05 | 279 | inode->i_mode = mode; |
acd1d715 | 280 | inode->i_rdev = dev; |
3d8f2821 CH |
281 | inode->i_uid = uid; |
282 | inode->i_gid = gid; | |
50997470 DC |
283 | return error; |
284 | } | |
285 | ||
afca6c5b DC |
286 | /* |
287 | * If we are allocating a new inode, then check what was returned is | |
288 | * actually a free, empty inode. If we are not allocating an inode, | |
289 | * then check we didn't find a free inode. | |
290 | * | |
291 | * Returns: | |
292 | * 0 if the inode free state matches the lookup context | |
293 | * -ENOENT if the inode is free and we are not allocating | |
294 | * -EFSCORRUPTED if there is any state mismatch at all | |
295 | */ | |
296 | static int | |
297 | xfs_iget_check_free_state( | |
298 | struct xfs_inode *ip, | |
299 | int flags) | |
300 | { | |
301 | if (flags & XFS_IGET_CREATE) { | |
302 | /* should be a free inode */ | |
303 | if (VFS_I(ip)->i_mode != 0) { | |
304 | xfs_warn(ip->i_mount, | |
305 | "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)", | |
306 | ip->i_ino, VFS_I(ip)->i_mode); | |
307 | return -EFSCORRUPTED; | |
308 | } | |
309 | ||
310 | if (ip->i_d.di_nblocks != 0) { | |
311 | xfs_warn(ip->i_mount, | |
312 | "Corruption detected! Free inode 0x%llx has blocks allocated!", | |
313 | ip->i_ino); | |
314 | return -EFSCORRUPTED; | |
315 | } | |
316 | return 0; | |
317 | } | |
318 | ||
319 | /* should be an allocated inode */ | |
320 | if (VFS_I(ip)->i_mode == 0) | |
321 | return -ENOENT; | |
322 | ||
323 | return 0; | |
324 | } | |
325 | ||
33479e05 DC |
326 | /* |
327 | * Check the validity of the inode we just found in the cache |
328 | */ | |
329 | static int | |
330 | xfs_iget_cache_hit( | |
331 | struct xfs_perag *pag, | |
332 | struct xfs_inode *ip, | |
333 | xfs_ino_t ino, | |
334 | int flags, | |
335 | int lock_flags) __releases(RCU) | |
336 | { | |
337 | struct inode *inode = VFS_I(ip); | |
338 | struct xfs_mount *mp = ip->i_mount; | |
339 | int error; | |
340 | ||
341 | /* | |
342 | * check for re-use of an inode within an RCU grace period due to the | |
343 | * radix tree nodes not being updated yet. We monitor for this by | |
344 | * setting the inode number to zero before freeing the inode structure. | |
345 | * If the inode has been reallocated and set up, then the inode number | |
346 | * will not match, so check for that, too. | |
347 | */ | |
348 | spin_lock(&ip->i_flags_lock); | |
349 | if (ip->i_ino != ino) { | |
350 | trace_xfs_iget_skip(ip); | |
ff6d6af2 | 351 | XFS_STATS_INC(mp, xs_ig_frecycle); |
2451337d | 352 | error = -EAGAIN; |
33479e05 DC |
353 | goto out_error; |
354 | } | |
355 | ||
356 | ||
357 | /* | |
358 | * If we are racing with another cache hit that is currently | |
359 | * instantiating this inode or currently recycling it out of | |
360 | * reclaimable state, wait for the initialisation to complete |
361 | * before continuing. | |
362 | * | |
363 | * XXX(hch): eventually we should do something equivalent to | |
364 | * wait_on_inode to wait for these flags to be cleared | |
365 | * instead of polling for it. | |
366 | */ | |
367 | if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { | |
368 | trace_xfs_iget_skip(ip); | |
ff6d6af2 | 369 | XFS_STATS_INC(mp, xs_ig_frecycle); |
2451337d | 370 | error = -EAGAIN; |
33479e05 DC |
371 | goto out_error; |
372 | } | |
373 | ||
374 | /* | |
afca6c5b DC |
375 | * Check the inode free state is valid. This also detects lookup |
376 | * racing with unlinks. | |
33479e05 | 377 | */ |
afca6c5b DC |
378 | error = xfs_iget_check_free_state(ip, flags); |
379 | if (error) | |
33479e05 | 380 | goto out_error; |
33479e05 DC |
381 | |
382 | /* | |
383 | * If IRECLAIMABLE is set, we've torn down the VFS inode already. | |
384 | * Need to carefully get it back into useable state. | |
385 | */ | |
386 | if (ip->i_flags & XFS_IRECLAIMABLE) { | |
387 | trace_xfs_iget_reclaim(ip); | |
388 | ||
378f681c DW |
389 | if (flags & XFS_IGET_INCORE) { |
390 | error = -EAGAIN; | |
391 | goto out_error; | |
392 | } | |
393 | ||
33479e05 DC |
394 | /* |
395 | * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode | |
396 | * from stomping over us while we recycle the inode. We can't | |
397 | * clear the radix tree reclaimable tag yet as it requires | |
398 | * pag_ici_lock to be held exclusive. | |
399 | */ | |
400 | ip->i_flags |= XFS_IRECLAIM; | |
401 | ||
402 | spin_unlock(&ip->i_flags_lock); | |
403 | rcu_read_unlock(); | |
404 | ||
d45344d6 | 405 | ASSERT(!rwsem_is_locked(&inode->i_rwsem)); |
50997470 | 406 | error = xfs_reinit_inode(mp, inode); |
33479e05 | 407 | if (error) { |
756baca2 | 408 | bool wake; |
33479e05 DC |
409 | /* |
410 | * Re-initializing the inode failed, and we are in deep | |
411 | * trouble. Try to re-add it to the reclaim list. | |
412 | */ | |
413 | rcu_read_lock(); | |
414 | spin_lock(&ip->i_flags_lock); | |
756baca2 | 415 | wake = !!__xfs_iflags_test(ip, XFS_INEW); |
33479e05 | 416 | ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); |
756baca2 BF |
417 | if (wake) |
418 | wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); | |
33479e05 DC |
419 | ASSERT(ip->i_flags & XFS_IRECLAIMABLE); |
420 | trace_xfs_iget_reclaim_fail(ip); | |
421 | goto out_error; | |
422 | } | |
423 | ||
424 | spin_lock(&pag->pag_ici_lock); | |
425 | spin_lock(&ip->i_flags_lock); | |
426 | ||
427 | /* | |
428 | * Clear the per-lifetime state in the inode as we are now | |
429 | * effectively a new inode and need to return to the initial | |
430 | * state before reuse occurs. | |
431 | */ | |
432 | ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; | |
433 | ip->i_flags |= XFS_INEW; | |
545c0889 | 434 | xfs_inode_clear_reclaim_tag(pag, ip->i_ino); |
33479e05 | 435 | inode->i_state = I_NEW; |
6772c1f1 DW |
436 | ip->i_sick = 0; |
437 | ip->i_checked = 0; | |
33479e05 | 438 | |
33479e05 DC |
439 | spin_unlock(&ip->i_flags_lock); |
440 | spin_unlock(&pag->pag_ici_lock); | |
441 | } else { | |
442 | /* If the VFS inode is being torn down, pause and try again. */ | |
443 | if (!igrab(inode)) { | |
444 | trace_xfs_iget_skip(ip); | |
2451337d | 445 | error = -EAGAIN; |
33479e05 DC |
446 | goto out_error; |
447 | } | |
448 | ||
449 | /* We've got a live one. */ | |
450 | spin_unlock(&ip->i_flags_lock); | |
451 | rcu_read_unlock(); | |
452 | trace_xfs_iget_hit(ip); | |
453 | } | |
454 | ||
455 | if (lock_flags != 0) | |
456 | xfs_ilock(ip, lock_flags); | |
457 | ||
378f681c | 458 | if (!(flags & XFS_IGET_INCORE)) |
dae2f8ed | 459 | xfs_iflags_clear(ip, XFS_ISTALE); |
ff6d6af2 | 460 | XFS_STATS_INC(mp, xs_ig_found); |
33479e05 DC |
461 | |
462 | return 0; | |
463 | ||
464 | out_error: | |
465 | spin_unlock(&ip->i_flags_lock); | |
466 | rcu_read_unlock(); | |
467 | return error; | |
468 | } | |
469 | ||
470 | ||
471 | static int | |
472 | xfs_iget_cache_miss( | |
473 | struct xfs_mount *mp, | |
474 | struct xfs_perag *pag, | |
475 | xfs_trans_t *tp, | |
476 | xfs_ino_t ino, | |
477 | struct xfs_inode **ipp, | |
478 | int flags, | |
479 | int lock_flags) | |
480 | { | |
481 | struct xfs_inode *ip; | |
482 | int error; | |
483 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); | |
484 | int iflags; | |
485 | ||
486 | ip = xfs_inode_alloc(mp, ino); | |
487 | if (!ip) | |
2451337d | 488 | return -ENOMEM; |
33479e05 | 489 | |
bb8a66af | 490 | error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags); |
33479e05 DC |
491 | if (error) |
492 | goto out_destroy; | |
493 | ||
bb8a66af CH |
494 | /* |
495 | * For version 5 superblocks, if we are initialising a new inode and we | |
496 | * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can | |
497 | * simply build the new inode core with a random generation number. | |
498 | * | |
499 | * For version 4 (and older) superblocks, log recovery is dependent on | |
500 | * the di_flushiter field being initialised from the current on-disk | |
501 | * value and hence we must also read the inode off disk even when | |
502 | * initializing new inodes. | |
503 | */ | |
504 | if (xfs_sb_version_has_v3inode(&mp->m_sb) && | |
505 | (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) { | |
506 | VFS_I(ip)->i_generation = prandom_u32(); | |
507 | } else { | |
508 | struct xfs_dinode *dip; | |
509 | struct xfs_buf *bp; | |
510 | ||
511 | error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0); | |
512 | if (error) | |
513 | goto out_destroy; | |
514 | ||
515 | error = xfs_inode_from_disk(ip, dip); | |
516 | if (!error) | |
517 | xfs_buf_set_ref(bp, XFS_INO_REF); | |
518 | xfs_trans_brelse(tp, bp); | |
519 | ||
520 | if (error) | |
521 | goto out_destroy; | |
522 | } | |
523 | ||
33479e05 DC |
524 | trace_xfs_iget_miss(ip); |
525 | ||
ee457001 | 526 | /* |
afca6c5b DC |
527 | * Check the inode free state is valid. This also detects lookup |
528 | * racing with unlinks. | |
ee457001 | 529 | */ |
afca6c5b DC |
530 | error = xfs_iget_check_free_state(ip, flags); |
531 | if (error) | |
33479e05 | 532 | goto out_destroy; |
33479e05 DC |
533 | |
534 | /* | |
535 | * Preload the radix tree so we can insert safely under the | |
536 | * write spinlock. Note that we cannot sleep inside the preload | |
537 | * region. Since we can be called from transaction context, don't | |
538 | * recurse into the file system. | |
539 | */ | |
540 | if (radix_tree_preload(GFP_NOFS)) { | |
2451337d | 541 | error = -EAGAIN; |
33479e05 DC |
542 | goto out_destroy; |
543 | } | |
544 | ||
545 | /* | |
546 | * Because the inode hasn't been added to the radix-tree yet it can't | |
547 | * be found by another thread, so we can do the non-sleeping lock here. | |
548 | */ | |
549 | if (lock_flags) { | |
550 | if (!xfs_ilock_nowait(ip, lock_flags)) | |
551 | BUG(); | |
552 | } | |
553 | ||
554 | /* | |
555 | * These values must be set before inserting the inode into the radix | |
556 | * tree as the moment it is inserted a concurrent lookup (allowed by the | |
557 | * RCU locking mechanism) can find it and that lookup must see that this | |
558 | * is an inode currently under construction (i.e. that XFS_INEW is set). | |
559 | * The ip->i_flags_lock that protects the XFS_INEW flag forms the | |
560 | * memory barrier that ensures this detection works correctly at lookup | |
561 | * time. | |
562 | */ | |
563 | iflags = XFS_INEW; | |
564 | if (flags & XFS_IGET_DONTCACHE) | |
2c567af4 | 565 | d_mark_dontcache(VFS_I(ip)); |
113a5683 CS |
566 | ip->i_udquot = NULL; |
567 | ip->i_gdquot = NULL; | |
92f8ff73 | 568 | ip->i_pdquot = NULL; |
33479e05 DC |
569 | xfs_iflags_set(ip, iflags); |
570 | ||
571 | /* insert the new inode */ | |
572 | spin_lock(&pag->pag_ici_lock); | |
573 | error = radix_tree_insert(&pag->pag_ici_root, agino, ip); | |
574 | if (unlikely(error)) { | |
575 | WARN_ON(error != -EEXIST); | |
ff6d6af2 | 576 | XFS_STATS_INC(mp, xs_ig_dup); |
2451337d | 577 | error = -EAGAIN; |
33479e05 DC |
578 | goto out_preload_end; |
579 | } | |
580 | spin_unlock(&pag->pag_ici_lock); | |
581 | radix_tree_preload_end(); | |
582 | ||
583 | *ipp = ip; | |
584 | return 0; | |
585 | ||
586 | out_preload_end: | |
587 | spin_unlock(&pag->pag_ici_lock); | |
588 | radix_tree_preload_end(); | |
589 | if (lock_flags) | |
590 | xfs_iunlock(ip, lock_flags); | |
591 | out_destroy: | |
592 | __destroy_inode(VFS_I(ip)); | |
593 | xfs_inode_free(ip); | |
594 | return error; | |
595 | } | |
596 | ||
597 | /* | |
02511a5a DC |
598 | * Look up an inode by number in the given file system. The inode is looked up |
599 | * in the cache held in each AG. If the inode is found in the cache, initialise | |
600 | * the vfs inode if necessary. | |
33479e05 | 601 | * |
02511a5a DC |
602 | * If it is not in core, read it in from the file system's device, add it to the |
603 | * cache and initialise the vfs inode. | |
33479e05 DC |
604 | * |
605 | * The inode is locked according to the value of the lock_flags parameter. | |
02511a5a DC |
606 | * Inode lookup is only done during metadata operations and not as part of the |
607 | * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup. | |
33479e05 DC |
608 | */ |
609 | int | |
610 | xfs_iget( | |
02511a5a DC |
611 | struct xfs_mount *mp, |
612 | struct xfs_trans *tp, | |
613 | xfs_ino_t ino, | |
614 | uint flags, | |
615 | uint lock_flags, | |
616 | struct xfs_inode **ipp) | |
33479e05 | 617 | { |
02511a5a DC |
618 | struct xfs_inode *ip; |
619 | struct xfs_perag *pag; | |
620 | xfs_agino_t agino; | |
621 | int error; | |
33479e05 | 622 | |
33479e05 DC |
623 | ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0); |
624 | ||
625 | /* reject inode numbers outside existing AGs */ | |
626 | if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) | |
2451337d | 627 | return -EINVAL; |
33479e05 | 628 | |
ff6d6af2 | 629 | XFS_STATS_INC(mp, xs_ig_attempts); |
8774cf8b | 630 | |
33479e05 DC |
631 | /* get the perag structure and ensure that it's inode capable */ |
632 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); | |
633 | agino = XFS_INO_TO_AGINO(mp, ino); | |
634 | ||
635 | again: | |
636 | error = 0; | |
637 | rcu_read_lock(); | |
638 | ip = radix_tree_lookup(&pag->pag_ici_root, agino); | |
639 | ||
640 | if (ip) { | |
641 | error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); | |
642 | if (error) | |
643 | goto out_error_or_again; | |
644 | } else { | |
645 | rcu_read_unlock(); | |
378f681c | 646 | if (flags & XFS_IGET_INCORE) { |
ed438b47 | 647 | error = -ENODATA; |
378f681c DW |
648 | goto out_error_or_again; |
649 | } | |
ff6d6af2 | 650 | XFS_STATS_INC(mp, xs_ig_missed); |
33479e05 DC |
651 | |
652 | error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, | |
653 | flags, lock_flags); | |
654 | if (error) | |
655 | goto out_error_or_again; | |
656 | } | |
657 | xfs_perag_put(pag); | |
658 | ||
659 | *ipp = ip; | |
660 | ||
661 | /* | |
58c90473 | 662 | * If we have a real type for an on-disk inode, we can setup the inode |
33479e05 DC |
663 | * now. If it's a new inode being created, xfs_ialloc will handle it. |
664 | */ | |
c19b3b05 | 665 | if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0) |
58c90473 | 666 | xfs_setup_existing_inode(ip); |
33479e05 DC |
667 | return 0; |
668 | ||
669 | out_error_or_again: | |
378f681c | 670 | if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) { |
33479e05 DC |
671 | delay(1); |
672 | goto again; | |
673 | } | |
674 | xfs_perag_put(pag); | |
675 | return error; | |
676 | } | |
677 | ||
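/*
 * Editor's sketch (hypothetical usage, not part of this file): a typical
 * metadata-path caller of xfs_iget() looks roughly like this, taking only
 * the inode lock as the comment above requires and dropping its reference
 * with xfs_irele() when done:
 */
#if 0	/* illustrative only */
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;
	/* ... operate on ip with the inode lock held ... */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	xfs_irele(ip);
#endif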
378f681c DW |
678 | /* |
679 | * "Is this a cached inode that's also allocated?" | |
680 | * | |
681 | * Look up an inode by number in the given file system. If the inode is | |
682 | * in cache and isn't in purgatory, return 1 if the inode is allocated | |
683 | * and 0 if it is not. For all other cases (not in cache, being torn | |
684 | * down, etc.), return a negative error code. | |
685 | * | |
686 | * The caller has to prevent inode allocation and freeing activity, | |
687 | * presumably by locking the AGI buffer. This is to ensure that an | |
688 | * inode cannot transition from allocated to freed until the caller is | |
689 | * ready to allow that. If the inode is in an intermediate state (new, | |
690 | * reclaimable, or being reclaimed), -EAGAIN will be returned; if the | |
691 | * inode is not in the cache, -ENOENT will be returned. The caller must | |
692 | * deal with these scenarios appropriately. | |
693 | * | |
694 | * This is a specialized use case for the online scrubber; if you're | |
695 | * reading this, you probably want xfs_iget. | |
696 | */ | |
697 | int | |
698 | xfs_icache_inode_is_allocated( | |
699 | struct xfs_mount *mp, | |
700 | struct xfs_trans *tp, | |
701 | xfs_ino_t ino, | |
702 | bool *inuse) | |
703 | { | |
704 | struct xfs_inode *ip; | |
705 | int error; | |
706 | ||
707 | error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip); | |
708 | if (error) | |
709 | return error; | |
710 | ||
711 | *inuse = !!(VFS_I(ip)->i_mode); | |
44a8736b | 712 | xfs_irele(ip); |
378f681c DW |
713 | return 0; |
714 | } | |
715 | ||
78ae5256 DC |
716 | /* |
717 | * The inode lookup is done in batches to keep the amount of lock traffic and | |
718 | * radix tree lookups to a minimum. The batch size is a trade off between | |
719 | * lookup reduction and stack usage. This is in the reclaim path, so we can't | |
720 | * be too greedy. | |
721 | */ | |
722 | #define XFS_LOOKUP_BATCH 32 | |
723 | ||
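/*
 * Editor's note (illustrative arithmetic, not part of this file): each walk
 * iteration below keeps a "struct xfs_inode *batch[XFS_LOOKUP_BATCH]" array
 * on the stack, so assuming 8-byte pointers on a 64-bit kernel one batch
 * costs 32 * 8 = 256 bytes of stack - the trade off the comment refers to.
 */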
39b1cfd7 DW |
724 | /* |
725 | * Decide if the given @ip is eligible to be a part of the inode walk, and | |
726 | * grab it if so. Returns true if it's ready to go or false if we should just | |
727 | * ignore it. | |
728 | */ | |
729 | STATIC bool | |
042f65f4 | 730 | xfs_inode_walk_ag_grab( |
ae2c4ac2 BF |
731 | struct xfs_inode *ip, |
732 | int flags) | |
e13de955 DC |
733 | { |
734 | struct inode *inode = VFS_I(ip); | |
042f65f4 | 735 | bool newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT); |
e13de955 | 736 | |
1a3e8f3d DC |
737 | ASSERT(rcu_read_lock_held()); |
738 | ||
02511a5a | 739 | /* Check for stale RCU freed inode */ |
1a3e8f3d DC |
740 | spin_lock(&ip->i_flags_lock); |
741 | if (!ip->i_ino) | |
742 | goto out_unlock_noent; | |
743 | ||
744 | /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ | |
ae2c4ac2 BF |
745 | if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) || |
746 | __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM)) | |
1a3e8f3d DC |
747 | goto out_unlock_noent; |
748 | spin_unlock(&ip->i_flags_lock); | |
749 | ||
e13de955 DC |
750 | /* nothing to sync during shutdown */ |
751 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | |
39b1cfd7 | 752 | return false; |
e13de955 | 753 | |
e13de955 DC |
754 | /* If we can't grab the inode, it must be on its way to reclaim. */ |
755 | if (!igrab(inode)) | |
39b1cfd7 | 756 | return false; |
e13de955 | 757 | |
e13de955 | 758 | /* inode is valid */ |
39b1cfd7 | 759 | return true; |
1a3e8f3d DC |
760 | |
761 | out_unlock_noent: | |
762 | spin_unlock(&ip->i_flags_lock); | |
39b1cfd7 | 763 | return false; |
e13de955 DC |
764 | } |
765 | ||
5662d38c DW |
766 | /* |
767 | * For a given per-AG structure @pag, grab, @execute, and rele all incore | |
768 | * inodes with the given radix tree @tag. | |
769 | */ | |
75f3cb13 | 770 | STATIC int |
042f65f4 | 771 | xfs_inode_walk_ag( |
5017e97d | 772 | struct xfs_perag *pag, |
964176bd | 773 | int iter_flags, |
390600f8 | 774 | int (*execute)(struct xfs_inode *ip, void *args), |
a454f742 | 775 | void *args, |
964176bd | 776 | int tag) |
75f3cb13 | 777 | { |
964176bd | 778 | struct xfs_mount *mp = pag->pag_mount; |
75f3cb13 DC |
779 | uint32_t first_index; |
780 | int last_error = 0; | |
781 | int skipped; | |
7e88d314 | 782 | bool done; |
78ae5256 | 783 | int nr_found; |
75f3cb13 DC |
784 | |
785 | restart: | |
7e88d314 | 786 | done = false; |
75f3cb13 DC |
787 | skipped = 0; |
788 | first_index = 0; | |
78ae5256 | 789 | nr_found = 0; |
75f3cb13 | 790 | do { |
78ae5256 | 791 | struct xfs_inode *batch[XFS_LOOKUP_BATCH]; |
75f3cb13 | 792 | int error = 0; |
78ae5256 | 793 | int i; |
75f3cb13 | 794 | |
1a3e8f3d | 795 | rcu_read_lock(); |
a454f742 | 796 | |
fc96be95 | 797 | if (tag == XFS_ICI_NO_TAG) |
a454f742 | 798 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, |
78ae5256 DC |
799 | (void **)batch, first_index, |
800 | XFS_LOOKUP_BATCH); | |
a454f742 BF |
801 | else |
802 | nr_found = radix_tree_gang_lookup_tag( | |
803 | &pag->pag_ici_root, | |
804 | (void **) batch, first_index, | |
805 | XFS_LOOKUP_BATCH, tag); | |
806 | ||
65d0f205 | 807 | if (!nr_found) { |
1a3e8f3d | 808 | rcu_read_unlock(); |
75f3cb13 | 809 | break; |
c8e20be0 | 810 | } |
75f3cb13 | 811 | |
65d0f205 | 812 | /* |
78ae5256 DC |
813 | * Grab the inodes before we drop the lock. If we found |
814 | * nothing, nr == 0 and the loop will be skipped. | |
65d0f205 | 815 | */ |
78ae5256 DC |
816 | for (i = 0; i < nr_found; i++) { |
817 | struct xfs_inode *ip = batch[i]; | |
818 | ||
042f65f4 | 819 | if (done || !xfs_inode_walk_ag_grab(ip, iter_flags)) |
78ae5256 DC |
820 | batch[i] = NULL; |
821 | ||
822 | /* | |
1a3e8f3d DC |
823 | * Update the index for the next lookup. Catch |
824 | * overflows into the next AG range which can occur if | |
825 | * we have inodes in the last block of the AG and we | |
826 | * are currently pointing to the last inode. | |
827 | * | |
828 | * Because we may see inodes that are from the wrong AG | |
829 | * due to RCU freeing and reallocation, only update the | |
830 | * index if it lies in this AG. It was a race that led |
831 | * us to see this inode, so another lookup from the | |
832 | * same index will not find it again. | |
78ae5256 | 833 | */ |
1a3e8f3d DC |
834 | if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) |
835 | continue; | |
78ae5256 DC |
836 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); |
837 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | |
7e88d314 | 838 | done = true; |
e13de955 | 839 | } |
78ae5256 DC |
840 | |
841 | /* unlock now we've grabbed the inodes. */ | |
1a3e8f3d | 842 | rcu_read_unlock(); |
e13de955 | 843 | |
78ae5256 DC |
844 | for (i = 0; i < nr_found; i++) { |
845 | if (!batch[i]) | |
846 | continue; | |
042f65f4 | 847 | if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) && |
ae2c4ac2 BF |
848 | xfs_iflags_test(batch[i], XFS_INEW)) |
849 | xfs_inew_wait(batch[i]); | |
390600f8 | 850 | error = execute(batch[i], args); |
44a8736b | 851 | xfs_irele(batch[i]); |
2451337d | 852 | if (error == -EAGAIN) { |
78ae5256 DC |
853 | skipped++; |
854 | continue; | |
855 | } | |
2451337d | 856 | if (error && last_error != -EFSCORRUPTED) |
78ae5256 | 857 | last_error = error; |
75f3cb13 | 858 | } |
c8e20be0 DC |
859 | |
860 | /* bail out if the filesystem is corrupted. */ | |
2451337d | 861 | if (error == -EFSCORRUPTED) |
75f3cb13 DC |
862 | break; |
863 | ||
8daaa831 DC |
864 | cond_resched(); |
865 | ||
78ae5256 | 866 | } while (nr_found && !done); |
75f3cb13 DC |
867 | |
868 | if (skipped) { | |
869 | delay(1); | |
870 | goto restart; | |
871 | } | |
75f3cb13 DC |
872 | return last_error; |
873 | } | |
874 | ||
5662d38c DW |
875 | /* Fetch the next (possibly tagged) per-AG structure. */ |
876 | static inline struct xfs_perag * | |
877 | xfs_inode_walk_get_perag( | |
878 | struct xfs_mount *mp, | |
879 | xfs_agnumber_t agno, | |
880 | int tag) | |
881 | { | |
882 | if (tag == XFS_ICI_NO_TAG) | |
883 | return xfs_perag_get(mp, agno); | |
884 | return xfs_perag_get_tag(mp, agno, tag); | |
885 | } | |
886 | ||
887 | /* | |
888 | * Call the @execute function on all incore inodes matching the radix tree | |
889 | * @tag. | |
890 | */ | |
891 | int | |
042f65f4 | 892 | xfs_inode_walk( |
5662d38c DW |
893 | struct xfs_mount *mp, |
894 | int iter_flags, | |
895 | int (*execute)(struct xfs_inode *ip, void *args), | |
896 | void *args, | |
897 | int tag) | |
898 | { | |
899 | struct xfs_perag *pag; | |
900 | int error = 0; | |
901 | int last_error = 0; | |
902 | xfs_agnumber_t ag; | |
903 | ||
904 | ag = 0; | |
905 | while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) { | |
906 | ag = pag->pag_agno + 1; | |
964176bd | 907 | error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag); |
5662d38c DW |
908 | xfs_perag_put(pag); |
909 | if (error) { | |
910 | last_error = error; | |
911 | if (error == -EFSCORRUPTED) | |
912 | break; | |
913 | } | |
914 | } | |
915 | return last_error; | |
916 | } | |
917 | ||
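/*
 * Editor's sketch (hypothetical, not part of this file): a caller supplies
 * an @execute callback matching "int (*)(struct xfs_inode *ip, void *args)".
 * The names below are made up purely for illustration:
 */
#if 0	/* illustrative only */
static int
example_count_inode(
	struct xfs_inode	*ip,
	void			*args)
{
	(*(unsigned long *)args)++;
	return 0;
}

	/* walk every cached inode with an untagged lookup */
	unsigned long		count = 0;
	int			error;

	error = xfs_inode_walk(mp, 0, example_count_inode, &count,
			XFS_ICI_NO_TAG);
#endif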
e3a20c0b DC |
918 | /* |
919 | * Grab the inode for reclaim exclusively. | |
50718b8d DC |
920 | * |
921 | * We have found this inode via a lookup under RCU, so the inode may have | |
922 | * already been freed, or it may be in the process of being recycled by | |
923 | * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode | |
924 | * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE | |
925 | * will not be set. Hence we need to check for both these flag conditions to | |
926 | * avoid inodes that are no longer reclaim candidates. | |
927 | * | |
928 | * Note: checking for other state flags here, under the i_flags_lock or not, is | |
929 | * racy and should be avoided. Those races should be resolved only after we have | |
930 | * ensured that we are able to reclaim this inode and the world can see that we | |
931 | * are going to reclaim it. | |
932 | * | |
933 | * Return true if we grabbed it, false otherwise. | |
e3a20c0b | 934 | */ |
50718b8d | 935 | static bool |
e3a20c0b | 936 | xfs_reclaim_inode_grab( |
50718b8d | 937 | struct xfs_inode *ip) |
e3a20c0b | 938 | { |
1a3e8f3d DC |
939 | ASSERT(rcu_read_lock_held()); |
940 | ||
e3a20c0b | 941 | spin_lock(&ip->i_flags_lock); |
1a3e8f3d DC |
942 | if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || |
943 | __xfs_iflags_test(ip, XFS_IRECLAIM)) { | |
944 | /* not a reclaim candidate. */ | |
e3a20c0b | 945 | spin_unlock(&ip->i_flags_lock); |
50718b8d | 946 | return false; |
e3a20c0b DC |
947 | } |
948 | __xfs_iflags_set(ip, XFS_IRECLAIM); | |
949 | spin_unlock(&ip->i_flags_lock); | |
50718b8d | 950 | return true; |
e3a20c0b DC |
951 | } |
952 | ||
777df5af | 953 | /* |
02511a5a DC |
954 | * Inode reclaim is non-blocking, so the default action if progress cannot be |
955 | * made is to "requeue" the inode for reclaim by unlocking it and clearing the | |
956 | * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about | |
957 | * blocking any more, and hence we can block waiting on the inode in order |
958 | * to reclaim it. |
777df5af | 959 | * |
02511a5a DC |
960 | * We do no IO here - if callers require inodes to be cleaned they must push the |
961 | * AIL first to trigger writeback of dirty inodes. This enables writeback to be | |
962 | * done in the background in a non-blocking manner, and enables memory reclaim | |
963 | * to make progress without blocking. | |
777df5af | 964 | */ |
4d0bab3a | 965 | static void |
c8e20be0 | 966 | xfs_reclaim_inode( |
75f3cb13 | 967 | struct xfs_inode *ip, |
50718b8d | 968 | struct xfs_perag *pag) |
fce08f2f | 969 | { |
8a17d7dd | 970 | xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ |
777df5af | 971 | |
9552e14d | 972 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) |
617825fe | 973 | goto out; |
718ecc50 | 974 | if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING)) |
9552e14d | 975 | goto out_iunlock; |
7a3be02b | 976 | |
777df5af DC |
977 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
978 | xfs_iunpin_wait(ip); | |
88fc1879 | 979 | xfs_iflush_abort(ip); |
777df5af DC |
980 | goto reclaim; |
981 | } | |
617825fe | 982 | if (xfs_ipincount(ip)) |
718ecc50 | 983 | goto out_clear_flush; |
617825fe | 984 | if (!xfs_inode_clean(ip)) |
718ecc50 | 985 | goto out_clear_flush; |
8a48088f | 986 | |
718ecc50 | 987 | xfs_iflags_clear(ip, XFS_IFLUSHING); |
777df5af | 988 | reclaim: |
98efe8af | 989 | |
8a17d7dd DC |
990 | /* |
991 | * Because we use RCU freeing we need to ensure the inode always appears | |
992 | * to be reclaimed with an invalid inode number when in the free state. | |
98efe8af | 993 | * We do this as early as possible under the ILOCK so that |
f2e9ad21 OS |
994 | * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to |
995 | * detect races with us here. By doing this, we guarantee that once | |
996 | * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that | |
997 | * it will see either a valid inode that will serialise correctly, or it | |
998 | * will see an invalid inode that it can skip. | |
8a17d7dd DC |
999 | */ |
1000 | spin_lock(&ip->i_flags_lock); | |
1001 | ip->i_flags = XFS_IRECLAIM; | |
1002 | ip->i_ino = 0; | |
1003 | spin_unlock(&ip->i_flags_lock); | |
1004 | ||
c8e20be0 | 1005 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
2f11feab | 1006 | |
ff6d6af2 | 1007 | XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); |
2f11feab DC |
1008 | /* |
1009 | * Remove the inode from the per-AG radix tree. | |
1010 | * | |
1011 | * Because radix_tree_delete won't complain even if the item was never | |
1012 | * added to the tree assert that it's been there before to catch | |
1013 | * problems with the inode life time early on. | |
1014 | */ | |
1a427ab0 | 1015 | spin_lock(&pag->pag_ici_lock); |
2f11feab | 1016 | if (!radix_tree_delete(&pag->pag_ici_root, |
8a17d7dd | 1017 | XFS_INO_TO_AGINO(ip->i_mount, ino))) |
2f11feab | 1018 | ASSERT(0); |
545c0889 | 1019 | xfs_perag_clear_reclaim_tag(pag); |
1a427ab0 | 1020 | spin_unlock(&pag->pag_ici_lock); |
2f11feab DC |
1021 | |
1022 | /* | |
1023 | * Here we do an (almost) spurious inode lock in order to coordinate | |
1024 | * with inode cache radix tree lookups. This is because the lookup | |
1025 | * can reference the inodes in the cache without taking references. | |
1026 | * | |
1027 | * We make that OK here by ensuring that we wait until the inode is | |
ad637a10 | 1028 | * unlocked after the lookup before we go ahead and free it. |
2f11feab | 1029 | */ |
ad637a10 | 1030 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
2f11feab | 1031 | xfs_qm_dqdetach(ip); |
ad637a10 | 1032 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
96355d5a | 1033 | ASSERT(xfs_inode_clean(ip)); |
2f11feab | 1034 | |
8a17d7dd | 1035 | __xfs_inode_free(ip); |
4d0bab3a | 1036 | return; |
8a48088f | 1037 | |
718ecc50 DC |
1038 | out_clear_flush: |
1039 | xfs_iflags_clear(ip, XFS_IFLUSHING); | |
9552e14d | 1040 | out_iunlock: |
8a48088f | 1041 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
9552e14d | 1042 | out: |
617825fe | 1043 | xfs_iflags_clear(ip, XFS_IRECLAIM); |
7a3be02b DC |
1044 | } |
1045 | ||
65d0f205 DC |
1046 | /* |
1047 | * Walk the AGs and reclaim the inodes in them. Even if the filesystem is | |
1048 | * corrupted, we still want to try to reclaim all the inodes. If we don't, | |
1049 | * then a shutdown during the filesystem unmount reclaim walk would leak all the |
1050 | * unreclaimed inodes. | |
617825fe DC |
1051 | * |
1052 | * Returns non-zero if any AGs or inodes were skipped in the reclaim pass | |
1053 | * so that callers that want to block until all dirty inodes are written back | |
1054 | * and reclaimed can sanely loop. | |
65d0f205 | 1055 | */ |
4d0bab3a | 1056 | static void |
65d0f205 DC |
1057 | xfs_reclaim_inodes_ag( |
1058 | struct xfs_mount *mp, | |
65d0f205 DC |
1059 | int *nr_to_scan) |
1060 | { | |
1061 | struct xfs_perag *pag; | |
0e8e2c63 | 1062 | xfs_agnumber_t ag = 0; |
65d0f205 | 1063 | |
65d0f205 DC |
1064 | while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { |
1065 | unsigned long first_index = 0; | |
1066 | int done = 0; | |
e3a20c0b | 1067 | int nr_found = 0; |
65d0f205 DC |
1068 | |
1069 | ag = pag->pag_agno + 1; | |
1070 | ||
0e8e2c63 | 1071 | first_index = READ_ONCE(pag->pag_ici_reclaim_cursor); |
65d0f205 | 1072 | do { |
e3a20c0b DC |
1073 | struct xfs_inode *batch[XFS_LOOKUP_BATCH]; |
1074 | int i; | |
65d0f205 | 1075 | |
1a3e8f3d | 1076 | rcu_read_lock(); |
e3a20c0b DC |
1077 | nr_found = radix_tree_gang_lookup_tag( |
1078 | &pag->pag_ici_root, | |
1079 | (void **)batch, first_index, | |
1080 | XFS_LOOKUP_BATCH, | |
65d0f205 DC |
1081 | XFS_ICI_RECLAIM_TAG); |
1082 | if (!nr_found) { | |
b2232219 | 1083 | done = 1; |
1a3e8f3d | 1084 | rcu_read_unlock(); |
65d0f205 DC |
1085 | break; |
1086 | } | |
1087 | ||
1088 | /* | |
e3a20c0b DC |
1089 | * Grab the inodes before we drop the lock. If we found |
1090 | * nothing, nr == 0 and the loop will be skipped. | |
65d0f205 | 1091 | */ |
e3a20c0b DC |
1092 | for (i = 0; i < nr_found; i++) { |
1093 | struct xfs_inode *ip = batch[i]; | |
1094 | ||
50718b8d | 1095 | if (done || !xfs_reclaim_inode_grab(ip)) |
e3a20c0b DC |
1096 | batch[i] = NULL; |
1097 | ||
1098 | /* | |
1099 | * Update the index for the next lookup. Catch | |
1100 | * overflows into the next AG range which can | |
1101 | * occur if we have inodes in the last block of | |
1102 | * the AG and we are currently pointing to the | |
1103 | * last inode. | |
1a3e8f3d DC |
1104 | * |
1105 | * Because we may see inodes that are from the | |
1106 | * wrong AG due to RCU freeing and | |
1107 | * reallocation, only update the index if it | |
1108 | * lies in this AG. It was a race that led us |
1109 | * to see this inode, so another lookup from | |
1110 | * the same index will not find it again. | |
e3a20c0b | 1111 | */ |
1a3e8f3d DC |
1112 | if (XFS_INO_TO_AGNO(mp, ip->i_ino) != |
1113 | pag->pag_agno) | |
1114 | continue; | |
e3a20c0b DC |
1115 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); |
1116 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | |
1117 | done = 1; | |
1118 | } | |
65d0f205 | 1119 | |
e3a20c0b | 1120 | /* unlock now we've grabbed the inodes. */ |
1a3e8f3d | 1121 | rcu_read_unlock(); |
e3a20c0b DC |
1122 | |
1123 | for (i = 0; i < nr_found; i++) { | |
4d0bab3a DC |
1124 | if (batch[i]) |
1125 | xfs_reclaim_inode(batch[i], pag); | |
e3a20c0b DC |
1126 | } |
1127 | ||
1128 | *nr_to_scan -= XFS_LOOKUP_BATCH; | |
8daaa831 | 1129 | cond_resched(); |
e3a20c0b | 1130 | } while (nr_found && !done && *nr_to_scan > 0); |
65d0f205 | 1131 | |
0e8e2c63 DC |
1132 | if (done) |
1133 | first_index = 0; | |
1134 | WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index); | |
65d0f205 DC |
1135 | xfs_perag_put(pag); |
1136 | } | |
65d0f205 DC |
1137 | } |
1138 | ||
4d0bab3a | 1139 | void |
7a3be02b | 1140 | xfs_reclaim_inodes( |
4d0bab3a | 1141 | struct xfs_mount *mp) |
7a3be02b | 1142 | { |
65d0f205 DC |
1143 | int nr_to_scan = INT_MAX; |
1144 | ||
4d0bab3a | 1145 | while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { |
617825fe | 1146 | xfs_ail_push_all_sync(mp->m_ail); |
4d0bab3a | 1147 | xfs_reclaim_inodes_ag(mp, &nr_to_scan); |
0f4ec0f1 | 1148 | } |
9bf729c0 DC |
1149 | } |
1150 | ||
1151 | /* | |
02511a5a DC |
1152 | * The shrinker infrastructure determines how many inodes we should scan for |
1153 | * reclaim. We want as many clean inodes ready to reclaim as possible, so we | |
1154 | * push the AIL here. We also want to proactively free up memory if we can to | |
1155 | * minimise the amount of work memory reclaim has to do so we kick the | |
1156 | * background reclaim if it isn't already scheduled. | |
9bf729c0 | 1157 | */ |
0a234c6d | 1158 | long |
8daaa831 DC |
1159 | xfs_reclaim_inodes_nr( |
1160 | struct xfs_mount *mp, | |
1161 | int nr_to_scan) | |
9bf729c0 | 1162 | { |
8daaa831 | 1163 | /* kick background reclaimer and push the AIL */ |
5889608d | 1164 | xfs_reclaim_work_queue(mp); |
8daaa831 | 1165 | xfs_ail_push_all(mp->m_ail); |
a7b339f1 | 1166 | |
50718b8d | 1167 | xfs_reclaim_inodes_ag(mp, &nr_to_scan); |
617825fe | 1168 | return 0; |
8daaa831 | 1169 | } |
9bf729c0 | 1170 | |
8daaa831 DC |
1171 | /* |
1172 | * Return the number of reclaimable inodes in the filesystem for | |
1173 | * the shrinker to determine how much to reclaim. | |
1174 | */ | |
1175 | int | |
1176 | xfs_reclaim_inodes_count( | |
1177 | struct xfs_mount *mp) | |
1178 | { | |
1179 | struct xfs_perag *pag; | |
1180 | xfs_agnumber_t ag = 0; | |
1181 | int reclaimable = 0; | |
9bf729c0 | 1182 | |
65d0f205 DC |
1183 | while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { |
1184 | ag = pag->pag_agno + 1; | |
70e60ce7 DC |
1185 | reclaimable += pag->pag_ici_reclaimable; |
1186 | xfs_perag_put(pag); | |
9bf729c0 | 1187 | } |
9bf729c0 DC |
1188 | return reclaimable; |
1189 | } | |
1190 | ||
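/*
 * Editor's note (hedged, not part of this file): xfs_reclaim_inodes_nr() and
 * xfs_reclaim_inodes_count() are expected to be wired into the superblock
 * shrinker in xfs_super.c, where ->nr_cached_objects() reports this count and
 * ->free_cached_objects() drives xfs_reclaim_inodes_nr().
 */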
39b1cfd7 | 1191 | STATIC bool |
3e3f9f58 BF |
1192 | xfs_inode_match_id( |
1193 | struct xfs_inode *ip, | |
1194 | struct xfs_eofblocks *eofb) | |
1195 | { | |
b9fe5052 DE |
1196 | if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && |
1197 | !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) | |
39b1cfd7 | 1198 | return false; |
3e3f9f58 | 1199 | |
b9fe5052 DE |
1200 | if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && |
1201 | !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) | |
39b1cfd7 | 1202 | return false; |
1b556048 | 1203 | |
b9fe5052 | 1204 | if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && |
de7a866f | 1205 | ip->i_d.di_projid != eofb->eof_prid) |
39b1cfd7 | 1206 | return false; |
1b556048 | 1207 | |
39b1cfd7 | 1208 | return true; |
3e3f9f58 BF |
1209 | } |
1210 | ||
f4526397 BF |
1211 | /* |
1212 | * A union-based inode filtering algorithm. Process the inode if any of the | |
1213 | * criteria match. This is for global/internal scans only. | |
1214 | */ | |
39b1cfd7 | 1215 | STATIC bool |
f4526397 BF |
1216 | xfs_inode_match_id_union( |
1217 | struct xfs_inode *ip, | |
1218 | struct xfs_eofblocks *eofb) | |
1219 | { | |
1220 | if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && | |
1221 | uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) | |
39b1cfd7 | 1222 | return true; |
f4526397 BF |
1223 | |
1224 | if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && | |
1225 | gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) | |
39b1cfd7 | 1226 | return true; |
f4526397 BF |
1227 | |
1228 | if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && | |
de7a866f | 1229 | ip->i_d.di_projid == eofb->eof_prid) |
39b1cfd7 | 1230 | return true; |
f4526397 | 1231 | |
39b1cfd7 | 1232 | return false; |
f4526397 BF |
1233 | } |
1234 | ||
a91bf992 DW |
1235 | /* |
1236 | * Is this inode @ip eligible for eof/cow block reclamation, given some | |
1237 | * filtering parameters @eofb? The inode is eligible if @eofb is null or | |
1238 | * if the predicate functions match. | |
1239 | */ | |
1240 | static bool | |
1241 | xfs_inode_matches_eofb( | |
1242 | struct xfs_inode *ip, | |
1243 | struct xfs_eofblocks *eofb) | |
1244 | { | |
39b1cfd7 | 1245 | bool match; |
a91bf992 DW |
1246 | |
1247 | if (!eofb) | |
1248 | return true; | |
1249 | ||
1250 | if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) | |
1251 | match = xfs_inode_match_id_union(ip, eofb); | |
1252 | else | |
1253 | match = xfs_inode_match_id(ip, eofb); | |
1254 | if (!match) | |
1255 | return false; | |
1256 | ||
1257 | /* skip the inode if the file size is too small */ | |
1258 | if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) && | |
1259 | XFS_ISIZE(ip) < eofb->eof_min_file_size) | |
1260 | return false; | |
1261 | ||
1262 | return true; | |
1263 | } | |
1264 | ||
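/*
 * Editor's sketch (hypothetical, not part of this file): a filter that only
 * matches inodes belonging to project ID 42 and at least 1MiB in size could
 * be built from the fields used above, e.g.:
 */
#if 0	/* illustrative only */
	struct xfs_eofblocks	eofb = {
		.eof_flags		= XFS_EOF_FLAGS_PRID |
					  XFS_EOF_FLAGS_MINFILESIZE,
		.eof_prid		= 42,
		.eof_min_file_size	= 1024 * 1024,
	};
#endif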
4d0bab3a DC |
1265 | /* |
1266 | * This is a fast pass over the inode cache to try to get reclaim moving on as | |
1267 | * many inodes as possible in a short period of time. It kicks itself every few | |
1268 | * seconds, as well as being kicked by the inode cache shrinker when memory | |
02511a5a | 1269 | * goes low. |
4d0bab3a DC |
1270 | */ |
1271 | void | |
1272 | xfs_reclaim_worker( | |
1273 | struct work_struct *work) | |
1274 | { | |
1275 | struct xfs_mount *mp = container_of(to_delayed_work(work), | |
1276 | struct xfs_mount, m_reclaim_work); | |
1277 | int nr_to_scan = INT_MAX; | |
1278 | ||
1279 | xfs_reclaim_inodes_ag(mp, &nr_to_scan); | |
1280 | xfs_reclaim_work_queue(mp); | |
1281 | } | |
1282 | ||
41176a68 BF |
1283 | STATIC int |
1284 | xfs_inode_free_eofblocks( | |
1285 | struct xfs_inode *ip, | |
0fa4a10a DW |
1286 | void *args, |
1287 | unsigned int *lockflags) | |
41176a68 | 1288 | { |
390600f8 DW |
1289 | struct xfs_eofblocks *eofb = args; |
1290 | bool wait; | |
390600f8 DW |
1291 | |
1292 | wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC); | |
5400da7d | 1293 | |
ce2d3bbe DW |
1294 | if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS)) |
1295 | return 0; | |
1296 | ||
41176a68 BF |
1297 | if (!xfs_can_free_eofblocks(ip, false)) { |
1298 | /* inode could be preallocated or append-only */ | |
1299 | trace_xfs_inode_free_eofblocks_invalid(ip); | |
1300 | xfs_inode_clear_eofblocks_tag(ip); | |
1301 | return 0; | |
1302 | } | |
1303 | ||
1304 | /* | |
1305 | * If the mapping is dirty the operation can block and wait for some | |
1306 | * time. Unless we are waiting, skip it. | |
1307 | */ | |
390600f8 | 1308 | if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) |
41176a68 BF |
1309 | return 0; |
1310 | ||
a91bf992 DW |
1311 | if (!xfs_inode_matches_eofb(ip, eofb)) |
1312 | return 0; | |
3e3f9f58 | 1313 | |
a36b9261 BF |
1314 | /* |
1315 | * If the caller is waiting, return -EAGAIN to keep the background | |
1316 | * scanner moving and revisit the inode in a subsequent pass. | |
1317 | */ | |
c3155097 | 1318 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { |
390600f8 DW |
1319 | if (wait) |
1320 | return -EAGAIN; | |
1321 | return 0; | |
a36b9261 | 1322 | } |
0fa4a10a | 1323 | *lockflags |= XFS_IOLOCK_EXCL; |
390600f8 | 1324 | |
0fa4a10a | 1325 | return xfs_free_eofblocks(ip); |
41176a68 BF |
1326 | } |
1327 | ||
f9296569 | 1328 | /* |
9669f51d DW |
1329 | * Background scanning to trim preallocated space. This is queued based on the |
1330 | * 'speculative_prealloc_lifetime' tunable (5m by default). | |
f9296569 | 1331 | */ |
9669f51d DW |
1332 | static inline void |
1333 | xfs_blockgc_queue( | |
894ecacf | 1334 | struct xfs_perag *pag) |
f9296569 DW |
1335 | { |
1336 | rcu_read_lock(); | |
894ecacf | 1337 | if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) |
3fef46fc | 1338 | queue_delayed_work(pag->pag_mount->m_gc_workqueue, |
894ecacf | 1339 | &pag->pag_blockgc_work, |
9669f51d | 1340 | msecs_to_jiffies(xfs_blockgc_secs * 1000)); |
f9296569 DW |
1341 | rcu_read_unlock(); |
1342 | } | |
1343 | ||
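/*
 * Editor's note (illustrative arithmetic, not part of this file): with the
 * default 5 minute speculative_prealloc_lifetime mentioned above,
 * xfs_blockgc_secs is 300, so each queueing schedules the per-AG work
 * 300 * 1000 = 300,000 ms (5 minutes) into the future.
 */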
83104d44 | 1344 | static void |
ce2d3bbe DW |
1345 | xfs_blockgc_set_iflag( |
1346 | struct xfs_inode *ip, | |
ce2d3bbe | 1347 | unsigned long iflag) |
27b52867 | 1348 | { |
ce2d3bbe DW |
1349 | struct xfs_mount *mp = ip->i_mount; |
1350 | struct xfs_perag *pag; | |
1351 | int tagged; | |
1352 | ||
1353 | ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0); | |
27b52867 | 1354 | |
85a6e764 CH |
1355 | /* |
1356 | * Don't bother locking the AG and looking up in the radix trees | |
1357 | * if we already know that we have the tag set. | |
1358 | */ | |
ce2d3bbe | 1359 | if (ip->i_flags & iflag) |
85a6e764 CH |
1360 | return; |
1361 | spin_lock(&ip->i_flags_lock); | |
ce2d3bbe | 1362 | ip->i_flags |= iflag; |
85a6e764 CH |
1363 | spin_unlock(&ip->i_flags_lock); |
1364 | ||
27b52867 BF |
1365 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
1366 | spin_lock(&pag->pag_ici_lock); | |
27b52867 | 1367 | |
ce2d3bbe | 1368 | tagged = radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG); |
27b52867 | 1369 | radix_tree_tag_set(&pag->pag_ici_root, |
ce2d3bbe DW |
1370 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), |
1371 | XFS_ICI_BLOCKGC_TAG); | |
27b52867 | 1372 | if (!tagged) { |
ce2d3bbe | 1373 | /* propagate the blockgc tag up into the perag radix tree */ |
27b52867 BF |
1374 | spin_lock(&ip->i_mount->m_perag_lock); |
1375 | radix_tree_tag_set(&ip->i_mount->m_perag_tree, | |
1376 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | |
ce2d3bbe | 1377 | XFS_ICI_BLOCKGC_TAG); |
27b52867 | 1378 | spin_unlock(&ip->i_mount->m_perag_lock); |
579b62fa BF |
1379 | |
1380 | /* kick off background trimming */ | |
894ecacf | 1381 | xfs_blockgc_queue(pag); |
27b52867 | 1382 | |
ce2d3bbe DW |
1383 | trace_xfs_perag_set_blockgc(ip->i_mount, pag->pag_agno, -1, |
1384 | _RET_IP_); | |
27b52867 BF |
1385 | } |
1386 | ||
1387 | spin_unlock(&pag->pag_ici_lock); | |
1388 | xfs_perag_put(pag); | |
1389 | } | |
1390 | ||
1391 | void | |
83104d44 | 1392 | xfs_inode_set_eofblocks_tag( |
27b52867 | 1393 | xfs_inode_t *ip) |
83104d44 DW |
1394 | { |
1395 | trace_xfs_inode_set_eofblocks_tag(ip); | |
9669f51d | 1396 | return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS); |
83104d44 DW |
1397 | } |
1398 | ||
1399 | static void | |
ce2d3bbe DW |
1400 | xfs_blockgc_clear_iflag( |
1401 | struct xfs_inode *ip, | |
1402 | unsigned long iflag) | |
27b52867 | 1403 | { |
ce2d3bbe DW |
1404 | struct xfs_mount *mp = ip->i_mount; |
1405 | struct xfs_perag *pag; | |
1406 | bool clear_tag; | |
1407 | ||
1408 | ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0); | |
27b52867 | 1409 | |
85a6e764 | 1410 | spin_lock(&ip->i_flags_lock); |
ce2d3bbe DW |
1411 | ip->i_flags &= ~iflag; |
1412 | clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0; | |
85a6e764 CH |
1413 | spin_unlock(&ip->i_flags_lock); |
1414 | ||
ce2d3bbe DW |
1415 | if (!clear_tag) |
1416 | return; | |
1417 | ||
27b52867 BF |
1418 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
1419 | spin_lock(&pag->pag_ici_lock); | |
27b52867 BF |
1420 | |
1421 | radix_tree_tag_clear(&pag->pag_ici_root, | |
ce2d3bbe DW |
1422 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), |
1423 | XFS_ICI_BLOCKGC_TAG); | |
1424 | if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG)) { | |
1425 | /* clear the blockgc tag from the perag radix tree */ | |
27b52867 BF |
1426 | spin_lock(&ip->i_mount->m_perag_lock); |
1427 | radix_tree_tag_clear(&ip->i_mount->m_perag_tree, | |
1428 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | |
ce2d3bbe | 1429 | XFS_ICI_BLOCKGC_TAG); |
27b52867 | 1430 | spin_unlock(&ip->i_mount->m_perag_lock); |
ce2d3bbe DW |
1431 | trace_xfs_perag_clear_blockgc(ip->i_mount, pag->pag_agno, -1, |
1432 | _RET_IP_); | |
27b52867 BF |
1433 | } |
1434 | ||
1435 | spin_unlock(&pag->pag_ici_lock); | |
1436 | xfs_perag_put(pag); | |
1437 | } | |
1438 | ||
83104d44 DW |
1439 | void |
1440 | xfs_inode_clear_eofblocks_tag( | |
1441 | xfs_inode_t *ip) | |
1442 | { | |
1443 | trace_xfs_inode_clear_eofblocks_tag(ip); | |
ce2d3bbe | 1444 | return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS); |
83104d44 DW |
1445 | } |
1446 | ||
1447 | /* | |
be78ff0e DW |
1448 | * Set ourselves up to free CoW blocks from this file. If it's already clean |
1449 | * then we can bail out quickly, but otherwise we must back off if the file | |
1450 | * is undergoing some kind of write. | |
83104d44 | 1451 | */ |
be78ff0e DW |
1452 | static bool |
1453 | xfs_prep_free_cowblocks( | |
51d62690 | 1454 | struct xfs_inode *ip) |
83104d44 | 1455 | { |
39937234 BF |
1456 | /* |
1457 | * Just clear the tag if we have an empty cow fork or none at all. It's | |
1458 | * possible the inode was fully unshared since it was originally tagged. | |
1459 | */ | |
51d62690 | 1460 | if (!xfs_inode_has_cow_data(ip)) { |
83104d44 DW |
1461 | trace_xfs_inode_free_cowblocks_invalid(ip); |
1462 | xfs_inode_clear_cowblocks_tag(ip); | |
be78ff0e | 1463 | return false; |
83104d44 DW |
1464 | } |
1465 | ||
1466 | /* | |
1467 | * If the mapping is dirty or under writeback we cannot touch the | |
1468 | * CoW fork. Leave it alone if we're in the midst of a directio. | |
1469 | */ | |
a1b7a4de CH |
1470 | if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) || |
1471 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) || | |
83104d44 DW |
1472 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) || |
1473 | atomic_read(&VFS_I(ip)->i_dio_count)) | |
be78ff0e DW |
1474 | return false; |
1475 | ||
1476 | return true; | |
1477 | } | |
1478 | ||
1479 | /* | |
1480 | * Automatic CoW Reservation Freeing | |
1481 | * | |
1482 | * These functions automatically garbage collect leftover CoW reservations | |
1483 | * that were made on behalf of a cowextsize hint when we start to run out | |
1484 | * of quota or when the reservations sit around for too long. If the file | |
1485 | * has dirty pages or is undergoing writeback, its CoW reservations will | |
1486 | * be retained. | |
1487 | * | |
1488 | * The actual garbage collection piggybacks off the same code that runs | |
1489 | * the speculative EOF preallocation garbage collector. | |
1490 | */ | |
1491 | STATIC int | |
1492 | xfs_inode_free_cowblocks( | |
1493 | struct xfs_inode *ip, | |
0fa4a10a DW |
1494 | void *args, |
1495 | unsigned int *lockflags) | |
be78ff0e DW |
1496 | { |
1497 | struct xfs_eofblocks *eofb = args; | |
f41a0716 | 1498 | bool wait; |
be78ff0e DW |
1499 | int ret = 0; |
1500 | ||
f41a0716 DW |
1501 | wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC); |
1502 | ||
ce2d3bbe DW |
1503 | if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS)) |
1504 | return 0; | |
1505 | ||
51d62690 | 1506 | if (!xfs_prep_free_cowblocks(ip)) |
83104d44 DW |
1507 | return 0; |
1508 | ||
a91bf992 DW |
1509 | if (!xfs_inode_matches_eofb(ip, eofb)) |
1510 | return 0; | |
83104d44 | 1511 | |
f41a0716 DW |
1512 | /* |
1513 | * If the caller is waiting, return -EAGAIN to keep the background | |
1514 | * scanner moving and revisit the inode in a subsequent pass. | |
1515 | */ | |
0fa4a10a DW |
1516 | if (!(*lockflags & XFS_IOLOCK_EXCL) && |
1517 | !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { | |
f41a0716 DW |
1518 | if (wait) |
1519 | return -EAGAIN; | |
1520 | return 0; | |
1521 | } | |
0fa4a10a DW |
1522 | *lockflags |= XFS_IOLOCK_EXCL; |
1523 | ||
f41a0716 DW |
1524 | if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) { |
1525 | if (wait) | |
0fa4a10a DW |
1526 | return -EAGAIN; |
1527 | return 0; | |
f41a0716 | 1528 | } |
0fa4a10a | 1529 | *lockflags |= XFS_MMAPLOCK_EXCL; |
83104d44 | 1530 | |
be78ff0e DW |
1531 | /* |
1532 | * Check again, nobody else should be able to dirty blocks or change | |
1533 | * the reflink iflag now that we have the first two locks held. | |
1534 | */ | |
51d62690 | 1535 | if (xfs_prep_free_cowblocks(ip)) |
be78ff0e | 1536 | ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false); |
83104d44 DW |
1537 | return ret; |
1538 | } | |
1539 | ||
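xfs_inode_free_cowblocks() is an instance of the optimistic check / trylock / recheck pattern: the cheap xfs_prep_free_cowblocks() and filter tests run without the expensive locks, the IOLOCK and MMAPLOCK are only trylocked so the background walk never stalls behind an active writer, and the preparation test is repeated once the locks are held because the file may have been dirtied in the window between the unlocked check and lock acquisition. A minimal userspace analogue of the same shape, assuming nothing about XFS internals (pthreads; all names are illustrative):

	#include <pthread.h>
	#include <stdbool.h>

	struct victim {
		pthread_mutex_t	lock;
		bool		busy;		/* "file is being written" stand-in */
		long		reclaimable;	/* "CoW blocks" stand-in */
	};

	/* Returns blocks reclaimed, 0 if nothing to do, or -1 if we backed off. */
	static long try_reclaim(struct victim *v)
	{
		long freed;

		if (v->busy || !v->reclaimable)		/* cheap unlocked check */
			return 0;
		if (pthread_mutex_trylock(&v->lock))	/* never wait behind a writer */
			return -1;
		if (v->busy || !v->reclaimable) {	/* recheck: state may have changed */
			pthread_mutex_unlock(&v->lock);
			return 0;
		}
		freed = v->reclaimable;
		v->reclaimable = 0;
		pthread_mutex_unlock(&v->lock);
		return freed;
	}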
83104d44 DW |
1540 | void |
1541 | xfs_inode_set_cowblocks_tag( | |
1542 | xfs_inode_t *ip) | |
1543 | { | |
7b7381f0 | 1544 | trace_xfs_inode_set_cowblocks_tag(ip); |
9669f51d | 1545 | return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS); |
83104d44 DW |
1546 | } |
1547 | ||
1548 | void | |
1549 | xfs_inode_clear_cowblocks_tag( | |
1550 | xfs_inode_t *ip) | |
1551 | { | |
7b7381f0 | 1552 | trace_xfs_inode_clear_cowblocks_tag(ip); |
ce2d3bbe | 1553 | return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS); |
83104d44 | 1554 | } |
d6b636eb | 1555 | |
894ecacf DW |
1556 | #define for_each_perag_tag(mp, next_agno, pag, tag) \ |
1557 | for ((next_agno) = 0, (pag) = xfs_perag_get_tag((mp), 0, (tag)); \ | |
1558 | (pag) != NULL; \ | |
1559 | (next_agno) = (pag)->pag_agno + 1, \ | |
1560 | xfs_perag_put(pag), \ | |
1561 | (pag) = xfs_perag_get_tag((mp), (next_agno), (tag))) | |
1562 | ||
1563 | ||
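One subtlety of for_each_perag_tag(): the xfs_perag_put() for the current AG happens in the loop's increment expression, so a caller that breaks out of the loop body still holds a reference and must drop it explicitly. A hypothetical caller illustrating this (the stop-at-first-AG logic is not taken from this file):

	static xfs_agnumber_t
	example_find_first_tagged_ag(
		struct xfs_mount	*mp)
	{
		struct xfs_perag	*pag;
		xfs_agnumber_t		agno;
		xfs_agnumber_t		found = NULLAGNUMBER;

		for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) {
			found = pag->pag_agno;
			xfs_perag_put(pag);	/* drop the ref the macro took */
			break;			/* stop at the first tagged AG */
		}
		return found;
	}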
d6b636eb DW |
1564 | /* Disable post-EOF and CoW block auto-reclamation. */ |
1565 | void | |
c9a6526f | 1566 | xfs_blockgc_stop( |
d6b636eb DW |
1567 | struct xfs_mount *mp) |
1568 | { | |
894ecacf DW |
1569 | struct xfs_perag *pag; |
1570 | xfs_agnumber_t agno; | |
1571 | ||
1572 | for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) | |
1573 | cancel_delayed_work_sync(&pag->pag_blockgc_work); | |
d6b636eb DW |
1574 | } |
1575 | ||
1576 | /* Enable post-EOF and CoW block auto-reclamation. */ | |
1577 | void | |
c9a6526f | 1578 | xfs_blockgc_start( |
d6b636eb DW |
1579 | struct xfs_mount *mp) |
1580 | { | |
894ecacf DW |
1581 | struct xfs_perag *pag; |
1582 | xfs_agnumber_t agno; | |
1583 | ||
1584 | for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG) | |
1585 | xfs_blockgc_queue(pag); | |
d6b636eb | 1586 | } |
3d4feec0 | 1587 | |
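xfs_blockgc_stop() cancels and waits for every tagged AG's worker, while xfs_blockgc_start() re-arms them, so the two are meant to bracket operations that must not race with background scans (freeze/thaw style). A hedged sketch of that pairing; the wrapper and its callback are illustrative, not code from this file:

	/* Illustrative pairing only: quiesce blockgc around a hypothetical operation. */
	static void
	example_with_blockgc_quiesced(
		struct xfs_mount	*mp,
		void			(*op)(struct xfs_mount *))
	{
		xfs_blockgc_stop(mp);	/* cancel and wait for per-AG workers */
		op(mp);
		xfs_blockgc_start(mp);	/* re-arm workers for tagged AGs */
	}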
41956753 DW |
1588 | /* Scan one incore inode for block preallocations that we can remove. */ |
1589 | static int | |
1590 | xfs_blockgc_scan_inode( | |
1591 | struct xfs_inode *ip, | |
1592 | void *args) | |
85c5b270 | 1593 | { |
0fa4a10a | 1594 | unsigned int lockflags = 0; |
85c5b270 DW |
1595 | int error; |
1596 | ||
0fa4a10a | 1597 | error = xfs_inode_free_eofblocks(ip, args, &lockflags); |
85c5b270 | 1598 | if (error) |
0fa4a10a | 1599 | goto unlock; |
85c5b270 | 1600 | |
0fa4a10a DW |
1601 | error = xfs_inode_free_cowblocks(ip, args, &lockflags); |
1602 | unlock: | |
1603 | if (lockflags) | |
1604 | xfs_iunlock(ip, lockflags); | |
1605 | return error; | |
85c5b270 DW |
1606 | } |
1607 | ||
9669f51d DW |
1608 | /* Background worker that trims preallocated space. */ |
1609 | void | |
1610 | xfs_blockgc_worker( | |
1611 | struct work_struct *work) | |
1612 | { | |
894ecacf DW |
1613 | struct xfs_perag *pag = container_of(to_delayed_work(work), |
1614 | struct xfs_perag, pag_blockgc_work); | |
1615 | struct xfs_mount *mp = pag->pag_mount; | |
9669f51d DW |
1616 | int error; |
1617 | ||
1618 | if (!sb_start_write_trylock(mp->m_super)) | |
1619 | return; | |
894ecacf | 1620 | error = xfs_inode_walk_ag(pag, 0, xfs_blockgc_scan_inode, NULL, |
41956753 | 1621 | XFS_ICI_BLOCKGC_TAG); |
9669f51d | 1622 | if (error) |
894ecacf DW |
1623 | xfs_info(mp, "AG %u preallocation gc worker failed, err=%d", |
1624 | pag->pag_agno, error); | |
9669f51d | 1625 | sb_end_write(mp->m_super); |
894ecacf | 1626 | xfs_blockgc_queue(pag); |
9669f51d DW |
1627 | } |
1628 | ||
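xfs_blockgc_queue(), called at the end of the worker above but defined earlier in this file (not shown in this section), re-arms the per-AG delayed work so the scan repeats periodically while the AG remains tagged. A sketch of the usual shape of such a helper; the workqueue field and delay knob named below are assumptions for illustration, not necessarily the real identifiers:

	/*
	 * Sketch only: requeue the per-AG blockgc work if the AG still has tagged
	 * inodes.  "m_gc_workqueue" and "xfs_blockgc_secs" are assumed names.
	 */
	static void
	example_blockgc_queue(
		struct xfs_perag	*pag)
	{
		rcu_read_lock();
		if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
			queue_delayed_work(pag->pag_mount->m_gc_workqueue,
					&pag->pag_blockgc_work,
					msecs_to_jiffies(xfs_blockgc_secs * 1000));
		rcu_read_unlock();
	}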
85c5b270 DW |
1629 | /* |
1630 | * Try to free space in the filesystem by purging eofblocks and cowblocks. | |
1631 | */ | |
1632 | int | |
1633 | xfs_blockgc_free_space( | |
1634 | struct xfs_mount *mp, | |
1635 | struct xfs_eofblocks *eofb) | |
1636 | { | |
1637 | trace_xfs_blockgc_free_space(mp, eofb, _RET_IP_); | |
1638 | ||
41956753 DW |
1639 | return xfs_inode_walk(mp, 0, xfs_blockgc_scan_inode, eofb, |
1640 | XFS_ICI_BLOCKGC_TAG); | |
85c5b270 DW |
1641 | } |
1642 | ||
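With no UID/GID/PRID filters set in the passed-in xfs_eofblocks, the walk above visits every inode in the filesystem tagged XFS_ICI_BLOCKGC_TAG. A hedged sketch of a caller forcing a synchronous, unfiltered scan as a last-ditch response to low space (the wrapper itself is illustrative, not code from this file):

	/* Illustrative only: flush all speculative preallocations synchronously. */
	static int
	example_reclaim_all_preallocations(
		struct xfs_mount	*mp)
	{
		struct xfs_eofblocks	eofb = {
			.eof_flags	= XFS_EOF_FLAGS_SYNC,
		};

		return xfs_blockgc_free_space(mp, &eofb);
	}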
3d4feec0 | 1643 | /* |
c237dd7c DW |
1644 | * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which |
1645 | * quota caused an allocation failure, so we make a best effort by including | |
1646 | * each quota under low free space conditions (less than 1% free space) in the | |
1647 | * scan. | |
111068f8 DW |
1648 | * |
1649 | * Callers must not hold any inode's ILOCK. If requesting a synchronous scan | |
1650 | * (XFS_EOF_FLAGS_SYNC), the caller also must not hold any inode's IOLOCK or | |
1651 | * MMAPLOCK. | |
3d4feec0 | 1652 | */ |
111068f8 | 1653 | int |
c237dd7c DW |
1654 | xfs_blockgc_free_dquots( |
1655 | struct xfs_mount *mp, | |
1656 | struct xfs_dquot *udqp, | |
1657 | struct xfs_dquot *gdqp, | |
1658 | struct xfs_dquot *pdqp, | |
111068f8 | 1659 | unsigned int eof_flags) |
3d4feec0 DW |
1660 | { |
1661 | struct xfs_eofblocks eofb = {0}; | |
3d4feec0 DW |
1662 | bool do_work = false; |
1663 | ||
c237dd7c DW |
1664 | if (!udqp && !gdqp && !pdqp) |
1665 | return 0; | |
1666 | ||
3d4feec0 | 1667 | /* |
111068f8 DW |
1668 | * Run a scan to free blocks using the union filter to cover all |
1669 | * applicable quotas in a single scan. | |
3d4feec0 | 1670 | */ |
111068f8 | 1671 | eofb.eof_flags = XFS_EOF_FLAGS_UNION | eof_flags; |
3d4feec0 | 1672 | |
c237dd7c DW |
1673 | if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) { |
1674 | eofb.eof_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id); | |
1675 | eofb.eof_flags |= XFS_EOF_FLAGS_UID; | |
1676 | do_work = true; | |
3d4feec0 DW |
1677 | } |
1678 | ||
c237dd7c DW |
1679 | if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) { |
1680 | eofb.eof_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id); | |
1681 | eofb.eof_flags |= XFS_EOF_FLAGS_GID; | |
1682 | do_work = true; | |
3d4feec0 DW |
1683 | } |
1684 | ||
c237dd7c DW |
1685 | if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) { |
1686 | eofb.eof_prid = pdqp->q_id; | |
1687 | eofb.eof_flags |= XFS_EOF_FLAGS_PRID; | |
1688 | do_work = true; | |
3d4feec0 DW |
1689 | } |
1690 | ||
1691 | if (!do_work) | |
111068f8 | 1692 | return 0; |
3d4feec0 | 1693 | |
85c5b270 | 1694 | return xfs_blockgc_free_space(mp, &eofb); |
c237dd7c DW |
1695 | } |
1696 | ||
1697 | /* Run cow/eofblocks scans on the quotas attached to the inode. */ | |
1698 | int | |
1699 | xfs_blockgc_free_quota( | |
1700 | struct xfs_inode *ip, | |
1701 | unsigned int eof_flags) | |
1702 | { | |
1703 | return xfs_blockgc_free_dquots(ip->i_mount, | |
1704 | xfs_inode_dquot(ip, XFS_DQTYPE_USER), | |
1705 | xfs_inode_dquot(ip, XFS_DQTYPE_GROUP), | |
1706 | xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), eof_flags); | |
3d4feec0 | 1707 | } |
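The intended consumers of these helpers are quota/space reservation paths: try the reservation, and if it fails with -EDQUOT or -ENOSPC, purge speculative preallocations charged to the same dquots and retry once. A hedged sketch of that decision, assuming only the xfs_blockgc_free_dquots() signature shown above (the helper below and its retry-once policy are illustrative):

	/*
	 * Illustrative only: decide whether a failed quota reservation is worth
	 * retrying after a synchronous blockgc scan of the same dquots.
	 */
	static bool
	example_should_retry_reservation(
		struct xfs_mount	*mp,
		struct xfs_dquot	*udqp,
		struct xfs_dquot	*gdqp,
		struct xfs_dquot	*pdqp,
		int			error)
	{
		if (error != -EDQUOT && error != -ENOSPC)
			return false;
		/* Purge speculative preallocations charged to these dquots... */
		if (xfs_blockgc_free_dquots(mp, udqp, gdqp, pdqp, XFS_EOF_FLAGS_SYNC))
			return false;
		/* ...and let the caller retry the reservation exactly once. */
		return true;
	}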