Commit | Line | Data |
---|---|---|
fe4fa4b8 DC |
1 | /* |
2 | * Copyright (c) 2000-2005 Silicon Graphics, Inc. | |
3 | * All Rights Reserved. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or | |
6 | * modify it under the terms of the GNU General Public License as | |
7 | * published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope that it would be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License | |
15 | * along with this program; if not, write the Free Software Foundation, | |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | |
17 | */ | |
18 | #include "xfs.h" | |
19 | #include "xfs_fs.h" | |
6ca1c906 | 20 | #include "xfs_format.h" |
239880ef DC |
21 | #include "xfs_log_format.h" |
22 | #include "xfs_trans_resv.h" | |
fe4fa4b8 | 23 | #include "xfs_sb.h" |
fe4fa4b8 | 24 | #include "xfs_mount.h" |
fe4fa4b8 | 25 | #include "xfs_inode.h" |
fe4fa4b8 | 26 | #include "xfs_error.h" |
239880ef DC |
27 | #include "xfs_trans.h" |
28 | #include "xfs_trans_priv.h" | |
fe4fa4b8 | 29 | #include "xfs_inode_item.h" |
7d095257 | 30 | #include "xfs_quota.h" |
0b1b213f | 31 | #include "xfs_trace.h" |
6d8b79cf | 32 | #include "xfs_icache.h" |
c24b5dfa | 33 | #include "xfs_bmap_util.h" |
dc06f398 BF |
34 | #include "xfs_dquot_item.h" |
35 | #include "xfs_dquot.h" | |
83104d44 | 36 | #include "xfs_reflink.h" |
fe4fa4b8 | 37 | |
a167b17e DC |
38 | #include <linux/kthread.h> |
39 | #include <linux/freezer.h> | |
40 | ||
33479e05 DC |
41 | /* |
42 | * Allocate and initialise an xfs_inode. | |
43 | */ | |
638f4416 | 44 | struct xfs_inode * |
33479e05 DC |
45 | xfs_inode_alloc( |
46 | struct xfs_mount *mp, | |
47 | xfs_ino_t ino) | |
48 | { | |
49 | struct xfs_inode *ip; | |
50 | ||
51 | /* | |
52 | * if this didn't occur in transactions, we could use | |
53 | * KM_MAYFAIL and return NULL here on ENOMEM. Set the | |
54 | * code up to do this anyway. | |
55 | */ | |
56 | ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); | |
57 | if (!ip) | |
58 | return NULL; | |
59 | if (inode_init_always(mp->m_super, VFS_I(ip))) { | |
60 | kmem_zone_free(xfs_inode_zone, ip); | |
61 | return NULL; | |
62 | } | |
63 | ||
c19b3b05 DC |
64 | /* VFS doesn't initialise i_mode! */ |
65 | VFS_I(ip)->i_mode = 0; | |
66 | ||
ff6d6af2 | 67 | XFS_STATS_INC(mp, vn_active); |
33479e05 | 68 | ASSERT(atomic_read(&ip->i_pincount) == 0); |
33479e05 DC |
69 | ASSERT(!xfs_isiflocked(ip)); |
70 | ASSERT(ip->i_ino == 0); | |
71 | ||
33479e05 DC |
72 | /* initialise the xfs inode */ |
73 | ip->i_ino = ino; | |
74 | ip->i_mount = mp; | |
75 | memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); | |
76 | ip->i_afp = NULL; | |
3993baeb DW |
77 | ip->i_cowfp = NULL; |
78 | ip->i_cnextents = 0; | |
79 | ip->i_cformat = XFS_DINODE_FMT_EXTENTS; | |
33479e05 DC |
80 | memset(&ip->i_df, 0, sizeof(xfs_ifork_t)); |
81 | ip->i_flags = 0; | |
82 | ip->i_delayed_blks = 0; | |
f8d55aa0 | 83 | memset(&ip->i_d, 0, sizeof(ip->i_d)); |
33479e05 DC |
84 | |
85 | return ip; | |
86 | } | |
87 | ||
88 | STATIC void | |
89 | xfs_inode_free_callback( | |
90 | struct rcu_head *head) | |
91 | { | |
92 | struct inode *inode = container_of(head, struct inode, i_rcu); | |
93 | struct xfs_inode *ip = XFS_I(inode); | |
94 | ||
c19b3b05 | 95 | switch (VFS_I(ip)->i_mode & S_IFMT) { |
33479e05 DC |
96 | case S_IFREG: |
97 | case S_IFDIR: | |
98 | case S_IFLNK: | |
99 | xfs_idestroy_fork(ip, XFS_DATA_FORK); | |
100 | break; | |
101 | } | |
102 | ||
103 | if (ip->i_afp) | |
104 | xfs_idestroy_fork(ip, XFS_ATTR_FORK); | |
3993baeb DW |
105 | if (ip->i_cowfp) |
106 | xfs_idestroy_fork(ip, XFS_COW_FORK); | |
33479e05 DC |
107 | |
108 | if (ip->i_itemp) { | |
109 | ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL)); | |
110 | xfs_inode_item_destroy(ip); | |
111 | ip->i_itemp = NULL; | |
112 | } | |
113 | ||
1f2dcfe8 DC |
114 | kmem_zone_free(xfs_inode_zone, ip); |
115 | } | |
116 | ||
8a17d7dd DC |
117 | static void |
118 | __xfs_inode_free( | |
119 | struct xfs_inode *ip) | |
120 | { | |
121 | /* asserts to verify all state is correct here */ | |
122 | ASSERT(atomic_read(&ip->i_pincount) == 0); | |
8a17d7dd DC |
123 | XFS_STATS_DEC(ip->i_mount, vn_active); |
124 | ||
125 | call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); | |
126 | } | |
127 | ||
1f2dcfe8 DC |
128 | void |
129 | xfs_inode_free( | |
130 | struct xfs_inode *ip) | |
131 | { | |
98efe8af BF |
132 | ASSERT(!xfs_isiflocked(ip)); |
133 | ||
33479e05 DC |
134 | /* |
135 | * Because we use RCU freeing we need to ensure the inode always | |
136 | * appears to be reclaimed with an invalid inode number when in the | |
137 | * free state. The ip->i_flags_lock provides the barrier against lookup | |
138 | * races. | |
139 | */ | |
140 | spin_lock(&ip->i_flags_lock); | |
141 | ip->i_flags = XFS_IRECLAIM; | |
142 | ip->i_ino = 0; | |
143 | spin_unlock(&ip->i_flags_lock); | |
144 | ||
8a17d7dd | 145 | __xfs_inode_free(ip); |
33479e05 DC |
146 | } |
147 | ||
ad438c40 DC |
148 | /* |
149 | * Queue a new inode reclaim pass if there are reclaimable inodes and there | |
150 | * isn't a reclaim pass already in progress. By default it runs every 5s based | |
151 | * on the xfs periodic sync default of 30s. Perhaps this should have its own | |
152 | * tunable, but that can be done if this method proves to be ineffective or too | |
153 | * aggressive. | |
154 | */ | |
155 | static void | |
156 | xfs_reclaim_work_queue( | |
157 | struct xfs_mount *mp) | |
158 | { | |
159 | ||
160 | rcu_read_lock(); | |
161 | if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { | |
162 | queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work, | |
163 | msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10)); | |
164 | } | |
165 | rcu_read_unlock(); | |
166 | } | |
167 | ||
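The requeue delay computed above comes from the xfs_syncd_centisecs sysctl. As a rough illustration (not part of xfs_icache.c, and assuming the usual default of 3000 centiseconds, i.e. 30 seconds), the arithmetic gives the 5 second cadence mentioned in the comment above:

```c
/*
 * Hypothetical helper, for illustration only: mirrors the delay calculation
 * used by xfs_reclaim_work_queue(). The tunable is in centiseconds, so
 * dividing the 30s sync period by 6 and scaling to milliseconds gives 5000ms.
 */
static unsigned int example_reclaim_delay_ms(unsigned int syncd_centisecs)
{
	return syncd_centisecs / 6 * 10;	/* 3000 / 6 * 10 = 5000 ms = 5 s */
}
```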
168 | /* | |
169 | * This is a fast pass over the inode cache to try to get reclaim moving on as | |
170 | * many inodes as possible in a short period of time. It kicks itself every few | |
171 | * seconds, as well as being kicked by the inode cache shrinker when memory | |
172 | * goes low. It scans as quickly as possible avoiding locked inodes or those | |
173 | * already being flushed, and once done schedules a future pass. | |
174 | */ | |
175 | void | |
176 | xfs_reclaim_worker( | |
177 | struct work_struct *work) | |
178 | { | |
179 | struct xfs_mount *mp = container_of(to_delayed_work(work), | |
180 | struct xfs_mount, m_reclaim_work); | |
181 | ||
182 | xfs_reclaim_inodes(mp, SYNC_TRYLOCK); | |
183 | xfs_reclaim_work_queue(mp); | |
184 | } | |
185 | ||
186 | static void | |
187 | xfs_perag_set_reclaim_tag( | |
188 | struct xfs_perag *pag) | |
189 | { | |
190 | struct xfs_mount *mp = pag->pag_mount; | |
191 | ||
95989c46 | 192 | lockdep_assert_held(&pag->pag_ici_lock); |
ad438c40 DC |
193 | if (pag->pag_ici_reclaimable++) |
194 | return; | |
195 | ||
196 | /* propagate the reclaim tag up into the perag radix tree */ | |
197 | spin_lock(&mp->m_perag_lock); | |
198 | radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, | |
199 | XFS_ICI_RECLAIM_TAG); | |
200 | spin_unlock(&mp->m_perag_lock); | |
201 | ||
202 | /* schedule periodic background inode reclaim */ | |
203 | xfs_reclaim_work_queue(mp); | |
204 | ||
205 | trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_); | |
206 | } | |
207 | ||
208 | static void | |
209 | xfs_perag_clear_reclaim_tag( | |
210 | struct xfs_perag *pag) | |
211 | { | |
212 | struct xfs_mount *mp = pag->pag_mount; | |
213 | ||
95989c46 | 214 | lockdep_assert_held(&pag->pag_ici_lock); |
ad438c40 DC |
215 | if (--pag->pag_ici_reclaimable) |
216 | return; | |
217 | ||
218 | /* clear the reclaim tag from the perag radix tree */ | |
219 | spin_lock(&mp->m_perag_lock); | |
220 | radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, | |
221 | XFS_ICI_RECLAIM_TAG); | |
222 | spin_unlock(&mp->m_perag_lock); | |
223 | trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_); | |
224 | } | |
225 | ||
226 | ||
227 | /* | |
228 | * We set the inode flag atomically with the radix tree tag. | |
229 | * Once we get tag lookups on the radix tree, this inode flag | |
230 | * can go away. | |
231 | */ | |
232 | void | |
233 | xfs_inode_set_reclaim_tag( | |
234 | struct xfs_inode *ip) | |
235 | { | |
236 | struct xfs_mount *mp = ip->i_mount; | |
237 | struct xfs_perag *pag; | |
238 | ||
239 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); | |
240 | spin_lock(&pag->pag_ici_lock); | |
241 | spin_lock(&ip->i_flags_lock); | |
242 | ||
243 | radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino), | |
244 | XFS_ICI_RECLAIM_TAG); | |
245 | xfs_perag_set_reclaim_tag(pag); | |
246 | __xfs_iflags_set(ip, XFS_IRECLAIMABLE); | |
247 | ||
248 | spin_unlock(&ip->i_flags_lock); | |
249 | spin_unlock(&pag->pag_ici_lock); | |
250 | xfs_perag_put(pag); | |
251 | } | |
252 | ||
253 | STATIC void | |
254 | xfs_inode_clear_reclaim_tag( | |
255 | struct xfs_perag *pag, | |
256 | xfs_ino_t ino) | |
257 | { | |
258 | radix_tree_tag_clear(&pag->pag_ici_root, | |
259 | XFS_INO_TO_AGINO(pag->pag_mount, ino), | |
260 | XFS_ICI_RECLAIM_TAG); | |
261 | xfs_perag_clear_reclaim_tag(pag); | |
262 | } | |
263 | ||
ae2c4ac2 BF |
264 | static void |
265 | xfs_inew_wait( | |
266 | struct xfs_inode *ip) | |
267 | { | |
268 | wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT); | |
269 | DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT); | |
270 | ||
271 | do { | |
272 | prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE); | |
273 | if (!xfs_iflags_test(ip, XFS_INEW)) | |
274 | break; | |
275 | schedule(); | |
276 | } while (true); | |
277 | finish_wait(wq, &wait.wait); | |
278 | } | |
279 | ||
50997470 DC |
280 | /* |
281 | * When we recycle a reclaimable inode, we need to re-initialise the VFS inode | |
282 | * part of the structure. This is made more complex by the fact we store | |
283 | * information about the on-disk values in the VFS inode and so we can't just | |
83e06f21 | 284 | * overwrite the values unconditionally. Hence we save the parameters we |
50997470 | 285 | * need to retain across reinitialisation, and rewrite them into the VFS inode |
83e06f21 | 286 | * after reinitialisation even if it fails. |
50997470 DC |
287 | */ |
288 | static int | |
289 | xfs_reinit_inode( | |
290 | struct xfs_mount *mp, | |
291 | struct inode *inode) | |
292 | { | |
293 | int error; | |
54d7b5c1 | 294 | uint32_t nlink = inode->i_nlink; |
9e9a2674 | 295 | uint32_t generation = inode->i_generation; |
83e06f21 | 296 | uint64_t version = inode->i_version; |
c19b3b05 | 297 | umode_t mode = inode->i_mode; |
50997470 DC |
298 | |
299 | error = inode_init_always(mp->m_super, inode); | |
300 | ||
54d7b5c1 | 301 | set_nlink(inode, nlink); |
9e9a2674 | 302 | inode->i_generation = generation; |
83e06f21 | 303 | inode->i_version = version; |
c19b3b05 | 304 | inode->i_mode = mode; |
50997470 DC |
305 | return error; |
306 | } | |
307 | ||
33479e05 DC |
308 | /* |
309 | * Check the validity of the inode we just found in the cache | |
310 | */ | |
311 | static int | |
312 | xfs_iget_cache_hit( | |
313 | struct xfs_perag *pag, | |
314 | struct xfs_inode *ip, | |
315 | xfs_ino_t ino, | |
316 | int flags, | |
317 | int lock_flags) __releases(RCU) | |
318 | { | |
319 | struct inode *inode = VFS_I(ip); | |
320 | struct xfs_mount *mp = ip->i_mount; | |
321 | int error; | |
322 | ||
323 | /* | |
324 | * check for re-use of an inode within an RCU grace period due to the | |
325 | * radix tree nodes not being updated yet. We monitor for this by | |
326 | * setting the inode number to zero before freeing the inode structure. | |
327 | * If the inode has been reallocated and set up, then the inode number | |
328 | * will not match, so check for that, too. | |
329 | */ | |
330 | spin_lock(&ip->i_flags_lock); | |
331 | if (ip->i_ino != ino) { | |
332 | trace_xfs_iget_skip(ip); | |
ff6d6af2 | 333 | XFS_STATS_INC(mp, xs_ig_frecycle); |
2451337d | 334 | error = -EAGAIN; |
33479e05 DC |
335 | goto out_error; |
336 | } | |
337 | ||
338 | ||
339 | /* | |
340 | * If we are racing with another cache hit that is currently | |
341 | * instantiating this inode or currently recycling it out of | |
342 | * reclaimable state, wait for the initialisation to complete | |
343 | * before continuing. | |
344 | * | |
345 | * XXX(hch): eventually we should do something equivalent to | |
346 | * wait_on_inode to wait for these flags to be cleared | |
347 | * instead of polling for it. | |
348 | */ | |
349 | if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { | |
350 | trace_xfs_iget_skip(ip); | |
ff6d6af2 | 351 | XFS_STATS_INC(mp, xs_ig_frecycle); |
2451337d | 352 | error = -EAGAIN; |
33479e05 DC |
353 | goto out_error; |
354 | } | |
355 | ||
356 | /* | |
357 | * If lookup is racing with unlink return an error immediately. | |
358 | */ | |
c19b3b05 | 359 | if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) { |
2451337d | 360 | error = -ENOENT; |
33479e05 DC |
361 | goto out_error; |
362 | } | |
363 | ||
364 | /* | |
365 | * If IRECLAIMABLE is set, we've torn down the VFS inode already. | |
366 | * Need to carefully get it back into usable state. | |
367 | */ | |
368 | if (ip->i_flags & XFS_IRECLAIMABLE) { | |
369 | trace_xfs_iget_reclaim(ip); | |
370 | ||
371 | /* | |
372 | * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode | |
373 | * from stomping over us while we recycle the inode. We can't | |
374 | * clear the radix tree reclaimable tag yet as it requires | |
375 | * pag_ici_lock to be held exclusive. | |
376 | */ | |
377 | ip->i_flags |= XFS_IRECLAIM; | |
378 | ||
379 | spin_unlock(&ip->i_flags_lock); | |
380 | rcu_read_unlock(); | |
381 | ||
50997470 | 382 | error = xfs_reinit_inode(mp, inode); |
33479e05 | 383 | if (error) { |
756baca2 | 384 | bool wake; |
33479e05 DC |
385 | /* |
386 | * Re-initializing the inode failed, and we are in deep | |
387 | * trouble. Try to re-add it to the reclaim list. | |
388 | */ | |
389 | rcu_read_lock(); | |
390 | spin_lock(&ip->i_flags_lock); | |
756baca2 | 391 | wake = !!__xfs_iflags_test(ip, XFS_INEW); |
33479e05 | 392 | ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); |
756baca2 BF |
393 | if (wake) |
394 | wake_up_bit(&ip->i_flags, __XFS_INEW_BIT); | |
33479e05 DC |
395 | ASSERT(ip->i_flags & XFS_IRECLAIMABLE); |
396 | trace_xfs_iget_reclaim_fail(ip); | |
397 | goto out_error; | |
398 | } | |
399 | ||
400 | spin_lock(&pag->pag_ici_lock); | |
401 | spin_lock(&ip->i_flags_lock); | |
402 | ||
403 | /* | |
404 | * Clear the per-lifetime state in the inode as we are now | |
405 | * effectively a new inode and need to return to the initial | |
406 | * state before reuse occurs. | |
407 | */ | |
408 | ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; | |
409 | ip->i_flags |= XFS_INEW; | |
545c0889 | 410 | xfs_inode_clear_reclaim_tag(pag, ip->i_ino); |
33479e05 DC |
411 | inode->i_state = I_NEW; |
412 | ||
65523218 CH |
413 | ASSERT(!rwsem_is_locked(&inode->i_rwsem)); |
414 | init_rwsem(&inode->i_rwsem); | |
33479e05 DC |
415 | |
416 | spin_unlock(&ip->i_flags_lock); | |
417 | spin_unlock(&pag->pag_ici_lock); | |
418 | } else { | |
419 | /* If the VFS inode is being torn down, pause and try again. */ | |
420 | if (!igrab(inode)) { | |
421 | trace_xfs_iget_skip(ip); | |
2451337d | 422 | error = -EAGAIN; |
33479e05 DC |
423 | goto out_error; |
424 | } | |
425 | ||
426 | /* We've got a live one. */ | |
427 | spin_unlock(&ip->i_flags_lock); | |
428 | rcu_read_unlock(); | |
429 | trace_xfs_iget_hit(ip); | |
430 | } | |
431 | ||
432 | if (lock_flags != 0) | |
433 | xfs_ilock(ip, lock_flags); | |
434 | ||
435 | xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE); | |
ff6d6af2 | 436 | XFS_STATS_INC(mp, xs_ig_found); |
33479e05 DC |
437 | |
438 | return 0; | |
439 | ||
440 | out_error: | |
441 | spin_unlock(&ip->i_flags_lock); | |
442 | rcu_read_unlock(); | |
443 | return error; | |
444 | } | |
445 | ||
446 | ||
447 | static int | |
448 | xfs_iget_cache_miss( | |
449 | struct xfs_mount *mp, | |
450 | struct xfs_perag *pag, | |
451 | xfs_trans_t *tp, | |
452 | xfs_ino_t ino, | |
453 | struct xfs_inode **ipp, | |
454 | int flags, | |
455 | int lock_flags) | |
456 | { | |
457 | struct xfs_inode *ip; | |
458 | int error; | |
459 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); | |
460 | int iflags; | |
461 | ||
462 | ip = xfs_inode_alloc(mp, ino); | |
463 | if (!ip) | |
2451337d | 464 | return -ENOMEM; |
33479e05 DC |
465 | |
466 | error = xfs_iread(mp, tp, ip, flags); | |
467 | if (error) | |
468 | goto out_destroy; | |
469 | ||
470 | trace_xfs_iget_miss(ip); | |
471 | ||
c19b3b05 | 472 | if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) { |
2451337d | 473 | error = -ENOENT; |
33479e05 DC |
474 | goto out_destroy; |
475 | } | |
476 | ||
477 | /* | |
478 | * Preload the radix tree so we can insert safely under the | |
479 | * write spinlock. Note that we cannot sleep inside the preload | |
480 | * region. Since we can be called from transaction context, don't | |
481 | * recurse into the file system. | |
482 | */ | |
483 | if (radix_tree_preload(GFP_NOFS)) { | |
2451337d | 484 | error = -EAGAIN; |
33479e05 DC |
485 | goto out_destroy; |
486 | } | |
487 | ||
488 | /* | |
489 | * Because the inode hasn't been added to the radix-tree yet it can't | |
490 | * be found by another thread, so we can do the non-sleeping lock here. | |
491 | */ | |
492 | if (lock_flags) { | |
493 | if (!xfs_ilock_nowait(ip, lock_flags)) | |
494 | BUG(); | |
495 | } | |
496 | ||
497 | /* | |
498 | * These values must be set before inserting the inode into the radix | |
499 | * tree as the moment it is inserted a concurrent lookup (allowed by the | |
500 | * RCU locking mechanism) can find it and that lookup must see that this | |
501 | * is an inode currently under construction (i.e. that XFS_INEW is set). | |
502 | * The ip->i_flags_lock that protects the XFS_INEW flag forms the | |
503 | * memory barrier that ensures this detection works correctly at lookup | |
504 | * time. | |
505 | */ | |
506 | iflags = XFS_INEW; | |
507 | if (flags & XFS_IGET_DONTCACHE) | |
508 | iflags |= XFS_IDONTCACHE; | |
113a5683 CS |
509 | ip->i_udquot = NULL; |
510 | ip->i_gdquot = NULL; | |
92f8ff73 | 511 | ip->i_pdquot = NULL; |
33479e05 DC |
512 | xfs_iflags_set(ip, iflags); |
513 | ||
514 | /* insert the new inode */ | |
515 | spin_lock(&pag->pag_ici_lock); | |
516 | error = radix_tree_insert(&pag->pag_ici_root, agino, ip); | |
517 | if (unlikely(error)) { | |
518 | WARN_ON(error != -EEXIST); | |
ff6d6af2 | 519 | XFS_STATS_INC(mp, xs_ig_dup); |
2451337d | 520 | error = -EAGAIN; |
33479e05 DC |
521 | goto out_preload_end; |
522 | } | |
523 | spin_unlock(&pag->pag_ici_lock); | |
524 | radix_tree_preload_end(); | |
525 | ||
526 | *ipp = ip; | |
527 | return 0; | |
528 | ||
529 | out_preload_end: | |
530 | spin_unlock(&pag->pag_ici_lock); | |
531 | radix_tree_preload_end(); | |
532 | if (lock_flags) | |
533 | xfs_iunlock(ip, lock_flags); | |
534 | out_destroy: | |
535 | __destroy_inode(VFS_I(ip)); | |
536 | xfs_inode_free(ip); | |
537 | return error; | |
538 | } | |
539 | ||
540 | /* | |
541 | * Look up an inode by number in the given file system. | |
542 | * The inode is looked up in the cache held in each AG. | |
543 | * If the inode is found in the cache, initialise the vfs inode | |
544 | * if necessary. | |
545 | * | |
546 | * If it is not in core, read it in from the file system's device, | |
547 | * add it to the cache and initialise the vfs inode. | |
548 | * | |
549 | * The inode is locked according to the value of the lock_flags parameter. | |
550 | * This flag parameter indicates how and if the inode's IO lock and inode lock | |
551 | * should be taken. | |
552 | * | |
553 | * mp -- the mount point structure for the current file system. It points | |
554 | * to the inode hash table. | |
555 | * tp -- a pointer to the current transaction if there is one. This is | |
556 | * simply passed through to the xfs_iread() call. | |
557 | * ino -- the number of the inode desired. This is the unique identifier | |
558 | * within the file system for the inode being requested. | |
559 | * lock_flags -- flags indicating how to lock the inode. See the comment | |
560 | * for xfs_ilock() for a list of valid values. | |
561 | */ | |
562 | int | |
563 | xfs_iget( | |
564 | xfs_mount_t *mp, | |
565 | xfs_trans_t *tp, | |
566 | xfs_ino_t ino, | |
567 | uint flags, | |
568 | uint lock_flags, | |
569 | xfs_inode_t **ipp) | |
570 | { | |
571 | xfs_inode_t *ip; | |
572 | int error; | |
573 | xfs_perag_t *pag; | |
574 | xfs_agino_t agino; | |
575 | ||
576 | /* | |
577 | * xfs_reclaim_inode() uses the ILOCK to ensure an inode | |
578 | * doesn't get freed while it's being referenced during a | |
579 | * radix tree traversal here. It assumes this function | |
580 | * acquires only the ILOCK (and therefore it has no need to | |
581 | * involve the IOLOCK in this synchronization). | |
582 | */ | |
583 | ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0); | |
584 | ||
585 | /* reject inode numbers outside existing AGs */ | |
586 | if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) | |
2451337d | 587 | return -EINVAL; |
33479e05 | 588 | |
ff6d6af2 | 589 | XFS_STATS_INC(mp, xs_ig_attempts); |
8774cf8b | 590 | |
33479e05 DC |
591 | /* get the perag structure and ensure that it's inode capable */ |
592 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); | |
593 | agino = XFS_INO_TO_AGINO(mp, ino); | |
594 | ||
595 | again: | |
596 | error = 0; | |
597 | rcu_read_lock(); | |
598 | ip = radix_tree_lookup(&pag->pag_ici_root, agino); | |
599 | ||
600 | if (ip) { | |
601 | error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); | |
602 | if (error) | |
603 | goto out_error_or_again; | |
604 | } else { | |
605 | rcu_read_unlock(); | |
ff6d6af2 | 606 | XFS_STATS_INC(mp, xs_ig_missed); |
33479e05 DC |
607 | |
608 | error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, | |
609 | flags, lock_flags); | |
610 | if (error) | |
611 | goto out_error_or_again; | |
612 | } | |
613 | xfs_perag_put(pag); | |
614 | ||
615 | *ipp = ip; | |
616 | ||
617 | /* | |
58c90473 | 618 | * If we have a real type for an on-disk inode, we can setup the inode |
33479e05 DC |
619 | * now. If it's a new inode being created, xfs_ialloc will handle it. |
620 | */ | |
c19b3b05 | 621 | if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0) |
58c90473 | 622 | xfs_setup_existing_inode(ip); |
33479e05 DC |
623 | return 0; |
624 | ||
625 | out_error_or_again: | |
2451337d | 626 | if (error == -EAGAIN) { |
33479e05 DC |
627 | delay(1); |
628 | goto again; | |
629 | } | |
630 | xfs_perag_put(pag); | |
631 | return error; | |
632 | } | |
633 | ||
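As an illustration of how this interface is typically consumed, the sketch below shows a hypothetical caller (not part of xfs_icache.c) that looks an inode up outside a transaction, inspects it under the shared ILOCK, and then drops the lock and the reference xfs_iget() took:

```c
/* Illustrative sketch only; example_inspect_inode() is a made-up function. */
static int
example_inspect_inode(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
	if (error)
		return error;		/* e.g. -EINVAL or -ENOENT */

	/* ... examine the inode while holding the shared ILOCK ... */

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);			/* drop the reference held on the inode */
	return 0;
}
```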
78ae5256 DC |
634 | /* |
635 | * The inode lookup is done in batches to keep the amount of lock traffic and | |
636 | * radix tree lookups to a minimum. The batch size is a trade-off between | |
637 | * lookup reduction and stack usage. This is in the reclaim path, so we can't | |
638 | * be too greedy. | |
639 | */ | |
640 | #define XFS_LOOKUP_BATCH 32 | |
641 | ||
e13de955 DC |
642 | STATIC int |
643 | xfs_inode_ag_walk_grab( | |
ae2c4ac2 BF |
644 | struct xfs_inode *ip, |
645 | int flags) | |
e13de955 DC |
646 | { |
647 | struct inode *inode = VFS_I(ip); | |
ae2c4ac2 | 648 | bool newinos = !!(flags & XFS_AGITER_INEW_WAIT); |
e13de955 | 649 | |
1a3e8f3d DC |
650 | ASSERT(rcu_read_lock_held()); |
651 | ||
652 | /* | |
653 | * check for stale RCU freed inode | |
654 | * | |
655 | * If the inode has been reallocated, it doesn't matter if it's not in | |
656 | * the AG we are walking - we are walking for writeback, so if it | |
657 | * passes all the "valid inode" checks and is dirty, then we'll write | |
658 | * it back anyway. If it has been reallocated and is still being | |
659 | * initialised, the XFS_INEW check below will catch it. | |
660 | */ | |
661 | spin_lock(&ip->i_flags_lock); | |
662 | if (!ip->i_ino) | |
663 | goto out_unlock_noent; | |
664 | ||
665 | /* avoid new or reclaimable inodes. Leave for reclaim code to flush */ | |
ae2c4ac2 BF |
666 | if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) || |
667 | __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM)) | |
1a3e8f3d DC |
668 | goto out_unlock_noent; |
669 | spin_unlock(&ip->i_flags_lock); | |
670 | ||
e13de955 DC |
671 | /* nothing to sync during shutdown */ |
672 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) | |
2451337d | 673 | return -EFSCORRUPTED; |
e13de955 | 674 | |
e13de955 DC |
675 | /* If we can't grab the inode, it must be on its way to reclaim. */ |
676 | if (!igrab(inode)) | |
2451337d | 677 | return -ENOENT; |
e13de955 | 678 | |
e13de955 DC |
679 | /* inode is valid */ |
680 | return 0; | |
1a3e8f3d DC |
681 | |
682 | out_unlock_noent: | |
683 | spin_unlock(&ip->i_flags_lock); | |
2451337d | 684 | return -ENOENT; |
e13de955 DC |
685 | } |
686 | ||
75f3cb13 DC |
687 | STATIC int |
688 | xfs_inode_ag_walk( | |
689 | struct xfs_mount *mp, | |
5017e97d | 690 | struct xfs_perag *pag, |
e0094008 | 691 | int (*execute)(struct xfs_inode *ip, int flags, |
a454f742 BF |
692 | void *args), |
693 | int flags, | |
694 | void *args, | |
ae2c4ac2 BF |
695 | int tag, |
696 | int iter_flags) | |
75f3cb13 | 697 | { |
75f3cb13 DC |
698 | uint32_t first_index; |
699 | int last_error = 0; | |
700 | int skipped; | |
65d0f205 | 701 | int done; |
78ae5256 | 702 | int nr_found; |
75f3cb13 DC |
703 | |
704 | restart: | |
65d0f205 | 705 | done = 0; |
75f3cb13 DC |
706 | skipped = 0; |
707 | first_index = 0; | |
78ae5256 | 708 | nr_found = 0; |
75f3cb13 | 709 | do { |
78ae5256 | 710 | struct xfs_inode *batch[XFS_LOOKUP_BATCH]; |
75f3cb13 | 711 | int error = 0; |
78ae5256 | 712 | int i; |
75f3cb13 | 713 | |
1a3e8f3d | 714 | rcu_read_lock(); |
a454f742 BF |
715 | |
716 | if (tag == -1) | |
717 | nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, | |
78ae5256 DC |
718 | (void **)batch, first_index, |
719 | XFS_LOOKUP_BATCH); | |
a454f742 BF |
720 | else |
721 | nr_found = radix_tree_gang_lookup_tag( | |
722 | &pag->pag_ici_root, | |
723 | (void **) batch, first_index, | |
724 | XFS_LOOKUP_BATCH, tag); | |
725 | ||
65d0f205 | 726 | if (!nr_found) { |
1a3e8f3d | 727 | rcu_read_unlock(); |
75f3cb13 | 728 | break; |
c8e20be0 | 729 | } |
75f3cb13 | 730 | |
65d0f205 | 731 | /* |
78ae5256 DC |
732 | * Grab the inodes before we drop the lock. If we found |
733 | * nothing, nr == 0 and the loop will be skipped. | |
65d0f205 | 734 | */ |
78ae5256 DC |
735 | for (i = 0; i < nr_found; i++) { |
736 | struct xfs_inode *ip = batch[i]; | |
737 | ||
ae2c4ac2 | 738 | if (done || xfs_inode_ag_walk_grab(ip, iter_flags)) |
78ae5256 DC |
739 | batch[i] = NULL; |
740 | ||
741 | /* | |
1a3e8f3d DC |
742 | * Update the index for the next lookup. Catch |
743 | * overflows into the next AG range which can occur if | |
744 | * we have inodes in the last block of the AG and we | |
745 | * are currently pointing to the last inode. | |
746 | * | |
747 | * Because we may see inodes that are from the wrong AG | |
748 | * due to RCU freeing and reallocation, only update the | |
749 | * index if it lies in this AG. It was a race that led | |
750 | * us to see this inode, so another lookup from the | |
751 | * same index will not find it again. | |
78ae5256 | 752 | */ |
1a3e8f3d DC |
753 | if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno) |
754 | continue; | |
78ae5256 DC |
755 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); |
756 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | |
757 | done = 1; | |
e13de955 | 758 | } |
78ae5256 DC |
759 | |
760 | /* unlock now we've grabbed the inodes. */ | |
1a3e8f3d | 761 | rcu_read_unlock(); |
e13de955 | 762 | |
78ae5256 DC |
763 | for (i = 0; i < nr_found; i++) { |
764 | if (!batch[i]) | |
765 | continue; | |
ae2c4ac2 BF |
766 | if ((iter_flags & XFS_AGITER_INEW_WAIT) && |
767 | xfs_iflags_test(batch[i], XFS_INEW)) | |
768 | xfs_inew_wait(batch[i]); | |
e0094008 | 769 | error = execute(batch[i], flags, args); |
78ae5256 | 770 | IRELE(batch[i]); |
2451337d | 771 | if (error == -EAGAIN) { |
78ae5256 DC |
772 | skipped++; |
773 | continue; | |
774 | } | |
2451337d | 775 | if (error && last_error != -EFSCORRUPTED) |
78ae5256 | 776 | last_error = error; |
75f3cb13 | 777 | } |
c8e20be0 DC |
778 | |
779 | /* bail out if the filesystem is corrupted. */ | |
2451337d | 780 | if (error == -EFSCORRUPTED) |
75f3cb13 DC |
781 | break; |
782 | ||
8daaa831 DC |
783 | cond_resched(); |
784 | ||
78ae5256 | 785 | } while (nr_found && !done); |
75f3cb13 DC |
786 | |
787 | if (skipped) { | |
788 | delay(1); | |
789 | goto restart; | |
790 | } | |
75f3cb13 DC |
791 | return last_error; |
792 | } | |
793 | ||
579b62fa BF |
794 | /* |
795 | * Background scanning to trim post-EOF preallocated space. This is queued | |
b9fe5052 | 796 | * based on the 'speculative_prealloc_lifetime' tunable (5m by default). |
579b62fa | 797 | */ |
fa5a4f57 | 798 | void |
579b62fa BF |
799 | xfs_queue_eofblocks( |
800 | struct xfs_mount *mp) | |
801 | { | |
802 | rcu_read_lock(); | |
803 | if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG)) | |
804 | queue_delayed_work(mp->m_eofblocks_workqueue, | |
805 | &mp->m_eofblocks_work, | |
806 | msecs_to_jiffies(xfs_eofb_secs * 1000)); | |
807 | rcu_read_unlock(); | |
808 | } | |
809 | ||
810 | void | |
811 | xfs_eofblocks_worker( | |
812 | struct work_struct *work) | |
813 | { | |
814 | struct xfs_mount *mp = container_of(to_delayed_work(work), | |
815 | struct xfs_mount, m_eofblocks_work); | |
816 | xfs_icache_free_eofblocks(mp, NULL); | |
817 | xfs_queue_eofblocks(mp); | |
818 | } | |
819 | ||
83104d44 DW |
820 | /* |
821 | * Background scanning to trim preallocated CoW space. This is queued | |
822 | * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default). | |
823 | * (We'll just piggyback on the post-EOF prealloc space workqueue.) | |
824 | */ | |
825 | STATIC void | |
826 | xfs_queue_cowblocks( | |
827 | struct xfs_mount *mp) | |
828 | { | |
829 | rcu_read_lock(); | |
830 | if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG)) | |
831 | queue_delayed_work(mp->m_eofblocks_workqueue, | |
832 | &mp->m_cowblocks_work, | |
833 | msecs_to_jiffies(xfs_cowb_secs * 1000)); | |
834 | rcu_read_unlock(); | |
835 | } | |
836 | ||
837 | void | |
838 | xfs_cowblocks_worker( | |
839 | struct work_struct *work) | |
840 | { | |
841 | struct xfs_mount *mp = container_of(to_delayed_work(work), | |
842 | struct xfs_mount, m_cowblocks_work); | |
843 | xfs_icache_free_cowblocks(mp, NULL); | |
844 | xfs_queue_cowblocks(mp); | |
845 | } | |
846 | ||
fe588ed3 | 847 | int |
ae2c4ac2 | 848 | xfs_inode_ag_iterator_flags( |
75f3cb13 | 849 | struct xfs_mount *mp, |
e0094008 | 850 | int (*execute)(struct xfs_inode *ip, int flags, |
a454f742 BF |
851 | void *args), |
852 | int flags, | |
ae2c4ac2 BF |
853 | void *args, |
854 | int iter_flags) | |
75f3cb13 | 855 | { |
16fd5367 | 856 | struct xfs_perag *pag; |
75f3cb13 DC |
857 | int error = 0; |
858 | int last_error = 0; | |
859 | xfs_agnumber_t ag; | |
860 | ||
16fd5367 | 861 | ag = 0; |
65d0f205 DC |
862 | while ((pag = xfs_perag_get(mp, ag))) { |
863 | ag = pag->pag_agno + 1; | |
ae2c4ac2 BF |
864 | error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1, |
865 | iter_flags); | |
a454f742 BF |
866 | xfs_perag_put(pag); |
867 | if (error) { | |
868 | last_error = error; | |
2451337d | 869 | if (error == -EFSCORRUPTED) |
a454f742 BF |
870 | break; |
871 | } | |
872 | } | |
b474c7ae | 873 | return last_error; |
a454f742 BF |
874 | } |
875 | ||
ae2c4ac2 BF |
876 | int |
877 | xfs_inode_ag_iterator( | |
878 | struct xfs_mount *mp, | |
879 | int (*execute)(struct xfs_inode *ip, int flags, | |
880 | void *args), | |
881 | int flags, | |
882 | void *args) | |
883 | { | |
884 | return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0); | |
885 | } | |
886 | ||
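For illustration, a hypothetical callback (not part of xfs_icache.c) matching the execute signature could be handed to xfs_inode_ag_iterator() to visit every grabbable inode in the cache; the walk code above takes and drops the inode reference around each call:

```c
/* Illustrative sketch only; example_count_inode() is a made-up callback. */
STATIC int
example_count_inode(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int			*count = args;

	(*count)++;
	return 0;
}

/*
 * A hypothetical caller would then do something like:
 *	int count = 0;
 *	error = xfs_inode_ag_iterator(mp, example_count_inode, 0, &count);
 */
```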
a454f742 BF |
887 | int |
888 | xfs_inode_ag_iterator_tag( | |
889 | struct xfs_mount *mp, | |
e0094008 | 890 | int (*execute)(struct xfs_inode *ip, int flags, |
a454f742 BF |
891 | void *args), |
892 | int flags, | |
893 | void *args, | |
894 | int tag) | |
895 | { | |
896 | struct xfs_perag *pag; | |
897 | int error = 0; | |
898 | int last_error = 0; | |
899 | xfs_agnumber_t ag; | |
900 | ||
901 | ag = 0; | |
902 | while ((pag = xfs_perag_get_tag(mp, ag, tag))) { | |
903 | ag = pag->pag_agno + 1; | |
ae2c4ac2 BF |
904 | error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag, |
905 | 0); | |
5017e97d | 906 | xfs_perag_put(pag); |
75f3cb13 DC |
907 | if (error) { |
908 | last_error = error; | |
2451337d | 909 | if (error == -EFSCORRUPTED) |
75f3cb13 DC |
910 | break; |
911 | } | |
912 | } | |
b474c7ae | 913 | return last_error; |
75f3cb13 DC |
914 | } |
915 | ||
e3a20c0b DC |
916 | /* |
917 | * Grab the inode for reclaim exclusively. | |
918 | * Return 0 if we grabbed it, non-zero otherwise. | |
919 | */ | |
920 | STATIC int | |
921 | xfs_reclaim_inode_grab( | |
922 | struct xfs_inode *ip, | |
923 | int flags) | |
924 | { | |
1a3e8f3d DC |
925 | ASSERT(rcu_read_lock_held()); |
926 | ||
927 | /* quick check for stale RCU freed inode */ | |
928 | if (!ip->i_ino) | |
929 | return 1; | |
e3a20c0b DC |
930 | |
931 | /* | |
474fce06 CH |
932 | * If we are asked for non-blocking operation, do unlocked checks to |
933 | * see if the inode already is being flushed or in reclaim to avoid | |
934 | * lock traffic. | |
e3a20c0b DC |
935 | */ |
936 | if ((flags & SYNC_TRYLOCK) && | |
474fce06 | 937 | __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM)) |
e3a20c0b | 938 | return 1; |
e3a20c0b DC |
939 | |
940 | /* | |
941 | * The radix tree lock here protects a thread in xfs_iget from racing | |
942 | * with us starting reclaim on the inode. Once we have the | |
943 | * XFS_IRECLAIM flag set it will not touch us. | |
1a3e8f3d DC |
944 | * |
945 | * Due to RCU lookup, we may find inodes that have been freed and only | |
946 | * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that | |
947 | * aren't candidates for reclaim at all, so we must check that | |
948 | * XFS_IRECLAIMABLE is set first before proceeding to reclaim. | |
e3a20c0b DC |
949 | */ |
950 | spin_lock(&ip->i_flags_lock); | |
1a3e8f3d DC |
951 | if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || |
952 | __xfs_iflags_test(ip, XFS_IRECLAIM)) { | |
953 | /* not a reclaim candidate. */ | |
e3a20c0b DC |
954 | spin_unlock(&ip->i_flags_lock); |
955 | return 1; | |
956 | } | |
957 | __xfs_iflags_set(ip, XFS_IRECLAIM); | |
958 | spin_unlock(&ip->i_flags_lock); | |
959 | return 0; | |
960 | } | |
961 | ||
777df5af | 962 | /* |
8a48088f CH |
963 | * Inodes in different states need to be treated differently. The following |
964 | * table lists the inode states and the reclaim actions necessary: | |
777df5af DC |
965 | * |
966 | * inode state iflush ret required action | |
967 | * --------------- ---------- --------------- | |
968 | * bad - reclaim | |
969 | * shutdown EIO unpin and reclaim | |
970 | * clean, unpinned 0 reclaim | |
971 | * stale, unpinned 0 reclaim | |
c854363e DC |
972 | * clean, pinned(*) 0 requeue |
973 | * stale, pinned EAGAIN requeue | |
8a48088f CH |
974 | * dirty, async - requeue |
975 | * dirty, sync 0 reclaim | |
777df5af DC |
976 | * |
977 | * (*) dgc: I don't think the clean, pinned state is possible but it gets | |
978 | * handled anyway given the order of checks implemented. | |
979 | * | |
c854363e DC |
980 | * Also, because we get the flush lock first, we know that any inode that has |
981 | * been flushed delwri has had the flush completed by the time we check that | |
8a48088f | 982 | * the inode is clean. |
c854363e | 983 | * |
8a48088f CH |
984 | * Note that because the inode is flushed delayed write by AIL pushing, the |
985 | * flush lock may already be held here and waiting on it can result in very | |
986 | * long latencies. Hence for sync reclaims, where we wait on the flush lock, | |
987 | * the caller should push the AIL first before trying to reclaim inodes to | |
988 | * minimise the amount of time spent waiting. For background reclaim, we only | |
989 | * bother to reclaim clean inodes anyway. | |
c854363e | 990 | * |
777df5af DC |
991 | * Hence the order of actions after gaining the locks should be: |
992 | * bad => reclaim | |
993 | * shutdown => unpin and reclaim | |
8a48088f | 994 | * pinned, async => requeue |
c854363e | 995 | * pinned, sync => unpin |
777df5af DC |
996 | * stale => reclaim |
997 | * clean => reclaim | |
8a48088f | 998 | * dirty, async => requeue |
c854363e | 999 | * dirty, sync => flush, wait and reclaim |
777df5af | 1000 | */ |
75f3cb13 | 1001 | STATIC int |
c8e20be0 | 1002 | xfs_reclaim_inode( |
75f3cb13 DC |
1003 | struct xfs_inode *ip, |
1004 | struct xfs_perag *pag, | |
c8e20be0 | 1005 | int sync_mode) |
fce08f2f | 1006 | { |
4c46819a | 1007 | struct xfs_buf *bp = NULL; |
8a17d7dd | 1008 | xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ |
4c46819a | 1009 | int error; |
777df5af | 1010 | |
1bfd8d04 DC |
1011 | restart: |
1012 | error = 0; | |
c8e20be0 | 1013 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
c854363e DC |
1014 | if (!xfs_iflock_nowait(ip)) { |
1015 | if (!(sync_mode & SYNC_WAIT)) | |
1016 | goto out; | |
1017 | xfs_iflock(ip); | |
1018 | } | |
7a3be02b | 1019 | |
777df5af DC |
1020 | if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { |
1021 | xfs_iunpin_wait(ip); | |
98efe8af | 1022 | /* xfs_iflush_abort() drops the flush lock */ |
04913fdd | 1023 | xfs_iflush_abort(ip, false); |
777df5af DC |
1024 | goto reclaim; |
1025 | } | |
c854363e | 1026 | if (xfs_ipincount(ip)) { |
8a48088f CH |
1027 | if (!(sync_mode & SYNC_WAIT)) |
1028 | goto out_ifunlock; | |
777df5af | 1029 | xfs_iunpin_wait(ip); |
c854363e | 1030 | } |
98efe8af BF |
1031 | if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) { |
1032 | xfs_ifunlock(ip); | |
777df5af | 1033 | goto reclaim; |
98efe8af | 1034 | } |
777df5af | 1035 | |
8a48088f CH |
1036 | /* |
1037 | * Never flush out dirty data during non-blocking reclaim, as it would | |
1038 | * just contend with AIL pushing trying to do the same job. | |
1039 | */ | |
1040 | if (!(sync_mode & SYNC_WAIT)) | |
1041 | goto out_ifunlock; | |
1042 | ||
1bfd8d04 DC |
1043 | /* |
1044 | * Now we have an inode that needs flushing. | |
1045 | * | |
4c46819a | 1046 | * Note that xfs_iflush will never block on the inode buffer lock, as |
1bfd8d04 | 1047 | * xfs_ifree_cluster() can lock the inode buffer before it locks the |
4c46819a | 1048 | * ip->i_lock, and we are doing the exact opposite here. As a result, |
475ee413 CH |
1049 | * doing a blocking xfs_imap_to_bp() to get the cluster buffer would |
1050 | * result in an ABBA deadlock with xfs_ifree_cluster(). | |
1bfd8d04 DC |
1051 | * |
1052 | * As xfs_ifree_cluster() must gather all inodes that are active in the | |
1053 | * cache to mark them stale, if we hit this case we don't actually want | |
1054 | * to do IO here - we want the inode marked stale so we can simply | |
4c46819a CH |
1055 | * reclaim it. Hence if we get an EAGAIN error here, just unlock the |
1056 | * inode, back off and try again. Hopefully the next pass through will | |
1057 | * see the stale flag set on the inode. | |
1bfd8d04 | 1058 | */ |
4c46819a | 1059 | error = xfs_iflush(ip, &bp); |
2451337d | 1060 | if (error == -EAGAIN) { |
8a48088f CH |
1061 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
1062 | /* backoff longer than in xfs_ifree_cluster */ | |
1063 | delay(2); | |
1064 | goto restart; | |
c854363e | 1065 | } |
c854363e | 1066 | |
4c46819a CH |
1067 | if (!error) { |
1068 | error = xfs_bwrite(bp); | |
1069 | xfs_buf_relse(bp); | |
1070 | } | |
1071 | ||
777df5af | 1072 | reclaim: |
98efe8af BF |
1073 | ASSERT(!xfs_isiflocked(ip)); |
1074 | ||
8a17d7dd DC |
1075 | /* |
1076 | * Because we use RCU freeing we need to ensure the inode always appears | |
1077 | * to be reclaimed with an invalid inode number when in the free state. | |
98efe8af BF |
1078 | * We do this as early as possible under the ILOCK so that |
1079 | * xfs_iflush_cluster() can be guaranteed to detect races with us here. | |
1080 | * By doing this, we guarantee that once xfs_iflush_cluster has locked | |
1081 | * XFS_ILOCK that it will see either a valid, flushable inode that will | |
1082 | * serialise correctly, or it will see a clean (and invalid) inode that | |
1083 | * it can skip. | |
8a17d7dd DC |
1084 | */ |
1085 | spin_lock(&ip->i_flags_lock); | |
1086 | ip->i_flags = XFS_IRECLAIM; | |
1087 | ip->i_ino = 0; | |
1088 | spin_unlock(&ip->i_flags_lock); | |
1089 | ||
c8e20be0 | 1090 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
2f11feab | 1091 | |
ff6d6af2 | 1092 | XFS_STATS_INC(ip->i_mount, xs_ig_reclaims); |
2f11feab DC |
1093 | /* |
1094 | * Remove the inode from the per-AG radix tree. | |
1095 | * | |
1096 | * Because radix_tree_delete won't complain even if the item was never | |
1097 | * added to the tree assert that it's been there before to catch | |
1098 | * problems with the inode life time early on. | |
1099 | */ | |
1a427ab0 | 1100 | spin_lock(&pag->pag_ici_lock); |
2f11feab | 1101 | if (!radix_tree_delete(&pag->pag_ici_root, |
8a17d7dd | 1102 | XFS_INO_TO_AGINO(ip->i_mount, ino))) |
2f11feab | 1103 | ASSERT(0); |
545c0889 | 1104 | xfs_perag_clear_reclaim_tag(pag); |
1a427ab0 | 1105 | spin_unlock(&pag->pag_ici_lock); |
2f11feab DC |
1106 | |
1107 | /* | |
1108 | * Here we do an (almost) spurious inode lock in order to coordinate | |
1109 | * with inode cache radix tree lookups. This is because the lookup | |
1110 | * can reference the inodes in the cache without taking references. | |
1111 | * | |
1112 | * We make that OK here by ensuring that we wait until the inode is | |
ad637a10 | 1113 | * unlocked after the lookup before we go ahead and free it. |
2f11feab | 1114 | */ |
ad637a10 | 1115 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
2f11feab | 1116 | xfs_qm_dqdetach(ip); |
ad637a10 | 1117 | xfs_iunlock(ip, XFS_ILOCK_EXCL); |
2f11feab | 1118 | |
8a17d7dd | 1119 | __xfs_inode_free(ip); |
ad637a10 | 1120 | return error; |
8a48088f CH |
1121 | |
1122 | out_ifunlock: | |
1123 | xfs_ifunlock(ip); | |
1124 | out: | |
1125 | xfs_iflags_clear(ip, XFS_IRECLAIM); | |
1126 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
1127 | /* | |
2451337d | 1128 | * We could return -EAGAIN here to make reclaim rescan the inode tree in |
8a48088f | 1129 | * a short while. However, this just burns CPU time scanning the tree |
5889608d DC |
1130 | * waiting for IO to complete and the reclaim work never goes back to |
1131 | * the idle state. Instead, return 0 to let the next scheduled | |
1132 | * background reclaim attempt to reclaim the inode again. | |
8a48088f CH |
1133 | */ |
1134 | return 0; | |
7a3be02b DC |
1135 | } |
1136 | ||
65d0f205 DC |
1137 | /* |
1138 | * Walk the AGs and reclaim the inodes in them. Even if the filesystem is | |
1139 | * corrupted, we still want to try to reclaim all the inodes. If we don't, | |
1140 | * then a shutdown during the filesystem unmount reclaim walk will leak all the | |
1141 | * unreclaimed inodes. | |
1142 | */ | |
33479e05 | 1143 | STATIC int |
65d0f205 DC |
1144 | xfs_reclaim_inodes_ag( |
1145 | struct xfs_mount *mp, | |
1146 | int flags, | |
1147 | int *nr_to_scan) | |
1148 | { | |
1149 | struct xfs_perag *pag; | |
1150 | int error = 0; | |
1151 | int last_error = 0; | |
1152 | xfs_agnumber_t ag; | |
69b491c2 DC |
1153 | int trylock = flags & SYNC_TRYLOCK; |
1154 | int skipped; | |
65d0f205 | 1155 | |
69b491c2 | 1156 | restart: |
65d0f205 | 1157 | ag = 0; |
69b491c2 | 1158 | skipped = 0; |
65d0f205 DC |
1159 | while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { |
1160 | unsigned long first_index = 0; | |
1161 | int done = 0; | |
e3a20c0b | 1162 | int nr_found = 0; |
65d0f205 DC |
1163 | |
1164 | ag = pag->pag_agno + 1; | |
1165 | ||
69b491c2 DC |
1166 | if (trylock) { |
1167 | if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { | |
1168 | skipped++; | |
f83282a8 | 1169 | xfs_perag_put(pag); |
69b491c2 DC |
1170 | continue; |
1171 | } | |
1172 | first_index = pag->pag_ici_reclaim_cursor; | |
1173 | } else | |
1174 | mutex_lock(&pag->pag_ici_reclaim_lock); | |
1175 | ||
65d0f205 | 1176 | do { |
e3a20c0b DC |
1177 | struct xfs_inode *batch[XFS_LOOKUP_BATCH]; |
1178 | int i; | |
65d0f205 | 1179 | |
1a3e8f3d | 1180 | rcu_read_lock(); |
e3a20c0b DC |
1181 | nr_found = radix_tree_gang_lookup_tag( |
1182 | &pag->pag_ici_root, | |
1183 | (void **)batch, first_index, | |
1184 | XFS_LOOKUP_BATCH, | |
65d0f205 DC |
1185 | XFS_ICI_RECLAIM_TAG); |
1186 | if (!nr_found) { | |
b2232219 | 1187 | done = 1; |
1a3e8f3d | 1188 | rcu_read_unlock(); |
65d0f205 DC |
1189 | break; |
1190 | } | |
1191 | ||
1192 | /* | |
e3a20c0b DC |
1193 | * Grab the inodes before we drop the lock. If we found |
1194 | * nothing, nr == 0 and the loop will be skipped. | |
65d0f205 | 1195 | */ |
e3a20c0b DC |
1196 | for (i = 0; i < nr_found; i++) { |
1197 | struct xfs_inode *ip = batch[i]; | |
1198 | ||
1199 | if (done || xfs_reclaim_inode_grab(ip, flags)) | |
1200 | batch[i] = NULL; | |
1201 | ||
1202 | /* | |
1203 | * Update the index for the next lookup. Catch | |
1204 | * overflows into the next AG range which can | |
1205 | * occur if we have inodes in the last block of | |
1206 | * the AG and we are currently pointing to the | |
1207 | * last inode. | |
1a3e8f3d DC |
1208 | * |
1209 | * Because we may see inodes that are from the | |
1210 | * wrong AG due to RCU freeing and | |
1211 | * reallocation, only update the index if it | |
1212 | * lies in this AG. It was a race that lead us | |
1213 | * to see this inode, so another lookup from | |
1214 | * the same index will not find it again. | |
e3a20c0b | 1215 | */ |
1a3e8f3d DC |
1216 | if (XFS_INO_TO_AGNO(mp, ip->i_ino) != |
1217 | pag->pag_agno) | |
1218 | continue; | |
e3a20c0b DC |
1219 | first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1); |
1220 | if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) | |
1221 | done = 1; | |
1222 | } | |
65d0f205 | 1223 | |
e3a20c0b | 1224 | /* unlock now we've grabbed the inodes. */ |
1a3e8f3d | 1225 | rcu_read_unlock(); |
e3a20c0b DC |
1226 | |
1227 | for (i = 0; i < nr_found; i++) { | |
1228 | if (!batch[i]) | |
1229 | continue; | |
1230 | error = xfs_reclaim_inode(batch[i], pag, flags); | |
2451337d | 1231 | if (error && last_error != -EFSCORRUPTED) |
e3a20c0b DC |
1232 | last_error = error; |
1233 | } | |
1234 | ||
1235 | *nr_to_scan -= XFS_LOOKUP_BATCH; | |
65d0f205 | 1236 | |
8daaa831 DC |
1237 | cond_resched(); |
1238 | ||
e3a20c0b | 1239 | } while (nr_found && !done && *nr_to_scan > 0); |
65d0f205 | 1240 | |
69b491c2 DC |
1241 | if (trylock && !done) |
1242 | pag->pag_ici_reclaim_cursor = first_index; | |
1243 | else | |
1244 | pag->pag_ici_reclaim_cursor = 0; | |
1245 | mutex_unlock(&pag->pag_ici_reclaim_lock); | |
65d0f205 DC |
1246 | xfs_perag_put(pag); |
1247 | } | |
69b491c2 DC |
1248 | |
1249 | /* | |
1250 | * If we skipped any AG, and we still have scan count remaining, do | |
1251 | * another pass this time using blocking reclaim semantics (i.e. | |
1252 | * waiting on the reclaim locks and ignoring the reclaim cursors). This | |
1253 | * ensures that when we get more reclaimers than AGs we block rather | |
1254 | * than spin trying to execute reclaim. | |
1255 | */ | |
8daaa831 | 1256 | if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) { |
69b491c2 DC |
1257 | trylock = 0; |
1258 | goto restart; | |
1259 | } | |
b474c7ae | 1260 | return last_error; |
65d0f205 DC |
1261 | } |
1262 | ||
7a3be02b DC |
1263 | int |
1264 | xfs_reclaim_inodes( | |
1265 | xfs_mount_t *mp, | |
7a3be02b DC |
1266 | int mode) |
1267 | { | |
65d0f205 DC |
1268 | int nr_to_scan = INT_MAX; |
1269 | ||
1270 | return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan); | |
9bf729c0 DC |
1271 | } |
1272 | ||
1273 | /* | |
8daaa831 | 1274 | * Scan a certain number of inodes for reclaim. |
a7b339f1 DC |
1275 | * |
1276 | * When called we make sure that there is a background (fast) inode reclaim in | |
8daaa831 | 1277 | * progress, while we throttle the speed of reclaim by doing synchronous |
a7b339f1 DC |
1278 | * reclaim of inodes. That means if we come across dirty inodes, we wait for |
1279 | * them to be cleaned, which we hope will not be very long due to the | |
1280 | * background walker having already kicked the IO off on those dirty inodes. | |
9bf729c0 | 1281 | */ |
0a234c6d | 1282 | long |
8daaa831 DC |
1283 | xfs_reclaim_inodes_nr( |
1284 | struct xfs_mount *mp, | |
1285 | int nr_to_scan) | |
9bf729c0 | 1286 | { |
8daaa831 | 1287 | /* kick background reclaimer and push the AIL */ |
5889608d | 1288 | xfs_reclaim_work_queue(mp); |
8daaa831 | 1289 | xfs_ail_push_all(mp->m_ail); |
a7b339f1 | 1290 | |
0a234c6d | 1291 | return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan); |
8daaa831 | 1292 | } |
9bf729c0 | 1293 | |
8daaa831 DC |
1294 | /* |
1295 | * Return the number of reclaimable inodes in the filesystem for | |
1296 | * the shrinker to determine how much to reclaim. | |
1297 | */ | |
1298 | int | |
1299 | xfs_reclaim_inodes_count( | |
1300 | struct xfs_mount *mp) | |
1301 | { | |
1302 | struct xfs_perag *pag; | |
1303 | xfs_agnumber_t ag = 0; | |
1304 | int reclaimable = 0; | |
9bf729c0 | 1305 | |
65d0f205 DC |
1306 | while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { |
1307 | ag = pag->pag_agno + 1; | |
70e60ce7 DC |
1308 | reclaimable += pag->pag_ici_reclaimable; |
1309 | xfs_perag_put(pag); | |
9bf729c0 | 1310 | } |
9bf729c0 DC |
1311 | return reclaimable; |
1312 | } | |
1313 | ||
3e3f9f58 BF |
1314 | STATIC int |
1315 | xfs_inode_match_id( | |
1316 | struct xfs_inode *ip, | |
1317 | struct xfs_eofblocks *eofb) | |
1318 | { | |
b9fe5052 DE |
1319 | if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && |
1320 | !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) | |
1b556048 | 1321 | return 0; |
3e3f9f58 | 1322 | |
b9fe5052 DE |
1323 | if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && |
1324 | !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) | |
1b556048 BF |
1325 | return 0; |
1326 | ||
b9fe5052 | 1327 | if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && |
1b556048 BF |
1328 | xfs_get_projid(ip) != eofb->eof_prid) |
1329 | return 0; | |
1330 | ||
1331 | return 1; | |
3e3f9f58 BF |
1332 | } |
1333 | ||
f4526397 BF |
1334 | /* |
1335 | * A union-based inode filtering algorithm. Process the inode if any of the | |
1336 | * criteria match. This is for global/internal scans only. | |
1337 | */ | |
1338 | STATIC int | |
1339 | xfs_inode_match_id_union( | |
1340 | struct xfs_inode *ip, | |
1341 | struct xfs_eofblocks *eofb) | |
1342 | { | |
1343 | if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) && | |
1344 | uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid)) | |
1345 | return 1; | |
1346 | ||
1347 | if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) && | |
1348 | gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid)) | |
1349 | return 1; | |
1350 | ||
1351 | if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) && | |
1352 | xfs_get_projid(ip) == eofb->eof_prid) | |
1353 | return 1; | |
1354 | ||
1355 | return 0; | |
1356 | } | |
1357 | ||
41176a68 BF |
1358 | STATIC int |
1359 | xfs_inode_free_eofblocks( | |
1360 | struct xfs_inode *ip, | |
41176a68 BF |
1361 | int flags, |
1362 | void *args) | |
1363 | { | |
a36b9261 | 1364 | int ret = 0; |
3e3f9f58 | 1365 | struct xfs_eofblocks *eofb = args; |
f4526397 | 1366 | int match; |
5400da7d | 1367 | |
41176a68 BF |
1368 | if (!xfs_can_free_eofblocks(ip, false)) { |
1369 | /* inode could be preallocated or append-only */ | |
1370 | trace_xfs_inode_free_eofblocks_invalid(ip); | |
1371 | xfs_inode_clear_eofblocks_tag(ip); | |
1372 | return 0; | |
1373 | } | |
1374 | ||
1375 | /* | |
1376 | * If the mapping is dirty the operation can block and wait for some | |
1377 | * time. Unless we are waiting, skip it. | |
1378 | */ | |
1379 | if (!(flags & SYNC_WAIT) && | |
1380 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY)) | |
1381 | return 0; | |
1382 | ||
00ca79a0 | 1383 | if (eofb) { |
f4526397 BF |
1384 | if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) |
1385 | match = xfs_inode_match_id_union(ip, eofb); | |
1386 | else | |
1387 | match = xfs_inode_match_id(ip, eofb); | |
1388 | if (!match) | |
00ca79a0 BF |
1389 | return 0; |
1390 | ||
1391 | /* skip the inode if the file size is too small */ | |
1392 | if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && | |
1393 | XFS_ISIZE(ip) < eofb->eof_min_file_size) | |
1394 | return 0; | |
1395 | } | |
3e3f9f58 | 1396 | |
a36b9261 BF |
1397 | /* |
1398 | * If the caller is waiting, return -EAGAIN to keep the background | |
1399 | * scanner moving and revisit the inode in a subsequent pass. | |
1400 | */ | |
c3155097 | 1401 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) { |
a36b9261 BF |
1402 | if (flags & SYNC_WAIT) |
1403 | ret = -EAGAIN; | |
1404 | return ret; | |
1405 | } | |
1406 | ret = xfs_free_eofblocks(ip); | |
c3155097 | 1407 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); |
41176a68 BF |
1408 | |
1409 | return ret; | |
1410 | } | |
1411 | ||
83104d44 DW |
1412 | static int |
1413 | __xfs_icache_free_eofblocks( | |
41176a68 | 1414 | struct xfs_mount *mp, |
83104d44 DW |
1415 | struct xfs_eofblocks *eofb, |
1416 | int (*execute)(struct xfs_inode *ip, int flags, | |
1417 | void *args), | |
1418 | int tag) | |
41176a68 | 1419 | { |
8ca149de BF |
1420 | int flags = SYNC_TRYLOCK; |
1421 | ||
1422 | if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC)) | |
1423 | flags = SYNC_WAIT; | |
1424 | ||
83104d44 DW |
1425 | return xfs_inode_ag_iterator_tag(mp, execute, flags, |
1426 | eofb, tag); | |
1427 | } | |
1428 | ||
1429 | int | |
1430 | xfs_icache_free_eofblocks( | |
1431 | struct xfs_mount *mp, | |
1432 | struct xfs_eofblocks *eofb) | |
1433 | { | |
1434 | return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks, | |
1435 | XFS_ICI_EOFBLOCKS_TAG); | |
41176a68 BF |
1436 | } |
1437 | ||
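As a hypothetical usage sketch (not part of xfs_icache.c), a caller could build an xfs_eofblocks filter and pass it to xfs_icache_free_eofblocks() to trim post-EOF preallocations belonging to one user, waiting on dirty mappings rather than skipping them:

```c
/* Illustrative sketch only; example_trim_user_eofblocks() is a made-up helper. */
static int
example_trim_user_eofblocks(
	struct xfs_mount	*mp,
	kuid_t			uid)
{
	struct xfs_eofblocks	eofb = { 0 };

	eofb.eof_flags = XFS_EOF_FLAGS_UID | XFS_EOF_FLAGS_SYNC;
	eofb.eof_uid = uid;

	return xfs_icache_free_eofblocks(mp, &eofb);
}
```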
dc06f398 BF |
1438 | /* |
1439 | * Run eofblocks scans on the quotas applicable to the inode. For inodes with | |
1440 | * multiple quotas, we don't know exactly which quota caused an allocation | |
1441 | * failure. We make a best effort by including each quota under low free space | |
1442 | * conditions (less than 1% free space) in the scan. | |
1443 | */ | |
83104d44 DW |
1444 | static int |
1445 | __xfs_inode_free_quota_eofblocks( | |
1446 | struct xfs_inode *ip, | |
1447 | int (*execute)(struct xfs_mount *mp, | |
1448 | struct xfs_eofblocks *eofb)) | |
dc06f398 BF |
1449 | { |
1450 | int scan = 0; | |
1451 | struct xfs_eofblocks eofb = {0}; | |
1452 | struct xfs_dquot *dq; | |
1453 | ||
dc06f398 | 1454 | /* |
c3155097 | 1455 | * Run a sync scan to increase effectiveness and use the union filter to |
dc06f398 BF |
1456 | * cover all applicable quotas in a single scan. |
1457 | */ | |
dc06f398 BF |
1458 | eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC; |
1459 | ||
1460 | if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) { | |
1461 | dq = xfs_inode_dquot(ip, XFS_DQ_USER); | |
1462 | if (dq && xfs_dquot_lowsp(dq)) { | |
1463 | eofb.eof_uid = VFS_I(ip)->i_uid; | |
1464 | eofb.eof_flags |= XFS_EOF_FLAGS_UID; | |
1465 | scan = 1; | |
1466 | } | |
1467 | } | |
1468 | ||
1469 | if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) { | |
1470 | dq = xfs_inode_dquot(ip, XFS_DQ_GROUP); | |
1471 | if (dq && xfs_dquot_lowsp(dq)) { | |
1472 | eofb.eof_gid = VFS_I(ip)->i_gid; | |
1473 | eofb.eof_flags |= XFS_EOF_FLAGS_GID; | |
1474 | scan = 1; | |
1475 | } | |
1476 | } | |
1477 | ||
1478 | if (scan) | |
83104d44 | 1479 | execute(ip->i_mount, &eofb); |
dc06f398 BF |
1480 | |
1481 | return scan; | |
1482 | } | |
1483 | ||
83104d44 DW |
1484 | int |
1485 | xfs_inode_free_quota_eofblocks( | |
1486 | struct xfs_inode *ip) | |
1487 | { | |
1488 | return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks); | |
1489 | } | |
1490 | ||
1491 | static void | |
1492 | __xfs_inode_set_eofblocks_tag( | |
1493 | xfs_inode_t *ip, | |
1494 | void (*execute)(struct xfs_mount *mp), | |
1495 | void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, | |
1496 | int error, unsigned long caller_ip), | |
1497 | int tag) | |
27b52867 BF |
1498 | { |
1499 | struct xfs_mount *mp = ip->i_mount; | |
1500 | struct xfs_perag *pag; | |
1501 | int tagged; | |
1502 | ||
85a6e764 CH |
1503 | /* |
1504 | * Don't bother locking the AG and looking up in the radix trees | |
1505 | * if we already know that we have the tag set. | |
1506 | */ | |
1507 | if (ip->i_flags & XFS_IEOFBLOCKS) | |
1508 | return; | |
1509 | spin_lock(&ip->i_flags_lock); | |
1510 | ip->i_flags |= XFS_IEOFBLOCKS; | |
1511 | spin_unlock(&ip->i_flags_lock); | |
1512 | ||
27b52867 BF |
1513 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
1514 | spin_lock(&pag->pag_ici_lock); | |
27b52867 | 1515 | |
83104d44 | 1516 | tagged = radix_tree_tagged(&pag->pag_ici_root, tag); |
27b52867 | 1517 | radix_tree_tag_set(&pag->pag_ici_root, |
83104d44 | 1518 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag); |
27b52867 BF |
1519 | if (!tagged) { |
1520 | /* propagate the eofblocks tag up into the perag radix tree */ | |
1521 | spin_lock(&ip->i_mount->m_perag_lock); | |
1522 | radix_tree_tag_set(&ip->i_mount->m_perag_tree, | |
1523 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | |
83104d44 | 1524 | tag); |
27b52867 | 1525 | spin_unlock(&ip->i_mount->m_perag_lock); |
579b62fa BF |
1526 | |
1527 | /* kick off background trimming */ | |
83104d44 | 1528 | execute(ip->i_mount); |
27b52867 | 1529 | |
83104d44 | 1530 | set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_); |
27b52867 BF |
1531 | } |
1532 | ||
1533 | spin_unlock(&pag->pag_ici_lock); | |
1534 | xfs_perag_put(pag); | |
1535 | } | |
1536 | ||
1537 | void | |
83104d44 | 1538 | xfs_inode_set_eofblocks_tag( |
27b52867 | 1539 | xfs_inode_t *ip) |
83104d44 DW |
1540 | { |
1541 | trace_xfs_inode_set_eofblocks_tag(ip); | |
1542 | return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks, | |
1543 | trace_xfs_perag_set_eofblocks, | |
1544 | XFS_ICI_EOFBLOCKS_TAG); | |
1545 | } | |
1546 | ||
1547 | static void | |
1548 | __xfs_inode_clear_eofblocks_tag( | |
1549 | xfs_inode_t *ip, | |
1550 | void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno, | |
1551 | int error, unsigned long caller_ip), | |
1552 | int tag) | |
27b52867 BF |
1553 | { |
1554 | struct xfs_mount *mp = ip->i_mount; | |
1555 | struct xfs_perag *pag; | |
1556 | ||
85a6e764 CH |
1557 | spin_lock(&ip->i_flags_lock); |
1558 | ip->i_flags &= ~XFS_IEOFBLOCKS; | |
1559 | spin_unlock(&ip->i_flags_lock); | |
1560 | ||
27b52867 BF |
1561 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
1562 | spin_lock(&pag->pag_ici_lock); | |
27b52867 BF |
1563 | |
1564 | radix_tree_tag_clear(&pag->pag_ici_root, | |
83104d44 DW |
1565 | XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag); |
1566 | if (!radix_tree_tagged(&pag->pag_ici_root, tag)) { | |
27b52867 BF |
1567 | /* clear the eofblocks tag from the perag radix tree */ |
1568 | spin_lock(&ip->i_mount->m_perag_lock); | |
1569 | radix_tree_tag_clear(&ip->i_mount->m_perag_tree, | |
1570 | XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino), | |
83104d44 | 1571 | tag); |
27b52867 | 1572 | spin_unlock(&ip->i_mount->m_perag_lock); |
83104d44 | 1573 | clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_); |
27b52867 BF |
1574 | } |
1575 | ||
1576 | spin_unlock(&pag->pag_ici_lock); | |
1577 | xfs_perag_put(pag); | |
1578 | } | |
1579 | ||
83104d44 DW |
1580 | void |
1581 | xfs_inode_clear_eofblocks_tag( | |
1582 | xfs_inode_t *ip) | |
1583 | { | |
1584 | trace_xfs_inode_clear_eofblocks_tag(ip); | |
1585 | return __xfs_inode_clear_eofblocks_tag(ip, | |
1586 | trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG); | |
1587 | } | |
1588 | ||
1589 | /* | |
1590 | * Automatic CoW Reservation Freeing | |
1591 | * | |
1592 | * These functions automatically garbage collect leftover CoW reservations | |
1593 | * that were made on behalf of a cowextsize hint when we start to run out | |
1594 | * of quota or when the reservations sit around for too long. If the file | |
1595 | * has dirty pages or is undergoing writeback, its CoW reservations will | |
1596 | * be retained. | |
1597 | * | |
1598 | * The actual garbage collection piggybacks off the same code that runs | |
1599 | * the speculative EOF preallocation garbage collector. | |
1600 | */ | |
1601 | STATIC int | |
1602 | xfs_inode_free_cowblocks( | |
1603 | struct xfs_inode *ip, | |
1604 | int flags, | |
1605 | void *args) | |
1606 | { | |
1607 | int ret; | |
1608 | struct xfs_eofblocks *eofb = args; | |
83104d44 | 1609 | int match; |
39937234 | 1610 | struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); |
83104d44 | 1611 | |
39937234 BF |
1612 | /* |
1613 | * Just clear the tag if we have an empty cow fork or none at all. It's | |
1614 | * possible the inode was fully unshared since it was originally tagged. | |
1615 | */ | |
1616 | if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) { | |
83104d44 DW |
1617 | trace_xfs_inode_free_cowblocks_invalid(ip); |
1618 | xfs_inode_clear_cowblocks_tag(ip); | |
1619 | return 0; | |
1620 | } | |
1621 | ||
1622 | /* | |
1623 | * If the mapping is dirty or under writeback we cannot touch the | |
1624 | * CoW fork. Leave it alone if we're in the midst of a directio. | |
1625 | */ | |
a1b7a4de CH |
1626 | if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) || |
1627 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) || | |
83104d44 DW |
1628 | mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) || |
1629 | atomic_read(&VFS_I(ip)->i_dio_count)) | |
1630 | return 0; | |
1631 | ||
1632 | if (eofb) { | |
1633 | if (eofb->eof_flags & XFS_EOF_FLAGS_UNION) | |
1634 | match = xfs_inode_match_id_union(ip, eofb); | |
1635 | else | |
1636 | match = xfs_inode_match_id(ip, eofb); | |
1637 | if (!match) | |
1638 | return 0; | |
1639 | ||
1640 | /* skip the inode if the file size is too small */ | |
1641 | if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE && | |
1642 | XFS_ISIZE(ip) < eofb->eof_min_file_size) | |
1643 | return 0; | |
83104d44 DW |
1644 | } |
1645 | ||
1646 | /* Free the CoW blocks */ | |
c3155097 BF |
1647 | xfs_ilock(ip, XFS_IOLOCK_EXCL); |
1648 | xfs_ilock(ip, XFS_MMAPLOCK_EXCL); | |
83104d44 | 1649 | |
3802a345 | 1650 | ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false); |
83104d44 | 1651 | |
c3155097 BF |
1652 | xfs_iunlock(ip, XFS_MMAPLOCK_EXCL); |
1653 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | |
83104d44 DW |
1654 | |
1655 | return ret; | |
1656 | } | |
1657 | ||
1658 | int | |
1659 | xfs_icache_free_cowblocks( | |
1660 | struct xfs_mount *mp, | |
1661 | struct xfs_eofblocks *eofb) | |
1662 | { | |
1663 | return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks, | |
1664 | XFS_ICI_COWBLOCKS_TAG); | |
1665 | } | |
1666 | ||
1667 | int | |
1668 | xfs_inode_free_quota_cowblocks( | |
1669 | struct xfs_inode *ip) | |
1670 | { | |
1671 | return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks); | |
1672 | } | |
1673 | ||
1674 | void | |
1675 | xfs_inode_set_cowblocks_tag( | |
1676 | xfs_inode_t *ip) | |
1677 | { | |
7b7381f0 | 1678 | trace_xfs_inode_set_cowblocks_tag(ip); |
83104d44 | 1679 | return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks, |
7b7381f0 | 1680 | trace_xfs_perag_set_cowblocks, |
83104d44 DW |
1681 | XFS_ICI_COWBLOCKS_TAG); |
1682 | } | |
1683 | ||
1684 | void | |
1685 | xfs_inode_clear_cowblocks_tag( | |
1686 | xfs_inode_t *ip) | |
1687 | { | |
7b7381f0 | 1688 | trace_xfs_inode_clear_cowblocks_tag(ip); |
83104d44 | 1689 | return __xfs_inode_clear_eofblocks_tag(ip, |
7b7381f0 | 1690 | trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG); |
83104d44 | 1691 | } |