// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
#include "xfs_log_priv.h"

struct kmem_cache *xfs_inode_cache;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
	struct xfs_inode *);

/*
 * helper function to extract extent size hint from inode
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * write to them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}

/*
 * Helper function to extract CoW extent size hint from inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}

/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * reading in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_need_iread_extents(&ip->i_df))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_afp && xfs_need_iread_extents(ip->i_afp))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}

/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> invalidate_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
 * can fault in pages during copy in/out (for buffered IO) or require the
 * mmap_lock in get_user_pages() to map the user pages into the kernel address
 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
 * fault because page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
 * both taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				  XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				 XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				  XFS_MMAPLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				 XFS_MMAPLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       locked.  See the comment for xfs_ilock() for a list
 *       of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}

/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *       unlocked.  See the comment for xfs_ilock() for a list
 *       of valid values for this parameter.
 *
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
	       (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
static inline bool
__xfs_rwsem_islocked(
	struct rw_semaphore	*rwsem,
	bool			shared)
{
	if (!debug_locks)
		return rwsem_is_locked(rwsem);

	if (!shared)
		return lockdep_is_held_type(rwsem, 0);

	/*
	 * We are checking that the lock is held at least in shared
	 * mode but don't care that it might be held exclusively
	 * (i.e. shared | excl). Hence we check if the lock is held
	 * in any mode rather than an explicit shared mode.
	 */
	return lockdep_is_held_type(rwsem, -1);
}

bool
xfs_isilocked(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
				(lock_flags & XFS_MMAPLOCK_SHARED));
	}

	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
				(lock_flags & XFS_IOLOCK_SHARED));
	}

	ASSERT(0);
	return false;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}

/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0, i, j, try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking.  We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * the asserts.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		i = 0;
		try_lock = 0;
		goto again;
	}
}

/*
 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
 * mmaplock must be double-locked separately since we use i_rwsem and
 * invalidate_lock for that. We now support taking one lock EXCL and the
 * other SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		swap(ip0, ip1);
		swap(ip0_mode, ip1_mode);
	}

again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}

uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	uint			flags = 0;

	if (ip->i_diflags & XFS_DIFLAG_ANY) {
		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (ip->i_diflags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (ip->i_diflags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (XFS_IFORK_Q(ip))
		flags |= FS_XFLAG_HASATTR;
	return flags;
}

/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	struct xfs_inode	*dp,
	const struct xfs_name	*name,
	struct xfs_inode	**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (xfs_is_shutdown(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}

/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	xfs_failaddr_t		failaddr;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_extsize = pip->i_extsize;
		}
		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_has_realtime(ip->i_mount))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_extsize = pip->i_extsize;
		}
	}
	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_diflags |= di_flags;

	/*
	 * Inode verifiers on older kernels only check that the extent size
	 * hint is an integer multiple of the rt extent size on realtime files.
	 * They did not check the hint alignment on a directory with both
	 * rtinherit and extszinherit flags set.  If the misaligned hint is
	 * propagated from a directory into a new realtime file, new file
	 * allocations will fail due to math errors in the rt allocator and/or
	 * trip the verifiers.  Validate the hint settings in the new file so
	 * that we don't let broken hints propagate.
	 */
	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
			VFS_I(ip)->i_mode, ip->i_diflags);
	if (failaddr) {
		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
				   XFS_DIFLAG_EXTSZINHERIT);
		ip->i_extsize = 0;
	}
}

/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	xfs_failaddr_t		failaddr;

	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = pip->i_cowextsize;
	}
	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
		ip->i_diflags2 |= XFS_DIFLAG2_DAX;

	/* Don't let invalid cowextsize hints propagate. */
	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
	if (failaddr) {
		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = 0;
	}
}

/*
 * Initialise a newly allocated inode and return the in-core inode to the
 * caller locked exclusively.
 */
int
xfs_init_new_inode(
	struct user_namespace	*mnt_userns,
	struct xfs_trans	*tp,
	struct xfs_inode	*pip,
	xfs_ino_t		ino,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	bool			init_xattrs,
	struct xfs_inode	**ipp)
{
	struct inode		*dir = pip ? VFS_I(pip) : NULL;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	unsigned int		flags;
	int			error;
	struct timespec64	tv;
	struct inode		*inode;

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively to prevent
	 * others from looking at it until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	set_nlink(inode, nlink);
	inode->i_rdev = rdev;
	ip->i_projid = prid;

	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
		inode_fsuid_set(inode, mnt_userns);
		inode->i_gid = dir->i_gid;
		inode->i_mode = mode;
	} else {
		inode_init_owner(mnt_userns, inode, dir, mode);
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit &&
	    (inode->i_mode & S_ISGID) &&
	    !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
		inode->i_mode &= ~S_ISGID;

	ip->i_disk_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_extsize = 0;
	ip->i_diflags = 0;

	if (xfs_has_v3inodes(mp)) {
		inode_set_iversion(inode, 1);
		ip->i_cowextsize = 0;
		ip->i_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		fallthrough;
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * If we need to create attributes immediately after allocating the
	 * inode, initialise an empty attribute fork right now. We use the
	 * default fork offset for attributes here as we don't know exactly what
	 * size or how many attributes we might be adding. We can do this
	 * safely here because we know the data fork is completely empty and
	 * this saves us from needing to run a separate transaction to set the
	 * fork offset in the immediate future.
	 */
	if (init_xattrs && xfs_has_attr(mp)) {
		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
		ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}

/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}

int
xfs_create(
	struct user_namespace	*mnt_userns,
	xfs_inode_t		*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	bool			init_xattrs,
	xfs_inode_t		**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	trace_xfs_create(dp, name);

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
			mapped_fsgid(mnt_userns, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
				resblks, &tp);
	}
	if (error)
		goto out_release_dquots;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	error = xfs_iext_count_may_overflow(dp, XFS_DATA_FORK,
			XFS_IEXT_DIR_MANIP_CNT(mp));
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to them, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
					resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}

int
xfs_create_tmpfile(
	struct user_namespace	*mnt_userns,
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
			mapped_fsgid(mnt_userns, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error)
		goto out_release_dquots;

	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
				0, 0, prid, false, &ip);
	if (error)
		goto out_trans_cancel;

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

out_trans_cancel:
	xfs_trans_cancel(tp);
out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}

int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error, nospace_error = 0;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
			&tp, &nospace_error);
	if (error)
		goto std_return;

	error = xfs_iext_count_may_overflow(tdp, XFS_DATA_FORK,
			XFS_IEXT_DIR_MANIP_CNT(mp));
	if (error)
		goto error_return;

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_projid != sip->i_projid)) {
		error = -EXDEV;
		goto error_return;
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	/*
	 * Handle initial link state of O_TMPFILE inode
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
		error = xfs_iunlink_remove(tp, pag, sip);
		xfs_perag_put(pag);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	xfs_bumplink(tp, sip);

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);

error_return:
	xfs_trans_cancel(tp);
std_return:
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;
	return error;
}

363e59ba DW |
1307 | /* Clear the reflink flag and the cowblocks tag if possible. */ |
1308 | static void | |
1309 | xfs_itruncate_clear_reflink_flags( | |
1310 | struct xfs_inode *ip) | |
1311 | { | |
1312 | struct xfs_ifork *dfork; | |
1313 | struct xfs_ifork *cfork; | |
1314 | ||
1315 | if (!xfs_is_reflink_inode(ip)) | |
1316 | return; | |
1317 | dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK); | |
1318 | cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK); | |
1319 | if (dfork->if_bytes == 0 && cfork->if_bytes == 0) | |
3e09ab8f | 1320 | ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK; |
363e59ba DW |
1321 | if (cfork->if_bytes == 0) |
1322 | xfs_inode_clear_cowblocks_tag(ip); | |
1323 | } | |
1324 | ||
1da177e4 | 1325 | /* |
8f04c47a CH |
1326 | * Free up the underlying blocks past new_size. The new size must be smaller |
1327 | * than the current size. This routine can be used both for the attribute and | |
1328 | * data fork, and does not modify the inode size, which is left to the caller. | |
1da177e4 | 1329 | * |
f6485057 DC |
1330 | * The transaction passed to this routine must have made a permanent log |
1331 | * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the | |
1332 | * given transaction and start new ones, so make sure everything involved in | |
1333 | * the transaction is tidy before calling here. Some transaction will be | |
1334 | * returned to the caller to be committed. The incoming transaction must | |
1335 | * already include the inode, and both inode locks must be held exclusively. | |
1336 | * The inode must also be "held" within the transaction. On return the inode | |
1337 | * will be "held" within the returned transaction. This routine does NOT | |
1338 | * require any disk space to be reserved for it within the transaction. | |
1da177e4 | 1339 | * |
f6485057 DC |
1340 | * If we get an error, we must return with the inode locked and linked into the |
1341 | * current transaction. This keeps things simple for the higher level code, | |
1342 | * because it always knows that the inode is locked and held in the transaction | |
1343 | * that returns to it whether errors occur or not. We don't mark the inode | |
1344 | * dirty on error so that transactions can be easily aborted if possible. | |
1da177e4 LT |
1345 | */ |
1346 | int | |
4e529339 | 1347 | xfs_itruncate_extents_flags( |
8f04c47a CH |
1348 | struct xfs_trans **tpp, |
1349 | struct xfs_inode *ip, | |
1350 | int whichfork, | |
13b86fc3 | 1351 | xfs_fsize_t new_size, |
4e529339 | 1352 | int flags) |
1da177e4 | 1353 | { |
8f04c47a CH |
1354 | struct xfs_mount *mp = ip->i_mount; |
1355 | struct xfs_trans *tp = *tpp; | |
8f04c47a | 1356 | xfs_fileoff_t first_unmap_block; |
8f04c47a | 1357 | xfs_filblks_t unmap_len; |
8f04c47a | 1358 | int error = 0; |
1da177e4 | 1359 | |
0b56185b CH |
1360 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
1361 | ASSERT(!atomic_read(&VFS_I(ip)->i_count) || | |
1362 | xfs_isilocked(ip, XFS_IOLOCK_EXCL)); | |
ce7ae151 | 1363 | ASSERT(new_size <= XFS_ISIZE(ip)); |
8f04c47a | 1364 | ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); |
1da177e4 | 1365 | ASSERT(ip->i_itemp != NULL); |
898621d5 | 1366 | ASSERT(ip->i_itemp->ili_lock_flags == 0); |
8f04c47a | 1367 | ASSERT(!XFS_NOT_DQATTACHED(mp, ip)); |
1da177e4 | 1368 | |
673e8e59 CH |
1369 | trace_xfs_itruncate_extents_start(ip, new_size); |
1370 | ||
4e529339 | 1371 | flags |= xfs_bmapi_aflag(whichfork); |
13b86fc3 | 1372 | |
1da177e4 LT |
1373 | /* |
1374 | * Since it is possible for space to become allocated beyond | |
1375 | * the end of the file (in a crash where the space is allocated | |
1376 | * but the inode size is not yet updated), simply remove any | |
1377 | * blocks which show up between the new EOF and the maximum | |
4bbb04ab DW |
1378 | * possible file size. |
1379 | * | |
1380 | * We have to free all the blocks to the bmbt maximum offset, even if | |
1381 | * the page cache can't scale that far. | |
1da177e4 | 1382 | */ |
8f04c47a | 1383 | first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size); |
33005fd0 | 1384 | if (!xfs_verify_fileoff(mp, first_unmap_block)) { |
4bbb04ab | 1385 | WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF); |
8f04c47a | 1386 | return 0; |
4bbb04ab | 1387 | } |
8f04c47a | 1388 | |
4bbb04ab DW |
1389 | unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1; |
1390 | while (unmap_len > 0) { | |
02dff7bf | 1391 | ASSERT(tp->t_firstblock == NULLFSBLOCK); |
4bbb04ab DW |
1392 | error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len, |
1393 | flags, XFS_ITRUNC_MAX_EXTENTS); | |
8f04c47a | 1394 | if (error) |
d5a2e289 | 1395 | goto out; |
1da177e4 | 1396 | |
6dd379c7 | 1397 | /* free the just unmapped extents */ |
9e28a242 | 1398 | error = xfs_defer_finish(&tp); |
8f04c47a | 1399 | if (error) |
9b1f4e98 | 1400 | goto out; |
1da177e4 | 1401 | } |
8f04c47a | 1402 | |
4919d42a DW |
1403 | if (whichfork == XFS_DATA_FORK) { |
1404 | /* Remove all pending CoW reservations. */ | |
1405 | error = xfs_reflink_cancel_cow_blocks(ip, &tp, | |
4bbb04ab | 1406 | first_unmap_block, XFS_MAX_FILEOFF, true); |
4919d42a DW |
1407 | if (error) |
1408 | goto out; | |
aa8968f2 | 1409 | |
4919d42a DW |
1410 | xfs_itruncate_clear_reflink_flags(ip); |
1411 | } | |
aa8968f2 | 1412 | |
673e8e59 CH |
1413 | /* |
1414 | * Always re-log the inode so that our permanent transaction can keep | |
1415 | * on rolling it forward in the log. | |
1416 | */ | |
1417 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); | |
1418 | ||
1419 | trace_xfs_itruncate_extents_end(ip, new_size); | |
1420 | ||
8f04c47a CH |
1421 | out: |
1422 | *tpp = tp; | |
1423 | return error; | |
8f04c47a CH |
1424 | } |
1425 | ||
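/*
 * Illustrative caller pattern for xfs_itruncate_extents_flags() above; a
 * sketch only, the canonical in-tree example is xfs_inactive_truncate()
 * below:
 *
 *	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	error = xfs_trans_commit(tp);
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *
 * The commit acts on whatever transaction the truncate handed back in @tp,
 * which may not be the transaction originally passed in.
 */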
c24b5dfa DC |
1426 | int |
1427 | xfs_release( | |
1428 | xfs_inode_t *ip) | |
1429 | { | |
1430 | xfs_mount_t *mp = ip->i_mount; | |
7d88329e | 1431 | int error = 0; |
c24b5dfa | 1432 | |
c19b3b05 | 1433 | if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0)) |
c24b5dfa DC |
1434 | return 0; |
1435 | ||
1436 | /* If this is a read-only mount, don't do this (would generate I/O) */ | |
2e973b2c | 1437 | if (xfs_is_readonly(mp)) |
c24b5dfa DC |
1438 | return 0; |
1439 | ||
75c8c50f | 1440 | if (!xfs_is_shutdown(mp)) { |
c24b5dfa DC |
1441 | int truncated; |
1442 | ||
c24b5dfa DC |
1443 | /* |
1444 | * If we previously truncated this file and removed old data | |
1445 | * in the process, we want to initiate "early" writeout on | |
1446 | * the last close. This is an attempt to combat the notorious | |
1447 | * NULL files problem which is particularly noticeable from a | |
1448 | * truncate down, buffered (re-)write (delalloc), followed by | |
1449 | * a crash. What we are effectively doing here is | |
1450 | * significantly reducing the time window where we'd otherwise | |
1451 | * be exposed to that problem. | |
1452 | */ | |
1453 | truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED); | |
1454 | if (truncated) { | |
1455 | xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE); | |
eac152b4 | 1456 | if (ip->i_delayed_blks > 0) { |
2451337d | 1457 | error = filemap_flush(VFS_I(ip)->i_mapping); |
c24b5dfa DC |
1458 | if (error) |
1459 | return error; | |
1460 | } | |
1461 | } | |
1462 | } | |
1463 | ||
54d7b5c1 | 1464 | if (VFS_I(ip)->i_nlink == 0) |
c24b5dfa DC |
1465 | return 0; |
1466 | ||
7d88329e DW |
1467 | /* |
1468 | * If we can't get the iolock just skip truncating the blocks past EOF | |
1469 | * because we could deadlock with the mmap_lock otherwise. We'll get | |
1470 | * another chance to drop them once the last reference to the inode is | |
1471 | * dropped, so we'll never leak blocks permanently. | |
1472 | */ | |
1473 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) | |
1474 | return 0; | |
c24b5dfa | 1475 | |
7d88329e | 1476 | if (xfs_can_free_eofblocks(ip, false)) { |
a36b9261 BF |
1477 | /* |
1478 | * If the inode is being opened, written and closed frequently |
1479 | * and we have delayed allocation blocks outstanding (e.g. |
1480 | * streaming writes from the NFS server), truncating the blocks |
1481 | * past EOF will cause fragmentation to occur. |
1482 | * | |
1483 | * In this case don't do the truncation, but we have to be | |
1484 | * careful how we detect this case. Blocks beyond EOF show up as | |
1485 | * i_delayed_blks even when the inode is clean, so we need to | |
1486 | * truncate them away first before checking for a dirty release. | |
1487 | * Hence on the first dirty close we will still remove the | |
1488 | * speculative allocation, but after that we will leave it in | |
1489 | * place. | |
1490 | */ | |
1491 | if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE)) | |
7d88329e DW |
1492 | goto out_unlock; |
1493 | ||
1494 | error = xfs_free_eofblocks(ip); | |
1495 | if (error) | |
1496 | goto out_unlock; | |
c24b5dfa DC |
1497 | |
1498 | /* delalloc blocks after truncation means it really is dirty */ | |
1499 | if (ip->i_delayed_blks) | |
1500 | xfs_iflags_set(ip, XFS_IDIRTY_RELEASE); | |
1501 | } | |
7d88329e DW |
1502 | |
1503 | out_unlock: | |
1504 | xfs_iunlock(ip, XFS_IOLOCK_EXCL); | |
1505 | return error; | |
c24b5dfa DC |
1506 | } |
1507 | ||
f7be2d7f BF |
1508 | /* |
1509 | * xfs_inactive_truncate | |
1510 | * | |
1511 | * Called to perform a truncate when an inode becomes unlinked. | |
1512 | */ | |
1513 | STATIC int | |
1514 | xfs_inactive_truncate( | |
1515 | struct xfs_inode *ip) | |
1516 | { | |
1517 | struct xfs_mount *mp = ip->i_mount; | |
1518 | struct xfs_trans *tp; | |
1519 | int error; | |
1520 | ||
253f4911 | 1521 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp); |
f7be2d7f | 1522 | if (error) { |
75c8c50f | 1523 | ASSERT(xfs_is_shutdown(mp)); |
f7be2d7f BF |
1524 | return error; |
1525 | } | |
f7be2d7f BF |
1526 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
1527 | xfs_trans_ijoin(tp, ip, 0); | |
1528 | ||
1529 | /* | |
1530 | * Log the inode size first to prevent stale data exposure in the event | |
1531 | * of a system crash before the truncate completes. See the related | |
69bca807 | 1532 | * comment in xfs_vn_setattr_size() for details. |
f7be2d7f | 1533 | */ |
13d2c10b | 1534 | ip->i_disk_size = 0; |
f7be2d7f BF |
1535 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
1536 | ||
1537 | error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); | |
1538 | if (error) | |
1539 | goto error_trans_cancel; | |
1540 | ||
daf83964 | 1541 | ASSERT(ip->i_df.if_nextents == 0); |
f7be2d7f | 1542 | |
70393313 | 1543 | error = xfs_trans_commit(tp); |
f7be2d7f BF |
1544 | if (error) |
1545 | goto error_unlock; | |
1546 | ||
1547 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
1548 | return 0; | |
1549 | ||
1550 | error_trans_cancel: | |
4906e215 | 1551 | xfs_trans_cancel(tp); |
f7be2d7f BF |
1552 | error_unlock: |
1553 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
1554 | return error; | |
1555 | } | |
1556 | ||
88877d2b BF |
1557 | /* |
1558 | * xfs_inactive_ifree() | |
1559 | * | |
1560 | * Perform the inode free when an inode is unlinked. | |
1561 | */ | |
1562 | STATIC int | |
1563 | xfs_inactive_ifree( | |
1564 | struct xfs_inode *ip) | |
1565 | { | |
88877d2b BF |
1566 | struct xfs_mount *mp = ip->i_mount; |
1567 | struct xfs_trans *tp; | |
1568 | int error; | |
1569 | ||
9d43b180 | 1570 | /* |
76d771b4 CH |
1571 | * We try to use a per-AG reservation for any block needed by the finobt |
1572 | * tree, but as the finobt feature predates the per-AG reservation | |
1573 | * support a degraded file system might not have enough space for the | |
1574 | * reservation at mount time. In that case try to dip into the reserved | |
1575 | * pool and pray. | |
9d43b180 BF |
1576 | * |
1577 | * Send a warning if the reservation does happen to fail, as the inode | |
1578 | * now remains allocated and sits on the unlinked list until the fs is | |
1579 | * repaired. | |
1580 | */ | |
e1f6ca11 | 1581 | if (unlikely(mp->m_finobt_nores)) { |
76d771b4 CH |
1582 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, |
1583 | XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, | |
1584 | &tp); | |
1585 | } else { | |
1586 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp); | |
1587 | } | |
88877d2b | 1588 | if (error) { |
2451337d | 1589 | if (error == -ENOSPC) { |
9d43b180 BF |
1590 | xfs_warn_ratelimited(mp, |
1591 | "Failed to remove inode(s) from unlinked list. " | |
1592 | "Please free space, unmount and run xfs_repair."); | |
1593 | } else { | |
75c8c50f | 1594 | ASSERT(xfs_is_shutdown(mp)); |
9d43b180 | 1595 | } |
88877d2b BF |
1596 | return error; |
1597 | } | |
1598 | ||
96355d5a DC |
1599 | /* |
1600 | * We do not hold the inode locked across the entire rolling transaction | |
1601 | * here. We only need to hold it for the first transaction that | |
1602 | * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the | |
1603 | * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode | |
1604 | * here breaks the relationship between cluster buffer invalidation and | |
1605 | * stale inode invalidation on cluster buffer item journal commit | |
1606 | * completion, and can result in leaving dirty stale inodes hanging | |
1607 | * around in memory. | |
1608 | * | |
1609 | * We have no need for serialising this inode operation against other | |
1610 | * operations - we freed the inode and hence reallocation is required | |
1611 | * and that will serialise on reallocating the space the deferops need | |
1612 | * to free. Hence we can unlock the inode on the first commit of | |
1613 | * the transaction rather than roll it right through the deferops. This | |
1614 | * avoids relogging the XFS_ISTALE inode. | |
1615 | * | |
1616 | * We check that xfs_ifree() hasn't grown an internal transaction roll | |
1617 | * by asserting that the inode is still locked when it returns. | |
1618 | */ | |
88877d2b | 1619 | xfs_ilock(ip, XFS_ILOCK_EXCL); |
96355d5a | 1620 | xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); |
88877d2b | 1621 | |
0e0417f3 | 1622 | error = xfs_ifree(tp, ip); |
96355d5a | 1623 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
88877d2b BF |
1624 | if (error) { |
1625 | /* | |
1626 | * If we fail to free the inode, shut down. The cancel | |
1627 | * might do that, we need to make sure. Otherwise the | |
1628 | * inode might be lost for a long time or forever. | |
1629 | */ | |
75c8c50f | 1630 | if (!xfs_is_shutdown(mp)) { |
88877d2b BF |
1631 | xfs_notice(mp, "%s: xfs_ifree returned error %d", |
1632 | __func__, error); | |
1633 | xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); | |
1634 | } | |
4906e215 | 1635 | xfs_trans_cancel(tp); |
88877d2b BF |
1636 | return error; |
1637 | } | |
1638 | ||
1639 | /* | |
1640 | * Credit the quota account(s). The inode is gone. | |
1641 | */ | |
1642 | xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1); | |
1643 | ||
1644 | /* | |
d4a97a04 BF |
1645 | * Just ignore errors at this point. There is nothing we can do except |
1646 | * to try to keep going. Make sure it's not a silent error. | |
88877d2b | 1647 | */ |
70393313 | 1648 | error = xfs_trans_commit(tp); |
88877d2b BF |
1649 | if (error) |
1650 | xfs_notice(mp, "%s: xfs_trans_commit returned error %d", | |
1651 | __func__, error); | |
1652 | ||
88877d2b BF |
1653 | return 0; |
1654 | } | |
1655 | ||
62af7d54 DW |
1656 | /* |
1657 | * Returns true if we need to update the on-disk metadata before we can free | |
1658 | * the memory used by this inode. Updates include freeing post-eof | |
1659 | * preallocations; freeing COW staging extents; and marking the inode free in | |
1660 | * the inobt if it is on the unlinked list. | |
1661 | */ | |
1662 | bool | |
1663 | xfs_inode_needs_inactive( | |
1664 | struct xfs_inode *ip) | |
1665 | { | |
1666 | struct xfs_mount *mp = ip->i_mount; | |
1667 | struct xfs_ifork *cow_ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK); | |
1668 | ||
1669 | /* | |
1670 | * If the inode is already free, then there can be nothing | |
1671 | * to clean up here. | |
1672 | */ | |
1673 | if (VFS_I(ip)->i_mode == 0) | |
1674 | return false; | |
1675 | ||
1676 | /* If this is a read-only mount, don't do this (would generate I/O) */ | |
2e973b2c | 1677 | if (xfs_is_readonly(mp)) |
62af7d54 DW |
1678 | return false; |
1679 | ||
1680 | /* If the log isn't running, push inodes straight to reclaim. */ | |
75c8c50f | 1681 | if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp)) |
62af7d54 DW |
1682 | return false; |
1683 | ||
1684 | /* Metadata inodes require explicit resource cleanup. */ | |
1685 | if (xfs_is_metadata_inode(ip)) | |
1686 | return false; | |
1687 | ||
1688 | /* Want to clean out the cow blocks if there are any. */ | |
1689 | if (cow_ifp && cow_ifp->if_bytes > 0) | |
1690 | return true; | |
1691 | ||
1692 | /* Unlinked files must be freed. */ | |
1693 | if (VFS_I(ip)->i_nlink == 0) | |
1694 | return true; | |
1695 | ||
1696 | /* | |
1697 | * This file isn't being freed, so check if there are post-eof blocks | |
1698 | * to free. @force is true because we are evicting an inode from the | |
1699 | * cache. Post-eof blocks must be freed, lest we end up with broken | |
1700 | * free space accounting. | |
1701 | * | |
1702 | * Note: don't bother with iolock here since lockdep complains about | |
1703 | * acquiring it in reclaim context. We have the only reference to the | |
1704 | * inode at this point anyways. | |
1705 | */ | |
1706 | return xfs_can_free_eofblocks(ip, true); | |
1707 | } | |
1708 | ||
c24b5dfa DC |
1709 | /* |
1710 | * xfs_inactive | |
1711 | * | |
1712 | * This is called when the reference count for the vnode |
1713 | * goes to zero. If the file has been unlinked, then it must | |
1714 | * now be truncated. Also, we clear all of the read-ahead state | |
1715 | * kept for the inode here since the file is now closed. | |
1716 | */ | |
74564fb4 | 1717 | void |
c24b5dfa DC |
1718 | xfs_inactive( |
1719 | xfs_inode_t *ip) | |
1720 | { | |
3d3c8b52 | 1721 | struct xfs_mount *mp; |
3d3c8b52 JL |
1722 | int error; |
1723 | int truncate = 0; | |
c24b5dfa DC |
1724 | |
1725 | /* | |
1726 | * If the inode is already free, then there can be nothing | |
1727 | * to clean up here. | |
1728 | */ | |
c19b3b05 | 1729 | if (VFS_I(ip)->i_mode == 0) { |
c24b5dfa | 1730 | ASSERT(ip->i_df.if_broot_bytes == 0); |
3ea06d73 | 1731 | goto out; |
c24b5dfa DC |
1732 | } |
1733 | ||
1734 | mp = ip->i_mount; | |
17c12bcd | 1735 | ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY)); |
c24b5dfa | 1736 | |
c24b5dfa | 1737 | /* If this is a read-only mount, don't do this (would generate I/O) */ |
2e973b2c | 1738 | if (xfs_is_readonly(mp)) |
3ea06d73 | 1739 | goto out; |
c24b5dfa | 1740 | |
383e32b0 DW |
1741 | /* Metadata inodes require explicit resource cleanup. */ |
1742 | if (xfs_is_metadata_inode(ip)) | |
3ea06d73 | 1743 | goto out; |
383e32b0 | 1744 | |
6231848c | 1745 | /* Try to clean out the cow blocks if there are any. */ |
51d62690 | 1746 | if (xfs_inode_has_cow_data(ip)) |
6231848c DW |
1747 | xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true); |
1748 | ||
54d7b5c1 | 1749 | if (VFS_I(ip)->i_nlink != 0) { |
c24b5dfa DC |
1750 | /* |
1751 | * force is true because we are evicting an inode from the | |
1752 | * cache. Post-eof blocks must be freed, lest we end up with | |
1753 | * broken free space accounting. | |
3b4683c2 BF |
1754 | * |
1755 | * Note: don't bother with iolock here since lockdep complains | |
1756 | * about acquiring it in reclaim context. We have the only | |
1757 | * reference to the inode at this point anyways. | |
c24b5dfa | 1758 | */ |
3b4683c2 | 1759 | if (xfs_can_free_eofblocks(ip, true)) |
a36b9261 | 1760 | xfs_free_eofblocks(ip); |
74564fb4 | 1761 | |
3ea06d73 | 1762 | goto out; |
c24b5dfa DC |
1763 | } |
1764 | ||
c19b3b05 | 1765 | if (S_ISREG(VFS_I(ip)->i_mode) && |
13d2c10b | 1766 | (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 || |
daf83964 | 1767 | ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0)) |
c24b5dfa DC |
1768 | truncate = 1; |
1769 | ||
c14cfcca | 1770 | error = xfs_qm_dqattach(ip); |
c24b5dfa | 1771 | if (error) |
3ea06d73 | 1772 | goto out; |
c24b5dfa | 1773 | |
c19b3b05 | 1774 | if (S_ISLNK(VFS_I(ip)->i_mode)) |
36b21dde | 1775 | error = xfs_inactive_symlink(ip); |
f7be2d7f BF |
1776 | else if (truncate) |
1777 | error = xfs_inactive_truncate(ip); | |
1778 | if (error) | |
3ea06d73 | 1779 | goto out; |
c24b5dfa DC |
1780 | |
1781 | /* | |
1782 | * If there are attributes associated with the file then blow them away | |
1783 | * now. The code calls a routine that recursively deconstructs the | |
6dfe5a04 | 1784 | * attribute fork. It also blows away the in-core attribute fork. |
c24b5dfa | 1785 | */ |
6dfe5a04 | 1786 | if (XFS_IFORK_Q(ip)) { |
c24b5dfa DC |
1787 | error = xfs_attr_inactive(ip); |
1788 | if (error) | |
3ea06d73 | 1789 | goto out; |
c24b5dfa DC |
1790 | } |
1791 | ||
6dfe5a04 | 1792 | ASSERT(!ip->i_afp); |
7821ea30 | 1793 | ASSERT(ip->i_forkoff == 0); |
c24b5dfa DC |
1794 | |
1795 | /* | |
1796 | * Free the inode. | |
1797 | */ | |
3ea06d73 | 1798 | xfs_inactive_ifree(ip); |
c24b5dfa | 1799 | |
3ea06d73 | 1800 | out: |
c24b5dfa | 1801 | /* |
3ea06d73 DW |
1802 | * We're done making metadata updates for this inode, so we can release |
1803 | * the attached dquots. | |
c24b5dfa DC |
1804 | */ |
1805 | xfs_qm_dqdetach(ip); | |
c24b5dfa DC |
1806 | } |
1807 | ||
9b247179 DW |
1808 | /* |
1809 | * In-Core Unlinked List Lookups | |
1810 | * ============================= | |
1811 | * | |
1812 | * Every inode is supposed to be reachable from some other piece of metadata | |
1813 | * with the exception of the root directory. Inodes with a connection to a | |
1814 | * file descriptor but not linked from anywhere in the on-disk directory tree | |
1815 | * are collectively known as unlinked inodes, though the filesystem itself | |
1816 | * maintains links to these inodes so that on-disk metadata are consistent. | |
1817 | * | |
1818 | * XFS implements a per-AG on-disk hash table of unlinked inodes. The AGI | |
1819 | * header contains a number of buckets that point to an inode, and each inode | |
1820 | * record has a pointer to the next inode in the hash chain. This | |
1821 | * singly-linked list causes scaling problems in the iunlink remove function | |
1822 | * because we must walk that list to find the inode that points to the inode | |
1823 | * being removed from the unlinked hash bucket list. | |
1824 | * | |
1825 | * What if we modelled the unlinked list as a collection of records capturing | |
1826 | * "X.next_unlinked = Y" relations? If we indexed those records on Y, we'd | |
1827 | * have a fast way to look up unlinked list predecessors, which avoids the | |
1828 | * slow list walk. That's exactly what we do here (in-core) with a per-AG | |
1829 | * rhashtable. | |
1830 | * | |
1831 | * Because this is a backref cache, we ignore operational failures since the | |
1832 | * iunlink code can fall back to the slow bucket walk. The only errors that | |
1833 | * should bubble out are for obviously incorrect situations. | |
1834 | * | |
1835 | * All users of the backref cache MUST hold the AGI buffer lock to serialize | |
1836 | * access or have otherwise provided for concurrency control. | |
1837 | */ | |
1838 | ||
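/*
 * A worked example (purely illustrative): if bucket 0 holds the chain
 * 42 -> 17 -> 3 -> NULLAGINO, the cache contains the records
 * (iu_next_unlinked = 17, iu_agino = 42) and (iu_next_unlinked = 3,
 * iu_agino = 17).  A lookup keyed on 3 then returns 17, the predecessor
 * of inode 3, without walking the chain from the bucket head.
 */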
1839 | /* Capture a "X.next_unlinked = Y" relationship. */ | |
1840 | struct xfs_iunlink { | |
1841 | struct rhash_head iu_rhash_head; | |
1842 | xfs_agino_t iu_agino; /* X */ | |
1843 | xfs_agino_t iu_next_unlinked; /* Y */ | |
1844 | }; | |
1845 | ||
1846 | /* Unlinked list predecessor lookup hashtable construction */ | |
1847 | static int | |
1848 | xfs_iunlink_obj_cmpfn( | |
1849 | struct rhashtable_compare_arg *arg, | |
1850 | const void *obj) | |
1851 | { | |
1852 | const xfs_agino_t *key = arg->key; | |
1853 | const struct xfs_iunlink *iu = obj; | |
1854 | ||
1855 | if (iu->iu_next_unlinked != *key) | |
1856 | return 1; | |
1857 | return 0; | |
1858 | } | |
1859 | ||
1860 | static const struct rhashtable_params xfs_iunlink_hash_params = { | |
1861 | .min_size = XFS_AGI_UNLINKED_BUCKETS, | |
1862 | .key_len = sizeof(xfs_agino_t), | |
1863 | .key_offset = offsetof(struct xfs_iunlink, | |
1864 | iu_next_unlinked), | |
1865 | .head_offset = offsetof(struct xfs_iunlink, iu_rhash_head), | |
1866 | .automatic_shrinking = true, | |
1867 | .obj_cmpfn = xfs_iunlink_obj_cmpfn, | |
1868 | }; | |
1869 | ||
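/*
 * Note that the hash key is iu_next_unlinked (the "Y" above), not
 * iu_agino, so a lookup answers "which inode points at Y?" -- exactly
 * the predecessor query that the unlinked list removal path needs.
 */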
1870 | /* | |
1871 | * Return X, where X.next_unlinked == @agino. Returns NULLAGINO if no such | |
1872 | * relation is found. | |
1873 | */ | |
1874 | static xfs_agino_t | |
1875 | xfs_iunlink_lookup_backref( | |
1876 | struct xfs_perag *pag, | |
1877 | xfs_agino_t agino) | |
1878 | { | |
1879 | struct xfs_iunlink *iu; | |
1880 | ||
1881 | iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino, | |
1882 | xfs_iunlink_hash_params); | |
1883 | return iu ? iu->iu_agino : NULLAGINO; | |
1884 | } | |
1885 | ||
1886 | /* | |
1887 | * Take ownership of an iunlink cache entry and insert it into the hash table. | |
1888 | * If successful, the entry will be owned by the cache; if not, it is freed. | |
1889 | * Either way, the caller does not own @iu after this call. | |
1890 | */ | |
1891 | static int | |
1892 | xfs_iunlink_insert_backref( | |
1893 | struct xfs_perag *pag, | |
1894 | struct xfs_iunlink *iu) | |
1895 | { | |
1896 | int error; | |
1897 | ||
1898 | error = rhashtable_insert_fast(&pag->pagi_unlinked_hash, | |
1899 | &iu->iu_rhash_head, xfs_iunlink_hash_params); | |
1900 | /* | |
1901 | * Fail loudly if there already was an entry because that's a sign of | |
1902 | * corruption of in-memory data. Also fail loudly if we see an error | |
1903 | * code we didn't anticipate from the rhashtable code. Currently we | |
1904 | * only anticipate ENOMEM. | |
1905 | */ | |
1906 | if (error) { | |
1907 | WARN(error != -ENOMEM, "iunlink cache insert error %d", error); | |
1908 | kmem_free(iu); | |
1909 | } | |
1910 | /* | |
1911 | * Absorb any runtime errors that aren't a result of corruption because | |
1912 | * this is a cache and we can always fall back to bucket list scanning. | |
1913 | */ | |
1914 | if (error != 0 && error != -EEXIST) | |
1915 | error = 0; | |
1916 | return error; | |
1917 | } | |
1918 | ||
1919 | /* Remember that @prev_agino.next_unlinked = @this_agino. */ | |
1920 | static int | |
1921 | xfs_iunlink_add_backref( | |
1922 | struct xfs_perag *pag, | |
1923 | xfs_agino_t prev_agino, | |
1924 | xfs_agino_t this_agino) | |
1925 | { | |
1926 | struct xfs_iunlink *iu; | |
1927 | ||
1928 | if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK)) | |
1929 | return 0; | |
1930 | ||
707e0dda | 1931 | iu = kmem_zalloc(sizeof(*iu), KM_NOFS); |
9b247179 DW |
1932 | iu->iu_agino = prev_agino; |
1933 | iu->iu_next_unlinked = this_agino; | |
1934 | ||
1935 | return xfs_iunlink_insert_backref(pag, iu); | |
1936 | } | |
1937 | ||
1938 | /* | |
1939 | * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked. | |
1940 | * If @next_unlinked is NULLAGINO, we drop the backref and exit. If there | |
1941 | * wasn't any such entry then we don't bother. | |
1942 | */ | |
1943 | static int | |
1944 | xfs_iunlink_change_backref( | |
1945 | struct xfs_perag *pag, | |
1946 | xfs_agino_t agino, | |
1947 | xfs_agino_t next_unlinked) | |
1948 | { | |
1949 | struct xfs_iunlink *iu; | |
1950 | int error; | |
1951 | ||
1952 | /* Look up the old entry; if there wasn't one then exit. */ | |
1953 | iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino, | |
1954 | xfs_iunlink_hash_params); | |
1955 | if (!iu) | |
1956 | return 0; | |
1957 | ||
1958 | /* | |
1959 | * Remove the entry. This shouldn't ever return an error, but if we | |
1960 | * couldn't remove the old entry we don't want to add it again to the | |
1961 | * hash table, and if the entry disappeared on us then someone's | |
1962 | * violated the locking rules and we need to fail loudly. Either way | |
1963 | * we cannot remove the inode because internal state is or would have | |
1964 | * been corrupt. | |
1965 | */ | |
1966 | error = rhashtable_remove_fast(&pag->pagi_unlinked_hash, | |
1967 | &iu->iu_rhash_head, xfs_iunlink_hash_params); | |
1968 | if (error) | |
1969 | return error; | |
1970 | ||
1971 | /* If there is no new next entry just free our item and return. */ | |
1972 | if (next_unlinked == NULLAGINO) { | |
1973 | kmem_free(iu); | |
1974 | return 0; | |
1975 | } | |
1976 | ||
1977 | /* Update the entry and re-add it to the hash table. */ | |
1978 | iu->iu_next_unlinked = next_unlinked; | |
1979 | return xfs_iunlink_insert_backref(pag, iu); | |
1980 | } | |
1981 | ||
1982 | /* Set up the in-core predecessor structures. */ | |
1983 | int | |
1984 | xfs_iunlink_init( | |
1985 | struct xfs_perag *pag) | |
1986 | { | |
1987 | return rhashtable_init(&pag->pagi_unlinked_hash, | |
1988 | &xfs_iunlink_hash_params); | |
1989 | } | |
1990 | ||
1991 | /* Free the in-core predecessor structures. */ | |
1992 | static void | |
1993 | xfs_iunlink_free_item( | |
1994 | void *ptr, | |
1995 | void *arg) | |
1996 | { | |
1997 | struct xfs_iunlink *iu = ptr; | |
1998 | bool *freed_anything = arg; | |
1999 | ||
2000 | *freed_anything = true; | |
2001 | kmem_free(iu); | |
2002 | } | |
2003 | ||
2004 | void | |
2005 | xfs_iunlink_destroy( | |
2006 | struct xfs_perag *pag) | |
2007 | { | |
2008 | bool freed_anything = false; | |
2009 | ||
2010 | rhashtable_free_and_destroy(&pag->pagi_unlinked_hash, | |
2011 | xfs_iunlink_free_item, &freed_anything); | |
2012 | ||
75c8c50f | 2013 | ASSERT(freed_anything == false || xfs_is_shutdown(pag->pag_mount)); |
9b247179 DW |
2014 | } |
2015 | ||
9a4a5118 DW |
2016 | /* |
2017 | * Point the AGI unlinked bucket at an inode and log the results. The caller | |
2018 | * is responsible for validating the old value. | |
2019 | */ | |
2020 | STATIC int | |
2021 | xfs_iunlink_update_bucket( | |
2022 | struct xfs_trans *tp, | |
f40aadb2 | 2023 | struct xfs_perag *pag, |
9a4a5118 DW |
2024 | struct xfs_buf *agibp, |
2025 | unsigned int bucket_index, | |
2026 | xfs_agino_t new_agino) | |
2027 | { | |
370c782b | 2028 | struct xfs_agi *agi = agibp->b_addr; |
9a4a5118 DW |
2029 | xfs_agino_t old_value; |
2030 | int offset; | |
2031 | ||
f40aadb2 | 2032 | ASSERT(xfs_verify_agino_or_null(tp->t_mountp, pag->pag_agno, new_agino)); |
9a4a5118 DW |
2033 | |
2034 | old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]); | |
f40aadb2 | 2035 | trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index, |
9a4a5118 DW |
2036 | old_value, new_agino); |
2037 | ||
2038 | /* | |
2039 | * We should never find the head of the list already set to the value | |
2040 | * passed in because either we're adding or removing ourselves from the | |
2041 | * head of the list. | |
2042 | */ | |
a5155b87 | 2043 | if (old_value == new_agino) { |
8d57c216 | 2044 | xfs_buf_mark_corrupt(agibp); |
9a4a5118 | 2045 | return -EFSCORRUPTED; |
a5155b87 | 2046 | } |
9a4a5118 DW |
2047 | |
2048 | agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino); | |
2049 | offset = offsetof(struct xfs_agi, agi_unlinked) + | |
2050 | (sizeof(xfs_agino_t) * bucket_index); | |
2051 | xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1); | |
2052 | return 0; | |
2053 | } | |
2054 | ||
f2fc16a3 DW |
2055 | /* Set an on-disk inode's next_unlinked pointer. */ |
2056 | STATIC void | |
2057 | xfs_iunlink_update_dinode( | |
2058 | struct xfs_trans *tp, | |
f40aadb2 | 2059 | struct xfs_perag *pag, |
f2fc16a3 DW |
2060 | xfs_agino_t agino, |
2061 | struct xfs_buf *ibp, | |
2062 | struct xfs_dinode *dip, | |
2063 | struct xfs_imap *imap, | |
2064 | xfs_agino_t next_agino) | |
2065 | { | |
2066 | struct xfs_mount *mp = tp->t_mountp; | |
2067 | int offset; | |
2068 | ||
f40aadb2 | 2069 | ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)); |
f2fc16a3 | 2070 | |
f40aadb2 | 2071 | trace_xfs_iunlink_update_dinode(mp, pag->pag_agno, agino, |
f2fc16a3 DW |
2072 | be32_to_cpu(dip->di_next_unlinked), next_agino); |
2073 | ||
2074 | dip->di_next_unlinked = cpu_to_be32(next_agino); | |
2075 | offset = imap->im_boffset + | |
2076 | offsetof(struct xfs_dinode, di_next_unlinked); | |
2077 | ||
2078 | /* need to recalc the inode CRC if appropriate */ | |
2079 | xfs_dinode_calc_crc(mp, dip); | |
2080 | xfs_trans_inode_buf(tp, ibp); | |
2081 | xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1); | |
f2fc16a3 DW |
2082 | } |
2083 | ||
2084 | /* Set an in-core inode's unlinked pointer and return the old value. */ | |
2085 | STATIC int | |
2086 | xfs_iunlink_update_inode( | |
2087 | struct xfs_trans *tp, | |
2088 | struct xfs_inode *ip, | |
f40aadb2 | 2089 | struct xfs_perag *pag, |
f2fc16a3 DW |
2090 | xfs_agino_t next_agino, |
2091 | xfs_agino_t *old_next_agino) | |
2092 | { | |
2093 | struct xfs_mount *mp = tp->t_mountp; | |
2094 | struct xfs_dinode *dip; | |
2095 | struct xfs_buf *ibp; | |
2096 | xfs_agino_t old_value; | |
2097 | int error; | |
2098 | ||
f40aadb2 | 2099 | ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)); |
f2fc16a3 | 2100 | |
af9dcdde | 2101 | error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp); |
f2fc16a3 DW |
2102 | if (error) |
2103 | return error; | |
af9dcdde | 2104 | dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset); |
f2fc16a3 DW |
2105 | |
2106 | /* Make sure the old pointer isn't garbage. */ | |
2107 | old_value = be32_to_cpu(dip->di_next_unlinked); | |
f40aadb2 | 2108 | if (!xfs_verify_agino_or_null(mp, pag->pag_agno, old_value)) { |
a5155b87 DW |
2109 | xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip, |
2110 | sizeof(*dip), __this_address); | |
f2fc16a3 DW |
2111 | error = -EFSCORRUPTED; |
2112 | goto out; | |
2113 | } | |
2114 | ||
2115 | /* | |
2116 | * Since we're updating a linked list, we should never find that the | |
2117 | * current pointer is the same as the new value, unless we're | |
2118 | * terminating the list. | |
2119 | */ | |
2120 | *old_next_agino = old_value; | |
2121 | if (old_value == next_agino) { | |
a5155b87 DW |
2122 | if (next_agino != NULLAGINO) { |
2123 | xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, | |
2124 | dip, sizeof(*dip), __this_address); | |
f2fc16a3 | 2125 | error = -EFSCORRUPTED; |
a5155b87 | 2126 | } |
f2fc16a3 DW |
2127 | goto out; |
2128 | } | |
2129 | ||
2130 | /* Ok, update the new pointer. */ | |
f40aadb2 | 2131 | xfs_iunlink_update_dinode(tp, pag, XFS_INO_TO_AGINO(mp, ip->i_ino), |
f2fc16a3 DW |
2132 | ibp, dip, &ip->i_imap, next_agino); |
2133 | return 0; | |
2134 | out: | |
2135 | xfs_trans_brelse(tp, ibp); | |
2136 | return error; | |
2137 | } | |
2138 | ||
1da177e4 | 2139 | /* |
c4a6bf7f DW |
2140 | * This is called when the inode's link count has gone to 0 or we are creating |
2141 | * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0. | |
54d7b5c1 DC |
2142 | * |
2143 | * We place the on-disk inode on a list in the AGI. It will be pulled from this | |
2144 | * list when the inode is freed. | |
1da177e4 | 2145 | */ |
54d7b5c1 | 2146 | STATIC int |
1da177e4 | 2147 | xfs_iunlink( |
5837f625 DW |
2148 | struct xfs_trans *tp, |
2149 | struct xfs_inode *ip) | |
1da177e4 | 2150 | { |
5837f625 | 2151 | struct xfs_mount *mp = tp->t_mountp; |
f40aadb2 | 2152 | struct xfs_perag *pag; |
5837f625 | 2153 | struct xfs_agi *agi; |
5837f625 | 2154 | struct xfs_buf *agibp; |
86bfd375 | 2155 | xfs_agino_t next_agino; |
5837f625 DW |
2156 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); |
2157 | short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; | |
5837f625 | 2158 | int error; |
1da177e4 | 2159 | |
c4a6bf7f | 2160 | ASSERT(VFS_I(ip)->i_nlink == 0); |
c19b3b05 | 2161 | ASSERT(VFS_I(ip)->i_mode != 0); |
4664c66c | 2162 | trace_xfs_iunlink(ip); |
1da177e4 | 2163 | |
f40aadb2 DC |
2164 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
2165 | ||
5837f625 | 2166 | /* Get the agi buffer first. It ensures lock ordering on the list. */ |
f40aadb2 | 2167 | error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp); |
859d7182 | 2168 | if (error) |
f40aadb2 | 2169 | goto out; |
370c782b | 2170 | agi = agibp->b_addr; |
5e1be0fb | 2171 | |
1da177e4 | 2172 | /* |
86bfd375 DW |
2173 | * Get the index into the agi hash table for the list this inode will |
2174 | * go on. Make sure the pointer isn't garbage and that this inode | |
2175 | * isn't already on the list. | |
1da177e4 | 2176 | */ |
86bfd375 DW |
2177 | next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); |
2178 | if (next_agino == agino || | |
f40aadb2 | 2179 | !xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)) { |
8d57c216 | 2180 | xfs_buf_mark_corrupt(agibp); |
f40aadb2 DC |
2181 | error = -EFSCORRUPTED; |
2182 | goto out; | |
a5155b87 | 2183 | } |
1da177e4 | 2184 | |
86bfd375 | 2185 | if (next_agino != NULLAGINO) { |
9b247179 | 2186 | xfs_agino_t old_agino; |
f2fc16a3 | 2187 | |
1da177e4 | 2188 | /* |
f2fc16a3 DW |
2189 | * There is already another inode in the bucket, so point this |
2190 | * inode to the current head of the list. | |
1da177e4 | 2191 | */ |
f40aadb2 | 2192 | error = xfs_iunlink_update_inode(tp, ip, pag, next_agino, |
f2fc16a3 | 2193 | &old_agino); |
c319b58b | 2194 | if (error) |
f40aadb2 | 2195 | goto out; |
f2fc16a3 | 2196 | ASSERT(old_agino == NULLAGINO); |
9b247179 DW |
2197 | |
2198 | /* | |
2199 | * agino has been unlinked, add a backref from the next inode | |
2200 | * back to agino. | |
2201 | */ | |
f40aadb2 | 2202 | error = xfs_iunlink_add_backref(pag, agino, next_agino); |
9b247179 | 2203 | if (error) |
f40aadb2 | 2204 | goto out; |
1da177e4 LT |
2205 | } |
2206 | ||
9a4a5118 | 2207 | /* Point the head of the list to point to this inode. */ |
f40aadb2 DC |
2208 | error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino); |
2209 | out: | |
2210 | xfs_perag_put(pag); | |
2211 | return error; | |
1da177e4 LT |
2212 | } |
2213 | ||
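/*
 * Sketch of the effect of xfs_iunlink() above, assuming the bucket already
 * heads a chain H -> ...: the new inode's di_next_unlinked is pointed at
 * the old head H, a backref recording "ip.next_unlinked = H" is added to
 * the in-core cache, and the AGI bucket is repointed at ip, giving
 * ip -> H -> ... with an O(1) head insertion.
 */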
23ffa52c DW |
2214 | /* Return the imap, dinode pointer, and buffer for an inode. */ |
2215 | STATIC int | |
2216 | xfs_iunlink_map_ino( | |
2217 | struct xfs_trans *tp, | |
2218 | xfs_agnumber_t agno, | |
2219 | xfs_agino_t agino, | |
2220 | struct xfs_imap *imap, | |
2221 | struct xfs_dinode **dipp, | |
2222 | struct xfs_buf **bpp) | |
2223 | { | |
2224 | struct xfs_mount *mp = tp->t_mountp; | |
2225 | int error; | |
2226 | ||
2227 | imap->im_blkno = 0; | |
2228 | error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0); | |
2229 | if (error) { | |
2230 | xfs_warn(mp, "%s: xfs_imap returned error %d.", | |
2231 | __func__, error); | |
2232 | return error; | |
2233 | } | |
2234 | ||
af9dcdde | 2235 | error = xfs_imap_to_bp(mp, tp, imap, bpp); |
23ffa52c DW |
2236 | if (error) { |
2237 | xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.", | |
2238 | __func__, error); | |
2239 | return error; | |
2240 | } | |
2241 | ||
af9dcdde | 2242 | *dipp = xfs_buf_offset(*bpp, imap->im_boffset); |
23ffa52c DW |
2243 | return 0; |
2244 | } | |
2245 | ||
2246 | /* | |
2247 | * Walk the unlinked chain from @head_agino until we find the inode that | |
2248 | * points to @target_agino. Return the inode number, map, dinode pointer, | |
2249 | * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp. | |
2250 | * | |
2251 | * @tp, @pag, @head_agino, and @target_agino are input parameters. | |
2252 | * @agino, @imap, @dipp, and @bpp are all output parameters. | |
2253 | * | |
2254 | * Do not call this function if @target_agino is the head of the list. | |
2255 | */ | |
2256 | STATIC int | |
2257 | xfs_iunlink_map_prev( | |
2258 | struct xfs_trans *tp, | |
f40aadb2 | 2259 | struct xfs_perag *pag, |
23ffa52c DW |
2260 | xfs_agino_t head_agino, |
2261 | xfs_agino_t target_agino, | |
2262 | xfs_agino_t *agino, | |
2263 | struct xfs_imap *imap, | |
2264 | struct xfs_dinode **dipp, | |
f40aadb2 | 2265 | struct xfs_buf **bpp) |
23ffa52c DW |
2266 | { |
2267 | struct xfs_mount *mp = tp->t_mountp; | |
2268 | xfs_agino_t next_agino; | |
2269 | int error; | |
2270 | ||
2271 | ASSERT(head_agino != target_agino); | |
2272 | *bpp = NULL; | |
2273 | ||
9b247179 DW |
2274 | /* See if our backref cache can find it faster. */ |
2275 | *agino = xfs_iunlink_lookup_backref(pag, target_agino); | |
2276 | if (*agino != NULLAGINO) { | |
f40aadb2 DC |
2277 | error = xfs_iunlink_map_ino(tp, pag->pag_agno, *agino, imap, |
2278 | dipp, bpp); | |
9b247179 DW |
2279 | if (error) |
2280 | return error; | |
2281 | ||
2282 | if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino) | |
2283 | return 0; | |
2284 | ||
2285 | /* | |
2286 | * If we get here the cache contents were corrupt, so drop the | |
2287 | * buffer and fall back to walking the bucket list. | |
2288 | */ | |
2289 | xfs_trans_brelse(tp, *bpp); | |
2290 | *bpp = NULL; | |
2291 | WARN_ON_ONCE(1); | |
2292 | } | |
2293 | ||
f40aadb2 | 2294 | trace_xfs_iunlink_map_prev_fallback(mp, pag->pag_agno); |
9b247179 DW |
2295 | |
2296 | /* Otherwise, walk the entire bucket until we find it. */ | |
23ffa52c DW |
2297 | next_agino = head_agino; |
2298 | while (next_agino != target_agino) { | |
2299 | xfs_agino_t unlinked_agino; | |
2300 | ||
2301 | if (*bpp) | |
2302 | xfs_trans_brelse(tp, *bpp); | |
2303 | ||
2304 | *agino = next_agino; | |
f40aadb2 DC |
2305 | error = xfs_iunlink_map_ino(tp, pag->pag_agno, next_agino, imap, |
2306 | dipp, bpp); | |
23ffa52c DW |
2307 | if (error) |
2308 | return error; | |
2309 | ||
2310 | unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked); | |
2311 | /* | |
2312 | * Make sure this pointer is valid and isn't an obvious | |
2313 | * infinite loop. | |
2314 | */ | |
f40aadb2 | 2315 | if (!xfs_verify_agino(mp, pag->pag_agno, unlinked_agino) || |
23ffa52c DW |
2316 | next_agino == unlinked_agino) { |
2317 | XFS_CORRUPTION_ERROR(__func__, | |
2318 | XFS_ERRLEVEL_LOW, mp, | |
2319 | *dipp, sizeof(**dipp)); | |
2320 | error = -EFSCORRUPTED; | |
2321 | return error; | |
2322 | } | |
2323 | next_agino = unlinked_agino; | |
2324 | } | |
2325 | ||
2326 | return 0; | |
2327 | } | |
2328 | ||
1da177e4 LT |
2329 | /* |
2330 | * Pull the on-disk inode from the AGI unlinked list. | |
2331 | */ | |
2332 | STATIC int | |
2333 | xfs_iunlink_remove( | |
5837f625 | 2334 | struct xfs_trans *tp, |
f40aadb2 | 2335 | struct xfs_perag *pag, |
5837f625 | 2336 | struct xfs_inode *ip) |
1da177e4 | 2337 | { |
5837f625 DW |
2338 | struct xfs_mount *mp = tp->t_mountp; |
2339 | struct xfs_agi *agi; | |
5837f625 | 2340 | struct xfs_buf *agibp; |
5837f625 DW |
2341 | struct xfs_buf *last_ibp; |
2342 | struct xfs_dinode *last_dip = NULL; | |
5837f625 DW |
2343 | xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); |
2344 | xfs_agino_t next_agino; | |
b1d2a068 | 2345 | xfs_agino_t head_agino; |
5837f625 | 2346 | short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS; |
5837f625 | 2347 | int error; |
1da177e4 | 2348 | |
4664c66c DW |
2349 | trace_xfs_iunlink_remove(ip); |
2350 | ||
5837f625 | 2351 | /* Get the agi buffer first. It ensures lock ordering on the list. */ |
f40aadb2 | 2352 | error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp); |
5e1be0fb | 2353 | if (error) |
1da177e4 | 2354 | return error; |
370c782b | 2355 | agi = agibp->b_addr; |
5e1be0fb | 2356 | |
1da177e4 | 2357 | /* |
86bfd375 DW |
2358 | * Get the index into the agi hash table for the list this inode will |
2359 | * go on. Make sure the head pointer isn't garbage. | |
1da177e4 | 2360 | */ |
b1d2a068 | 2361 | head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]); |
f40aadb2 | 2362 | if (!xfs_verify_agino(mp, pag->pag_agno, head_agino)) { |
d2e73665 DW |
2363 | XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, |
2364 | agi, sizeof(*agi)); | |
2365 | return -EFSCORRUPTED; | |
2366 | } | |
1da177e4 | 2367 | |
b1d2a068 DW |
2368 | /* |
2369 | * Set our inode's next_unlinked pointer to NULLAGINO and then return |
2370 | * the old pointer value so that we can update whatever was previous | |
2371 | * to us in the list to point to whatever was next in the list. | |
2372 | */ | |
f40aadb2 | 2373 | error = xfs_iunlink_update_inode(tp, ip, pag, NULLAGINO, &next_agino); |
b1d2a068 DW |
2374 | if (error) |
2375 | return error; | |
9a4a5118 | 2376 | |
9b247179 DW |
2377 | /* |
2378 | * If there was a backref pointing from the next inode back to this | |
2379 | * one, remove it because we've removed this inode from the list. | |
2380 | * | |
2381 | * Later, if this inode was in the middle of the list we'll update | |
2382 | * this inode's backref to point from the next inode. | |
2383 | */ | |
2384 | if (next_agino != NULLAGINO) { | |
f40aadb2 | 2385 | error = xfs_iunlink_change_backref(pag, next_agino, NULLAGINO); |
9b247179 | 2386 | if (error) |
92a00544 | 2387 | return error; |
9b247179 DW |
2388 | } |
2389 | ||
92a00544 | 2390 | if (head_agino != agino) { |
f2fc16a3 DW |
2391 | struct xfs_imap imap; |
2392 | xfs_agino_t prev_agino; | |
2393 | ||
23ffa52c | 2394 | /* We need to search the list for the inode being freed. */ |
f40aadb2 DC |
2395 | error = xfs_iunlink_map_prev(tp, pag, head_agino, agino, |
2396 | &prev_agino, &imap, &last_dip, &last_ibp); | |
23ffa52c | 2397 | if (error) |
92a00544 | 2398 | return error; |
475ee413 | 2399 | |
f2fc16a3 | 2400 | /* Point the previous inode on the list to the next inode. */ |
f40aadb2 | 2401 | xfs_iunlink_update_dinode(tp, pag, prev_agino, last_ibp, |
f2fc16a3 | 2402 | last_dip, &imap, next_agino); |
9b247179 DW |
2403 | |
2404 | /* | |
2405 | * Now we deal with the backref for this inode. If this inode | |
2406 | * pointed at a real inode, change the backref that pointed to | |
2407 | * us to point to our old next. If this inode was the end of | |
2408 | * the list, delete the backref that pointed to us. Note that | |
2409 | * change_backref takes care of deleting the backref if | |
2410 | * next_agino is NULLAGINO. | |
2411 | */ | |
92a00544 GX |
2412 | return xfs_iunlink_change_backref(agibp->b_pag, agino, |
2413 | next_agino); | |
1da177e4 | 2414 | } |
9b247179 | 2415 | |
92a00544 | 2416 | /* Point the head of the list to the next unlinked inode. */ |
f40aadb2 | 2417 | return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, |
92a00544 | 2418 | next_agino); |
1da177e4 LT |
2419 | } |
2420 | ||
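/*
 * Sketch of the removal above for a chain H -> A -> ip -> B: the
 * predecessor A is found via the backref cache (or a bucket list walk),
 * ip's di_next_unlinked is cleared, A's di_next_unlinked is repointed at
 * B, and the in-core backrefs are updated to match.  If ip is the bucket
 * head, the AGI bucket itself is simply repointed at B instead.
 */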
5806165a | 2421 | /* |
71e3e356 DC |
2422 | * Look up the inode number specified and if it is not already marked XFS_ISTALE |
2423 | * mark it stale. We should only find clean inodes in this lookup that aren't | |
2424 | * already stale. | |
5806165a | 2425 | */ |
71e3e356 DC |
2426 | static void |
2427 | xfs_ifree_mark_inode_stale( | |
f40aadb2 | 2428 | struct xfs_perag *pag, |
5806165a | 2429 | struct xfs_inode *free_ip, |
d9fdd0ad | 2430 | xfs_ino_t inum) |
5806165a | 2431 | { |
f40aadb2 | 2432 | struct xfs_mount *mp = pag->pag_mount; |
71e3e356 | 2433 | struct xfs_inode_log_item *iip; |
5806165a DC |
2434 | struct xfs_inode *ip; |
2435 | ||
2436 | retry: | |
2437 | rcu_read_lock(); | |
2438 | ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum)); | |
2439 | ||
2440 | /* Inode not in memory, nothing to do */ | |
71e3e356 DC |
2441 | if (!ip) { |
2442 | rcu_read_unlock(); | |
2443 | return; | |
2444 | } | |
5806165a DC |
2445 | |
2446 | /* | |
2447 | * Because this is an RCU-protected lookup, we could find a recently |
2448 | * freed or even reallocated inode during the lookup. We need to check | |
2449 | * under the i_flags_lock for a valid inode here. Skip it if it is not | |
2450 | * valid, the wrong inode or stale. | |
2451 | */ | |
2452 | spin_lock(&ip->i_flags_lock); | |
718ecc50 DC |
2453 | if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE)) |
2454 | goto out_iflags_unlock; | |
5806165a DC |
2455 | |
2456 | /* | |
2457 | * Don't try to lock/unlock the current inode, but we _cannot_ skip the | |
2458 | * other inodes that we did not find in the list attached to the buffer | |
2459 | * and are not already marked stale. If we can't lock it, back off and | |
2460 | * retry. | |
2461 | */ | |
2462 | if (ip != free_ip) { | |
2463 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { | |
71e3e356 | 2464 | spin_unlock(&ip->i_flags_lock); |
5806165a DC |
2465 | rcu_read_unlock(); |
2466 | delay(1); | |
2467 | goto retry; | |
2468 | } | |
5806165a | 2469 | } |
71e3e356 | 2470 | ip->i_flags |= XFS_ISTALE; |
5806165a | 2471 | |
71e3e356 | 2472 | /* |
718ecc50 | 2473 | * If the inode is flushing, it is already attached to the buffer. All |
71e3e356 DC |
2474 | * we needed to do here is mark the inode stale so buffer IO completion |
2475 | * will remove it from the AIL. | |
2476 | */ | |
2477 | iip = ip->i_itemp; | |
718ecc50 | 2478 | if (__xfs_iflags_test(ip, XFS_IFLUSHING)) { |
71e3e356 DC |
2479 | ASSERT(!list_empty(&iip->ili_item.li_bio_list)); |
2480 | ASSERT(iip->ili_last_fields); | |
2481 | goto out_iunlock; | |
2482 | } | |
5806165a DC |
2483 | |
2484 | /* | |
48d55e2a DC |
2485 | * Inodes not attached to the buffer can be released immediately. |
2486 | * Everything else has to go through xfs_iflush_abort() on journal | |
2487 | * commit as the flock synchronises removal of the inode from the | |
2488 | * cluster buffer against inode reclaim. | |
5806165a | 2489 | */ |
718ecc50 | 2490 | if (!iip || list_empty(&iip->ili_item.li_bio_list)) |
71e3e356 | 2491 | goto out_iunlock; |
718ecc50 DC |
2492 | |
2493 | __xfs_iflags_set(ip, XFS_IFLUSHING); | |
2494 | spin_unlock(&ip->i_flags_lock); | |
2495 | rcu_read_unlock(); | |
5806165a | 2496 | |
71e3e356 | 2497 | /* we have a dirty inode in memory that has not yet been flushed. */ |
71e3e356 DC |
2498 | spin_lock(&iip->ili_lock); |
2499 | iip->ili_last_fields = iip->ili_fields; | |
2500 | iip->ili_fields = 0; | |
2501 | iip->ili_fsync_fields = 0; | |
2502 | spin_unlock(&iip->ili_lock); | |
71e3e356 DC |
2503 | ASSERT(iip->ili_last_fields); |
2504 | ||
718ecc50 DC |
2505 | if (ip != free_ip) |
2506 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
2507 | return; | |
2508 | ||
71e3e356 DC |
2509 | out_iunlock: |
2510 | if (ip != free_ip) | |
2511 | xfs_iunlock(ip, XFS_ILOCK_EXCL); | |
718ecc50 DC |
2512 | out_iflags_unlock: |
2513 | spin_unlock(&ip->i_flags_lock); | |
2514 | rcu_read_unlock(); | |
5806165a DC |
2515 | } |
2516 | ||
5b3eed75 | 2517 | /* |
0b8182db | 2518 | * A big issue when freeing the inode cluster is that we _cannot_ skip any |
5b3eed75 DC |
2519 | * inodes that are in memory - they all must be marked stale and attached to |
2520 | * the cluster buffer. | |
2521 | */ | |
f40aadb2 | 2522 | static int |
1da177e4 | 2523 | xfs_ifree_cluster( |
71e3e356 | 2524 | struct xfs_trans *tp, |
f40aadb2 DC |
2525 | struct xfs_perag *pag, |
2526 | struct xfs_inode *free_ip, | |
09b56604 | 2527 | struct xfs_icluster *xic) |
1da177e4 | 2528 | { |
71e3e356 DC |
2529 | struct xfs_mount *mp = free_ip->i_mount; |
2530 | struct xfs_ino_geometry *igeo = M_IGEO(mp); | |
2531 | struct xfs_buf *bp; | |
2532 | xfs_daddr_t blkno; | |
2533 | xfs_ino_t inum = xic->first_ino; | |
1da177e4 | 2534 | int nbufs; |
5b257b4a | 2535 | int i, j; |
3cdaa189 | 2536 | int ioffset; |
ce92464c | 2537 | int error; |
1da177e4 | 2538 | |
ef325959 | 2539 | nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster; |
1da177e4 | 2540 | |
ef325959 | 2541 | for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) { |
09b56604 BF |
2542 | /* |
2543 | * The allocation bitmap tells us which inodes of the chunk were | |
2544 | * physically allocated. Skip the cluster if an inode falls into | |
2545 | * a sparse region. | |
2546 | */ | |
3cdaa189 BF |
2547 | ioffset = inum - xic->first_ino; |
2548 | if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) { | |
ef325959 | 2549 | ASSERT(ioffset % igeo->inodes_per_cluster == 0); |
09b56604 BF |
2550 | continue; |
2551 | } | |
2552 | ||
1da177e4 LT |
2553 | blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum), |
2554 | XFS_INO_TO_AGBNO(mp, inum)); | |
2555 | ||
5b257b4a DC |
2556 | /* |
2557 | * We obtain and lock the backing buffer first in the process | |
718ecc50 DC |
2558 | * here to ensure dirty inodes attached to the buffer remain in |
2559 | * the flushing state while we mark them stale. | |
2560 | * | |
5b257b4a DC |
2561 | * If we scan the in-memory inodes first, then buffer IO can |
2562 | * complete before we get a lock on it, and hence we may fail | |
2563 | * to mark all the active inodes on the buffer stale. | |
2564 | */ | |
ce92464c DW |
2565 | error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, |
2566 | mp->m_bsize * igeo->blocks_per_cluster, | |
2567 | XBF_UNMAPPED, &bp); | |
71e3e356 | 2568 | if (error) |
ce92464c | 2569 | return error; |
b0f539de DC |
2570 | |
2571 | /* | |
2572 | * This buffer may not have been correctly initialised as we | |
2573 | * didn't read it from disk. That's not important because we are | |
2574 | * only using it to mark the buffer as stale in the log, and to |
2575 | * attach stale cached inodes on it. That means it will never be | |
2576 | * dispatched for IO. If it is, we want to know about it, and we | |
2577 | * want it to fail. We can achieve this by adding a write |
2578 | * verifier to the buffer. | |
2579 | */ | |
8c4ce794 | 2580 | bp->b_ops = &xfs_inode_buf_ops; |
b0f539de | 2581 | |
5b257b4a | 2582 | /* |
71e3e356 DC |
2583 | * Now we need to set all the cached clean inodes as XFS_ISTALE, |
2584 | * too. This requires lookups, and will skip inodes that we've | |
2585 | * already marked XFS_ISTALE. | |
1da177e4 | 2586 | */ |
71e3e356 | 2587 | for (i = 0; i < igeo->inodes_per_cluster; i++) |
f40aadb2 | 2588 | xfs_ifree_mark_inode_stale(pag, free_ip, inum + i); |
1da177e4 | 2589 | |
5b3eed75 | 2590 | xfs_trans_stale_inode_buf(tp, bp); |
1da177e4 LT |
2591 | xfs_trans_binval(tp, bp); |
2592 | } | |
2a30f36d | 2593 | return 0; |
1da177e4 LT |
2594 | } |
2595 | ||
2596 | /* | |
2597 | * This is called to return an inode to the inode free list. | |
2598 | * The inode should already be truncated to 0 length and have | |
2599 | * no pages associated with it. This routine also assumes that | |
2600 | * the inode is already a part of the transaction. | |
2601 | * | |
2602 | * The on-disk copy of the inode will have been added to the list | |
2603 | * of unlinked inodes in the AGI. We need to remove the inode from | |
2604 | * that list atomically with respect to freeing it here. | |
2605 | */ | |
2606 | int | |
2607 | xfs_ifree( | |
0e0417f3 BF |
2608 | struct xfs_trans *tp, |
2609 | struct xfs_inode *ip) | |
1da177e4 | 2610 | { |
f40aadb2 DC |
2611 | struct xfs_mount *mp = ip->i_mount; |
2612 | struct xfs_perag *pag; | |
09b56604 | 2613 | struct xfs_icluster xic = { 0 }; |
1319ebef | 2614 | struct xfs_inode_log_item *iip = ip->i_itemp; |
f40aadb2 | 2615 | int error; |
1da177e4 | 2616 | |
579aa9ca | 2617 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); |
54d7b5c1 | 2618 | ASSERT(VFS_I(ip)->i_nlink == 0); |
daf83964 | 2619 | ASSERT(ip->i_df.if_nextents == 0); |
13d2c10b | 2620 | ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode)); |
6e73a545 | 2621 | ASSERT(ip->i_nblocks == 0); |
1da177e4 | 2622 | |
f40aadb2 DC |
2623 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); |
2624 | ||
1da177e4 LT |
2625 | /* |
2626 | * Pull the on-disk inode from the AGI unlinked list. | |
2627 | */ | |
f40aadb2 | 2628 | error = xfs_iunlink_remove(tp, pag, ip); |
1baaed8f | 2629 | if (error) |
f40aadb2 | 2630 | goto out; |
1da177e4 | 2631 | |
f40aadb2 | 2632 | error = xfs_difree(tp, pag, ip->i_ino, &xic); |
1baaed8f | 2633 | if (error) |
f40aadb2 | 2634 | goto out; |
1baaed8f | 2635 | |
b2c20045 CH |
2636 | /* |
2637 | * Free any local-format data sitting around before we reset the | |
2638 | * data fork to extents format. Note that the attr fork data has | |
2639 | * already been freed by xfs_attr_inactive. | |
2640 | */ | |
f7e67b20 | 2641 | if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) { |
b2c20045 CH |
2642 | kmem_free(ip->i_df.if_u1.if_data); |
2643 | ip->i_df.if_u1.if_data = NULL; | |
2644 | ip->i_df.if_bytes = 0; | |
2645 | } | |
98c4f78d | 2646 | |
c19b3b05 | 2647 | VFS_I(ip)->i_mode = 0; /* mark incore inode as free */ |
db07349d | 2648 | ip->i_diflags = 0; |
f40aadb2 | 2649 | ip->i_diflags2 = mp->m_ino_geo.new_diflags2; |
7821ea30 | 2650 | ip->i_forkoff = 0; /* mark the attr fork not in use */ |
f7e67b20 | 2651 | ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS; |
9b3beb02 CH |
2652 | if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS)) |
2653 | xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS); | |
dc1baa71 ES |
2654 | |
2655 | /* Don't attempt to replay owner changes for a deleted inode */ | |
1319ebef DC |
2656 | spin_lock(&iip->ili_lock); |
2657 | iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER); | |
2658 | spin_unlock(&iip->ili_lock); | |
dc1baa71 | 2659 | |
1da177e4 LT |
2660 | /* |
2661 | * Bump the generation count so no one will be confused | |
2662 | * by reincarnations of this inode. | |
2663 | */ | |
9e9a2674 | 2664 | VFS_I(ip)->i_generation++; |
1da177e4 LT |
2665 | xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); |
2666 | ||
09b56604 | 2667 | if (xic.deleted) |
f40aadb2 DC |
2668 | error = xfs_ifree_cluster(tp, pag, ip, &xic); |
2669 | out: | |
2670 | xfs_perag_put(pag); | |
2a30f36d | 2671 | return error; |
1da177e4 LT |
2672 | } |
2673 | ||
1da177e4 | 2674 | /* |
60ec6783 CH |
2675 | * This is called to unpin an inode. The caller must have the inode locked |
2676 | * in at least shared mode so that the buffer cannot be subsequently pinned | |
2677 | * once someone is waiting for it to be unpinned. | |
1da177e4 | 2678 | */ |
60ec6783 | 2679 | static void |
f392e631 | 2680 | xfs_iunpin( |
60ec6783 | 2681 | struct xfs_inode *ip) |
1da177e4 | 2682 | { |
579aa9ca | 2683 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
1da177e4 | 2684 | |
4aaf15d1 DC |
2685 | trace_xfs_inode_unpin_nowait(ip, _RET_IP_); |
2686 | ||
a3f74ffb | 2687 | /* Give the log a push to start the unpinning I/O */ |
5f9b4b0d | 2688 | xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL); |
a14a348b | 2689 | |
a3f74ffb | 2690 | } |
1da177e4 | 2691 | |
f392e631 CH |
2692 | static void |
2693 | __xfs_iunpin_wait( | |
2694 | struct xfs_inode *ip) | |
2695 | { | |
2696 | wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT); | |
2697 | DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT); | |
2698 | ||
2699 | xfs_iunpin(ip); | |
2700 | ||
2701 | do { | |
21417136 | 2702 | prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE); |
f392e631 CH |
2703 | if (xfs_ipincount(ip)) |
2704 | io_schedule(); | |
2705 | } while (xfs_ipincount(ip)); | |
21417136 | 2706 | finish_wait(wq, &wait.wq_entry); |
f392e631 CH |
2707 | } |
2708 | ||
777df5af | 2709 | void |
a3f74ffb | 2710 | xfs_iunpin_wait( |
60ec6783 | 2711 | struct xfs_inode *ip) |
a3f74ffb | 2712 | { |
f392e631 CH |
2713 | if (xfs_ipincount(ip)) |
2714 | __xfs_iunpin_wait(ip); | |
1da177e4 LT |
2715 | } |
2716 | ||
27320369 DC |
2717 | /* |
2718 | * Removing an inode from the namespace involves removing the directory entry | |
2719 | * and dropping the link count on the inode. Removing the directory entry can | |
2720 | * result in locking an AGF (directory blocks were freed) and removing a link | |
2721 | * count can result in placing the inode on an unlinked list which results in | |
2722 | * locking an AGI. | |
2723 | * | |
2724 | * The big problem here is that we have an ordering constraint on AGF and AGI | |
2725 | * locking - inode allocation locks the AGI, then can allocate a new extent for | |
2726 | * new inodes, locking the AGF after the AGI. Similarly, freeing the inode | |
2727 | * removes the inode from the unlinked list, requiring that we lock the AGI | |
2728 | * first, and then freeing the inode can result in an inode chunk being freed | |
2729 | * and hence freeing disk space requiring that we lock an AGF. | |
2730 | * | |
2731 | * Hence the ordering that is imposed by other parts of the code is AGI before | |
2732 | * AGF. This means we cannot remove the directory entry before we drop the inode | |
2733 | * reference count and put it on the unlinked list as this results in a lock | |
2734 | * order of AGF then AGI, and this can deadlock against inode allocation and | |
2735 | * freeing. Therefore we must drop the link counts before we remove the | |
2736 | * directory entry. | |
2737 | * | |
2738 | * This is still safe from a transactional point of view - it is not until we | |
310a75a3 | 2739 | * get to xfs_defer_finish() that we have the possibility of multiple |
27320369 DC |
2740 | * transactions in this operation. Hence as long as we remove the directory |
2741 | * entry and drop the link count in the first transaction of the remove | |
2742 | * operation, there are no transactional constraints on the ordering here. | |
2743 | */ | |
c24b5dfa DC |
2744 | int |
2745 | xfs_remove( | |
2746 | xfs_inode_t *dp, | |
2747 | struct xfs_name *name, | |
2748 | xfs_inode_t *ip) | |
2749 | { | |
2750 | xfs_mount_t *mp = dp->i_mount; | |
2751 | xfs_trans_t *tp = NULL; | |
c19b3b05 | 2752 | int is_dir = S_ISDIR(VFS_I(ip)->i_mode); |
871b9316 | 2753 | int dontcare; |
c24b5dfa | 2754 | int error = 0; |
c24b5dfa | 2755 | uint resblks; |
c24b5dfa DC |
2756 | |
2757 | trace_xfs_remove(dp, name); | |
2758 | ||
75c8c50f | 2759 | if (xfs_is_shutdown(mp)) |
2451337d | 2760 | return -EIO; |
c24b5dfa | 2761 | |
c14cfcca | 2762 | error = xfs_qm_dqattach(dp); |
c24b5dfa DC |
2763 | if (error) |
2764 | goto std_return; | |
2765 | ||
c14cfcca | 2766 | error = xfs_qm_dqattach(ip); |
c24b5dfa DC |
2767 | if (error) |
2768 | goto std_return; | |
2769 | ||
c24b5dfa | 2770 | /* |
871b9316 DW |
2771 | * We try to get the real space reservation first, allowing for |
2772 | * directory btree deletion(s) implying possible bmap insert(s). If we | |
2773 | * can't get the space reservation then we use 0 instead; the directory | 
2774 | * code then avoids any bmap btree insert(s) by trimming the LAST block | 
2775 | * from the directory if such an insert would otherwise be needed. | 
2776 | * | |
2777 | * Ignore EDQUOT and ENOSPC being returned via nospace_error because | |
2778 | * the directory code can handle a reservationless update and we don't | |
2779 | * want to prevent a user from trying to free space by deleting things. | |
c24b5dfa DC |
2780 | */ |
2781 | resblks = XFS_REMOVE_SPACE_RES(mp); | |
871b9316 DW |
2782 | error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks, |
2783 | &tp, &dontcare); | |
c24b5dfa | 2784 | if (error) { |
2451337d | 2785 | ASSERT(error != -ENOSPC); |
253f4911 | 2786 | goto std_return; |
c24b5dfa DC |
2787 | } |
2788 | ||
c24b5dfa DC |
2789 | /* |
2790 | * If we're removing a directory perform some additional validation. | |
2791 | */ | |
2792 | if (is_dir) { | |
54d7b5c1 DC |
2793 | ASSERT(VFS_I(ip)->i_nlink >= 2); |
2794 | if (VFS_I(ip)->i_nlink != 2) { | |
2451337d | 2795 | error = -ENOTEMPTY; |
c24b5dfa DC |
2796 | goto out_trans_cancel; |
2797 | } | |
2798 | if (!xfs_dir_isempty(ip)) { | |
2451337d | 2799 | error = -ENOTEMPTY; |
c24b5dfa DC |
2800 | goto out_trans_cancel; |
2801 | } | |
c24b5dfa | 2802 | |
27320369 | 2803 | /* Drop the link from ip's "..". */ |
c24b5dfa DC |
2804 | error = xfs_droplink(tp, dp); |
2805 | if (error) | |
27320369 | 2806 | goto out_trans_cancel; |
c24b5dfa | 2807 | |
27320369 | 2808 | /* Drop the "." link from ip to self. */ |
c24b5dfa DC |
2809 | error = xfs_droplink(tp, ip); |
2810 | if (error) | |
27320369 | 2811 | goto out_trans_cancel; |
5838d035 DW |
2812 | |
2813 | /* | |
2814 | * Point the unlinked child directory's ".." entry to the root | |
2815 | * directory to eliminate back-references to inodes that may | |
2816 | * get freed before the child directory is closed. If the fs | |
2817 | * gets shrunk, this can lead to dirent inode validation errors. | |
2818 | */ | |
2819 | if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) { | |
2820 | error = xfs_dir_replace(tp, ip, &xfs_name_dotdot, | |
2821 | tp->t_mountp->m_sb.sb_rootino, 0); | |
2822 | if (error) | |
2823 | goto out_trans_cancel; | 
2824 | } | |
c24b5dfa DC |
2825 | } else { |
2826 | /* | |
2827 | * When removing a non-directory we need to log the parent | |
2828 | * inode here. For a directory this is done implicitly | |
2829 | * by the xfs_droplink call for the ".." entry. | |
2830 | */ | |
2831 | xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE); | |
2832 | } | |
27320369 | 2833 | xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
c24b5dfa | 2834 | |
27320369 | 2835 | /* Drop the link from dp to ip. */ |
c24b5dfa DC |
2836 | error = xfs_droplink(tp, ip); |
2837 | if (error) | |
27320369 | 2838 | goto out_trans_cancel; |
c24b5dfa | 2839 | |
381eee69 | 2840 | error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks); |
27320369 | 2841 | if (error) { |
2451337d | 2842 | ASSERT(error != -ENOENT); |
c8eac49e | 2843 | goto out_trans_cancel; |
27320369 DC |
2844 | } |
2845 | ||
c24b5dfa DC |
2846 | /* |
2847 | * If this is a synchronous mount, make sure that the | |
2848 | * remove transaction goes to disk before returning to | |
2849 | * the user. | |
2850 | */ | |
0560f31a | 2851 | if (xfs_has_wsync(mp) || xfs_has_dirsync(mp)) |
c24b5dfa DC |
2852 | xfs_trans_set_sync(tp); |
2853 | ||
70393313 | 2854 | error = xfs_trans_commit(tp); |
c24b5dfa DC |
2855 | if (error) |
2856 | goto std_return; | |
2857 | ||
2cd2ef6a | 2858 | if (is_dir && xfs_inode_is_filestream(ip)) |
c24b5dfa DC |
2859 | xfs_filestream_deassociate(ip); |
2860 | ||
2861 | return 0; | |
2862 | ||
c24b5dfa | 2863 | out_trans_cancel: |
4906e215 | 2864 | xfs_trans_cancel(tp); |
c24b5dfa DC |
2865 | std_return: |
2866 | return error; | |
2867 | } | |
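To make the AGI-before-AGF ordering described above xfs_remove() concrete, the core of the first transaction boils down to this sequence (illustrative only; quota handling, validation and sync behaviour omitted):

	/* Safe order: dropping the link may lock the AGI (unlinked list)... */
	error = xfs_droplink(tp, ip);
	if (!error)
		/* ...and only then may removing the name free dir blocks and lock an AGF. */
		error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);

Reversing these two steps would take an AGF before an AGI and could deadlock against inode allocation, which always locks the AGI first.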
2868 | ||
f6bba201 DC |
2869 | /* |
2870 | * Enter all inodes for a rename transaction into a sorted array. | |
2871 | */ | |
95afcf5c | 2872 | #define __XFS_SORT_INODES 5 |
f6bba201 DC |
2873 | STATIC void |
2874 | xfs_sort_for_rename( | |
95afcf5c DC |
2875 | struct xfs_inode *dp1, /* in: old (source) directory inode */ |
2876 | struct xfs_inode *dp2, /* in: new (target) directory inode */ | |
2877 | struct xfs_inode *ip1, /* in: inode of old entry */ | |
2878 | struct xfs_inode *ip2, /* in: inode of new entry */ | |
2879 | struct xfs_inode *wip, /* in: whiteout inode */ | |
2880 | struct xfs_inode **i_tab,/* out: sorted array of inodes */ | |
2881 | int *num_inodes) /* in/out: inodes in array */ | |
f6bba201 | 2882 | { |
f6bba201 DC |
2883 | int i, j; |
2884 | ||
95afcf5c DC |
2885 | ASSERT(*num_inodes == __XFS_SORT_INODES); |
2886 | memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *)); | |
2887 | ||
f6bba201 DC |
2888 | /* |
2889 | * i_tab contains a list of pointers to inodes. We initialize | |
2890 | * the table here & we'll sort it. We will then use it to | |
2891 | * order the acquisition of the inode locks. | |
2892 | * | |
2893 | * Note that the table may contain duplicates. e.g., dp1 == dp2. | |
2894 | */ | |
95afcf5c DC |
2895 | i = 0; |
2896 | i_tab[i++] = dp1; | |
2897 | i_tab[i++] = dp2; | |
2898 | i_tab[i++] = ip1; | |
2899 | if (ip2) | |
2900 | i_tab[i++] = ip2; | |
2901 | if (wip) | |
2902 | i_tab[i++] = wip; | |
2903 | *num_inodes = i; | |
f6bba201 DC |
2904 | |
2905 | /* | |
2906 | * Sort the elements via bubble sort. (Remember, there are at | |
95afcf5c | 2907 | * most 5 elements to sort, so this is adequate.) |
f6bba201 DC |
2908 | */ |
2909 | for (i = 0; i < *num_inodes; i++) { | |
2910 | for (j = 1; j < *num_inodes; j++) { | |
2911 | if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) { | |
95afcf5c | 2912 | struct xfs_inode *temp = i_tab[j]; |
f6bba201 DC |
2913 | i_tab[j] = i_tab[j-1]; |
2914 | i_tab[j-1] = temp; | |
2915 | } | |
2916 | } | |
2917 | } | |
2918 | } | |
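Sorting by inode number gives every rename the same lock order regardless of argument order, so two concurrent renames touching the same inodes cannot ABBA-deadlock. A sketch of the caller side, mirroring xfs_rename() below:

	struct xfs_inode *inodes[__XFS_SORT_INODES];
	int num_inodes = __XFS_SORT_INODES;

	xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
			    inodes, &num_inodes);
	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);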
2919 | ||
310606b0 DC |
2920 | static int |
2921 | xfs_finish_rename( | |
c9cfdb38 | 2922 | struct xfs_trans *tp) |
310606b0 | 2923 | { |
310606b0 DC |
2924 | /* |
2925 | * If this is a synchronous mount, make sure that the rename transaction | |
2926 | * goes to disk before returning to the user. | |
2927 | */ | |
0560f31a | 2928 | if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp)) |
310606b0 DC |
2929 | xfs_trans_set_sync(tp); |
2930 | ||
70393313 | 2931 | return xfs_trans_commit(tp); |
310606b0 DC |
2932 | } |
2933 | ||
d31a1825 CM |
2934 | /* |
2935 | * xfs_cross_rename() | |
2936 | * | |
0145225e | 2937 | * responsible for handling RENAME_EXCHANGE flag in renameat2() syscall |
d31a1825 CM |
2938 | */ |
2939 | STATIC int | |
2940 | xfs_cross_rename( | |
2941 | struct xfs_trans *tp, | |
2942 | struct xfs_inode *dp1, | |
2943 | struct xfs_name *name1, | |
2944 | struct xfs_inode *ip1, | |
2945 | struct xfs_inode *dp2, | |
2946 | struct xfs_name *name2, | |
2947 | struct xfs_inode *ip2, | |
d31a1825 CM |
2948 | int spaceres) |
2949 | { | |
2950 | int error = 0; | |
2951 | int ip1_flags = 0; | |
2952 | int ip2_flags = 0; | |
2953 | int dp2_flags = 0; | |
2954 | ||
2955 | /* Swap inode number for dirent in first parent */ | |
381eee69 | 2956 | error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres); |
d31a1825 | 2957 | if (error) |
eeacd321 | 2958 | goto out_trans_abort; |
d31a1825 CM |
2959 | |
2960 | /* Swap inode number for dirent in second parent */ | |
381eee69 | 2961 | error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres); |
d31a1825 | 2962 | if (error) |
eeacd321 | 2963 | goto out_trans_abort; |
d31a1825 CM |
2964 | |
2965 | /* | |
2966 | * If we're renaming one or more directories across different parents, | |
2967 | * update the respective ".." entries (and link counts) to match the new | |
2968 | * parents. | |
2969 | */ | |
2970 | if (dp1 != dp2) { | |
2971 | dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; | |
2972 | ||
c19b3b05 | 2973 | if (S_ISDIR(VFS_I(ip2)->i_mode)) { |
d31a1825 | 2974 | error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot, |
381eee69 | 2975 | dp1->i_ino, spaceres); |
d31a1825 | 2976 | if (error) |
eeacd321 | 2977 | goto out_trans_abort; |
d31a1825 CM |
2978 | |
2979 | /* transfer ip2 ".." reference to dp1 */ | |
c19b3b05 | 2980 | if (!S_ISDIR(VFS_I(ip1)->i_mode)) { |
d31a1825 CM |
2981 | error = xfs_droplink(tp, dp2); |
2982 | if (error) | |
eeacd321 | 2983 | goto out_trans_abort; |
91083269 | 2984 | xfs_bumplink(tp, dp1); |
d31a1825 CM |
2985 | } |
2986 | ||
2987 | /* | |
2988 | * Although ip1 isn't changed here, userspace needs | 
2989 | * to be notified about the change, so that applications | 
2990 | * relying on it (like backup tools) can properly | 
2991 | * pick up the change. | 
2992 | */ | |
2993 | ip1_flags |= XFS_ICHGTIME_CHG; | |
2994 | ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; | |
2995 | } | |
2996 | ||
c19b3b05 | 2997 | if (S_ISDIR(VFS_I(ip1)->i_mode)) { |
d31a1825 | 2998 | error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot, |
381eee69 | 2999 | dp2->i_ino, spaceres); |
d31a1825 | 3000 | if (error) |
eeacd321 | 3001 | goto out_trans_abort; |
d31a1825 CM |
3002 | |
3003 | /* transfer ip1 ".." reference to dp2 */ | |
c19b3b05 | 3004 | if (!S_ISDIR(VFS_I(ip2)->i_mode)) { |
d31a1825 CM |
3005 | error = xfs_droplink(tp, dp1); |
3006 | if (error) | |
eeacd321 | 3007 | goto out_trans_abort; |
91083269 | 3008 | xfs_bumplink(tp, dp2); |
d31a1825 CM |
3009 | } |
3010 | ||
3011 | /* | |
3012 | * Although ip2 isn't changed here, userspace needs | 
3013 | * to be notified about the change, so that applications | 
3014 | * relying on it (like backup tools) can properly | 
3015 | * pick up the change. | 
3016 | */ | |
3017 | ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG; | |
3018 | ip2_flags |= XFS_ICHGTIME_CHG; | |
3019 | } | |
3020 | } | |
3021 | ||
3022 | if (ip1_flags) { | |
3023 | xfs_trans_ichgtime(tp, ip1, ip1_flags); | |
3024 | xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE); | |
3025 | } | |
3026 | if (ip2_flags) { | |
3027 | xfs_trans_ichgtime(tp, ip2, ip2_flags); | |
3028 | xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE); | |
3029 | } | |
3030 | if (dp2_flags) { | |
3031 | xfs_trans_ichgtime(tp, dp2, dp2_flags); | |
3032 | xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE); | |
3033 | } | |
3034 | xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | |
3035 | xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE); | |
c9cfdb38 | 3036 | return xfs_finish_rename(tp); |
eeacd321 DC |
3037 | |
3038 | out_trans_abort: | |
4906e215 | 3039 | xfs_trans_cancel(tp); |
d31a1825 CM |
3040 | return error; |
3041 | } | |
3042 | ||
7dcf5c3e DC |
3043 | /* |
3044 | * xfs_rename_alloc_whiteout() | |
3045 | * | |
b63da6c8 | 3046 | * Return a referenced, unlinked, unlocked inode that can be used as a |
7dcf5c3e DC |
3047 | * whiteout in a rename transaction. We use a tmpfile inode here so that if we |
3048 | * crash between allocating the inode and linking it into the rename transaction, | 
3049 | * recovery will free the inode and we won't leak it. | 
3050 | */ | |
3051 | static int | |
3052 | xfs_rename_alloc_whiteout( | |
f736d93d | 3053 | struct user_namespace *mnt_userns, |
7dcf5c3e DC |
3054 | struct xfs_inode *dp, |
3055 | struct xfs_inode **wip) | |
3056 | { | |
3057 | struct xfs_inode *tmpfile; | |
3058 | int error; | |
3059 | ||
f736d93d CH |
3060 | error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE, |
3061 | &tmpfile); | |
7dcf5c3e DC |
3062 | if (error) |
3063 | return error; | |
3064 | ||
22419ac9 BF |
3065 | /* |
3066 | * Prepare the tmpfile inode as if it were created through the VFS. | |
c4a6bf7f DW |
3067 | * Complete the inode setup and flag it as linkable. nlink is already |
3068 | * zero, so we can skip the drop_nlink. | |
22419ac9 | 3069 | */ |
2b3d1d41 | 3070 | xfs_setup_iops(tmpfile); |
7dcf5c3e DC |
3071 | xfs_finish_inode_setup(tmpfile); |
3072 | VFS_I(tmpfile)->i_state |= I_LINKABLE; | |
3073 | ||
3074 | *wip = tmpfile; | |
3075 | return 0; | |
3076 | } | |
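Once allocated, the whiteout follows the lifecycle implemented further down in xfs_rename(); a condensed sketch of those steps (error handling omitted, and the perag reference is assumed to come from xfs_perag_get() as in the real code):

	xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
	error = xfs_iunlink_remove(tp, pag, wip);	/* take it off the unlinked list */
	xfs_bumplink(tp, wip);				/* it now has a real link... */
	VFS_I(wip)->i_state &= ~I_LINKABLE;		/* ...so it is no longer a tmpfile */
	error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino, spaceres);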
3077 | ||
f6bba201 DC |
3078 | /* |
3079 | * xfs_rename | |
3080 | */ | |
3081 | int | |
3082 | xfs_rename( | |
f736d93d | 3083 | struct user_namespace *mnt_userns, |
7dcf5c3e DC |
3084 | struct xfs_inode *src_dp, |
3085 | struct xfs_name *src_name, | |
3086 | struct xfs_inode *src_ip, | |
3087 | struct xfs_inode *target_dp, | |
3088 | struct xfs_name *target_name, | |
3089 | struct xfs_inode *target_ip, | |
3090 | unsigned int flags) | |
f6bba201 | 3091 | { |
7dcf5c3e DC |
3092 | struct xfs_mount *mp = src_dp->i_mount; |
3093 | struct xfs_trans *tp; | |
7dcf5c3e DC |
3094 | struct xfs_inode *wip = NULL; /* whiteout inode */ |
3095 | struct xfs_inode *inodes[__XFS_SORT_INODES]; | |
6da1b4b1 | 3096 | int i; |
7dcf5c3e | 3097 | int num_inodes = __XFS_SORT_INODES; |
2b93681f | 3098 | bool new_parent = (src_dp != target_dp); |
c19b3b05 | 3099 | bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode); |
7dcf5c3e | 3100 | int spaceres; |
41667260 DW |
3101 | bool retried = false; |
3102 | int error, nospace_error = 0; | |
f6bba201 DC |
3103 | |
3104 | trace_xfs_rename(src_dp, target_dp, src_name, target_name); | |
3105 | ||
eeacd321 DC |
3106 | if ((flags & RENAME_EXCHANGE) && !target_ip) |
3107 | return -EINVAL; | |
3108 | ||
7dcf5c3e DC |
3109 | /* |
3110 | * If we are doing a whiteout operation, allocate the whiteout inode | |
3111 | * we will be placing at the target and ensure the type is set | |
3112 | * appropriately. | |
3113 | */ | |
3114 | if (flags & RENAME_WHITEOUT) { | |
f736d93d | 3115 | error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip); |
7dcf5c3e DC |
3116 | if (error) |
3117 | return error; | |
3118 | ||
3119 | /* setup target dirent info as whiteout */ | |
3120 | src_name->type = XFS_DIR3_FT_CHRDEV; | |
3121 | } | |
f6bba201 | 3122 | |
7dcf5c3e | 3123 | xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip, |
f6bba201 DC |
3124 | inodes, &num_inodes); |
3125 | ||
41667260 DW |
3126 | retry: |
3127 | nospace_error = 0; | |
f6bba201 | 3128 | spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len); |
253f4911 | 3129 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp); |
2451337d | 3130 | if (error == -ENOSPC) { |
41667260 | 3131 | nospace_error = error; |
f6bba201 | 3132 | spaceres = 0; |
253f4911 CH |
3133 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0, |
3134 | &tp); | |
f6bba201 | 3135 | } |
445883e8 | 3136 | if (error) |
253f4911 | 3137 | goto out_release_wip; |
f6bba201 DC |
3138 | |
3139 | /* | |
3140 | * Attach the dquots to the inodes | |
3141 | */ | |
3142 | error = xfs_qm_vop_rename_dqattach(inodes); | |
445883e8 DC |
3143 | if (error) |
3144 | goto out_trans_cancel; | |
f6bba201 DC |
3145 | |
3146 | /* | |
3147 | * Lock all the participating inodes. Depending upon whether | |
3148 | * the target_name exists in the target directory, and | |
3149 | * whether the target directory is the same as the source | |
3150 | * directory, we can lock from 2 to 4 inodes. | |
3151 | */ | |
3152 | xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL); | |
3153 | ||
3154 | /* | |
3155 | * Join all the inodes to the transaction. From this point on, | |
3156 | * we can rely on either trans_commit or trans_cancel to unlock | |
3157 | * them. | |
3158 | */ | |
65523218 | 3159 | xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL); |
f6bba201 | 3160 | if (new_parent) |
65523218 | 3161 | xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL); |
f6bba201 DC |
3162 | xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL); |
3163 | if (target_ip) | |
3164 | xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL); | |
7dcf5c3e DC |
3165 | if (wip) |
3166 | xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL); | |
f6bba201 DC |
3167 | |
3168 | /* | |
3169 | * If we are using project inheritance, we only allow renames | |
3170 | * into our tree when the project IDs are the same; else the | |
3171 | * tree quota mechanism would be circumvented. | |
3172 | */ | |
db07349d | 3173 | if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) && |
ceaf603c | 3174 | target_dp->i_projid != src_ip->i_projid)) { |
2451337d | 3175 | error = -EXDEV; |
445883e8 | 3176 | goto out_trans_cancel; |
f6bba201 DC |
3177 | } |
3178 | ||
eeacd321 DC |
3179 | /* RENAME_EXCHANGE is unique from here on. */ |
3180 | if (flags & RENAME_EXCHANGE) | |
3181 | return xfs_cross_rename(tp, src_dp, src_name, src_ip, | |
3182 | target_dp, target_name, target_ip, | |
f16dea54 | 3183 | spaceres); |
d31a1825 | 3184 | |
41667260 DW |
3185 | /* |
3186 | * Try to reserve quota to handle an expansion of the target directory. | |
3187 | * We'll allow the rename to continue in reservationless mode if we hit | |
3188 | * a space usage constraint. If we trigger reservationless mode, save | |
3189 | * the errno if there isn't any free space in the target directory. | |
3190 | */ | |
3191 | if (spaceres != 0) { | |
3192 | error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres, | |
3193 | 0, false); | |
3194 | if (error == -EDQUOT || error == -ENOSPC) { | |
3195 | if (!retried) { | |
3196 | xfs_trans_cancel(tp); | |
3197 | xfs_blockgc_free_quota(target_dp, 0); | |
3198 | retried = true; | |
3199 | goto retry; | |
3200 | } | |
3201 | ||
3202 | nospace_error = error; | |
3203 | spaceres = 0; | |
3204 | error = 0; | |
3205 | } | |
3206 | if (error) | |
3207 | goto out_trans_cancel; | |
3208 | } | |
3209 | ||
f6bba201 | 3210 | /* |
bc56ad8c | 3211 | * Check for expected errors before we dirty the transaction |
3212 | * so we can return an error without a transaction abort. | |
02092a2f CB |
3213 | * |
3214 | * Extent count overflow check: | |
3215 | * | |
3216 | * From the perspective of src_dp, a rename operation is essentially a | |
3217 | * directory entry remove operation. Hence the only place where we check | |
3218 | * for extent count overflow for src_dp is in | |
3219 | * xfs_bmap_del_extent_real(). xfs_bmap_del_extent_real() returns | |
3220 | * -ENOSPC when it detects a possible extent count overflow and in | |
3221 | * response, the higher layers of directory handling code do the | |
3222 | * following: | |
3223 | * 1. Data/Free blocks: XFS lets these blocks linger until a | |
3224 | * future remove operation removes them. | |
3225 | * 2. Dabtree blocks: XFS swaps the blocks with the last block in the | |
3226 | * Leaf space and unmaps the last block. | |
3227 | * | |
3228 | * For target_dp, there are two cases depending on whether the | |
3229 | * destination directory entry exists or not. | |
3230 | * | |
3231 | * When destination directory entry does not exist (i.e. target_ip == | |
3232 | * NULL), extent count overflow check is performed only when transaction | |
3233 | * has a non-zero sized space reservation associated with it. With a | |
3234 | * zero-sized space reservation, XFS allows a rename operation to | |
3235 | * continue only when the directory has sufficient free space in its | |
3236 | * data/leaf/free space blocks to hold the new entry. | |
3237 | * | |
3238 | * When destination directory entry exists (i.e. target_ip != NULL), all | |
3239 | * we need to do is change the inode number associated with the already | |
3240 | * existing entry. Hence there is no need to perform an extent count | |
3241 | * overflow check. | |
f6bba201 DC |
3242 | */ |
3243 | if (target_ip == NULL) { | |
3244 | /* | |
3245 | * If there's no space reservation, check the entry will | |
3246 | * fit before actually inserting it. | |
3247 | */ | |
94f3cad5 ES |
3248 | if (!spaceres) { |
3249 | error = xfs_dir_canenter(tp, target_dp, target_name); | |
3250 | if (error) | |
445883e8 | 3251 | goto out_trans_cancel; |
02092a2f CB |
3252 | } else { |
3253 | error = xfs_iext_count_may_overflow(target_dp, | |
3254 | XFS_DATA_FORK, | |
3255 | XFS_IEXT_DIR_MANIP_CNT(mp)); | |
3256 | if (error) | |
3257 | goto out_trans_cancel; | |
94f3cad5 | 3258 | } |
bc56ad8c | 3259 | } else { |
3260 | /* | |
3261 | * If the target exists and it's a directory, check whether | 
3262 | * it can be destroyed. | |
3263 | */ | |
3264 | if (S_ISDIR(VFS_I(target_ip)->i_mode) && | |
3265 | (!xfs_dir_isempty(target_ip) || | |
3266 | (VFS_I(target_ip)->i_nlink > 2))) { | |
3267 | error = -EEXIST; | |
3268 | goto out_trans_cancel; | |
3269 | } | |
3270 | } | |
3271 | ||
6da1b4b1 DW |
3272 | /* |
3273 | * Lock the AGI buffers we need to handle bumping the nlink of the | |
3274 | * whiteout inode off the unlinked list and to handle dropping the | |
3275 | * nlink of the target inode. Per locking order rules, do this in | |
3276 | * increasing AG order and before directory block allocation tries to | |
3277 | * grab AGFs because we grab AGIs before AGFs. | |
3278 | * | |
3279 | * The (vfs) caller must ensure that if src is a directory then | |
3280 | * target_ip is either null or an empty directory. | |
3281 | */ | |
3282 | for (i = 0; i < num_inodes && inodes[i] != NULL; i++) { | |
3283 | if (inodes[i] == wip || | |
3284 | (inodes[i] == target_ip && | |
3285 | (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) { | |
3286 | struct xfs_buf *bp; | |
3287 | xfs_agnumber_t agno; | |
3288 | ||
3289 | agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino); | |
3290 | error = xfs_read_agi(mp, tp, agno, &bp); | |
3291 | if (error) | |
3292 | goto out_trans_cancel; | |
3293 | } | |
3294 | } | |
3295 | ||
bc56ad8c | 3296 | /* |
3297 | * Directory entry creation below may acquire the AGF. Remove | |
3298 | * the whiteout from the unlinked list first to preserve correct | |
3299 | * AGI/AGF locking order. This dirties the transaction so failures | |
3300 | * after this point will abort and log recovery will clean up the | |
3301 | * mess. | |
3302 | * | |
3303 | * For whiteouts, we need to bump the link count on the whiteout | |
3304 | * inode. After this point we have a real link, so clear the tmpfile | 
3305 | * state flag from the inode so it doesn't accidentally get misused | |
3306 | * in future. | |
3307 | */ | |
3308 | if (wip) { | |
f40aadb2 DC |
3309 | struct xfs_perag *pag; |
3310 | ||
bc56ad8c | 3311 | ASSERT(VFS_I(wip)->i_nlink == 0); |
f40aadb2 DC |
3312 | |
3313 | pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino)); | |
3314 | error = xfs_iunlink_remove(tp, pag, wip); | |
3315 | xfs_perag_put(pag); | |
bc56ad8c | 3316 | if (error) |
3317 | goto out_trans_cancel; | |
3318 | ||
3319 | xfs_bumplink(tp, wip); | |
bc56ad8c | 3320 | VFS_I(wip)->i_state &= ~I_LINKABLE; |
3321 | } | |
3322 | ||
3323 | /* | |
3324 | * Set up the target. | |
3325 | */ | |
3326 | if (target_ip == NULL) { | |
f6bba201 DC |
3327 | /* |
3328 | * If target does not exist and the rename crosses | |
3329 | * directories, adjust the target directory link count | |
3330 | * to account for the ".." reference from the new entry. | |
3331 | */ | |
3332 | error = xfs_dir_createname(tp, target_dp, target_name, | |
381eee69 | 3333 | src_ip->i_ino, spaceres); |
f6bba201 | 3334 | if (error) |
c8eac49e | 3335 | goto out_trans_cancel; |
f6bba201 DC |
3336 | |
3337 | xfs_trans_ichgtime(tp, target_dp, | |
3338 | XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | |
3339 | ||
3340 | if (new_parent && src_is_directory) { | |
91083269 | 3341 | xfs_bumplink(tp, target_dp); |
f6bba201 DC |
3342 | } |
3343 | } else { /* target_ip != NULL */ | |
f6bba201 DC |
3344 | /* |
3345 | * Link the source inode under the target name. | |
3346 | * If the source inode is a directory and we are moving | |
3347 | * it across directories, its ".." entry will be | |
3348 | * inconsistent until we replace that down below. | |
3349 | * | |
3350 | * In case there is already an entry with the same | |
3351 | * name at the destination directory, remove it first. | |
3352 | */ | |
3353 | error = xfs_dir_replace(tp, target_dp, target_name, | |
381eee69 | 3354 | src_ip->i_ino, spaceres); |
f6bba201 | 3355 | if (error) |
c8eac49e | 3356 | goto out_trans_cancel; |
f6bba201 DC |
3357 | |
3358 | xfs_trans_ichgtime(tp, target_dp, | |
3359 | XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); | |
3360 | ||
3361 | /* | |
3362 | * Decrement the link count on the target since the target | |
3363 | * dir no longer points to it. | |
3364 | */ | |
3365 | error = xfs_droplink(tp, target_ip); | |
3366 | if (error) | |
c8eac49e | 3367 | goto out_trans_cancel; |
f6bba201 DC |
3368 | |
3369 | if (src_is_directory) { | |
3370 | /* | |
3371 | * Drop the link from the old "." entry. | |
3372 | */ | |
3373 | error = xfs_droplink(tp, target_ip); | |
3374 | if (error) | |
c8eac49e | 3375 | goto out_trans_cancel; |
f6bba201 DC |
3376 | } |
3377 | } /* target_ip != NULL */ | |
3378 | ||
3379 | /* | |
3380 | * Remove the source. | |
3381 | */ | |
3382 | if (new_parent && src_is_directory) { | |
3383 | /* | |
3384 | * Rewrite the ".." entry to point to the new | |
3385 | * directory. | |
3386 | */ | |
3387 | error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot, | |
381eee69 | 3388 | target_dp->i_ino, spaceres); |
2451337d | 3389 | ASSERT(error != -EEXIST); |
f6bba201 | 3390 | if (error) |
c8eac49e | 3391 | goto out_trans_cancel; |
f6bba201 DC |
3392 | } |
3393 | ||
3394 | /* | |
3395 | * We always want to hit the ctime on the source inode. | |
3396 | * | |
3397 | * This isn't strictly required by the standards since the source | |
3398 | * inode isn't really being changed, but old unix file systems did | |
3399 | * it and some incremental backup programs won't work without it. | |
3400 | */ | |
3401 | xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG); | |
3402 | xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE); | |
3403 | ||
3404 | /* | |
3405 | * Adjust the link count on src_dp. This is necessary when | |
3406 | * renaming a directory, either within one parent when | |
3407 | * the target existed, or across two parent directories. | |
3408 | */ | |
3409 | if (src_is_directory && (new_parent || target_ip != NULL)) { | |
3410 | ||
3411 | /* | |
3412 | * Decrement link count on src_directory since the | |
3413 | * entry that's moved no longer points to it. | |
3414 | */ | |
3415 | error = xfs_droplink(tp, src_dp); | |
3416 | if (error) | |
c8eac49e | 3417 | goto out_trans_cancel; |
f6bba201 DC |
3418 | } |
3419 | ||
7dcf5c3e DC |
3420 | /* |
3421 | * For whiteouts, we only need to update the source dirent with the | |
3422 | * inode number of the whiteout inode rather than removing it | |
3423 | * altogether. | |
3424 | */ | |
3425 | if (wip) { | |
3426 | error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino, | |
381eee69 | 3427 | spaceres); |
02092a2f CB |
3428 | } else { |
3429 | /* | |
3430 | * NOTE: We don't need to check for extent count overflow here | |
3431 | * because the dir remove name code will leave the dir block in | |
3432 | * place if the extent count would overflow. | |
3433 | */ | |
7dcf5c3e | 3434 | error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino, |
381eee69 | 3435 | spaceres); |
02092a2f CB |
3436 | } |
3437 | ||
f6bba201 | 3438 | if (error) |
c8eac49e | 3439 | goto out_trans_cancel; |
f6bba201 | 3440 | |
f6bba201 DC |
3441 | xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); |
3442 | xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE); | |
3443 | if (new_parent) | |
3444 | xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE); | |
f6bba201 | 3445 | |
c9cfdb38 | 3446 | error = xfs_finish_rename(tp); |
7dcf5c3e | 3447 | if (wip) |
44a8736b | 3448 | xfs_irele(wip); |
7dcf5c3e | 3449 | return error; |
f6bba201 | 3450 | |
445883e8 | 3451 | out_trans_cancel: |
4906e215 | 3452 | xfs_trans_cancel(tp); |
253f4911 | 3453 | out_release_wip: |
7dcf5c3e | 3454 | if (wip) |
44a8736b | 3455 | xfs_irele(wip); |
41667260 DW |
3456 | if (error == -ENOSPC && nospace_error) |
3457 | error = nospace_error; | |
f6bba201 DC |
3458 | return error; |
3459 | } | |
3460 | ||
e6187b34 DC |
3461 | static int |
3462 | xfs_iflush( | |
93848a99 CH |
3463 | struct xfs_inode *ip, |
3464 | struct xfs_buf *bp) | |
1da177e4 | 3465 | { |
93848a99 CH |
3466 | struct xfs_inode_log_item *iip = ip->i_itemp; |
3467 | struct xfs_dinode *dip; | |
3468 | struct xfs_mount *mp = ip->i_mount; | |
f2019299 | 3469 | int error; |
1da177e4 | 3470 | |
579aa9ca | 3471 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); |
718ecc50 | 3472 | ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING)); |
f7e67b20 | 3473 | ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || |
daf83964 | 3474 | ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); |
90c60e16 | 3475 | ASSERT(iip->ili_item.li_buf == bp); |
1da177e4 | 3476 | |
88ee2df7 | 3477 | dip = xfs_buf_offset(bp, ip->i_imap.im_boffset); |
1da177e4 | 3478 | |
f2019299 BF |
3479 | /* |
3480 | * We don't flush the inode if any of the following checks fail, but we | |
3481 | * do still update the log item and attach to the backing buffer as if | |
3482 | * the flush happened. This is a formality to facilitate predictable | |
3483 | * error handling, as the caller will shut down and fail the buffer. | 
3484 | */ | |
3485 | error = -EFSCORRUPTED; | |
69ef921b | 3486 | if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC), |
9e24cfd0 | 3487 | mp, XFS_ERRTAG_IFLUSH_1)) { |
6a19d939 | 3488 | xfs_alert_tag(mp, XFS_PTAG_IFLUSH, |
c9690043 | 3489 | "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT, |
6a19d939 | 3490 | __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip); |
f2019299 | 3491 | goto flush_out; |
1da177e4 | 3492 | } |
c19b3b05 | 3493 | if (S_ISREG(VFS_I(ip)->i_mode)) { |
1da177e4 | 3494 | if (XFS_TEST_ERROR( |
f7e67b20 CH |
3495 | ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && |
3496 | ip->i_df.if_format != XFS_DINODE_FMT_BTREE, | |
9e24cfd0 | 3497 | mp, XFS_ERRTAG_IFLUSH_3)) { |
6a19d939 | 3498 | xfs_alert_tag(mp, XFS_PTAG_IFLUSH, |
c9690043 | 3499 | "%s: Bad regular inode %Lu, ptr "PTR_FMT, |
6a19d939 | 3500 | __func__, ip->i_ino, ip); |
f2019299 | 3501 | goto flush_out; |
1da177e4 | 3502 | } |
c19b3b05 | 3503 | } else if (S_ISDIR(VFS_I(ip)->i_mode)) { |
1da177e4 | 3504 | if (XFS_TEST_ERROR( |
f7e67b20 CH |
3505 | ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS && |
3506 | ip->i_df.if_format != XFS_DINODE_FMT_BTREE && | |
3507 | ip->i_df.if_format != XFS_DINODE_FMT_LOCAL, | |
9e24cfd0 | 3508 | mp, XFS_ERRTAG_IFLUSH_4)) { |
6a19d939 | 3509 | xfs_alert_tag(mp, XFS_PTAG_IFLUSH, |
c9690043 | 3510 | "%s: Bad directory inode %Lu, ptr "PTR_FMT, |
6a19d939 | 3511 | __func__, ip->i_ino, ip); |
f2019299 | 3512 | goto flush_out; |
1da177e4 LT |
3513 | } |
3514 | } | |
daf83964 | 3515 | if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) > |
6e73a545 | 3516 | ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) { |
6a19d939 DC |
3517 | xfs_alert_tag(mp, XFS_PTAG_IFLUSH, |
3518 | "%s: detected corrupt incore inode %Lu, " | |
c9690043 | 3519 | "total extents = %d, nblocks = %Ld, ptr "PTR_FMT, |
6a19d939 | 3520 | __func__, ip->i_ino, |
daf83964 | 3521 | ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp), |
6e73a545 | 3522 | ip->i_nblocks, ip); |
f2019299 | 3523 | goto flush_out; |
1da177e4 | 3524 | } |
7821ea30 | 3525 | if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize, |
9e24cfd0 | 3526 | mp, XFS_ERRTAG_IFLUSH_6)) { |
6a19d939 | 3527 | xfs_alert_tag(mp, XFS_PTAG_IFLUSH, |
c9690043 | 3528 | "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT, |
7821ea30 | 3529 | __func__, ip->i_ino, ip->i_forkoff, ip); |
f2019299 | 3530 | goto flush_out; |
1da177e4 | 3531 | } |
e60896d8 | 3532 | |
1da177e4 | 3533 | /* |
965e0a1a CH |
3534 | * Inode item log recovery for v2 inodes is dependent on the flushiter | 
3535 | * count for correct sequencing. We bump the flush iteration count so | |
3536 | * we can detect flushes which postdate a log record during recovery. | |
3537 | * This is redundant as we now log every change and hence this can't | |
3538 | * happen but we need to still do it to ensure backwards compatibility | |
3539 | * with old kernels that predate logging all inode changes. | |
1da177e4 | 3540 | */ |
38c26bfd | 3541 | if (!xfs_has_v3inodes(mp)) |
965e0a1a | 3542 | ip->i_flushiter++; |
1da177e4 | 3543 | |
0f45a1b2 CH |
3544 | /* |
3545 | * If there are inline format data / attr forks attached to this inode, | |
3546 | * make sure they are not corrupt. | |
3547 | */ | |
f7e67b20 | 3548 | if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL && |
0f45a1b2 CH |
3549 | xfs_ifork_verify_local_data(ip)) |
3550 | goto flush_out; | |
f7e67b20 | 3551 | if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL && |
0f45a1b2 | 3552 | xfs_ifork_verify_local_attr(ip)) |
f2019299 | 3553 | goto flush_out; |
005c5db8 | 3554 | |
1da177e4 | 3555 | /* |
3987848c DC |
3556 | * Copy the dirty parts of the inode into the on-disk inode. We always |
3557 | * copy out the core of the inode, because if the inode is dirty at all | |
3558 | * the core must be. | |
1da177e4 | 3559 | */ |
93f958f9 | 3560 | xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn); |
1da177e4 LT |
3561 | |
3562 | /* Wrap, we never let the log put out DI_MAX_FLUSH */ | |
38c26bfd | 3563 | if (!xfs_has_v3inodes(mp)) { |
ee7b83fd CH |
3564 | if (ip->i_flushiter == DI_MAX_FLUSH) |
3565 | ip->i_flushiter = 0; | |
3566 | } | |
1da177e4 | 3567 | |
005c5db8 DW |
3568 | xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); |
3569 | if (XFS_IFORK_Q(ip)) | |
3570 | xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); | |
1da177e4 LT |
3571 | |
3572 | /* | |
f5d8d5c4 CH |
3573 | * We've recorded everything logged in the inode, so we'd like to clear |
3574 | * the ili_fields bits so we don't log and flush things unnecessarily. | |
3575 | * However, we can't stop logging all this information until the data | |
3576 | * we've copied into the disk buffer is written to disk. If we did we | |
3577 | * might overwrite the copy of the inode in the log with all the data | |
3578 | * after re-logging only part of it, and in the face of a crash we | |
3579 | * wouldn't have all the data we need to recover. | |
1da177e4 | 3580 | * |
f5d8d5c4 CH |
3581 | * What we do is move the bits to the ili_last_fields field. When |
3582 | * logging the inode, these bits are moved back to the ili_fields field. | |
664ffb8a CH |
3583 | * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since |
3584 | * we know that the information those bits represent is permanently on | |
f5d8d5c4 CH |
3585 | * disk. As long as the flush completes before the inode is logged |
3586 | * again, then both ili_fields and ili_last_fields will be cleared. | |
1da177e4 | 3587 | */ |
f2019299 BF |
3588 | error = 0; |
3589 | flush_out: | |
1319ebef | 3590 | spin_lock(&iip->ili_lock); |
93848a99 CH |
3591 | iip->ili_last_fields = iip->ili_fields; |
3592 | iip->ili_fields = 0; | |
fc0561ce | 3593 | iip->ili_fsync_fields = 0; |
1319ebef | 3594 | spin_unlock(&iip->ili_lock); |
1da177e4 | 3595 | |
1319ebef DC |
3596 | /* |
3597 | * Store the current LSN of the inode so that we can tell whether the | |
664ffb8a | 3598 | * item has moved in the AIL from xfs_buf_inode_iodone(). |
1319ebef | 3599 | */ |
93848a99 CH |
3600 | xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, |
3601 | &iip->ili_item.li_lsn); | |
1da177e4 | 3602 | |
93848a99 CH |
3603 | /* generate the checksum. */ |
3604 | xfs_dinode_calc_crc(mp, dip); | |
f2019299 | 3605 | return error; |
1da177e4 | 3606 | } |
44a8736b | 3607 | |
e6187b34 DC |
3608 | /* |
3609 | * Non-blocking flush of dirty inode metadata into the backing buffer. | |
3610 | * | |
3611 | * The caller must have a reference to the inode and hold the cluster buffer | |
3612 | * locked. The function walks all the inodes attached to the cluster buffer that | 
3613 | * it can find and lock without blocking, and flushes them to the cluster buffer. | 
3614 | * | |
5717ea4d DC |
3615 | * On successful flushing of at least one inode, the caller must write out the |
3616 | * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and | |
3617 | * the caller needs to release the buffer. On failure, the filesystem will be | |
3618 | * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED | |
3619 | * will be returned. | |
e6187b34 DC |
3620 | */ |
3621 | int | |
3622 | xfs_iflush_cluster( | |
e6187b34 DC |
3623 | struct xfs_buf *bp) |
3624 | { | |
5717ea4d DC |
3625 | struct xfs_mount *mp = bp->b_mount; |
3626 | struct xfs_log_item *lip, *n; | |
3627 | struct xfs_inode *ip; | |
3628 | struct xfs_inode_log_item *iip; | |
e6187b34 | 3629 | int clcount = 0; |
5717ea4d | 3630 | int error = 0; |
e6187b34 | 3631 | |
5717ea4d DC |
3632 | /* |
3633 | * We must use the safe variant here as on shutdown xfs_iflush_abort() | |
3634 | * can remove itself from the list. | |
3635 | */ | |
3636 | list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) { | |
3637 | iip = (struct xfs_inode_log_item *)lip; | |
3638 | ip = iip->ili_inode; | |
e6187b34 DC |
3639 | |
3640 | /* | |
5717ea4d | 3641 | * Quick and dirty check to avoid locks if possible. |
e6187b34 | 3642 | */ |
718ecc50 | 3643 | if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) |
5717ea4d DC |
3644 | continue; |
3645 | if (xfs_ipincount(ip)) | |
e6187b34 | 3646 | continue; |
e6187b34 DC |
3647 | |
3648 | /* | |
5717ea4d DC |
3649 | * The inode is still attached to the buffer, which means it is |
3650 | * dirty but reclaim might try to grab it. Check carefully for | |
3651 | * that, and grab the ilock while still holding the i_flags_lock | |
3652 | * to guarantee reclaim will not be able to reclaim this inode | |
3653 | * once we drop the i_flags_lock. | |
e6187b34 | 3654 | */ |
5717ea4d DC |
3655 | spin_lock(&ip->i_flags_lock); |
3656 | ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE)); | |
718ecc50 | 3657 | if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) { |
5717ea4d DC |
3658 | spin_unlock(&ip->i_flags_lock); |
3659 | continue; | |
e6187b34 | 3660 | } |
e6187b34 DC |
3661 | |
3662 | /* | |
5717ea4d DC |
3663 | * ILOCK will pin the inode against reclaim and prevent |
3664 | * concurrent transactions modifying the inode while we are | |
718ecc50 DC |
3665 | * flushing the inode. If we get the lock, set the flushing |
3666 | * state before we drop the i_flags_lock. | |
e6187b34 | 3667 | */ |
5717ea4d DC |
3668 | if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { |
3669 | spin_unlock(&ip->i_flags_lock); | |
e6187b34 | 3670 | continue; |
5717ea4d | 3671 | } |
718ecc50 | 3672 | __xfs_iflags_set(ip, XFS_IFLUSHING); |
5717ea4d | 3673 | spin_unlock(&ip->i_flags_lock); |
e6187b34 | 3674 | |
e6187b34 | 3675 | /* |
5717ea4d DC |
3676 | * Abort flushing this inode if we are shut down because the |
3677 | * inode may not currently be in the AIL. This can occur when | |
3678 | * log I/O failure unpins the inode without inserting into the | |
3679 | * AIL, leaving a dirty/unpinned inode attached to the buffer | |
3680 | * that otherwise looks like it should be flushed. | |
e6187b34 | 3681 | */ |
01728b44 | 3682 | if (xlog_is_shutdown(mp->m_log)) { |
5717ea4d | 3683 | xfs_iunpin_wait(ip); |
5717ea4d DC |
3684 | xfs_iflush_abort(ip); |
3685 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | |
3686 | error = -EIO; | |
e6187b34 DC |
3687 | continue; |
3688 | } | |
3689 | ||
5717ea4d DC |
3690 | /* don't block waiting on a log force to unpin dirty inodes */ |
3691 | if (xfs_ipincount(ip)) { | |
718ecc50 | 3692 | xfs_iflags_clear(ip, XFS_IFLUSHING); |
5717ea4d DC |
3693 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
3694 | continue; | |
e6187b34 | 3695 | } |
e6187b34 | 3696 | |
5717ea4d DC |
3697 | if (!xfs_inode_clean(ip)) |
3698 | error = xfs_iflush(ip, bp); | |
3699 | else | |
718ecc50 | 3700 | xfs_iflags_clear(ip, XFS_IFLUSHING); |
5717ea4d DC |
3701 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
3702 | if (error) | |
3703 | break; | |
3704 | clcount++; | |
e6187b34 DC |
3705 | } |
3706 | ||
e6187b34 | 3707 | if (error) { |
01728b44 DC |
3708 | /* |
3709 | * Shutdown first so we kill the log before we release this | |
3710 | * buffer. If it is an INODE_ALLOC buffer and pins the tail | |
3711 | * of the log, failing it before the _log_ is shut down can | |
3712 | * result in the log tail being moved forward in the journal | |
3713 | * on disk because log writes can still be taking place. Hence | |
3714 | * unpinning the tail will allow the ICREATE intent to be | |
3716 | * removed from the log and recovery will fail with uninitialised | 
3716 | * inode cluster buffers. | |
3717 | */ | |
3718 | xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); | |
e6187b34 DC |
3719 | bp->b_flags |= XBF_ASYNC; |
3720 | xfs_buf_ioend_fail(bp); | |
5717ea4d | 3721 | return error; |
e6187b34 | 3722 | } |
5717ea4d DC |
3723 | |
3724 | if (!clcount) | |
3725 | return -EAGAIN; | |
3726 | ||
3727 | XFS_STATS_INC(mp, xs_icluster_flushcnt); | |
3728 | XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount); | |
3729 | return 0; | |
3730 | ||
e6187b34 DC |
3731 | } |
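A hedged sketch of how a caller is expected to act on the return value (the real caller lives in the inode item push code, which is not part of this file):

	error = xfs_iflush_cluster(bp);
	if (error == -EAGAIN) {
		/* nothing was flushed; just drop the still-locked buffer */
		xfs_buf_relse(bp);
	} else if (error) {
		/* fs already shut down; the buffer has been released for us */
	} else {
		/* at least one inode flushed; caller must write out and release bp */
	}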
3732 | ||
44a8736b DW |
3733 | /* Release an inode. */ |
3734 | void | |
3735 | xfs_irele( | |
3736 | struct xfs_inode *ip) | |
3737 | { | |
3738 | trace_xfs_irele(ip, _RET_IP_); | |
3739 | iput(VFS_I(ip)); | |
3740 | } | |
54fbdd10 CH |
3741 | |
3742 | /* | |
3743 | * Ensure all committed transactions touching the inode are written to the log. | 
3744 | */ | |
3745 | int | |
3746 | xfs_log_force_inode( | |
3747 | struct xfs_inode *ip) | |
3748 | { | |
5f9b4b0d | 3749 | xfs_csn_t seq = 0; |
54fbdd10 CH |
3750 | |
3751 | xfs_ilock(ip, XFS_ILOCK_SHARED); | |
3752 | if (xfs_ipincount(ip)) | |
5f9b4b0d | 3753 | seq = ip->i_itemp->ili_commit_seq; |
54fbdd10 CH |
3754 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
3755 | ||
5f9b4b0d | 3756 | if (!seq) |
54fbdd10 | 3757 | return 0; |
5f9b4b0d | 3758 | return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL); |
54fbdd10 | 3759 | } |
e2aaee9c DW |
3760 | |
3761 | /* | |
3762 | * Grab the exclusive iolock for a data copy from src to dest, making sure to | |
3763 | * abide by the vfs locking order (lowest pointer value goes first) and to break the | 
3764 | * layout leases before proceeding. The loop is needed because we cannot call | |
3765 | * the blocking break_layout() with the iolocks held, and therefore have to | |
3766 | * back out both locks. | |
3767 | */ | |
3768 | static int | |
3769 | xfs_iolock_two_inodes_and_break_layout( | |
3770 | struct inode *src, | |
3771 | struct inode *dest) | |
3772 | { | |
3773 | int error; | |
3774 | ||
3775 | if (src > dest) | |
3776 | swap(src, dest); | |
3777 | ||
3778 | retry: | |
3779 | /* Wait to break both inodes' layouts before we start locking. */ | |
3780 | error = break_layout(src, true); | |
3781 | if (error) | |
3782 | return error; | |
3783 | if (src != dest) { | |
3784 | error = break_layout(dest, true); | |
3785 | if (error) | |
3786 | return error; | |
3787 | } | |
3788 | ||
3789 | /* Lock one inode and make sure nobody got in and leased it. */ | |
3790 | inode_lock(src); | |
3791 | error = break_layout(src, false); | |
3792 | if (error) { | |
3793 | inode_unlock(src); | |
3794 | if (error == -EWOULDBLOCK) | |
3795 | goto retry; | |
3796 | return error; | |
3797 | } | |
3798 | ||
3799 | if (src == dest) | |
3800 | return 0; | |
3801 | ||
3802 | /* Lock the other inode and make sure nobody got in and leased it. */ | |
3803 | inode_lock_nested(dest, I_MUTEX_NONDIR2); | |
3804 | error = break_layout(dest, false); | |
3805 | if (error) { | |
3806 | inode_unlock(src); | |
3807 | inode_unlock(dest); | |
3808 | if (error == -EWOULDBLOCK) | |
3809 | goto retry; | |
3810 | return error; | |
3811 | } | |
3812 | ||
3813 | return 0; | |
3814 | } | |
3815 | ||
3816 | /* | |
3817 | * Lock two inodes so that userspace cannot initiate I/O via file syscalls or | |
3818 | * mmap activity. | |
3819 | */ | |
3820 | int | |
3821 | xfs_ilock2_io_mmap( | |
3822 | struct xfs_inode *ip1, | |
3823 | struct xfs_inode *ip2) | |
3824 | { | |
3825 | int ret; | |
3826 | ||
3827 | ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2)); | |
3828 | if (ret) | |
3829 | return ret; | |
d2c292d8 JK |
3830 | filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping, |
3831 | VFS_I(ip2)->i_mapping); | |
e2aaee9c DW |
3832 | return 0; |
3833 | } | |
3834 | ||
3835 | /* Unlock both inodes to allow IO and mmap activity. */ | |
3836 | void | |
3837 | xfs_iunlock2_io_mmap( | |
3838 | struct xfs_inode *ip1, | |
3839 | struct xfs_inode *ip2) | |
3840 | { | |
d2c292d8 JK |
3841 | filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping, |
3842 | VFS_I(ip2)->i_mapping); | |
e2aaee9c | 3843 | inode_unlock(VFS_I(ip2)); |
d2c292d8 | 3844 | if (ip1 != ip2) |
e2aaee9c DW |
3845 | inode_unlock(VFS_I(ip1)); |
3846 | } |
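A minimal usage sketch for the pair above, e.g. around a data copy between two files (error handling condensed):

	error = xfs_ilock2_io_mmap(ip1, ip2);
	if (error)
		return error;

	/* file I/O and mmap activity on both inodes is now excluded */

	xfs_iunlock2_io_mmap(ip1, ip2);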