xfs: refactor unlinked list search and mapping to a separate function
fs/xfs/xfs_inode.c (linux-2.6-block.git)
0b61f8a4 1// SPDX-License-Identifier: GPL-2.0
1da177e4 2/*
3e57ecf6 3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
7b718769 4 * All Rights Reserved.
1da177e4 5 */
40ebd81d 6#include <linux/log2.h>
f0e28280 7#include <linux/iversion.h>
40ebd81d 8
1da177e4 9#include "xfs.h"
a844f451 10#include "xfs_fs.h"
70a9883c 11#include "xfs_shared.h"
12#include "xfs_format.h"
13#include "xfs_log_format.h"
14#include "xfs_trans_resv.h"
1da177e4 15#include "xfs_sb.h"
1da177e4 16#include "xfs_mount.h"
3ab78df2 17#include "xfs_defer.h"
a4fbe6ab 18#include "xfs_inode.h"
57062787 19#include "xfs_da_format.h"
c24b5dfa 20#include "xfs_da_btree.h"
c24b5dfa 21#include "xfs_dir2.h"
a844f451 22#include "xfs_attr_sf.h"
c24b5dfa 23#include "xfs_attr.h"
24#include "xfs_trans_space.h"
25#include "xfs_trans.h"
1da177e4 26#include "xfs_buf_item.h"
a844f451 27#include "xfs_inode_item.h"
28#include "xfs_ialloc.h"
29#include "xfs_bmap.h"
68988114 30#include "xfs_bmap_util.h"
e9e899a2 31#include "xfs_errortag.h"
1da177e4 32#include "xfs_error.h"
1da177e4 33#include "xfs_quota.h"
2a82b8be 34#include "xfs_filestream.h"
93848a99 35#include "xfs_cksum.h"
0b1b213f 36#include "xfs_trace.h"
33479e05 37#include "xfs_icache.h"
c24b5dfa 38#include "xfs_symlink.h"
39#include "xfs_trans_priv.h"
40#include "xfs_log.h"
a4fbe6ab 41#include "xfs_bmap_btree.h"
aa8968f2 42#include "xfs_reflink.h"
005c5db8 43#include "xfs_dir2_priv.h"
1da177e4 44
1da177e4 45kmem_zone_t *xfs_inode_zone;
46
47/*
8f04c47a 48 * Used in xfs_itruncate_extents(). This is the maximum number of extents
49 * freed from a file in a single transaction.
50 */
51#define XFS_ITRUNC_MAX_EXTENTS 2
52
53STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
54STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
55STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);
ab297431 56
57/*
58 * helper function to extract extent size hint from inode
59 */
60xfs_extlen_t
61xfs_get_extsz_hint(
62 struct xfs_inode *ip)
63{
64 if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
65 return ip->i_d.di_extsize;
66 if (XFS_IS_REALTIME_INODE(ip))
67 return ip->i_mount->m_sb.sb_rextsize;
68 return 0;
69}
70
71/*
72 * Helper function to extract CoW extent size hint from inode.
73 * Between the extent size hint and the CoW extent size hint, we
74 * return the greater of the two. If the value is zero (automatic),
75 * use the default size.
76 */
77xfs_extlen_t
78xfs_get_cowextsz_hint(
79 struct xfs_inode *ip)
80{
81 xfs_extlen_t a, b;
82
83 a = 0;
84 if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
85 a = ip->i_d.di_cowextsize;
86 b = xfs_get_extsz_hint(ip);
87
88 a = max(a, b);
89 if (a == 0)
90 return XFS_DEFAULT_COWEXTSZ_HINT;
91 return a;
92}
93
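/*
 * Editorial note (not part of the original source): as a worked example of
 * the helper above, an inode with no XFS_DIFLAG2_COWEXTSIZE hint but a
 * regular extent size hint of 64 blocks gets a CoW extent size hint of 64;
 * if both hints are zero, the XFS_DEFAULT_COWEXTSZ_HINT fallback is used.
 */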
fa96acad 94/*
95 * These two are wrapper routines around the xfs_ilock() routine used to
96 * centralize some grungy code. They are used in places that wish to lock the
97 * inode solely for reading the extents. The reason these places can't just
 98 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards
 99 * the reading in of the extents from disk for a file in b-tree format. If the
100 * inode is in b-tree format, then we need to lock the inode exclusively until
101 * the extents are read in. Locking it exclusively all the time would limit
102 * our parallelism unnecessarily, though. What we do instead is check to see
103 * if the extents have been read in yet, and only lock the inode exclusively
104 * if they have not.
fa96acad 105 *
efa70be1 106 * The functions return a value which should be given to the corresponding
01f4f327 107 * xfs_iunlock() call.
108 */
109uint
110xfs_ilock_data_map_shared(
111 struct xfs_inode *ip)
fa96acad 112{
309ecac8 113 uint lock_mode = XFS_ILOCK_SHARED;
fa96acad 114
115 if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
116 (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
fa96acad 117 lock_mode = XFS_ILOCK_EXCL;
fa96acad 118 xfs_ilock(ip, lock_mode);
119 return lock_mode;
120}
121
122uint
123xfs_ilock_attr_map_shared(
124 struct xfs_inode *ip)
fa96acad 125{
126 uint lock_mode = XFS_ILOCK_SHARED;
127
128 if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
129 (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
130 lock_mode = XFS_ILOCK_EXCL;
131 xfs_ilock(ip, lock_mode);
132 return lock_mode;
133}
134
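/*
 * Editorial sketch (not part of the original source) of how callers are
 * expected to pair the two wrappers above with xfs_iunlock(), since the
 * lock mode actually taken depends on whether the extents are in memory:
 *
 *	uint lock_mode = xfs_ilock_data_map_shared(ip);
 *	... walk the in-core data fork extents ...
 *	xfs_iunlock(ip, lock_mode);
 */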
135/*
136 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
137 * multi-reader locks: i_mmap_lock and the i_lock. This routine allows
138 * various combinations of the locks to be obtained.
fa96acad 139 *
140 * The 3 locks should always be ordered so that the IO lock is obtained first,
141 * the mmap lock second and the ilock last in order to prevent deadlock.
fa96acad 142 *
143 * Basic locking order:
144 *
65523218 145 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
146 *
147 * mmap_sem locking order:
148 *
65523218 149 * i_rwsem -> page lock -> mmap_sem
150 * mmap_sem -> i_mmap_lock -> page_lock
151 *
 152 * The difference in mmap_sem locking order means that we cannot hold the
153 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
154 * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
155 * in get_user_pages() to map the user pages into the kernel address space for
65523218 156 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
157 * page faults already hold the mmap_sem.
158 *
159 * Hence to serialise fully against both syscall and mmap based IO, we need to
65523218 160 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
161 * taken in places where we need to invalidate the page cache in a race
162 * free manner (e.g. truncate, hole punch and other extent manipulation
163 * functions).
164 */
165void
166xfs_ilock(
167 xfs_inode_t *ip,
168 uint lock_flags)
169{
170 trace_xfs_ilock(ip, lock_flags, _RET_IP_);
171
172 /*
173 * You can't set both SHARED and EXCL for the same lock,
174 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
175 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
176 */
177 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
178 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
179 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
180 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
181 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
182 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
0952c818 183 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
fa96acad 184
185 if (lock_flags & XFS_IOLOCK_EXCL) {
186 down_write_nested(&VFS_I(ip)->i_rwsem,
187 XFS_IOLOCK_DEP(lock_flags));
188 } else if (lock_flags & XFS_IOLOCK_SHARED) {
189 down_read_nested(&VFS_I(ip)->i_rwsem,
190 XFS_IOLOCK_DEP(lock_flags));
191 }
fa96acad 192
193 if (lock_flags & XFS_MMAPLOCK_EXCL)
194 mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
195 else if (lock_flags & XFS_MMAPLOCK_SHARED)
196 mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
197
198 if (lock_flags & XFS_ILOCK_EXCL)
199 mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
200 else if (lock_flags & XFS_ILOCK_SHARED)
201 mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
202}
203
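/*
 * Editorial example (not part of the original source): the extent
 * manipulation paths mentioned in the locking comment above typically take
 * the outer locks together before the ilock, for instance:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	... flush and invalidate the page cache ...
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 */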
204/*
205 * This is just like xfs_ilock(), except that the caller
206 * is guaranteed not to sleep. It returns 1 if it gets
207 * the requested locks and 0 otherwise. If the IO lock is
208 * obtained but the inode lock cannot be, then the IO lock
209 * is dropped before returning.
210 *
211 * ip -- the inode being locked
212 * lock_flags -- this parameter indicates the inode's locks to be
 213 * locked. See the comment for xfs_ilock() for a list
214 * of valid values.
215 */
216int
217xfs_ilock_nowait(
218 xfs_inode_t *ip,
219 uint lock_flags)
220{
221 trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
222
223 /*
224 * You can't set both SHARED and EXCL for the same lock,
225 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
226 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
227 */
228 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
229 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
230 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
231 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
232 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
233 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
0952c818 234 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
235
236 if (lock_flags & XFS_IOLOCK_EXCL) {
65523218 237 if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
238 goto out;
239 } else if (lock_flags & XFS_IOLOCK_SHARED) {
65523218 240 if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
241 goto out;
242 }
243
244 if (lock_flags & XFS_MMAPLOCK_EXCL) {
245 if (!mrtryupdate(&ip->i_mmaplock))
246 goto out_undo_iolock;
247 } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
248 if (!mrtryaccess(&ip->i_mmaplock))
249 goto out_undo_iolock;
250 }
251
252 if (lock_flags & XFS_ILOCK_EXCL) {
253 if (!mrtryupdate(&ip->i_lock))
653c60b6 254 goto out_undo_mmaplock;
255 } else if (lock_flags & XFS_ILOCK_SHARED) {
256 if (!mrtryaccess(&ip->i_lock))
653c60b6 257 goto out_undo_mmaplock;
258 }
259 return 1;
260
261out_undo_mmaplock:
262 if (lock_flags & XFS_MMAPLOCK_EXCL)
263 mrunlock_excl(&ip->i_mmaplock);
264 else if (lock_flags & XFS_MMAPLOCK_SHARED)
265 mrunlock_shared(&ip->i_mmaplock);
266out_undo_iolock:
fa96acad 267 if (lock_flags & XFS_IOLOCK_EXCL)
65523218 268 up_write(&VFS_I(ip)->i_rwsem);
fa96acad 269 else if (lock_flags & XFS_IOLOCK_SHARED)
65523218 270 up_read(&VFS_I(ip)->i_rwsem);
653c60b6 271out:
272 return 0;
273}
274
275/*
276 * xfs_iunlock() is used to drop the inode locks acquired with
277 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
278 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
279 * that we know which locks to drop.
280 *
281 * ip -- the inode being unlocked
282 * lock_flags -- this parameter indicates the inode's locks to be
 283 * unlocked. See the comment for xfs_ilock() for a list
284 * of valid values for this parameter.
285 *
286 */
287void
288xfs_iunlock(
289 xfs_inode_t *ip,
290 uint lock_flags)
291{
292 /*
293 * You can't set both SHARED and EXCL for the same lock,
294 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
295 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
296 */
297 ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
298 (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
299 ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
300 (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
301 ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
302 (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
0952c818 303 ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
304 ASSERT(lock_flags != 0);
305
306 if (lock_flags & XFS_IOLOCK_EXCL)
65523218 307 up_write(&VFS_I(ip)->i_rwsem);
fa96acad 308 else if (lock_flags & XFS_IOLOCK_SHARED)
65523218 309 up_read(&VFS_I(ip)->i_rwsem);
fa96acad 310
311 if (lock_flags & XFS_MMAPLOCK_EXCL)
312 mrunlock_excl(&ip->i_mmaplock);
313 else if (lock_flags & XFS_MMAPLOCK_SHARED)
314 mrunlock_shared(&ip->i_mmaplock);
315
316 if (lock_flags & XFS_ILOCK_EXCL)
317 mrunlock_excl(&ip->i_lock);
318 else if (lock_flags & XFS_ILOCK_SHARED)
319 mrunlock_shared(&ip->i_lock);
320
321 trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
322}
323
324/*
 325 * Give up write locks. The i/o lock cannot be held nested
326 * if it is being demoted.
327 */
328void
329xfs_ilock_demote(
330 xfs_inode_t *ip,
331 uint lock_flags)
332{
333 ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
334 ASSERT((lock_flags &
335 ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);
336
337 if (lock_flags & XFS_ILOCK_EXCL)
338 mrdemote(&ip->i_lock);
339 if (lock_flags & XFS_MMAPLOCK_EXCL)
340 mrdemote(&ip->i_mmaplock);
fa96acad 341 if (lock_flags & XFS_IOLOCK_EXCL)
65523218 342 downgrade_write(&VFS_I(ip)->i_rwsem);
343
344 trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
345}
346
742ae1e3 347#if defined(DEBUG) || defined(XFS_WARN)
348int
349xfs_isilocked(
350 xfs_inode_t *ip,
351 uint lock_flags)
352{
353 if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
354 if (!(lock_flags & XFS_ILOCK_SHARED))
355 return !!ip->i_lock.mr_writer;
356 return rwsem_is_locked(&ip->i_lock.mr_lock);
357 }
358
359 if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
360 if (!(lock_flags & XFS_MMAPLOCK_SHARED))
361 return !!ip->i_mmaplock.mr_writer;
362 return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
363 }
364
365 if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
366 if (!(lock_flags & XFS_IOLOCK_SHARED))
367 return !debug_locks ||
368 lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
369 return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
370 }
371
372 ASSERT(0);
373 return 0;
374}
375#endif
376
377/*
378 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
379 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
380 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
381 * errors and warnings.
382 */
383#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
384static bool
385xfs_lockdep_subclass_ok(
386 int subclass)
387{
388 return subclass < MAX_LOCKDEP_SUBCLASSES;
389}
390#else
391#define xfs_lockdep_subclass_ok(subclass) (true)
392#endif
393
c24b5dfa 394/*
653c60b6 395 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
396 * value. This can be called for any type of inode lock combination, including
397 * parent locking. Care must be taken to ensure we don't overrun the subclass
398 * storage fields in the class mask we build.
399 */
400static inline int
401xfs_lock_inumorder(int lock_mode, int subclass)
402{
403 int class = 0;
404
405 ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
406 XFS_ILOCK_RTSUM)));
3403ccc0 407 ASSERT(xfs_lockdep_subclass_ok(subclass));
0952c818 408
653c60b6 409 if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
0952c818 410 ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
0952c818 411 class += subclass << XFS_IOLOCK_SHIFT;
412 }
413
414 if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
415 ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
416 class += subclass << XFS_MMAPLOCK_SHIFT;
417 }
418
419 if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
420 ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
421 class += subclass << XFS_ILOCK_SHIFT;
422 }
c24b5dfa 423
0952c818 424 return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
425}
426
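/*
 * Editorial example (not part of the original source): for the third inode
 * locked by xfs_lock_inodes() with XFS_ILOCK_EXCL, the call is
 * xfs_lock_inumorder(XFS_ILOCK_EXCL, 2), which folds (2 << XFS_ILOCK_SHIFT)
 * into the returned lock mode so lockdep sees a distinct subclass for each
 * position in the locking order.
 */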
427/*
428 * The following routine will lock n inodes in exclusive mode. We assume the
429 * caller calls us with the inodes in i_ino order.
c24b5dfa 430 *
431 * We need to detect deadlock where an inode that we lock is in the AIL and we
432 * start waiting for another inode that is locked by a thread in a long running
433 * transaction (such as truncate). This can result in deadlock since the long
434 * running trans might need to wait for the inode we just locked in order to
435 * push the tail and free space in the log.
436 *
437 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
438 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
439 * lock more than one at a time, lockdep will report false positives saying we
440 * have violated locking orders.
c24b5dfa 441 */
0d5a75e9 442static void
443xfs_lock_inodes(
444 xfs_inode_t **ips,
445 int inodes,
446 uint lock_mode)
447{
448 int attempts = 0, i, j, try_lock;
449 xfs_log_item_t *lp;
450
451 /*
452 * Currently supports between 2 and 5 inodes with exclusive locking. We
453 * support an arbitrary depth of locking here, but absolute limits on
 454 * inodes depend on the type of locking and the limits placed by
455 * lockdep annotations in xfs_lock_inumorder. These are all checked by
456 * the asserts.
457 */
95afcf5c 458 ASSERT(ips && inodes >= 2 && inodes <= 5);
459 ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
460 XFS_ILOCK_EXCL));
461 ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
462 XFS_ILOCK_SHARED)));
463 ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
464 inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
465 ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
466 inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);
467
468 if (lock_mode & XFS_IOLOCK_EXCL) {
469 ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
470 } else if (lock_mode & XFS_MMAPLOCK_EXCL)
471 ASSERT(!(lock_mode & XFS_ILOCK_EXCL));
472
473 try_lock = 0;
474 i = 0;
475again:
476 for (; i < inodes; i++) {
477 ASSERT(ips[i]);
478
95afcf5c 479 if (i && (ips[i] == ips[i - 1])) /* Already locked */
480 continue;
481
482 /*
483 * If try_lock is not set yet, make sure all locked inodes are
484 * not in the AIL. If any are, set try_lock to be used later.
c24b5dfa 485 */
486 if (!try_lock) {
487 for (j = (i - 1); j >= 0 && !try_lock; j--) {
488 lp = (xfs_log_item_t *)ips[j]->i_itemp;
22525c17 489 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
c24b5dfa 490 try_lock++;
491 }
492 }
493
494 /*
495 * If any of the previous locks we have locked is in the AIL,
496 * we must TRY to get the second and subsequent locks. If
497 * we can't get any, we must release all we have
498 * and try again.
499 */
500 if (!try_lock) {
501 xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
502 continue;
503 }
504
505 /* try_lock means we have an inode locked that is in the AIL. */
506 ASSERT(i != 0);
507 if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
508 continue;
c24b5dfa 509
510 /*
511 * Unlock all previous guys and try again. xfs_iunlock will try
512 * to push the tail if the inode is in the AIL.
513 */
514 attempts++;
515 for (j = i - 1; j >= 0; j--) {
c24b5dfa 516 /*
517 * Check to see if we've already unlocked this one. Not
518 * the first one going back, and the inode ptr is the
519 * same.
c24b5dfa 520 */
521 if (j != (i - 1) && ips[j] == ips[j + 1])
522 continue;
c24b5dfa 523
524 xfs_iunlock(ips[j], lock_mode);
525 }
c24b5dfa 526
527 if ((attempts % 5) == 0) {
528 delay(1); /* Don't just spin the CPU */
c24b5dfa 529 }
530 i = 0;
531 try_lock = 0;
532 goto again;
c24b5dfa 533 }
534}
535
536/*
653c60b6 537 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
538 * the mmaplock or the ilock, but not more than one type at a time. If we lock
539 * more than one at a time, lockdep will report false positives saying we have
540 * violated locking orders. The iolock must be double-locked separately since
541 * we use i_rwsem for that. We now support taking one lock EXCL and the other
542 * SHARED.
543 */
544void
545xfs_lock_two_inodes(
546 struct xfs_inode *ip0,
547 uint ip0_mode,
548 struct xfs_inode *ip1,
549 uint ip1_mode)
c24b5dfa 550{
551 struct xfs_inode *temp;
552 uint mode_temp;
553 int attempts = 0;
554 xfs_log_item_t *lp;
555
556 ASSERT(hweight32(ip0_mode) == 1);
557 ASSERT(hweight32(ip1_mode) == 1);
558 ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
559 ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
560 ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
561 !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
562 ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
563 !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
564 ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
565 !(ip0_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
566 ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) ||
567 !(ip1_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));
653c60b6 568
569 ASSERT(ip0->i_ino != ip1->i_ino);
570
571 if (ip0->i_ino > ip1->i_ino) {
572 temp = ip0;
573 ip0 = ip1;
574 ip1 = temp;
575 mode_temp = ip0_mode;
576 ip0_mode = ip1_mode;
577 ip1_mode = mode_temp;
578 }
579
580 again:
7c2d238a 581 xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));
582
583 /*
584 * If the first lock we have locked is in the AIL, we must TRY to get
585 * the second lock. If we can't get it, we must release the first one
586 * and try again.
587 */
588 lp = (xfs_log_item_t *)ip0->i_itemp;
22525c17 589 if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
590 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
591 xfs_iunlock(ip0, ip0_mode);
592 if ((++attempts % 5) == 0)
593 delay(1); /* Don't just spin the CPU */
594 goto again;
595 }
596 } else {
7c2d238a 597 xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
598 }
599}
600
601void
602__xfs_iflock(
603 struct xfs_inode *ip)
604{
605 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
606 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
607
608 do {
21417136 609 prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
610 if (xfs_isiflocked(ip))
611 io_schedule();
612 } while (!xfs_iflock_nowait(ip));
613
21417136 614 finish_wait(wq, &wait.wq_entry);
615}
616
617STATIC uint
618_xfs_dic2xflags(
c8ce540d 619 uint16_t di_flags,
620 uint64_t di_flags2,
621 bool has_attr)
622{
623 uint flags = 0;
624
625 if (di_flags & XFS_DIFLAG_ANY) {
626 if (di_flags & XFS_DIFLAG_REALTIME)
e7b89481 627 flags |= FS_XFLAG_REALTIME;
1da177e4 628 if (di_flags & XFS_DIFLAG_PREALLOC)
e7b89481 629 flags |= FS_XFLAG_PREALLOC;
1da177e4 630 if (di_flags & XFS_DIFLAG_IMMUTABLE)
e7b89481 631 flags |= FS_XFLAG_IMMUTABLE;
1da177e4 632 if (di_flags & XFS_DIFLAG_APPEND)
e7b89481 633 flags |= FS_XFLAG_APPEND;
1da177e4 634 if (di_flags & XFS_DIFLAG_SYNC)
e7b89481 635 flags |= FS_XFLAG_SYNC;
1da177e4 636 if (di_flags & XFS_DIFLAG_NOATIME)
e7b89481 637 flags |= FS_XFLAG_NOATIME;
1da177e4 638 if (di_flags & XFS_DIFLAG_NODUMP)
e7b89481 639 flags |= FS_XFLAG_NODUMP;
1da177e4 640 if (di_flags & XFS_DIFLAG_RTINHERIT)
e7b89481 641 flags |= FS_XFLAG_RTINHERIT;
1da177e4 642 if (di_flags & XFS_DIFLAG_PROJINHERIT)
e7b89481 643 flags |= FS_XFLAG_PROJINHERIT;
1da177e4 644 if (di_flags & XFS_DIFLAG_NOSYMLINKS)
e7b89481 645 flags |= FS_XFLAG_NOSYMLINKS;
dd9f438e 646 if (di_flags & XFS_DIFLAG_EXTSIZE)
e7b89481 647 flags |= FS_XFLAG_EXTSIZE;
dd9f438e 648 if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
e7b89481 649 flags |= FS_XFLAG_EXTSZINHERIT;
d3446eac 650 if (di_flags & XFS_DIFLAG_NODEFRAG)
e7b89481 651 flags |= FS_XFLAG_NODEFRAG;
2a82b8be 652 if (di_flags & XFS_DIFLAG_FILESTREAM)
e7b89481 653 flags |= FS_XFLAG_FILESTREAM;
654 }
655
656 if (di_flags2 & XFS_DIFLAG2_ANY) {
657 if (di_flags2 & XFS_DIFLAG2_DAX)
658 flags |= FS_XFLAG_DAX;
659 if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
660 flags |= FS_XFLAG_COWEXTSIZE;
661 }
662
663 if (has_attr)
664 flags |= FS_XFLAG_HASATTR;
665
666 return flags;
667}
668
669uint
670xfs_ip2xflags(
58f88ca2 671 struct xfs_inode *ip)
1da177e4 672{
58f88ca2 673 struct xfs_icdinode *dic = &ip->i_d;
1da177e4 674
58f88ca2 675 return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
676}
677
678/*
 679 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
680 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 681 * ci_name->name will point to the actual name (caller must free) or
682 * will be set to NULL if an exact match is found.
683 */
684int
685xfs_lookup(
686 xfs_inode_t *dp,
687 struct xfs_name *name,
688 xfs_inode_t **ipp,
689 struct xfs_name *ci_name)
690{
691 xfs_ino_t inum;
692 int error;
693
694 trace_xfs_lookup(dp, name);
695
696 if (XFS_FORCED_SHUTDOWN(dp->i_mount))
2451337d 697 return -EIO;
c24b5dfa 698
c24b5dfa 699 error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
c24b5dfa 700 if (error)
dbad7c99 701 goto out_unlock;
702
703 error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
704 if (error)
705 goto out_free_name;
706
707 return 0;
708
709out_free_name:
710 if (ci_name)
711 kmem_free(ci_name->name);
dbad7c99 712out_unlock:
713 *ipp = NULL;
714 return error;
715}
716
717/*
718 * Allocate an inode on disk and return a copy of its in-core version.
719 * The in-core inode is locked exclusively. Set mode, nlink, and rdev
720 * appropriately within the inode. The uid and gid for the inode are
721 * set according to the contents of the given cred structure.
722 *
723 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
724 * has a free inode available, call xfs_iget() to obtain the in-core
725 * version of the allocated inode. Finally, fill in the inode and
726 * log its initial contents. In this case, ialloc_context would be
727 * set to NULL.
1da177e4 728 *
729 * If xfs_dialloc() does not have an available inode, it will replenish
730 * its supply by doing an allocation. Since we can only do one
731 * allocation within a transaction without deadlocks, we must commit
732 * the current transaction before returning the inode itself.
733 * In this case, therefore, we will set ialloc_context and return.
734 * The caller should then commit the current transaction, start a new
735 * transaction, and call xfs_ialloc() again to actually get the inode.
736 *
737 * To ensure that some other process does not grab the inode that
738 * was allocated during the first call to xfs_ialloc(), this routine
739 * also returns the [locked] bp pointing to the head of the freelist
740 * as ialloc_context. The caller should hold this buffer across
741 * the commit and pass it back into this routine on the second call.
742 *
743 * If we are allocating quota inodes, we do not have a parent inode
744 * to attach to or associate with (i.e. pip == NULL) because they
745 * are not linked into the directory structure - they are attached
746 * directly to the superblock - and so have no parent.
1da177e4 747 */
0d5a75e9 748static int
749xfs_ialloc(
750 xfs_trans_t *tp,
751 xfs_inode_t *pip,
576b1d67 752 umode_t mode,
31b084ae 753 xfs_nlink_t nlink,
66f36464 754 dev_t rdev,
6743099c 755 prid_t prid,
1da177e4 756 xfs_buf_t **ialloc_context,
757 xfs_inode_t **ipp)
758{
93848a99 759 struct xfs_mount *mp = tp->t_mountp;
760 xfs_ino_t ino;
761 xfs_inode_t *ip;
762 uint flags;
763 int error;
95582b00 764 struct timespec64 tv;
3987848c 765 struct inode *inode;
766
767 /*
768 * Call the space management code to pick
769 * the on-disk inode to be allocated.
770 */
f59cf5c2 771 error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
08358906 772 ialloc_context, &ino);
bf904248 773 if (error)
1da177e4 774 return error;
08358906 775 if (*ialloc_context || ino == NULLFSINO) {
776 *ipp = NULL;
777 return 0;
778 }
779 ASSERT(*ialloc_context == NULL);
780
781 /*
782 * Protect against obviously corrupt allocation btree records. Later
783 * xfs_iget checks will catch re-allocation of other active in-memory
784 * and on-disk inodes. If we don't catch reallocating the parent inode
785 * here we will deadlock in xfs_iget() so we have to do these checks
786 * first.
787 */
788 if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
789 xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
790 return -EFSCORRUPTED;
791 }
792
793 /*
794 * Get the in-core inode with the lock held exclusively.
795 * This is because we're setting fields here we need
796 * to prevent others from looking at until we're done.
797 */
93848a99 798 error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
ec3ba85f 799 XFS_ILOCK_EXCL, &ip);
bf904248 800 if (error)
1da177e4 801 return error;
1da177e4 802 ASSERT(ip != NULL);
3987848c 803 inode = VFS_I(ip);
1da177e4 804
805 /*
806 * We always convert v1 inodes to v2 now - we only support filesystems
807 * with >= v2 inode capability, so there is no reason for ever leaving
808 * an inode in v1 format.
809 */
810 if (ip->i_d.di_version == 1)
811 ip->i_d.di_version = 2;
812
c19b3b05 813 inode->i_mode = mode;
54d7b5c1 814 set_nlink(inode, nlink);
815 ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
816 ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
66f36464 817 inode->i_rdev = rdev;
6743099c 818 xfs_set_projid(ip, prid);
1da177e4 819
bd186aa9 820 if (pip && XFS_INHERIT_GID(pip)) {
1da177e4 821 ip->i_d.di_gid = pip->i_d.di_gid;
822 if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
823 inode->i_mode |= S_ISGID;
824 }
825
826 /*
827 * If the group ID of the new file does not match the effective group
828 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
829 * (and only if the irix_sgid_inherit compatibility variable is set).
830 */
831 if ((irix_sgid_inherit) &&
832 (inode->i_mode & S_ISGID) &&
833 (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
834 inode->i_mode &= ~S_ISGID;
835
836 ip->i_d.di_size = 0;
837 ip->i_d.di_nextents = 0;
838 ASSERT(ip->i_d.di_nblocks == 0);
dff35fd4 839
c2050a45 840 tv = current_time(inode);
841 inode->i_mtime = tv;
842 inode->i_atime = tv;
843 inode->i_ctime = tv;
dff35fd4 844
845 ip->i_d.di_extsize = 0;
846 ip->i_d.di_dmevmask = 0;
847 ip->i_d.di_dmstate = 0;
848 ip->i_d.di_flags = 0;
849
850 if (ip->i_d.di_version == 3) {
f0e28280 851 inode_set_iversion(inode, 1);
93848a99 852 ip->i_d.di_flags2 = 0;
f7ca3522 853 ip->i_d.di_cowextsize = 0;
854 ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
855 ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
856 }
857
858
859 flags = XFS_ILOG_CORE;
860 switch (mode & S_IFMT) {
861 case S_IFIFO:
862 case S_IFCHR:
863 case S_IFBLK:
864 case S_IFSOCK:
865 ip->i_d.di_format = XFS_DINODE_FMT_DEV;
866 ip->i_df.if_flags = 0;
867 flags |= XFS_ILOG_DEV;
868 break;
869 case S_IFREG:
870 case S_IFDIR:
b11f94d5 871 if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
58f88ca2 872 uint di_flags = 0;
365ca83d 873
abbede1b 874 if (S_ISDIR(mode)) {
875 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
876 di_flags |= XFS_DIFLAG_RTINHERIT;
877 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
878 di_flags |= XFS_DIFLAG_EXTSZINHERIT;
879 ip->i_d.di_extsize = pip->i_d.di_extsize;
880 }
881 if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
882 di_flags |= XFS_DIFLAG_PROJINHERIT;
abbede1b 883 } else if (S_ISREG(mode)) {
613d7043 884 if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
365ca83d 885 di_flags |= XFS_DIFLAG_REALTIME;
886 if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
887 di_flags |= XFS_DIFLAG_EXTSIZE;
888 ip->i_d.di_extsize = pip->i_d.di_extsize;
889 }
890 }
891 if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
892 xfs_inherit_noatime)
365ca83d 893 di_flags |= XFS_DIFLAG_NOATIME;
894 if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
895 xfs_inherit_nodump)
365ca83d 896 di_flags |= XFS_DIFLAG_NODUMP;
897 if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
898 xfs_inherit_sync)
365ca83d 899 di_flags |= XFS_DIFLAG_SYNC;
900 if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
901 xfs_inherit_nosymlinks)
365ca83d 902 di_flags |= XFS_DIFLAG_NOSYMLINKS;
903 if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
904 xfs_inherit_nodefrag)
905 di_flags |= XFS_DIFLAG_NODEFRAG;
906 if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
907 di_flags |= XFS_DIFLAG_FILESTREAM;
58f88ca2 908
365ca83d 909 ip->i_d.di_flags |= di_flags;
1da177e4 910 }
911 if (pip &&
912 (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
913 pip->i_d.di_version == 3 &&
914 ip->i_d.di_version == 3) {
915 uint64_t di_flags2 = 0;
916
f7ca3522 917 if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
56bdf855 918 di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
919 ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
920 }
921 if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
922 di_flags2 |= XFS_DIFLAG2_DAX;
923
924 ip->i_d.di_flags2 |= di_flags2;
f7ca3522 925 }
926 /* FALLTHROUGH */
927 case S_IFLNK:
928 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
929 ip->i_df.if_flags = XFS_IFEXTENTS;
fcacbc3f 930 ip->i_df.if_bytes = 0;
6bdcf26a 931 ip->i_df.if_u1.if_root = NULL;
932 break;
933 default:
934 ASSERT(0);
935 }
936 /*
937 * Attribute fork settings for new inode.
938 */
939 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
940 ip->i_d.di_anextents = 0;
941
942 /*
943 * Log the new values stuffed into the inode.
944 */
ddc3415a 945 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
946 xfs_trans_log_inode(tp, ip, flags);
947
58c90473 948 /* now that we have an i_mode we can setup the inode structure */
41be8bed 949 xfs_setup_inode(ip);
950
951 *ipp = ip;
952 return 0;
953}
954
955/*
 956 * Allocates a new inode from disk and returns a pointer to the
957 * incore copy. This routine will internally commit the current
958 * transaction and allocate a new one if the Space Manager needed
959 * to do an allocation to replenish the inode free-list.
960 *
961 * This routine is designed to be called from xfs_create and
962 * xfs_create_dir.
963 *
964 */
965int
966xfs_dir_ialloc(
967 xfs_trans_t **tpp, /* input: current transaction;
968 output: may be a new transaction. */
 969 xfs_inode_t *dp, /* directory within which to
 970 allocate the inode. */
971 umode_t mode,
972 xfs_nlink_t nlink,
66f36464 973 dev_t rdev,
e546cb79 974 prid_t prid, /* project id */
c959025e 975 xfs_inode_t **ipp) /* pointer to inode; it will be
e546cb79 976 locked. */
977{
978 xfs_trans_t *tp;
979 xfs_inode_t *ip;
980 xfs_buf_t *ialloc_context = NULL;
981 int code;
982 void *dqinfo;
983 uint tflags;
984
985 tp = *tpp;
986 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
987
988 /*
989 * xfs_ialloc will return a pointer to an incore inode if
990 * the Space Manager has an available inode on the free
991 * list. Otherwise, it will do an allocation and replenish
992 * the freelist. Since we can only do one allocation per
993 * transaction without deadlocks, we will need to commit the
994 * current transaction and start a new one. We will then
995 * need to call xfs_ialloc again to get the inode.
996 *
997 * If xfs_ialloc did an allocation to replenish the freelist,
998 * it returns the bp containing the head of the freelist as
999 * ialloc_context. We will hold a lock on it across the
1000 * transaction commit so that no other process can steal
1001 * the inode(s) that we've just allocated.
1002 */
1003 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
1004 &ip);
1005
1006 /*
1007 * Return an error if we were unable to allocate a new inode.
1008 * This should only happen if we run out of space on disk or
1009 * encounter a disk error.
1010 */
1011 if (code) {
1012 *ipp = NULL;
1013 return code;
1014 }
1015 if (!ialloc_context && !ip) {
1016 *ipp = NULL;
2451337d 1017 return -ENOSPC;
1018 }
1019
1020 /*
1021 * If the AGI buffer is non-NULL, then we were unable to get an
1022 * inode in one operation. We need to commit the current
1023 * transaction and call xfs_ialloc() again. It is guaranteed
1024 * to succeed the second time.
1025 */
1026 if (ialloc_context) {
1027 /*
1028 * Normally, xfs_trans_commit releases all the locks.
1029 * We call bhold to hang on to the ialloc_context across
1030 * the commit. Holding this buffer prevents any other
1031 * processes from doing any allocations in this
1032 * allocation group.
1033 */
1034 xfs_trans_bhold(tp, ialloc_context);
1035
1036 /*
1037 * We want the quota changes to be associated with the next
1038 * transaction, NOT this one. So, detach the dqinfo from this
1039 * and attach it to the next transaction.
1040 */
1041 dqinfo = NULL;
1042 tflags = 0;
1043 if (tp->t_dqinfo) {
1044 dqinfo = (void *)tp->t_dqinfo;
1045 tp->t_dqinfo = NULL;
1046 tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
1047 tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
1048 }
1049
411350df 1050 code = xfs_trans_roll(&tp);
3d3c8b52 1051
1052 /*
1053 * Re-attach the quota info that we detached from prev trx.
1054 */
1055 if (dqinfo) {
1056 tp->t_dqinfo = dqinfo;
1057 tp->t_flags |= tflags;
1058 }
1059
1060 if (code) {
1061 xfs_buf_relse(ialloc_context);
2e6db6c4 1062 *tpp = tp;
1063 *ipp = NULL;
1064 return code;
1065 }
1066 xfs_trans_bjoin(tp, ialloc_context);
1067
1068 /*
1069 * Call ialloc again. Since we've locked out all
1070 * other allocations in this allocation group,
1071 * this call should always succeed.
1072 */
1073 code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
f59cf5c2 1074 &ialloc_context, &ip);
1075
1076 /*
1077 * If we get an error at this point, return to the caller
1078 * so that the current transaction can be aborted.
1079 */
1080 if (code) {
1081 *tpp = tp;
1082 *ipp = NULL;
1083 return code;
1084 }
1085 ASSERT(!ialloc_context && ip);
1086
1087 }
1088
1089 *ipp = ip;
1090 *tpp = tp;
1091
1092 return 0;
1093}
1094
1095/*
1096 * Decrement the link count on an inode & log the change. If this causes the
1097 * link count to go to zero, move the inode to AGI unlinked list so that it can
1098 * be freed when the last active reference goes away via xfs_inactive().
e546cb79 1099 */
0d5a75e9 1100static int /* error */
1101xfs_droplink(
1102 xfs_trans_t *tp,
1103 xfs_inode_t *ip)
1104{
1105 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1106
1107 drop_nlink(VFS_I(ip));
1108 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1109
1110 if (VFS_I(ip)->i_nlink)
1111 return 0;
1112
1113 return xfs_iunlink(tp, ip);
1114}
1115
1116/*
1117 * Increment the link count on an inode & log the change.
1118 */
0d5a75e9 1119static int
1120xfs_bumplink(
1121 xfs_trans_t *tp,
1122 xfs_inode_t *ip)
1123{
1124 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);
1125
263997a6 1126 ASSERT(ip->i_d.di_version > 1);
e546cb79 1127 inc_nlink(VFS_I(ip));
1128 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1129 return 0;
1130}
1131
1132int
1133xfs_create(
1134 xfs_inode_t *dp,
1135 struct xfs_name *name,
1136 umode_t mode,
66f36464 1137 dev_t rdev,
1138 xfs_inode_t **ipp)
1139{
1140 int is_dir = S_ISDIR(mode);
1141 struct xfs_mount *mp = dp->i_mount;
1142 struct xfs_inode *ip = NULL;
1143 struct xfs_trans *tp = NULL;
1144 int error;
c24b5dfa 1145 bool unlock_dp_on_error = false;
1146 prid_t prid;
1147 struct xfs_dquot *udqp = NULL;
1148 struct xfs_dquot *gdqp = NULL;
1149 struct xfs_dquot *pdqp = NULL;
062647a8 1150 struct xfs_trans_res *tres;
c24b5dfa 1151 uint resblks;
1152
1153 trace_xfs_create(dp, name);
1154
1155 if (XFS_FORCED_SHUTDOWN(mp))
2451337d 1156 return -EIO;
c24b5dfa 1157
163467d3 1158 prid = xfs_get_initial_prid(dp);
1159
1160 /*
1161 * Make sure that we have allocated dquot(s) on disk.
1162 */
1163 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1164 xfs_kgid_to_gid(current_fsgid()), prid,
1165 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1166 &udqp, &gdqp, &pdqp);
1167 if (error)
1168 return error;
1169
1170 if (is_dir) {
c24b5dfa 1171 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
062647a8 1172 tres = &M_RES(mp)->tr_mkdir;
1173 } else {
1174 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
062647a8 1175 tres = &M_RES(mp)->tr_create;
1176 }
1177
1178 /*
1179 * Initially assume that the file does not exist and
1180 * reserve the resources for that case. If that is not
1181 * the case we'll drop the one we have and get a more
1182 * appropriate transaction later.
1183 */
253f4911 1184 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
2451337d 1185 if (error == -ENOSPC) {
1186 /* flush outstanding delalloc blocks and retry */
1187 xfs_flush_inodes(mp);
253f4911 1188 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
c24b5dfa 1189 }
4906e215 1190 if (error)
253f4911 1191 goto out_release_inode;
c24b5dfa 1192
65523218 1193 xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1194 unlock_dp_on_error = true;
1195
1196 /*
1197 * Reserve disk quota and the inode.
1198 */
1199 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1200 pdqp, resblks, 1, 0);
1201 if (error)
1202 goto out_trans_cancel;
1203
1204 /*
1205 * A newly created regular or special file just has one directory
 1206 * entry pointing to it, but a directory also has the "." entry
1207 * pointing to itself.
1208 */
c959025e 1209 error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip);
d6077aa3 1210 if (error)
4906e215 1211 goto out_trans_cancel;
1212
1213 /*
1214 * Now we join the directory inode to the transaction. We do not do it
1215 * earlier because xfs_dir_ialloc might commit the previous transaction
1216 * (and release all the locks). An error from here on will result in
1217 * the transaction cancel unlocking dp so don't do it explicitly in the
1218 * error path.
1219 */
65523218 1220 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1221 unlock_dp_on_error = false;
1222
381eee69 1223 error = xfs_dir_createname(tp, dp, name, ip->i_ino,
c9cfdb38 1224 resblks ?
1225 resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
1226 if (error) {
2451337d 1227 ASSERT(error != -ENOSPC);
4906e215 1228 goto out_trans_cancel;
1229 }
1230 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1231 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1232
1233 if (is_dir) {
1234 error = xfs_dir_init(tp, ip, dp);
1235 if (error)
c8eac49e 1236 goto out_trans_cancel;
1237
1238 error = xfs_bumplink(tp, dp);
1239 if (error)
c8eac49e 1240 goto out_trans_cancel;
1241 }
1242
1243 /*
1244 * If this is a synchronous mount, make sure that the
1245 * create transaction goes to disk before returning to
1246 * the user.
1247 */
1248 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1249 xfs_trans_set_sync(tp);
1250
1251 /*
1252 * Attach the dquot(s) to the inodes and modify them incore.
1253 * These ids of the inode couldn't have changed since the new
1254 * inode has been locked ever since it was created.
1255 */
1256 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1257
70393313 1258 error = xfs_trans_commit(tp);
1259 if (error)
1260 goto out_release_inode;
1261
1262 xfs_qm_dqrele(udqp);
1263 xfs_qm_dqrele(gdqp);
1264 xfs_qm_dqrele(pdqp);
1265
1266 *ipp = ip;
1267 return 0;
1268
c24b5dfa 1269 out_trans_cancel:
4906e215 1270 xfs_trans_cancel(tp);
1271 out_release_inode:
1272 /*
1273 * Wait until after the current transaction is aborted to finish the
1274 * setup of the inode and release the inode. This prevents recursive
1275 * transactions and deadlocks from xfs_inactive.
c24b5dfa 1276 */
1277 if (ip) {
1278 xfs_finish_inode_setup(ip);
44a8736b 1279 xfs_irele(ip);
58c90473 1280 }
1281
1282 xfs_qm_dqrele(udqp);
1283 xfs_qm_dqrele(gdqp);
1284 xfs_qm_dqrele(pdqp);
1285
1286 if (unlock_dp_on_error)
65523218 1287 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1288 return error;
1289}
1290
1291int
1292xfs_create_tmpfile(
1293 struct xfs_inode *dp,
1294 umode_t mode,
1295 struct xfs_inode **ipp)
1296{
1297 struct xfs_mount *mp = dp->i_mount;
1298 struct xfs_inode *ip = NULL;
1299 struct xfs_trans *tp = NULL;
1300 int error;
1301 prid_t prid;
1302 struct xfs_dquot *udqp = NULL;
1303 struct xfs_dquot *gdqp = NULL;
1304 struct xfs_dquot *pdqp = NULL;
1305 struct xfs_trans_res *tres;
1306 uint resblks;
1307
1308 if (XFS_FORCED_SHUTDOWN(mp))
2451337d 1309 return -EIO;
1310
1311 prid = xfs_get_initial_prid(dp);
1312
1313 /*
1314 * Make sure that we have allocated dquot(s) on disk.
1315 */
1316 error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
1317 xfs_kgid_to_gid(current_fsgid()), prid,
1318 XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
1319 &udqp, &gdqp, &pdqp);
1320 if (error)
1321 return error;
1322
1323 resblks = XFS_IALLOC_SPACE_RES(mp);
99b6436b 1324 tres = &M_RES(mp)->tr_create_tmpfile;
1325
1326 error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
4906e215 1327 if (error)
253f4911 1328 goto out_release_inode;
1329
1330 error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
1331 pdqp, resblks, 1, 0);
1332 if (error)
1333 goto out_trans_cancel;
1334
c959025e 1335 error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip);
d6077aa3 1336 if (error)
4906e215 1337 goto out_trans_cancel;
1338
1339 if (mp->m_flags & XFS_MOUNT_WSYNC)
1340 xfs_trans_set_sync(tp);
1341
1342 /*
1343 * Attach the dquot(s) to the inodes and modify them incore.
 1344 * The ids of the inode cannot have changed since the new
1345 * inode has been locked ever since it was created.
1346 */
1347 xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
1348
1349 error = xfs_iunlink(tp, ip);
1350 if (error)
4906e215 1351 goto out_trans_cancel;
99b6436b 1352
70393313 1353 error = xfs_trans_commit(tp);
1354 if (error)
1355 goto out_release_inode;
1356
1357 xfs_qm_dqrele(udqp);
1358 xfs_qm_dqrele(gdqp);
1359 xfs_qm_dqrele(pdqp);
1360
330033d6 1361 *ipp = ip;
1362 return 0;
1363
99b6436b 1364 out_trans_cancel:
4906e215 1365 xfs_trans_cancel(tp);
1366 out_release_inode:
1367 /*
1368 * Wait until after the current transaction is aborted to finish the
1369 * setup of the inode and release the inode. This prevents recursive
1370 * transactions and deadlocks from xfs_inactive.
99b6436b 1371 */
1372 if (ip) {
1373 xfs_finish_inode_setup(ip);
44a8736b 1374 xfs_irele(ip);
58c90473 1375 }
1376
1377 xfs_qm_dqrele(udqp);
1378 xfs_qm_dqrele(gdqp);
1379 xfs_qm_dqrele(pdqp);
1380
1381 return error;
1382}
1383
1384int
1385xfs_link(
1386 xfs_inode_t *tdp,
1387 xfs_inode_t *sip,
1388 struct xfs_name *target_name)
1389{
1390 xfs_mount_t *mp = tdp->i_mount;
1391 xfs_trans_t *tp;
1392 int error;
1393 int resblks;
1394
1395 trace_xfs_link(tdp, target_name);
1396
c19b3b05 1397 ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));
1398
1399 if (XFS_FORCED_SHUTDOWN(mp))
2451337d 1400 return -EIO;
c24b5dfa 1401
c14cfcca 1402 error = xfs_qm_dqattach(sip);
1403 if (error)
1404 goto std_return;
1405
c14cfcca 1406 error = xfs_qm_dqattach(tdp);
c24b5dfa
DC
1407 if (error)
1408 goto std_return;
1409
c24b5dfa 1410 resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
253f4911 1411 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
2451337d 1412 if (error == -ENOSPC) {
c24b5dfa 1413 resblks = 0;
253f4911 1414 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
c24b5dfa 1415 }
4906e215 1416 if (error)
253f4911 1417 goto std_return;
c24b5dfa 1418
7c2d238a 1419 xfs_lock_two_inodes(sip, XFS_ILOCK_EXCL, tdp, XFS_ILOCK_EXCL);
1420
1421 xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
65523218 1422 xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1423
1424 /*
1425 * If we are using project inheritance, we only allow hard link
1426 * creation in our tree when the project IDs are the same; else
1427 * the tree quota mechanism could be circumvented.
1428 */
1429 if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1430 (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
2451337d 1431 error = -EXDEV;
1432 goto error_return;
1433 }
1434
1435 if (!resblks) {
1436 error = xfs_dir_canenter(tp, tdp, target_name);
1437 if (error)
1438 goto error_return;
1439 }
c24b5dfa 1440
1441 /*
1442 * Handle initial link state of O_TMPFILE inode
1443 */
1444 if (VFS_I(sip)->i_nlink == 0) {
1445 error = xfs_iunlink_remove(tp, sip);
1446 if (error)
4906e215 1447 goto error_return;
1448 }
1449
c24b5dfa 1450 error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
381eee69 1451 resblks);
c24b5dfa 1452 if (error)
4906e215 1453 goto error_return;
1454 xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1455 xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1456
1457 error = xfs_bumplink(tp, sip);
1458 if (error)
4906e215 1459 goto error_return;
1460
1461 /*
1462 * If this is a synchronous mount, make sure that the
1463 * link transaction goes to disk before returning to
1464 * the user.
1465 */
f6106efa 1466 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
c24b5dfa 1467 xfs_trans_set_sync(tp);
c24b5dfa 1468
70393313 1469 return xfs_trans_commit(tp);
c24b5dfa 1470
c24b5dfa 1471 error_return:
4906e215 1472 xfs_trans_cancel(tp);
1473 std_return:
1474 return error;
1475}
1476
1477/* Clear the reflink flag and the cowblocks tag if possible. */
1478static void
1479xfs_itruncate_clear_reflink_flags(
1480 struct xfs_inode *ip)
1481{
1482 struct xfs_ifork *dfork;
1483 struct xfs_ifork *cfork;
1484
1485 if (!xfs_is_reflink_inode(ip))
1486 return;
1487 dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1488 cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
1489 if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
1490 ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
1491 if (cfork->if_bytes == 0)
1492 xfs_inode_clear_cowblocks_tag(ip);
1493}
1494
1da177e4 1495/*
1496 * Free up the underlying blocks past new_size. The new size must be smaller
1497 * than the current size. This routine can be used both for the attribute and
1498 * data fork, and does not modify the inode size, which is left to the caller.
1da177e4 1499 *
1500 * The transaction passed to this routine must have made a permanent log
1501 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
1502 * given transaction and start new ones, so make sure everything involved in
1503 * the transaction is tidy before calling here. Some transaction will be
1504 * returned to the caller to be committed. The incoming transaction must
1505 * already include the inode, and both inode locks must be held exclusively.
1506 * The inode must also be "held" within the transaction. On return the inode
1507 * will be "held" within the returned transaction. This routine does NOT
1508 * require any disk space to be reserved for it within the transaction.
1da177e4 1509 *
f6485057
DC
1510 * If we get an error, we must return with the inode locked and linked into the
1511 * current transaction. This keeps things simple for the higher level code,
1512 * because it always knows that the inode is locked and held in the transaction
1513 * that returns to it whether errors occur or not. We don't mark the inode
1514 * dirty on error so that transactions can be easily aborted if possible.
1515 */
1516int
4e529339 1517xfs_itruncate_extents_flags(
1518 struct xfs_trans **tpp,
1519 struct xfs_inode *ip,
1520 int whichfork,
13b86fc3 1521 xfs_fsize_t new_size,
4e529339 1522 int flags)
1da177e4 1523{
1524 struct xfs_mount *mp = ip->i_mount;
1525 struct xfs_trans *tp = *tpp;
1526 xfs_fileoff_t first_unmap_block;
1527 xfs_fileoff_t last_block;
1528 xfs_filblks_t unmap_len;
1529 int error = 0;
1530 int done = 0;
1da177e4 1531
1532 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1533 ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
1534 xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ce7ae151 1535 ASSERT(new_size <= XFS_ISIZE(ip));
8f04c47a 1536 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
1da177e4 1537 ASSERT(ip->i_itemp != NULL);
898621d5 1538 ASSERT(ip->i_itemp->ili_lock_flags == 0);
8f04c47a 1539 ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
1da177e4 1540
1541 trace_xfs_itruncate_extents_start(ip, new_size);
1542
4e529339 1543 flags |= xfs_bmapi_aflag(whichfork);
13b86fc3 1544
1545 /*
1546 * Since it is possible for space to become allocated beyond
1547 * the end of the file (in a crash where the space is allocated
1548 * but the inode size is not yet updated), simply remove any
1549 * blocks which show up between the new EOF and the maximum
1550 * possible file size. If the first block to be removed is
1551 * beyond the maximum file size (ie it is the same as last_block),
1552 * then there is nothing to do.
1553 */
8f04c47a 1554 first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
32972383 1555 last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1556 if (first_unmap_block == last_block)
1557 return 0;
1558
1559 ASSERT(first_unmap_block < last_block);
1560 unmap_len = last_block - first_unmap_block + 1;
1da177e4 1561 while (!done) {
02dff7bf 1562 ASSERT(tp->t_firstblock == NULLFSBLOCK);
13b86fc3 1563 error = xfs_bunmapi(tp, ip, first_unmap_block, unmap_len, flags,
2af52842 1564 XFS_ITRUNC_MAX_EXTENTS, &done);
8f04c47a 1565 if (error)
d5a2e289 1566 goto out;
1567
1568 /*
1569 * Duplicate the transaction that has the permanent
1570 * reservation and commit the old transaction.
1571 */
9e28a242 1572 error = xfs_defer_finish(&tp);
8f04c47a 1573 if (error)
9b1f4e98 1574 goto out;
1da177e4 1575
411350df 1576 error = xfs_trans_roll_inode(&tp, ip);
f6485057 1577 if (error)
8f04c47a 1578 goto out;
1da177e4 1579 }
8f04c47a 1580
1581 if (whichfork == XFS_DATA_FORK) {
1582 /* Remove all pending CoW reservations. */
1583 error = xfs_reflink_cancel_cow_blocks(ip, &tp,
1584 first_unmap_block, last_block, true);
1585 if (error)
1586 goto out;
aa8968f2 1587
1588 xfs_itruncate_clear_reflink_flags(ip);
1589 }
aa8968f2 1590
1591 /*
1592 * Always re-log the inode so that our permanent transaction can keep
1593 * on rolling it forward in the log.
1594 */
1595 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1596
1597 trace_xfs_itruncate_extents_end(ip, new_size);
1598
1599out:
1600 *tpp = tp;
1601 return error;
1602}
1603
1604int
1605xfs_release(
1606 xfs_inode_t *ip)
1607{
1608 xfs_mount_t *mp = ip->i_mount;
1609 int error;
1610
c19b3b05 1611 if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
c24b5dfa
DC
1612 return 0;
1613
1614 /* If this is a read-only mount, don't do this (would generate I/O) */
1615 if (mp->m_flags & XFS_MOUNT_RDONLY)
1616 return 0;
1617
1618 if (!XFS_FORCED_SHUTDOWN(mp)) {
1619 int truncated;
1620
1621 /*
1622 * If we previously truncated this file and removed old data
1623 * in the process, we want to initiate "early" writeout on
1624 * the last close. This is an attempt to combat the notorious
1625 * NULL files problem which is particularly noticeable from a
1626 * truncate down, buffered (re-)write (delalloc), followed by
1627 * a crash. What we are effectively doing here is
1628 * significantly reducing the time window where we'd otherwise
1629 * be exposed to that problem.
1630 */
1631 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1632 if (truncated) {
1633 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
eac152b4 1634 if (ip->i_delayed_blks > 0) {
2451337d 1635 error = filemap_flush(VFS_I(ip)->i_mapping);
1636 if (error)
1637 return error;
1638 }
1639 }
1640 }
1641
54d7b5c1 1642 if (VFS_I(ip)->i_nlink == 0)
1643 return 0;
1644
1645 if (xfs_can_free_eofblocks(ip, false)) {
1646
1647 /*
1648 * Check if the inode is being opened, written and closed
1649 * frequently and we have delayed allocation blocks outstanding
1650 * (e.g. streaming writes from the NFS server), truncating the
1651 * blocks past EOF will cause fragmentation to occur.
1652 *
1653 * In this case don't do the truncation, but we have to be
1654 * careful how we detect this case. Blocks beyond EOF show up as
1655 * i_delayed_blks even when the inode is clean, so we need to
1656 * truncate them away first before checking for a dirty release.
1657 * Hence on the first dirty close we will still remove the
1658 * speculative allocation, but after that we will leave it in
1659 * place.
1660 */
1661 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1662 return 0;
c24b5dfa
DC
1663 /*
1664 * If we can't get the iolock just skip truncating the blocks
1665 * past EOF because we could deadlock with the mmap_sem
a36b9261 1666 * otherwise. We'll get another chance to drop them once the
c24b5dfa
DC
1667 * last reference to the inode is dropped, so we'll never leak
1668 * blocks permanently.
c24b5dfa 1669 */
a36b9261
BF
1670 if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1671 error = xfs_free_eofblocks(ip);
1672 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1673 if (error)
1674 return error;
1675 }
c24b5dfa
DC
1676
1677 /* delalloc blocks after truncation means it really is dirty */
1678 if (ip->i_delayed_blks)
1679 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1680 }
1681 return 0;
1682}
1683
f7be2d7f
BF
1684/*
1685 * xfs_inactive_truncate
1686 *
1687 * Called to perform a truncate when an inode becomes unlinked.
1688 */
1689STATIC int
1690xfs_inactive_truncate(
1691 struct xfs_inode *ip)
1692{
1693 struct xfs_mount *mp = ip->i_mount;
1694 struct xfs_trans *tp;
1695 int error;
1696
253f4911 1697 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
f7be2d7f
BF
1698 if (error) {
1699 ASSERT(XFS_FORCED_SHUTDOWN(mp));
f7be2d7f
BF
1700 return error;
1701 }
f7be2d7f
BF
1702 xfs_ilock(ip, XFS_ILOCK_EXCL);
1703 xfs_trans_ijoin(tp, ip, 0);
1704
1705 /*
1706 * Log the inode size first to prevent stale data exposure in the event
1707 * of a system crash before the truncate completes. See the related
69bca807 1708 * comment in xfs_vn_setattr_size() for details.
f7be2d7f
BF
1709 */
1710 ip->i_d.di_size = 0;
1711 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1712
1713 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1714 if (error)
1715 goto error_trans_cancel;
1716
1717 ASSERT(ip->i_d.di_nextents == 0);
1718
70393313 1719 error = xfs_trans_commit(tp);
f7be2d7f
BF
1720 if (error)
1721 goto error_unlock;
1722
1723 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1724 return 0;
1725
1726error_trans_cancel:
4906e215 1727 xfs_trans_cancel(tp);
f7be2d7f
BF
1728error_unlock:
1729 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1730 return error;
1731}
1732
88877d2b
BF
1733/*
1734 * xfs_inactive_ifree()
1735 *
1736 * Perform the inode free when an inode is unlinked.
1737 */
1738STATIC int
1739xfs_inactive_ifree(
1740 struct xfs_inode *ip)
1741{
88877d2b
BF
1742 struct xfs_mount *mp = ip->i_mount;
1743 struct xfs_trans *tp;
1744 int error;
1745
9d43b180 1746 /*
76d771b4
CH
1747 * We try to use a per-AG reservation for any block needed by the finobt
1748 * tree, but as the finobt feature predates the per-AG reservation
1749 * support a degraded file system might not have enough space for the
1750 * reservation at mount time. In that case try to dip into the reserved
1751 * pool and pray.
9d43b180
BF
1752 *
1753 * Send a warning if the reservation does happen to fail, as the inode
1754 * now remains allocated and sits on the unlinked list until the fs is
1755 * repaired.
1756 */
76d771b4
CH
1757 if (unlikely(mp->m_inotbt_nores)) {
1758 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1759 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1760 &tp);
1761 } else {
1762 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1763 }
88877d2b 1764 if (error) {
2451337d 1765 if (error == -ENOSPC) {
9d43b180
BF
1766 xfs_warn_ratelimited(mp,
1767 "Failed to remove inode(s) from unlinked list. "
1768 "Please free space, unmount and run xfs_repair.");
1769 } else {
1770 ASSERT(XFS_FORCED_SHUTDOWN(mp));
1771 }
88877d2b
BF
1772 return error;
1773 }
1774
1775 xfs_ilock(ip, XFS_ILOCK_EXCL);
1776 xfs_trans_ijoin(tp, ip, 0);
1777
0e0417f3 1778 error = xfs_ifree(tp, ip);
88877d2b
BF
1779 if (error) {
1780 /*
 1781			 * If we fail to free the inode, shut down. The cancel
 1782			 * might do that for us, but we need to make sure. Otherwise the
1783 * inode might be lost for a long time or forever.
1784 */
1785 if (!XFS_FORCED_SHUTDOWN(mp)) {
1786 xfs_notice(mp, "%s: xfs_ifree returned error %d",
1787 __func__, error);
1788 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1789 }
4906e215 1790 xfs_trans_cancel(tp);
88877d2b
BF
1791 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1792 return error;
1793 }
1794
1795 /*
1796 * Credit the quota account(s). The inode is gone.
1797 */
1798 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1799
1800 /*
d4a97a04
BF
1801 * Just ignore errors at this point. There is nothing we can do except
1802 * to try to keep going. Make sure it's not a silent error.
88877d2b 1803 */
70393313 1804 error = xfs_trans_commit(tp);
88877d2b
BF
1805 if (error)
1806 xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1807 __func__, error);
1808
1809 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1810 return 0;
1811}
1812
c24b5dfa
DC
1813/*
1814 * xfs_inactive
1815 *
 1816 * This is called when the reference count on the vnode goes to
 1817 * zero. If the file has been unlinked, then it must
1818 * now be truncated. Also, we clear all of the read-ahead state
1819 * kept for the inode here since the file is now closed.
1820 */
74564fb4 1821void
c24b5dfa
DC
1822xfs_inactive(
1823 xfs_inode_t *ip)
1824{
3d3c8b52 1825 struct xfs_mount *mp;
3d3c8b52
JL
1826 int error;
1827 int truncate = 0;
c24b5dfa
DC
1828
1829 /*
1830 * If the inode is already free, then there can be nothing
1831 * to clean up here.
1832 */
c19b3b05 1833 if (VFS_I(ip)->i_mode == 0) {
c24b5dfa 1834 ASSERT(ip->i_df.if_broot_bytes == 0);
74564fb4 1835 return;
c24b5dfa
DC
1836 }
1837
1838 mp = ip->i_mount;
17c12bcd 1839 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
c24b5dfa 1840
c24b5dfa
DC
1841 /* If this is a read-only mount, don't do this (would generate I/O) */
1842 if (mp->m_flags & XFS_MOUNT_RDONLY)
74564fb4 1843 return;
c24b5dfa 1844
6231848c 1845 /* Try to clean out the cow blocks if there are any. */
51d62690 1846 if (xfs_inode_has_cow_data(ip))
6231848c
DW
1847 xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
1848
54d7b5c1 1849 if (VFS_I(ip)->i_nlink != 0) {
c24b5dfa
DC
1850 /*
1851 * force is true because we are evicting an inode from the
1852 * cache. Post-eof blocks must be freed, lest we end up with
1853 * broken free space accounting.
3b4683c2
BF
1854 *
1855 * Note: don't bother with iolock here since lockdep complains
1856 * about acquiring it in reclaim context. We have the only
 1857		 * reference to the inode at this point anyway.
c24b5dfa 1858 */
3b4683c2 1859 if (xfs_can_free_eofblocks(ip, true))
a36b9261 1860 xfs_free_eofblocks(ip);
74564fb4
BF
1861
1862 return;
c24b5dfa
DC
1863 }
1864
c19b3b05 1865 if (S_ISREG(VFS_I(ip)->i_mode) &&
c24b5dfa
DC
1866 (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1867 ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
1868 truncate = 1;
1869
c14cfcca 1870 error = xfs_qm_dqattach(ip);
c24b5dfa 1871 if (error)
74564fb4 1872 return;
c24b5dfa 1873
c19b3b05 1874 if (S_ISLNK(VFS_I(ip)->i_mode))
36b21dde 1875 error = xfs_inactive_symlink(ip);
f7be2d7f
BF
1876 else if (truncate)
1877 error = xfs_inactive_truncate(ip);
1878 if (error)
74564fb4 1879 return;
c24b5dfa
DC
1880
1881 /*
1882 * If there are attributes associated with the file then blow them away
1883 * now. The code calls a routine that recursively deconstructs the
6dfe5a04 1884	 * attribute fork. It also blows away the in-core attribute fork.
c24b5dfa 1885 */
6dfe5a04 1886 if (XFS_IFORK_Q(ip)) {
c24b5dfa
DC
1887 error = xfs_attr_inactive(ip);
1888 if (error)
74564fb4 1889 return;
c24b5dfa
DC
1890 }
1891
6dfe5a04 1892 ASSERT(!ip->i_afp);
c24b5dfa 1893 ASSERT(ip->i_d.di_anextents == 0);
6dfe5a04 1894 ASSERT(ip->i_d.di_forkoff == 0);
c24b5dfa
DC
1895
1896 /*
1897 * Free the inode.
1898 */
88877d2b
BF
1899 error = xfs_inactive_ifree(ip);
1900 if (error)
74564fb4 1901 return;
c24b5dfa
DC
1902
1903 /*
1904 * Release the dquots held by inode, if any.
1905 */
1906 xfs_qm_dqdetach(ip);
c24b5dfa
DC
1907}
1908
9a4a5118
DW
1909/*
1910 * Point the AGI unlinked bucket at an inode and log the results. The caller
1911 * is responsible for validating the old value.
1912 */
1913STATIC int
1914xfs_iunlink_update_bucket(
1915 struct xfs_trans *tp,
1916 xfs_agnumber_t agno,
1917 struct xfs_buf *agibp,
1918 unsigned int bucket_index,
1919 xfs_agino_t new_agino)
1920{
1921 struct xfs_agi *agi = XFS_BUF_TO_AGI(agibp);
1922 xfs_agino_t old_value;
1923 int offset;
1924
1925 ASSERT(xfs_verify_agino_or_null(tp->t_mountp, agno, new_agino));
1926
1927 old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1928 trace_xfs_iunlink_update_bucket(tp->t_mountp, agno, bucket_index,
1929 old_value, new_agino);
1930
1931 /*
1932 * We should never find the head of the list already set to the value
1933 * passed in because either we're adding or removing ourselves from the
1934 * head of the list.
1935 */
1936 if (old_value == new_agino)
1937 return -EFSCORRUPTED;
1938
1939 agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
1940 offset = offsetof(struct xfs_agi, agi_unlinked) +
1941 (sizeof(xfs_agino_t) * bucket_index);
1942 xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
1943 return 0;
1944}
1945
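/*
 * Worked example, illustrative only: xfs_agino_t is a 32 bit value, so for
 * bucket_index 18 the update above logs only the four bytes at
 *
 *	offset = offsetof(struct xfs_agi, agi_unlinked) + 4 * 18
 *
 * within the AGI buffer, rather than redirtying the whole AGI.
 */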
f2fc16a3
DW
1946/* Set an on-disk inode's next_unlinked pointer. */
1947STATIC void
1948xfs_iunlink_update_dinode(
1949 struct xfs_trans *tp,
1950 xfs_agnumber_t agno,
1951 xfs_agino_t agino,
1952 struct xfs_buf *ibp,
1953 struct xfs_dinode *dip,
1954 struct xfs_imap *imap,
1955 xfs_agino_t next_agino)
1956{
1957 struct xfs_mount *mp = tp->t_mountp;
1958 int offset;
1959
1960 ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
1961
1962 trace_xfs_iunlink_update_dinode(mp, agno, agino,
1963 be32_to_cpu(dip->di_next_unlinked), next_agino);
1964
1965 dip->di_next_unlinked = cpu_to_be32(next_agino);
1966 offset = imap->im_boffset +
1967 offsetof(struct xfs_dinode, di_next_unlinked);
1968
1969 /* need to recalc the inode CRC if appropriate */
1970 xfs_dinode_calc_crc(mp, dip);
1971 xfs_trans_inode_buf(tp, ibp);
1972 xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
1973 xfs_inobp_check(mp, ibp);
1974}
1975
1976/* Set an in-core inode's unlinked pointer and return the old value. */
1977STATIC int
1978xfs_iunlink_update_inode(
1979 struct xfs_trans *tp,
1980 struct xfs_inode *ip,
1981 xfs_agnumber_t agno,
1982 xfs_agino_t next_agino,
1983 xfs_agino_t *old_next_agino)
1984{
1985 struct xfs_mount *mp = tp->t_mountp;
1986 struct xfs_dinode *dip;
1987 struct xfs_buf *ibp;
1988 xfs_agino_t old_value;
1989 int error;
1990
1991 ASSERT(xfs_verify_agino_or_null(mp, agno, next_agino));
1992
1993 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp, 0, 0);
1994 if (error)
1995 return error;
1996
1997 /* Make sure the old pointer isn't garbage. */
1998 old_value = be32_to_cpu(dip->di_next_unlinked);
1999 if (!xfs_verify_agino_or_null(mp, agno, old_value)) {
2000 error = -EFSCORRUPTED;
2001 goto out;
2002 }
2003
2004 /*
2005 * Since we're updating a linked list, we should never find that the
2006 * current pointer is the same as the new value, unless we're
2007 * terminating the list.
2008 */
2009 *old_next_agino = old_value;
2010 if (old_value == next_agino) {
2011 if (next_agino != NULLAGINO)
2012 error = -EFSCORRUPTED;
2013 goto out;
2014 }
2015
2016 /* Ok, update the new pointer. */
2017 xfs_iunlink_update_dinode(tp, agno, XFS_INO_TO_AGINO(mp, ip->i_ino),
2018 ibp, dip, &ip->i_imap, next_agino);
2019 return 0;
2020out:
2021 xfs_trans_brelse(tp, ibp);
2022 return error;
2023}
2024
1da177e4 2025/*
54d7b5c1
DC
 2026 * This is called when the inode's link count goes to 0 or we are creating a
 2027 * tmpfile via O_TMPFILE. In the case of a tmpfile, the VFS only drops the
 2028 * link count to zero after we've created the file successfully, so we have
 2029 * to add the inode to the unlinked list while its link count is still
 2030 * non-zero.
2031 *
2032 * We place the on-disk inode on a list in the AGI. It will be pulled from this
2033 * list when the inode is freed.
1da177e4 2034 */
54d7b5c1 2035STATIC int
1da177e4 2036xfs_iunlink(
5837f625
DW
2037 struct xfs_trans *tp,
2038 struct xfs_inode *ip)
1da177e4 2039{
5837f625
DW
2040 struct xfs_mount *mp = tp->t_mountp;
2041 struct xfs_agi *agi;
5837f625 2042 struct xfs_buf *agibp;
86bfd375 2043 xfs_agino_t next_agino;
5837f625
DW
2044 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2045 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2046 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
5837f625 2047 int error;
1da177e4 2048
c19b3b05 2049 ASSERT(VFS_I(ip)->i_mode != 0);
1da177e4 2050
5837f625
DW
2051 /* Get the agi buffer first. It ensures lock ordering on the list. */
2052 error = xfs_read_agi(mp, tp, agno, &agibp);
859d7182 2053 if (error)
1da177e4 2054 return error;
1da177e4 2055 agi = XFS_BUF_TO_AGI(agibp);
5e1be0fb 2056
1da177e4 2057 /*
86bfd375
DW
2058 * Get the index into the agi hash table for the list this inode will
2059 * go on. Make sure the pointer isn't garbage and that this inode
2060 * isn't already on the list.
1da177e4 2061 */
86bfd375
DW
2062 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2063 if (next_agino == agino ||
2064 !xfs_verify_agino_or_null(mp, agno, next_agino))
2065 return -EFSCORRUPTED;
1da177e4 2066
86bfd375 2067 if (next_agino != NULLAGINO) {
f2fc16a3
DW
2068 xfs_agino_t old_agino;
2069
1da177e4 2070 /*
f2fc16a3
DW
2071 * There is already another inode in the bucket, so point this
2072 * inode to the current head of the list.
1da177e4 2073 */
f2fc16a3
DW
2074 error = xfs_iunlink_update_inode(tp, ip, agno, next_agino,
2075 &old_agino);
c319b58b
VA
2076 if (error)
2077 return error;
f2fc16a3 2078 ASSERT(old_agino == NULLAGINO);
1da177e4
LT
2079 }
2080
9a4a5118
DW
2081 /* Point the head of the list to point to this inode. */
2082 return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, agino);
1da177e4
LT
2083}
2084
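/*
 * Worked example, illustrative only and assuming XFS_AGI_UNLINKED_BUCKETS
 * is 64 as defined in xfs_format.h: an inode with agino 1234 hashes to
 * bucket 1234 % 64 = 18. If bucket 18 currently heads a chain
 *
 *	agi_unlinked[18] -> 500 -> ... -> NULLAGINO
 *
 * then xfs_iunlink() above produces
 *
 *	agi_unlinked[18] -> 1234 -> 500 -> ... -> NULLAGINO
 *
 * by writing 500 (the old head) into the new inode's di_next_unlinked and
 * pointing the bucket at agino 1234.
 */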
23ffa52c
DW
2085/* Return the imap, dinode pointer, and buffer for an inode. */
2086STATIC int
2087xfs_iunlink_map_ino(
2088 struct xfs_trans *tp,
2089 xfs_agnumber_t agno,
2090 xfs_agino_t agino,
2091 struct xfs_imap *imap,
2092 struct xfs_dinode **dipp,
2093 struct xfs_buf **bpp)
2094{
2095 struct xfs_mount *mp = tp->t_mountp;
2096 int error;
2097
2098 imap->im_blkno = 0;
2099 error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2100 if (error) {
2101 xfs_warn(mp, "%s: xfs_imap returned error %d.",
2102 __func__, error);
2103 return error;
2104 }
2105
2106 error = xfs_imap_to_bp(mp, tp, imap, dipp, bpp, 0, 0);
2107 if (error) {
2108 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2109 __func__, error);
2110 return error;
2111 }
2112
2113 return 0;
2114}
2115
2116/*
2117 * Walk the unlinked chain from @head_agino until we find the inode that
2118 * points to @target_agino. Return the inode number, map, dinode pointer,
2119 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2120 *
2121 * @tp, @pag, @head_agino, and @target_agino are input parameters.
2122 * @agino, @imap, @dipp, and @bpp are all output parameters.
2123 *
2124 * Do not call this function if @target_agino is the head of the list.
2125 */
2126STATIC int
2127xfs_iunlink_map_prev(
2128 struct xfs_trans *tp,
2129 xfs_agnumber_t agno,
2130 xfs_agino_t head_agino,
2131 xfs_agino_t target_agino,
2132 xfs_agino_t *agino,
2133 struct xfs_imap *imap,
2134 struct xfs_dinode **dipp,
2135 struct xfs_buf **bpp)
2136{
2137 struct xfs_mount *mp = tp->t_mountp;
2138 xfs_agino_t next_agino;
2139 int error;
2140
2141 ASSERT(head_agino != target_agino);
2142 *bpp = NULL;
2143
2144 next_agino = head_agino;
2145 while (next_agino != target_agino) {
2146 xfs_agino_t unlinked_agino;
2147
2148 if (*bpp)
2149 xfs_trans_brelse(tp, *bpp);
2150
2151 *agino = next_agino;
2152 error = xfs_iunlink_map_ino(tp, agno, next_agino, imap, dipp,
2153 bpp);
2154 if (error)
2155 return error;
2156
2157 unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2158 /*
2159 * Make sure this pointer is valid and isn't an obvious
2160 * infinite loop.
2161 */
2162 if (!xfs_verify_agino(mp, agno, unlinked_agino) ||
2163 next_agino == unlinked_agino) {
2164 XFS_CORRUPTION_ERROR(__func__,
2165 XFS_ERRLEVEL_LOW, mp,
2166 *dipp, sizeof(**dipp));
2167 error = -EFSCORRUPTED;
2168 return error;
2169 }
2170 next_agino = unlinked_agino;
2171 }
2172
2173 return 0;
2174}
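/*
 * Illustrative walk, not part of the original file: given a bucket chain
 *
 *	head (42) -> 17 -> 93 -> NULLAGINO
 *
 * removing agino 93 calls xfs_iunlink_map_prev() with head_agino = 42 and
 * target_agino = 93. The loop maps inode 42, sees di_next_unlinked = 17,
 * maps inode 17, sees di_next_unlinked = 93 and stops, returning agino 17
 * together with its imap, dinode pointer and cluster buffer so the caller
 * can repoint 17's di_next_unlinked past the inode being removed.
 */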
2175
1da177e4
LT
2176/*
2177 * Pull the on-disk inode from the AGI unlinked list.
2178 */
2179STATIC int
2180xfs_iunlink_remove(
5837f625
DW
2181 struct xfs_trans *tp,
2182 struct xfs_inode *ip)
1da177e4 2183{
5837f625
DW
2184 struct xfs_mount *mp = tp->t_mountp;
2185 struct xfs_agi *agi;
5837f625 2186 struct xfs_buf *agibp;
5837f625
DW
2187 struct xfs_buf *last_ibp;
2188 struct xfs_dinode *last_dip = NULL;
5837f625
DW
2189 xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2190 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2191 xfs_agino_t next_agino;
2192 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
5837f625 2193 int error;
1da177e4 2194
5837f625 2195 /* Get the agi buffer first. It ensures lock ordering on the list. */
5e1be0fb
CH
2196 error = xfs_read_agi(mp, tp, agno, &agibp);
2197 if (error)
1da177e4 2198 return error;
1da177e4 2199 agi = XFS_BUF_TO_AGI(agibp);
5e1be0fb 2200
1da177e4 2201 /*
86bfd375
DW
2202 * Get the index into the agi hash table for the list this inode will
2203 * go on. Make sure the head pointer isn't garbage.
1da177e4 2204 */
86bfd375
DW
2205 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2206 if (!xfs_verify_agino(mp, agno, next_agino)) {
d2e73665
DW
2207 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2208 agi, sizeof(*agi));
2209 return -EFSCORRUPTED;
2210 }
1da177e4 2211
86bfd375 2212 if (next_agino == agino) {
1da177e4 2213 /*
475ee413
CH
2214 * We're at the head of the list. Get the inode's on-disk
2215 * buffer to see if there is anyone after us on the list.
1da177e4 2216 */
f2fc16a3
DW
2217 error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO,
2218 &next_agino);
2219 if (error)
1da177e4 2220 return error;
9a4a5118
DW
2221
2222 /* Point the head of the list to the next unlinked inode. */
2223 error = xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index,
2224 next_agino);
2225 if (error)
2226 return error;
1da177e4 2227 } else {
f2fc16a3
DW
2228 struct xfs_imap imap;
2229 xfs_agino_t prev_agino;
2230
23ffa52c
DW
2231 /* We need to search the list for the inode being freed. */
2232 error = xfs_iunlink_map_prev(tp, agno, next_agino, agino,
2233 &prev_agino, &imap, &last_dip, &last_ibp);
2234 if (error)
2235 return error;
475ee413 2236
1da177e4 2237 /*
475ee413
CH
2238 * Now last_ibp points to the buffer previous to us on the
2239 * unlinked list. Pull us from the list.
1da177e4 2240 */
f2fc16a3
DW
2241 error = xfs_iunlink_update_inode(tp, ip, agno, NULLAGINO,
2242 &next_agino);
2243 if (error)
1da177e4 2244 return error;
0a32c26e 2245
f2fc16a3
DW
2246 /* Point the previous inode on the list to the next inode. */
2247 xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp,
2248 last_dip, &imap, next_agino);
1da177e4
LT
2249 }
2250 return 0;
2251}
2252
5b3eed75 2253/*
0b8182db 2254 * A big issue when freeing the inode cluster is that we _cannot_ skip any
5b3eed75
DC
2255 * inodes that are in memory - they all must be marked stale and attached to
2256 * the cluster buffer.
2257 */
2a30f36d 2258STATIC int
1da177e4 2259xfs_ifree_cluster(
09b56604
BF
2260 xfs_inode_t *free_ip,
2261 xfs_trans_t *tp,
2262 struct xfs_icluster *xic)
1da177e4
LT
2263{
2264 xfs_mount_t *mp = free_ip->i_mount;
1da177e4 2265 int nbufs;
5b257b4a 2266 int i, j;
3cdaa189 2267 int ioffset;
1da177e4
LT
2268 xfs_daddr_t blkno;
2269 xfs_buf_t *bp;
5b257b4a 2270 xfs_inode_t *ip;
1da177e4 2271 xfs_inode_log_item_t *iip;
643c8c05 2272 struct xfs_log_item *lip;
5017e97d 2273 struct xfs_perag *pag;
09b56604 2274 xfs_ino_t inum;
1da177e4 2275
09b56604 2276 inum = xic->first_ino;
5017e97d 2277 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
83dcdb44 2278 nbufs = mp->m_ialloc_blks / mp->m_blocks_per_cluster;
1da177e4 2279
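	/*
	 * Illustrative numbers only (an assumption, not taken from this
	 * file): with a 64-inode chunk spanning 8 filesystem blocks and
	 * 2-block cluster buffers, nbufs = 8 / 2 = 4 and each pass of the
	 * loop below covers m_inodes_per_cluster = 16 inodes.
	 */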
83dcdb44 2280 for (j = 0; j < nbufs; j++, inum += mp->m_inodes_per_cluster) {
09b56604
BF
2281 /*
2282 * The allocation bitmap tells us which inodes of the chunk were
2283 * physically allocated. Skip the cluster if an inode falls into
2284 * a sparse region.
2285 */
3cdaa189
BF
2286 ioffset = inum - xic->first_ino;
2287 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
83dcdb44 2288 ASSERT(ioffset % mp->m_inodes_per_cluster == 0);
09b56604
BF
2289 continue;
2290 }
2291
1da177e4
LT
2292 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2293 XFS_INO_TO_AGBNO(mp, inum));
2294
5b257b4a
DC
2295 /*
2296 * We obtain and lock the backing buffer first in the process
2297 * here, as we have to ensure that any dirty inode that we
2298 * can't get the flush lock on is attached to the buffer.
2299 * If we scan the in-memory inodes first, then buffer IO can
2300 * complete before we get a lock on it, and hence we may fail
2301 * to mark all the active inodes on the buffer stale.
2302 */
2303 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
83dcdb44 2304 mp->m_bsize * mp->m_blocks_per_cluster,
b6aff29f 2305 XBF_UNMAPPED);
5b257b4a 2306
2a30f36d 2307 if (!bp)
2451337d 2308 return -ENOMEM;
b0f539de
DC
2309
2310 /*
2311 * This buffer may not have been correctly initialised as we
2312 * didn't read it from disk. That's not important because we are
 2313		 * only using it to mark the buffer as stale in the log, and to
2314 * attach stale cached inodes on it. That means it will never be
2315 * dispatched for IO. If it is, we want to know about it, and we
 2316		 * want it to fail. We can achieve this by adding a write
2317 * verifier to the buffer.
2318 */
8c4ce794 2319 bp->b_ops = &xfs_inode_buf_ops;
b0f539de 2320
5b257b4a
DC
2321 /*
2322 * Walk the inodes already attached to the buffer and mark them
2323 * stale. These will all have the flush locks held, so an
5b3eed75
DC
2324 * in-memory inode walk can't lock them. By marking them all
2325 * stale first, we will not attempt to lock them in the loop
2326 * below as the XFS_ISTALE flag will be set.
5b257b4a 2327 */
643c8c05 2328 list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
5b257b4a
DC
2329 if (lip->li_type == XFS_LI_INODE) {
2330 iip = (xfs_inode_log_item_t *)lip;
2331 ASSERT(iip->ili_logged == 1);
ca30b2a7 2332 lip->li_cb = xfs_istale_done;
5b257b4a
DC
2333 xfs_trans_ail_copy_lsn(mp->m_ail,
2334 &iip->ili_flush_lsn,
2335 &iip->ili_item.li_lsn);
2336 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
5b257b4a 2337 }
5b257b4a 2338 }
1da177e4 2339
5b3eed75 2340
1da177e4 2341 /*
5b257b4a
DC
2342 * For each inode in memory attempt to add it to the inode
2343 * buffer and set it up for being staled on buffer IO
2344 * completion. This is safe as we've locked out tail pushing
2345 * and flushing by locking the buffer.
1da177e4 2346 *
5b257b4a
DC
2347 * We have already marked every inode that was part of a
2348 * transaction stale above, which means there is no point in
2349 * even trying to lock them.
1da177e4 2350 */
83dcdb44 2351 for (i = 0; i < mp->m_inodes_per_cluster; i++) {
5b3eed75 2352retry:
1a3e8f3d 2353 rcu_read_lock();
da353b0d
DC
2354 ip = radix_tree_lookup(&pag->pag_ici_root,
2355 XFS_INO_TO_AGINO(mp, (inum + i)));
1da177e4 2356
1a3e8f3d
DC
2357 /* Inode not in memory, nothing to do */
2358 if (!ip) {
2359 rcu_read_unlock();
1da177e4
LT
2360 continue;
2361 }
2362
1a3e8f3d
DC
2363 /*
2364 * because this is an RCU protected lookup, we could
2365 * find a recently freed or even reallocated inode
2366 * during the lookup. We need to check under the
2367 * i_flags_lock for a valid inode here. Skip it if it
2368 * is not valid, the wrong inode or stale.
2369 */
2370 spin_lock(&ip->i_flags_lock);
2371 if (ip->i_ino != inum + i ||
2372 __xfs_iflags_test(ip, XFS_ISTALE)) {
2373 spin_unlock(&ip->i_flags_lock);
2374 rcu_read_unlock();
2375 continue;
2376 }
2377 spin_unlock(&ip->i_flags_lock);
2378
5b3eed75
DC
2379 /*
2380 * Don't try to lock/unlock the current inode, but we
2381 * _cannot_ skip the other inodes that we did not find
2382 * in the list attached to the buffer and are not
2383 * already marked stale. If we can't lock it, back off
2384 * and retry.
2385 */
f2e9ad21
OS
2386 if (ip != free_ip) {
2387 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2388 rcu_read_unlock();
2389 delay(1);
2390 goto retry;
2391 }
2392
2393 /*
2394 * Check the inode number again in case we're
2395 * racing with freeing in xfs_reclaim_inode().
2396 * See the comments in that function for more
2397 * information as to why the initial check is
2398 * not sufficient.
2399 */
2400 if (ip->i_ino != inum + i) {
2401 xfs_iunlock(ip, XFS_ILOCK_EXCL);
962cc1ad 2402 rcu_read_unlock();
f2e9ad21
OS
2403 continue;
2404 }
1da177e4 2405 }
1a3e8f3d 2406 rcu_read_unlock();
1da177e4 2407
5b3eed75 2408 xfs_iflock(ip);
5b257b4a 2409 xfs_iflags_set(ip, XFS_ISTALE);
1da177e4 2410
5b3eed75
DC
2411 /*
2412 * we don't need to attach clean inodes or those only
2413 * with unlogged changes (which we throw away, anyway).
2414 */
1da177e4 2415 iip = ip->i_itemp;
5b3eed75 2416 if (!iip || xfs_inode_clean(ip)) {
5b257b4a 2417 ASSERT(ip != free_ip);
1da177e4
LT
2418 xfs_ifunlock(ip);
2419 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2420 continue;
2421 }
2422
f5d8d5c4
CH
2423 iip->ili_last_fields = iip->ili_fields;
2424 iip->ili_fields = 0;
fc0561ce 2425 iip->ili_fsync_fields = 0;
1da177e4 2426 iip->ili_logged = 1;
7b2e2a31
DC
2427 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2428 &iip->ili_item.li_lsn);
1da177e4 2429
ca30b2a7
CH
2430 xfs_buf_attach_iodone(bp, xfs_istale_done,
2431 &iip->ili_item);
5b257b4a
DC
2432
2433 if (ip != free_ip)
1da177e4 2434 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1da177e4
LT
2435 }
2436
5b3eed75 2437 xfs_trans_stale_inode_buf(tp, bp);
1da177e4
LT
2438 xfs_trans_binval(tp, bp);
2439 }
2440
5017e97d 2441 xfs_perag_put(pag);
2a30f36d 2442 return 0;
1da177e4
LT
2443}
2444
98c4f78d
DW
2445/*
2446 * Free any local-format buffers sitting around before we reset to
2447 * extents format.
2448 */
2449static inline void
2450xfs_ifree_local_data(
2451 struct xfs_inode *ip,
2452 int whichfork)
2453{
2454 struct xfs_ifork *ifp;
2455
2456 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
2457 return;
2458
2459 ifp = XFS_IFORK_PTR(ip, whichfork);
2460 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
2461}
2462
1da177e4
LT
2463/*
2464 * This is called to return an inode to the inode free list.
2465 * The inode should already be truncated to 0 length and have
2466 * no pages associated with it. This routine also assumes that
2467 * the inode is already a part of the transaction.
2468 *
2469 * The on-disk copy of the inode will have been added to the list
2470 * of unlinked inodes in the AGI. We need to remove the inode from
2471 * that list atomically with respect to freeing it here.
2472 */
2473int
2474xfs_ifree(
0e0417f3
BF
2475 struct xfs_trans *tp,
2476 struct xfs_inode *ip)
1da177e4
LT
2477{
2478 int error;
09b56604 2479 struct xfs_icluster xic = { 0 };
1da177e4 2480
579aa9ca 2481 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
54d7b5c1 2482 ASSERT(VFS_I(ip)->i_nlink == 0);
1da177e4
LT
2483 ASSERT(ip->i_d.di_nextents == 0);
2484 ASSERT(ip->i_d.di_anextents == 0);
c19b3b05 2485 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
1da177e4
LT
2486 ASSERT(ip->i_d.di_nblocks == 0);
2487
2488 /*
2489 * Pull the on-disk inode from the AGI unlinked list.
2490 */
2491 error = xfs_iunlink_remove(tp, ip);
1baaed8f 2492 if (error)
1da177e4 2493 return error;
1da177e4 2494
0e0417f3 2495 error = xfs_difree(tp, ip->i_ino, &xic);
1baaed8f 2496 if (error)
1da177e4 2497 return error;
1baaed8f 2498
98c4f78d
DW
2499 xfs_ifree_local_data(ip, XFS_DATA_FORK);
2500 xfs_ifree_local_data(ip, XFS_ATTR_FORK);
2501
c19b3b05 2502 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
1da177e4 2503 ip->i_d.di_flags = 0;
beaae8cd 2504 ip->i_d.di_flags2 = 0;
1da177e4
LT
2505 ip->i_d.di_dmevmask = 0;
2506 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
1da177e4
LT
2507 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2508 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
dc1baa71
ES
2509
2510 /* Don't attempt to replay owner changes for a deleted inode */
2511 ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER);
2512
1da177e4
LT
2513 /*
2514 * Bump the generation count so no one will be confused
2515 * by reincarnations of this inode.
2516 */
9e9a2674 2517 VFS_I(ip)->i_generation++;
1da177e4
LT
2518 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2519
09b56604
BF
2520 if (xic.deleted)
2521 error = xfs_ifree_cluster(ip, tp, &xic);
1da177e4 2522
2a30f36d 2523 return error;
1da177e4
LT
2524}
2525
1da177e4 2526/*
60ec6783
CH
2527 * This is called to unpin an inode. The caller must have the inode locked
2528 * in at least shared mode so that the buffer cannot be subsequently pinned
2529 * once someone is waiting for it to be unpinned.
1da177e4 2530 */
60ec6783 2531static void
f392e631 2532xfs_iunpin(
60ec6783 2533 struct xfs_inode *ip)
1da177e4 2534{
579aa9ca 2535 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1da177e4 2536
4aaf15d1
DC
2537 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2538
a3f74ffb 2539 /* Give the log a push to start the unpinning I/O */
656de4ff 2540 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0, NULL);
a14a348b 2541
a3f74ffb 2542}
1da177e4 2543
f392e631
CH
2544static void
2545__xfs_iunpin_wait(
2546 struct xfs_inode *ip)
2547{
2548 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2549 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2550
2551 xfs_iunpin(ip);
2552
2553 do {
21417136 2554 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
f392e631
CH
2555 if (xfs_ipincount(ip))
2556 io_schedule();
2557 } while (xfs_ipincount(ip));
21417136 2558 finish_wait(wq, &wait.wq_entry);
f392e631
CH
2559}
2560
777df5af 2561void
a3f74ffb 2562xfs_iunpin_wait(
60ec6783 2563 struct xfs_inode *ip)
a3f74ffb 2564{
f392e631
CH
2565 if (xfs_ipincount(ip))
2566 __xfs_iunpin_wait(ip);
1da177e4
LT
2567}
2568
27320369
DC
2569/*
2570 * Removing an inode from the namespace involves removing the directory entry
2571 * and dropping the link count on the inode. Removing the directory entry can
2572 * result in locking an AGF (directory blocks were freed) and removing a link
2573 * count can result in placing the inode on an unlinked list which results in
2574 * locking an AGI.
2575 *
2576 * The big problem here is that we have an ordering constraint on AGF and AGI
2577 * locking - inode allocation locks the AGI, then can allocate a new extent for
2578 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2579 * removes the inode from the unlinked list, requiring that we lock the AGI
2580 * first, and then freeing the inode can result in an inode chunk being freed
2581 * and hence freeing disk space requiring that we lock an AGF.
2582 *
2583 * Hence the ordering that is imposed by other parts of the code is AGI before
2584 * AGF. This means we cannot remove the directory entry before we drop the inode
2585 * reference count and put it on the unlinked list as this results in a lock
2586 * order of AGF then AGI, and this can deadlock against inode allocation and
2587 * freeing. Therefore we must drop the link counts before we remove the
2588 * directory entry.
2589 *
2590 * This is still safe from a transactional point of view - it is not until we
310a75a3 2591 * get to xfs_defer_finish() that we have the possibility of multiple
27320369
DC
2592 * transactions in this operation. Hence as long as we remove the directory
2593 * entry and drop the link count in the first transaction of the remove
2594 * operation, there are no transactional constraints on the ordering here.
2595 */
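/*
 * Illustrative summary of the constraint above (a sketch, not part of the
 * original file):
 *
 *	inode allocation:	lock AGI (allocate inode)
 *				  -> lock AGF (allocate blocks for a new chunk)
 *
 *	unlink, this code:	drop link count / join unlinked list (AGI)
 *				  -> remove dirent, possibly freeing dir blocks (AGF)
 *
 * Removing the directory entry first would take the AGF before the AGI and
 * could deadlock against the allocation path.
 */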
c24b5dfa
DC
2596int
2597xfs_remove(
2598 xfs_inode_t *dp,
2599 struct xfs_name *name,
2600 xfs_inode_t *ip)
2601{
2602 xfs_mount_t *mp = dp->i_mount;
2603 xfs_trans_t *tp = NULL;
c19b3b05 2604 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
c24b5dfa 2605 int error = 0;
c24b5dfa 2606 uint resblks;
c24b5dfa
DC
2607
2608 trace_xfs_remove(dp, name);
2609
2610 if (XFS_FORCED_SHUTDOWN(mp))
2451337d 2611 return -EIO;
c24b5dfa 2612
c14cfcca 2613 error = xfs_qm_dqattach(dp);
c24b5dfa
DC
2614 if (error)
2615 goto std_return;
2616
c14cfcca 2617 error = xfs_qm_dqattach(ip);
c24b5dfa
DC
2618 if (error)
2619 goto std_return;
2620
c24b5dfa
DC
2621 /*
2622 * We try to get the real space reservation first,
2623 * allowing for directory btree deletion(s) implying
2624 * possible bmap insert(s). If we can't get the space
2625 * reservation then we use 0 instead, and avoid the bmap
2626 * btree insert(s) in the directory code by, if the bmap
2627 * insert tries to happen, instead trimming the LAST
2628 * block from the directory.
2629 */
2630 resblks = XFS_REMOVE_SPACE_RES(mp);
253f4911 2631 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2451337d 2632 if (error == -ENOSPC) {
c24b5dfa 2633 resblks = 0;
253f4911
CH
2634 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2635 &tp);
c24b5dfa
DC
2636 }
2637 if (error) {
2451337d 2638 ASSERT(error != -ENOSPC);
253f4911 2639 goto std_return;
c24b5dfa
DC
2640 }
2641
7c2d238a 2642 xfs_lock_two_inodes(dp, XFS_ILOCK_EXCL, ip, XFS_ILOCK_EXCL);
c24b5dfa 2643
65523218 2644 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
c24b5dfa
DC
2645 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2646
2647 /*
2648 * If we're removing a directory perform some additional validation.
2649 */
2650 if (is_dir) {
54d7b5c1
DC
2651 ASSERT(VFS_I(ip)->i_nlink >= 2);
2652 if (VFS_I(ip)->i_nlink != 2) {
2451337d 2653 error = -ENOTEMPTY;
c24b5dfa
DC
2654 goto out_trans_cancel;
2655 }
2656 if (!xfs_dir_isempty(ip)) {
2451337d 2657 error = -ENOTEMPTY;
c24b5dfa
DC
2658 goto out_trans_cancel;
2659 }
c24b5dfa 2660
27320369 2661 /* Drop the link from ip's "..". */
c24b5dfa
DC
2662 error = xfs_droplink(tp, dp);
2663 if (error)
27320369 2664 goto out_trans_cancel;
c24b5dfa 2665
27320369 2666 /* Drop the "." link from ip to self. */
c24b5dfa
DC
2667 error = xfs_droplink(tp, ip);
2668 if (error)
27320369 2669 goto out_trans_cancel;
c24b5dfa
DC
2670 } else {
2671 /*
2672 * When removing a non-directory we need to log the parent
2673 * inode here. For a directory this is done implicitly
2674 * by the xfs_droplink call for the ".." entry.
2675 */
2676 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2677 }
27320369 2678 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
c24b5dfa 2679
27320369 2680 /* Drop the link from dp to ip. */
c24b5dfa
DC
2681 error = xfs_droplink(tp, ip);
2682 if (error)
27320369 2683 goto out_trans_cancel;
c24b5dfa 2684
381eee69 2685 error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
27320369 2686 if (error) {
2451337d 2687 ASSERT(error != -ENOENT);
c8eac49e 2688 goto out_trans_cancel;
27320369
DC
2689 }
2690
c24b5dfa
DC
2691 /*
2692 * If this is a synchronous mount, make sure that the
2693 * remove transaction goes to disk before returning to
2694 * the user.
2695 */
2696 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2697 xfs_trans_set_sync(tp);
2698
70393313 2699 error = xfs_trans_commit(tp);
c24b5dfa
DC
2700 if (error)
2701 goto std_return;
2702
2cd2ef6a 2703 if (is_dir && xfs_inode_is_filestream(ip))
c24b5dfa
DC
2704 xfs_filestream_deassociate(ip);
2705
2706 return 0;
2707
c24b5dfa 2708 out_trans_cancel:
4906e215 2709 xfs_trans_cancel(tp);
c24b5dfa
DC
2710 std_return:
2711 return error;
2712}
2713
f6bba201
DC
2714/*
2715 * Enter all inodes for a rename transaction into a sorted array.
2716 */
95afcf5c 2717#define __XFS_SORT_INODES 5
f6bba201
DC
2718STATIC void
2719xfs_sort_for_rename(
95afcf5c
DC
2720 struct xfs_inode *dp1, /* in: old (source) directory inode */
2721 struct xfs_inode *dp2, /* in: new (target) directory inode */
2722 struct xfs_inode *ip1, /* in: inode of old entry */
2723 struct xfs_inode *ip2, /* in: inode of new entry */
2724 struct xfs_inode *wip, /* in: whiteout inode */
2725 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2726 int *num_inodes) /* in/out: inodes in array */
f6bba201 2727{
f6bba201
DC
2728 int i, j;
2729
95afcf5c
DC
2730 ASSERT(*num_inodes == __XFS_SORT_INODES);
2731 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2732
f6bba201
DC
2733 /*
2734 * i_tab contains a list of pointers to inodes. We initialize
2735 * the table here & we'll sort it. We will then use it to
2736 * order the acquisition of the inode locks.
2737 *
2738 * Note that the table may contain duplicates. e.g., dp1 == dp2.
2739 */
95afcf5c
DC
2740 i = 0;
2741 i_tab[i++] = dp1;
2742 i_tab[i++] = dp2;
2743 i_tab[i++] = ip1;
2744 if (ip2)
2745 i_tab[i++] = ip2;
2746 if (wip)
2747 i_tab[i++] = wip;
2748 *num_inodes = i;
f6bba201
DC
2749
2750 /*
2751 * Sort the elements via bubble sort. (Remember, there are at
95afcf5c 2752 * most 5 elements to sort, so this is adequate.)
f6bba201
DC
2753 */
2754 for (i = 0; i < *num_inodes; i++) {
2755 for (j = 1; j < *num_inodes; j++) {
2756 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
95afcf5c 2757 struct xfs_inode *temp = i_tab[j];
f6bba201
DC
2758 i_tab[j] = i_tab[j-1];
2759 i_tab[j-1] = temp;
2760 }
2761 }
2762 }
2763}
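/*
 * Illustrative example, not part of the original file: renaming ino 200
 * from directory ino 50 into directory ino 20 over an existing ino 75,
 * with a whiteout inode, say ino 300, gives
 *
 *	i_tab = { 20, 50, 75, 200, 300 }
 *
 * after sorting, so every rename acquires its ILOCKs in ascending inode
 * number order and concurrent renames over the same inodes cannot ABBA
 * deadlock.
 */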
2764
310606b0
DC
2765static int
2766xfs_finish_rename(
c9cfdb38 2767 struct xfs_trans *tp)
310606b0 2768{
310606b0
DC
2769 /*
2770 * If this is a synchronous mount, make sure that the rename transaction
2771 * goes to disk before returning to the user.
2772 */
2773 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2774 xfs_trans_set_sync(tp);
2775
70393313 2776 return xfs_trans_commit(tp);
310606b0
DC
2777}
2778
d31a1825
CM
2779/*
2780 * xfs_cross_rename()
2781 *
 2782 * responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall
2783 */
2784STATIC int
2785xfs_cross_rename(
2786 struct xfs_trans *tp,
2787 struct xfs_inode *dp1,
2788 struct xfs_name *name1,
2789 struct xfs_inode *ip1,
2790 struct xfs_inode *dp2,
2791 struct xfs_name *name2,
2792 struct xfs_inode *ip2,
d31a1825
CM
2793 int spaceres)
2794{
2795 int error = 0;
2796 int ip1_flags = 0;
2797 int ip2_flags = 0;
2798 int dp2_flags = 0;
2799
2800 /* Swap inode number for dirent in first parent */
381eee69 2801 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
d31a1825 2802 if (error)
eeacd321 2803 goto out_trans_abort;
d31a1825
CM
2804
2805 /* Swap inode number for dirent in second parent */
381eee69 2806 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
d31a1825 2807 if (error)
eeacd321 2808 goto out_trans_abort;
d31a1825
CM
2809
2810 /*
2811 * If we're renaming one or more directories across different parents,
2812 * update the respective ".." entries (and link counts) to match the new
2813 * parents.
2814 */
2815 if (dp1 != dp2) {
2816 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2817
c19b3b05 2818 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
d31a1825 2819 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
381eee69 2820 dp1->i_ino, spaceres);
d31a1825 2821 if (error)
eeacd321 2822 goto out_trans_abort;
d31a1825
CM
2823
2824 /* transfer ip2 ".." reference to dp1 */
c19b3b05 2825 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
d31a1825
CM
2826 error = xfs_droplink(tp, dp2);
2827 if (error)
eeacd321 2828 goto out_trans_abort;
d31a1825
CM
2829 error = xfs_bumplink(tp, dp1);
2830 if (error)
eeacd321 2831 goto out_trans_abort;
d31a1825
CM
2832 }
2833
2834 /*
2835 * Although ip1 isn't changed here, userspace needs
2836 * to be warned about the change, so that applications
 2837			 * relying on it (like backup ones) will properly
 2838			 * notice the change
2839 */
2840 ip1_flags |= XFS_ICHGTIME_CHG;
2841 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2842 }
2843
c19b3b05 2844 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
d31a1825 2845 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
381eee69 2846 dp2->i_ino, spaceres);
d31a1825 2847 if (error)
eeacd321 2848 goto out_trans_abort;
d31a1825
CM
2849
2850 /* transfer ip1 ".." reference to dp2 */
c19b3b05 2851 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
d31a1825
CM
2852 error = xfs_droplink(tp, dp1);
2853 if (error)
eeacd321 2854 goto out_trans_abort;
d31a1825
CM
2855 error = xfs_bumplink(tp, dp2);
2856 if (error)
eeacd321 2857 goto out_trans_abort;
d31a1825
CM
2858 }
2859
2860 /*
2861 * Although ip2 isn't changed here, userspace needs
2862 * to be warned about the change, so that applications
 2863			 * relying on it (like backup ones) will properly
 2864			 * notice the change
2865 */
2866 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2867 ip2_flags |= XFS_ICHGTIME_CHG;
2868 }
2869 }
2870
2871 if (ip1_flags) {
2872 xfs_trans_ichgtime(tp, ip1, ip1_flags);
2873 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2874 }
2875 if (ip2_flags) {
2876 xfs_trans_ichgtime(tp, ip2, ip2_flags);
2877 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2878 }
2879 if (dp2_flags) {
2880 xfs_trans_ichgtime(tp, dp2, dp2_flags);
2881 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2882 }
2883 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2884 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
c9cfdb38 2885 return xfs_finish_rename(tp);
eeacd321
DC
2886
2887out_trans_abort:
4906e215 2888 xfs_trans_cancel(tp);
d31a1825
CM
2889 return error;
2890}
2891
7dcf5c3e
DC
2892/*
2893 * xfs_rename_alloc_whiteout()
2894 *
 2895 * Return a referenced, unlinked, unlocked inode that can be used as a
 2896 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
 2897 * crash between allocating the inode and linking it into the rename transaction,
2898 * recovery will free the inode and we won't leak it.
2899 */
2900static int
2901xfs_rename_alloc_whiteout(
2902 struct xfs_inode *dp,
2903 struct xfs_inode **wip)
2904{
2905 struct xfs_inode *tmpfile;
2906 int error;
2907
a1f69417 2908 error = xfs_create_tmpfile(dp, S_IFCHR | WHITEOUT_MODE, &tmpfile);
7dcf5c3e
DC
2909 if (error)
2910 return error;
2911
22419ac9
BF
2912 /*
2913 * Prepare the tmpfile inode as if it were created through the VFS.
2914 * Otherwise, the link increment paths will complain about nlink 0->1.
2915 * Drop the link count as done by d_tmpfile(), complete the inode setup
2916 * and flag it as linkable.
2917 */
2918 drop_nlink(VFS_I(tmpfile));
2b3d1d41 2919 xfs_setup_iops(tmpfile);
7dcf5c3e
DC
2920 xfs_finish_inode_setup(tmpfile);
2921 VFS_I(tmpfile)->i_state |= I_LINKABLE;
2922
2923 *wip = tmpfile;
2924 return 0;
2925}
2926
f6bba201
DC
2927/*
2928 * xfs_rename
2929 */
2930int
2931xfs_rename(
7dcf5c3e
DC
2932 struct xfs_inode *src_dp,
2933 struct xfs_name *src_name,
2934 struct xfs_inode *src_ip,
2935 struct xfs_inode *target_dp,
2936 struct xfs_name *target_name,
2937 struct xfs_inode *target_ip,
2938 unsigned int flags)
f6bba201 2939{
7dcf5c3e
DC
2940 struct xfs_mount *mp = src_dp->i_mount;
2941 struct xfs_trans *tp;
7dcf5c3e
DC
2942 struct xfs_inode *wip = NULL; /* whiteout inode */
2943 struct xfs_inode *inodes[__XFS_SORT_INODES];
2944 int num_inodes = __XFS_SORT_INODES;
2b93681f 2945 bool new_parent = (src_dp != target_dp);
c19b3b05 2946 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
7dcf5c3e
DC
2947 int spaceres;
2948 int error;
f6bba201
DC
2949
2950 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2951
eeacd321
DC
2952 if ((flags & RENAME_EXCHANGE) && !target_ip)
2953 return -EINVAL;
2954
7dcf5c3e
DC
2955 /*
2956 * If we are doing a whiteout operation, allocate the whiteout inode
2957 * we will be placing at the target and ensure the type is set
2958 * appropriately.
2959 */
2960 if (flags & RENAME_WHITEOUT) {
2961 ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
2962 error = xfs_rename_alloc_whiteout(target_dp, &wip);
2963 if (error)
2964 return error;
2965
2966 /* setup target dirent info as whiteout */
2967 src_name->type = XFS_DIR3_FT_CHRDEV;
2968 }
f6bba201 2969
7dcf5c3e 2970 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
f6bba201
DC
2971 inodes, &num_inodes);
2972
f6bba201 2973 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
253f4911 2974 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
2451337d 2975 if (error == -ENOSPC) {
f6bba201 2976 spaceres = 0;
253f4911
CH
2977 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2978 &tp);
f6bba201 2979 }
445883e8 2980 if (error)
253f4911 2981 goto out_release_wip;
f6bba201
DC
2982
2983 /*
2984 * Attach the dquots to the inodes
2985 */
2986 error = xfs_qm_vop_rename_dqattach(inodes);
445883e8
DC
2987 if (error)
2988 goto out_trans_cancel;
f6bba201
DC
2989
2990 /*
2991 * Lock all the participating inodes. Depending upon whether
2992 * the target_name exists in the target directory, and
2993 * whether the target directory is the same as the source
 2994	 * directory, we can lock from 2 to 5 inodes.
2995 */
2996 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2997
2998 /*
2999 * Join all the inodes to the transaction. From this point on,
3000 * we can rely on either trans_commit or trans_cancel to unlock
3001 * them.
3002 */
65523218 3003 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
f6bba201 3004 if (new_parent)
65523218 3005 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
f6bba201
DC
3006 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3007 if (target_ip)
3008 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
7dcf5c3e
DC
3009 if (wip)
3010 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
f6bba201
DC
3011
3012 /*
3013 * If we are using project inheritance, we only allow renames
3014 * into our tree when the project IDs are the same; else the
3015 * tree quota mechanism would be circumvented.
3016 */
3017 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
3018 (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
2451337d 3019 error = -EXDEV;
445883e8 3020 goto out_trans_cancel;
f6bba201
DC
3021 }
3022
eeacd321
DC
3023 /* RENAME_EXCHANGE is unique from here on. */
3024 if (flags & RENAME_EXCHANGE)
3025 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3026 target_dp, target_name, target_ip,
f16dea54 3027 spaceres);
d31a1825 3028
f6bba201
DC
3029 /*
3030 * Set up the target.
3031 */
3032 if (target_ip == NULL) {
3033 /*
3034 * If there's no space reservation, check the entry will
3035 * fit before actually inserting it.
3036 */
94f3cad5
ES
3037 if (!spaceres) {
3038 error = xfs_dir_canenter(tp, target_dp, target_name);
3039 if (error)
445883e8 3040 goto out_trans_cancel;
94f3cad5 3041 }
f6bba201
DC
3042 /*
3043 * If target does not exist and the rename crosses
3044 * directories, adjust the target directory link count
3045 * to account for the ".." reference from the new entry.
3046 */
3047 error = xfs_dir_createname(tp, target_dp, target_name,
381eee69 3048 src_ip->i_ino, spaceres);
f6bba201 3049 if (error)
c8eac49e 3050 goto out_trans_cancel;
f6bba201
DC
3051
3052 xfs_trans_ichgtime(tp, target_dp,
3053 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3054
3055 if (new_parent && src_is_directory) {
3056 error = xfs_bumplink(tp, target_dp);
3057 if (error)
c8eac49e 3058 goto out_trans_cancel;
f6bba201
DC
3059 }
3060 } else { /* target_ip != NULL */
3061 /*
3062 * If target exists and it's a directory, check that both
3063 * target and source are directories and that target can be
3064 * destroyed, or that neither is a directory.
3065 */
c19b3b05 3066 if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
f6bba201
DC
3067 /*
3068 * Make sure target dir is empty.
3069 */
3070 if (!(xfs_dir_isempty(target_ip)) ||
54d7b5c1 3071 (VFS_I(target_ip)->i_nlink > 2)) {
2451337d 3072 error = -EEXIST;
445883e8 3073 goto out_trans_cancel;
f6bba201
DC
3074 }
3075 }
3076
3077 /*
3078 * Link the source inode under the target name.
3079 * If the source inode is a directory and we are moving
3080 * it across directories, its ".." entry will be
3081 * inconsistent until we replace that down below.
3082 *
3083 * In case there is already an entry with the same
3084 * name at the destination directory, remove it first.
3085 */
3086 error = xfs_dir_replace(tp, target_dp, target_name,
381eee69 3087 src_ip->i_ino, spaceres);
f6bba201 3088 if (error)
c8eac49e 3089 goto out_trans_cancel;
f6bba201
DC
3090
3091 xfs_trans_ichgtime(tp, target_dp,
3092 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3093
3094 /*
3095 * Decrement the link count on the target since the target
3096 * dir no longer points to it.
3097 */
3098 error = xfs_droplink(tp, target_ip);
3099 if (error)
c8eac49e 3100 goto out_trans_cancel;
f6bba201
DC
3101
3102 if (src_is_directory) {
3103 /*
3104 * Drop the link from the old "." entry.
3105 */
3106 error = xfs_droplink(tp, target_ip);
3107 if (error)
c8eac49e 3108 goto out_trans_cancel;
f6bba201
DC
3109 }
3110 } /* target_ip != NULL */
3111
3112 /*
3113 * Remove the source.
3114 */
3115 if (new_parent && src_is_directory) {
3116 /*
3117 * Rewrite the ".." entry to point to the new
3118 * directory.
3119 */
3120 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
381eee69 3121 target_dp->i_ino, spaceres);
2451337d 3122 ASSERT(error != -EEXIST);
f6bba201 3123 if (error)
c8eac49e 3124 goto out_trans_cancel;
f6bba201
DC
3125 }
3126
3127 /*
3128 * We always want to hit the ctime on the source inode.
3129 *
3130 * This isn't strictly required by the standards since the source
3131 * inode isn't really being changed, but old unix file systems did
3132 * it and some incremental backup programs won't work without it.
3133 */
3134 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3135 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3136
3137 /*
3138 * Adjust the link count on src_dp. This is necessary when
3139 * renaming a directory, either within one parent when
3140 * the target existed, or across two parent directories.
3141 */
3142 if (src_is_directory && (new_parent || target_ip != NULL)) {
3143
3144 /*
3145 * Decrement link count on src_directory since the
3146 * entry that's moved no longer points to it.
3147 */
3148 error = xfs_droplink(tp, src_dp);
3149 if (error)
c8eac49e 3150 goto out_trans_cancel;
f6bba201
DC
3151 }
3152
7dcf5c3e
DC
3153 /*
3154 * For whiteouts, we only need to update the source dirent with the
3155 * inode number of the whiteout inode rather than removing it
3156 * altogether.
3157 */
3158 if (wip) {
3159 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
381eee69 3160 spaceres);
7dcf5c3e
DC
3161 } else
3162 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
381eee69 3163 spaceres);
f6bba201 3164 if (error)
c8eac49e 3165 goto out_trans_cancel;
f6bba201
DC
3166
3167 /*
7dcf5c3e
DC
3168 * For whiteouts, we need to bump the link count on the whiteout inode.
3169 * This means that failures all the way up to this point leave the inode
3170 * on the unlinked list and so cleanup is a simple matter of dropping
3171 * the remaining reference to it. If we fail here after bumping the link
3172 * count, we're shutting down the filesystem so we'll never see the
3173 * intermediate state on disk.
f6bba201 3174 */
7dcf5c3e 3175 if (wip) {
54d7b5c1 3176 ASSERT(VFS_I(wip)->i_nlink == 0);
7dcf5c3e
DC
3177 error = xfs_bumplink(tp, wip);
3178 if (error)
c8eac49e 3179 goto out_trans_cancel;
7dcf5c3e
DC
3180 error = xfs_iunlink_remove(tp, wip);
3181 if (error)
c8eac49e 3182 goto out_trans_cancel;
7dcf5c3e 3183 xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
f6bba201 3184
7dcf5c3e
DC
3185 /*
3186 * Now we have a real link, clear the "I'm a tmpfile" state
3187 * flag from the inode so it doesn't accidentally get misused in
3188 * future.
3189 */
3190 VFS_I(wip)->i_state &= ~I_LINKABLE;
f6bba201
DC
3191 }
3192
f6bba201
DC
3193 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3194 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3195 if (new_parent)
3196 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
f6bba201 3197
c9cfdb38 3198 error = xfs_finish_rename(tp);
7dcf5c3e 3199 if (wip)
44a8736b 3200 xfs_irele(wip);
7dcf5c3e 3201 return error;
f6bba201 3202
445883e8 3203out_trans_cancel:
4906e215 3204 xfs_trans_cancel(tp);
253f4911 3205out_release_wip:
7dcf5c3e 3206 if (wip)
44a8736b 3207 xfs_irele(wip);
f6bba201
DC
3208 return error;
3209}
3210
5c4d97d0
DC
3211STATIC int
3212xfs_iflush_cluster(
19429363
DC
3213 struct xfs_inode *ip,
3214 struct xfs_buf *bp)
1da177e4 3215{
19429363 3216 struct xfs_mount *mp = ip->i_mount;
5c4d97d0
DC
3217 struct xfs_perag *pag;
3218 unsigned long first_index, mask;
3219 unsigned long inodes_per_cluster;
19429363
DC
3220 int cilist_size;
3221 struct xfs_inode **cilist;
3222 struct xfs_inode *cip;
5c4d97d0
DC
3223 int nr_found;
3224 int clcount = 0;
1da177e4 3225 int i;
1da177e4 3226
5c4d97d0 3227 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1da177e4 3228
0f49efd8 3229 inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
19429363
DC
3230 cilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
3231 cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
3232 if (!cilist)
5c4d97d0 3233 goto out_put;
1da177e4 3234
0f49efd8 3235 mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
5c4d97d0
DC
3236 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
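	/*
	 * Illustrative numbers only (an assumption, not taken from this
	 * file): with 256 byte inodes (sb_inodelog == 8) and an 8k inode
	 * cluster, inodes_per_cluster = 8192 >> 8 = 32 and mask = ~31, so
	 * first_index is this inode's agino rounded down to the first
	 * inode of its cluster.
	 */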
3237 rcu_read_lock();
3238 /* really need a gang lookup range call here */
19429363 3239 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
5c4d97d0
DC
3240 first_index, inodes_per_cluster);
3241 if (nr_found == 0)
3242 goto out_free;
3243
3244 for (i = 0; i < nr_found; i++) {
19429363
DC
3245 cip = cilist[i];
3246 if (cip == ip)
bad55843 3247 continue;
1a3e8f3d
DC
3248
3249 /*
3250 * because this is an RCU protected lookup, we could find a
3251 * recently freed or even reallocated inode during the lookup.
3252 * We need to check under the i_flags_lock for a valid inode
3253 * here. Skip it if it is not valid or the wrong inode.
3254 */
19429363
DC
3255 spin_lock(&cip->i_flags_lock);
3256 if (!cip->i_ino ||
3257 __xfs_iflags_test(cip, XFS_ISTALE)) {
3258 spin_unlock(&cip->i_flags_lock);
1a3e8f3d
DC
3259 continue;
3260 }
5a90e53e
DC
3261
3262 /*
3263 * Once we fall off the end of the cluster, no point checking
3264 * any more inodes in the list because they will also all be
3265 * outside the cluster.
3266 */
19429363
DC
3267 if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
3268 spin_unlock(&cip->i_flags_lock);
5a90e53e
DC
3269 break;
3270 }
19429363 3271 spin_unlock(&cip->i_flags_lock);
1a3e8f3d 3272
bad55843
DC
3273 /*
3274 * Do an un-protected check to see if the inode is dirty and
3275 * is a candidate for flushing. These checks will be repeated
3276 * later after the appropriate locks are acquired.
3277 */
19429363 3278 if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
bad55843 3279 continue;
bad55843
DC
3280
3281 /*
3282 * Try to get locks. If any are unavailable or it is pinned,
3283 * then this inode cannot be flushed and is skipped.
3284 */
3285
19429363 3286 if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
bad55843 3287 continue;
19429363
DC
3288 if (!xfs_iflock_nowait(cip)) {
3289 xfs_iunlock(cip, XFS_ILOCK_SHARED);
bad55843
DC
3290 continue;
3291 }
19429363
DC
3292 if (xfs_ipincount(cip)) {
3293 xfs_ifunlock(cip);
3294 xfs_iunlock(cip, XFS_ILOCK_SHARED);
bad55843
DC
3295 continue;
3296 }
3297
8a17d7dd
DC
3298
3299 /*
3300 * Check the inode number again, just to be certain we are not
3301 * racing with freeing in xfs_reclaim_inode(). See the comments
3302 * in that function for more information as to why the initial
3303 * check is not sufficient.
3304 */
19429363
DC
3305 if (!cip->i_ino) {
3306 xfs_ifunlock(cip);
3307 xfs_iunlock(cip, XFS_ILOCK_SHARED);
bad55843
DC
3308 continue;
3309 }
3310
3311 /*
3312 * arriving here means that this inode can be flushed. First
3313 * re-check that it's dirty before flushing.
3314 */
19429363 3315 if (!xfs_inode_clean(cip)) {
33540408 3316 int error;
19429363 3317 error = xfs_iflush_int(cip, bp);
bad55843 3318 if (error) {
19429363 3319 xfs_iunlock(cip, XFS_ILOCK_SHARED);
bad55843
DC
3320 goto cluster_corrupt_out;
3321 }
3322 clcount++;
3323 } else {
19429363 3324 xfs_ifunlock(cip);
bad55843 3325 }
19429363 3326 xfs_iunlock(cip, XFS_ILOCK_SHARED);
bad55843
DC
3327 }
3328
        if (clcount) {
                XFS_STATS_INC(mp, xs_icluster_flushcnt);
                XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
        }

out_free:
        rcu_read_unlock();
        kmem_free(cilist);
out_put:
        xfs_perag_put(pag);
        return 0;


cluster_corrupt_out:
        /*
         * Corruption detected in the clustering loop. Invalidate the
         * inode buffer and shut down the filesystem.
         */
        rcu_read_unlock();
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);

        /*
         * We'll always have an inode attached to the buffer for completion
         * processing by the time we are called from xfs_iflush(). Hence we
         * always need to do IO completion processing to abort the inodes
         * attached to the buffer. Handle them just like the shutdown case in
         * xfs_buf_submit().
         */
        ASSERT(bp->b_iodone);
        bp->b_flags &= ~XBF_DONE;
        xfs_buf_stale(bp);
        xfs_buf_ioerror(bp, -EIO);
        xfs_buf_ioend(bp);

        /* abort the corrupt inode, as it was not attached to the buffer */
        xfs_iflush_abort(cip, false);
        kmem_free(cilist);
        xfs_perag_put(pag);
        return -EFSCORRUPTED;
}

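/*
 * A minimal sketch of the RCU lookup-and-revalidate pattern used by the
 * clustering loop above (generic illustration only; the real loop also
 * handles cluster boundaries, trylocks and pin counts):
 *
 *      rcu_read_lock();
 *      nr = radix_tree_gang_lookup(&pag->pag_ici_root, (void **)batch,
 *                                  first_index, nr_wanted);
 *      for (i = 0; i < nr; i++) {
 *              spin_lock(&batch[i]->i_flags_lock);
 *              if (!batch[i]->i_ino ||
 *                  __xfs_iflags_test(batch[i], XFS_ISTALE)) {
 *                      spin_unlock(&batch[i]->i_flags_lock);
 *                      continue;
 *              }
 *              spin_unlock(&batch[i]->i_flags_lock);
 *              use the inode, taking the proper inode locks first
 *      }
 *      rcu_read_unlock();
 */
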
/*
 * Flush dirty inode metadata into the backing buffer.
 *
 * The caller must have the inode lock and the inode flush lock held. The
 * inode lock will still be held upon return to the caller, and the inode
 * flush lock will be released after the inode has reached the disk.
 *
 * The caller must write out the buffer returned in *bpp and release it.
 */
int
xfs_iflush(
        struct xfs_inode        *ip,
        struct xfs_buf          **bpp)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_buf          *bp = NULL;
        struct xfs_dinode       *dip;
        int                     error;

        XFS_STATS_INC(mp, xs_iflush_count);

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(xfs_isiflocked(ip));
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
               ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));

        *bpp = NULL;

        xfs_iunpin_wait(ip);

        /*
         * For stale inodes we cannot rely on the backing buffer remaining
         * stale in cache for the remaining life of the stale inode and so
         * xfs_imap_to_bp() below may give us a buffer that no longer contains
         * inodes below. We have to check this after ensuring the inode is
         * unpinned so that it is safe to reclaim the stale inode after the
         * flush call.
         */
        if (xfs_iflags_test(ip, XFS_ISTALE)) {
                xfs_ifunlock(ip);
                return 0;
        }

        /*
         * This may have been unpinned because the filesystem is shutting
         * down forcibly. If that's the case we must not write this inode
         * to disk, because the log record didn't make it to disk.
         *
         * We also have to remove the log item from the AIL in this case,
         * as we wait for an empty AIL as part of the unmount process.
         */
        if (XFS_FORCED_SHUTDOWN(mp)) {
                error = -EIO;
                goto abort_out;
        }

        /*
         * Get the buffer containing the on-disk inode. We are doing a try-lock
         * operation here, so we may get an EAGAIN error. In that case, we
         * simply want to return with the inode still dirty.
         *
         * If we get any other error, we effectively have a corruption situation
         * and we cannot flush the inode, so we treat it the same as failing
         * xfs_iflush_int().
         */
        error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
                               0);
        if (error == -EAGAIN) {
                xfs_ifunlock(ip);
                return error;
        }
        if (error)
                goto corrupt_out;

        /*
         * First flush out the inode that xfs_iflush was called with.
         */
        error = xfs_iflush_int(ip, bp);
        if (error)
                goto corrupt_out;

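        /*
         * Note on the XBF_TRYLOCK read above, as a hedged sketch of the
         * retry model (an assumption about the callers, not stated here):
         * on -EAGAIN the inode is left dirty with its flush lock released,
         * its log item stays on the AIL, and a later AIL push simply
         * attempts xfs_iflush() again once the buffer lock can be taken.
         */
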
        /*
         * If the buffer is pinned then push on the log now so we won't
         * get stuck waiting in the write for too long.
         */
        if (xfs_buf_ispinned(bp))
                xfs_log_force(mp, 0);

        /*
         * inode clustering: try to gather other inodes into this write
         *
         * Note: Any error during clustering will result in the filesystem
         * being shut down and completion callbacks run on the cluster buffer.
         * As we have already flushed and attached this inode to the buffer,
         * it has already been aborted and released by xfs_iflush_cluster() and
         * so we have no further error handling to do here.
         */
        error = xfs_iflush_cluster(ip, bp);
        if (error)
                return error;

        *bpp = bp;
        return 0;

corrupt_out:
        if (bp)
                xfs_buf_relse(bp);
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
abort_out:
        /* abort the corrupt inode, as it was not attached to the buffer */
        xfs_iflush_abort(ip, false);
        return error;
}

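/*
 * A minimal usage sketch for xfs_iflush(), loosely modelled on the AIL push
 * path (assumed shape, not copied from that code): the caller owns writing
 * out and releasing the buffer returned in *bpp.
 *
 *      xfs_ilock(ip, XFS_ILOCK_SHARED);
 *      if (xfs_iflock_nowait(ip)) {
 *              error = xfs_iflush(ip, &bp);
 *              if (!error) {
 *                      xfs_buf_delwri_queue(bp, &buffer_list);
 *                      xfs_buf_relse(bp);
 *              }
 *      }
 *      xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */
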
/*
 * If there are inline format data / attr forks attached to this inode,
 * make sure they're not corrupt.
 */
bool
xfs_inode_verify_forks(
        struct xfs_inode        *ip)
{
        struct xfs_ifork        *ifp;
        xfs_failaddr_t          fa;

        fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
        if (fa) {
                ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
                xfs_inode_verifier_error(ip, -EFSCORRUPTED, "data fork",
                                ifp->if_u1.if_data, ifp->if_bytes, fa);
                return false;
        }

        fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
        if (fa) {
                ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
                xfs_inode_verifier_error(ip, -EFSCORRUPTED, "attr fork",
                                ifp ? ifp->if_u1.if_data : NULL,
                                ifp ? ifp->if_bytes : 0, fa);
                return false;
        }
        return true;
}

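/*
 * Usage sketch for xfs_inode_verify_forks() (mirrors the call in
 * xfs_iflush_int() below): a false return means the verifier has already
 * reported the corruption via xfs_inode_verifier_error(), so the caller
 * only has to bail out.
 *
 *      if (!xfs_inode_verify_forks(ip))
 *              return -EFSCORRUPTED;
 */
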
STATIC int
xfs_iflush_int(
        struct xfs_inode        *ip,
        struct xfs_buf          *bp)
{
        struct xfs_inode_log_item *iip = ip->i_itemp;
        struct xfs_dinode       *dip;
        struct xfs_mount        *mp = ip->i_mount;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(xfs_isiflocked(ip));
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
               ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
        ASSERT(iip != NULL && iip->ili_fields != 0);
        ASSERT(ip->i_d.di_version > 1);

        /* set *dip = inode's place in the buffer */
        dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);

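        /*
         * Illustrative example of im_boffset (made-up geometry, not taken
         * from this code): with 512 byte inodes and this inode sitting at
         * index 5 within its cluster buffer, im_boffset would be
         * 5 * 512 = 2560, so dip points 2560 bytes into bp's data.
         */
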
        if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
                               mp, XFS_ERRTAG_IFLUSH_1)) {
                xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
                        "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
                        __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
                goto corrupt_out;
        }
        if (S_ISREG(VFS_I(ip)->i_mode)) {
                if (XFS_TEST_ERROR(
                    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
                    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
                    mp, XFS_ERRTAG_IFLUSH_3)) {
                        xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
                                "%s: Bad regular inode %Lu, ptr "PTR_FMT,
                                __func__, ip->i_ino, ip);
                        goto corrupt_out;
                }
        } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
                if (XFS_TEST_ERROR(
                    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
                    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
                    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
                    mp, XFS_ERRTAG_IFLUSH_4)) {
                        xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
                                "%s: Bad directory inode %Lu, ptr "PTR_FMT,
                                __func__, ip->i_ino, ip);
                        goto corrupt_out;
                }
        }
        if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
                                ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
                xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
                        "%s: detected corrupt incore inode %Lu, "
                        "total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
                        __func__, ip->i_ino,
                        ip->i_d.di_nextents + ip->i_d.di_anextents,
                        ip->i_d.di_nblocks, ip);
                goto corrupt_out;
        }
        if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
                                mp, XFS_ERRTAG_IFLUSH_6)) {
                xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
                        "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
                        __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
                goto corrupt_out;
        }

        /*
         * Inode item log recovery for v2 inodes is dependent on the
         * di_flushiter count for correct sequencing. We bump the flush
         * iteration count so we can detect flushes which postdate a log record
         * during recovery. This is redundant as we now log every change and
         * hence this can't happen but we need to still do it to ensure
         * backwards compatibility with old kernels that predate logging all
         * inode changes.
         */
        if (ip->i_d.di_version < 3)
                ip->i_d.di_flushiter++;

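        /*
         * Hedged example of the sequencing this enables (recovery behaviour
         * recalled from xlog_recover_inode_pass2(), not shown here): if the
         * on-disk inode already carries flushiter 7 and a log record for the
         * same inode carries 6, that record describes an older flush and can
         * be skipped during replay.
         */
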
        /* Check the inline fork data before we write out. */
        if (!xfs_inode_verify_forks(ip))
                goto corrupt_out;

        /*
         * Copy the dirty parts of the inode into the on-disk inode. We always
         * copy out the core of the inode, because if the inode is dirty at all
         * the core must be.
         */
        xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);

        /* Wrap, we never let the log put out DI_MAX_FLUSH */
        if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
                ip->i_d.di_flushiter = 0;

        xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
        if (XFS_IFORK_Q(ip))
                xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
        xfs_inobp_check(mp, bp);

        /*
         * We've recorded everything logged in the inode, so we'd like to clear
         * the ili_fields bits so we don't log and flush things unnecessarily.
         * However, we can't stop logging all this information until the data
         * we've copied into the disk buffer is written to disk. If we did we
         * might overwrite the copy of the inode in the log with all the data
         * after re-logging only part of it, and in the face of a crash we
         * wouldn't have all the data we need to recover.
         *
         * What we do is move the bits to the ili_last_fields field. When
         * logging the inode, these bits are moved back to the ili_fields field.
         * In the xfs_iflush_done() routine we clear ili_last_fields, since we
         * know that the information those bits represent is permanently on
         * disk. As long as the flush completes before the inode is logged
         * again, then both ili_fields and ili_last_fields will be cleared.
         *
         * We can play with the ili_fields bits here, because the inode lock
         * must be held exclusively in order to set bits there and the flush
         * lock protects the ili_last_fields bits. Set ili_logged so the flush
         * done routine can tell whether or not to look in the AIL. Also, store
         * the current LSN of the inode so that we can tell whether the item has
         * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
         * need the AIL lock, because it is a 64 bit value that cannot be read
         * atomically.
         */
        iip->ili_last_fields = iip->ili_fields;
        iip->ili_fields = 0;
        iip->ili_fsync_fields = 0;
        iip->ili_logged = 1;

        xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
                                &iip->ili_item.li_lsn);

        /*
         * Attach the function xfs_iflush_done to the inode's
         * buffer. This will remove the inode from the AIL
         * and unlock the inode's flush lock when the inode is
         * completely written to disk.
         */
        xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);

        /* generate the checksum. */
        xfs_dinode_calc_crc(mp, dip);

        ASSERT(!list_empty(&bp->b_li_list));
        ASSERT(bp->b_iodone != NULL);
        return 0;

corrupt_out:
        return -EFSCORRUPTED;
}
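
/*
 * A condensed timeline of the ili_fields handling described above (my
 * summary of the comment in xfs_iflush_int(), not new behaviour):
 *
 *      log the inode:          ili_fields gains the dirty flags
 *      xfs_iflush_int():       ili_last_fields = ili_fields; ili_fields = 0
 *      relog before the I/O completes: ili_fields gains dirty flags again
 *      xfs_iflush_done():      ili_last_fields = 0, and the item leaves the
 *                              AIL if it has not moved since ili_flush_lsn
 *                              was recorded
 */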

/* Release an inode. */
void
xfs_irele(
        struct xfs_inode        *ip)
{
        trace_xfs_irele(ip, _RET_IP_);
        iput(VFS_I(ip));
}
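
/*
 * Minimal usage sketch (assumes the usual xfs_iget() calling convention from
 * xfs_icache.c): every successful xfs_iget() reference is dropped with
 * xfs_irele() once the caller is done with the inode.
 *
 *      error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *      if (error)
 *              return error;
 *      use the inode under the lock, then:
 *      xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *      xfs_irele(ip);
 */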