xfs: make tracepoint inode number format consistent
fs/xfs/xfs_inode.c (linux-block.git)
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/log2.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_dir2.h"
#include "xfs_attr_sf.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_dir2_priv.h"

kmem_zone_t *xfs_inode_zone;

/*
 * Used in xfs_itruncate_extents(). This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define XFS_ITRUNC_MAX_EXTENTS  2

STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *);
STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *);

/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
        struct xfs_inode        *ip)
{
        if ((ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE) && ip->i_d.di_extsize)
                return ip->i_d.di_extsize;
        if (XFS_IS_REALTIME_INODE(ip))
                return ip->i_mount->m_sb.sb_rextsize;
        return 0;
}

/*
 * Helper function to extract the CoW extent size hint from an inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two. If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
        struct xfs_inode        *ip)
{
        xfs_extlen_t            a, b;

        a = 0;
        if (ip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
                a = ip->i_d.di_cowextsize;
        b = xfs_get_extsz_hint(ip);

        a = max(a, b);
        if (a == 0)
                return XFS_DEFAULT_COWEXTSZ_HINT;
        return a;
}

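/*
 * Illustrative sketch, not part of the original source: how an allocation
 * path might choose between the two hint helpers above. The helper name
 * xfs_example_alloc_hint is hypothetical.
 */
static inline xfs_extlen_t
xfs_example_alloc_hint(
        struct xfs_inode        *ip,
        bool                    for_cow)
{
        /* CoW staging allocations honour the (never smaller) CoW hint */
        if (for_cow)
                return xfs_get_cowextsz_hint(ip);
        return xfs_get_extsz_hint(ip);
}
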
/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code. They are used in places that wish to lock the
 * inode solely for reading the extents. The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format. If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in. Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though. What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
        struct xfs_inode        *ip)
{
        uint                    lock_mode = XFS_ILOCK_SHARED;

        if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
            (ip->i_df.if_flags & XFS_IFEXTENTS) == 0)
                lock_mode = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lock_mode);
        return lock_mode;
}

uint
xfs_ilock_attr_map_shared(
        struct xfs_inode        *ip)
{
        uint                    lock_mode = XFS_ILOCK_SHARED;

        if (ip->i_d.di_aformat == XFS_DINODE_FMT_BTREE &&
            (ip->i_afp->if_flags & XFS_IFEXTENTS) == 0)
                lock_mode = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lock_mode);
        return lock_mode;
}

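/*
 * Illustrative sketch, not part of the original source: the intended calling
 * convention for the wrappers above is to save the returned lock mode and
 * hand it back to xfs_iunlock() unchanged, because the caller cannot know in
 * advance whether the shared or exclusive lock was taken. The helper name is
 * hypothetical.
 */
static void
xfs_example_walk_data_extents(
        struct xfs_inode        *ip)
{
        uint                    lock_mode;

        lock_mode = xfs_ilock_data_map_shared(ip);
        /* ... read ip->i_df extent records here ... */
        xfs_iunlock(ip, lock_mode);
}
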
/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: i_mmap_lock and the i_lock. This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> i_mmap_lock -> page_lock -> i_ilock
 *
 * mmap_sem locking order:
 *
 * i_rwsem -> page lock -> mmap_sem
 * mmap_sem -> i_mmap_lock -> page_lock
 *
 * The difference in mmap_sem locking order means that we cannot hold the
 * i_mmap_lock over syscall based read(2)/write(2) based IO. These IO paths can
 * fault in pages during copy in/out (for buffered IO) or require the mmap_sem
 * in get_user_pages() to map the user pages into the kernel address space for
 * direct IO. Similarly the i_rwsem cannot be taken inside a page fault because
 * page faults already hold the mmap_sem.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the i_mmap_lock. These locks should *only* be both
 * taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        trace_xfs_ilock(ip, lock_flags, _RET_IP_);

        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

        if (lock_flags & XFS_IOLOCK_EXCL) {
                down_write_nested(&VFS_I(ip)->i_rwsem,
                                  XFS_IOLOCK_DEP(lock_flags));
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
                down_read_nested(&VFS_I(ip)->i_rwsem,
                                 XFS_IOLOCK_DEP(lock_flags));
        }

        if (lock_flags & XFS_MMAPLOCK_EXCL)
                mrupdate_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
                mraccess_nested(&ip->i_mmaplock, XFS_MMAPLOCK_DEP(lock_flags));

        if (lock_flags & XFS_ILOCK_EXCL)
                mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
        else if (lock_flags & XFS_ILOCK_SHARED)
                mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}

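/*
 * Illustrative sketch, not part of the original source: taking all three
 * inode locks in the documented order for a page cache invalidation style
 * operation. The helper name is hypothetical.
 */
static void
xfs_example_lock_for_invalidation(
        struct xfs_inode        *ip)
{
        /* IO lock first, then the mmap lock, then the ilock */
        xfs_ilock(ip, XFS_IOLOCK_EXCL);
        xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
        xfs_ilock(ip, XFS_ILOCK_EXCL);

        /* ... invalidate the page cache and manipulate extents ... */

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
}
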
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep. It returns 1 if it gets
 * the requested locks and 0 otherwise. If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be locked.
 *        See the comment for xfs_ilock() for a list of valid values.
 */
int
xfs_ilock_nowait(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);

        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

        if (lock_flags & XFS_IOLOCK_EXCL) {
                if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
                        goto out;
        } else if (lock_flags & XFS_IOLOCK_SHARED) {
                if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
                        goto out;
        }

        if (lock_flags & XFS_MMAPLOCK_EXCL) {
                if (!mrtryupdate(&ip->i_mmaplock))
                        goto out_undo_iolock;
        } else if (lock_flags & XFS_MMAPLOCK_SHARED) {
                if (!mrtryaccess(&ip->i_mmaplock))
                        goto out_undo_iolock;
        }

        if (lock_flags & XFS_ILOCK_EXCL) {
                if (!mrtryupdate(&ip->i_lock))
                        goto out_undo_mmaplock;
        } else if (lock_flags & XFS_ILOCK_SHARED) {
                if (!mrtryaccess(&ip->i_lock))
                        goto out_undo_mmaplock;
        }
        return 1;

out_undo_mmaplock:
        if (lock_flags & XFS_MMAPLOCK_EXCL)
                mrunlock_excl(&ip->i_mmaplock);
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
                mrunlock_shared(&ip->i_mmaplock);
out_undo_iolock:
        if (lock_flags & XFS_IOLOCK_EXCL)
                up_write(&VFS_I(ip)->i_rwsem);
        else if (lock_flags & XFS_IOLOCK_SHARED)
                up_read(&VFS_I(ip)->i_rwsem);
out:
        return 0;
}

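/*
 * Illustrative sketch, not part of the original source: the common
 * trylock-with-fallback pattern built on xfs_ilock_nowait(), for callers
 * that must not block while holding other resources. The helper name and
 * -EAGAIN convention are hypothetical.
 */
static int
xfs_example_trylock_or_defer(
        struct xfs_inode        *ip)
{
        if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
                return -EAGAIN;         /* caller retries later */

        /* ... modify the inode core ... */

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return 0;
}
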
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait(). The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be unlocked.
 *        See the comment for xfs_ilock() for a list of valid values
 *        for this parameter.
 */
void
xfs_iunlock(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        /*
         * You can't set both SHARED and EXCL for the same lock,
         * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
         * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
         */
        ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
               (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
        ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
               (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
        ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
               (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
        ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
        ASSERT(lock_flags != 0);

        if (lock_flags & XFS_IOLOCK_EXCL)
                up_write(&VFS_I(ip)->i_rwsem);
        else if (lock_flags & XFS_IOLOCK_SHARED)
                up_read(&VFS_I(ip)->i_rwsem);

        if (lock_flags & XFS_MMAPLOCK_EXCL)
                mrunlock_excl(&ip->i_mmaplock);
        else if (lock_flags & XFS_MMAPLOCK_SHARED)
                mrunlock_shared(&ip->i_mmaplock);

        if (lock_flags & XFS_ILOCK_EXCL)
                mrunlock_excl(&ip->i_lock);
        else if (lock_flags & XFS_ILOCK_SHARED)
                mrunlock_shared(&ip->i_lock);

        trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}

/*
 * give up write locks. the i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
        ASSERT((lock_flags &
                ~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

        if (lock_flags & XFS_ILOCK_EXCL)
                mrdemote(&ip->i_lock);
        if (lock_flags & XFS_MMAPLOCK_EXCL)
                mrdemote(&ip->i_mmaplock);
        if (lock_flags & XFS_IOLOCK_EXCL)
                downgrade_write(&VFS_I(ip)->i_rwsem);

        trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}

#if defined(DEBUG) || defined(XFS_WARN)
int
xfs_isilocked(
        xfs_inode_t             *ip,
        uint                    lock_flags)
{
        if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
                if (!(lock_flags & XFS_ILOCK_SHARED))
                        return !!ip->i_lock.mr_writer;
                return rwsem_is_locked(&ip->i_lock.mr_lock);
        }

        if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
                if (!(lock_flags & XFS_MMAPLOCK_SHARED))
                        return !!ip->i_mmaplock.mr_writer;
                return rwsem_is_locked(&ip->i_mmaplock.mr_lock);
        }

        if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
                if (!(lock_flags & XFS_IOLOCK_SHARED))
                        return !debug_locks ||
                                lockdep_is_held_type(&VFS_I(ip)->i_rwsem, 0);
                return rwsem_is_locked(&VFS_I(ip)->i_rwsem);
        }

        ASSERT(0);
        return 0;
}
#endif

/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
        int subclass)
{
        return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)       (true)
#endif

/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
        int     class = 0;

        ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
                              XFS_ILOCK_RTSUM)));
        ASSERT(xfs_lockdep_subclass_ok(subclass));

        if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
                ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
                class += subclass << XFS_IOLOCK_SHIFT;
        }

        if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
                ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
                class += subclass << XFS_MMAPLOCK_SHIFT;
        }

        if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
                ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
                class += subclass << XFS_ILOCK_SHIFT;
        }

        return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}

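/*
 * Illustrative sketch, not part of the original source: what the subclass
 * encoding above means in practice. Locking the third inode of an ordered
 * set folds the subclass into the lock flags so lockdep sees a distinct
 * class per nesting level. The helper name is hypothetical.
 */
static void
xfs_example_lock_third_inode(
        struct xfs_inode        *ip)
{
        /* subclass 2 == third inode in i_ino order */
        xfs_ilock(ip, xfs_lock_inumorder(XFS_ILOCK_EXCL, 2));
}
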
/*
 * The following routine will lock n inodes in exclusive mode. We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
        xfs_inode_t     **ips,
        int             inodes,
        uint            lock_mode)
{
        int             attempts = 0, i, j, try_lock;
        xfs_log_item_t  *lp;

        /*
         * Currently supports between 2 and 5 inodes with exclusive locking. We
         * support an arbitrary depth of locking here, but absolute limits on
         * inodes depend on the type of locking and the limits placed by
         * lockdep annotations in xfs_lock_inumorder. These are all checked by
         * the asserts.
         */
        ASSERT(ips && inodes >= 2 && inodes <= 5);
        ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
                            XFS_ILOCK_EXCL));
        ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
                              XFS_ILOCK_SHARED)));
        ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
                inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
        ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
                inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

        if (lock_mode & XFS_IOLOCK_EXCL) {
                ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
        } else if (lock_mode & XFS_MMAPLOCK_EXCL)
                ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

        try_lock = 0;
        i = 0;
again:
        for (; i < inodes; i++) {
                ASSERT(ips[i]);

                if (i && (ips[i] == ips[i - 1]))        /* Already locked */
                        continue;

                /*
                 * If try_lock is not set yet, make sure all locked inodes are
                 * not in the AIL. If any are, set try_lock to be used later.
                 */
                if (!try_lock) {
                        for (j = (i - 1); j >= 0 && !try_lock; j--) {
                                lp = (xfs_log_item_t *)ips[j]->i_itemp;
                                if (lp && (lp->li_flags & XFS_LI_IN_AIL))
                                        try_lock++;
                        }
                }

                /*
                 * If any of the previous locks we have locked is in the AIL,
                 * we must TRY to get the second and subsequent locks. If
                 * we can't get any, we must release all we have
                 * and try again.
                 */
                if (!try_lock) {
                        xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
                        continue;
                }

                /* try_lock means we have an inode locked that is in the AIL. */
                ASSERT(i != 0);
                if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
                        continue;

                /*
                 * Unlock all previous guys and try again. xfs_iunlock will try
                 * to push the tail if the inode is in the AIL.
                 */
                attempts++;
                for (j = i - 1; j >= 0; j--) {
                        /*
                         * Check to see if we've already unlocked this one. Not
                         * the first one going back, and the inode ptr is the
                         * same.
                         */
                        if (j != (i - 1) && ips[j] == ips[j + 1])
                                continue;

                        xfs_iunlock(ips[j], lock_mode);
                }

                if ((attempts % 5) == 0) {
                        delay(1); /* Don't just spin the CPU */
                }
                i = 0;
                try_lock = 0;
                goto again;
        }
}

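/*
 * Illustrative sketch, not part of the original source: callers must sort
 * the inode array by i_ino before calling xfs_lock_inodes(), as the comment
 * above requires. The helper below is hypothetical.
 */
static void
xfs_example_lock_three(
        struct xfs_inode        *a,
        struct xfs_inode        *b,
        struct xfs_inode        *c)
{
        struct xfs_inode        *ips[3] = { a, b, c };

        /* assumes the caller ordered a, b, c by ascending i_ino */
        xfs_lock_inodes(ips, 3, XFS_ILOCK_EXCL);
}
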
/*
 * xfs_lock_two_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
void
xfs_lock_two_inodes(
        xfs_inode_t             *ip0,
        xfs_inode_t             *ip1,
        uint                    lock_mode)
{
        xfs_inode_t             *temp;
        int                     attempts = 0;
        xfs_log_item_t          *lp;

        ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
        if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL))
                ASSERT(!(lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)));

        ASSERT(ip0->i_ino != ip1->i_ino);

        if (ip0->i_ino > ip1->i_ino) {
                temp = ip0;
                ip0 = ip1;
                ip1 = temp;
        }

again:
        xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));

        /*
         * If the first lock we have locked is in the AIL, we must TRY to get
         * the second lock. If we can't get it, we must release the first one
         * and try again.
         */
        lp = (xfs_log_item_t *)ip0->i_itemp;
        if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
                if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
                        xfs_iunlock(ip0, lock_mode);
                        if ((++attempts % 5) == 0)
                                delay(1); /* Don't just spin the CPU */
                        goto again;
                }
        } else {
                xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
        }
}

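/*
 * Illustrative sketch, not part of the original source: the usual
 * link/rename style pairing, where both inode cores must be locked before
 * a directory entry is changed. The helper name is hypothetical.
 */
static void
xfs_example_lock_pair(
        struct xfs_inode        *dp,
        struct xfs_inode        *ip)
{
        /* ordering by i_ino is handled internally */
        xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);

        /* ... directory entry and link count updates go here ... */

        xfs_iunlock(dp, XFS_ILOCK_EXCL);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
}
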
void
__xfs_iflock(
        struct xfs_inode        *ip)
{
        wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
        DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);

        do {
                prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
                if (xfs_isiflocked(ip))
                        io_schedule();
        } while (!xfs_iflock_nowait(ip));

        finish_wait(wq, &wait.wq_entry);
}

STATIC uint
_xfs_dic2xflags(
        uint16_t                di_flags,
        uint64_t                di_flags2,
        bool                    has_attr)
{
        uint                    flags = 0;

        if (di_flags & XFS_DIFLAG_ANY) {
                if (di_flags & XFS_DIFLAG_REALTIME)
                        flags |= FS_XFLAG_REALTIME;
                if (di_flags & XFS_DIFLAG_PREALLOC)
                        flags |= FS_XFLAG_PREALLOC;
                if (di_flags & XFS_DIFLAG_IMMUTABLE)
                        flags |= FS_XFLAG_IMMUTABLE;
                if (di_flags & XFS_DIFLAG_APPEND)
                        flags |= FS_XFLAG_APPEND;
                if (di_flags & XFS_DIFLAG_SYNC)
                        flags |= FS_XFLAG_SYNC;
                if (di_flags & XFS_DIFLAG_NOATIME)
                        flags |= FS_XFLAG_NOATIME;
                if (di_flags & XFS_DIFLAG_NODUMP)
                        flags |= FS_XFLAG_NODUMP;
                if (di_flags & XFS_DIFLAG_RTINHERIT)
                        flags |= FS_XFLAG_RTINHERIT;
                if (di_flags & XFS_DIFLAG_PROJINHERIT)
                        flags |= FS_XFLAG_PROJINHERIT;
                if (di_flags & XFS_DIFLAG_NOSYMLINKS)
                        flags |= FS_XFLAG_NOSYMLINKS;
                if (di_flags & XFS_DIFLAG_EXTSIZE)
                        flags |= FS_XFLAG_EXTSIZE;
                if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
                        flags |= FS_XFLAG_EXTSZINHERIT;
                if (di_flags & XFS_DIFLAG_NODEFRAG)
                        flags |= FS_XFLAG_NODEFRAG;
                if (di_flags & XFS_DIFLAG_FILESTREAM)
                        flags |= FS_XFLAG_FILESTREAM;
        }

        if (di_flags2 & XFS_DIFLAG2_ANY) {
                if (di_flags2 & XFS_DIFLAG2_DAX)
                        flags |= FS_XFLAG_DAX;
                if (di_flags2 & XFS_DIFLAG2_COWEXTSIZE)
                        flags |= FS_XFLAG_COWEXTSIZE;
        }

        if (has_attr)
                flags |= FS_XFLAG_HASATTR;

        return flags;
}

uint
xfs_ip2xflags(
        struct xfs_inode        *ip)
{
        struct xfs_icdinode     *dic = &ip->i_d;

        return _xfs_dic2xflags(dic->di_flags, dic->di_flags2, XFS_IFORK_Q(ip));
}

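/*
 * Illustrative sketch, not part of the original source: the translation
 * above lets callers test user-visible FS_XFLAG_* bits without knowing the
 * on-disk XFS_DIFLAG* encoding. The helper name is hypothetical.
 */
static bool
xfs_example_is_realtime_visible(
        struct xfs_inode        *ip)
{
        return (xfs_ip2xflags(ip) & FS_XFLAG_REALTIME) != 0;
}
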
/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
        xfs_inode_t             *dp,
        struct xfs_name         *name,
        xfs_inode_t             **ipp,
        struct xfs_name         *ci_name)
{
        xfs_ino_t               inum;
        int                     error;

        trace_xfs_lookup(dp, name);

        if (XFS_FORCED_SHUTDOWN(dp->i_mount))
                return -EIO;

        error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
        if (error)
                goto out_unlock;

        error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
        if (error)
                goto out_free_name;

        return 0;

out_free_name:
        if (ci_name)
                kmem_free(ci_name->name);
out_unlock:
        *ipp = NULL;
        return error;
}

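/*
 * Illustrative sketch, not part of the original source: a caller that
 * allows case-insensitive matches must free the returned name buffer, per
 * the comment above. The helper name is hypothetical.
 */
static int
xfs_example_ci_lookup(
        struct xfs_inode        *dp,
        struct xfs_name         *name,
        struct xfs_inode        **ipp)
{
        struct xfs_name         ci_name = { 0 };
        int                     error;

        error = xfs_lookup(dp, name, ipp, &ci_name);
        if (!error && ci_name.name)
                kmem_free(ci_name.name);        /* CI match: free the copy */
        return error;
}
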
/*
 * Allocate an inode on disk and return a copy of its in-core version.
 * The in-core inode is locked exclusively. Set mode, nlink, and rdev
 * appropriately within the inode. The uid and gid for the inode are
 * set according to the contents of the given cred structure.
 *
 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
 * has a free inode available, call xfs_iget() to obtain the in-core
 * version of the allocated inode. Finally, fill in the inode and
 * log its initial contents. In this case, ialloc_context would be
 * set to NULL.
 *
 * If xfs_dialloc() does not have an available inode, it will replenish
 * its supply by doing an allocation. Since we can only do one
 * allocation within a transaction without deadlocks, we must commit
 * the current transaction before returning the inode itself.
 * In this case, therefore, we will set ialloc_context and return.
 * The caller should then commit the current transaction, start a new
 * transaction, and call xfs_ialloc() again to actually get the inode.
 *
 * To ensure that some other process does not grab the inode that
 * was allocated during the first call to xfs_ialloc(), this routine
 * also returns the [locked] bp pointing to the head of the freelist
 * as ialloc_context. The caller should hold this buffer across
 * the commit and pass it back into this routine on the second call.
 *
 * If we are allocating quota inodes, we do not have a parent inode
 * to attach to or associate with (i.e. pip == NULL) because they
 * are not linked into the directory structure - they are attached
 * directly to the superblock - and so have no parent.
 */
static int
xfs_ialloc(
        xfs_trans_t     *tp,
        xfs_inode_t     *pip,
        umode_t         mode,
        xfs_nlink_t     nlink,
        dev_t           rdev,
        prid_t          prid,
        xfs_buf_t       **ialloc_context,
        xfs_inode_t     **ipp)
{
        struct xfs_mount *mp = tp->t_mountp;
        xfs_ino_t       ino;
        xfs_inode_t     *ip;
        uint            flags;
        int             error;
        struct timespec tv;
        struct inode    *inode;

        /*
         * Call the space management code to pick
         * the on-disk inode to be allocated.
         */
        error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode,
                            ialloc_context, &ino);
        if (error)
                return error;
        if (*ialloc_context || ino == NULLFSINO) {
                *ipp = NULL;
                return 0;
        }
        ASSERT(*ialloc_context == NULL);

        /*
         * Get the in-core inode with the lock held exclusively.
         * This is because we're setting fields here we need
         * to prevent others from looking at until we're done.
         */
        error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE,
                         XFS_ILOCK_EXCL, &ip);
        if (error)
                return error;
        ASSERT(ip != NULL);
        inode = VFS_I(ip);

        /*
         * We always convert v1 inodes to v2 now - we only support filesystems
         * with >= v2 inode capability, so there is no reason for ever leaving
         * an inode in v1 format.
         */
        if (ip->i_d.di_version == 1)
                ip->i_d.di_version = 2;

        inode->i_mode = mode;
        set_nlink(inode, nlink);
        ip->i_d.di_uid = xfs_kuid_to_uid(current_fsuid());
        ip->i_d.di_gid = xfs_kgid_to_gid(current_fsgid());
        inode->i_rdev = rdev;
        xfs_set_projid(ip, prid);

        if (pip && XFS_INHERIT_GID(pip)) {
                ip->i_d.di_gid = pip->i_d.di_gid;
                if ((VFS_I(pip)->i_mode & S_ISGID) && S_ISDIR(mode))
                        inode->i_mode |= S_ISGID;
        }

        /*
         * If the group ID of the new file does not match the effective group
         * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
         * (and only if the irix_sgid_inherit compatibility variable is set).
         */
        if ((irix_sgid_inherit) &&
            (inode->i_mode & S_ISGID) &&
            (!in_group_p(xfs_gid_to_kgid(ip->i_d.di_gid))))
                inode->i_mode &= ~S_ISGID;

        ip->i_d.di_size = 0;
        ip->i_d.di_nextents = 0;
        ASSERT(ip->i_d.di_nblocks == 0);

        tv = current_time(inode);
        inode->i_mtime = tv;
        inode->i_atime = tv;
        inode->i_ctime = tv;

        ip->i_d.di_extsize = 0;
        ip->i_d.di_dmevmask = 0;
        ip->i_d.di_dmstate = 0;
        ip->i_d.di_flags = 0;

        if (ip->i_d.di_version == 3) {
                inode->i_version = 1;
                ip->i_d.di_flags2 = 0;
                ip->i_d.di_cowextsize = 0;
                ip->i_d.di_crtime.t_sec = (int32_t)tv.tv_sec;
                ip->i_d.di_crtime.t_nsec = (int32_t)tv.tv_nsec;
        }

        flags = XFS_ILOG_CORE;
        switch (mode & S_IFMT) {
        case S_IFIFO:
        case S_IFCHR:
        case S_IFBLK:
        case S_IFSOCK:
                ip->i_d.di_format = XFS_DINODE_FMT_DEV;
                ip->i_df.if_flags = 0;
                flags |= XFS_ILOG_DEV;
                break;
        case S_IFREG:
        case S_IFDIR:
                if (pip && (pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
                        uint di_flags = 0;

                        if (S_ISDIR(mode)) {
                                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
                                        di_flags |= XFS_DIFLAG_RTINHERIT;
                                if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
                                        di_flags |= XFS_DIFLAG_EXTSZINHERIT;
                                        ip->i_d.di_extsize = pip->i_d.di_extsize;
                                }
                                if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
                                        di_flags |= XFS_DIFLAG_PROJINHERIT;
                        } else if (S_ISREG(mode)) {
                                if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
                                        di_flags |= XFS_DIFLAG_REALTIME;
                                if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
                                        di_flags |= XFS_DIFLAG_EXTSIZE;
                                        ip->i_d.di_extsize = pip->i_d.di_extsize;
                                }
                        }
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
                            xfs_inherit_noatime)
                                di_flags |= XFS_DIFLAG_NOATIME;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
                            xfs_inherit_nodump)
                                di_flags |= XFS_DIFLAG_NODUMP;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
                            xfs_inherit_sync)
                                di_flags |= XFS_DIFLAG_SYNC;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
                            xfs_inherit_nosymlinks)
                                di_flags |= XFS_DIFLAG_NOSYMLINKS;
                        if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
                            xfs_inherit_nodefrag)
                                di_flags |= XFS_DIFLAG_NODEFRAG;
                        if (pip->i_d.di_flags & XFS_DIFLAG_FILESTREAM)
                                di_flags |= XFS_DIFLAG_FILESTREAM;

                        ip->i_d.di_flags |= di_flags;
                }
                if (pip &&
                    (pip->i_d.di_flags2 & XFS_DIFLAG2_ANY) &&
                    pip->i_d.di_version == 3 &&
                    ip->i_d.di_version == 3) {
                        uint64_t di_flags2 = 0;

                        if (pip->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE) {
                                di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
                                ip->i_d.di_cowextsize = pip->i_d.di_cowextsize;
                        }
                        if (pip->i_d.di_flags2 & XFS_DIFLAG2_DAX)
                                di_flags2 |= XFS_DIFLAG2_DAX;

                        ip->i_d.di_flags2 |= di_flags2;
                }
                /* FALLTHROUGH */
        case S_IFLNK:
                ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
                ip->i_df.if_flags = XFS_IFEXTENTS;
                ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
                ip->i_df.if_u1.if_root = NULL;
                break;
        default:
                ASSERT(0);
        }
        /*
         * Attribute fork settings for new inode.
         */
        ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
        ip->i_d.di_anextents = 0;

        /*
         * Log the new values stuffed into the inode.
         */
        xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
        xfs_trans_log_inode(tp, ip, flags);

        /* now that we have an i_mode we can setup the inode structure */
        xfs_setup_inode(ip);

        *ipp = ip;
        return 0;
}

/*
 * Allocates a new inode from disk and returns a pointer to the
 * incore copy. This routine will internally commit the current
 * transaction and allocate a new one if the Space Manager needed
 * to do an allocation to replenish the inode free-list.
 *
 * This routine is designed to be called from xfs_create and
 * xfs_create_dir.
 */
int
xfs_dir_ialloc(
        xfs_trans_t     **tpp,          /* input: current transaction;
                                           output: may be a new transaction. */
        xfs_inode_t     *dp,            /* directory within which to allocate
                                           the inode. */
        umode_t         mode,
        xfs_nlink_t     nlink,
        dev_t           rdev,
        prid_t          prid,           /* project id */
        xfs_inode_t     **ipp,          /* pointer to inode; it will be
                                           locked. */
        int             *committed)
{
        xfs_trans_t     *tp;
        xfs_inode_t     *ip;
        xfs_buf_t       *ialloc_context = NULL;
        int             code;
        void            *dqinfo;
        uint            tflags;

        tp = *tpp;
        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);

        /*
         * xfs_ialloc will return a pointer to an incore inode if
         * the Space Manager has an available inode on the free
         * list. Otherwise, it will do an allocation and replenish
         * the freelist. Since we can only do one allocation per
         * transaction without deadlocks, we will need to commit the
         * current transaction and start a new one. We will then
         * need to call xfs_ialloc again to get the inode.
         *
         * If xfs_ialloc did an allocation to replenish the freelist,
         * it returns the bp containing the head of the freelist as
         * ialloc_context. We will hold a lock on it across the
         * transaction commit so that no other process can steal
         * the inode(s) that we've just allocated.
         */
        code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context,
                          &ip);

        /*
         * Return an error if we were unable to allocate a new inode.
         * This should only happen if we run out of space on disk or
         * encounter a disk error.
         */
        if (code) {
                *ipp = NULL;
                return code;
        }
        if (!ialloc_context && !ip) {
                *ipp = NULL;
                return -ENOSPC;
        }

        /*
         * If the AGI buffer is non-NULL, then we were unable to get an
         * inode in one operation. We need to commit the current
         * transaction and call xfs_ialloc() again. It is guaranteed
         * to succeed the second time.
         */
        if (ialloc_context) {
                /*
                 * Normally, xfs_trans_commit releases all the locks.
                 * We call bhold to hang on to the ialloc_context across
                 * the commit. Holding this buffer prevents any other
                 * processes from doing any allocations in this
                 * allocation group.
                 */
                xfs_trans_bhold(tp, ialloc_context);

                /*
                 * We want the quota changes to be associated with the next
                 * transaction, NOT this one. So, detach the dqinfo from this
                 * and attach it to the next transaction.
                 */
                dqinfo = NULL;
                tflags = 0;
                if (tp->t_dqinfo) {
                        dqinfo = (void *)tp->t_dqinfo;
                        tp->t_dqinfo = NULL;
                        tflags = tp->t_flags & XFS_TRANS_DQ_DIRTY;
                        tp->t_flags &= ~(XFS_TRANS_DQ_DIRTY);
                }

                code = xfs_trans_roll(&tp);
                if (committed != NULL)
                        *committed = 1;

                /*
                 * Re-attach the quota info that we detached from the
                 * previous transaction.
                 */
                if (dqinfo) {
                        tp->t_dqinfo = dqinfo;
                        tp->t_flags |= tflags;
                }

                if (code) {
                        xfs_buf_relse(ialloc_context);
                        *tpp = tp;
                        *ipp = NULL;
                        return code;
                }
                xfs_trans_bjoin(tp, ialloc_context);

                /*
                 * Call ialloc again. Since we've locked out all
                 * other allocations in this allocation group,
                 * this call should always succeed.
                 */
                code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid,
                                  &ialloc_context, &ip);

                /*
                 * If we get an error at this point, return to the caller
                 * so that the current transaction can be aborted.
                 */
                if (code) {
                        *tpp = tp;
                        *ipp = NULL;
                        return code;
                }
                ASSERT(!ialloc_context && ip);

        } else {
                if (committed != NULL)
                        *committed = 0;
        }

        *ipp = ip;
        *tpp = tp;

        return 0;
}

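/*
 * Illustrative sketch, not part of the original source: because
 * xfs_dir_ialloc() may roll the transaction, callers must reload their
 * transaction pointer from tpp afterwards rather than reusing the old
 * one. The helper name is hypothetical.
 */
static int
xfs_example_alloc_in_dir(
        struct xfs_trans        **tpp,
        struct xfs_inode        *dp,
        umode_t                 mode,
        struct xfs_inode        **ipp)
{
        int                     error;

        error = xfs_dir_ialloc(tpp, dp, mode, 1, 0, xfs_get_initial_prid(dp),
                               ipp, NULL);
        /* *tpp may now point at a different (rolled) transaction */
        return error;
}
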
/*
 * Decrement the link count on an inode & log the change. If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int                      /* error */
xfs_droplink(
        xfs_trans_t *tp,
        xfs_inode_t *ip)
{
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

        drop_nlink(VFS_I(ip));
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        if (VFS_I(ip)->i_nlink)
                return 0;

        return xfs_iunlink(tp, ip);
}

/*
 * Increment the link count on an inode & log the change.
 */
static int
xfs_bumplink(
        xfs_trans_t *tp,
        xfs_inode_t *ip)
{
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

        ASSERT(ip->i_d.di_version > 1);
        inc_nlink(VFS_I(ip));
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
        return 0;
}

int
xfs_create(
        xfs_inode_t             *dp,
        struct xfs_name         *name,
        umode_t                 mode,
        dev_t                   rdev,
        xfs_inode_t             **ipp)
{
        int                     is_dir = S_ISDIR(mode);
        struct xfs_mount        *mp = dp->i_mount;
        struct xfs_inode        *ip = NULL;
        struct xfs_trans        *tp = NULL;
        int                     error;
        struct xfs_defer_ops    dfops;
        xfs_fsblock_t           first_block;
        bool                    unlock_dp_on_error = false;
        prid_t                  prid;
        struct xfs_dquot        *udqp = NULL;
        struct xfs_dquot        *gdqp = NULL;
        struct xfs_dquot        *pdqp = NULL;
        struct xfs_trans_res    *tres;
        uint                    resblks;

        trace_xfs_create(dp, name);

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        prid = xfs_get_initial_prid(dp);

        /*
         * Make sure that we have allocated dquot(s) on disk.
         */
        error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
                                   xfs_kgid_to_gid(current_fsgid()), prid,
                                   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
                                   &udqp, &gdqp, &pdqp);
        if (error)
                return error;

        if (is_dir) {
                resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
                tres = &M_RES(mp)->tr_mkdir;
        } else {
                resblks = XFS_CREATE_SPACE_RES(mp, name->len);
                tres = &M_RES(mp)->tr_create;
        }

        /*
         * Initially assume that the file does not exist and
         * reserve the resources for that case. If that is not
         * the case we'll drop the one we have and get a more
         * appropriate transaction later.
         */
        error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
        if (error == -ENOSPC) {
                /* flush outstanding delalloc blocks and retry */
                xfs_flush_inodes(mp);
                error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
        }
        if (error)
                goto out_release_inode;

        xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
        unlock_dp_on_error = true;

        xfs_defer_init(&dfops, &first_block);

        /*
         * Reserve disk quota and the inode.
         */
        error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
                                        pdqp, resblks, 1, 0);
        if (error)
                goto out_trans_cancel;

        /*
         * A newly created regular or special file just has one directory
         * entry pointing to it, but a directory also has the "." entry
         * pointing to itself.
         */
        error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip,
                               NULL);
        if (error)
                goto out_trans_cancel;

        /*
         * Now we join the directory inode to the transaction. We do not do it
         * earlier because xfs_dir_ialloc might commit the previous transaction
         * (and release all the locks). An error from here on will result in
         * the transaction cancel unlocking dp so don't do it explicitly in the
         * error path.
         */
        xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
        unlock_dp_on_error = false;

        error = xfs_dir_createname(tp, dp, name, ip->i_ino,
                                   &first_block, &dfops, resblks ?
                                        resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
        if (error) {
                ASSERT(error != -ENOSPC);
                goto out_trans_cancel;
        }
        xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

        if (is_dir) {
                error = xfs_dir_init(tp, ip, dp);
                if (error)
                        goto out_bmap_cancel;

                error = xfs_bumplink(tp, dp);
                if (error)
                        goto out_bmap_cancel;
        }

        /*
         * If this is a synchronous mount, make sure that the
         * create transaction goes to disk before returning to
         * the user.
         */
        if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
                xfs_trans_set_sync(tp);

        /*
         * Attach the dquot(s) to the inodes and modify them incore.
         * These ids of the inode couldn't have changed since the new
         * inode has been locked ever since it was created.
         */
        xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

        error = xfs_defer_finish(&tp, &dfops);
        if (error)
                goto out_bmap_cancel;

        error = xfs_trans_commit(tp);
        if (error)
                goto out_release_inode;

        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
        xfs_qm_dqrele(pdqp);

        *ipp = ip;
        return 0;

 out_bmap_cancel:
        xfs_defer_cancel(&dfops);
 out_trans_cancel:
        xfs_trans_cancel(tp);
 out_release_inode:
        /*
         * Wait until after the current transaction is aborted to finish the
         * setup of the inode and release the inode. This prevents recursive
         * transactions and deadlocks from xfs_inactive.
         */
        if (ip) {
                xfs_finish_inode_setup(ip);
                IRELE(ip);
        }

        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
        xfs_qm_dqrele(pdqp);

        if (unlock_dp_on_error)
                xfs_iunlock(dp, XFS_ILOCK_EXCL);
        return error;
}

int
xfs_create_tmpfile(
        struct xfs_inode        *dp,
        struct dentry           *dentry,
        umode_t                 mode,
        struct xfs_inode        **ipp)
{
        struct xfs_mount        *mp = dp->i_mount;
        struct xfs_inode        *ip = NULL;
        struct xfs_trans        *tp = NULL;
        int                     error;
        prid_t                  prid;
        struct xfs_dquot        *udqp = NULL;
        struct xfs_dquot        *gdqp = NULL;
        struct xfs_dquot        *pdqp = NULL;
        struct xfs_trans_res    *tres;
        uint                    resblks;

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        prid = xfs_get_initial_prid(dp);

        /*
         * Make sure that we have allocated dquot(s) on disk.
         */
        error = xfs_qm_vop_dqalloc(dp, xfs_kuid_to_uid(current_fsuid()),
                                   xfs_kgid_to_gid(current_fsgid()), prid,
                                   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
                                   &udqp, &gdqp, &pdqp);
        if (error)
                return error;

        resblks = XFS_IALLOC_SPACE_RES(mp);
        tres = &M_RES(mp)->tr_create_tmpfile;

        error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp);
        if (error)
                goto out_release_inode;

        error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
                                        pdqp, resblks, 1, 0);
        if (error)
                goto out_trans_cancel;

        error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip, NULL);
        if (error)
                goto out_trans_cancel;

        if (mp->m_flags & XFS_MOUNT_WSYNC)
                xfs_trans_set_sync(tp);

        /*
         * Attach the dquot(s) to the inodes and modify them incore.
         * These ids of the inode couldn't have changed since the new
         * inode has been locked ever since it was created.
         */
        xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

        error = xfs_iunlink(tp, ip);
        if (error)
                goto out_trans_cancel;

        error = xfs_trans_commit(tp);
        if (error)
                goto out_release_inode;

        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
        xfs_qm_dqrele(pdqp);

        *ipp = ip;
        return 0;

 out_trans_cancel:
        xfs_trans_cancel(tp);
 out_release_inode:
        /*
         * Wait until after the current transaction is aborted to finish the
         * setup of the inode and release the inode. This prevents recursive
         * transactions and deadlocks from xfs_inactive.
         */
        if (ip) {
                xfs_finish_inode_setup(ip);
                IRELE(ip);
        }

        xfs_qm_dqrele(udqp);
        xfs_qm_dqrele(gdqp);
        xfs_qm_dqrele(pdqp);

        return error;
}

int
xfs_link(
        xfs_inode_t             *tdp,
        xfs_inode_t             *sip,
        struct xfs_name         *target_name)
{
        xfs_mount_t             *mp = tdp->i_mount;
        xfs_trans_t             *tp;
        int                     error;
        struct xfs_defer_ops    dfops;
        xfs_fsblock_t           first_block;
        int                     resblks;

        trace_xfs_link(tdp, target_name);

        ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

        if (XFS_FORCED_SHUTDOWN(mp))
                return -EIO;

        error = xfs_qm_dqattach(sip, 0);
        if (error)
                goto std_return;

        error = xfs_qm_dqattach(tdp, 0);
        if (error)
                goto std_return;

        resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
        error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, resblks, 0, 0, &tp);
        if (error == -ENOSPC) {
                resblks = 0;
                error = xfs_trans_alloc(mp, &M_RES(mp)->tr_link, 0, 0, 0, &tp);
        }
        if (error)
                goto std_return;

        xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);

        xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);

        /*
         * If we are using project inheritance, we only allow hard link
         * creation in our tree when the project IDs are the same; else
         * the tree quota mechanism could be circumvented.
         */
        if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
                     (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
                error = -EXDEV;
                goto error_return;
        }

        if (!resblks) {
                error = xfs_dir_canenter(tp, tdp, target_name);
                if (error)
                        goto error_return;
        }

        xfs_defer_init(&dfops, &first_block);

        /*
         * Handle initial link state of O_TMPFILE inode
         */
        if (VFS_I(sip)->i_nlink == 0) {
                error = xfs_iunlink_remove(tp, sip);
                if (error)
                        goto error_return;
        }

        error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
                                   &first_block, &dfops, resblks);
        if (error)
                goto error_return;
        xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

        error = xfs_bumplink(tp, sip);
        if (error)
                goto error_return;

        /*
         * If this is a synchronous mount, make sure that the
         * link transaction goes to disk before returning to
         * the user.
         */
        if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
                xfs_trans_set_sync(tp);

        error = xfs_defer_finish(&tp, &dfops);
        if (error) {
                xfs_defer_cancel(&dfops);
                goto error_return;
        }

        return xfs_trans_commit(tp);

 error_return:
        xfs_trans_cancel(tp);
 std_return:
        return error;
}

/* Clear the reflink flag and the cowblocks tag if possible. */
static void
xfs_itruncate_clear_reflink_flags(
        struct xfs_inode        *ip)
{
        struct xfs_ifork        *dfork;
        struct xfs_ifork        *cfork;

        if (!xfs_is_reflink_inode(ip))
                return;
        dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
        cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
        if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
                ip->i_d.di_flags2 &= ~XFS_DIFLAG2_REFLINK;
        if (cfork->if_bytes == 0)
                xfs_inode_clear_cowblocks_tag(ip);
}

/*
 * Free up the underlying blocks past new_size. The new size must be smaller
 * than the current size. This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES. This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here. Some transaction will be
 * returned to the caller to be committed. The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction. On return the inode
 * will be "held" within the returned transaction. This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not. We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents(
        struct xfs_trans        **tpp,
        struct xfs_inode        *ip,
        int                     whichfork,
        xfs_fsize_t             new_size)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_trans        *tp = *tpp;
        struct xfs_defer_ops    dfops;
        xfs_fsblock_t           first_block;
        xfs_fileoff_t           first_unmap_block;
        xfs_fileoff_t           last_block;
        xfs_filblks_t           unmap_len;
        int                     error = 0;
        int                     done = 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
               xfs_isilocked(ip, XFS_IOLOCK_EXCL));
        ASSERT(new_size <= XFS_ISIZE(ip));
        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
        ASSERT(ip->i_itemp != NULL);
        ASSERT(ip->i_itemp->ili_lock_flags == 0);
        ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

        trace_xfs_itruncate_extents_start(ip, new_size);

        /*
         * Since it is possible for space to become allocated beyond
         * the end of the file (in a crash where the space is allocated
         * but the inode size is not yet updated), simply remove any
         * blocks which show up between the new EOF and the maximum
         * possible file size. If the first block to be removed is
         * beyond the maximum file size (i.e. it is the same as last_block),
         * then there is nothing to do.
         */
        first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
        last_block = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        if (first_unmap_block == last_block)
                return 0;

        ASSERT(first_unmap_block < last_block);
        unmap_len = last_block - first_unmap_block + 1;
        while (!done) {
                xfs_defer_init(&dfops, &first_block);
                error = xfs_bunmapi(tp, ip,
                                    first_unmap_block, unmap_len,
                                    xfs_bmapi_aflag(whichfork),
                                    XFS_ITRUNC_MAX_EXTENTS,
                                    &first_block, &dfops,
                                    &done);
                if (error)
                        goto out_bmap_cancel;

                /*
                 * Duplicate the transaction that has the permanent
                 * reservation and commit the old transaction.
                 */
                xfs_defer_ijoin(&dfops, ip);
                error = xfs_defer_finish(&tp, &dfops);
                if (error)
                        goto out_bmap_cancel;

                error = xfs_trans_roll_inode(&tp, ip);
                if (error)
                        goto out;
        }

        /* Remove all pending CoW reservations. */
        error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
                                              last_block, true);
        if (error)
                goto out;

        xfs_itruncate_clear_reflink_flags(ip);

        /*
         * Always re-log the inode so that our permanent transaction can keep
         * on rolling it forward in the log.
         */
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        trace_xfs_itruncate_extents_end(ip, new_size);

out:
        *tpp = tp;
        return error;
out_bmap_cancel:
        /*
         * If the bunmapi call encounters an error, return to the caller where
         * the transaction can be properly aborted. We just need to make sure
         * we're not holding any resources that we were not when we came in.
         */
        xfs_defer_cancel(&dfops);
        goto out;
}

c24b5dfa
DC
1627int
1628xfs_release(
1629 xfs_inode_t *ip)
1630{
1631 xfs_mount_t *mp = ip->i_mount;
1632 int error;
1633
c19b3b05 1634 if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
c24b5dfa
DC
1635 return 0;
1636
1637 /* If this is a read-only mount, don't do this (would generate I/O) */
1638 if (mp->m_flags & XFS_MOUNT_RDONLY)
1639 return 0;
1640
1641 if (!XFS_FORCED_SHUTDOWN(mp)) {
1642 int truncated;
1643
c24b5dfa
DC
1644 /*
1645 * If we previously truncated this file and removed old data
1646 * in the process, we want to initiate "early" writeout on
1647 * the last close. This is an attempt to combat the notorious
1648 * NULL files problem which is particularly noticeable from a
1649 * truncate down, buffered (re-)write (delalloc), followed by
1650 * a crash. What we are effectively doing here is
1651 * significantly reducing the time window where we'd otherwise
1652 * be exposed to that problem.
1653 */
1654 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
1655 if (truncated) {
1656 xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
eac152b4 1657 if (ip->i_delayed_blks > 0) {
2451337d 1658 error = filemap_flush(VFS_I(ip)->i_mapping);
c24b5dfa
DC
1659 if (error)
1660 return error;
1661 }
1662 }
1663 }
1664
54d7b5c1 1665 if (VFS_I(ip)->i_nlink == 0)
c24b5dfa
DC
1666 return 0;
1667
1668 if (xfs_can_free_eofblocks(ip, false)) {
1669
a36b9261
BF
1670 /*
 1671 * If the inode is being opened, written and closed
 1672 * frequently and we have delayed allocation blocks outstanding
 1673 * (e.g. streaming writes from the NFS server), then truncating
 1674 * the blocks past EOF will cause fragmentation to occur.
1675 *
1676 * In this case don't do the truncation, but we have to be
1677 * careful how we detect this case. Blocks beyond EOF show up as
1678 * i_delayed_blks even when the inode is clean, so we need to
1679 * truncate them away first before checking for a dirty release.
1680 * Hence on the first dirty close we will still remove the
1681 * speculative allocation, but after that we will leave it in
1682 * place.
1683 */
1684 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
1685 return 0;
c24b5dfa
DC
1686 /*
1687 * If we can't get the iolock just skip truncating the blocks
1688 * past EOF because we could deadlock with the mmap_sem
a36b9261 1689 * otherwise. We'll get another chance to drop them once the
c24b5dfa
DC
1690 * last reference to the inode is dropped, so we'll never leak
1691 * blocks permanently.
c24b5dfa 1692 */
a36b9261
BF
1693 if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1694 error = xfs_free_eofblocks(ip);
1695 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1696 if (error)
1697 return error;
1698 }
c24b5dfa
DC
1699
1700 /* delalloc blocks after truncation means it really is dirty */
1701 if (ip->i_delayed_blks)
1702 xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
1703 }
1704 return 0;
1705}
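/*
 * A hedged sketch of the test-and-clear idiom xfs_release() relies on:
 * xfs_iflags_test_and_clear() atomically reads and clears XFS_ITRUNCATED,
 * so only one closer triggers the "early" writeout. This userspace model
 * uses C11 atomics; the flag_test_and_clear() name is an assumption.
 */
#include <stdatomic.h>
#include <stdbool.h>

/* return the old state of the bit and clear it in one atomic step */
static bool flag_test_and_clear(atomic_uint *flags, unsigned int bit)
{
	unsigned int mask = 1u << bit;
	unsigned int old = atomic_fetch_and(flags, ~mask);

	return (old & mask) != 0;	/* true only for the first caller */
}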
1706
f7be2d7f
BF
1707/*
1708 * xfs_inactive_truncate
1709 *
1710 * Called to perform a truncate when an inode becomes unlinked.
1711 */
1712STATIC int
1713xfs_inactive_truncate(
1714 struct xfs_inode *ip)
1715{
1716 struct xfs_mount *mp = ip->i_mount;
1717 struct xfs_trans *tp;
1718 int error;
1719
253f4911 1720 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
f7be2d7f
BF
1721 if (error) {
1722 ASSERT(XFS_FORCED_SHUTDOWN(mp));
f7be2d7f
BF
1723 return error;
1724 }
1725
1726 xfs_ilock(ip, XFS_ILOCK_EXCL);
1727 xfs_trans_ijoin(tp, ip, 0);
1728
1729 /*
1730 * Log the inode size first to prevent stale data exposure in the event
1731 * of a system crash before the truncate completes. See the related
69bca807 1732 * comment in xfs_vn_setattr_size() for details.
f7be2d7f
BF
1733 */
1734 ip->i_d.di_size = 0;
1735 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1736
1737 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
1738 if (error)
1739 goto error_trans_cancel;
1740
1741 ASSERT(ip->i_d.di_nextents == 0);
1742
70393313 1743 error = xfs_trans_commit(tp);
f7be2d7f
BF
1744 if (error)
1745 goto error_unlock;
1746
1747 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1748 return 0;
1749
1750error_trans_cancel:
4906e215 1751 xfs_trans_cancel(tp);
f7be2d7f
BF
1752error_unlock:
1753 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1754 return error;
1755}
1756
88877d2b
BF
1757/*
1758 * xfs_inactive_ifree()
1759 *
1760 * Perform the inode free when an inode is unlinked.
1761 */
1762STATIC int
1763xfs_inactive_ifree(
1764 struct xfs_inode *ip)
1765{
2c3234d1 1766 struct xfs_defer_ops dfops;
88877d2b 1767 xfs_fsblock_t first_block;
88877d2b
BF
1768 struct xfs_mount *mp = ip->i_mount;
1769 struct xfs_trans *tp;
1770 int error;
1771
9d43b180 1772 /*
76d771b4
CH
1773 * We try to use a per-AG reservation for any block needed by the finobt
1774 * tree, but as the finobt feature predates the per-AG reservation
 1775 * support, a degraded file system might not have enough space for the
1776 * reservation at mount time. In that case try to dip into the reserved
1777 * pool and pray.
9d43b180
BF
1778 *
1779 * Send a warning if the reservation does happen to fail, as the inode
1780 * now remains allocated and sits on the unlinked list until the fs is
1781 * repaired.
1782 */
76d771b4
CH
1783 if (unlikely(mp->m_inotbt_nores)) {
1784 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
1785 XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
1786 &tp);
1787 } else {
1788 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
1789 }
88877d2b 1790 if (error) {
2451337d 1791 if (error == -ENOSPC) {
9d43b180
BF
1792 xfs_warn_ratelimited(mp,
1793 "Failed to remove inode(s) from unlinked list. "
1794 "Please free space, unmount and run xfs_repair.");
1795 } else {
1796 ASSERT(XFS_FORCED_SHUTDOWN(mp));
1797 }
88877d2b
BF
1798 return error;
1799 }
1800
1801 xfs_ilock(ip, XFS_ILOCK_EXCL);
1802 xfs_trans_ijoin(tp, ip, 0);
1803
2c3234d1
DW
1804 xfs_defer_init(&dfops, &first_block);
1805 error = xfs_ifree(tp, ip, &dfops);
88877d2b
BF
1806 if (error) {
1807 /*
 1808 * If we fail to free the inode, shut down. The cancel
 1809 * might do that; we need to make sure. Otherwise the
1810 * inode might be lost for a long time or forever.
1811 */
1812 if (!XFS_FORCED_SHUTDOWN(mp)) {
1813 xfs_notice(mp, "%s: xfs_ifree returned error %d",
1814 __func__, error);
1815 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
1816 }
4906e215 1817 xfs_trans_cancel(tp);
88877d2b
BF
1818 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1819 return error;
1820 }
1821
1822 /*
1823 * Credit the quota account(s). The inode is gone.
1824 */
1825 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
1826
1827 /*
d4a97a04
BF
1828 * Just ignore errors at this point. There is nothing we can do except
1829 * to try to keep going. Make sure it's not a silent error.
88877d2b 1830 */
8ad7c629 1831 error = xfs_defer_finish(&tp, &dfops);
d4a97a04 1832 if (error) {
310a75a3 1833 xfs_notice(mp, "%s: xfs_defer_finish returned error %d",
88877d2b 1834 __func__, error);
2c3234d1 1835 xfs_defer_cancel(&dfops);
d4a97a04 1836 }
70393313 1837 error = xfs_trans_commit(tp);
88877d2b
BF
1838 if (error)
1839 xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
1840 __func__, error);
1841
1842 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1843 return 0;
1844}
1845
c24b5dfa
DC
1846/*
1847 * xfs_inactive
1848 *
 1849 * This is called when the reference count for the vnode
1850 * goes to zero. If the file has been unlinked, then it must
1851 * now be truncated. Also, we clear all of the read-ahead state
1852 * kept for the inode here since the file is now closed.
1853 */
74564fb4 1854void
c24b5dfa
DC
1855xfs_inactive(
1856 xfs_inode_t *ip)
1857{
3d3c8b52 1858 struct xfs_mount *mp;
3d3c8b52
JL
1859 int error;
1860 int truncate = 0;
c24b5dfa
DC
1861
1862 /*
1863 * If the inode is already free, then there can be nothing
1864 * to clean up here.
1865 */
c19b3b05 1866 if (VFS_I(ip)->i_mode == 0) {
c24b5dfa
DC
1867 ASSERT(ip->i_df.if_real_bytes == 0);
1868 ASSERT(ip->i_df.if_broot_bytes == 0);
74564fb4 1869 return;
c24b5dfa
DC
1870 }
1871
1872 mp = ip->i_mount;
17c12bcd 1873 ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));
c24b5dfa 1874
c24b5dfa
DC
1875 /* If this is a read-only mount, don't do this (would generate I/O) */
1876 if (mp->m_flags & XFS_MOUNT_RDONLY)
74564fb4 1877 return;
c24b5dfa 1878
54d7b5c1 1879 if (VFS_I(ip)->i_nlink != 0) {
c24b5dfa
DC
1880 /*
1881 * force is true because we are evicting an inode from the
1882 * cache. Post-eof blocks must be freed, lest we end up with
1883 * broken free space accounting.
3b4683c2
BF
1884 *
1885 * Note: don't bother with iolock here since lockdep complains
1886 * about acquiring it in reclaim context. We have the only
1887 * reference to the inode at this point anyways.
c24b5dfa 1888 */
3b4683c2 1889 if (xfs_can_free_eofblocks(ip, true))
a36b9261 1890 xfs_free_eofblocks(ip);
74564fb4
BF
1891
1892 return;
c24b5dfa
DC
1893 }
1894
c19b3b05 1895 if (S_ISREG(VFS_I(ip)->i_mode) &&
c24b5dfa
DC
1896 (ip->i_d.di_size != 0 || XFS_ISIZE(ip) != 0 ||
1897 ip->i_d.di_nextents > 0 || ip->i_delayed_blks > 0))
1898 truncate = 1;
1899
1900 error = xfs_qm_dqattach(ip, 0);
1901 if (error)
74564fb4 1902 return;
c24b5dfa 1903
c19b3b05 1904 if (S_ISLNK(VFS_I(ip)->i_mode))
36b21dde 1905 error = xfs_inactive_symlink(ip);
f7be2d7f
BF
1906 else if (truncate)
1907 error = xfs_inactive_truncate(ip);
1908 if (error)
74564fb4 1909 return;
c24b5dfa
DC
1910
1911 /*
1912 * If there are attributes associated with the file then blow them away
1913 * now. The code calls a routine that recursively deconstructs the
6dfe5a04 1914 * attribute fork. It also blows away the in-core attribute fork.
c24b5dfa 1915 */
6dfe5a04 1916 if (XFS_IFORK_Q(ip)) {
c24b5dfa
DC
1917 error = xfs_attr_inactive(ip);
1918 if (error)
74564fb4 1919 return;
c24b5dfa
DC
1920 }
1921
6dfe5a04 1922 ASSERT(!ip->i_afp);
c24b5dfa 1923 ASSERT(ip->i_d.di_anextents == 0);
6dfe5a04 1924 ASSERT(ip->i_d.di_forkoff == 0);
c24b5dfa
DC
1925
1926 /*
1927 * Free the inode.
1928 */
88877d2b
BF
1929 error = xfs_inactive_ifree(ip);
1930 if (error)
74564fb4 1931 return;
c24b5dfa
DC
1932
1933 /*
1934 * Release the dquots held by inode, if any.
1935 */
1936 xfs_qm_dqdetach(ip);
c24b5dfa
DC
1937}
1938
1da177e4 1939/*
54d7b5c1
DC
1940 * This is called when the inode's link count goes to 0 or we are creating a
 1941 * tmpfile via O_TMPFILE. In the tmpfile case, the link count is dropped
 1942 * to zero by the VFS only after we've created the file successfully, so
 1943 * we have to add the inode to the unlinked list while its link count is
 1944 * still non-zero.
1945 *
1946 * We place the on-disk inode on a list in the AGI. It will be pulled from this
1947 * list when the inode is freed.
1da177e4 1948 */
54d7b5c1 1949STATIC int
1da177e4 1950xfs_iunlink(
54d7b5c1
DC
1951 struct xfs_trans *tp,
1952 struct xfs_inode *ip)
1da177e4 1953{
54d7b5c1 1954 xfs_mount_t *mp = tp->t_mountp;
1da177e4
LT
1955 xfs_agi_t *agi;
1956 xfs_dinode_t *dip;
1957 xfs_buf_t *agibp;
1958 xfs_buf_t *ibp;
1da177e4
LT
1959 xfs_agino_t agino;
1960 short bucket_index;
1961 int offset;
1962 int error;
1da177e4 1963
c19b3b05 1964 ASSERT(VFS_I(ip)->i_mode != 0);
1da177e4 1965
1da177e4
LT
1966 /*
1967 * Get the agi buffer first. It ensures lock ordering
1968 * on the list.
1969 */
5e1be0fb 1970 error = xfs_read_agi(mp, tp, XFS_INO_TO_AGNO(mp, ip->i_ino), &agibp);
859d7182 1971 if (error)
1da177e4 1972 return error;
1da177e4 1973 agi = XFS_BUF_TO_AGI(agibp);
5e1be0fb 1974
1da177e4
LT
1975 /*
1976 * Get the index into the agi hash table for the
1977 * list this inode will go on.
1978 */
1979 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1980 ASSERT(agino != 0);
1981 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1982 ASSERT(agi->agi_unlinked[bucket_index]);
16259e7d 1983 ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1da177e4 1984
69ef921b 1985 if (agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO)) {
1da177e4
LT
1986 /*
1987 * There is already another inode in the bucket we need
1988 * to add ourselves to. Add us at the front of the list.
1989 * Here we put the head pointer into our next pointer,
1990 * and then we fall through to point the head at us.
1991 */
475ee413
CH
1992 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
1993 0, 0);
c319b58b
VA
1994 if (error)
1995 return error;
1996
69ef921b 1997 ASSERT(dip->di_next_unlinked == cpu_to_be32(NULLAGINO));
1da177e4 1998 dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
92bfc6e7 1999 offset = ip->i_imap.im_boffset +
1da177e4 2000 offsetof(xfs_dinode_t, di_next_unlinked);
0a32c26e
DC
2001
2002 /* need to recalc the inode CRC if appropriate */
2003 xfs_dinode_calc_crc(mp, dip);
2004
1da177e4
LT
2005 xfs_trans_inode_buf(tp, ibp);
2006 xfs_trans_log_buf(tp, ibp, offset,
2007 (offset + sizeof(xfs_agino_t) - 1));
2008 xfs_inobp_check(mp, ibp);
2009 }
2010
2011 /*
2012 * Point the bucket head pointer at the inode being inserted.
2013 */
2014 ASSERT(agino != 0);
16259e7d 2015 agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1da177e4
LT
2016 offset = offsetof(xfs_agi_t, agi_unlinked) +
2017 (sizeof(xfs_agino_t) * bucket_index);
2018 xfs_trans_log_buf(tp, agibp, offset,
2019 (offset + sizeof(xfs_agino_t) - 1));
2020 return 0;
2021}
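/*
 * A userspace model (a sketch over assumed types, not the on-disk
 * format) of the head insertion xfs_iunlink() performs above: the new
 * inode's di_next_unlinked takes the old bucket head, then the AGI
 * bucket points at the new inode. All model_* names are illustrative.
 */
#include <stdint.h>

#define MODEL_UNLINKED_BUCKETS	64		/* mirrors XFS_AGI_UNLINKED_BUCKETS */
#define MODEL_NULLAGINO		((uint32_t)-1)	/* list terminator */

struct model_dinode {
	uint32_t di_next_unlinked;	/* next AG inode number in the chain */
};

struct model_agi {
	uint32_t agi_unlinked[MODEL_UNLINKED_BUCKETS];	/* bucket heads */
};

static void model_iunlink(struct model_agi *agi, struct model_dinode *dip,
			  uint32_t agino)
{
	int bucket = agino % MODEL_UNLINKED_BUCKETS;

	/*
	 * Point our next pointer at the old head. The kernel skips this
	 * store when the bucket is empty, because a freshly allocated
	 * inode already carries NULLAGINO; the end state is the same.
	 */
	dip->di_next_unlinked = agi->agi_unlinked[bucket];

	/* point the bucket head at us */
	agi->agi_unlinked[bucket] = agino;
}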
2022
2023/*
2024 * Pull the on-disk inode from the AGI unlinked list.
2025 */
2026STATIC int
2027xfs_iunlink_remove(
2028 xfs_trans_t *tp,
2029 xfs_inode_t *ip)
2030{
2031 xfs_ino_t next_ino;
2032 xfs_mount_t *mp;
2033 xfs_agi_t *agi;
2034 xfs_dinode_t *dip;
2035 xfs_buf_t *agibp;
2036 xfs_buf_t *ibp;
2037 xfs_agnumber_t agno;
1da177e4
LT
2038 xfs_agino_t agino;
2039 xfs_agino_t next_agino;
2040 xfs_buf_t *last_ibp;
6fdf8ccc 2041 xfs_dinode_t *last_dip = NULL;
1da177e4 2042 short bucket_index;
6fdf8ccc 2043 int offset, last_offset = 0;
1da177e4 2044 int error;
1da177e4 2045
1da177e4 2046 mp = tp->t_mountp;
1da177e4 2047 agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1da177e4
LT
2048
2049 /*
2050 * Get the agi buffer first. It ensures lock ordering
2051 * on the list.
2052 */
5e1be0fb
CH
2053 error = xfs_read_agi(mp, tp, agno, &agibp);
2054 if (error)
1da177e4 2055 return error;
5e1be0fb 2056
1da177e4 2057 agi = XFS_BUF_TO_AGI(agibp);
5e1be0fb 2058
1da177e4
LT
2059 /*
2060 * Get the index into the agi hash table for the
2061 * list this inode will go on.
2062 */
2063 agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2064 ASSERT(agino != 0);
2065 bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
69ef921b 2066 ASSERT(agi->agi_unlinked[bucket_index] != cpu_to_be32(NULLAGINO));
1da177e4
LT
2067 ASSERT(agi->agi_unlinked[bucket_index]);
2068
16259e7d 2069 if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
1da177e4 2070 /*
475ee413
CH
2071 * We're at the head of the list. Get the inode's on-disk
2072 * buffer to see if there is anyone after us on the list.
2073 * Only modify our next pointer if it is not already NULLAGINO.
2074 * This saves us the overhead of dealing with the buffer when
2075 * there is no need to change it.
1da177e4 2076 */
475ee413
CH
2077 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2078 0, 0);
1da177e4 2079 if (error) {
475ee413 2080 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
0b932ccc 2081 __func__, error);
1da177e4
LT
2082 return error;
2083 }
347d1c01 2084 next_agino = be32_to_cpu(dip->di_next_unlinked);
1da177e4
LT
2085 ASSERT(next_agino != 0);
2086 if (next_agino != NULLAGINO) {
347d1c01 2087 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
92bfc6e7 2088 offset = ip->i_imap.im_boffset +
1da177e4 2089 offsetof(xfs_dinode_t, di_next_unlinked);
0a32c26e
DC
2090
2091 /* need to recalc the inode CRC if appropriate */
2092 xfs_dinode_calc_crc(mp, dip);
2093
1da177e4
LT
2094 xfs_trans_inode_buf(tp, ibp);
2095 xfs_trans_log_buf(tp, ibp, offset,
2096 (offset + sizeof(xfs_agino_t) - 1));
2097 xfs_inobp_check(mp, ibp);
2098 } else {
2099 xfs_trans_brelse(tp, ibp);
2100 }
2101 /*
2102 * Point the bucket head pointer at the next inode.
2103 */
2104 ASSERT(next_agino != 0);
2105 ASSERT(next_agino != agino);
16259e7d 2106 agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
1da177e4
LT
2107 offset = offsetof(xfs_agi_t, agi_unlinked) +
2108 (sizeof(xfs_agino_t) * bucket_index);
2109 xfs_trans_log_buf(tp, agibp, offset,
2110 (offset + sizeof(xfs_agino_t) - 1));
2111 } else {
2112 /*
2113 * We need to search the list for the inode being freed.
2114 */
16259e7d 2115 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
1da177e4
LT
2116 last_ibp = NULL;
2117 while (next_agino != agino) {
129dbc9a
CH
2118 struct xfs_imap imap;
2119
2120 if (last_ibp)
1da177e4 2121 xfs_trans_brelse(tp, last_ibp);
129dbc9a
CH
2122
2123 imap.im_blkno = 0;
1da177e4 2124 next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
129dbc9a
CH
2125
2126 error = xfs_imap(mp, tp, next_ino, &imap, 0);
2127 if (error) {
2128 xfs_warn(mp,
2129 "%s: xfs_imap returned error %d.",
2130 __func__, error);
2131 return error;
2132 }
2133
2134 error = xfs_imap_to_bp(mp, tp, &imap, &last_dip,
2135 &last_ibp, 0, 0);
1da177e4 2136 if (error) {
0b932ccc 2137 xfs_warn(mp,
129dbc9a 2138 "%s: xfs_imap_to_bp returned error %d.",
0b932ccc 2139 __func__, error);
1da177e4
LT
2140 return error;
2141 }
129dbc9a
CH
2142
2143 last_offset = imap.im_boffset;
347d1c01 2144 next_agino = be32_to_cpu(last_dip->di_next_unlinked);
1da177e4
LT
2145 ASSERT(next_agino != NULLAGINO);
2146 ASSERT(next_agino != 0);
2147 }
475ee413 2148
1da177e4 2149 /*
475ee413
CH
2150 * Now last_ibp points to the buffer previous to us on the
2151 * unlinked list. Pull us from the list.
1da177e4 2152 */
475ee413
CH
2153 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &ibp,
2154 0, 0);
1da177e4 2155 if (error) {
475ee413 2156 xfs_warn(mp, "%s: xfs_imap_to_bp(2) returned error %d.",
0b932ccc 2157 __func__, error);
1da177e4
LT
2158 return error;
2159 }
347d1c01 2160 next_agino = be32_to_cpu(dip->di_next_unlinked);
1da177e4
LT
2161 ASSERT(next_agino != 0);
2162 ASSERT(next_agino != agino);
2163 if (next_agino != NULLAGINO) {
347d1c01 2164 dip->di_next_unlinked = cpu_to_be32(NULLAGINO);
92bfc6e7 2165 offset = ip->i_imap.im_boffset +
1da177e4 2166 offsetof(xfs_dinode_t, di_next_unlinked);
0a32c26e
DC
2167
2168 /* need to recalc the inode CRC if appropriate */
2169 xfs_dinode_calc_crc(mp, dip);
2170
1da177e4
LT
2171 xfs_trans_inode_buf(tp, ibp);
2172 xfs_trans_log_buf(tp, ibp, offset,
2173 (offset + sizeof(xfs_agino_t) - 1));
2174 xfs_inobp_check(mp, ibp);
2175 } else {
2176 xfs_trans_brelse(tp, ibp);
2177 }
2178 /*
2179 * Point the previous inode on the list to the next inode.
2180 */
347d1c01 2181 last_dip->di_next_unlinked = cpu_to_be32(next_agino);
1da177e4
LT
2182 ASSERT(next_agino != 0);
2183 offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
0a32c26e
DC
2184
2185 /* need to recalc the inode CRC if appropriate */
2186 xfs_dinode_calc_crc(mp, last_dip);
2187
1da177e4
LT
2188 xfs_trans_inode_buf(tp, last_ibp);
2189 xfs_trans_log_buf(tp, last_ibp, offset,
2190 (offset + sizeof(xfs_agino_t) - 1));
2191 xfs_inobp_check(mp, last_ibp);
2192 }
2193 return 0;
2194}
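/*
 * Companion sketch to the insertion model after xfs_iunlink(), reusing
 * its model_* types: remove agino from its bucket, handling both the
 * list head and a mid-list predecessor, mirroring the two branches of
 * xfs_iunlink_remove() above. model_ino_to_dinode() is a toy stand-in
 * for the xfs_imap()/xfs_imap_to_bp() lookups.
 */
#define MODEL_MAX_INO	128

static struct model_dinode model_itable[MODEL_MAX_INO];	/* toy inode table */

static struct model_dinode *model_ino_to_dinode(uint32_t agino)
{
	return &model_itable[agino % MODEL_MAX_INO];
}

static void model_iunlink_remove(struct model_agi *agi, uint32_t agino)
{
	int bucket = agino % MODEL_UNLINKED_BUCKETS;
	struct model_dinode *dip = model_ino_to_dinode(agino);

	if (agi->agi_unlinked[bucket] == agino) {
		/* head of the list: the bucket now points at our successor */
		agi->agi_unlinked[bucket] = dip->di_next_unlinked;
	} else {
		/* walk the singly linked chain to find our predecessor */
		struct model_dinode *pdip =
			model_ino_to_dinode(agi->agi_unlinked[bucket]);

		while (pdip->di_next_unlinked != agino)
			pdip = model_ino_to_dinode(pdip->di_next_unlinked);
		pdip->di_next_unlinked = dip->di_next_unlinked;
	}
	dip->di_next_unlinked = MODEL_NULLAGINO;
}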
2195
5b3eed75 2196/*
0b8182db 2197 * A big issue when freeing the inode cluster is that we _cannot_ skip any
5b3eed75
DC
2198 * inodes that are in memory - they all must be marked stale and attached to
2199 * the cluster buffer.
2200 */
2a30f36d 2201STATIC int
1da177e4 2202xfs_ifree_cluster(
09b56604
BF
2203 xfs_inode_t *free_ip,
2204 xfs_trans_t *tp,
2205 struct xfs_icluster *xic)
1da177e4
LT
2206{
2207 xfs_mount_t *mp = free_ip->i_mount;
2208 int blks_per_cluster;
982e939e 2209 int inodes_per_cluster;
1da177e4 2210 int nbufs;
5b257b4a 2211 int i, j;
3cdaa189 2212 int ioffset;
1da177e4
LT
2213 xfs_daddr_t blkno;
2214 xfs_buf_t *bp;
5b257b4a 2215 xfs_inode_t *ip;
1da177e4 2216 xfs_inode_log_item_t *iip;
643c8c05 2217 struct xfs_log_item *lip;
5017e97d 2218 struct xfs_perag *pag;
09b56604 2219 xfs_ino_t inum;
1da177e4 2220
09b56604 2221 inum = xic->first_ino;
5017e97d 2222 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
982e939e
JL
2223 blks_per_cluster = xfs_icluster_size_fsb(mp);
2224 inodes_per_cluster = blks_per_cluster << mp->m_sb.sb_inopblog;
2225 nbufs = mp->m_ialloc_blks / blks_per_cluster;
1da177e4 2226
982e939e 2227 for (j = 0; j < nbufs; j++, inum += inodes_per_cluster) {
09b56604
BF
2228 /*
2229 * The allocation bitmap tells us which inodes of the chunk were
2230 * physically allocated. Skip the cluster if an inode falls into
2231 * a sparse region.
2232 */
3cdaa189
BF
2233 ioffset = inum - xic->first_ino;
2234 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2235 ASSERT(do_mod(ioffset, inodes_per_cluster) == 0);
09b56604
BF
2236 continue;
2237 }
2238
1da177e4
LT
2239 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2240 XFS_INO_TO_AGBNO(mp, inum));
2241
5b257b4a
DC
2242 /*
2243 * We obtain and lock the backing buffer first in the process
2244 * here, as we have to ensure that any dirty inode that we
2245 * can't get the flush lock on is attached to the buffer.
2246 * If we scan the in-memory inodes first, then buffer IO can
2247 * complete before we get a lock on it, and hence we may fail
2248 * to mark all the active inodes on the buffer stale.
2249 */
2250 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
b6aff29f
DC
2251 mp->m_bsize * blks_per_cluster,
2252 XBF_UNMAPPED);
5b257b4a 2253
2a30f36d 2254 if (!bp)
2451337d 2255 return -ENOMEM;
b0f539de
DC
2256
2257 /*
2258 * This buffer may not have been correctly initialised as we
2259 * didn't read it from disk. That's not important because we are
 2260 * only using it to mark the buffer as stale in the log, and to
2261 * attach stale cached inodes on it. That means it will never be
2262 * dispatched for IO. If it is, we want to know about it, and we
 2263 * want it to fail. We can achieve this by adding a write
2264 * verifier to the buffer.
2265 */
1813dd64 2266 bp->b_ops = &xfs_inode_buf_ops;
b0f539de 2267
5b257b4a
DC
2268 /*
2269 * Walk the inodes already attached to the buffer and mark them
2270 * stale. These will all have the flush locks held, so an
5b3eed75
DC
2271 * in-memory inode walk can't lock them. By marking them all
2272 * stale first, we will not attempt to lock them in the loop
2273 * below as the XFS_ISTALE flag will be set.
5b257b4a 2274 */
643c8c05 2275 list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
5b257b4a
DC
2276 if (lip->li_type == XFS_LI_INODE) {
2277 iip = (xfs_inode_log_item_t *)lip;
2278 ASSERT(iip->ili_logged == 1);
ca30b2a7 2279 lip->li_cb = xfs_istale_done;
5b257b4a
DC
2280 xfs_trans_ail_copy_lsn(mp->m_ail,
2281 &iip->ili_flush_lsn,
2282 &iip->ili_item.li_lsn);
2283 xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
5b257b4a 2284 }
5b257b4a 2285 }
1da177e4 2286
5b3eed75 2287
1da177e4 2288 /*
5b257b4a
DC
2289 * For each inode in memory attempt to add it to the inode
2290 * buffer and set it up for being staled on buffer IO
2291 * completion. This is safe as we've locked out tail pushing
2292 * and flushing by locking the buffer.
1da177e4 2293 *
5b257b4a
DC
2294 * We have already marked every inode that was part of a
2295 * transaction stale above, which means there is no point in
2296 * even trying to lock them.
1da177e4 2297 */
982e939e 2298 for (i = 0; i < inodes_per_cluster; i++) {
5b3eed75 2299retry:
1a3e8f3d 2300 rcu_read_lock();
da353b0d
DC
2301 ip = radix_tree_lookup(&pag->pag_ici_root,
2302 XFS_INO_TO_AGINO(mp, (inum + i)));
1da177e4 2303
1a3e8f3d
DC
2304 /* Inode not in memory, nothing to do */
2305 if (!ip) {
2306 rcu_read_unlock();
1da177e4
LT
2307 continue;
2308 }
2309
1a3e8f3d
DC
2310 /*
2311 * because this is an RCU protected lookup, we could
2312 * find a recently freed or even reallocated inode
2313 * during the lookup. We need to check under the
2314 * i_flags_lock for a valid inode here. Skip it if it
2315 * is not valid, the wrong inode or stale.
2316 */
2317 spin_lock(&ip->i_flags_lock);
2318 if (ip->i_ino != inum + i ||
2319 __xfs_iflags_test(ip, XFS_ISTALE)) {
2320 spin_unlock(&ip->i_flags_lock);
2321 rcu_read_unlock();
2322 continue;
2323 }
2324 spin_unlock(&ip->i_flags_lock);
2325
5b3eed75
DC
2326 /*
2327 * Don't try to lock/unlock the current inode, but we
2328 * _cannot_ skip the other inodes that we did not find
2329 * in the list attached to the buffer and are not
2330 * already marked stale. If we can't lock it, back off
2331 * and retry.
2332 */
f2e9ad21
OS
2333 if (ip != free_ip) {
2334 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2335 rcu_read_unlock();
2336 delay(1);
2337 goto retry;
2338 }
2339
2340 /*
2341 * Check the inode number again in case we're
2342 * racing with freeing in xfs_reclaim_inode().
2343 * See the comments in that function for more
2344 * information as to why the initial check is
2345 * not sufficient.
2346 */
2347 if (ip->i_ino != inum + i) {
2348 xfs_iunlock(ip, XFS_ILOCK_EXCL);
962cc1ad 2349 rcu_read_unlock();
f2e9ad21
OS
2350 continue;
2351 }
1da177e4 2352 }
1a3e8f3d 2353 rcu_read_unlock();
1da177e4 2354
5b3eed75 2355 xfs_iflock(ip);
5b257b4a 2356 xfs_iflags_set(ip, XFS_ISTALE);
1da177e4 2357
5b3eed75
DC
2358 /*
2359 * we don't need to attach clean inodes or those only
2360 * with unlogged changes (which we throw away, anyway).
2361 */
1da177e4 2362 iip = ip->i_itemp;
5b3eed75 2363 if (!iip || xfs_inode_clean(ip)) {
5b257b4a 2364 ASSERT(ip != free_ip);
1da177e4
LT
2365 xfs_ifunlock(ip);
2366 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2367 continue;
2368 }
2369
f5d8d5c4
CH
2370 iip->ili_last_fields = iip->ili_fields;
2371 iip->ili_fields = 0;
fc0561ce 2372 iip->ili_fsync_fields = 0;
1da177e4 2373 iip->ili_logged = 1;
7b2e2a31
DC
2374 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
2375 &iip->ili_item.li_lsn);
1da177e4 2376
ca30b2a7
CH
2377 xfs_buf_attach_iodone(bp, xfs_istale_done,
2378 &iip->ili_item);
5b257b4a
DC
2379
2380 if (ip != free_ip)
1da177e4 2381 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1da177e4
LT
2382 }
2383
5b3eed75 2384 xfs_trans_stale_inode_buf(tp, bp);
1da177e4
LT
2385 xfs_trans_binval(tp, bp);
2386 }
2387
5017e97d 2388 xfs_perag_put(pag);
2a30f36d 2389 return 0;
1da177e4
LT
2390}
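/*
 * An abstract sketch of the lookup-validate pattern in the loop above:
 * an object found by a lockless (RCU) lookup may have been freed and
 * reused, so its identity must be re-checked under the object's own
 * lock before it is trusted. pthreads stand in for the kernel
 * primitives; every name here is an illustrative assumption.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct model_obj {
	pthread_mutex_t lock;
	uint64_t id;		/* 0 or a changed id means freed/reused */
};

static bool model_validate(struct model_obj *obj, uint64_t wanted)
{
	bool ok;

	pthread_mutex_lock(&obj->lock);
	ok = (obj->id == wanted);	/* re-check identity under the lock */
	pthread_mutex_unlock(&obj->lock);
	return ok;
}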
2391
98c4f78d
DW
2392/*
2393 * Free any local-format buffers sitting around before we reset to
2394 * extents format.
2395 */
2396static inline void
2397xfs_ifree_local_data(
2398 struct xfs_inode *ip,
2399 int whichfork)
2400{
2401 struct xfs_ifork *ifp;
2402
2403 if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
2404 return;
2405
2406 ifp = XFS_IFORK_PTR(ip, whichfork);
2407 xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
2408}
2409
1da177e4
LT
2410/*
2411 * This is called to return an inode to the inode free list.
2412 * The inode should already be truncated to 0 length and have
2413 * no pages associated with it. This routine also assumes that
2414 * the inode is already a part of the transaction.
2415 *
2416 * The on-disk copy of the inode will have been added to the list
2417 * of unlinked inodes in the AGI. We need to remove the inode from
2418 * that list atomically with respect to freeing it here.
2419 */
2420int
2421xfs_ifree(
2422 xfs_trans_t *tp,
2423 xfs_inode_t *ip,
2c3234d1 2424 struct xfs_defer_ops *dfops)
1da177e4
LT
2425{
2426 int error;
09b56604 2427 struct xfs_icluster xic = { 0 };
1da177e4 2428
579aa9ca 2429 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
54d7b5c1 2430 ASSERT(VFS_I(ip)->i_nlink == 0);
1da177e4
LT
2431 ASSERT(ip->i_d.di_nextents == 0);
2432 ASSERT(ip->i_d.di_anextents == 0);
c19b3b05 2433 ASSERT(ip->i_d.di_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
1da177e4
LT
2434 ASSERT(ip->i_d.di_nblocks == 0);
2435
2436 /*
2437 * Pull the on-disk inode from the AGI unlinked list.
2438 */
2439 error = xfs_iunlink_remove(tp, ip);
1baaed8f 2440 if (error)
1da177e4 2441 return error;
1da177e4 2442
2c3234d1 2443 error = xfs_difree(tp, ip->i_ino, dfops, &xic);
1baaed8f 2444 if (error)
1da177e4 2445 return error;
1baaed8f 2446
98c4f78d
DW
2447 xfs_ifree_local_data(ip, XFS_DATA_FORK);
2448 xfs_ifree_local_data(ip, XFS_ATTR_FORK);
2449
c19b3b05 2450 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
1da177e4 2451 ip->i_d.di_flags = 0;
beaae8cd 2452 ip->i_d.di_flags2 = 0;
1da177e4
LT
2453 ip->i_d.di_dmevmask = 0;
2454 ip->i_d.di_forkoff = 0; /* mark the attr fork not in use */
1da177e4
LT
2455 ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2456 ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2457 /*
2458 * Bump the generation count so no one will be confused
2459 * by reincarnations of this inode.
2460 */
9e9a2674 2461 VFS_I(ip)->i_generation++;
1da177e4
LT
2462 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2463
09b56604
BF
2464 if (xic.deleted)
2465 error = xfs_ifree_cluster(ip, tp, &xic);
1da177e4 2466
2a30f36d 2467 return error;
1da177e4
LT
2468}
2469
1da177e4 2470/*
60ec6783
CH
2471 * This is called to unpin an inode. The caller must have the inode locked
2472 * in at least shared mode so that the buffer cannot be subsequently pinned
2473 * once someone is waiting for it to be unpinned.
1da177e4 2474 */
60ec6783 2475static void
f392e631 2476xfs_iunpin(
60ec6783 2477 struct xfs_inode *ip)
1da177e4 2478{
579aa9ca 2479 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1da177e4 2480
4aaf15d1
DC
2481 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2482
a3f74ffb 2483 /* Give the log a push to start the unpinning I/O */
60ec6783 2484 xfs_log_force_lsn(ip->i_mount, ip->i_itemp->ili_last_lsn, 0);
a14a348b 2485
a3f74ffb 2486}
1da177e4 2487
f392e631
CH
2488static void
2489__xfs_iunpin_wait(
2490 struct xfs_inode *ip)
2491{
2492 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2493 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2494
2495 xfs_iunpin(ip);
2496
2497 do {
21417136 2498 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
f392e631
CH
2499 if (xfs_ipincount(ip))
2500 io_schedule();
2501 } while (xfs_ipincount(ip));
21417136 2502 finish_wait(wq, &wait.wq_entry);
f392e631
CH
2503}
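/*
 * A rough userspace analogue (pthreads, not the kernel bit-waitqueue
 * API) of the wait loop above: re-check the pin count after every
 * wakeup so a spurious or early wakeup can never end the wait while
 * the inode is still pinned. The model_pin type is an assumption.
 */
#include <pthread.h>

struct model_pin {
	pthread_mutex_t lock;
	pthread_cond_t unpinned;	/* signalled when pincount drops */
	int pincount;
};

static void model_iunpin_wait(struct model_pin *p)
{
	pthread_mutex_lock(&p->lock);
	while (p->pincount > 0)		/* loop, never a bare 'if' */
		pthread_cond_wait(&p->unpinned, &p->lock);
	pthread_mutex_unlock(&p->lock);
}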
2504
777df5af 2505void
a3f74ffb 2506xfs_iunpin_wait(
60ec6783 2507 struct xfs_inode *ip)
a3f74ffb 2508{
f392e631
CH
2509 if (xfs_ipincount(ip))
2510 __xfs_iunpin_wait(ip);
1da177e4
LT
2511}
2512
27320369
DC
2513/*
2514 * Removing an inode from the namespace involves removing the directory entry
2515 * and dropping the link count on the inode. Removing the directory entry can
2516 * result in locking an AGF (directory blocks were freed) and removing a link
2517 * count can result in placing the inode on an unlinked list which results in
2518 * locking an AGI.
2519 *
2520 * The big problem here is that we have an ordering constraint on AGF and AGI
2521 * locking - inode allocation locks the AGI, then can allocate a new extent for
2522 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2523 * removes the inode from the unlinked list, requiring that we lock the AGI
2524 * first, and then freeing the inode can result in an inode chunk being freed
2525 * and hence freeing disk space requiring that we lock an AGF.
2526 *
2527 * Hence the ordering that is imposed by other parts of the code is AGI before
2528 * AGF. This means we cannot remove the directory entry before we drop the inode
2529 * reference count and put it on the unlinked list as this results in a lock
2530 * order of AGF then AGI, and this can deadlock against inode allocation and
2531 * freeing. Therefore we must drop the link counts before we remove the
2532 * directory entry.
2533 *
2534 * This is still safe from a transactional point of view - it is not until we
310a75a3 2535 * get to xfs_defer_finish() that we have the possibility of multiple
27320369
DC
2536 * transactions in this operation. Hence as long as we remove the directory
2537 * entry and drop the link count in the first transaction of the remove
2538 * operation, there are no transactional constraints on the ordering here.
2539 */
c24b5dfa
DC
2540int
2541xfs_remove(
2542 xfs_inode_t *dp,
2543 struct xfs_name *name,
2544 xfs_inode_t *ip)
2545{
2546 xfs_mount_t *mp = dp->i_mount;
2547 xfs_trans_t *tp = NULL;
c19b3b05 2548 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
c24b5dfa 2549 int error = 0;
2c3234d1 2550 struct xfs_defer_ops dfops;
c24b5dfa 2551 xfs_fsblock_t first_block;
c24b5dfa 2552 uint resblks;
c24b5dfa
DC
2553
2554 trace_xfs_remove(dp, name);
2555
2556 if (XFS_FORCED_SHUTDOWN(mp))
2451337d 2557 return -EIO;
c24b5dfa
DC
2558
2559 error = xfs_qm_dqattach(dp, 0);
2560 if (error)
2561 goto std_return;
2562
2563 error = xfs_qm_dqattach(ip, 0);
2564 if (error)
2565 goto std_return;
2566
c24b5dfa
DC
2567 /*
2568 * We try to get the real space reservation first,
2569 * allowing for directory btree deletion(s) implying
2570 * possible bmap insert(s). If we can't get the space
2571 * reservation then we use 0 instead, and avoid the bmap
2572 * btree insert(s) in the directory code by, if the bmap
2573 * insert tries to happen, instead trimming the LAST
2574 * block from the directory.
2575 */
2576 resblks = XFS_REMOVE_SPACE_RES(mp);
253f4911 2577 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, resblks, 0, 0, &tp);
2451337d 2578 if (error == -ENOSPC) {
c24b5dfa 2579 resblks = 0;
253f4911
CH
2580 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_remove, 0, 0, 0,
2581 &tp);
c24b5dfa
DC
2582 }
2583 if (error) {
2451337d 2584 ASSERT(error != -ENOSPC);
253f4911 2585 goto std_return;
c24b5dfa
DC
2586 }
2587
2588 xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
2589
65523218 2590 xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
c24b5dfa
DC
2591 xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2592
2593 /*
2594 * If we're removing a directory perform some additional validation.
2595 */
2596 if (is_dir) {
54d7b5c1
DC
2597 ASSERT(VFS_I(ip)->i_nlink >= 2);
2598 if (VFS_I(ip)->i_nlink != 2) {
2451337d 2599 error = -ENOTEMPTY;
c24b5dfa
DC
2600 goto out_trans_cancel;
2601 }
2602 if (!xfs_dir_isempty(ip)) {
2451337d 2603 error = -ENOTEMPTY;
c24b5dfa
DC
2604 goto out_trans_cancel;
2605 }
c24b5dfa 2606
27320369 2607 /* Drop the link from ip's "..". */
c24b5dfa
DC
2608 error = xfs_droplink(tp, dp);
2609 if (error)
27320369 2610 goto out_trans_cancel;
c24b5dfa 2611
27320369 2612 /* Drop the "." link from ip to self. */
c24b5dfa
DC
2613 error = xfs_droplink(tp, ip);
2614 if (error)
27320369 2615 goto out_trans_cancel;
c24b5dfa
DC
2616 } else {
2617 /*
2618 * When removing a non-directory we need to log the parent
2619 * inode here. For a directory this is done implicitly
2620 * by the xfs_droplink call for the ".." entry.
2621 */
2622 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2623 }
27320369 2624 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
c24b5dfa 2625
27320369 2626 /* Drop the link from dp to ip. */
c24b5dfa
DC
2627 error = xfs_droplink(tp, ip);
2628 if (error)
27320369 2629 goto out_trans_cancel;
c24b5dfa 2630
2c3234d1 2631 xfs_defer_init(&dfops, &first_block);
27320369 2632 error = xfs_dir_removename(tp, dp, name, ip->i_ino,
2c3234d1 2633 &first_block, &dfops, resblks);
27320369 2634 if (error) {
2451337d 2635 ASSERT(error != -ENOENT);
27320369
DC
2636 goto out_bmap_cancel;
2637 }
2638
c24b5dfa
DC
2639 /*
2640 * If this is a synchronous mount, make sure that the
2641 * remove transaction goes to disk before returning to
2642 * the user.
2643 */
2644 if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2645 xfs_trans_set_sync(tp);
2646
8ad7c629 2647 error = xfs_defer_finish(&tp, &dfops);
c24b5dfa
DC
2648 if (error)
2649 goto out_bmap_cancel;
2650
70393313 2651 error = xfs_trans_commit(tp);
c24b5dfa
DC
2652 if (error)
2653 goto std_return;
2654
2cd2ef6a 2655 if (is_dir && xfs_inode_is_filestream(ip))
c24b5dfa
DC
2656 xfs_filestream_deassociate(ip);
2657
2658 return 0;
2659
2660 out_bmap_cancel:
2c3234d1 2661 xfs_defer_cancel(&dfops);
c24b5dfa 2662 out_trans_cancel:
4906e215 2663 xfs_trans_cancel(tp);
c24b5dfa
DC
2664 std_return:
2665 return error;
2666}
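/*
 * A minimal sketch of the reservation fallback used at the top of
 * xfs_remove() above: ask for the full space reservation first, and on
 * ENOSPC retry with zero blocks so the directory code trims the last
 * block instead of growing the bmap btree. model_trans_alloc() is an
 * illustrative stand-in for xfs_trans_alloc(), not its real behaviour.
 */
#include <errno.h>

/* toy allocator: any non-zero reservation fails on a "full" filesystem */
static int model_trans_alloc(unsigned int resblks)
{
	return resblks ? -ENOSPC : 0;
}

static int model_reserve_for_remove(unsigned int resblks)
{
	int error = model_trans_alloc(resblks);

	if (error == -ENOSPC) {
		resblks = 0;		/* fall back to no block reservation */
		error = model_trans_alloc(resblks);
	}
	return error;
}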
2667
f6bba201
DC
2668/*
2669 * Enter all inodes for a rename transaction into a sorted array.
2670 */
95afcf5c 2671#define __XFS_SORT_INODES 5
f6bba201
DC
2672STATIC void
2673xfs_sort_for_rename(
95afcf5c
DC
2674 struct xfs_inode *dp1, /* in: old (source) directory inode */
2675 struct xfs_inode *dp2, /* in: new (target) directory inode */
2676 struct xfs_inode *ip1, /* in: inode of old entry */
2677 struct xfs_inode *ip2, /* in: inode of new entry */
2678 struct xfs_inode *wip, /* in: whiteout inode */
2679 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2680 int *num_inodes) /* in/out: inodes in array */
f6bba201 2681{
f6bba201
DC
2682 int i, j;
2683
95afcf5c
DC
2684 ASSERT(*num_inodes == __XFS_SORT_INODES);
2685 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2686
f6bba201
DC
2687 /*
2688 * i_tab contains a list of pointers to inodes. We initialize
2689 * the table here & we'll sort it. We will then use it to
2690 * order the acquisition of the inode locks.
2691 *
2692 * Note that the table may contain duplicates. e.g., dp1 == dp2.
2693 */
95afcf5c
DC
2694 i = 0;
2695 i_tab[i++] = dp1;
2696 i_tab[i++] = dp2;
2697 i_tab[i++] = ip1;
2698 if (ip2)
2699 i_tab[i++] = ip2;
2700 if (wip)
2701 i_tab[i++] = wip;
2702 *num_inodes = i;
f6bba201
DC
2703
2704 /*
2705 * Sort the elements via bubble sort. (Remember, there are at
95afcf5c 2706 * most 5 elements to sort, so this is adequate.)
f6bba201
DC
2707 */
2708 for (i = 0; i < *num_inodes; i++) {
2709 for (j = 1; j < *num_inodes; j++) {
2710 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
95afcf5c 2711 struct xfs_inode *temp = i_tab[j];
f6bba201
DC
2712 i_tab[j] = i_tab[j-1];
2713 i_tab[j-1] = temp;
2714 }
2715 }
2716 }
2717}
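/*
 * A standalone sketch of the sort above: bubble sort is adequate for at
 * most __XFS_SORT_INODES entries, and taking the inode locks in
 * ascending i_ino order afterwards is what prevents ABBA deadlocks
 * between concurrent renames. model_inode is an assumed stand-in for
 * struct xfs_inode.
 */
#include <stdint.h>

struct model_inode {
	uint64_t i_ino;
};

static void model_sort_for_rename(struct model_inode **tab, int n)
{
	for (int i = 0; i < n; i++) {
		for (int j = 1; j < n; j++) {
			if (tab[j]->i_ino < tab[j - 1]->i_ino) {
				struct model_inode *tmp = tab[j];

				tab[j] = tab[j - 1];
				tab[j - 1] = tmp;
			}
		}
	}
}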
2718
310606b0
DC
2719static int
2720xfs_finish_rename(
2721 struct xfs_trans *tp,
2c3234d1 2722 struct xfs_defer_ops *dfops)
310606b0 2723{
310606b0
DC
2724 int error;
2725
2726 /*
2727 * If this is a synchronous mount, make sure that the rename transaction
2728 * goes to disk before returning to the user.
2729 */
2730 if (tp->t_mountp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
2731 xfs_trans_set_sync(tp);
2732
8ad7c629 2733 error = xfs_defer_finish(&tp, dfops);
310606b0 2734 if (error) {
2c3234d1 2735 xfs_defer_cancel(dfops);
4906e215 2736 xfs_trans_cancel(tp);
310606b0
DC
2737 return error;
2738 }
2739
70393313 2740 return xfs_trans_commit(tp);
310606b0
DC
2741}
2742
d31a1825
CM
2743/*
2744 * xfs_cross_rename()
2745 *
 2746 * responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall
2747 */
2748STATIC int
2749xfs_cross_rename(
2750 struct xfs_trans *tp,
2751 struct xfs_inode *dp1,
2752 struct xfs_name *name1,
2753 struct xfs_inode *ip1,
2754 struct xfs_inode *dp2,
2755 struct xfs_name *name2,
2756 struct xfs_inode *ip2,
2c3234d1 2757 struct xfs_defer_ops *dfops,
d31a1825
CM
2758 xfs_fsblock_t *first_block,
2759 int spaceres)
2760{
2761 int error = 0;
2762 int ip1_flags = 0;
2763 int ip2_flags = 0;
2764 int dp2_flags = 0;
2765
2766 /* Swap inode number for dirent in first parent */
2767 error = xfs_dir_replace(tp, dp1, name1,
2768 ip2->i_ino,
2c3234d1 2769 first_block, dfops, spaceres);
d31a1825 2770 if (error)
eeacd321 2771 goto out_trans_abort;
d31a1825
CM
2772
2773 /* Swap inode number for dirent in second parent */
2774 error = xfs_dir_replace(tp, dp2, name2,
2775 ip1->i_ino,
2c3234d1 2776 first_block, dfops, spaceres);
d31a1825 2777 if (error)
eeacd321 2778 goto out_trans_abort;
d31a1825
CM
2779
2780 /*
2781 * If we're renaming one or more directories across different parents,
2782 * update the respective ".." entries (and link counts) to match the new
2783 * parents.
2784 */
2785 if (dp1 != dp2) {
2786 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2787
c19b3b05 2788 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
d31a1825
CM
2789 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2790 dp1->i_ino, first_block,
2c3234d1 2791 dfops, spaceres);
d31a1825 2792 if (error)
eeacd321 2793 goto out_trans_abort;
d31a1825
CM
2794
2795 /* transfer ip2 ".." reference to dp1 */
c19b3b05 2796 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
d31a1825
CM
2797 error = xfs_droplink(tp, dp2);
2798 if (error)
eeacd321 2799 goto out_trans_abort;
d31a1825
CM
2800 error = xfs_bumplink(tp, dp1);
2801 if (error)
eeacd321 2802 goto out_trans_abort;
d31a1825
CM
2803 }
2804
2805 /*
2806 * Although ip1 isn't changed here, userspace needs
2807 * to be warned about the change, so that applications
 2808 * relying on it (like backup ones) will properly
 2809 * notice the change
2810 */
2811 ip1_flags |= XFS_ICHGTIME_CHG;
2812 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2813 }
2814
c19b3b05 2815 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
d31a1825
CM
2816 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2817 dp2->i_ino, first_block,
2c3234d1 2818 dfops, spaceres);
d31a1825 2819 if (error)
eeacd321 2820 goto out_trans_abort;
d31a1825
CM
2821
2822 /* transfer ip1 ".." reference to dp2 */
c19b3b05 2823 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
d31a1825
CM
2824 error = xfs_droplink(tp, dp1);
2825 if (error)
eeacd321 2826 goto out_trans_abort;
d31a1825
CM
2827 error = xfs_bumplink(tp, dp2);
2828 if (error)
eeacd321 2829 goto out_trans_abort;
d31a1825
CM
2830 }
2831
2832 /*
2833 * Although ip2 isn't changed here, userspace needs
2834 * to be warned about the change, so that applications
 2835 * relying on it (like backup ones) will properly
 2836 * notice the change
2837 */
2838 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2839 ip2_flags |= XFS_ICHGTIME_CHG;
2840 }
2841 }
2842
2843 if (ip1_flags) {
2844 xfs_trans_ichgtime(tp, ip1, ip1_flags);
2845 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
2846 }
2847 if (ip2_flags) {
2848 xfs_trans_ichgtime(tp, ip2, ip2_flags);
2849 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
2850 }
2851 if (dp2_flags) {
2852 xfs_trans_ichgtime(tp, dp2, dp2_flags);
2853 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
2854 }
2855 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2856 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
2c3234d1 2857 return xfs_finish_rename(tp, dfops);
eeacd321
DC
2858
2859out_trans_abort:
2c3234d1 2860 xfs_defer_cancel(dfops);
4906e215 2861 xfs_trans_cancel(tp);
d31a1825
CM
2862 return error;
2863}
2864
7dcf5c3e
DC
2865/*
2866 * xfs_rename_alloc_whiteout()
2867 *
 2868 * Return a referenced, unlinked, unlocked inode that can be used as a
 2869 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
 2870 * crash between allocating the inode and linking it into the rename
 2871 * transaction, recovery will free the inode and we won't leak it.
2872 */
2873static int
2874xfs_rename_alloc_whiteout(
2875 struct xfs_inode *dp,
2876 struct xfs_inode **wip)
2877{
2878 struct xfs_inode *tmpfile;
2879 int error;
2880
2881 error = xfs_create_tmpfile(dp, NULL, S_IFCHR | WHITEOUT_MODE, &tmpfile);
2882 if (error)
2883 return error;
2884
22419ac9
BF
2885 /*
2886 * Prepare the tmpfile inode as if it were created through the VFS.
2887 * Otherwise, the link increment paths will complain about nlink 0->1.
2888 * Drop the link count as done by d_tmpfile(), complete the inode setup
2889 * and flag it as linkable.
2890 */
2891 drop_nlink(VFS_I(tmpfile));
2b3d1d41 2892 xfs_setup_iops(tmpfile);
7dcf5c3e
DC
2893 xfs_finish_inode_setup(tmpfile);
2894 VFS_I(tmpfile)->i_state |= I_LINKABLE;
2895
2896 *wip = tmpfile;
2897 return 0;
2898}
2899
f6bba201
DC
2900/*
2901 * xfs_rename
2902 */
2903int
2904xfs_rename(
7dcf5c3e
DC
2905 struct xfs_inode *src_dp,
2906 struct xfs_name *src_name,
2907 struct xfs_inode *src_ip,
2908 struct xfs_inode *target_dp,
2909 struct xfs_name *target_name,
2910 struct xfs_inode *target_ip,
2911 unsigned int flags)
f6bba201 2912{
7dcf5c3e
DC
2913 struct xfs_mount *mp = src_dp->i_mount;
2914 struct xfs_trans *tp;
2c3234d1 2915 struct xfs_defer_ops dfops;
7dcf5c3e
DC
2916 xfs_fsblock_t first_block;
2917 struct xfs_inode *wip = NULL; /* whiteout inode */
2918 struct xfs_inode *inodes[__XFS_SORT_INODES];
2919 int num_inodes = __XFS_SORT_INODES;
2b93681f 2920 bool new_parent = (src_dp != target_dp);
c19b3b05 2921 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
7dcf5c3e
DC
2922 int spaceres;
2923 int error;
f6bba201
DC
2924
2925 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
2926
eeacd321
DC
2927 if ((flags & RENAME_EXCHANGE) && !target_ip)
2928 return -EINVAL;
2929
7dcf5c3e
DC
2930 /*
2931 * If we are doing a whiteout operation, allocate the whiteout inode
2932 * we will be placing at the target and ensure the type is set
2933 * appropriately.
2934 */
2935 if (flags & RENAME_WHITEOUT) {
2936 ASSERT(!(flags & (RENAME_NOREPLACE | RENAME_EXCHANGE)));
2937 error = xfs_rename_alloc_whiteout(target_dp, &wip);
2938 if (error)
2939 return error;
2940
2941 /* setup target dirent info as whiteout */
2942 src_name->type = XFS_DIR3_FT_CHRDEV;
2943 }
f6bba201 2944
7dcf5c3e 2945 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
f6bba201
DC
2946 inodes, &num_inodes);
2947
f6bba201 2948 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
253f4911 2949 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
2451337d 2950 if (error == -ENOSPC) {
f6bba201 2951 spaceres = 0;
253f4911
CH
2952 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
2953 &tp);
f6bba201 2954 }
445883e8 2955 if (error)
253f4911 2956 goto out_release_wip;
f6bba201
DC
2957
2958 /*
2959 * Attach the dquots to the inodes
2960 */
2961 error = xfs_qm_vop_rename_dqattach(inodes);
445883e8
DC
2962 if (error)
2963 goto out_trans_cancel;
f6bba201
DC
2964
2965 /*
2966 * Lock all the participating inodes. Depending upon whether
2967 * the target_name exists in the target directory, and
2968 * whether the target directory is the same as the source
2969 * directory, we can lock from 2 to 4 inodes.
2970 */
2971 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
2972
2973 /*
2974 * Join all the inodes to the transaction. From this point on,
2975 * we can rely on either trans_commit or trans_cancel to unlock
2976 * them.
2977 */
65523218 2978 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
f6bba201 2979 if (new_parent)
65523218 2980 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
f6bba201
DC
2981 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
2982 if (target_ip)
2983 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
7dcf5c3e
DC
2984 if (wip)
2985 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
f6bba201
DC
2986
2987 /*
2988 * If we are using project inheritance, we only allow renames
2989 * into our tree when the project IDs are the same; else the
2990 * tree quota mechanism would be circumvented.
2991 */
2992 if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
2993 (xfs_get_projid(target_dp) != xfs_get_projid(src_ip)))) {
2451337d 2994 error = -EXDEV;
445883e8 2995 goto out_trans_cancel;
f6bba201
DC
2996 }
2997
2c3234d1 2998 xfs_defer_init(&dfops, &first_block);
445883e8 2999
eeacd321
DC
3000 /* RENAME_EXCHANGE is unique from here on. */
3001 if (flags & RENAME_EXCHANGE)
3002 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3003 target_dp, target_name, target_ip,
2c3234d1 3004 &dfops, &first_block, spaceres);
d31a1825 3005
f6bba201
DC
3006 /*
3007 * Set up the target.
3008 */
3009 if (target_ip == NULL) {
3010 /*
3011 * If there's no space reservation, check the entry will
3012 * fit before actually inserting it.
3013 */
94f3cad5
ES
3014 if (!spaceres) {
3015 error = xfs_dir_canenter(tp, target_dp, target_name);
3016 if (error)
445883e8 3017 goto out_trans_cancel;
94f3cad5 3018 }
f6bba201
DC
3019 /*
3020 * If target does not exist and the rename crosses
3021 * directories, adjust the target directory link count
3022 * to account for the ".." reference from the new entry.
3023 */
3024 error = xfs_dir_createname(tp, target_dp, target_name,
3025 src_ip->i_ino, &first_block,
2c3234d1 3026 &dfops, spaceres);
f6bba201 3027 if (error)
4906e215 3028 goto out_bmap_cancel;
f6bba201
DC
3029
3030 xfs_trans_ichgtime(tp, target_dp,
3031 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3032
3033 if (new_parent && src_is_directory) {
3034 error = xfs_bumplink(tp, target_dp);
3035 if (error)
4906e215 3036 goto out_bmap_cancel;
f6bba201
DC
3037 }
3038 } else { /* target_ip != NULL */
3039 /*
3040 * If target exists and it's a directory, check that both
3041 * target and source are directories and that target can be
3042 * destroyed, or that neither is a directory.
3043 */
c19b3b05 3044 if (S_ISDIR(VFS_I(target_ip)->i_mode)) {
f6bba201
DC
3045 /*
3046 * Make sure target dir is empty.
3047 */
3048 if (!(xfs_dir_isempty(target_ip)) ||
54d7b5c1 3049 (VFS_I(target_ip)->i_nlink > 2)) {
2451337d 3050 error = -EEXIST;
445883e8 3051 goto out_trans_cancel;
f6bba201
DC
3052 }
3053 }
3054
3055 /*
3056 * Link the source inode under the target name.
3057 * If the source inode is a directory and we are moving
3058 * it across directories, its ".." entry will be
3059 * inconsistent until we replace that down below.
3060 *
3061 * In case there is already an entry with the same
3062 * name at the destination directory, remove it first.
3063 */
3064 error = xfs_dir_replace(tp, target_dp, target_name,
3065 src_ip->i_ino,
2c3234d1 3066 &first_block, &dfops, spaceres);
f6bba201 3067 if (error)
4906e215 3068 goto out_bmap_cancel;
f6bba201
DC
3069
3070 xfs_trans_ichgtime(tp, target_dp,
3071 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3072
3073 /*
3074 * Decrement the link count on the target since the target
3075 * dir no longer points to it.
3076 */
3077 error = xfs_droplink(tp, target_ip);
3078 if (error)
4906e215 3079 goto out_bmap_cancel;
f6bba201
DC
3080
3081 if (src_is_directory) {
3082 /*
3083 * Drop the link from the old "." entry.
3084 */
3085 error = xfs_droplink(tp, target_ip);
3086 if (error)
4906e215 3087 goto out_bmap_cancel;
f6bba201
DC
3088 }
3089 } /* target_ip != NULL */
3090
3091 /*
3092 * Remove the source.
3093 */
3094 if (new_parent && src_is_directory) {
3095 /*
3096 * Rewrite the ".." entry to point to the new
3097 * directory.
3098 */
3099 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3100 target_dp->i_ino,
2c3234d1 3101 &first_block, &dfops, spaceres);
2451337d 3102 ASSERT(error != -EEXIST);
f6bba201 3103 if (error)
4906e215 3104 goto out_bmap_cancel;
f6bba201
DC
3105 }
3106
3107 /*
3108 * We always want to hit the ctime on the source inode.
3109 *
3110 * This isn't strictly required by the standards since the source
3111 * inode isn't really being changed, but old unix file systems did
3112 * it and some incremental backup programs won't work without it.
3113 */
3114 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3115 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3116
3117 /*
3118 * Adjust the link count on src_dp. This is necessary when
3119 * renaming a directory, either within one parent when
3120 * the target existed, or across two parent directories.
3121 */
3122 if (src_is_directory && (new_parent || target_ip != NULL)) {
3123
3124 /*
3125 * Decrement link count on src_directory since the
3126 * entry that's moved no longer points to it.
3127 */
3128 error = xfs_droplink(tp, src_dp);
3129 if (error)
4906e215 3130 goto out_bmap_cancel;
f6bba201
DC
3131 }
3132
7dcf5c3e
DC
3133 /*
3134 * For whiteouts, we only need to update the source dirent with the
3135 * inode number of the whiteout inode rather than removing it
3136 * altogether.
3137 */
3138 if (wip) {
3139 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
2c3234d1 3140 &first_block, &dfops, spaceres);
7dcf5c3e
DC
3141 } else
3142 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
2c3234d1 3143 &first_block, &dfops, spaceres);
f6bba201 3144 if (error)
4906e215 3145 goto out_bmap_cancel;
f6bba201
DC
3146
3147 /*
7dcf5c3e
DC
3148 * For whiteouts, we need to bump the link count on the whiteout inode.
3149 * This means that failures all the way up to this point leave the inode
3150 * on the unlinked list and so cleanup is a simple matter of dropping
3151 * the remaining reference to it. If we fail here after bumping the link
3152 * count, we're shutting down the filesystem so we'll never see the
3153 * intermediate state on disk.
f6bba201 3154 */
7dcf5c3e 3155 if (wip) {
54d7b5c1 3156 ASSERT(VFS_I(wip)->i_nlink == 0);
7dcf5c3e
DC
3157 error = xfs_bumplink(tp, wip);
3158 if (error)
4906e215 3159 goto out_bmap_cancel;
7dcf5c3e
DC
3160 error = xfs_iunlink_remove(tp, wip);
3161 if (error)
4906e215 3162 goto out_bmap_cancel;
7dcf5c3e 3163 xfs_trans_log_inode(tp, wip, XFS_ILOG_CORE);
f6bba201 3164
7dcf5c3e
DC
3165 /*
3166 * Now we have a real link, clear the "I'm a tmpfile" state
3167 * flag from the inode so it doesn't accidentally get misused in
3168 * future.
3169 */
3170 VFS_I(wip)->i_state &= ~I_LINKABLE;
f6bba201
DC
3171 }
3172
f6bba201
DC
3173 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3174 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3175 if (new_parent)
3176 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
f6bba201 3177
2c3234d1 3178 error = xfs_finish_rename(tp, &dfops);
7dcf5c3e
DC
3179 if (wip)
3180 IRELE(wip);
3181 return error;
f6bba201 3182
445883e8 3183out_bmap_cancel:
2c3234d1 3184 xfs_defer_cancel(&dfops);
445883e8 3185out_trans_cancel:
4906e215 3186 xfs_trans_cancel(tp);
253f4911 3187out_release_wip:
7dcf5c3e
DC
3188 if (wip)
3189 IRELE(wip);
f6bba201
DC
3190 return error;
3191}
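/*
 * A tiny pthreads illustration of the ordering discipline that both the
 * AGI-before-AGF rule (see the comment above xfs_remove()) and the
 * sorted inode locking in xfs_rename() depend on: every path that needs
 * several locks takes them in one globally agreed order, so no cycle of
 * waiters can form. The mutexes are illustrative stand-ins only.
 */
#include <pthread.h>

static pthread_mutex_t model_agi_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t model_agf_lock = PTHREAD_MUTEX_INITIALIZER;

static void model_free_inode_chunk(void)
{
	pthread_mutex_lock(&model_agi_lock);	/* unlinked-list removal first */
	pthread_mutex_lock(&model_agf_lock);	/* then free the disk space */
	/* ... modify both structures ... */
	pthread_mutex_unlock(&model_agf_lock);
	pthread_mutex_unlock(&model_agi_lock);
}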
3192
5c4d97d0
DC
3193STATIC int
3194xfs_iflush_cluster(
19429363
DC
3195 struct xfs_inode *ip,
3196 struct xfs_buf *bp)
1da177e4 3197{
19429363 3198 struct xfs_mount *mp = ip->i_mount;
5c4d97d0
DC
3199 struct xfs_perag *pag;
3200 unsigned long first_index, mask;
3201 unsigned long inodes_per_cluster;
19429363
DC
3202 int cilist_size;
3203 struct xfs_inode **cilist;
3204 struct xfs_inode *cip;
5c4d97d0
DC
3205 int nr_found;
3206 int clcount = 0;
3207 int bufwasdelwri;
1da177e4 3208 int i;
1da177e4 3209
5c4d97d0 3210 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1da177e4 3211
0f49efd8 3212 inodes_per_cluster = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
19429363
DC
3213 cilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
3214 cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS);
3215 if (!cilist)
5c4d97d0 3216 goto out_put;
1da177e4 3217
0f49efd8 3218 mask = ~(((mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog)) - 1);
5c4d97d0
DC
3219 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask;
3220 rcu_read_lock();
3221 /* really need a gang lookup range call here */
19429363 3222 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist,
5c4d97d0
DC
3223 first_index, inodes_per_cluster);
3224 if (nr_found == 0)
3225 goto out_free;
3226
3227 for (i = 0; i < nr_found; i++) {
19429363
DC
3228 cip = cilist[i];
3229 if (cip == ip)
bad55843 3230 continue;
1a3e8f3d
DC
3231
3232 /*
3233 * because this is an RCU protected lookup, we could find a
3234 * recently freed or even reallocated inode during the lookup.
3235 * We need to check under the i_flags_lock for a valid inode
3236 * here. Skip it if it is not valid or the wrong inode.
3237 */
19429363
DC
3238 spin_lock(&cip->i_flags_lock);
3239 if (!cip->i_ino ||
3240 __xfs_iflags_test(cip, XFS_ISTALE)) {
3241 spin_unlock(&cip->i_flags_lock);
1a3e8f3d
DC
3242 continue;
3243 }
5a90e53e
DC
3244
3245 /*
3246 * Once we fall off the end of the cluster, no point checking
3247 * any more inodes in the list because they will also all be
3248 * outside the cluster.
3249 */
19429363
DC
3250 if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) {
3251 spin_unlock(&cip->i_flags_lock);
5a90e53e
DC
3252 break;
3253 }
19429363 3254 spin_unlock(&cip->i_flags_lock);
1a3e8f3d 3255
bad55843
DC
3256 /*
3257 * Do an un-protected check to see if the inode is dirty and
3258 * is a candidate for flushing. These checks will be repeated
3259 * later after the appropriate locks are acquired.
3260 */
19429363 3261 if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0)
bad55843 3262 continue;
bad55843
DC
3263
3264 /*
3265 * Try to get locks. If any are unavailable or it is pinned,
3266 * then this inode cannot be flushed and is skipped.
3267 */
3268
19429363 3269 if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED))
bad55843 3270 continue;
19429363
DC
3271 if (!xfs_iflock_nowait(cip)) {
3272 xfs_iunlock(cip, XFS_ILOCK_SHARED);
bad55843
DC
3273 continue;
3274 }
19429363
DC
3275 if (xfs_ipincount(cip)) {
3276 xfs_ifunlock(cip);
3277 xfs_iunlock(cip, XFS_ILOCK_SHARED);
bad55843
DC
3278 continue;
3279 }
3280
8a17d7dd
DC
3281
3282 /*
3283 * Check the inode number again, just to be certain we are not
3284 * racing with freeing in xfs_reclaim_inode(). See the comments
3285 * in that function for more information as to why the initial
3286 * check is not sufficient.
3287 */
19429363
DC
3288 if (!cip->i_ino) {
3289 xfs_ifunlock(cip);
3290 xfs_iunlock(cip, XFS_ILOCK_SHARED);
bad55843
DC
3291 continue;
3292 }
3293
3294 /*
3295 * arriving here means that this inode can be flushed. First
3296 * re-check that it's dirty before flushing.
3297 */
19429363 3298 if (!xfs_inode_clean(cip)) {
33540408 3299 int error;
19429363 3300 error = xfs_iflush_int(cip, bp);
bad55843 3301 if (error) {
19429363 3302 xfs_iunlock(cip, XFS_ILOCK_SHARED);
bad55843
DC
3303 goto cluster_corrupt_out;
3304 }
3305 clcount++;
3306 } else {
19429363 3307 xfs_ifunlock(cip);
bad55843 3308 }
19429363 3309 xfs_iunlock(cip, XFS_ILOCK_SHARED);
bad55843
DC
3310 }
3311
3312 if (clcount) {
ff6d6af2
BD
3313 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3314 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
bad55843
DC
3315 }
3316
3317out_free:
1a3e8f3d 3318 rcu_read_unlock();
19429363 3319 kmem_free(cilist);
44b56e0a
DC
3320out_put:
3321 xfs_perag_put(pag);
bad55843
DC
3322 return 0;
3323
3324
3325cluster_corrupt_out:
3326 /*
3327 * Corruption detected in the clustering loop. Invalidate the
3328 * inode buffer and shut down the filesystem.
3329 */
1a3e8f3d 3330 rcu_read_unlock();
bad55843 3331 /*
43ff2122 3332 * Clean up the buffer. If it was delwri, just release it --
bad55843
DC
3333 * brelse can handle it with no problems. If not, shut down the
3334 * filesystem before releasing the buffer.
3335 */
43ff2122 3336 bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
bad55843
DC
3337 if (bufwasdelwri)
3338 xfs_buf_relse(bp);
3339
3340 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3341
3342 if (!bufwasdelwri) {
3343 /*
3344 * Just like incore_relse: if we have b_iodone functions,
3345 * mark the buffer as an error and call them. Otherwise
3346 * mark it as stale and brelse.
3347 */
cb669ca5 3348 if (bp->b_iodone) {
b0388bf1 3349 bp->b_flags &= ~XBF_DONE;
c867cb61 3350 xfs_buf_stale(bp);
2451337d 3351 xfs_buf_ioerror(bp, -EIO);
e8aaba9a 3352 xfs_buf_ioend(bp);
bad55843 3353 } else {
c867cb61 3354 xfs_buf_stale(bp);
3355 xfs_buf_relse(bp);
3356 }
3357 }
3358
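/*
 * Aside: the disposal above follows a completion-callback rule -- if
 * anyone registered an I/O-done callback, the buffer must be failed
 * *through* that callback so waiters see the error; only an unclaimed
 * buffer may simply be released. A hedged userspace sketch
 * (illustrative names):
 */
#include <stdio.h>

struct buffer {
	void (*io_done)(struct buffer *, int error);
	int stale;
};

static void dispose_on_error(struct buffer *b, int error)
{
	b->stale = 1;			/* never reuse the cached contents */
	if (b->io_done)
		b->io_done(b, error);	/* propagate failure to waiters */
	/* else: simply release it -- nobody waits on its completion */
}

static void report(struct buffer *b, int error)
{
	(void)b;
	fprintf(stderr, "buffer failed: %d\n", error);
}

int main(void)
{
	struct buffer b = { report, 0 };

	dispose_on_error(&b, -5 /* -EIO */);
	return 0;
}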
3359 /*
3360 * xfs_iflush_abort() unlocks the flush lock for us.
3361 */
3362 xfs_iflush_abort(cip, false);
3363 kmem_free(cilist);
44b56e0a 3364 xfs_perag_put(pag);
2451337d 3365 return -EFSCORRUPTED;
3366}
3367
1da177e4 3368/*
3369 * Flush dirty inode metadata into the backing buffer.
3370 *
3371 * The caller must have the inode lock and the inode flush lock held. The
3372 * inode lock will still be held upon return to the caller, and the inode
3373 * flush lock will be released after the inode has reached the disk.
3374 *
3375 * The caller must write out the buffer returned in *bpp and release it.
3376 */
3377int
3378xfs_iflush(
3379 struct xfs_inode *ip,
3380 struct xfs_buf **bpp)
1da177e4 3381{
4c46819a 3382 struct xfs_mount *mp = ip->i_mount;
b1438f47 3383 struct xfs_buf *bp = NULL;
4c46819a 3384 struct xfs_dinode *dip;
1da177e4 3385 int error;
1da177e4 3386
ff6d6af2 3387 XFS_STATS_INC(mp, xs_iflush_count);
1da177e4 3388
579aa9ca 3389 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
474fce06 3390 ASSERT(xfs_isiflocked(ip));
1da177e4 3391 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
8096b1eb 3392 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
1da177e4 3393
4c46819a 3394 *bpp = NULL;
1da177e4 3395
3396 xfs_iunpin_wait(ip);
3397
3398 /*
3399 * For stale inodes we cannot rely on the backing buffer remaining
3400 * stale in cache for the remaining life of the stale inode and so
475ee413 3401 * xfs_imap_to_bp() below may give us a buffer that no longer
3402 * contains inodes. We have to check this after ensuring the inode is
3403 * unpinned so that it is safe to reclaim the stale inode after the
3404 * flush call.
3405 */
3406 if (xfs_iflags_test(ip, XFS_ISTALE)) {
3407 xfs_ifunlock(ip);
3408 return 0;
3409 }
3410
3411 /*
3412 * The inode may have been unpinned because the filesystem is shutting
3413 * down forcibly. If that's the case we must not write this inode
3414 * to disk, because the log record didn't make it to disk.
3415 *
3416 * We also have to remove the log item from the AIL in this case,
3417 * as we wait for an empty AIL as part of the unmount process.
3418 */
3419 if (XFS_FORCED_SHUTDOWN(mp)) {
2451337d 3420 error = -EIO;
32ce90a4 3421 goto abort_out;
3422 }
3423
a3f74ffb 3424 /*
3425 * Get the buffer containing the on-disk inode. We are doing a try-lock
3426 * operation here, so we may get an EAGAIN error. In that case, we
3427 * simply want to return with the inode still dirty.
3428 *
3429 * If we get any other error, we effectively have a corruption situation
3430 * and we cannot flush the inode, so we treat it the same as failing
3431 * xfs_iflush_int().
a3f74ffb 3432 */
3433 error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK,
3434 0);
b1438f47 3435 if (error == -EAGAIN) {
3436 xfs_ifunlock(ip);
3437 return error;
3438 }
3439 if (error)
3440 goto corrupt_out;
a3f74ffb 3441
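/*
 * Aside: the triage above separates "try again later" from "give up":
 * -EAGAIN from the failed trylock leaves the inode dirty for a later
 * flush attempt, while any other error escalates to the corruption
 * path. A hedged sketch of that shape (illustrative names):
 */
#include <errno.h>

static int flush_one(void)
{
	return -EAGAIN;		/* stub: pretend the buffer was contended */
}

static int flush_or_defer(void)
{
	int error = flush_one();

	if (error == -EAGAIN)
		return 0;	/* contended: leave it dirty, retry later */
	return error;		/* 0 on success, fatal error otherwise */
}

int main(void)
{
	return flush_or_defer();	/* 0 here: deferred, not failed */
}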
3442 /*
3443 * First flush out the inode that xfs_iflush was called with.
3444 */
3445 error = xfs_iflush_int(ip, bp);
bad55843 3446 if (error)
1da177e4 3447 goto corrupt_out;
1da177e4 3448
3449 /*
3450 * If the buffer is pinned then push on the log now so we won't
3451 * get stuck waiting in the write for too long.
3452 */
811e64c7 3453 if (xfs_buf_ispinned(bp))
a14a348b 3454 xfs_log_force(mp, 0);
a3f74ffb 3455
3456 /*
3457 * Inode clustering: see if other dirty inodes in this cluster
3458 * can be gathered into this write.
3459 */
3460 error = xfs_iflush_cluster(ip, bp);
3461 if (error)
3462 goto cluster_corrupt_out;
1da177e4 3463
3464 *bpp = bp;
3465 return 0;
3466
3467corrupt_out:
3468 if (bp)
3469 xfs_buf_relse(bp);
7d04a335 3470 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
1da177e4 3471cluster_corrupt_out:
2451337d 3472 error = -EFSCORRUPTED;
32ce90a4 3473abort_out:
3474 /*
3475 * xfs_iflush_abort() unlocks the flush lock for us.
3476 */
04913fdd 3477 xfs_iflush_abort(ip, false);
32ce90a4 3478 return error;
3479}
3480
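/*
 * For reference, a typical caller of xfs_iflush() -- the AIL inode
 * pushing path -- uses the contract above roughly as follows, queueing
 * the returned buffer for delayed write and dropping its reference.
 * An approximate sketch; details vary by kernel version:
 *
 *	error = xfs_iflush(ip, &bp);
 *	if (!error) {
 *		if (!xfs_buf_delwri_queue(bp, buffer_list))
 *			rval = XFS_ITEM_FLUSHING;
 *		xfs_buf_relse(bp);
 *	}
 */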
3481/*
3482 * If there are inline format data / attr forks attached to this inode,
3483 * make sure they're not corrupt.
3484 */
3485bool
3486xfs_inode_verify_forks(
3487 struct xfs_inode *ip)
3488{
3489 xfs_failaddr_t fa;
3490
3491 fa = xfs_ifork_verify_data(ip, &xfs_default_ifork_ops);
3492 if (fa) {
3493 xfs_alert(ip->i_mount,
aff68a55 3494 "%s: bad inode %llu inline data fork at %pS",
3495 __func__, ip->i_ino, fa);
3496 return false;
3497 }
3498
3499 fa = xfs_ifork_verify_attr(ip, &xfs_default_ifork_ops);
3500 if (fa) {
3501 xfs_alert(ip->i_mount,
aff68a55 3502 "%s: bad inode %llu inline attr fork at %pS",
3503 __func__, ip->i_ino, fa);
3504 return false;
3505 }
3506 return true;
3507}
3508
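/*
 * Aside: xfs_failaddr_t verifiers return the address of the failing
 * check (which is why the callers above print it with %pS, resolving
 * the address to a symbol). A hedged userspace analogue of reporting
 * *where* validation failed, using the GCC/Clang labels-as-values
 * extension in the spirit of the kernel's __this_address:
 */
#include <stddef.h>
#include <stdio.h>

typedef void *failaddr_t;

#define THIS_ADDRESS \
	({ __label__ __here; __here: (void)0; (failaddr_t)&&__here; })

static failaddr_t verify_size(size_t len, size_t max)
{
	if (len > max)
		return THIS_ADDRESS;	/* pinpoint the failed check */
	return NULL;			/* NULL means "verified OK" */
}

int main(void)
{
	failaddr_t fa = verify_size(10, 4);

	if (fa)
		printf("verification failed at %p\n", fa);
	return 0;
}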
3509STATIC int
3510xfs_iflush_int(
3511 struct xfs_inode *ip,
3512 struct xfs_buf *bp)
1da177e4 3513{
3514 struct xfs_inode_log_item *iip = ip->i_itemp;
3515 struct xfs_dinode *dip;
3516 struct xfs_mount *mp = ip->i_mount;
1da177e4 3517
579aa9ca 3518 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
474fce06 3519 ASSERT(xfs_isiflocked(ip));
1da177e4 3520 ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
8096b1eb 3521 ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
93848a99 3522 ASSERT(iip != NULL && iip->ili_fields != 0);
263997a6 3523 ASSERT(ip->i_d.di_version > 1);
1da177e4 3524
1da177e4 3525 /* Set dip to point at the inode's location within the buffer. */
88ee2df7 3526 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
1da177e4 3527
69ef921b 3528 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
9e24cfd0 3529 mp, XFS_ERRTAG_IFLUSH_1)) {
6a19d939 3530 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
c9690043 3531 "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
6a19d939 3532 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3533 goto corrupt_out;
3534 }
c19b3b05 3535 if (S_ISREG(VFS_I(ip)->i_mode)) {
3536 if (XFS_TEST_ERROR(
3537 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3538 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
9e24cfd0 3539 mp, XFS_ERRTAG_IFLUSH_3)) {
6a19d939 3540 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
c9690043 3541 "%s: Bad regular inode %Lu, ptr "PTR_FMT,
6a19d939 3542 __func__, ip->i_ino, ip);
3543 goto corrupt_out;
3544 }
c19b3b05 3545 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3546 if (XFS_TEST_ERROR(
3547 (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3548 (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3549 (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
9e24cfd0 3550 mp, XFS_ERRTAG_IFLUSH_4)) {
6a19d939 3551 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
c9690043 3552 "%s: Bad directory inode %Lu, ptr "PTR_FMT,
6a19d939 3553 __func__, ip->i_ino, ip);
3554 goto corrupt_out;
3555 }
3556 }
3557 if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
9e24cfd0 3558 ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3559 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3560 "%s: detected corrupt incore inode %Lu, "
c9690043 3561 "total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
6a19d939 3562 __func__, ip->i_ino,
1da177e4 3563 ip->i_d.di_nextents + ip->i_d.di_anextents,
6a19d939 3564 ip->i_d.di_nblocks, ip);
3565 goto corrupt_out;
3566 }
3567 if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
9e24cfd0 3568 mp, XFS_ERRTAG_IFLUSH_6)) {
6a19d939 3569 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
c9690043 3570 "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
6a19d939 3571 __func__, ip->i_ino, ip->i_d.di_forkoff, ip);
3572 goto corrupt_out;
3573 }
e60896d8 3574
1da177e4 3575 /*
263997a6 3576 * Inode item log recovery for v2 inodes is dependent on the
3577 * di_flushiter count for correct sequencing. We bump the flush
3578 * iteration count so we can detect flushes which postdate a log record
3579 * during recovery. This is redundant, as we now log every change and
3580 * hence this can't happen, but we still need to do it to stay
3581 * backwards compatible with old kernels that predate logging all
3582 * inode changes.
1da177e4 3583 */
3584 if (ip->i_d.di_version < 3)
3585 ip->i_d.di_flushiter++;
1da177e4 3586
3587 /* Check the inline fork data before we write out. */
3588 if (!xfs_inode_verify_forks(ip))
3589 goto corrupt_out;
3590
1da177e4 3591 /*
3592 * Copy the dirty parts of the inode into the on-disk inode. We always
3593 * copy out the core of the inode, because if the inode is dirty at all
3594 * the core must be.
1da177e4 3595 */
93f958f9 3596 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3597
3598 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3599 if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3600 ip->i_d.di_flushiter = 0;
3601
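/*
 * Aside: di_flushiter is a 16-bit sequence counter and the all-ones
 * value (DI_MAX_FLUSH) is reserved, so log recovery can treat a counter
 * that is about to wrap specially. A hedged sketch of bumping a counter
 * while skipping a reserved sentinel (illustrative names):
 */
#include <assert.h>
#include <stdint.h>

#define MAX_FLUSH	0xffffu		/* sentinel: never written out */

static uint16_t bump_flushiter(uint16_t it)
{
	it++;
	if (it == MAX_FLUSH)		/* skip the reserved value */
		it = 0;
	return it;
}

int main(void)
{
	assert(bump_flushiter(3) == 4);
	assert(bump_flushiter(0xfffe) == 0);	/* wraps past the sentinel */
	return 0;
}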
3602 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3603 if (XFS_IFORK_Q(ip))
3604 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3605 xfs_inobp_check(mp, bp);
3606
3607 /*
3608 * We've recorded everything logged in the inode, so we'd like to clear
3609 * the ili_fields bits so we don't log and flush things unnecessarily.
3610 * However, we can't stop logging all this information until the data
3611 * we've copied into the disk buffer is written to disk. If we did we
3612 * might overwrite the copy of the inode in the log with all the data
3613 * after re-logging only part of it, and in the face of a crash we
3614 * wouldn't have all the data we need to recover.
1da177e4 3615 *
3616 * What we do is move the bits to the ili_last_fields field. When
3617 * logging the inode, these bits are moved back to the ili_fields field.
3618 * In the xfs_iflush_done() routine we clear ili_last_fields, since we
3619 * know that the information those bits represent is permanently on
3620 * disk. As long as the flush completes before the inode is logged
3621 * again, then both ili_fields and ili_last_fields will be cleared.
1da177e4 3622 *
3623 * We can play with the ili_fields bits here, because the inode lock
3624 * must be held exclusively in order to set bits there and the flush
3625 * lock protects the ili_last_fields bits. Set ili_logged so the flush
3626 * done routine can tell whether or not to look in the AIL. Also, store
3627 * the current LSN of the inode so that we can tell whether the item has
3628 * moved in the AIL from xfs_iflush_done(). In order to read the lsn we
3629 * need the AIL lock, because it is a 64 bit value that cannot be read
3630 * atomically.
1da177e4 3631 */
3632 iip->ili_last_fields = iip->ili_fields;
3633 iip->ili_fields = 0;
fc0561ce 3634 iip->ili_fsync_fields = 0;
93848a99 3635 iip->ili_logged = 1;
1da177e4 3636
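/*
 * Aside: the ili_fields/ili_last_fields handoff above is a two-stage
 * dirty mask -- bits move from "dirty" to "in flight" when a flush
 * starts, and the in-flight bits are cleared only when the write
 * completes, so re-dirtying during the flush is never lost. A hedged
 * userspace sketch (illustrative names):
 */
#include <assert.h>
#include <stdint.h>

struct tracked {
	uint32_t dirty;		/* modified since the last flush started */
	uint32_t in_flight;	/* captured by a flush, not yet on disk */
};

static void flush_start(struct tracked *t)
{
	t->in_flight = t->dirty;	/* hand the dirty bits to the flush */
	t->dirty = 0;
}

static void flush_done(struct tracked *t)
{
	t->in_flight = 0;	/* those bits are now safely on disk */
}

int main(void)
{
	struct tracked t = { .dirty = 0x3 };

	flush_start(&t);
	t.dirty |= 0x4;			/* re-dirtied mid-flush */
	flush_done(&t);
	assert(t.dirty == 0x4);		/* still needs another flush */
	assert(t.in_flight == 0);
	return 0;
}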
3637 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3638 &iip->ili_item.li_lsn);
1da177e4 3639
3640 /*
3641 * Attach the function xfs_iflush_done to the inode's
3642 * buffer. This will remove the inode from the AIL
3643 * and unlock the inode's flush lock when the inode is
3644 * completely written to disk.
3645 */
3646 xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
1da177e4 3647
3648 /* generate the checksum. */
3649 xfs_dinode_calc_crc(mp, dip);
1da177e4 3650
643c8c05 3651 ASSERT(!list_empty(&bp->b_li_list));
93848a99 3652 ASSERT(bp->b_iodone != NULL);
3653 return 0;
3654
3655corrupt_out:
2451337d 3656 return -EFSCORRUPTED;
1da177e4 3657}