// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#include <linux/iversion.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_dir2.h"
#include "xfs_attr.h"
#include "xfs_trans_space.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode_item.h"
#include "xfs_ialloc.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_quota.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_symlink.h"
#include "xfs_trans_priv.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"
#include "xfs_reflink.h"
#include "xfs_ag.h"
struct kmem_cache *xfs_inode_cache;

/*
 * Used in xfs_itruncate_extents().  This is the maximum number of extents
 * freed from a file in a single transaction.
 */
#define	XFS_ITRUNC_MAX_EXTENTS	2

STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *);
STATIC int xfs_iunlink_remove(struct xfs_trans *tp, struct xfs_perag *pag,
	struct xfs_inode *ip);
/*
 * Helper function to extract the extent size hint from an inode.
 */
xfs_extlen_t
xfs_get_extsz_hint(
	struct xfs_inode	*ip)
{
	/*
	 * No point in aligning allocations if we need to COW to actually
	 * allocate blocks for them.
	 */
	if (xfs_is_always_cow_inode(ip))
		return 0;
	if ((ip->i_diflags & XFS_DIFLAG_EXTSIZE) && ip->i_extsize)
		return ip->i_extsize;
	if (XFS_IS_REALTIME_INODE(ip))
		return ip->i_mount->m_sb.sb_rextsize;
	return 0;
}
/*
 * Helper function to extract the CoW extent size hint from an inode.
 * Between the extent size hint and the CoW extent size hint, we
 * return the greater of the two.  If the value is zero (automatic),
 * use the default size.
 */
xfs_extlen_t
xfs_get_cowextsz_hint(
	struct xfs_inode	*ip)
{
	xfs_extlen_t		a, b;

	a = 0;
	if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
		a = ip->i_cowextsize;
	b = xfs_get_extsz_hint(ip);

	a = max(a, b);
	if (a == 0)
		return XFS_DEFAULT_COWEXTSZ_HINT;
	return a;
}
/*
 * These two are wrapper routines around the xfs_ilock() routine used to
 * centralize some grungy code.  They are used in places that wish to lock the
 * inode solely for reading the extents.  The reason these places can't just
 * call xfs_ilock(ip, XFS_ILOCK_SHARED) is that the inode lock also guards the
 * bringing in of the extents from disk for a file in b-tree format.  If the
 * inode is in b-tree format, then we need to lock the inode exclusively until
 * the extents are read in.  Locking it exclusively all the time would limit
 * our parallelism unnecessarily, though.  What we do instead is check to see
 * if the extents have been read in yet, and only lock the inode exclusively
 * if they have not.
 *
 * The functions return a value which should be given to the corresponding
 * xfs_iunlock() call.
 */
uint
xfs_ilock_data_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (xfs_need_iread_extents(&ip->i_df))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
uint
xfs_ilock_attr_map_shared(
	struct xfs_inode	*ip)
{
	uint			lock_mode = XFS_ILOCK_SHARED;

	if (ip->i_afp && xfs_need_iread_extents(ip->i_afp))
		lock_mode = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lock_mode);
	return lock_mode;
}
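
/*
 * Illustrative usage sketch, not part of the original file: callers pair
 * the returned lock mode with xfs_iunlock() so that whichever of the
 * shared or exclusive lock was taken above is dropped correctly:
 *
 *	uint	lock_mode = xfs_ilock_data_map_shared(ip);
 *
 *	// ... read the extent list from ip->i_df ...
 *	xfs_iunlock(ip, lock_mode);
 */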
/*
 * In addition to i_rwsem in the VFS inode, the xfs inode contains 2
 * multi-reader locks: invalidate_lock and the i_lock.  This routine allows
 * various combinations of the locks to be obtained.
 *
 * The 3 locks should always be ordered so that the IO lock is obtained first,
 * the mmap lock second and the ilock last in order to prevent deadlock.
 *
 * Basic locking order:
 *
 * i_rwsem -> invalidate_lock -> page_lock -> i_ilock
 *
 * mmap_lock locking order:
 *
 * i_rwsem -> page lock -> mmap_lock
 * mmap_lock -> invalidate_lock -> page_lock
 *
 * The difference in mmap_lock locking order means that we cannot hold the
 * invalidate_lock over syscall based read(2)/write(2) based IO. These IO paths
 * can fault in pages during copy in/out (for buffered IO) or require the
 * mmap_lock in get_user_pages() to map the user pages into the kernel address
 * space for direct IO. Similarly the i_rwsem cannot be taken inside a page
 * fault because page faults already hold the mmap_lock.
 *
 * Hence to serialise fully against both syscall and mmap based IO, we need to
 * take both the i_rwsem and the invalidate_lock. These locks should *only* be
 * both taken in places where we need to invalidate the page cache in a race
 * free manner (e.g. truncate, hole punch and other extent manipulation
 * functions).
 */
void
xfs_ilock(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_rwsem,
				XFS_IOLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_rwsem,
				XFS_IOLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		down_write_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				XFS_MMAPLOCK_DEP(lock_flags));
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		down_read_nested(&VFS_I(ip)->i_mapping->invalidate_lock,
				XFS_MMAPLOCK_DEP(lock_flags));
	}

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
}
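
/*
 * Illustrative sketch (an assumption, not a verbatim caller from this
 * file): a path that must invalidate the page cache in a race-free
 * manner, per the comment above, takes both the IO lock and the mmap
 * lock in order and drops them together:
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 *	// ... truncate / punch hole / other extent manipulation ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);
 */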
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_rwsem))
			goto out;
	}

	if (lock_flags & XFS_MMAPLOCK_EXCL) {
		if (!down_write_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_MMAPLOCK_SHARED) {
		if (!down_read_trylock(&VFS_I(ip)->i_mapping->invalidate_lock))
			goto out_undo_iolock;
	}

	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_mmaplock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_mmaplock;
	}
	return 1;

out_undo_mmaplock:
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);
out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
out:
	return 0;
}
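
/*
 * Hedged example of the nonblocking pattern (hypothetical caller, not
 * from this file): when the trylock fails, back out and let the caller
 * retry or fall back rather than sleeping on the lock:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return -EAGAIN;		// caller retries later
 */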
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
		(XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL)) !=
		(XFS_MMAPLOCK_SHARED | XFS_MMAPLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
		(XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_SUBCLASS_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);

	if (lock_flags & XFS_MMAPLOCK_EXCL)
		up_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	else if (lock_flags & XFS_MMAPLOCK_SHARED)
		up_read(&VFS_I(ip)->i_mapping->invalidate_lock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * Give up write locks.  The i/o lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags &
		~(XFS_IOLOCK_EXCL|XFS_MMAPLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_MMAPLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_mapping->invalidate_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		downgrade_write(&VFS_I(ip)->i_rwsem);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
#if defined(DEBUG) || defined(XFS_WARN)
static inline bool
__xfs_rwsem_islocked(
	struct rw_semaphore	*rwsem,
	bool			shared)
{
	if (!debug_locks)
		return rwsem_is_locked(rwsem);

	if (!shared)
		return lockdep_is_held_type(rwsem, 0);

	/*
	 * We are checking that the lock is held at least in shared
	 * mode but don't care that it might be held exclusively
	 * (i.e. shared | excl). Hence we check if the lock is held
	 * in any mode rather than an explicit shared mode.
	 */
	return lockdep_is_held_type(rwsem, -1);
}
bool
xfs_isilocked(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_MMAPLOCK_EXCL|XFS_MMAPLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_mapping->invalidate_lock,
				(lock_flags & XFS_MMAPLOCK_SHARED));
	}

	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
		return __xfs_rwsem_islocked(&VFS_I(ip)->i_rwsem,
				(lock_flags & XFS_IOLOCK_SHARED));
	}

	ASSERT(0);
	return false;
}
#endif
/*
 * xfs_lockdep_subclass_ok() is only used in an ASSERT, so is only called when
 * DEBUG or XFS_WARN is set. And MAX_LOCKDEP_SUBCLASSES is then only defined
 * when CONFIG_LOCKDEP is set. Hence the complex define below to avoid build
 * errors and warnings.
 */
#if (defined(DEBUG) || defined(XFS_WARN)) && defined(CONFIG_LOCKDEP)
static bool
xfs_lockdep_subclass_ok(
	int subclass)
{
	return subclass < MAX_LOCKDEP_SUBCLASSES;
}
#else
#define xfs_lockdep_subclass_ok(subclass)	(true)
#endif
/*
 * Bump the subclass so xfs_lock_inodes() acquires each lock with a different
 * value. This can be called for any type of inode lock combination, including
 * parent locking. Care must be taken to ensure we don't overrun the subclass
 * storage fields in the class mask we build.
 */
static inline int
xfs_lock_inumorder(int lock_mode, int subclass)
{
	int	class = 0;

	ASSERT(!(lock_mode & (XFS_ILOCK_PARENT | XFS_ILOCK_RTBITMAP |
			      XFS_ILOCK_RTSUM)));
	ASSERT(xfs_lockdep_subclass_ok(subclass));

	if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)) {
		ASSERT(subclass <= XFS_IOLOCK_MAX_SUBCLASS);
		class += subclass << XFS_IOLOCK_SHIFT;
	}

	if (lock_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)) {
		ASSERT(subclass <= XFS_MMAPLOCK_MAX_SUBCLASS);
		class += subclass << XFS_MMAPLOCK_SHIFT;
	}

	if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) {
		ASSERT(subclass <= XFS_ILOCK_MAX_SUBCLASS);
		class += subclass << XFS_ILOCK_SHIFT;
	}

	return (lock_mode & ~XFS_LOCK_SUBCLASS_MASK) | class;
}
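
/*
 * Worked example (illustrative): for lock_mode == XFS_ILOCK_EXCL and
 * subclass == 2 (the third inode locked by xfs_lock_inodes()), the
 * result is XFS_ILOCK_EXCL with (2 << XFS_ILOCK_SHIFT) folded into the
 * subclass bits, so lockdep sees each inode's ilock as a distinct
 * subclass instead of a recursive acquisition of the same class.
 */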
/*
 * The following routine will lock n inodes in exclusive mode.  We assume the
 * caller calls us with the inodes in i_ino order.
 *
 * We need to detect deadlock where an inode that we lock is in the AIL and we
 * start waiting for another inode that is locked by a thread in a long running
 * transaction (such as truncate). This can result in deadlock since the long
 * running trans might need to wait for the inode we just locked in order to
 * push the tail and free space in the log.
 *
 * xfs_lock_inodes() can only be used to lock one type of lock at a time -
 * the iolock, the mmaplock or the ilock, but not more than one at a time. If we
 * lock more than one at a time, lockdep will report false positives saying we
 * have violated locking orders.
 */
static void
xfs_lock_inodes(
	struct xfs_inode	**ips,
	int			inodes,
	uint			lock_mode)
{
	int			attempts = 0, i, j, try_lock;
	struct xfs_log_item	*lp;

	/*
	 * Currently supports between 2 and 5 inodes with exclusive locking. We
	 * support an arbitrary depth of locking here, but absolute limits on
	 * inodes depend on the type of locking and the limits placed by
	 * lockdep annotations in xfs_lock_inumorder.  These are all checked by
	 * ASSERT.
	 */
	ASSERT(ips && inodes >= 2 && inodes <= 5);
	ASSERT(lock_mode & (XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL |
			    XFS_ILOCK_EXCL));
	ASSERT(!(lock_mode & (XFS_IOLOCK_SHARED | XFS_MMAPLOCK_SHARED |
			      XFS_ILOCK_SHARED)));
	ASSERT(!(lock_mode & XFS_MMAPLOCK_EXCL) ||
		inodes <= XFS_MMAPLOCK_MAX_SUBCLASS + 1);
	ASSERT(!(lock_mode & XFS_ILOCK_EXCL) ||
		inodes <= XFS_ILOCK_MAX_SUBCLASS + 1);

	if (lock_mode & XFS_IOLOCK_EXCL) {
		ASSERT(!(lock_mode & (XFS_MMAPLOCK_EXCL | XFS_ILOCK_EXCL)));
	} else if (lock_mode & XFS_MMAPLOCK_EXCL)
		ASSERT(!(lock_mode & XFS_ILOCK_EXCL));

	try_lock = 0;
	i = 0;
again:
	for (; i < inodes; i++) {
		ASSERT(ips[i]);

		if (i && (ips[i] == ips[i - 1]))	/* Already locked */
			continue;

		/*
		 * If try_lock is not set yet, make sure all locked inodes are
		 * not in the AIL.  If any are, set try_lock to be used later.
		 */
		if (!try_lock) {
			for (j = (i - 1); j >= 0 && !try_lock; j--) {
				lp = &ips[j]->i_itemp->ili_item;
				if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags))
					try_lock++;
			}
		}

		/*
		 * If any of the previous locks we have locked is in the AIL,
		 * we must TRY to get the second and subsequent locks. If
		 * we can't get any, we must release all we have
		 * and try again.
		 */
		if (!try_lock) {
			xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
			continue;
		}

		/* try_lock means we have an inode locked that is in the AIL. */
		ASSERT(i != 0);
		if (xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i)))
			continue;

		/*
		 * Unlock all previous guys and try again.  xfs_iunlock will try
		 * to push the tail if the inode is in the AIL.
		 */
		attempts++;
		for (j = i - 1; j >= 0; j--) {
			/*
			 * Check to see if we've already unlocked this one.  Not
			 * the first one going back, and the inode ptr is the
			 * same.
			 */
			if (j != (i - 1) && ips[j] == ips[j + 1])
				continue;

			xfs_iunlock(ips[j], lock_mode);
		}

		if ((attempts % 5) == 0) {
			delay(1); /* Don't just spin the CPU */
		}
		i = 0;
		try_lock = 0;
		goto again;
	}
}
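
/*
 * Hedged usage sketch (hypothetical caller, not from this file): lock a
 * pair of inodes exclusively, sorted by inode number as the comment
 * above requires:
 *
 *	struct xfs_inode *ips[2] = { ip_lo, ip_hi };	// ip_lo->i_ino < ip_hi->i_ino
 *
 *	xfs_lock_inodes(ips, 2, XFS_ILOCK_EXCL);
 */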
/*
 * xfs_lock_two_inodes() can only be used to lock ilock. The iolock and
 * mmaplock must be double-locked separately since we use i_rwsem and
 * invalidate_lock for that. We now support taking one lock EXCL and the
 * other SHARED.
 */
void
xfs_lock_two_inodes(
	struct xfs_inode	*ip0,
	uint			ip0_mode,
	struct xfs_inode	*ip1,
	uint			ip1_mode)
{
	int			attempts = 0;
	struct xfs_log_item	*lp;

	ASSERT(hweight32(ip0_mode) == 1);
	ASSERT(hweight32(ip1_mode) == 1);
	ASSERT(!(ip0_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)));
	ASSERT(!(ip0_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(!(ip1_mode & (XFS_MMAPLOCK_SHARED|XFS_MMAPLOCK_EXCL)));
	ASSERT(ip0->i_ino != ip1->i_ino);

	if (ip0->i_ino > ip1->i_ino) {
		swap(ip0, ip1);
		swap(ip0_mode, ip1_mode);
	}

again:
	xfs_ilock(ip0, xfs_lock_inumorder(ip0_mode, 0));

	/*
	 * If the first lock we have locked is in the AIL, we must TRY to get
	 * the second lock. If we can't get it, we must release the first one
	 * and try again.
	 */
	lp = &ip0->i_itemp->ili_item;
	if (lp && test_bit(XFS_LI_IN_AIL, &lp->li_flags)) {
		if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(ip1_mode, 1))) {
			xfs_iunlock(ip0, ip0_mode);
			if ((++attempts % 5) == 0)
				delay(1); /* Don't just spin the CPU */
			goto again;
		}
	} else {
		xfs_ilock(ip1, xfs_lock_inumorder(ip1_mode, 1));
	}
}
uint
xfs_ip2xflags(
	struct xfs_inode	*ip)
{
	uint			flags = 0;

	if (ip->i_diflags & XFS_DIFLAG_ANY) {
		if (ip->i_diflags & XFS_DIFLAG_REALTIME)
			flags |= FS_XFLAG_REALTIME;
		if (ip->i_diflags & XFS_DIFLAG_PREALLOC)
			flags |= FS_XFLAG_PREALLOC;
		if (ip->i_diflags & XFS_DIFLAG_IMMUTABLE)
			flags |= FS_XFLAG_IMMUTABLE;
		if (ip->i_diflags & XFS_DIFLAG_APPEND)
			flags |= FS_XFLAG_APPEND;
		if (ip->i_diflags & XFS_DIFLAG_SYNC)
			flags |= FS_XFLAG_SYNC;
		if (ip->i_diflags & XFS_DIFLAG_NOATIME)
			flags |= FS_XFLAG_NOATIME;
		if (ip->i_diflags & XFS_DIFLAG_NODUMP)
			flags |= FS_XFLAG_NODUMP;
		if (ip->i_diflags & XFS_DIFLAG_RTINHERIT)
			flags |= FS_XFLAG_RTINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			flags |= FS_XFLAG_PROJINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NOSYMLINKS)
			flags |= FS_XFLAG_NOSYMLINKS;
		if (ip->i_diflags & XFS_DIFLAG_EXTSIZE)
			flags |= FS_XFLAG_EXTSIZE;
		if (ip->i_diflags & XFS_DIFLAG_EXTSZINHERIT)
			flags |= FS_XFLAG_EXTSZINHERIT;
		if (ip->i_diflags & XFS_DIFLAG_NODEFRAG)
			flags |= FS_XFLAG_NODEFRAG;
		if (ip->i_diflags & XFS_DIFLAG_FILESTREAM)
			flags |= FS_XFLAG_FILESTREAM;
	}

	if (ip->i_diflags2 & XFS_DIFLAG2_ANY) {
		if (ip->i_diflags2 & XFS_DIFLAG2_DAX)
			flags |= FS_XFLAG_DAX;
		if (ip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE)
			flags |= FS_XFLAG_COWEXTSIZE;
	}

	if (XFS_IFORK_Q(ip))
		flags |= FS_XFLAG_HASATTR;
	return flags;
}
/*
 * Looks up an inode from "name". If ci_name is not NULL, then a CI match
 * is allowed, otherwise it has to be an exact match. If a CI match is found,
 * ci_name->name will point to the actual name (caller must free) or
 * will be set to NULL if an exact match is found.
 */
int
xfs_lookup(
	struct xfs_inode	*dp,
	const struct xfs_name	*name,
	struct xfs_inode	**ipp,
	struct xfs_name		*ci_name)
{
	xfs_ino_t		inum;
	int			error;

	trace_xfs_lookup(dp, name);

	if (xfs_is_shutdown(dp->i_mount))
		return -EIO;

	error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
	if (error)
		goto out_unlock;

	error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
	if (error)
		goto out_free_name;

	return 0;

out_free_name:
	if (ci_name)
		kmem_free(ci_name->name);
out_unlock:
	*ipp = NULL;
	return error;
}
/* Propagate di_flags from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	unsigned int		di_flags = 0;
	xfs_failaddr_t		failaddr;
	umode_t			mode = VFS_I(ip)->i_mode;

	if (S_ISDIR(mode)) {
		if (pip->i_diflags & XFS_DIFLAG_RTINHERIT)
			di_flags |= XFS_DIFLAG_RTINHERIT;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSZINHERIT;
			ip->i_extsize = pip->i_extsize;
		}
		if (pip->i_diflags & XFS_DIFLAG_PROJINHERIT)
			di_flags |= XFS_DIFLAG_PROJINHERIT;
	} else if (S_ISREG(mode)) {
		if ((pip->i_diflags & XFS_DIFLAG_RTINHERIT) &&
		    xfs_has_realtime(ip->i_mount))
			di_flags |= XFS_DIFLAG_REALTIME;
		if (pip->i_diflags & XFS_DIFLAG_EXTSZINHERIT) {
			di_flags |= XFS_DIFLAG_EXTSIZE;
			ip->i_extsize = pip->i_extsize;
		}
	}
	if ((pip->i_diflags & XFS_DIFLAG_NOATIME) &&
	    xfs_inherit_noatime)
		di_flags |= XFS_DIFLAG_NOATIME;
	if ((pip->i_diflags & XFS_DIFLAG_NODUMP) &&
	    xfs_inherit_nodump)
		di_flags |= XFS_DIFLAG_NODUMP;
	if ((pip->i_diflags & XFS_DIFLAG_SYNC) &&
	    xfs_inherit_sync)
		di_flags |= XFS_DIFLAG_SYNC;
	if ((pip->i_diflags & XFS_DIFLAG_NOSYMLINKS) &&
	    xfs_inherit_nosymlinks)
		di_flags |= XFS_DIFLAG_NOSYMLINKS;
	if ((pip->i_diflags & XFS_DIFLAG_NODEFRAG) &&
	    xfs_inherit_nodefrag)
		di_flags |= XFS_DIFLAG_NODEFRAG;
	if (pip->i_diflags & XFS_DIFLAG_FILESTREAM)
		di_flags |= XFS_DIFLAG_FILESTREAM;

	ip->i_diflags |= di_flags;

	/*
	 * Inode verifiers on older kernels only check that the extent size
	 * hint is an integer multiple of the rt extent size on realtime files.
	 * They did not check the hint alignment on a directory with both
	 * rtinherit and extszinherit flags set.  If the misaligned hint is
	 * propagated from a directory into a new realtime file, new file
	 * allocations will fail due to math errors in the rt allocator and/or
	 * trip the verifiers.  Validate the hint settings in the new file so
	 * that we don't let broken hints propagate.
	 */
	failaddr = xfs_inode_validate_extsize(ip->i_mount, ip->i_extsize,
			VFS_I(ip)->i_mode, ip->i_diflags);
	if (failaddr) {
		ip->i_diflags &= ~(XFS_DIFLAG_EXTSIZE |
				   XFS_DIFLAG_EXTSZINHERIT);
		ip->i_extsize = 0;
	}
}
/* Propagate di_flags2 from a parent inode to a child inode. */
static void
xfs_inode_inherit_flags2(
	struct xfs_inode	*ip,
	const struct xfs_inode	*pip)
{
	xfs_failaddr_t		failaddr;

	if (pip->i_diflags2 & XFS_DIFLAG2_COWEXTSIZE) {
		ip->i_diflags2 |= XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = pip->i_cowextsize;
	}
	if (pip->i_diflags2 & XFS_DIFLAG2_DAX)
		ip->i_diflags2 |= XFS_DIFLAG2_DAX;

	/* Don't let invalid cowextsize hints propagate. */
	failaddr = xfs_inode_validate_cowextsize(ip->i_mount, ip->i_cowextsize,
			VFS_I(ip)->i_mode, ip->i_diflags, ip->i_diflags2);
	if (failaddr) {
		ip->i_diflags2 &= ~XFS_DIFLAG2_COWEXTSIZE;
		ip->i_cowextsize = 0;
	}
}
/*
 * Initialise a newly allocated inode and return the in-core inode to the
 * caller locked exclusively.
 */
int
xfs_init_new_inode(
	struct user_namespace	*mnt_userns,
	struct xfs_trans	*tp,
	struct xfs_inode	*pip,
	xfs_ino_t		ino,
	umode_t			mode,
	xfs_nlink_t		nlink,
	dev_t			rdev,
	prid_t			prid,
	bool			init_xattrs,
	struct xfs_inode	**ipp)
{
	struct inode		*dir = pip ? VFS_I(pip) : NULL;
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_inode	*ip;
	unsigned int		flags;
	int			error;
	struct timespec64	tv;
	struct inode		*inode;

	/*
	 * Protect against obviously corrupt allocation btree records. Later
	 * xfs_iget checks will catch re-allocation of other active in-memory
	 * and on-disk inodes. If we don't catch reallocating the parent inode
	 * here we will deadlock in xfs_iget() so we have to do these checks
	 * first.
	 */
	if ((pip && ino == pip->i_ino) || !xfs_verify_dir_ino(mp, ino)) {
		xfs_alert(mp, "Allocated a known in-use inode 0x%llx!", ino);
		return -EFSCORRUPTED;
	}

	/*
	 * Get the in-core inode with the lock held exclusively to prevent
	 * others from looking at it until we're done.
	 */
	error = xfs_iget(mp, tp, ino, XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	ASSERT(ip != NULL);
	inode = VFS_I(ip);
	set_nlink(inode, nlink);
	inode->i_rdev = rdev;
	ip->i_projid = prid;

	if (dir && !(dir->i_mode & S_ISGID) && xfs_has_grpid(mp)) {
		inode_fsuid_set(inode, mnt_userns);
		inode->i_gid = dir->i_gid;
		inode->i_mode = mode;
	} else {
		inode_init_owner(mnt_userns, inode, dir, mode);
	}

	/*
	 * If the group ID of the new file does not match the effective group
	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
	 * (and only if the irix_sgid_inherit compatibility variable is set).
	 */
	if (irix_sgid_inherit &&
	    (inode->i_mode & S_ISGID) &&
	    !in_group_p(i_gid_into_mnt(mnt_userns, inode)))
		inode->i_mode &= ~S_ISGID;

	ip->i_disk_size = 0;
	ip->i_df.if_nextents = 0;
	ASSERT(ip->i_nblocks == 0);

	tv = current_time(inode);
	inode->i_mtime = tv;
	inode->i_atime = tv;
	inode->i_ctime = tv;

	ip->i_extsize = 0;
	ip->i_diflags = 0;

	if (xfs_has_v3inodes(mp)) {
		inode_set_iversion(inode, 1);
		ip->i_cowextsize = 0;
		ip->i_crtime = tv;
	}

	flags = XFS_ILOG_CORE;
	switch (mode & S_IFMT) {
	case S_IFIFO:
	case S_IFCHR:
	case S_IFBLK:
	case S_IFSOCK:
		ip->i_df.if_format = XFS_DINODE_FMT_DEV;
		flags |= XFS_ILOG_DEV;
		break;
	case S_IFREG:
	case S_IFDIR:
		if (pip && (pip->i_diflags & XFS_DIFLAG_ANY))
			xfs_inode_inherit_flags(ip, pip);
		if (pip && (pip->i_diflags2 & XFS_DIFLAG2_ANY))
			xfs_inode_inherit_flags2(ip, pip);
		fallthrough;
	case S_IFLNK:
		ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
		ip->i_df.if_bytes = 0;
		ip->i_df.if_u1.if_root = NULL;
		break;
	default:
		ASSERT(0);
	}

	/*
	 * If we need to create attributes immediately after allocating the
	 * inode, initialise an empty attribute fork right now. We use the
	 * default fork offset for attributes here as we don't know exactly what
	 * size or how many attributes we might be adding. We can do this
	 * safely here because we know the data fork is completely empty and
	 * this saves us from needing to run a separate transaction to set the
	 * fork offset in the immediate future.
	 */
	if (init_xattrs && xfs_has_attr(mp)) {
		ip->i_forkoff = xfs_default_attroffset(ip) >> 3;
		ip->i_afp = xfs_ifork_alloc(XFS_DINODE_FMT_EXTENTS, 0);
	}

	/*
	 * Log the new values stuffed into the inode.
	 */
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, flags);

	/* now that we have an i_mode we can setup the inode structure */
	xfs_setup_inode(ip);

	*ipp = ip;
	return 0;
}
/*
 * Decrement the link count on an inode & log the change.  If this causes the
 * link count to go to zero, move the inode to AGI unlinked list so that it can
 * be freed when the last active reference goes away via xfs_inactive().
 */
static int			/* error */
xfs_droplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	drop_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (VFS_I(ip)->i_nlink)
		return 0;

	return xfs_iunlink(tp, ip);
}
/*
 * Increment the link count on an inode & log the change.
 */
static void
xfs_bumplink(
	xfs_trans_t *tp,
	xfs_inode_t *ip)
{
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_CHG);

	inc_nlink(VFS_I(ip));
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
}
int
xfs_create(
	struct user_namespace	*mnt_userns,
	struct xfs_inode	*dp,
	struct xfs_name		*name,
	umode_t			mode,
	dev_t			rdev,
	bool			init_xattrs,
	struct xfs_inode	**ipp)
{
	int			is_dir = S_ISDIR(mode);
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	bool			unlock_dp_on_error = false;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	trace_xfs_create(dp, name);

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
			mapped_fsgid(mnt_userns, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	if (is_dir) {
		resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_mkdir;
	} else {
		resblks = XFS_CREATE_SPACE_RES(mp, name->len);
		tres = &M_RES(mp)->tr_create;
	}

	/*
	 * Initially assume that the file does not exist and
	 * reserve the resources for that case.  If that is not
	 * the case we'll drop the one we have and get a more
	 * appropriate transaction later.
	 */
	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error == -ENOSPC) {
		/* flush outstanding delalloc blocks and retry */
		xfs_flush_inodes(mp);
		error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp,
				resblks, &tp);
	}
	if (error)
		goto out_release_dquots;

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	error = xfs_iext_count_may_overflow(dp, XFS_DATA_FORK,
			XFS_IEXT_DIR_MANIP_CNT(mp));
	if (error)
		goto out_trans_cancel;

	/*
	 * A newly created regular or special file just has one directory
	 * entry pointing to them, but a directory also has the "." entry
	 * pointing to itself.
	 */
	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
				is_dir ? 2 : 1, rdev, prid, init_xattrs, &ip);
	if (error)
		goto out_trans_cancel;

	/*
	 * Now we join the directory inode to the transaction.  We do not do it
	 * earlier because xfs_dialloc might commit the previous transaction
	 * (and release all the locks).  An error from here on will result in
	 * the transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
					resblks - XFS_IALLOC_SPACE_RES(mp));
	if (error) {
		ASSERT(error != -ENOSPC);
		goto out_trans_cancel;
	}
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	if (is_dir) {
		error = xfs_dir_init(tp, ip, dp);
		if (error)
			goto out_trans_cancel;

		xfs_bumplink(tp, dp);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * create transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
	return error;
}
int
xfs_create_tmpfile(
	struct user_namespace	*mnt_userns,
	struct xfs_inode	*dp,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_inode	*ip = NULL;
	struct xfs_trans	*tp = NULL;
	int			error;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	struct xfs_trans_res	*tres;
	uint			resblks;
	xfs_ino_t		ino;

	if (xfs_is_shutdown(mp))
		return -EIO;

	prid = xfs_get_initial_prid(dp);

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp, mapped_fsuid(mnt_userns, &init_user_ns),
			mapped_fsgid(mnt_userns, &init_user_ns), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		return error;

	resblks = XFS_IALLOC_SPACE_RES(mp);
	tres = &M_RES(mp)->tr_create_tmpfile;

	error = xfs_trans_alloc_icreate(mp, tres, udqp, gdqp, pdqp, resblks,
			&tp);
	if (error)
		goto out_release_dquots;

	error = xfs_dialloc(&tp, dp->i_ino, mode, &ino);
	if (!error)
		error = xfs_init_new_inode(mnt_userns, tp, dp, ino, mode,
				0, 0, prid, false, &ip);
	if (error)
		goto out_trans_cancel;

	if (xfs_has_wsync(mp))
		xfs_trans_set_sync(tp);

	/*
	 * Attach the dquot(s) to the inodes and modify them incore.
	 * These ids of the inode couldn't have changed since the new
	 * inode has been locked ever since it was created.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	error = xfs_iunlink(tp, ip);
	if (error)
		goto out_trans_cancel;

	error = xfs_trans_commit(tp);
	if (error)
		goto out_release_inode;

	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 out_trans_cancel:
	xfs_trans_cancel(tp);
 out_release_inode:
	/*
	 * Wait until after the current transaction is aborted to finish the
	 * setup of the inode and release the inode.  This prevents recursive
	 * transactions and deadlocks from xfs_inactive.
	 */
	if (ip) {
		xfs_finish_inode_setup(ip);
		xfs_irele(ip);
	}
 out_release_dquots:
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	return error;
}
int
xfs_link(
	xfs_inode_t		*tdp,
	xfs_inode_t		*sip,
	struct xfs_name		*target_name)
{
	xfs_mount_t		*mp = tdp->i_mount;
	xfs_trans_t		*tp;
	int			error, nospace_error = 0;
	int			resblks;

	trace_xfs_link(tdp, target_name);

	ASSERT(!S_ISDIR(VFS_I(sip)->i_mode));

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(sip);
	if (error)
		goto std_return;

	error = xfs_qm_dqattach(tdp);
	if (error)
		goto std_return;

	resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
	error = xfs_trans_alloc_dir(tdp, &M_RES(mp)->tr_link, sip, &resblks,
			&tp, &nospace_error);
	if (error)
		goto std_return;

	error = xfs_iext_count_may_overflow(tdp, XFS_DATA_FORK,
			XFS_IEXT_DIR_MANIP_CNT(mp));
	if (error)
		goto error_return;

	/*
	 * If we are using project inheritance, we only allow hard link
	 * creation in our tree when the project IDs are the same; else
	 * the tree quota mechanism could be circumvented.
	 */
	if (unlikely((tdp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
		     tdp->i_projid != sip->i_projid)) {
		error = -EXDEV;
		goto error_return;
	}

	if (!resblks) {
		error = xfs_dir_canenter(tp, tdp, target_name);
		if (error)
			goto error_return;
	}

	/*
	 * Handle initial link state of O_TMPFILE inode
	 */
	if (VFS_I(sip)->i_nlink == 0) {
		struct xfs_perag	*pag;

		pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, sip->i_ino));
		error = xfs_iunlink_remove(tp, pag, sip);
		xfs_perag_put(pag);
		if (error)
			goto error_return;
	}

	error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
				   resblks);
	if (error)
		goto error_return;
	xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);

	xfs_bumplink(tp, sip);

	/*
	 * If this is a synchronous mount, make sure that the
	 * link transaction goes to disk before returning to
	 * the user.
	 */
	if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
		xfs_trans_set_sync(tp);

	return xfs_trans_commit(tp);

 error_return:
	xfs_trans_cancel(tp);
 std_return:
	if (error == -ENOSPC && nospace_error)
		error = nospace_error;
	return error;
}
/* Clear the reflink flag and the cowblocks tag if possible. */
static void
xfs_itruncate_clear_reflink_flags(
	struct xfs_inode	*ip)
{
	struct xfs_ifork	*dfork;
	struct xfs_ifork	*cfork;

	if (!xfs_is_reflink_inode(ip))
		return;
	dfork = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	cfork = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	if (dfork->if_bytes == 0 && cfork->if_bytes == 0)
		ip->i_diflags2 &= ~XFS_DIFLAG2_REFLINK;
	if (cfork->if_bytes == 0)
		xfs_inode_clear_cowblocks_tag(ip);
}
/*
 * Free up the underlying blocks past new_size.  The new size must be smaller
 * than the current size.  This routine can be used both for the attribute and
 * data fork, and does not modify the inode size, which is left to the caller.
 *
 * The transaction passed to this routine must have made a permanent log
 * reservation of at least XFS_ITRUNCATE_LOG_RES.  This routine may commit the
 * given transaction and start new ones, so make sure everything involved in
 * the transaction is tidy before calling here.  Some transaction will be
 * returned to the caller to be committed.  The incoming transaction must
 * already include the inode, and both inode locks must be held exclusively.
 * The inode must also be "held" within the transaction.  On return the inode
 * will be "held" within the returned transaction.  This routine does NOT
 * require any disk space to be reserved for it within the transaction.
 *
 * If we get an error, we must return with the inode locked and linked into the
 * current transaction. This keeps things simple for the higher level code,
 * because it always knows that the inode is locked and held in the transaction
 * that returns to it whether errors occur or not.  We don't mark the inode
 * dirty on error so that transactions can be easily aborted if possible.
 */
int
xfs_itruncate_extents_flags(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp = *tpp;
	xfs_fileoff_t		first_unmap_block;
	xfs_filblks_t		unmap_len;
	int			error = 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(!atomic_read(&VFS_I(ip)->i_count) ||
	       xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(new_size <= XFS_ISIZE(ip));
	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(ip->i_itemp != NULL);
	ASSERT(ip->i_itemp->ili_lock_flags == 0);
	ASSERT(!XFS_NOT_DQATTACHED(mp, ip));

	trace_xfs_itruncate_extents_start(ip, new_size);

	flags |= xfs_bmapi_aflag(whichfork);

	/*
	 * Since it is possible for space to become allocated beyond
	 * the end of the file (in a crash where the space is allocated
	 * but the inode size is not yet updated), simply remove any
	 * blocks which show up between the new EOF and the maximum
	 * possible file size.
	 *
	 * We have to free all the blocks to the bmbt maximum offset, even if
	 * the page cache can't scale that far.
	 */
	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
	if (!xfs_verify_fileoff(mp, first_unmap_block)) {
		WARN_ON_ONCE(first_unmap_block > XFS_MAX_FILEOFF);
		return 0;
	}

	unmap_len = XFS_MAX_FILEOFF - first_unmap_block + 1;
	while (unmap_len > 0) {
		ASSERT(tp->t_firstblock == NULLFSBLOCK);
		error = __xfs_bunmapi(tp, ip, first_unmap_block, &unmap_len,
				flags, XFS_ITRUNC_MAX_EXTENTS);
		if (error)
			goto out;

		/* free the just unmapped extents */
		error = xfs_defer_finish(&tp);
		if (error)
			goto out;
	}

	if (whichfork == XFS_DATA_FORK) {
		/* Remove all pending CoW reservations. */
		error = xfs_reflink_cancel_cow_blocks(ip, &tp,
				first_unmap_block, XFS_MAX_FILEOFF, true);
		if (error)
			goto out;

		xfs_itruncate_clear_reflink_flags(ip);
	}

	/*
	 * Always re-log the inode so that our permanent transaction can keep
	 * on rolling it forward in the log.
	 */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	trace_xfs_itruncate_extents_end(ip, new_size);

out:
	*tpp = tp;
	return error;
}
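
/*
 * Hedged usage sketch (simplified, an assumption rather than a verbatim
 * caller): the caller owns a permanent transaction and holds the inode
 * locked and joined before truncating the data fork:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	xfs_trans_ijoin(tp, ip, 0);
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *	// on success, commit whatever transaction came back in tp
 */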
int
xfs_release(
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!S_ISREG(VFS_I(ip)->i_mode) || (VFS_I(ip)->i_mode == 0))
		return 0;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (xfs_is_readonly(mp))
		return 0;

	if (!xfs_is_shutdown(mp)) {
		int truncated;

		/*
		 * If we previously truncated this file and removed old data
		 * in the process, we want to initiate "early" writeout on
		 * the last close.  This is an attempt to combat the notorious
		 * NULL files problem which is particularly noticeable from a
		 * truncate down, buffered (re-)write (delalloc), followed by
		 * a crash.  What we are effectively doing here is
		 * significantly reducing the time window where we'd otherwise
		 * be exposed to that problem.
		 */
		truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
		if (truncated) {
			xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
			if (ip->i_delayed_blks > 0) {
				error = filemap_flush(VFS_I(ip)->i_mapping);
				if (error)
					return error;
			}
		}
	}

	if (VFS_I(ip)->i_nlink == 0)
		return 0;

	/*
	 * If we can't get the iolock just skip truncating the blocks past EOF
	 * because we could deadlock with the mmap_lock otherwise. We'll get
	 * another chance to drop them once the last reference to the inode is
	 * dropped, so we'll never leak blocks permanently.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
		return 0;

	if (xfs_can_free_eofblocks(ip, false)) {
		/*
		 * Check if the inode is being opened, written and closed
		 * frequently and we have delayed allocation blocks outstanding
		 * (e.g. streaming writes from the NFS server), truncating the
		 * blocks past EOF will cause fragmentation to occur.
		 *
		 * In this case don't do the truncation, but we have to be
		 * careful how we detect this case. Blocks beyond EOF show up as
		 * i_delayed_blks even when the inode is clean, so we need to
		 * truncate them away first before checking for a dirty release.
		 * Hence on the first dirty close we will still remove the
		 * speculative allocation, but after that we will leave it in
		 * place.
		 */
		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
			goto out_unlock;

		error = xfs_free_eofblocks(ip);
		if (error)
			goto out_unlock;

		/* delalloc blocks after truncation means it really is dirty */
		if (ip->i_delayed_blks)
			xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}
/*
 * xfs_inactive_truncate
 *
 * Called to perform a truncate when an inode becomes unlinked.
 */
STATIC int
xfs_inactive_truncate(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0, &tp);
	if (error) {
		ASSERT(xfs_is_shutdown(mp));
		return error;
	}
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	/*
	 * Log the inode size first to prevent stale data exposure in the event
	 * of a system crash before the truncate completes. See the related
	 * comment in xfs_vn_setattr_size() for details.
	 */
	ip->i_disk_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error)
		goto error_trans_cancel;

	ASSERT(ip->i_df.if_nextents == 0);

	error = xfs_trans_commit(tp);
	if (error)
		goto error_unlock;

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return 0;

error_trans_cancel:
	xfs_trans_cancel(tp);
error_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}
/*
 * xfs_inactive_ifree()
 *
 * Perform the inode free when an inode is unlinked.
 */
STATIC int
xfs_inactive_ifree(
	struct xfs_inode *ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/*
	 * We try to use a per-AG reservation for any block needed by the finobt
	 * tree, but as the finobt feature predates the per-AG reservation
	 * support a degraded file system might not have enough space for the
	 * reservation at mount time.  In that case try to dip into the reserved
	 * pool and pray.
	 *
	 * Send a warning if the reservation does happen to fail, as the inode
	 * now remains allocated and sits on the unlinked list until the fs is
	 * repaired.
	 */
	if (unlikely(mp->m_finobt_nores)) {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
				&tp);
	} else {
		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
	}
	if (error) {
		if (error == -ENOSPC) {
			xfs_warn_ratelimited(mp,
			"Failed to remove inode(s) from unlinked list. "
			"Please free space, unmount and run xfs_repair.");
		} else {
			ASSERT(xfs_is_shutdown(mp));
		}
		return error;
	}

	/*
	 * We do not hold the inode locked across the entire rolling transaction
	 * here. We only need to hold it for the first transaction that
	 * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
	 * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
	 * here breaks the relationship between cluster buffer invalidation and
	 * stale inode invalidation on cluster buffer item journal commit
	 * completion, and can result in leaving dirty stale inodes hanging
	 * around in memory.
	 *
	 * We have no need for serialising this inode operation against other
	 * operations - we freed the inode and hence reallocation is required
	 * and that will serialise on reallocating the space the deferops need
	 * to free. Hence we can unlock the inode on the first commit of
	 * the transaction rather than roll it right through the deferops. This
	 * avoids relogging the XFS_ISTALE inode.
	 *
	 * We check that xfs_ifree() hasn't grown an internal transaction roll
	 * by asserting that the inode is still locked when it returns.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);

	error = xfs_ifree(tp, ip);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (error) {
		/*
		 * If we fail to free the inode, shut down.  The cancel
		 * might do that, we need to make sure.  Otherwise the
		 * inode might be lost for a long time or forever.
		 */
		if (!xfs_is_shutdown(mp)) {
			xfs_notice(mp, "%s: xfs_ifree returned error %d",
				__func__, error);
			xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
		}
		xfs_trans_cancel(tp);
		return error;
	}

	/*
	 * Credit the quota account(s). The inode is gone.
	 */
	xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);

	/*
	 * Just ignore errors at this point.  There is nothing we can do except
	 * to try to keep going. Make sure it's not a silent error.
	 */
	error = xfs_trans_commit(tp);
	if (error)
		xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
			__func__, error);

	return 0;
}
/*
 * Returns true if we need to update the on-disk metadata before we can free
 * the memory used by this inode.  Updates include freeing post-eof
 * preallocations; freeing COW staging extents; and marking the inode free in
 * the inobt if it is on the unlinked list.
 */
bool
xfs_inode_needs_inactive(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*cow_ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (VFS_I(ip)->i_mode == 0)
		return false;

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (xfs_is_readonly(mp))
		return false;

	/* If the log isn't running, push inodes straight to reclaim. */
	if (xfs_is_shutdown(mp) || xfs_has_norecovery(mp))
		return false;

	/* Metadata inodes require explicit resource cleanup. */
	if (xfs_is_metadata_inode(ip))
		return false;

	/* Want to clean out the cow blocks if there are any. */
	if (cow_ifp && cow_ifp->if_bytes > 0)
		return true;

	/* Unlinked files must be freed. */
	if (VFS_I(ip)->i_nlink == 0)
		return true;

	/*
	 * This file isn't being freed, so check if there are post-eof blocks
	 * to free.  @force is true because we are evicting an inode from the
	 * cache.  Post-eof blocks must be freed, lest we end up with broken
	 * free space accounting.
	 *
	 * Note: don't bother with iolock here since lockdep complains about
	 * acquiring it in reclaim context. We have the only reference to the
	 * inode at this point anyways.
	 */
	return xfs_can_free_eofblocks(ip, true);
}
/*
 * xfs_inactive
 *
 * This is called when the vnode reference count for the vnode
 * goes to zero.  If the file has been unlinked, then it must
 * now be truncated.  Also, we clear all of the read-ahead state
 * kept for the inode here since the file is now closed.
 */
void
xfs_inactive(
	xfs_inode_t	*ip)
{
	struct xfs_mount	*mp;
	int			error;
	int			truncate = 0;

	/*
	 * If the inode is already free, then there can be nothing
	 * to clean up here.
	 */
	if (VFS_I(ip)->i_mode == 0) {
		ASSERT(ip->i_df.if_broot_bytes == 0);
		goto out;
	}

	mp = ip->i_mount;
	ASSERT(!xfs_iflags_test(ip, XFS_IRECOVERY));

	/* If this is a read-only mount, don't do this (would generate I/O) */
	if (xfs_is_readonly(mp))
		goto out;

	/* Metadata inodes require explicit resource cleanup. */
	if (xfs_is_metadata_inode(ip))
		goto out;

	/* Try to clean out the cow blocks if there are any. */
	if (xfs_inode_has_cow_data(ip))
		xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);

	if (VFS_I(ip)->i_nlink != 0) {
		/*
		 * force is true because we are evicting an inode from the
		 * cache. Post-eof blocks must be freed, lest we end up with
		 * broken free space accounting.
		 *
		 * Note: don't bother with iolock here since lockdep complains
		 * about acquiring it in reclaim context. We have the only
		 * reference to the inode at this point anyways.
		 */
		if (xfs_can_free_eofblocks(ip, true))
			xfs_free_eofblocks(ip);

		goto out;
	}

	if (S_ISREG(VFS_I(ip)->i_mode) &&
	    (ip->i_disk_size != 0 || XFS_ISIZE(ip) != 0 ||
	     ip->i_df.if_nextents > 0 || ip->i_delayed_blks > 0))
		truncate = 1;

	error = xfs_qm_dqattach(ip);
	if (error)
		goto out;

	if (S_ISLNK(VFS_I(ip)->i_mode))
		error = xfs_inactive_symlink(ip);
	else if (truncate)
		error = xfs_inactive_truncate(ip);
	if (error)
		goto out;

	/*
	 * If there are attributes associated with the file then blow them away
	 * now.  The code calls a routine that recursively deconstructs the
	 * attribute fork.  It also blows away the in-core attribute fork.
	 */
	if (XFS_IFORK_Q(ip)) {
		error = xfs_attr_inactive(ip);
		if (error)
			goto out;
	}

	ASSERT(!ip->i_afp);
	ASSERT(ip->i_forkoff == 0);

	/* Free the inode. */
	xfs_inactive_ifree(ip);

out:
	/*
	 * We're done making metadata updates for this inode, so we can release
	 * the attached dquots.
	 */
	xfs_qm_dqdetach(ip);
}
/*
 * In-Core Unlinked List Lookups
 * =============================
 *
 * Every inode is supposed to be reachable from some other piece of metadata
 * with the exception of the root directory.  Inodes with a connection to a
 * file descriptor but not linked from anywhere in the on-disk directory tree
 * are collectively known as unlinked inodes, though the filesystem itself
 * maintains links to these inodes so that on-disk metadata are consistent.
 *
 * XFS implements a per-AG on-disk hash table of unlinked inodes.  The AGI
 * header contains a number of buckets that point to an inode, and each inode
 * record has a pointer to the next inode in the hash chain.  This
 * singly-linked list causes scaling problems in the iunlink remove function
 * because we must walk that list to find the inode that points to the inode
 * being removed from the unlinked hash bucket list.
 *
 * What if we modelled the unlinked list as a collection of records capturing
 * "X.next_unlinked = Y" relations?  If we indexed those records on Y, we'd
 * have a fast way to look up unlinked list predecessors, which avoids the
 * slow list walk.  That's exactly what we do here (in-core) with a per-AG
 * rhashtable.
 *
 * Because this is a backref cache, we ignore operational failures since the
 * iunlink code can fall back to the slow bucket walk.  The only errors that
 * should bubble out are for obviously incorrect situations.
 *
 * All users of the backref cache MUST hold the AGI buffer lock to serialize
 * access or have otherwise provided for concurrency control.
 */
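
/*
 * Illustrative example (not from the source): if AGI bucket 0 points at
 * inode 83 and 83.next_unlinked == 57, the cache holds a record
 * { iu_agino = 83, iu_next_unlinked = 57 } keyed on 57.  Removing inode
 * 57 can then find its list predecessor (83) with one hash lookup
 * instead of walking the bucket list from the head.
 */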
/* Capture a "X.next_unlinked = Y" relationship. */
struct xfs_iunlink {
	struct rhash_head	iu_rhash_head;
	xfs_agino_t		iu_agino;		/* X */
	xfs_agino_t		iu_next_unlinked;	/* Y */
};

/* Unlinked list predecessor lookup hashtable construction */
static int
xfs_iunlink_obj_cmpfn(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const xfs_agino_t		*key = arg->key;
	const struct xfs_iunlink	*iu = obj;

	if (iu->iu_next_unlinked != *key)
		return 1;
	return 0;
}

static const struct rhashtable_params xfs_iunlink_hash_params = {
	.min_size		= XFS_AGI_UNLINKED_BUCKETS,
	.key_len		= sizeof(xfs_agino_t),
	.key_offset		= offsetof(struct xfs_iunlink,
					   iu_next_unlinked),
	.head_offset		= offsetof(struct xfs_iunlink, iu_rhash_head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= xfs_iunlink_obj_cmpfn,
};
/*
 * Return X, where X.next_unlinked == @agino.  Returns NULLAGINO if no such
 * relation is found.
 */
static xfs_agino_t
xfs_iunlink_lookup_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		agino)
{
	struct xfs_iunlink	*iu;

	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
			xfs_iunlink_hash_params);
	return iu ? iu->iu_agino : NULLAGINO;
}
/*
 * Take ownership of an iunlink cache entry and insert it into the hash table.
 * If successful, the entry will be owned by the cache; if not, it is freed.
 * Either way, the caller does not own @iu after this call.
 */
static int
xfs_iunlink_insert_backref(
	struct xfs_perag	*pag,
	struct xfs_iunlink	*iu)
{
	int			error;

	error = rhashtable_insert_fast(&pag->pagi_unlinked_hash,
			&iu->iu_rhash_head, xfs_iunlink_hash_params);
	/*
	 * Fail loudly if there already was an entry because that's a sign of
	 * corruption of in-memory data.  Also fail loudly if we see an error
	 * code we didn't anticipate from the rhashtable code.  Currently we
	 * only anticipate ENOMEM.
	 */
	if (error) {
		WARN(error != -ENOMEM, "iunlink cache insert error %d", error);
		kmem_free(iu);
	}
	/*
	 * Absorb any runtime errors that aren't a result of corruption because
	 * this is a cache and we can always fall back to bucket list scanning.
	 */
	if (error != 0 && error != -EEXIST)
		error = 0;
	return error;
}
/* Remember that @prev_agino.next_unlinked = @this_agino. */
static int
xfs_iunlink_add_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		prev_agino,
	xfs_agino_t		this_agino)
{
	struct xfs_iunlink	*iu;

	if (XFS_TEST_ERROR(false, pag->pag_mount, XFS_ERRTAG_IUNLINK_FALLBACK))
		return 0;

	iu = kmem_zalloc(sizeof(*iu), KM_NOFS);
	iu->iu_agino = prev_agino;
	iu->iu_next_unlinked = this_agino;

	return xfs_iunlink_insert_backref(pag, iu);
}
/*
 * Replace X.next_unlinked = @agino with X.next_unlinked = @next_unlinked.
 * If @next_unlinked is NULLAGINO, we drop the backref and exit.  If there
 * wasn't any such entry then we don't bother.
 */
static int
xfs_iunlink_change_backref(
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	xfs_agino_t		next_unlinked)
{
	struct xfs_iunlink	*iu;
	int			error;

	/* Look up the old entry; if there wasn't one then exit. */
	iu = rhashtable_lookup_fast(&pag->pagi_unlinked_hash, &agino,
			xfs_iunlink_hash_params);
	if (!iu)
		return 0;

	/*
	 * Remove the entry.  This shouldn't ever return an error, but if we
	 * couldn't remove the old entry we don't want to add it again to the
	 * hash table, and if the entry disappeared on us then someone's
	 * violated the locking rules and we need to fail loudly.  Either way
	 * we cannot remove the inode because internal state is or would have
	 * been corrupt.
	 */
	error = rhashtable_remove_fast(&pag->pagi_unlinked_hash,
			&iu->iu_rhash_head, xfs_iunlink_hash_params);
	if (error)
		return error;

	/* If there is no new next entry just free our item and return. */
	if (next_unlinked == NULLAGINO) {
		kmem_free(iu);
		return 0;
	}

	/* Update the entry and re-add it to the hash table. */
	iu->iu_next_unlinked = next_unlinked;
	return xfs_iunlink_insert_backref(pag, iu);
}
/* Set up the in-core predecessor structures. */
int
xfs_iunlink_init(
	struct xfs_perag	*pag)
{
	return rhashtable_init(&pag->pagi_unlinked_hash,
			&xfs_iunlink_hash_params);
}

/* Free the in-core predecessor structures. */
static void
xfs_iunlink_free_item(
	void			*ptr,
	void			*arg)
{
	struct xfs_iunlink	*iu = ptr;
	bool			*freed_anything = arg;

	*freed_anything = true;
	kmem_free(iu);
}

void
xfs_iunlink_destroy(
	struct xfs_perag	*pag)
{
	bool			freed_anything = false;

	rhashtable_free_and_destroy(&pag->pagi_unlinked_hash,
			xfs_iunlink_free_item, &freed_anything);

	ASSERT(freed_anything == false || xfs_is_shutdown(pag->pag_mount));
}
/*
 * Point the AGI unlinked bucket at an inode and log the results.  The caller
 * is responsible for validating the old value.
 */
STATIC int
xfs_iunlink_update_bucket(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	struct xfs_buf		*agibp,
	unsigned int		bucket_index,
	xfs_agino_t		new_agino)
{
	struct xfs_agi		*agi = agibp->b_addr;
	xfs_agino_t		old_value;
	int			offset;

	ASSERT(xfs_verify_agino_or_null(tp->t_mountp, pag->pag_agno, new_agino));

	old_value = be32_to_cpu(agi->agi_unlinked[bucket_index]);
	trace_xfs_iunlink_update_bucket(tp->t_mountp, pag->pag_agno, bucket_index,
			old_value, new_agino);

	/*
	 * We should never find the head of the list already set to the value
	 * passed in because either we're adding or removing ourselves from the
	 * head of the list.
	 */
	if (old_value == new_agino) {
		xfs_buf_mark_corrupt(agibp);
		return -EFSCORRUPTED;
	}

	agi->agi_unlinked[bucket_index] = cpu_to_be32(new_agino);
	offset = offsetof(struct xfs_agi, agi_unlinked) +
			(sizeof(xfs_agino_t) * bucket_index);
	xfs_trans_log_buf(tp, agibp, offset, offset + sizeof(xfs_agino_t) - 1);
	return 0;
}
/* Set an on-disk inode's next_unlinked pointer. */
STATIC void
xfs_iunlink_update_dinode(
	struct xfs_trans	*tp,
	struct xfs_perag	*pag,
	xfs_agino_t		agino,
	struct xfs_buf		*ibp,
	struct xfs_dinode	*dip,
	struct xfs_imap		*imap,
	xfs_agino_t		next_agino)
{
	struct xfs_mount	*mp = tp->t_mountp;
	int			offset;

	ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));

	trace_xfs_iunlink_update_dinode(mp, pag->pag_agno, agino,
			be32_to_cpu(dip->di_next_unlinked), next_agino);

	dip->di_next_unlinked = cpu_to_be32(next_agino);
	offset = imap->im_boffset +
			offsetof(struct xfs_dinode, di_next_unlinked);

	/* need to recalc the inode CRC if appropriate */
	xfs_dinode_calc_crc(mp, dip);
	xfs_trans_inode_buf(tp, ibp);
	xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1);
}
/* Set an in-core inode's unlinked pointer and return the old value. */
STATIC int
xfs_iunlink_update_inode(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	xfs_agino_t		next_agino,
	xfs_agino_t		*old_next_agino)
{
	struct xfs_mount	*mp = tp->t_mountp;
	struct xfs_dinode	*dip;
	struct xfs_buf		*ibp;
	xfs_agino_t		old_value;
	int			error;

	ASSERT(xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino));

	error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &ibp);
	if (error)
		return error;
	dip = xfs_buf_offset(ibp, ip->i_imap.im_boffset);

	/* Make sure the old pointer isn't garbage. */
	old_value = be32_to_cpu(dip->di_next_unlinked);
	if (!xfs_verify_agino_or_null(mp, pag->pag_agno, old_value)) {
		xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__, dip,
				sizeof(*dip), __this_address);
		error = -EFSCORRUPTED;
		goto out;
	}

	/*
	 * Since we're updating a linked list, we should never find that the
	 * current pointer is the same as the new value, unless we're
	 * terminating the list.
	 */
	*old_next_agino = old_value;
	if (old_value == next_agino) {
		if (next_agino != NULLAGINO) {
			xfs_inode_verifier_error(ip, -EFSCORRUPTED, __func__,
					dip, sizeof(*dip), __this_address);
			error = -EFSCORRUPTED;
		}
		goto out;
	}

	/* Ok, update the new pointer. */
	xfs_iunlink_update_dinode(tp, pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
			ibp, dip, &ip->i_imap, next_agino);
	return 0;
out:
	xfs_trans_brelse(tp, ibp);
	return error;
}
2139 * This is called when the inode's link count has gone to 0 or we are creating
2140 * a tmpfile via O_TMPFILE. The inode @ip must have nlink == 0.
2142 * We place the on-disk inode on a list in the AGI. It will be pulled from this
2143 * list when the inode is freed.
2147 struct xfs_trans *tp,
2148 struct xfs_inode *ip)
2150 struct xfs_mount *mp = tp->t_mountp;
2151 struct xfs_perag *pag;
2152 struct xfs_agi *agi;
2153 struct xfs_buf *agibp;
2154 xfs_agino_t next_agino;
2155 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2156 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2159 ASSERT(VFS_I(ip)->i_nlink == 0);
2160 ASSERT(VFS_I(ip)->i_mode != 0);
2161 trace_xfs_iunlink(ip);
2163 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2165 /* Get the agi buffer first. It ensures lock ordering on the list. */
2166 error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
2169 agi = agibp->b_addr;
2172 * Get the index into the agi hash table for the list this inode will
2173 * go on. Make sure the pointer isn't garbage and that this inode
2174 * isn't already on the list.
2176 next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2177 if (next_agino == agino ||
2178 !xfs_verify_agino_or_null(mp, pag->pag_agno, next_agino)) {
2179 xfs_buf_mark_corrupt(agibp);
2180 error = -EFSCORRUPTED;
2184 if (next_agino != NULLAGINO) {
2185 xfs_agino_t old_agino;
2188 * There is already another inode in the bucket, so point this
2189 * inode to the current head of the list.
2191 error = xfs_iunlink_update_inode(tp, ip, pag, next_agino,
2195 ASSERT(old_agino == NULLAGINO);
2198		 * agino has been unlinked, add a backref from the next inode back to agino.
2201 error = xfs_iunlink_add_backref(pag, agino, next_agino);
2206 /* Point the head of the list to point to this inode. */
2207 error = xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index, agino);
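/*
 * Ignoring the backref cache and error paths, the insertion above is a
 * plain head-push onto a singly linked list (a sketch; bucket[] stands
 * for agi_unlinked[] and dinode() for mapping the on-disk inode):
 *
 *	next = bucket[agino % XFS_AGI_UNLINKED_BUCKETS];
 *	if (next != NULLAGINO)
 *		dinode(agino)->di_next_unlinked = next;
 *	bucket[agino % XFS_AGI_UNLINKED_BUCKETS] = agino;
 */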
2213 /* Return the imap, dinode pointer, and buffer for an inode. */
2215 xfs_iunlink_map_ino(
2216 struct xfs_trans *tp,
2217 xfs_agnumber_t agno,
2219 struct xfs_imap *imap,
2220 struct xfs_dinode **dipp,
2221 struct xfs_buf **bpp)
2223 struct xfs_mount *mp = tp->t_mountp;
2227 error = xfs_imap(mp, tp, XFS_AGINO_TO_INO(mp, agno, agino), imap, 0);
2229 xfs_warn(mp, "%s: xfs_imap returned error %d.",
2234 error = xfs_imap_to_bp(mp, tp, imap, bpp);
2236 xfs_warn(mp, "%s: xfs_imap_to_bp returned error %d.",
2241 *dipp = xfs_buf_offset(*bpp, imap->im_boffset);
2246 * Walk the unlinked chain from @head_agino until we find the inode that
2247 * points to @target_agino. Return the inode number, map, dinode pointer,
2248 * and inode cluster buffer of that inode as @agino, @imap, @dipp, and @bpp.
2250 * @tp, @pag, @head_agino, and @target_agino are input parameters.
2251 * @agino, @imap, @dipp, and @bpp are all output parameters.
2253 * Do not call this function if @target_agino is the head of the list.
2256 xfs_iunlink_map_prev(
2257 struct xfs_trans *tp,
2258 struct xfs_perag *pag,
2259 xfs_agino_t head_agino,
2260 xfs_agino_t target_agino,
2262 struct xfs_imap *imap,
2263 struct xfs_dinode **dipp,
2264 struct xfs_buf **bpp)
2266 struct xfs_mount *mp = tp->t_mountp;
2267 xfs_agino_t next_agino;
2270 ASSERT(head_agino != target_agino);
2273 /* See if our backref cache can find it faster. */
2274 *agino = xfs_iunlink_lookup_backref(pag, target_agino);
2275 if (*agino != NULLAGINO) {
2276 error = xfs_iunlink_map_ino(tp, pag->pag_agno, *agino, imap,
2281 if (be32_to_cpu((*dipp)->di_next_unlinked) == target_agino)
2285 * If we get here the cache contents were corrupt, so drop the
2286 * buffer and fall back to walking the bucket list.
2288 xfs_trans_brelse(tp, *bpp);
2293 trace_xfs_iunlink_map_prev_fallback(mp, pag->pag_agno);
2295 /* Otherwise, walk the entire bucket until we find it. */
2296 next_agino = head_agino;
2297 while (next_agino != target_agino) {
2298 xfs_agino_t unlinked_agino;
2301 xfs_trans_brelse(tp, *bpp);
2303 *agino = next_agino;
2304 error = xfs_iunlink_map_ino(tp, pag->pag_agno, next_agino, imap,
2309 unlinked_agino = be32_to_cpu((*dipp)->di_next_unlinked);
2311		 * Make sure this pointer is valid and isn't an obvious infinite loop.
2314 if (!xfs_verify_agino(mp, pag->pag_agno, unlinked_agino) ||
2315 next_agino == unlinked_agino) {
2316 XFS_CORRUPTION_ERROR(__func__,
2317 XFS_ERRLEVEL_LOW, mp,
2318 *dipp, sizeof(**dipp));
2319 error = -EFSCORRUPTED;
2322 next_agino = unlinked_agino;
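/*
 * The loop above is a textbook predecessor search in a singly linked
 * list (a sketch; next_unlinked() stands in for mapping the inode and
 * reading di_next_unlinked, which the real loop also revalidates):
 *
 *	prev = head;
 *	while (next_unlinked(prev) != target)
 *		prev = next_unlinked(prev);
 *
 * leaving *agino, *dipp and *bpp pointing at the predecessor on success.
 */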
2329 * Pull the on-disk inode from the AGI unlinked list.
2333 struct xfs_trans *tp,
2334 struct xfs_perag *pag,
2335 struct xfs_inode *ip)
2337 struct xfs_mount *mp = tp->t_mountp;
2338 struct xfs_agi *agi;
2339 struct xfs_buf *agibp;
2340 struct xfs_buf *last_ibp;
2341 struct xfs_dinode *last_dip = NULL;
2342 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2343 xfs_agino_t next_agino;
2344 xfs_agino_t head_agino;
2345 short bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2348 trace_xfs_iunlink_remove(ip);
2350 /* Get the agi buffer first. It ensures lock ordering on the list. */
2351 error = xfs_read_agi(mp, tp, pag->pag_agno, &agibp);
2354 agi = agibp->b_addr;
2357 * Get the index into the agi hash table for the list this inode will
2358 * go on. Make sure the head pointer isn't garbage.
2360 head_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2361 if (!xfs_verify_agino(mp, pag->pag_agno, head_agino)) {
2362 XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
2364 return -EFSCORRUPTED;
2368 * Set our inode's next_unlinked pointer to NULL and then return
2369 * the old pointer value so that we can update whatever was previous
2370 * to us in the list to point to whatever was next in the list.
2372 error = xfs_iunlink_update_inode(tp, ip, pag, NULLAGINO, &next_agino);
2377 * If there was a backref pointing from the next inode back to this
2378 * one, remove it because we've removed this inode from the list.
2380 * Later, if this inode was in the middle of the list we'll update
2381 * this inode's backref to point from the next inode.
2383 if (next_agino != NULLAGINO) {
2384 error = xfs_iunlink_change_backref(pag, next_agino, NULLAGINO);
2389 if (head_agino != agino) {
2390 struct xfs_imap imap;
2391 xfs_agino_t prev_agino;
2393 /* We need to search the list for the inode being freed. */
2394 error = xfs_iunlink_map_prev(tp, pag, head_agino, agino,
2395 &prev_agino, &imap, &last_dip, &last_ibp);
2399 /* Point the previous inode on the list to the next inode. */
2400 xfs_iunlink_update_dinode(tp, pag, prev_agino, last_ibp,
2401 last_dip, &imap, next_agino);
2404 * Now we deal with the backref for this inode. If this inode
2405 * pointed at a real inode, change the backref that pointed to
2406 * us to point to our old next. If this inode was the end of
2407 * the list, delete the backref that pointed to us. Note that
2408 * change_backref takes care of deleting the backref if
2409 * next_agino is NULLAGINO.
2411 return xfs_iunlink_change_backref(agibp->b_pag, agino,
2415 /* Point the head of the list to the next unlinked inode. */
2416 return xfs_iunlink_update_bucket(tp, pag, agibp, bucket_index,
2421 * Look up the inode number specified and if it is not already marked XFS_ISTALE
2422	 * mark it stale. We should only find clean inodes in this lookup that aren't already stale.
2426 xfs_ifree_mark_inode_stale(
2427 struct xfs_perag *pag,
2428 struct xfs_inode *free_ip,
2431 struct xfs_mount *mp = pag->pag_mount;
2432 struct xfs_inode_log_item *iip;
2433 struct xfs_inode *ip;
2437 ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum));
2439 /* Inode not in memory, nothing to do */
2446	 * Because this is an RCU protected lookup, we could find a recently
2447 * freed or even reallocated inode during the lookup. We need to check
2448 * under the i_flags_lock for a valid inode here. Skip it if it is not
2449 * valid, the wrong inode or stale.
2451 spin_lock(&ip->i_flags_lock);
2452 if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE))
2453 goto out_iflags_unlock;
2456 * Don't try to lock/unlock the current inode, but we _cannot_ skip the
2457 * other inodes that we did not find in the list attached to the buffer
2458 * and are not already marked stale. If we can't lock it, back off and
2461 if (ip != free_ip) {
2462 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2463 spin_unlock(&ip->i_flags_lock);
2469 ip->i_flags |= XFS_ISTALE;
2472 * If the inode is flushing, it is already attached to the buffer. All
2473	 * we need to do here is mark the inode stale so buffer IO completion
2474 * will remove it from the AIL.
2477 if (__xfs_iflags_test(ip, XFS_IFLUSHING)) {
2478 ASSERT(!list_empty(&iip->ili_item.li_bio_list));
2479 ASSERT(iip->ili_last_fields);
2484 * Inodes not attached to the buffer can be released immediately.
2485 * Everything else has to go through xfs_iflush_abort() on journal
2486 * commit as the flock synchronises removal of the inode from the
2487 * cluster buffer against inode reclaim.
2489 if (!iip || list_empty(&iip->ili_item.li_bio_list))
2492 __xfs_iflags_set(ip, XFS_IFLUSHING);
2493 spin_unlock(&ip->i_flags_lock);
2496 /* we have a dirty inode in memory that has not yet been flushed. */
2497 spin_lock(&iip->ili_lock);
2498 iip->ili_last_fields = iip->ili_fields;
2499 iip->ili_fields = 0;
2500 iip->ili_fsync_fields = 0;
2501 spin_unlock(&iip->ili_lock);
2502 ASSERT(iip->ili_last_fields);
2505 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2510 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2512 spin_unlock(&ip->i_flags_lock);
2517 * A big issue when freeing the inode cluster is that we _cannot_ skip any
2518 * inodes that are in memory - they all must be marked stale and attached to
2519 * the cluster buffer.
2523 struct xfs_trans *tp,
2524 struct xfs_perag *pag,
2525 struct xfs_inode *free_ip,
2526 struct xfs_icluster *xic)
2528 struct xfs_mount *mp = free_ip->i_mount;
2529 struct xfs_ino_geometry *igeo = M_IGEO(mp);
2532 xfs_ino_t inum = xic->first_ino;
2538 nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster;
2540 for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) {
2542 * The allocation bitmap tells us which inodes of the chunk were
2543		 * physically allocated. Skip the cluster if an inode falls into a sparse region.
2546 ioffset = inum - xic->first_ino;
2547 if ((xic->alloc & XFS_INOBT_MASK(ioffset)) == 0) {
2548 ASSERT(ioffset % igeo->inodes_per_cluster == 0);
2552 blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2553 XFS_INO_TO_AGBNO(mp, inum));
2556 * We obtain and lock the backing buffer first in the process
2557 * here to ensure dirty inodes attached to the buffer remain in
2558 * the flushing state while we mark them stale.
2560 * If we scan the in-memory inodes first, then buffer IO can
2561 * complete before we get a lock on it, and hence we may fail
2562 * to mark all the active inodes on the buffer stale.
2564 error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2565 mp->m_bsize * igeo->blocks_per_cluster,
2571 * This buffer may not have been correctly initialised as we
2572 * didn't read it from disk. That's not important because we are
2573		 * only using it to mark the buffer as stale in the log, and to
2574 * attach stale cached inodes on it. That means it will never be
2575 * dispatched for IO. If it is, we want to know about it, and we
2576		 * want it to fail. We can achieve this by adding a write
2577 * verifier to the buffer.
2579 bp->b_ops = &xfs_inode_buf_ops;
2582 * Now we need to set all the cached clean inodes as XFS_ISTALE,
2583 * too. This requires lookups, and will skip inodes that we've
2584 * already marked XFS_ISTALE.
2586 for (i = 0; i < igeo->inodes_per_cluster; i++)
2587 xfs_ifree_mark_inode_stale(pag, free_ip, inum + i);
2589 xfs_trans_stale_inode_buf(tp, bp);
2590 xfs_trans_binval(tp, bp);
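/*
 * In outline, each cluster pass above is: grab and lock the backing
 * buffer with xfs_trans_get_buf() before touching any in-memory inode,
 * attach xfs_inode_buf_ops so stray writeback fails loudly, mark every
 * cached inode in the cluster stale, then stale and invalidate the
 * buffer itself via xfs_trans_stale_inode_buf() and xfs_trans_binval().
 */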
2596 * This is called to return an inode to the inode free list.
2597 * The inode should already be truncated to 0 length and have
2598 * no pages associated with it. This routine also assumes that
2599 * the inode is already a part of the transaction.
2601 * The on-disk copy of the inode will have been added to the list
2602 * of unlinked inodes in the AGI. We need to remove the inode from
2603 * that list atomically with respect to freeing it here.
2607 struct xfs_trans *tp,
2608 struct xfs_inode *ip)
2610 struct xfs_mount *mp = ip->i_mount;
2611 struct xfs_perag *pag;
2612 struct xfs_icluster xic = { 0 };
2613 struct xfs_inode_log_item *iip = ip->i_itemp;
2616 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2617 ASSERT(VFS_I(ip)->i_nlink == 0);
2618 ASSERT(ip->i_df.if_nextents == 0);
2619 ASSERT(ip->i_disk_size == 0 || !S_ISREG(VFS_I(ip)->i_mode));
2620 ASSERT(ip->i_nblocks == 0);
2622 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
2625 * Pull the on-disk inode from the AGI unlinked list.
2627 error = xfs_iunlink_remove(tp, pag, ip);
2631 error = xfs_difree(tp, pag, ip->i_ino, &xic);
2636 * Free any local-format data sitting around before we reset the
2637 * data fork to extents format. Note that the attr fork data has
2638 * already been freed by xfs_attr_inactive.
2640 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL) {
2641 kmem_free(ip->i_df.if_u1.if_data);
2642 ip->i_df.if_u1.if_data = NULL;
2643 ip->i_df.if_bytes = 0;
2646 VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
2648 ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
2649 ip->i_forkoff = 0; /* mark the attr fork not in use */
2650 ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS;
2651 if (xfs_iflags_test(ip, XFS_IPRESERVE_DM_FIELDS))
2652 xfs_iflags_clear(ip, XFS_IPRESERVE_DM_FIELDS);
2654 /* Don't attempt to replay owner changes for a deleted inode */
2655 spin_lock(&iip->ili_lock);
2656 iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER);
2657 spin_unlock(&iip->ili_lock);
2660 * Bump the generation count so no one will be confused
2661 * by reincarnations of this inode.
2663 VFS_I(ip)->i_generation++;
2664 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2667 error = xfs_ifree_cluster(tp, pag, ip, &xic);
2674 * This is called to unpin an inode. The caller must have the inode locked
2675 * in at least shared mode so that the buffer cannot be subsequently pinned
2676 * once someone is waiting for it to be unpinned.
2680 struct xfs_inode *ip)
2682 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
2684 trace_xfs_inode_unpin_nowait(ip, _RET_IP_);
2686 /* Give the log a push to start the unpinning I/O */
2687 xfs_log_force_seq(ip->i_mount, ip->i_itemp->ili_commit_seq, 0, NULL);
2693 struct xfs_inode *ip)
2695 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
2696 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
2701 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
2702		if (xfs_ipincount(ip))
2703			io_schedule();
2704 } while (xfs_ipincount(ip));
2705 finish_wait(wq, &wait.wq_entry);
2710 struct xfs_inode *ip)
2712 if (xfs_ipincount(ip))
2713 __xfs_iunpin_wait(ip);
2717 * Removing an inode from the namespace involves removing the directory entry
2718 * and dropping the link count on the inode. Removing the directory entry can
2719 * result in locking an AGF (directory blocks were freed) and removing a link
2720	 * count can result in placing the inode on an unlinked list which results in locking an AGI.
2723 * The big problem here is that we have an ordering constraint on AGF and AGI
2724 * locking - inode allocation locks the AGI, then can allocate a new extent for
2725 * new inodes, locking the AGF after the AGI. Similarly, freeing the inode
2726 * removes the inode from the unlinked list, requiring that we lock the AGI
2727 * first, and then freeing the inode can result in an inode chunk being freed
2728 * and hence freeing disk space requiring that we lock an AGF.
2730 * Hence the ordering that is imposed by other parts of the code is AGI before
2731 * AGF. This means we cannot remove the directory entry before we drop the inode
2732 * reference count and put it on the unlinked list as this results in a lock
2733 * order of AGF then AGI, and this can deadlock against inode allocation and
2734	 * freeing. Therefore we must drop the link counts before we remove the directory entry.
2737 * This is still safe from a transactional point of view - it is not until we
2738 * get to xfs_defer_finish() that we have the possibility of multiple
2739 * transactions in this operation. Hence as long as we remove the directory
2740 * entry and drop the link count in the first transaction of the remove
2741 * operation, there are no transactional constraints on the ordering here.
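/*
 * Concretely, the safe shape used by the remove path below is (a sketch):
 *
 *	error = xfs_droplink(tp, ip);		(may take the AGI)
 *	error = xfs_dir_removename(tp, dp, ...);	(may take the AGF)
 *
 * i.e. AGI before AGF, matching inode allocation, rather than the
 * deadlock-prone AGF-then-AGI order that removing the directory entry
 * first would impose.
 */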
2746 struct xfs_name *name,
2749 xfs_mount_t *mp = dp->i_mount;
2750 xfs_trans_t *tp = NULL;
2751 int is_dir = S_ISDIR(VFS_I(ip)->i_mode);
2756 trace_xfs_remove(dp, name);
2758 if (xfs_is_shutdown(mp))
2761 error = xfs_qm_dqattach(dp);
2765 error = xfs_qm_dqattach(ip);
2770 * We try to get the real space reservation first, allowing for
2771 * directory btree deletion(s) implying possible bmap insert(s). If we
2772 * can't get the space reservation then we use 0 instead, and avoid the
2773	 * bmap btree insert(s) in the directory code: if the bmap insert tries
2774	 * to happen, we instead trim the LAST block from the directory.
2776 * Ignore EDQUOT and ENOSPC being returned via nospace_error because
2777 * the directory code can handle a reservationless update and we don't
2778 * want to prevent a user from trying to free space by deleting things.
2780 resblks = XFS_REMOVE_SPACE_RES(mp);
2781 error = xfs_trans_alloc_dir(dp, &M_RES(mp)->tr_remove, ip, &resblks,
2784 ASSERT(error != -ENOSPC);
2789	 * If we're removing a directory, perform some additional validation.
2792 ASSERT(VFS_I(ip)->i_nlink >= 2);
2793 if (VFS_I(ip)->i_nlink != 2) {
2795 goto out_trans_cancel;
2797 if (!xfs_dir_isempty(ip)) {
2799 goto out_trans_cancel;
2802 /* Drop the link from ip's "..". */
2803 error = xfs_droplink(tp, dp);
2805 goto out_trans_cancel;
2807 /* Drop the "." link from ip to self. */
2808 error = xfs_droplink(tp, ip);
2810 goto out_trans_cancel;
2813 * Point the unlinked child directory's ".." entry to the root
2814 * directory to eliminate back-references to inodes that may
2815 * get freed before the child directory is closed. If the fs
2816 * gets shrunk, this can lead to dirent inode validation errors.
2818 if (dp->i_ino != tp->t_mountp->m_sb.sb_rootino) {
2819 error = xfs_dir_replace(tp, ip, &xfs_name_dotdot,
2820 tp->t_mountp->m_sb.sb_rootino, 0);
2826 * When removing a non-directory we need to log the parent
2827 * inode here. For a directory this is done implicitly
2828 * by the xfs_droplink call for the ".." entry.
2830 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
2832 xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2834 /* Drop the link from dp to ip. */
2835 error = xfs_droplink(tp, ip);
2837 goto out_trans_cancel;
2839 error = xfs_dir_removename(tp, dp, name, ip->i_ino, resblks);
2841 ASSERT(error != -ENOENT);
2842 goto out_trans_cancel;
2846 * If this is a synchronous mount, make sure that the
2847	 * remove transaction goes to disk before returning to the user.
2850 if (xfs_has_wsync(mp) || xfs_has_dirsync(mp))
2851 xfs_trans_set_sync(tp);
2853 error = xfs_trans_commit(tp);
2857 if (is_dir && xfs_inode_is_filestream(ip))
2858 xfs_filestream_deassociate(ip);
2863 xfs_trans_cancel(tp);
2869 * Enter all inodes for a rename transaction into a sorted array.
2871 #define __XFS_SORT_INODES 5
2873 xfs_sort_for_rename(
2874 struct xfs_inode *dp1, /* in: old (source) directory inode */
2875 struct xfs_inode *dp2, /* in: new (target) directory inode */
2876 struct xfs_inode *ip1, /* in: inode of old entry */
2877 struct xfs_inode *ip2, /* in: inode of new entry */
2878 struct xfs_inode *wip, /* in: whiteout inode */
2879 struct xfs_inode **i_tab,/* out: sorted array of inodes */
2880 int *num_inodes) /* in/out: inodes in array */
2884 ASSERT(*num_inodes == __XFS_SORT_INODES);
2885 memset(i_tab, 0, *num_inodes * sizeof(struct xfs_inode *));
2888 * i_tab contains a list of pointers to inodes. We initialize
2889	 * the table here and we'll sort it. We will then use it to
2890 * order the acquisition of the inode locks.
2892 * Note that the table may contain duplicates. e.g., dp1 == dp2.
2905 * Sort the elements via bubble sort. (Remember, there are at
2906 * most 5 elements to sort, so this is adequate.)
2908 for (i = 0; i < *num_inodes; i++) {
2909 for (j = 1; j < *num_inodes; j++) {
2910 if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
2911 struct xfs_inode *temp = i_tab[j];
2912				i_tab[j] = i_tab[j-1];
2913				i_tab[j-1] = temp;
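/*
 * Typical use, mirroring xfs_rename() below (a sketch; the caller must
 * initialise num_inodes to __XFS_SORT_INODES before the call):
 *
 *	struct xfs_inode *inodes[__XFS_SORT_INODES];
 *	int num_inodes = __XFS_SORT_INODES;
 *
 *	xfs_sort_for_rename(dp1, dp2, ip1, ip2, wip, inodes, &num_inodes);
 *	xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
 */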
2921 struct xfs_trans *tp)
2924 * If this is a synchronous mount, make sure that the rename transaction
2925 * goes to disk before returning to the user.
2927 if (xfs_has_wsync(tp->t_mountp) || xfs_has_dirsync(tp->t_mountp))
2928 xfs_trans_set_sync(tp);
2930 return xfs_trans_commit(tp);
2934 * xfs_cross_rename()
2936	 * Responsible for handling the RENAME_EXCHANGE flag in the renameat2() syscall.
2940 struct xfs_trans *tp,
2941 struct xfs_inode *dp1,
2942 struct xfs_name *name1,
2943 struct xfs_inode *ip1,
2944 struct xfs_inode *dp2,
2945 struct xfs_name *name2,
2946 struct xfs_inode *ip2,
2954 /* Swap inode number for dirent in first parent */
2955 error = xfs_dir_replace(tp, dp1, name1, ip2->i_ino, spaceres);
2957 goto out_trans_abort;
2959 /* Swap inode number for dirent in second parent */
2960 error = xfs_dir_replace(tp, dp2, name2, ip1->i_ino, spaceres);
2962 goto out_trans_abort;
2965 * If we're renaming one or more directories across different parents,
2966	 * update the respective ".." entries (and link counts) to match the new parents.
2970 dp2_flags = XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2972 if (S_ISDIR(VFS_I(ip2)->i_mode)) {
2973 error = xfs_dir_replace(tp, ip2, &xfs_name_dotdot,
2974 dp1->i_ino, spaceres);
2976 goto out_trans_abort;
2978 /* transfer ip2 ".." reference to dp1 */
2979 if (!S_ISDIR(VFS_I(ip1)->i_mode)) {
2980 error = xfs_droplink(tp, dp2);
2982 goto out_trans_abort;
2983 xfs_bumplink(tp, dp1);
2987 * Although ip1 isn't changed here, userspace needs
2988 * to be warned about the change, so that applications
2989		 * relying on it (like backup ones), will properly notice the change.
2992 ip1_flags |= XFS_ICHGTIME_CHG;
2993 ip2_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
2996 if (S_ISDIR(VFS_I(ip1)->i_mode)) {
2997 error = xfs_dir_replace(tp, ip1, &xfs_name_dotdot,
2998 dp2->i_ino, spaceres);
3000 goto out_trans_abort;
3002 /* transfer ip1 ".." reference to dp2 */
3003 if (!S_ISDIR(VFS_I(ip2)->i_mode)) {
3004 error = xfs_droplink(tp, dp1);
3006 goto out_trans_abort;
3007 xfs_bumplink(tp, dp2);
3011 * Although ip2 isn't changed here, userspace needs
3012 * to be warned about the change, so that applications
3013		 * relying on it (like backup ones), will properly notice the change.
3016 ip1_flags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
3017 ip2_flags |= XFS_ICHGTIME_CHG;
3022 xfs_trans_ichgtime(tp, ip1, ip1_flags);
3023 xfs_trans_log_inode(tp, ip1, XFS_ILOG_CORE);
3026 xfs_trans_ichgtime(tp, ip2, ip2_flags);
3027 xfs_trans_log_inode(tp, ip2, XFS_ILOG_CORE);
3030 xfs_trans_ichgtime(tp, dp2, dp2_flags);
3031 xfs_trans_log_inode(tp, dp2, XFS_ILOG_CORE);
3033 xfs_trans_ichgtime(tp, dp1, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3034 xfs_trans_log_inode(tp, dp1, XFS_ILOG_CORE);
3035 return xfs_finish_rename(tp);
3038 xfs_trans_cancel(tp);
3043 * xfs_rename_alloc_whiteout()
3045 * Return a referenced, unlinked, unlocked inode that can be used as a
3046 * whiteout in a rename transaction. We use a tmpfile inode here so that if we
3047	 * crash between allocating the inode and linking it into the rename transaction,
3048 * recovery will free the inode and we won't leak it.
3051 xfs_rename_alloc_whiteout(
3052 struct user_namespace *mnt_userns,
3053 struct xfs_inode *dp,
3054 struct xfs_inode **wip)
3056 struct xfs_inode *tmpfile;
3059 error = xfs_create_tmpfile(mnt_userns, dp, S_IFCHR | WHITEOUT_MODE,
3065 * Prepare the tmpfile inode as if it were created through the VFS.
3066 * Complete the inode setup and flag it as linkable. nlink is already
3067 * zero, so we can skip the drop_nlink.
3069 xfs_setup_iops(tmpfile);
3070 xfs_finish_inode_setup(tmpfile);
3071	VFS_I(tmpfile)->i_state |= I_LINKABLE;
3073	*wip = tmpfile;
3082 struct user_namespace *mnt_userns,
3083 struct xfs_inode *src_dp,
3084 struct xfs_name *src_name,
3085 struct xfs_inode *src_ip,
3086 struct xfs_inode *target_dp,
3087 struct xfs_name *target_name,
3088 struct xfs_inode *target_ip,
3091 struct xfs_mount *mp = src_dp->i_mount;
3092 struct xfs_trans *tp;
3093 struct xfs_inode *wip = NULL; /* whiteout inode */
3094 struct xfs_inode *inodes[__XFS_SORT_INODES];
3096 int num_inodes = __XFS_SORT_INODES;
3097 bool new_parent = (src_dp != target_dp);
3098 bool src_is_directory = S_ISDIR(VFS_I(src_ip)->i_mode);
3100 bool retried = false;
3101 int error, nospace_error = 0;
3103 trace_xfs_rename(src_dp, target_dp, src_name, target_name);
3105 if ((flags & RENAME_EXCHANGE) && !target_ip)
3109 * If we are doing a whiteout operation, allocate the whiteout inode
3110	 * we will be placing at the target and ensure the type is set appropriately.
3113 if (flags & RENAME_WHITEOUT) {
3114 error = xfs_rename_alloc_whiteout(mnt_userns, target_dp, &wip);
3118 /* setup target dirent info as whiteout */
3119 src_name->type = XFS_DIR3_FT_CHRDEV;
3122 xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip, wip,
3123 inodes, &num_inodes);
3127 spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
3128 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, spaceres, 0, 0, &tp);
3129 if (error == -ENOSPC) {
3130 nospace_error = error;
3132 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_rename, 0, 0, 0,
3136 goto out_release_wip;
3139 * Attach the dquots to the inodes
3141 error = xfs_qm_vop_rename_dqattach(inodes);
3143 goto out_trans_cancel;
3146 * Lock all the participating inodes. Depending upon whether
3147 * the target_name exists in the target directory, and
3148 * whether the target directory is the same as the source
3149 * directory, we can lock from 2 to 4 inodes.
3151 xfs_lock_inodes(inodes, num_inodes, XFS_ILOCK_EXCL);
3154 * Join all the inodes to the transaction. From this point on,
3155	 * we can rely on either trans_commit or trans_cancel to unlock them.
3158 xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
3160 xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
3161 xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
3163 xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
3165 xfs_trans_ijoin(tp, wip, XFS_ILOCK_EXCL);
3168 * If we are using project inheritance, we only allow renames
3169 * into our tree when the project IDs are the same; else the
3170 * tree quota mechanism would be circumvented.
3172 if (unlikely((target_dp->i_diflags & XFS_DIFLAG_PROJINHERIT) &&
3173 target_dp->i_projid != src_ip->i_projid)) {
3175 goto out_trans_cancel;
3178 /* RENAME_EXCHANGE is unique from here on. */
3179 if (flags & RENAME_EXCHANGE)
3180 return xfs_cross_rename(tp, src_dp, src_name, src_ip,
3181 target_dp, target_name, target_ip,
3185 * Try to reserve quota to handle an expansion of the target directory.
3186 * We'll allow the rename to continue in reservationless mode if we hit
3187 * a space usage constraint. If we trigger reservationless mode, save
3188 * the errno if there isn't any free space in the target directory.
3190 if (spaceres != 0) {
3191 error = xfs_trans_reserve_quota_nblks(tp, target_dp, spaceres,
3193 if (error == -EDQUOT || error == -ENOSPC) {
3195 xfs_trans_cancel(tp);
3196 xfs_blockgc_free_quota(target_dp, 0);
3201 nospace_error = error;
3206 goto out_trans_cancel;
3210 * Check for expected errors before we dirty the transaction
3211 * so we can return an error without a transaction abort.
3213 * Extent count overflow check:
3215 * From the perspective of src_dp, a rename operation is essentially a
3216 * directory entry remove operation. Hence the only place where we check
3217 * for extent count overflow for src_dp is in
3218 * xfs_bmap_del_extent_real(). xfs_bmap_del_extent_real() returns
3219 * -ENOSPC when it detects a possible extent count overflow and in
3220	 * response, the higher layers of directory handling code do the following:
3222 * 1. Data/Free blocks: XFS lets these blocks linger until a
3223 * future remove operation removes them.
3224 * 2. Dabtree blocks: XFS swaps the blocks with the last block in the
3225 * Leaf space and unmaps the last block.
3227 * For target_dp, there are two cases depending on whether the
3228 * destination directory entry exists or not.
3230 * When destination directory entry does not exist (i.e. target_ip ==
3231 * NULL), extent count overflow check is performed only when transaction
3232 * has a non-zero sized space reservation associated with it. With a
3233 * zero-sized space reservation, XFS allows a rename operation to
3234 * continue only when the directory has sufficient free space in its
3235 * data/leaf/free space blocks to hold the new entry.
3237 * When destination directory entry exists (i.e. target_ip != NULL), all
3238 * we need to do is change the inode number associated with the already
3239	 * existing entry. Hence there is no need to perform an extent count overflow check.
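/*
 * The two target_ip == NULL cases described above map onto the checks
 * immediately below (a sketch of that logic, not additional checks):
 *
 *	if (!spaceres)
 *		error = xfs_dir_canenter(tp, target_dp, target_name);
 *	else
 *		error = xfs_iext_count_may_overflow(target_dp, XFS_DATA_FORK,
 *				XFS_IEXT_DIR_MANIP_CNT(mp));
 */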
3242 if (target_ip == NULL) {
3244 * If there's no space reservation, check the entry will
3245 * fit before actually inserting it.
3248 error = xfs_dir_canenter(tp, target_dp, target_name);
3250 goto out_trans_cancel;
3252 error = xfs_iext_count_may_overflow(target_dp,
3254 XFS_IEXT_DIR_MANIP_CNT(mp));
3256 goto out_trans_cancel;
3260		 * If target exists and it's a directory, check whether
3261 * it can be destroyed.
3263 if (S_ISDIR(VFS_I(target_ip)->i_mode) &&
3264 (!xfs_dir_isempty(target_ip) ||
3265 (VFS_I(target_ip)->i_nlink > 2))) {
3267 goto out_trans_cancel;
3272 * Lock the AGI buffers we need to handle bumping the nlink of the
3273 * whiteout inode off the unlinked list and to handle dropping the
3274 * nlink of the target inode. Per locking order rules, do this in
3275 * increasing AG order and before directory block allocation tries to
3276 * grab AGFs because we grab AGIs before AGFs.
3278 * The (vfs) caller must ensure that if src is a directory then
3279 * target_ip is either null or an empty directory.
3281 for (i = 0; i < num_inodes && inodes[i] != NULL; i++) {
3282 if (inodes[i] == wip ||
3283 (inodes[i] == target_ip &&
3284 (VFS_I(target_ip)->i_nlink == 1 || src_is_directory))) {
3286 xfs_agnumber_t agno;
3288 agno = XFS_INO_TO_AGNO(mp, inodes[i]->i_ino);
3289 error = xfs_read_agi(mp, tp, agno, &bp);
3291 goto out_trans_cancel;
3296 * Directory entry creation below may acquire the AGF. Remove
3297 * the whiteout from the unlinked list first to preserve correct
3298 * AGI/AGF locking order. This dirties the transaction so failures
3299	 * after this point will abort and log recovery will clean up the mess.
3302 * For whiteouts, we need to bump the link count on the whiteout
3303 * inode. After this point, we have a real link, clear the tmpfile
3304	 * state flag from the inode so it doesn't accidentally get misused later.
3308 struct xfs_perag *pag;
3310 ASSERT(VFS_I(wip)->i_nlink == 0);
3312 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, wip->i_ino));
3313 error = xfs_iunlink_remove(tp, pag, wip);
3316 goto out_trans_cancel;
3318 xfs_bumplink(tp, wip);
3319 VFS_I(wip)->i_state &= ~I_LINKABLE;
3323 * Set up the target.
3325 if (target_ip == NULL) {
3327 * If target does not exist and the rename crosses
3328 * directories, adjust the target directory link count
3329 * to account for the ".." reference from the new entry.
3331 error = xfs_dir_createname(tp, target_dp, target_name,
3332 src_ip->i_ino, spaceres);
3334 goto out_trans_cancel;
3336 xfs_trans_ichgtime(tp, target_dp,
3337 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3339 if (new_parent && src_is_directory) {
3340 xfs_bumplink(tp, target_dp);
3342 } else { /* target_ip != NULL */
3344 * Link the source inode under the target name.
3345 * If the source inode is a directory and we are moving
3346 * it across directories, its ".." entry will be
3347 * inconsistent until we replace that down below.
3349 * In case there is already an entry with the same
3350 * name at the destination directory, remove it first.
3352 error = xfs_dir_replace(tp, target_dp, target_name,
3353 src_ip->i_ino, spaceres);
3355 goto out_trans_cancel;
3357 xfs_trans_ichgtime(tp, target_dp,
3358 XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3361 * Decrement the link count on the target since the target
3362 * dir no longer points to it.
3364 error = xfs_droplink(tp, target_ip);
3366 goto out_trans_cancel;
3368 if (src_is_directory) {
3370 * Drop the link from the old "." entry.
3372 error = xfs_droplink(tp, target_ip);
3374 goto out_trans_cancel;
3376 } /* target_ip != NULL */
3379 * Remove the source.
3381 if (new_parent && src_is_directory) {
3383		 * Rewrite the ".." entry to point to the new directory.
3386 error = xfs_dir_replace(tp, src_ip, &xfs_name_dotdot,
3387 target_dp->i_ino, spaceres);
3388 ASSERT(error != -EEXIST);
3390 goto out_trans_cancel;
3394 * We always want to hit the ctime on the source inode.
3396 * This isn't strictly required by the standards since the source
3397 * inode isn't really being changed, but old unix file systems did
3398 * it and some incremental backup programs won't work without it.
3400 xfs_trans_ichgtime(tp, src_ip, XFS_ICHGTIME_CHG);
3401 xfs_trans_log_inode(tp, src_ip, XFS_ILOG_CORE);
3404 * Adjust the link count on src_dp. This is necessary when
3405 * renaming a directory, either within one parent when
3406 * the target existed, or across two parent directories.
3408 if (src_is_directory && (new_parent || target_ip != NULL)) {
3411 * Decrement link count on src_directory since the
3412 * entry that's moved no longer points to it.
3414 error = xfs_droplink(tp, src_dp);
3416 goto out_trans_cancel;
3420 * For whiteouts, we only need to update the source dirent with the
3421	 * inode number of the whiteout inode rather than removing it altogether.
3425 error = xfs_dir_replace(tp, src_dp, src_name, wip->i_ino,
3429 * NOTE: We don't need to check for extent count overflow here
3430 * because the dir remove name code will leave the dir block in
3431 * place if the extent count would overflow.
3433 error = xfs_dir_removename(tp, src_dp, src_name, src_ip->i_ino,
3438 goto out_trans_cancel;
3440 xfs_trans_ichgtime(tp, src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
3441 xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);
3443 xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
3445 error = xfs_finish_rename(tp);
3451 xfs_trans_cancel(tp);
3455 if (error == -ENOSPC && nospace_error)
3456 error = nospace_error;
3462 struct xfs_inode *ip,
3465 struct xfs_inode_log_item *iip = ip->i_itemp;
3466 struct xfs_dinode *dip;
3467 struct xfs_mount *mp = ip->i_mount;
3470 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
3471 ASSERT(xfs_iflags_test(ip, XFS_IFLUSHING));
3472 ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE ||
3473 ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
3474 ASSERT(iip->ili_item.li_buf == bp);
3476 dip = xfs_buf_offset(bp, ip->i_imap.im_boffset);
3479 * We don't flush the inode if any of the following checks fail, but we
3480 * do still update the log item and attach to the backing buffer as if
3481 * the flush happened. This is a formality to facilitate predictable
3482 * error handling as the caller will shutdown and fail the buffer.
3484 error = -EFSCORRUPTED;
3485 if (XFS_TEST_ERROR(dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC),
3486 mp, XFS_ERRTAG_IFLUSH_1)) {
3487 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3488 "%s: Bad inode %Lu magic number 0x%x, ptr "PTR_FMT,
3489 __func__, ip->i_ino, be16_to_cpu(dip->di_magic), dip);
3492 if (S_ISREG(VFS_I(ip)->i_mode)) {
3494 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3495 ip->i_df.if_format != XFS_DINODE_FMT_BTREE,
3496 mp, XFS_ERRTAG_IFLUSH_3)) {
3497 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3498 "%s: Bad regular inode %Lu, ptr "PTR_FMT,
3499 __func__, ip->i_ino, ip);
3502 } else if (S_ISDIR(VFS_I(ip)->i_mode)) {
3504 ip->i_df.if_format != XFS_DINODE_FMT_EXTENTS &&
3505 ip->i_df.if_format != XFS_DINODE_FMT_BTREE &&
3506 ip->i_df.if_format != XFS_DINODE_FMT_LOCAL,
3507 mp, XFS_ERRTAG_IFLUSH_4)) {
3508 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3509 "%s: Bad directory inode %Lu, ptr "PTR_FMT,
3510 __func__, ip->i_ino, ip);
3514 if (XFS_TEST_ERROR(ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp) >
3515 ip->i_nblocks, mp, XFS_ERRTAG_IFLUSH_5)) {
3516 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3517 "%s: detected corrupt incore inode %Lu, "
3518 "total extents = %d, nblocks = %Ld, ptr "PTR_FMT,
3519 __func__, ip->i_ino,
3520 ip->i_df.if_nextents + xfs_ifork_nextents(ip->i_afp),
3524 if (XFS_TEST_ERROR(ip->i_forkoff > mp->m_sb.sb_inodesize,
3525 mp, XFS_ERRTAG_IFLUSH_6)) {
3526 xfs_alert_tag(mp, XFS_PTAG_IFLUSH,
3527 "%s: bad inode %Lu, forkoff 0x%x, ptr "PTR_FMT,
3528 __func__, ip->i_ino, ip->i_forkoff, ip);
3533	 * Inode item log recovery for v2 inodes is dependent on the flushiter
3534 * count for correct sequencing. We bump the flush iteration count so
3535 * we can detect flushes which postdate a log record during recovery.
3536 * This is redundant as we now log every change and hence this can't
3537 * happen but we need to still do it to ensure backwards compatibility
3538 * with old kernels that predate logging all inode changes.
3540 if (!xfs_has_v3inodes(mp))
3544 * If there are inline format data / attr forks attached to this inode,
3545 * make sure they are not corrupt.
3547 if (ip->i_df.if_format == XFS_DINODE_FMT_LOCAL &&
3548 xfs_ifork_verify_local_data(ip))
3550 if (ip->i_afp && ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL &&
3551 xfs_ifork_verify_local_attr(ip))
3555 * Copy the dirty parts of the inode into the on-disk inode. We always
3556	 * copy out the core of the inode, because if the inode is dirty at all, the core must be.
3559 xfs_inode_to_disk(ip, dip, iip->ili_item.li_lsn);
3561 /* Wrap, we never let the log put out DI_MAX_FLUSH */
3562 if (!xfs_has_v3inodes(mp)) {
3563 if (ip->i_flushiter == DI_MAX_FLUSH)
3564 ip->i_flushiter = 0;
3567 xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK);
3568 if (XFS_IFORK_Q(ip))
3569 xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK);
3572 * We've recorded everything logged in the inode, so we'd like to clear
3573 * the ili_fields bits so we don't log and flush things unnecessarily.
3574 * However, we can't stop logging all this information until the data
3575 * we've copied into the disk buffer is written to disk. If we did we
3576 * might overwrite the copy of the inode in the log with all the data
3577 * after re-logging only part of it, and in the face of a crash we
3578 * wouldn't have all the data we need to recover.
3580 * What we do is move the bits to the ili_last_fields field. When
3581 * logging the inode, these bits are moved back to the ili_fields field.
3582 * In the xfs_buf_inode_iodone() routine we clear ili_last_fields, since
3583 * we know that the information those bits represent is permanently on
3584 * disk. As long as the flush completes before the inode is logged
3585 * again, then both ili_fields and ili_last_fields will be cleared.
3589 spin_lock(&iip->ili_lock);
3590 iip->ili_last_fields = iip->ili_fields;
3591 iip->ili_fields = 0;
3592 iip->ili_fsync_fields = 0;
3593 spin_unlock(&iip->ili_lock);
3596 * Store the current LSN of the inode so that we can tell whether the
3597 * item has moved in the AIL from xfs_buf_inode_iodone().
3599 xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn,
3600 &iip->ili_item.li_lsn);
3602 /* generate the checksum. */
3603 xfs_dinode_calc_crc(mp, dip);
3608 * Non-blocking flush of dirty inode metadata into the backing buffer.
3610 * The caller must have a reference to the inode and hold the cluster buffer
3611	 * locked. The function will walk across all the inodes on the cluster buffer that it
3612 * can find and lock without blocking, and flush them to the cluster buffer.
3614 * On successful flushing of at least one inode, the caller must write out the
3615 * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and
3616 * the caller needs to release the buffer. On failure, the filesystem will be
3617	 * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED returned.
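/*
 * A sketch of the resulting caller contract (error handling elided):
 *
 *	error = xfs_iflush_cluster(bp);
 *	if (!error)
 *		write out and release bp
 *	else if (error == -EAGAIN)
 *		release bp (nothing was flushed)
 *	else
 *		bp has already been released for us
 */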
3624 struct xfs_mount *mp = bp->b_mount;
3625 struct xfs_log_item *lip, *n;
3626 struct xfs_inode *ip;
3627 struct xfs_inode_log_item *iip;
3632 * We must use the safe variant here as on shutdown xfs_iflush_abort()
3633 * can remove itself from the list.
3635 list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) {
3636 iip = (struct xfs_inode_log_item *)lip;
3637 ip = iip->ili_inode;
3640 * Quick and dirty check to avoid locks if possible.
3642 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING))
3644 if (xfs_ipincount(ip))
3648 * The inode is still attached to the buffer, which means it is
3649 * dirty but reclaim might try to grab it. Check carefully for
3650 * that, and grab the ilock while still holding the i_flags_lock
3651 * to guarantee reclaim will not be able to reclaim this inode
3652 * once we drop the i_flags_lock.
3654 spin_lock(&ip->i_flags_lock);
3655 ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE));
3656 if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLUSHING)) {
3657 spin_unlock(&ip->i_flags_lock);
3662 * ILOCK will pin the inode against reclaim and prevent
3663 * concurrent transactions modifying the inode while we are
3664 * flushing the inode. If we get the lock, set the flushing
3665 * state before we drop the i_flags_lock.
3667 if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) {
3668 spin_unlock(&ip->i_flags_lock);
3671 __xfs_iflags_set(ip, XFS_IFLUSHING);
3672 spin_unlock(&ip->i_flags_lock);
3675 * Abort flushing this inode if we are shut down because the
3676 * inode may not currently be in the AIL. This can occur when
3677 * log I/O failure unpins the inode without inserting into the
3678 * AIL, leaving a dirty/unpinned inode attached to the buffer
3679 * that otherwise looks like it should be flushed.
3681 if (xfs_is_shutdown(mp)) {
3682 xfs_iunpin_wait(ip);
3683 xfs_iflush_abort(ip);
3684 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3689 /* don't block waiting on a log force to unpin dirty inodes */
3690 if (xfs_ipincount(ip)) {
3691 xfs_iflags_clear(ip, XFS_IFLUSHING);
3692 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3696 if (!xfs_inode_clean(ip))
3697 error = xfs_iflush(ip, bp);
3699 xfs_iflags_clear(ip, XFS_IFLUSHING);
3700 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3707 bp->b_flags |= XBF_ASYNC;
3708 xfs_buf_ioend_fail(bp);
3709 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3716 XFS_STATS_INC(mp, xs_icluster_flushcnt);
3717 XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount);
3722 /* Release an inode. */
3725 struct xfs_inode *ip)
3727 trace_xfs_irele(ip, _RET_IP_);
3732	 * Ensure all committed transactions touching the inode are written to the log.
3735 xfs_log_force_inode(
3736 struct xfs_inode *ip)
3740 xfs_ilock(ip, XFS_ILOCK_SHARED);
3741 if (xfs_ipincount(ip))
3742 seq = ip->i_itemp->ili_commit_seq;
3743 xfs_iunlock(ip, XFS_ILOCK_SHARED);
3747 return xfs_log_force_seq(ip->i_mount, seq, XFS_LOG_SYNC, NULL);
3751 * Grab the exclusive iolock for a data copy from src to dest, making sure to
3752 * abide vfs locking order (lowest pointer value goes first) and breaking the
3753 * layout leases before proceeding. The loop is needed because we cannot call
3754 * the blocking break_layout() with the iolocks held, and therefore have to
3755 * back out both locks.
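/*
 * The retry loop below therefore has this shape (a sketch):
 *
 *	lock src;
 *	if (break_layout(src, false) == -EWOULDBLOCK)
 *		unlock, wait for the lease to break, start over;
 *	lock dest (nested);
 *	if (break_layout(dest, false) == -EWOULDBLOCK)
 *		unlock both, wait for the lease to break, start over;
 */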
3758 xfs_iolock_two_inodes_and_break_layout(
3768 /* Wait to break both inodes' layouts before we start locking. */
3769 error = break_layout(src, true);
3773 error = break_layout(dest, true);
3778 /* Lock one inode and make sure nobody got in and leased it. */
3780 error = break_layout(src, false);
3783 if (error == -EWOULDBLOCK)
3791 /* Lock the other inode and make sure nobody got in and leased it. */
3792 inode_lock_nested(dest, I_MUTEX_NONDIR2);
3793 error = break_layout(dest, false);
3797 if (error == -EWOULDBLOCK)
3806	 * Lock two inodes so that userspace cannot initiate I/O via file syscalls or mmap activity.
3811 struct xfs_inode *ip1,
3812 struct xfs_inode *ip2)
3816 ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2));
3819 filemap_invalidate_lock_two(VFS_I(ip1)->i_mapping,
3820 VFS_I(ip2)->i_mapping);
3824 /* Unlock both inodes to allow IO and mmap activity. */
3826 xfs_iunlock2_io_mmap(
3827 struct xfs_inode *ip1,
3828 struct xfs_inode *ip2)
3830 filemap_invalidate_unlock_two(VFS_I(ip1)->i_mapping,
3831 VFS_I(ip2)->i_mapping);
3832 inode_unlock(VFS_I(ip2));
3834 inode_unlock(VFS_I(ip1));