/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs_types.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_ialloc.h"
#include "xfs_quota.h"
#include "xfs_utils.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
/*
 * Define xfs inode iolock lockdep classes. We need to ensure that all active
 * inodes are considered the same for lockdep purposes, including inodes that
 * are recycled through the XFS_IRECLAIMABLE state. This is the only way to
 * guarantee the locks are considered the same when there are multiple lock
 * initialisation sites. Also, define a reclaimable inode class so it is
 * obvious in lockdep reports which class the report is against.
 */
static struct lock_class_key xfs_iolock_active;
struct lock_class_key xfs_iolock_reclaimable;
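
/*
 * Note: xfs_iolock_reclaimable is deliberately not static.  The VFS inode
 * eviction path (outside this file) is expected to re-initialise the iolock
 * and switch it over to this class before the inode is parked in the
 * XFS_IRECLAIMABLE state, roughly:
 *
 *	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
 *			&xfs_iolock_reclaimable, "xfs_iolock_reclaimable");
 *
 * so that lockdep reports name the reclaimable class explicitly.  This is an
 * illustrative sketch of the expected call site, not code from this file.
 */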
/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM.  Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			&xfs_iolock_active, "xfs_iolock_active");

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));

	return ip;
}
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}
void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
		       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}
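
/*
 * A rough sketch of the lookup side this zeroing protects (see
 * xfs_iget_cache_hit() below for the real thing): a racing RCU-protected
 * lookup may still find this inode in the radix tree, so it must take
 * i_flags_lock and recheck the inode number before trusting the match:
 *
 *	rcu_read_lock();
 *	ip = radix_tree_lookup(&pag->pag_ici_root, agino);
 *	if (ip) {
 *		spin_lock(&ip->i_flags_lock);
 *		if (ip->i_ino != ino)
 *			treat it as a miss - the inode is being freed
 *	}
 */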
/*
 * Check the validity of the inode we just found in the cache
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	wait_on_inode to wait for these flags to be cleared
	 *	instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(xs_ig_frecycle);
		error = EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = -inode_init_always(mp->m_super, inode);
		if (error) {
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);

			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		__xfs_inode_clear_reclaim_tag(mp, pag, ip);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
		lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
				&xfs_iolock_active, "xfs_iolock_active");

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE);
	XFS_STATS_INC(xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}
static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region.
	 */
	if (radix_tree_preload(GFP_KERNEL)) {
		error = EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	spin_lock(&pag->pag_ici_lock);

	/* insert the new inode */
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(xs_ig_dup);
		error = EAGAIN;
		goto out_preload_end;
	}

	/* These values _must_ be set before releasing the radix tree lock! */
	ip->i_udquot = ip->i_gdquot = NULL;
	xfs_iflags_set(ip, XFS_INEW);

	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return EINVAL;

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	ASSERT(ip->i_df.if_ext_max ==
	       XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
	/*
	 * If we have a real type for an on-disk inode, we can set ops(&unlock)
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
		xfs_setup_inode(ip);
	return 0;

out_error_or_again:
	if (error == EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
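
/*
 * Usage sketch (illustrative only): a typical non-transactional lookup grabs
 * the inode with the inode lock held shared and drops both the lock and the
 * reference when done:
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
 *	if (error)
 *		return error;
 *	... use ip ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 *	IRELE(ip);
 */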
/*
 * This is a wrapper routine around the xfs_ilock() routine
 * used to centralize some grungy code.  It is used in places
 * that wish to lock the inode solely for reading the extents.
 * The reason these places can't just call xfs_ilock(SHARED)
 * is that the inode lock also guards the bringing in of the
 * extents from disk for a file in b-tree format.  If the inode
 * is in b-tree format, then we need to lock the inode exclusively
 * until the extents are read in.  Locking it exclusively all
 * the time would limit our parallelism unnecessarily, though.
 * What we do instead is check to see if the extents have been
 * read in yet, and only lock the inode exclusively if they
 * have not.
 *
 * The function returns a value which should be given to the
 * corresponding xfs_iunlock_map_shared().  This value is
 * the mode in which the lock was actually taken.
 */
uint
xfs_ilock_map_shared(
	xfs_inode_t	*ip)
{
	uint	lock_mode;

	if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) &&
	    ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) {
		lock_mode = XFS_ILOCK_EXCL;
	} else {
		lock_mode = XFS_ILOCK_SHARED;
	}

	xfs_ilock(ip, lock_mode);

	return lock_mode;
}
/*
 * This is simply the unlock routine to go with xfs_ilock_map_shared().
 * All it does is call xfs_iunlock() with the given lock_mode.
 */
void
xfs_iunlock_map_shared(
	xfs_inode_t	*ip,
	unsigned int	lock_mode)
{
	xfs_iunlock(ip, lock_mode);
}
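
/*
 * Usage sketch (illustrative): take whatever mode the helper decided on,
 * read the extent list, then hand the same mode back:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_map_shared(ip);
 *	... read the data fork extents ...
 *	xfs_iunlock_map_shared(ip, lock_mode);
 */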
/*
 * The xfs inode contains 2 locks: a multi-reader lock called the
 * i_iolock and a multi-reader lock called the i_lock.  This routine
 * allows either or both of the locks to be obtained.
 *
 * The 2 locks should always be ordered so that the IO lock is
 * obtained first in order to prevent deadlock.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks
 *       to be locked.  It can be:
 *		XFS_IOLOCK_SHARED,
 *		XFS_IOLOCK_EXCL,
 *		XFS_ILOCK_SHARED,
 *		XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED,
 *		XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL
 */
void
xfs_ilock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags));

	if (lock_flags & XFS_ILOCK_EXCL)
		mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));
	else if (lock_flags & XFS_ILOCK_SHARED)
		mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags));

	trace_xfs_ilock(ip, lock_flags, _RET_IP_);
}
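
/*
 * Usage sketch (illustrative): lock order is always the IO lock before the
 * inode lock, and the flags passed to xfs_iunlock() must match those given
 * to xfs_ilock():
 *
 *	xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 *	... modify the inode ...
 *	xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
 */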
/*
 * This is just like xfs_ilock(), except that the caller
 * is guaranteed not to sleep.  It returns 1 if it gets
 * the requested locks and 0 otherwise.  If the IO lock is
 * obtained but the inode lock cannot be, then the IO lock
 * is dropped before returning.
 *
 * ip -- the inode being locked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 locked.  See the comment for xfs_ilock() for a list
 *	 of valid values.
 */
int
xfs_ilock_nowait(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0);

	if (lock_flags & XFS_IOLOCK_EXCL) {
		if (!mrtryupdate(&ip->i_iolock))
			goto out;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		if (!mrtryaccess(&ip->i_iolock))
			goto out;
	}
	if (lock_flags & XFS_ILOCK_EXCL) {
		if (!mrtryupdate(&ip->i_lock))
			goto out_undo_iolock;
	} else if (lock_flags & XFS_ILOCK_SHARED) {
		if (!mrtryaccess(&ip->i_lock))
			goto out_undo_iolock;
	}
	trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_);
	return 1;

 out_undo_iolock:
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);
 out:
	return 0;
}
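
/*
 * Usage sketch (illustrative): the return value must be checked, since no
 * locks are held at all when 0 comes back:
 *
 *	if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
 *		return EAGAIN;
 *	... inode is now locked exclusively ...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 */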
/*
 * xfs_iunlock() is used to drop the inode locks acquired with
 * xfs_ilock() and xfs_ilock_nowait().  The caller must pass
 * in the flags given to xfs_ilock() or xfs_ilock_nowait() so
 * that we know which locks to drop.
 *
 * ip -- the inode being unlocked
 * lock_flags -- this parameter indicates the inode's locks to be
 *	 unlocked.  See the comment for xfs_ilock() for a list
 *	 of valid values for this parameter.
 */
void
xfs_iunlock(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	/*
	 * You can't set both SHARED and EXCL for the same lock,
	 * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED,
	 * and XFS_ILOCK_EXCL are valid values to set in lock_flags.
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) !=
	       (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL));
	ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) !=
	       (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY |
			XFS_LOCK_DEP_MASK)) == 0);
	ASSERT(lock_flags != 0);

	if (lock_flags & XFS_IOLOCK_EXCL)
		mrunlock_excl(&ip->i_iolock);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		mrunlock_shared(&ip->i_iolock);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrunlock_excl(&ip->i_lock);
	else if (lock_flags & XFS_ILOCK_SHARED)
		mrunlock_shared(&ip->i_lock);

	if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) &&
	    !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) {
		/*
		 * Let the AIL know that this item has been unlocked in case
		 * it is in the AIL and anyone is waiting on it.  Don't do
		 * this if the caller has asked us not to.
		 */
		xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp,
					(xfs_log_item_t*)(ip->i_itemp));
	}
	trace_xfs_iunlock(ip, lock_flags, _RET_IP_);
}
/*
 * Give up write locks.  The I/O lock cannot be held nested
 * if it is being demoted.
 */
void
xfs_ilock_demote(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL));
	ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0);

	if (lock_flags & XFS_ILOCK_EXCL)
		mrdemote(&ip->i_lock);
	if (lock_flags & XFS_IOLOCK_EXCL)
		mrdemote(&ip->i_iolock);

	trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_);
}
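
/*
 * Usage sketch (illustrative): take the lock exclusively only for the update
 * that needs it, then demote to shared so other readers can proceed; the
 * final unlock must then use the shared flag:
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *	... exclusive work ...
 *	xfs_ilock_demote(ip, XFS_ILOCK_EXCL);
 *	... shared work ...
 *	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 */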
int
xfs_isilocked(
	xfs_inode_t		*ip,
	uint			lock_flags)
{
	if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) {
		if (!(lock_flags & XFS_ILOCK_SHARED))
			return !!ip->i_lock.mr_writer;
		return rwsem_is_locked(&ip->i_lock.mr_lock);
	}

	if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) {
		if (!(lock_flags & XFS_IOLOCK_SHARED))
			return !!ip->i_iolock.mr_writer;
		return rwsem_is_locked(&ip->i_iolock.mr_lock);
	}

	ASSERT(0);
	return 0;
}
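
/*
 * Usage sketch (illustrative): this helper exists so callers can assert
 * their locking assumptions in debug builds, e.g.:
 *
 *	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 */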