/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *      John McCutchan  <ttb@tentacle.dhs.org>
 *      Robert Love     <rml@novell.com>
 *
 * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>
static atomic_t inotify_cookie;
/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_mutex (synchronize shrink_icache_memory())
 *      inode_lock (protects the super_block->s_inodes list)
 *      inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
 *              inotify_handle->mutex (protects inotify_handle and watches->h_list)
 *
 * The inode->inotify_mutex and inotify_handle->mutex are held during execution
 * of a caller's event handler.  Thus, the caller must not hold any locks
 * taken in their event handler while calling any of the published inotify
 * interfaces.
 */
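
/*
 * Illustrative sketch (not part of the original file): any path that needs
 * both mutexes must take inode->inotify_mutex before ih->mutex, mirroring
 * the ordering above.  The function name here is hypothetical.
 */
#if 0 /* usage sketch only */
static void example_locked_walk(struct inode *inode, struct inotify_handle *ih)
{
        mutex_lock(&inode->inotify_mutex);      /* outer lock first */
        mutex_lock(&ih->mutex);                 /* then the handle lock */
        /* ... inode->inotify_watches and ih->watches are stable here ... */
        mutex_unlock(&ih->mutex);
        mutex_unlock(&inode->inotify_mutex);
}
#endif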
/*
 * Lifetimes of the three main data structures--inotify_handle, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
 * Additional references can bump the count via get_inotify_handle() and drop
 * the count via put_inotify_handle().
 *
 * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
 * to remove_watch_no_event().  Additional references can bump the count via
 * get_inotify_watch() and drop the count via put_inotify_watch().  The caller
 * is responsible for the final put after receiving IN_IGNORED, or when using
 * IN_ONESHOT after receiving the first event.  Inotify does the final put if
 * inotify_destroy() is called.
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * inotify_add_watch() to the final put_inotify_watch().
 */
/*
 * struct inotify_handle - represents an inotify instance
 *
 * This structure is protected by the mutex 'mutex'.
 */
struct inotify_handle {
        struct idr              idr;            /* idr mapping wd -> watch */
        struct mutex            mutex;          /* protects this bad boy */
        struct list_head        watches;        /* list of watches */
        atomic_t                count;          /* reference count */
        u32                     last_wd;        /* the last wd allocated */
        const struct inotify_operations *in_ops; /* inotify caller operations */
};
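
/*
 * Illustrative sketch (not part of the original file): a consumer supplies
 * the in_ops table when creating a handle and typically embeds struct
 * inotify_watch in its own watch type.  The example_* names below are
 * hypothetical; only the inotify_operations signatures come from
 * <linux/inotify.h>.
 */
#if 0 /* usage sketch only */
struct example_watch {
        struct inotify_watch wdata;     /* embedded so container_of() works */
        /* ... caller-private state ... */
};

static void example_handle_event(struct inotify_watch *w, u32 wd, u32 mask,
                                 u32 cookie, const char *name,
                                 struct inode *inode)
{
        if (mask & IN_IGNORED)
                put_inotify_watch(w);   /* the final put is the caller's job */
}

static void example_destroy_watch(struct inotify_watch *w)
{
        kfree(container_of(w, struct example_watch, wdata));
}

static const struct inotify_operations example_in_ops = {
        .handle_event   = example_handle_event,
        .destroy_watch  = example_destroy_watch,
};
#endif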
static inline void get_inotify_handle(struct inotify_handle *ih)
{
        atomic_inc(&ih->count);
}

static inline void put_inotify_handle(struct inotify_handle *ih)
{
        if (atomic_dec_and_test(&ih->count)) {
                idr_destroy(&ih->idr);
                kfree(ih);
        }
}
/**
 * get_inotify_watch - grab a reference to an inotify_watch
 * @watch: watch to grab
 */
void get_inotify_watch(struct inotify_watch *watch)
{
        atomic_inc(&watch->count);
}
EXPORT_SYMBOL_GPL(get_inotify_watch);
/**
 * put_inotify_watch - decrements the ref count on a given watch.  cleans up
 * watch references if the count reaches zero.  inotify_watch is freed by
 * inotify callers via the destroy_watch() op.
 * @watch: watch to release
 */
void put_inotify_watch(struct inotify_watch *watch)
{
        if (atomic_dec_and_test(&watch->count)) {
                struct inotify_handle *ih = watch->ih;

                iput(watch->inode);
                ih->in_ops->destroy_watch(watch);
                put_inotify_handle(ih);
        }
}
EXPORT_SYMBOL_GPL(put_inotify_watch);
/*
 * inotify_handle_get_wd - returns the next WD for use by the given handle
 *
 * Callers must hold ih->mutex.  This function can sleep.
 */
static int inotify_handle_get_wd(struct inotify_handle *ih,
                                 struct inotify_watch *watch)
{
        int ret;

        do {
                if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL)))
                        return -ENOSPC;
                ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
        } while (ret == -EAGAIN);

        if (likely(!ret))
                ih->last_wd = watch->wd;

        return ret;
}
/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless, we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
        return !list_empty(&inode->inotify_watches);
}
/*
 * Get child dentry flag into synch with parent inode.
 * Flag should always be clear for negative dentries.
 */
static void set_dentry_child_flags(struct inode *inode, int watched)
{
        struct dentry *alias;

        spin_lock(&dcache_lock);
        list_for_each_entry(alias, &inode->i_dentry, d_alias) {
                struct dentry *child;

                list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
                        if (!child->d_inode) {
                                WARN_ON(child->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
                                continue;
                        }

                        spin_lock(&child->d_lock);
                        if (watched) {
                                WARN_ON(child->d_flags &
                                                DCACHE_INOTIFY_PARENT_WATCHED);
                                child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
                        } else {
                                WARN_ON(!(child->d_flags &
                                                DCACHE_INOTIFY_PARENT_WATCHED));
                                child->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
                        }
                        spin_unlock(&child->d_lock);
                }
        }
        spin_unlock(&dcache_lock);
}
/*
 * inode_find_handle - find the watch associated with the given inode and
 * handle
 *
 * Callers must hold inode->inotify_mutex.
 */
static struct inotify_watch *inode_find_handle(struct inode *inode,
                                               struct inotify_handle *ih)
{
        struct inotify_watch *watch;

        list_for_each_entry(watch, &inode->inotify_watches, i_list) {
                if (watch->ih == ih)
                        return watch;
        }

        return NULL;
}
/*
 * remove_watch_no_event - remove_watch() without the IN_IGNORED event.
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
                                  struct inotify_handle *ih)
{
        list_del(&watch->i_list);
        list_del(&watch->h_list);

        if (!inotify_inode_watched(watch->inode))
                set_dentry_child_flags(watch->inode, 0);

        idr_remove(&ih->idr, watch->wd);
}
/*
 * remove_watch - Remove a watch from both the handle and the inode.  Sends
 * the IN_IGNORED event signifying that the inode is no longer watched.
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
static void remove_watch(struct inotify_watch *watch, struct inotify_handle *ih)
{
        remove_watch_no_event(watch, ih);
        ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
}
/* Kernel API for producing events */
/*
 * inotify_d_instantiate - instantiate dcache entry for inode
 */
void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
{
        struct dentry *parent;

        if (!inode)
                return;

        WARN_ON(entry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED);
        spin_lock(&entry->d_lock);
        parent = entry->d_parent;
        if (parent->d_inode && inotify_inode_watched(parent->d_inode))
                entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
        spin_unlock(&entry->d_lock);
}
/*
 * inotify_d_move - dcache entry has been moved
 */
void inotify_d_move(struct dentry *entry)
{
        struct dentry *parent;

        parent = entry->d_parent;
        if (inotify_inode_watched(parent->d_inode))
                entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
        else
                entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
}
/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 * @n_inode: inode associated with name
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
                               const char *name, struct inode *n_inode)
{
        struct inotify_watch *watch, *next;

        if (!inotify_inode_watched(inode))
                return;

        mutex_lock(&inode->inotify_mutex);
        list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
                u32 watch_mask = watch->mask;
                if (watch_mask & mask) {
                        struct inotify_handle *ih = watch->ih;
                        mutex_lock(&ih->mutex);
                        if (watch_mask & IN_ONESHOT)
                                remove_watch_no_event(watch, ih);
                        ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
                                                 name, n_inode);
                        mutex_unlock(&ih->mutex);
                }
        }
        mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
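
/*
 * Illustrative sketch (not part of the original file): VFS hooks such as
 * the fsnotify_*() helpers in <linux/fsnotify.h> produce events roughly
 * like this.  The helper name below is hypothetical.
 */
#if 0 /* usage sketch only */
static inline void example_notify_modify(struct dentry *dentry)
{
        /* watches on the parent directory get the child's name */
        inotify_dentry_parent_queue_event(dentry, IN_MODIFY, 0,
                                          dentry->d_name.name);
        /* watches on the file itself get a nameless event */
        inotify_inode_queue_event(dentry->d_inode, IN_MODIFY, 0, NULL, NULL);
}
#endif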
/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
                                       u32 cookie, const char *name)
{
        struct dentry *parent;
        struct inode *inode;

        if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
                return;

        spin_lock(&dentry->d_lock);
        parent = dentry->d_parent;
        inode = parent->d_inode;

        if (inotify_inode_watched(inode)) {
                dget(parent);
                spin_unlock(&dentry->d_lock);
                inotify_inode_queue_event(inode, mask, cookie, name,
                                          dentry->d_inode);
                dput(parent);
        } else
                spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
        return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
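
/*
 * Illustrative sketch (not part of the original file): a rename produces an
 * IN_MOVED_FROM/IN_MOVED_TO pair that consumers re-associate via a shared
 * cookie, along the lines of fsnotify_move() in <linux/fsnotify.h>.  The
 * helper below is hypothetical and heavily simplified.
 */
#if 0 /* usage sketch only */
static inline void example_notify_move(struct inode *old_dir,
                                       struct inode *new_dir,
                                       const char *old_name,
                                       const char *new_name,
                                       struct inode *source)
{
        u32 cookie = inotify_get_cookie();      /* one cookie links the pair */

        inotify_inode_queue_event(old_dir, IN_MOVED_FROM, cookie, old_name,
                                  source);
        inotify_inode_queue_event(new_dir, IN_MOVED_TO, cookie, new_name,
                                  source);
}
#endif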
/**
 * inotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
        struct inode *inode, *next_i, *need_iput = NULL;

        list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
                struct inotify_watch *watch, *next_w;
                struct inode *need_iput_tmp;
                struct list_head *watches;

                /*
                 * If i_count is zero, the inode cannot have any watches and
                 * doing an __iget/iput with MS_ACTIVE clear would actually
                 * evict all inodes with zero i_count from icache which is
                 * unnecessarily violent and may in fact be illegal to do.
                 */
                if (!atomic_read(&inode->i_count))
                        continue;

                /*
                 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
                 * I_WILL_FREE which is fine because by that point the inode
                 * cannot have any associated watches.
                 */
                if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
                        continue;

                need_iput_tmp = need_iput;
                need_iput = NULL;
                /* In case the remove_watch() drops a reference. */
                if (inode != need_iput_tmp)
                        __iget(inode);
                else
                        need_iput_tmp = NULL;
                /* In case the dropping of a reference would nuke next_i. */
                if ((&next_i->i_sb_list != list) &&
                                atomic_read(&next_i->i_count) &&
                                !(next_i->i_state & (I_CLEAR | I_FREEING |
                                        I_WILL_FREE))) {
                        __iget(next_i);
                        need_iput = next_i;
                }

                /*
                 * We can safely drop inode_lock here because we hold
                 * references on both inode and next_i.  Also no new inodes
                 * will be added since the umount has begun.  Finally,
                 * iprune_mutex keeps shrink_icache_memory() away.
                 */
                spin_unlock(&inode_lock);

                if (need_iput_tmp)
                        iput(need_iput_tmp);

                /* for each watch, send IN_UNMOUNT and then remove it */
                mutex_lock(&inode->inotify_mutex);
                watches = &inode->inotify_watches;
                list_for_each_entry_safe(watch, next_w, watches, i_list) {
                        struct inotify_handle *ih = watch->ih;
                        mutex_lock(&ih->mutex);
                        ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
                                                 NULL, NULL);
                        remove_watch(watch, ih);
                        mutex_unlock(&ih->mutex);
                }
                mutex_unlock(&inode->inotify_mutex);
                iput(inode);

                spin_lock(&inode_lock);
        }
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);
/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
        struct inotify_watch *watch, *next;

        mutex_lock(&inode->inotify_mutex);
        list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
                struct inotify_handle *ih = watch->ih;
                mutex_lock(&ih->mutex);
                remove_watch(watch, ih);
                mutex_unlock(&ih->mutex);
        }
        mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
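
/*
 * Illustrative sketch (not part of the original file): the VFS calls this
 * from its inode-eviction paths once the inode is definitely going away,
 * so every remaining watch sees IN_IGNORED.
 */
#if 0 /* usage sketch only */
        /* somewhere in inode teardown: */
        inotify_inode_is_dead(inode);
#endif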
/* Kernel Consumer API */
/**
 * inotify_init - allocate and initialize an inotify instance
 * @ops: caller's inotify operations
 */
struct inotify_handle *inotify_init(const struct inotify_operations *ops)
{
        struct inotify_handle *ih;

        ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
        if (unlikely(!ih))
                return ERR_PTR(-ENOMEM);

        idr_init(&ih->idr);
        INIT_LIST_HEAD(&ih->watches);
        mutex_init(&ih->mutex);
        ih->last_wd = 0;
        ih->in_ops = ops;
        atomic_set(&ih->count, 0);
        get_inotify_handle(ih);

        return ih;
}
EXPORT_SYMBOL_GPL(inotify_init);
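
/*
 * Illustrative sketch (not part of the original file): a consumer typically
 * creates one handle at init time and tears it down with inotify_destroy().
 * example_in_ops is the hypothetical table sketched earlier.
 */
#if 0 /* usage sketch only */
        struct inotify_handle *ih = inotify_init(&example_in_ops);

        if (IS_ERR(ih))
                return PTR_ERR(ih);
        /* ... add watches, receive events via example_handle_event() ... */
        inotify_destroy(ih);
#endif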
/**
 * inotify_init_watch - initialize an inotify watch
 * @watch: watch to initialize
 */
void inotify_init_watch(struct inotify_watch *watch)
{
        INIT_LIST_HEAD(&watch->h_list);
        INIT_LIST_HEAD(&watch->i_list);
        atomic_set(&watch->count, 0);
        get_inotify_watch(watch); /* initial get */
}
EXPORT_SYMBOL_GPL(inotify_init_watch);
/**
 * inotify_destroy - clean up and destroy an inotify instance
 * @ih: inotify handle
 */
void inotify_destroy(struct inotify_handle *ih)
{
        /*
         * Destroy all of the watches for this handle.  Unfortunately, not very
         * pretty.  We cannot do a simple iteration over the list, because we
         * do not know the inode until we iterate to the watch.  But we need to
         * hold inode->inotify_mutex before ih->mutex.  The following works.
         */
        while (1) {
                struct inotify_watch *watch;
                struct list_head *watches;
                struct inode *inode;

                mutex_lock(&ih->mutex);
                watches = &ih->watches;
                if (list_empty(watches)) {
                        mutex_unlock(&ih->mutex);
                        break;
                }
                watch = list_entry(watches->next, struct inotify_watch, h_list);
                get_inotify_watch(watch);
                mutex_unlock(&ih->mutex);

                inode = watch->inode;
                mutex_lock(&inode->inotify_mutex);
                mutex_lock(&ih->mutex);

                /* make sure we didn't race with another list removal */
                if (likely(idr_find(&ih->idr, watch->wd))) {
                        remove_watch_no_event(watch, ih);
                        put_inotify_watch(watch);
                }

                mutex_unlock(&ih->mutex);
                mutex_unlock(&inode->inotify_mutex);
                put_inotify_watch(watch);
        }

        /* free this handle: the put matching the get in inotify_init() */
        put_inotify_handle(ih);
}
EXPORT_SYMBOL_GPL(inotify_destroy);
/**
 * inotify_find_watch - find an existing watch for an (ih,inode) pair
 * @ih: inotify handle
 * @inode: inode to watch
 * @watchp: pointer to existing inotify_watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
                       struct inotify_watch **watchp)
{
        struct inotify_watch *old;
        int ret = -ENOENT;

        mutex_lock(&inode->inotify_mutex);
        mutex_lock(&ih->mutex);

        old = inode_find_handle(inode, ih);
        if (unlikely(old)) {
                get_inotify_watch(old); /* caller must put watch */
                *watchp = old;
                ret = old->wd;
        }

        mutex_unlock(&ih->mutex);
        mutex_unlock(&inode->inotify_mutex);

        return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_watch);
/**
 * inotify_find_update_watch - find and update the mask of an existing watch
 * @ih: inotify handle
 * @inode: inode's watch to update
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
                              u32 mask)
{
        struct inotify_watch *old;
        int mask_add = 0;
        int ret;

        if (mask & IN_MASK_ADD)
                mask_add = 1;

        /* don't allow invalid bits: we don't want flags set */
        mask &= IN_ALL_EVENTS | IN_ONESHOT;
        if (unlikely(!mask))
                return -EINVAL;

        mutex_lock(&inode->inotify_mutex);
        mutex_lock(&ih->mutex);

        /*
         * Handle the case of re-adding a watch on an (inode,ih) pair that we
         * are already watching.  We just update the mask and return its wd.
         */
        old = inode_find_handle(inode, ih);
        if (unlikely(!old)) {
                ret = -ENOENT;
                goto out;
        }

        if (mask_add)
                old->mask |= mask;
        else
                old->mask = mask;
        ret = old->wd;
out:
        mutex_unlock(&ih->mutex);
        mutex_unlock(&inode->inotify_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_update_watch);
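
/*
 * Illustrative sketch (not part of the original file): IN_MASK_ADD ORs the
 * new bits into the existing mask instead of replacing it.
 */
#if 0 /* usage sketch only */
        /* watch currently has mask IN_CREATE */
        wd = inotify_find_update_watch(ih, inode, IN_DELETE | IN_MASK_ADD);
        /* the watch now has mask IN_CREATE | IN_DELETE */
#endif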
/**
 * inotify_add_watch - add a watch to an inotify instance
 * @ih: inotify handle
 * @watch: caller allocated watch structure
 * @inode: inode to watch
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 * Caller must ensure it only calls inotify_add_watch() once per watch.
 * Calls inotify_handle_get_wd() so may sleep.
 */
s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
                      struct inode *inode, u32 mask)
{
        int ret = 0;

        /* don't allow invalid bits: we don't want flags set */
        mask &= IN_ALL_EVENTS | IN_ONESHOT;
        if (unlikely(!mask))
                return -EINVAL;
        watch->mask = mask;

        mutex_lock(&inode->inotify_mutex);
        mutex_lock(&ih->mutex);

        /* Initialize a new watch */
        ret = inotify_handle_get_wd(ih, watch);
        if (unlikely(ret))
                goto out;
        ret = watch->wd;

        /* save a reference to handle and bump the count to make it official */
        get_inotify_handle(ih);
        watch->ih = ih;

        /*
         * Save a reference to the inode and bump the ref count to make it
         * official.  We hold a reference to nameidata, which makes this safe.
         */
        watch->inode = igrab(inode);

        if (!inotify_inode_watched(inode))
                set_dentry_child_flags(inode, 1);

        /* Add the watch to the handle's and the inode's list */
        list_add(&watch->h_list, &ih->watches);
        list_add(&watch->i_list, &inode->inotify_watches);
out:
        mutex_unlock(&ih->mutex);
        mutex_unlock(&inode->inotify_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(inotify_add_watch);
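
/*
 * Illustrative sketch (not part of the original file): the usual consumer
 * pattern, modeled loosely on the userspace-facing layer -- try to update
 * an existing watch first and only allocate a new one on -ENOENT.  Uses
 * the hypothetical struct example_watch from the earlier sketch.
 */
#if 0 /* usage sketch only */
static s32 example_add_or_update(struct inotify_handle *ih,
                                 struct inode *inode, u32 mask)
{
        struct example_watch *w;
        s32 ret;

        ret = inotify_find_update_watch(ih, inode, mask);
        if (ret != -ENOENT)
                return ret;     /* updated an existing watch, or real error */

        w = kzalloc(sizeof(*w), GFP_KERNEL);
        if (!w)
                return -ENOMEM;
        inotify_init_watch(&w->wdata);  /* takes the initial reference */

        ret = inotify_add_watch(ih, &w->wdata, inode, mask);
        if (ret < 0)
                kfree(w);       /* add failed; the watch was never registered */
        return ret;
}
#endif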
/**
 * inotify_rm_wd - remove a watch from an inotify instance
 * @ih: inotify handle
 * @wd: watch descriptor to remove
 *
 * Can sleep.
 */
int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
{
        struct inotify_watch *watch;
        struct inode *inode;

        mutex_lock(&ih->mutex);
        watch = idr_find(&ih->idr, wd);
        if (unlikely(!watch)) {
                mutex_unlock(&ih->mutex);
                return -EINVAL;
        }
        get_inotify_watch(watch);
        inode = watch->inode;
        mutex_unlock(&ih->mutex);

        mutex_lock(&inode->inotify_mutex);
        mutex_lock(&ih->mutex);

        /* make sure that we did not race */
        if (likely(idr_find(&ih->idr, wd) == watch))
                remove_watch(watch, ih);

        mutex_unlock(&ih->mutex);
        mutex_unlock(&inode->inotify_mutex);
        put_inotify_watch(watch);

        return 0;
}
EXPORT_SYMBOL_GPL(inotify_rm_wd);
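
/*
 * Illustrative sketch (not part of the original file): removal by
 * descriptor.  remove_watch() queues IN_IGNORED to the consumer, whose
 * handle_event() (see the earlier sketch) performs the final put.
 */
#if 0 /* usage sketch only */
        if (inotify_rm_wd(ih, wd))
                pr_debug("inotify: wd %u not registered with this handle\n", wd);
#endif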
/**
 * inotify_rm_watch - remove a watch from an inotify instance
 * @ih: inotify handle
 * @watch: watch to remove
 *
 * Can sleep.
 */
int inotify_rm_watch(struct inotify_handle *ih,
                     struct inotify_watch *watch)
{
        return inotify_rm_wd(ih, watch->wd);
}
EXPORT_SYMBOL_GPL(inotify_rm_watch);
/*
 * inotify_setup - core initialization function
 */
static int __init inotify_setup(void)
{
        atomic_set(&inotify_cookie, 0);

        return 0;
}

module_init(inotify_setup);