/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>

#include <asm/ioctls.h>

static atomic_t inotify_cookie;

static kmem_cache_t *watch_cachep;
static kmem_cache_t *event_cachep;

static struct vfsmount *inotify_mnt;

/* These are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_devices;
int inotify_max_user_watches;
int inotify_max_queued_events;

/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_sem (synchronize shrink_icache_memory())
 * inode_lock (protects the super_block->s_inodes list)
 * inode->inotify_sem (protects inode->inotify_watches and watches->i_list)
 * inotify_dev->sem (protects inotify_device and watches->d_list)
 */
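
/*
 * Note how inotify_ignore() and inotify_release() below honor the
 * inode->inotify_sem before dev->sem ordering: each pins a watch under
 * dev->sem, drops dev->sem, and only then reacquires the two locks in the
 * documented order.
 */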

/*
 * Lifetimes of the three main data structures--inotify_device, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_device: Lifetime is from open until release.  Additional references
 * can bump the count via get_inotify_dev() and drop the count via
 * put_inotify_dev().
 *
 * inotify_watch: Lifetime is from create_watch() to the final
 * put_inotify_watch().  Additional references can bump the count via
 * get_inotify_watch() and drop the count via put_inotify_watch().
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * create_watch() to put_inotify_watch().
 */

/*
 * struct inotify_device - represents an open instance of an inotify device
 *
 * This structure is protected by the semaphore 'sem'.
 */
struct inotify_device {
	wait_queue_head_t	wq;		/* wait queue for i/o */
	struct idr		idr;		/* idr mapping wd -> watch */
	struct semaphore	sem;		/* protects this bad boy */
	struct list_head	events;		/* list of queued events */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this dev */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
};

/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->sem of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head	list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any */
};

/*
 * struct inotify_watch - represents a watch request on a specific inode
 *
 * d_list is protected by dev->sem of the associated watch->dev.
 * i_list and mask are protected by inode->inotify_sem of the associated inode.
 * dev, inode, and wd are never written to once the watch is created.
 */
struct inotify_watch {
	struct list_head	d_list;	/* entry in inotify_device's list */
	struct list_head	i_list;	/* entry in inode's list */
	atomic_t		count;	/* reference count */
	struct inotify_device	*dev;	/* associated device */
	struct inode		*inode;	/* associated inode */
	s32			wd;	/* watch descriptor */
	u32			mask;	/* event mask for this watch */
};

static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		free_uid(dev->user);
		kfree(dev);
	}
}

static inline void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}

/*
 * put_inotify_watch - decrements the ref count on a given watch.  cleans up
 * the watch and its references if the count reaches zero.
 */
static inline void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		put_inotify_dev(watch->dev);
		iput(watch->inode);
		kmem_cache_free(watch_cachep, watch);
	}
}

/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
						  const char *name)
{
	struct inotify_kernel_event *kevent;

	kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
	if (unlikely(!kevent))
		return NULL;

	/* we hand this out to user-space, so zero it just in case */
	memset(&kevent->event, 0, sizeof(struct inotify_event));

	kevent->event.wd = wd;
	kevent->event.mask = mask;
	kevent->event.cookie = cookie;

	INIT_LIST_HEAD(&kevent->list);

	if (name) {
		size_t len, rem, event_size = sizeof(struct inotify_event);

		/*
		 * We need to pad the filename so as to properly align an
		 * array of inotify_event structures.  Because the structure is
		 * small and the common case is a small filename, we just round
		 * up to the next multiple of the structure's sizeof.  This is
		 * simple and safe for all architectures.
		 */
		len = strlen(name) + 1;
		rem = event_size - len;
		if (len > event_size) {
			rem = event_size - (len % event_size);
			if (len % event_size == 0)
				rem = 0;
		}

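		/*
		 * Worked example (assuming a 16-byte struct inotify_event):
		 * "foo" gives len = 4 and rem = 12, so 16 bytes are
		 * allocated; a 19-character name gives len = 20 and
		 * rem = 12, for 32 bytes total.
		 */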
		kevent->name = kmalloc(len + rem, GFP_KERNEL);
		if (unlikely(!kevent->name)) {
			kmem_cache_free(event_cachep, kevent);
			return NULL;
		}
		memcpy(kevent->name, name, len);
		if (rem)
			memset(kevent->name + len, 0, rem);
		kevent->event.len = len + rem;
	} else {
		kevent->event.len = 0;
		kevent->name = NULL;
	}

	return kevent;
}

/*
 * inotify_dev_get_event - return the oldest event in the given dev's queue
 *
 * Caller must hold dev->sem and ensure that the queue is not empty: on an
 * empty queue, list_entry() on the list head yields a bogus pointer.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_queue_event - add a new event to the given device
 *
 * Caller must hold dev->sem.  Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_device *dev,
				    struct inotify_watch *watch, u32 mask,
				    u32 cookie, const char *name)
{
	struct inotify_kernel_event *kevent, *last;

	/* coalescing: drop this event if it is a dupe of the most recent */
	if (!list_empty(&dev->events)) {
		last = list_entry(dev->events.prev,
				  struct inotify_kernel_event, list);
		if (last->event.mask == mask && last->event.wd == watch->wd &&
		    last->event.cookie == cookie) {
			const char *lastname = last->name;

			if (!name && !lastname)
				return;
			if (name && lastname && !strcmp(lastname, name))
				return;
		}
	}

	/* the queue overflowed and we already sent the Q_OVERFLOW event */
	if (unlikely(dev->event_count > dev->max_events))
		return;

	/* if the queue overflows, we need to notify user space */
	if (unlikely(dev->event_count == dev->max_events))
		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
	else
		kevent = kernel_event(watch->wd, mask, cookie, name);

	if (unlikely(!kevent))
		return;

	/* queue the event and wake up anyone waiting */
	dev->event_count++;
	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
	list_add_tail(&kevent->list, &dev->events);
	wake_up_interruptible(&dev->wq);
}

/*
 * remove_kevent - cleans up and ultimately frees the given kevent
 *
 * Caller must hold dev->sem.
 */
static void remove_kevent(struct inotify_device *dev,
			  struct inotify_kernel_event *kevent)
{
	list_del(&kevent->list);

	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;

	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}

/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->sem.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	if (!list_empty(&dev->events)) {
		struct inotify_kernel_event *kevent;
		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
	}
}

/*
 * inotify_dev_get_wd - returns the next WD for use by the given dev
 *
 * Callers must hold dev->sem.  This function can sleep.
 */
static int inotify_dev_get_wd(struct inotify_device *dev,
			      struct inotify_watch *watch)
{
	int ret;

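	/*
	 * Standard idr idiom: idr_pre_get() preallocates memory with
	 * GFP_KERNEL (and thus may sleep), while idr_get_new() performs
	 * the actual insertion and is retried on -EAGAIN if the
	 * preallocated memory has been consumed.
	 */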
	do {
		if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
			return -ENOSPC;
		ret = idr_get_new(&dev->idr, watch, &watch->wd);
	} while (ret == -EAGAIN);

	return ret;
}

/*
 * find_inode - resolve a user-given path to a specific inode and return a nd
 */
static int find_inode(const char __user *dirname, struct nameidata *nd)
{
	int error;

	error = __user_walk(dirname, LOOKUP_FOLLOW, nd);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = permission(nd->dentry->d_inode, MAY_READ, NULL);
	if (error)
		path_release(nd);
	return error;
}

/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->sem.  Calls inotify_dev_get_wd() so may sleep.
 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
 */
static struct inotify_watch *create_watch(struct inotify_device *dev,
					  u32 mask, struct inode *inode)
{
	struct inotify_watch *watch;
	int ret;

	if (atomic_read(&dev->user->inotify_watches) >=
	    inotify_max_user_watches)
		return ERR_PTR(-ENOSPC);

	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
	if (unlikely(!watch))
		return ERR_PTR(-ENOMEM);

	ret = inotify_dev_get_wd(dev, watch);
	if (unlikely(ret)) {
		kmem_cache_free(watch_cachep, watch);
		return ERR_PTR(ret);
	}

	watch->mask = mask;
	atomic_set(&watch->count, 0);
	INIT_LIST_HEAD(&watch->d_list);
	INIT_LIST_HEAD(&watch->i_list);

	/* save a reference to device and bump the count to make it official */
	get_inotify_dev(dev);
	watch->dev = dev;

	/*
	 * Save a reference to the inode and bump the ref count to make it
	 * official.  The caller's nameidata still pins the inode, which makes
	 * the igrab() safe.
	 */
	watch->inode = igrab(inode);

	/* bump our own count, corresponding to our entry in dev->watches */
	get_inotify_watch(watch);

	atomic_inc(&dev->user->inotify_watches);

	return watch;
}

/*
 * inode_find_dev - find the watch associated with the given inode and dev
 *
 * Callers must hold inode->inotify_sem.
 */
static struct inotify_watch *inode_find_dev(struct inode *inode,
					    struct inotify_device *dev)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->dev == dev)
			return watch;
	}

	return NULL;
}

/*
 * remove_watch_no_event - remove_watch() without the IN_IGNORED event.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_device *dev)
{
	list_del(&watch->i_list);
	list_del(&watch->d_list);

	atomic_dec(&dev->user->inotify_watches);
	idr_remove(&dev->idr, watch->wd);
	put_inotify_watch(watch);
}

/*
 * remove_watch - Remove a watch from both the device and the inode.  Sends
 * the IN_IGNORED event to the given device signifying that the inode is no
 * longer watched.
 *
 * Callers must hold both inode->inotify_sem and dev->sem.  We drop a
 * reference to the inode before returning.
 */
static void remove_watch(struct inotify_watch *watch,
			 struct inotify_device *dev)
{
	inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL);
	remove_watch_no_event(watch, dev);
}

/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless; we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}

/* Kernel API */

/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_device *dev = watch->dev;
			get_inotify_watch(watch);
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, mask, cookie, name);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, dev);
			up(&dev->sem);
			put_inotify_watch(watch);
		}
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);

/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
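
/*
 * Illustrative sketch (no caller in this file): a VFS event hook typically
 * pairs the two helpers above, notifying both the inode's own watches and
 * any watches on the parent directory, e.g. for a modification:
 *
 *	inotify_inode_queue_event(inode, IN_MODIFY, 0, NULL);
 *	inotify_dentry_parent_queue_event(dentry, IN_MODIFY, 0,
 *					  dentry->d_name.name);
 */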

/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
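
/*
 * Illustrative use of the cookie (no caller in this file): a rename hook
 * would fetch one cookie and attach it to both halves of the move so that
 * user-space can pair them:
 *
 *	u32 cookie = inotify_get_cookie();
 *	inotify_inode_queue_event(old_dir, IN_MOVED_FROM, cookie, old_name);
 *	inotify_inode_queue_event(new_dir, IN_MOVED_TO, cookie, new_name);
 */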

/**
 * inotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
		 * I_WILL_FREE which is fine because by that point the inode
		 * cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case the remove_watch() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
		    atomic_read(&next_i->i_count) &&
		    !(next_i->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_sem keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		down(&inode->inotify_sem);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_device *dev = watch->dev;
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, IN_UNMOUNT, 0, NULL);
			remove_watch(watch, dev);
			up(&dev->sem);
		}
		up(&inode->inotify_sem);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);

/**
 * inotify_inode_is_dead - an inode has been deleted, clean up any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_device *dev = watch->dev;
		down(&dev->sem);
		remove_watch(watch, dev);
		up(&dev->sem);
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);

/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct inotify_device *dev = file->private_data;
	int ret = 0;

	poll_wait(file, &dev->wq, wait);
	down(&dev->sem);
	if (!list_empty(&dev->events))
		ret = POLLIN | POLLRDNORM;
	up(&dev->sem);

	return ret;
}

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof(struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	while (1) {
		int events;

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		down(&dev->sem);
		events = !list_empty(&dev->events);
		up(&dev->sem);
		if (events) {
			ret = 0;
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	down(&dev->sem);
	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		if (event_size + kevent->event.len > count)
			break;

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name,
					 kevent->event.len)) {
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		remove_kevent(dev, kevent);
	}
	up(&dev->sem);

	return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	/*
	 * Destroy all of the watches on this device.  Unfortunately, not very
	 * pretty.  We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch.  But we need to
	 * hold inode->inotify_sem before dev->sem.  The following works.
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct inode *inode;

		down(&dev->sem);
		watches = &dev->watches;
		if (list_empty(watches)) {
			up(&dev->sem);
			break;
		}
		watch = list_entry(watches->next, struct inotify_watch, d_list);
		get_inotify_watch(watch);
		up(&dev->sem);

		inode = watch->inode;
		down(&inode->inotify_sem);
		down(&dev->sem);
		remove_watch_no_event(watch, dev);
		up(&dev->sem);
		up(&inode->inotify_sem);
		put_inotify_watch(watch);
	}

	/* destroy all of the events on this device */
	down(&dev->sem);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	up(&dev->sem);

	/* free this device: the put matching the get in sys_inotify_init() */
	put_inotify_dev(dev);

	return 0;
}

/*
 * inotify_ignore - remove a given wd from this inotify instance.
 *
 * Can sleep.
 */
static int inotify_ignore(struct inotify_device *dev, s32 wd)
{
	struct inotify_watch *watch, *found;
	struct inode *inode;

	down(&dev->sem);
	watch = idr_find(&dev->idr, wd);
	if (unlikely(!watch)) {
		up(&dev->sem);
		return -EINVAL;
	}
	get_inotify_watch(watch);
	inode = watch->inode;
	up(&dev->sem);

	down(&inode->inotify_sem);
	down(&dev->sem);

	/* make sure that we did not race with another removal */
	found = idr_find(&dev->idr, wd);
	if (likely(found == watch))
		remove_watch(watch, dev);

	up(&dev->sem);
	up(&inode->inotify_sem);

	/* drop the reference we took above; this may free the watch */
	put_inotify_watch(watch);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inotify_device *dev;
	void __user *p;
	int ret = -ENOTTY;

	dev = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		ret = put_user(dev->queue_size, (int __user *) p);
		break;
	}

	return ret;
}

static struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

asmlinkage long sys_inotify_init(void)
{
	struct inotify_device *dev;
	struct user_struct *user;
	int ret = -ENOTTY;
	int fd;
	struct file *filp;

	fd = get_unused_fd();
	if (fd < 0) {
		ret = fd;
		goto out;
	}

	filp = get_empty_filp();
	if (!filp) {
		put_unused_fd(fd);
		ret = -ENFILE;
		goto out;
	}
	filp->f_op = &inotify_fops;
	filp->f_vfsmnt = mntget(inotify_mnt);
	filp->f_dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY;

	user = get_uid(current->user);

	if (unlikely(atomic_read(&user->inotify_devs) >=
		     inotify_max_user_devices)) {
		ret = -EMFILE;
		goto out_err;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_err;
	}

	idr_init(&dev->idr);
	INIT_LIST_HEAD(&dev->events);
	INIT_LIST_HEAD(&dev->watches);
	init_waitqueue_head(&dev->wq);
	sema_init(&dev->sem, 1);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	atomic_set(&dev->count, 0);

	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);

	filp->private_data = dev;
	fd_install(fd, filp);
	return fd;
out_err:
	put_unused_fd(fd);
	put_filp(filp);
	free_uid(user);
out:
	return ret;
}

asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
	struct inotify_watch *watch, *old;
	struct inode *inode;
	struct inotify_device *dev;
	struct nameidata nd;
	struct file *filp;
	int ret;

	filp = fget(fd);
	if (!filp)
		return -EBADF;

	dev = filp->private_data;

	ret = find_inode(path, &nd);
	if (ret)
		goto fput_and_out;

	/* Held in place by reference in nd */
	inode = nd.dentry->d_inode;

	down(&inode->inotify_sem);
	down(&dev->sem);

	/* don't let user-space set invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS;
	if (!mask) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Handle the case of re-adding a watch on an (inode,dev) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_dev(inode, dev);
	if (unlikely(old)) {
		old->mask = mask;
		ret = old->wd;
		goto out;
	}

	watch = create_watch(dev, mask, inode);
	if (unlikely(IS_ERR(watch))) {
		ret = PTR_ERR(watch);
		goto out;
	}

	/* Add the watch to the device's and the inode's list */
	list_add(&watch->d_list, &dev->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
	ret = watch->wd;
out:
	up(&dev->sem);
	up(&inode->inotify_sem);
	path_release(&nd);
fput_and_out:
	fput(filp);
	return ret;
}

asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
	struct file *filp;
	struct inotify_device *dev;
	int ret;

	filp = fget(fd);
	if (!filp)
		return -EBADF;
	dev = filp->private_data;
	ret = inotify_ignore(dev, wd);
	fput(filp);
	return ret;
}
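
/*
 * Minimal user-space sketch of the three syscalls above (hypothetical
 * snippet; raw syscall(2) is shown because libc wrappers did not exist
 * when this interface was introduced, and the __NR_* numbers are
 * architecture-specific):
 *
 *	int fd = syscall(__NR_inotify_init);
 *	int wd = syscall(__NR_inotify_add_watch, fd, "/tmp", IN_CREATE);
 *	char buf[4096];
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	syscall(__NR_inotify_rm_watch, fd, wd);
 *
 * read() returns packed struct inotify_event records, each followed by
 * event.len bytes of name.
 */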

static struct super_block *
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};

/*
 * inotify_init - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_init(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_max_queued_events = 8192;
	inotify_max_user_devices = 128;
	inotify_max_user_watches = 8192;

	atomic_set(&inotify_cookie, 0);

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_watch),
					 0, SLAB_PANIC, NULL, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL, NULL);

	printk(KERN_INFO "inotify syscall\n");

	return 0;
}

module_init(inotify_init);