#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>
struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};
static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */
static struct fsnotify_group *audit_tree_group;
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}
static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}
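/*
 * Allocate a chunk with 'count' owner slots.  The embedded fsnotify mark is
 * initialised here but not yet attached to any inode; callers do that with
 * fsnotify_add_mark().
 */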
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}
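/*
 * Chunks are hashed by the address of the inode they watch.  hash_lock
 * protects insertions and removals; lookups run under RCU.
 */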
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
/* hash_lock & entry->lock is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->inode)
		return;
	list = chunk_hash(entry->inode);
	list_add_rcu(&chunk->hash, list);
}
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
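/*
 * Drop node p from its chunk, replacing the chunk with a smaller copy (or
 * removing it entirely when p was the last owner).  Called with hash_lock
 * held; the lock is dropped while the replacement is built and re-taken
 * before returning.
 */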
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->inode) {
		spin_unlock(&entry->lock);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}
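/* Attach a fresh single-owner chunk to an inode that has no chunk yet. */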
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}
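/*
 * Add a tree as an owner of the chunk on an inode.  If the inode already
 * carries a chunk, it is replaced by a copy with one more owner slot;
 * otherwise we fall back to create_chunk().
 */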
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
	return 0;
}
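/* Emit a CONFIG_CHANGE record for a tree rule that is being removed. */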
static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=");
	audit_log_string(ab, "remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}
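/*
 * Detach every rule that refers to the tree, log the removal and queue the
 * rule entries for freeing via RCU.
 */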
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}
/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}
static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return mnt->mnt_root->d_inode == arg;
}
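/*
 * Walk every tree on tree_list, re-resolve its path and prune the chunks
 * whose inodes are no longer reachable from the mounts collected there.
 */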
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}
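/*
 * Called from rule parsing: sanity-check the rule and hang a freshly
 * allocated audit_tree off it.  The tree is not put on tree_list here;
 * audit_add_tree_rule() does that later.
 */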
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(mnt->mnt_root->d_inode, arg);
}
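/*
 * Attach a rule to a tree.  If a tree with the same pathname already
 * exists it is reused; otherwise the rule's own tree is inserted on
 * tree_list and every mount under the path gets tagged.
 */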
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}
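/*
 * For every tree whose path covers 'old', additionally tag the mounts
 * collected at 'new', so that the new location matches the same rules.
 */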
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}
/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(&prune_list)) {
		struct audit_tree *victim;

		victim = list_entry(prune_list.next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
	return 0;
}
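/* Kick off a one-shot kernel thread to dispose of everything on prune_list. */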
static void audit_schedule_prune(void)
{
	kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}
/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}
/*
 * Here comes the stuff asynchronous to auditctl operations
 */
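/*
 * Called when the watched inode is on its way out: mark the chunk dead,
 * unhash it, and either kill the owning trees' rules right away or hand
 * the trees to the caller's postponed list / the prune thread.
 */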
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	if (need_prune)
		audit_schedule_prune();
	mutex_unlock(&audit_filter_mutex);
}
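/*
 * Events themselves are uninteresting here; the mark exists so that
 * freeing_mark fires once the watched inode is evicted.
 */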
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, void *data, int data_type,
				   const unsigned char *file_name, u32 cookie)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}
static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);