/* kernel/audit_tree.c */
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	atomic_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference. Some.
 */

static struct fsnotify_group *audit_tree_group;

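/*
 * Allocate a tree; the pathname lands in the flexible array member and
 * the caller gets the initial reference.
 */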
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

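/*
 * Drop the owner tree references held via owners[] and free the chunk;
 * runs once the last reference to the chunk is gone.
 */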
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

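/*
 * Allocate a chunk with room for @count owners, holding one reference in
 * ->refs and carrying a not-yet-attached fsnotify mark.
 */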
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

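/*
 * Inode pointers serve as hash keys; dividing by L1_CACHE_BYTES sheds the
 * low bits, which carry little entropy because of object alignment.
 */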
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

static inline struct list_head *chunk_hash(const struct inode *inode)
{
	unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct fsnotify_mark *entry = &chunk->mark;
	struct list_head *list;

	if (!entry->inode)
		return;
	list = chunk_hash(entry->inode);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	struct list_head *list = chunk_hash(inode);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/* mark.inode may have gone NULL, but who cares? */
		if (p->mark.inode == inode) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return 1;
	return 0;
}

/* tagging and untagging inodes with trees */

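/*
 * The low bits of node->index give the node's position in owners[], so
 * stepping back that many entries lands on owners[0], from which
 * container_of() recovers the chunk.
 */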
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

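/*
 * Remove @p's owner from its chunk: replace the chunk with a copy that has
 * one owner fewer, or kill the chunk outright if @p was the last owner.
 * Called with hash_lock held; drops and retakes it.
 */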
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	spin_lock(&entry->lock);
	if (chunk->dead || !entry->inode) {
		spin_unlock(&entry->lock);
		if (new)
			free_chunk(new);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		goto out;
	}

	if (!new)
		goto Fallback;

	fsnotify_duplicate_mark(&new->mark, entry);
	if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.inode, NULL, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

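/* Attach a fresh single-owner chunk to @inode on behalf of @tree. */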
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);
	if (!chunk)
		return -ENOMEM;

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	spin_lock(&old_entry->lock);
	if (!old_entry->inode) {
		/* old_entry is being shot, let's just lie */
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(old_entry);
		free_chunk(chunk);
		return -ENOENT;
	}

	fsnotify_duplicate_mark(chunk_entry, old_entry);
	if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	/* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find_inode_mark */
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=");
	audit_log_string(ab, "remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

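/*
 * Detach every rule on @tree and queue the rule entries for RCU freeing;
 * the caller holds audit_filter_mutex.
 */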
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: bring the marked (uncommitted) chunks to the front */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
	return d_backing_inode(mnt->mnt_root) == arg;
}

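/*
 * Re-check each tree against the mounts currently under its path and
 * prune chunks whose inodes no longer correspond to any of them.
 */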
void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			struct inode *inode = chunk->mark.inode;
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root, inode, root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&prune_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_create(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree\n");
		prune_thread = NULL;
		return -ENOMEM;
	} else {
		wake_up_process(prune_thread);
		return 0;
	}
}

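/*
 * Reuse an existing tree when one is already registered for the same
 * pathname; otherwise install @rule's tree and tag every mount under
 * its path.
 */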
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

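/*
 * Every tree whose path covers @old gets additionally tagged onto the
 * mounts collected at @new.
 */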
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

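/*
 * The watched inode is going away: detach the chunk and kill (or hand off
 * for postponed killing) every tree rooted at it.
 */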
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, void *data, int data_type,
				   const unsigned char *file_name, u32 cookie)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);