// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

struct audit_chunk {
	struct list_head hash;
	unsigned long key;
	struct fsnotify_mark *mark;
	struct list_head trees;		/* with root here */
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

struct audit_tree_mark {
	struct fsnotify_mark mark;
	struct audit_chunk *chunk;
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest through
 * audit_tree_mark (fsnotify mark). We replace the struct chunk on tagging /
 * untagging; the mark is stable as long as there is a chunk attached. The
 * association between mark and chunk is protected by hash_lock and
 * audit_tree_group->mark_mutex. Thus as long as we hold
 * audit_tree_group->mark_mutex and check that the mark is alive by the
 * FSNOTIFY_MARK_FLAG_ATTACHED flag check, we are sure the mark points to
 * the current chunk.
 *
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded .refs. The mark associated with the chunk
 * holds one chunk reference. This reference is dropped either when the mark
 * is going to be freed (the corresponding inode goes away) or when the chunk
 * attached to the mark gets replaced. This reference must be dropped using
 * audit_mark_put_chunk() to make sure it is dropped only after an RCU grace
 * period, as it protects RCU readers of the hash table.
 *
 * node.index allows to get from node.list to the containing chunk.
 * The MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference. Some.
 */

static struct fsnotify_group *audit_tree_group;
static struct kmem_cache *audit_tree_mark_cachep __read_mostly;

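/* Allocate a tree with a copy of the watched path in its trailing pathname[]. */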
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

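/* Drop the tree references held by the chunk's owner slots, then free it. */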
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

/*
 * Drop the reference to the chunk that was held by the mark. This is the
 * reference that gets dropped after we've removed the chunk from the hash
 * table and we use it to make sure the chunk cannot be freed before the RCU
 * grace period expires.
 */
static void audit_mark_put_chunk(struct audit_chunk *chunk)
{
	call_rcu(&chunk->head, __put_chunk);
}

static inline struct audit_tree_mark *audit_mark(struct fsnotify_mark *mark)
{
	return container_of(mark, struct audit_tree_mark, mark);
}

static struct audit_chunk *mark_chunk(struct fsnotify_mark *mark)
{
	return audit_mark(mark)->chunk;
}

static void audit_tree_destroy_watch(struct fsnotify_mark *mark)
{
	kmem_cache_free(audit_tree_mark_cachep, audit_mark(mark));
}

static struct fsnotify_mark *alloc_mark(void)
{
	struct audit_tree_mark *amark;

	amark = kmem_cache_zalloc(audit_tree_mark_cachep, GFP_KERNEL);
	if (!amark)
		return NULL;
	fsnotify_init_mark(&amark->mark, audit_tree_group);
	amark->mark.mask = FS_IN_IGNORED;
	return &amark->mark;
}

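/* Allocate a chunk with "count" owner slots; its refcount starts at 1. */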
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	int i;

	chunk = kzalloc(struct_size(chunk, owners, count), GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

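/*
 * The key is the address of the inode's i_fsnotify_marks field, so the low
 * bits carry little entropy; dividing by L1_CACHE_BYTES discards them before
 * the bucket modulus is taken.
 */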
static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & mark->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/*
		 * We use a data dependency barrier in READ_ONCE() to make sure
		 * the chunk we see is fully initialized.
		 */
		if (READ_ONCE(p->key) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

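/*
 * A node is the index'th element of its chunk's owners[] array (ignoring
 * the "will prune" bit), so stepping back index entries lands on owners[0],
 * from which container_of() recovers the enclosing chunk.
 */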
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

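/* Repoint the mark at a new chunk (or NULL) and clear the old chunk's backlink. */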
static void replace_mark_chunk(struct fsnotify_mark *mark,
			       struct audit_chunk *chunk)
{
	struct audit_chunk *old;

	assert_spin_locked(&hash_lock);
	old = mark_chunk(mark);
	audit_mark(mark)->chunk = chunk;
	if (chunk)
		chunk->mark = mark;
	if (old)
		old->mark = NULL;
}

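/*
 * Copy the surviving owner slots of "old" into "new", compacting out emptied
 * slots while keeping each index in step with its new position, fix up
 * tree->root back-pointers and the mark, then swap the chunks in the hash
 * under RCU.
 */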
static void replace_chunk(struct audit_chunk *new, struct audit_chunk *old)
{
	struct audit_tree *owner;
	int i, j;

	new->key = old->key;
	list_splice_init(&old->trees, &new->trees);
	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	for (i = j = 0; j < old->count; i++, j++) {
		if (!old->owners[j].owner) {
			i--;
			continue;
		}
		owner = old->owners[j].owner;
		new->owners[i].owner = owner;
		new->owners[i].index = old->owners[j].index - j + i;
		get_tree(owner);
		list_replace_init(&old->owners[j].list, &new->owners[i].list);
	}
	replace_mark_chunk(old->mark, new);
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	list_replace_rcu(&old->hash, &new->hash);
}

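/* Detach a node from its owner tree and drop the tree reference it held. */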
static void remove_chunk_node(struct audit_chunk *chunk, struct node *p)
{
	struct audit_tree *owner = p->owner;

	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
}

static int chunk_count_trees(struct audit_chunk *chunk)
{
	int i;
	int ret = 0;

	for (i = 0; i < chunk->count; i++)
		if (chunk->owners[i].owner)
			ret++;
	return ret;
}

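/*
 * The caller has already removed tree nodes from this chunk; replace it with
 * a smaller chunk holding the surviving owners, or drop it (along with its
 * mark) entirely once no owners remain.
 */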
static void untag_chunk(struct audit_chunk *chunk, struct fsnotify_mark *mark)
{
	struct audit_chunk *new;
	int size;

	mutex_lock(&audit_tree_group->mark_mutex);
	/*
	 * mark_mutex stabilizes chunk attached to the mark so we can check
	 * whether it didn't change while we've dropped hash_lock.
	 */
	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ATTACHED) ||
	    mark_chunk(mark) != chunk)
		goto out_mutex;

	size = chunk_count_trees(chunk);
	if (!size) {
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		list_del_rcu(&chunk->hash);
		replace_mark_chunk(mark, NULL);
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		audit_mark_put_chunk(chunk);
		fsnotify_free_mark(mark);
		return;
	}

	new = alloc_chunk(size);
	if (!new)
		goto out_mutex;

	spin_lock(&hash_lock);
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(new, chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	audit_mark_put_chunk(chunk);
	return;

out_mutex:
	mutex_unlock(&audit_tree_group->mark_mutex);
}

/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		return -ENOMEM;
	}

	mark = alloc_mark();
	if (!mark) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		kfree(chunk);
		return -ENOMEM;
	}

	if (fsnotify_add_inode_mark_locked(mark, inode, 0)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(mark);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(mark);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	replace_mark_chunk(mark, chunk);
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	/*
	 * Inserting into the hash table has to go last as once we do that RCU
	 * readers can see the chunk.
	 */
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	/*
	 * Drop our initial reference. When the mark we point to is getting
	 * freed, we get a notification through the ->freeing_mark callback
	 * and clean up the chunk pointing to this mark.
	 */
	fsnotify_put_mark(mark);
	return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *mark;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	mutex_lock(&audit_tree_group->mark_mutex);
	mark = fsnotify_find_mark(&inode->i_fsnotify_marks, audit_tree_group);
	if (!mark)
		return create_chunk(inode, tree);

	/*
	 * The mark we found is guaranteed to be attached, and mark_mutex
	 * protects it from getting detached, which makes sure there is a
	 * chunk attached to the mark.
	 */
	/* are we already there? */
	spin_lock(&hash_lock);
	old = mark_chunk(mark);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			mutex_unlock(&audit_tree_group->mark_mutex);
			fsnotify_put_mark(mark);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		return -ENOMEM;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(mark);
		kfree(chunk);
		return 0;
	}
	p = &chunk->owners[chunk->count - 1];
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	/*
	 * This has to go last when updating chunk as once replace_chunk() is
	 * called, new RCU readers can see the new chunk.
	 */
	replace_chunk(chunk, old);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_put_mark(mark); /* pair to fsnotify_find_mark */
	audit_mark_put_chunk(old);

	return 0;
}

static void audit_tree_log_remove_rule(struct audit_context *context,
				       struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

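/* Detach all rules from the tree, logging and freeing the fully set up ones. */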
static void kill_rules(struct audit_context *context, struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(context, rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * Remove tree from chunks. If 'tagged' is set, remove tree only from tagged
 * chunks. The function expects tagged chunks are all at the beginning of the
 * chunks list.
 */
static void prune_tree_chunks(struct audit_tree *victim, bool tagged)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;
		struct audit_chunk *chunk;
		struct fsnotify_mark *mark;

		p = list_first_entry(&victim->chunks, struct node, list);
		/* have we run out of marked? */
		if (tagged && !(p->index & (1U<<31)))
			break;
		chunk = find_chunk(p);
		mark = chunk->mark;
		remove_chunk_node(chunk, p);
		/* Racing with audit_tree_freeing_mark()? */
		if (!mark)
			continue;
		fsnotify_get_mark(mark);
		spin_unlock(&hash_lock);

		untag_chunk(chunk, mark);
		fsnotify_put_mark(mark);

		spin_lock(&hash_lock);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	prune_tree_chunks(victim, false);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: move the still-tagged chunks to the front of the list */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}
	spin_unlock(&hash_lock);

	prune_tree_chunks(tree, true);

	spin_lock(&hash_lock);
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(audit_context(), tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

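/* iterate_mounts() callback: does this mount's root inode map to the key in arg? */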
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}

void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_move(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

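/* Validate an AUDIT_TREE rule and attach a freshly allocated tree to it. */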
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

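/* Start the prune thread on first use; a no-op if it is already running. */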
static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree\n");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

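/*
 * Make "new" equivalent to "old" as far as tree rules are concerned: tag
 * every mount collected under "new" with each tree whose watched path
 * covers "old".
 */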
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_move(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_move(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_move(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of the syscall. Runs synchronously.
 */
void audit_kill_trees(struct audit_context *context)
{
	struct list_head *list = &context->killed_trees;

	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(context, victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

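/*
 * The inode behind this chunk is going away: kill every tree rooted here
 * (or hand them to the syscall in progress for postponed killing), then
 * unhash the chunk.
 */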
static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(audit_context(), owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

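/* We only care about marks being freed; individual fsnotify events are ignored. */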
static int audit_tree_handle_event(struct fsnotify_mark *mark, u32 mask,
				   struct inode *inode, struct inode *dir,
				   const struct qstr *file_name, u32 cookie)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *mark,
				    struct fsnotify_group *group)
{
	struct audit_chunk *chunk;

	mutex_lock(&mark->group->mark_mutex);
	spin_lock(&hash_lock);
	chunk = mark_chunk(mark);
	replace_mark_chunk(mark, NULL);
	spin_unlock(&hash_lock);
	mutex_unlock(&mark->group->mark_mutex);
	if (chunk) {
		evict_chunk(chunk);
		audit_mark_put_chunk(chunk);
	}

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&mark->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_inode_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_mark_cachep = KMEM_CACHE(audit_tree_mark, SLAB_PANIC);

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);