#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
        atomic_t count;
        int goner;
        struct audit_chunk *root;
        struct list_head chunks;
        struct list_head rules;
        struct list_head list;
        struct list_head same_root;
        struct rcu_head head;
        char pathname[];
};

struct audit_chunk {
        struct list_head hash;
        struct fsnotify_mark mark;
        struct list_head trees;         /* with root here */
        int dead;
        int count;
        atomic_long_t refs;
        struct rcu_head head;
        struct node {
                struct list_head list;
                struct audit_tree *owner;
                unsigned index;         /* index; upper bit indicates 'will prune' */
        } owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list                 hash_lock
 * tree.rules anchors rule.rlist                           audit_filter_mutex
 * chunk.trees anchors tree.same_root                      hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.                                        RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows us to get from node.list to the containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic and
 * that makes a difference.  Some.
 */

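/*
 * Example of how the pieces fit together: a rule on "/etc" gets one
 * audit_tree with pathname "/etc".  audit_add_tree_rule() collects the
 * mounts under that path and tag_chunk() attaches a chunk to each mount
 * root's inode, each chunk holding an owners[] slot that points back at
 * the tree.  When the last rule referring to the tree goes away,
 * audit_remove_tree_rule() moves the tree to prune_list and the prune
 * thread untags those chunks again.
 */
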
static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
        struct audit_tree *tree;

        tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
        if (tree) {
                atomic_set(&tree->count, 1);
                tree->goner = 0;
                INIT_LIST_HEAD(&tree->chunks);
                INIT_LIST_HEAD(&tree->rules);
                INIT_LIST_HEAD(&tree->list);
                INIT_LIST_HEAD(&tree->same_root);
                tree->root = NULL;
                strcpy(tree->pathname, s);
        }
        return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
        atomic_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
        if (atomic_dec_and_test(&tree->count))
                kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing into audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
        return tree->pathname;
}

static void free_chunk(struct audit_chunk *chunk)
{
        int i;

        for (i = 0; i < chunk->count; i++) {
                if (chunk->owners[i].owner)
                        put_tree(chunk->owners[i].owner);
        }
        kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
        if (atomic_long_dec_and_test(&chunk->refs))
                free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
        struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
        audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
        call_rcu(&chunk->head, __put_chunk);
}

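/*
 * Allocate a chunk with @count owner slots; ->refs starts at 1, matching
 * the reference held via the embedded fsnotify mark (see the refcounting
 * comment above).
 */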
static struct audit_chunk *alloc_chunk(int count)
{
        struct audit_chunk *chunk;
        size_t size;
        int i;

        size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
        chunk = kzalloc(size, GFP_KERNEL);
        if (!chunk)
                return NULL;

        INIT_LIST_HEAD(&chunk->hash);
        INIT_LIST_HEAD(&chunk->trees);
        chunk->count = count;
        atomic_long_set(&chunk->refs, 1);
        for (i = 0; i < count; i++) {
                INIT_LIST_HEAD(&chunk->owners[i].list);
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
        return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

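/* As described above: the middle bits of the inode address act as the hash. */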
static inline struct list_head *chunk_hash(const struct inode *inode)
{
        unsigned long n = (unsigned long)inode / L1_CACHE_BYTES;
        return chunk_hash_heads + n % HASH_SIZE;
}

/* hash_lock & entry->lock are held by the caller */
static void insert_hash(struct audit_chunk *chunk)
{
        struct fsnotify_mark *entry = &chunk->mark;
        struct list_head *list;

        if (!entry->i.inode)
                return;
        list = chunk_hash(entry->i.inode);
        list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
        struct list_head *list = chunk_hash(inode);
        struct audit_chunk *p;

        list_for_each_entry_rcu(p, list, hash) {
                /* mark.inode may have gone NULL, but who cares? */
                if (p->mark.i.inode == inode) {
                        atomic_long_inc(&p->refs);
                        return p;
                }
        }
        return NULL;
}

int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
        int n;
        for (n = 0; n < chunk->count; n++)
                if (chunk->owners[n].owner == tree)
                        return 1;
        return 0;
}

/* tagging and untagging inodes with trees */

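/*
 * owners[i].index stores i in its low bits (bit 31 is the "will prune"
 * flag), so stepping a node pointer back by the masked index lands on
 * owners[0] and lets us recover the containing chunk.
 */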
static struct audit_chunk *find_chunk(struct node *p)
{
        int index = p->index & ~(1U<<31);
        p -= index;
        return container_of(p, struct audit_chunk, owners[0]);
}

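/*
 * Detach one owner from its chunk: the chunk is replaced by a new one
 * with one slot fewer (or destroyed outright when this was the last
 * owner); if allocating the replacement fails we fall back to merely
 * clearing the slot.  Called with hash_lock held; the lock is dropped
 * and re-acquired inside.
 */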
static void untag_chunk(struct node *p)
{
        struct audit_chunk *chunk = find_chunk(p);
        struct fsnotify_mark *entry = &chunk->mark;
        struct audit_chunk *new = NULL;
        struct audit_tree *owner;
        int size = chunk->count - 1;
        int i, j;

        fsnotify_get_mark(entry);

        spin_unlock(&hash_lock);

        if (size)
                new = alloc_chunk(size);

        spin_lock(&entry->lock);
        if (chunk->dead || !entry->i.inode) {
                spin_unlock(&entry->lock);
                if (new)
                        free_chunk(new);
                goto out;
        }

        owner = p->owner;

        if (!size) {
                chunk->dead = 1;
                spin_lock(&hash_lock);
                list_del_init(&chunk->trees);
                if (owner->root == chunk)
                        owner->root = NULL;
                list_del_init(&p->list);
                list_del_rcu(&chunk->hash);
                spin_unlock(&hash_lock);
                spin_unlock(&entry->lock);
                fsnotify_destroy_mark(entry);
                goto out;
        }

        if (!new)
                goto Fallback;

        fsnotify_duplicate_mark(&new->mark, entry);
        if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) {
                fsnotify_put_mark(&new->mark);
                goto Fallback;
        }

        chunk->dead = 1;
        spin_lock(&hash_lock);
        list_replace_init(&chunk->trees, &new->trees);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }

        for (i = j = 0; j <= size; i++, j++) {
                struct audit_tree *s;
                if (&chunk->owners[j] == p) {
                        list_del_init(&p->list);
                        i--;
                        continue;
                }
                s = chunk->owners[j].owner;
                new->owners[i].owner = s;
                new->owners[i].index = chunk->owners[j].index - j + i;
                if (!s) /* result of earlier fallback */
                        continue;
                get_tree(s);
                list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
        }

        list_replace_rcu(&chunk->hash, &new->hash);
        list_for_each_entry(owner, &new->trees, same_root)
                owner->root = new;
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        fsnotify_destroy_mark(entry);
        goto out;

Fallback:
        // do the best we can
        spin_lock(&hash_lock);
        if (owner->root == chunk) {
                list_del_init(&owner->same_root);
                owner->root = NULL;
        }
        list_del_init(&p->list);
        p->owner = NULL;
        put_tree(owner);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
out:
        fsnotify_put_mark(entry);
        spin_lock(&hash_lock);
}

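/* Attach a fresh single-owner chunk to an inode that is not yet tagged. */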
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *entry;
        struct audit_chunk *chunk = alloc_chunk(1);
        if (!chunk)
                return -ENOMEM;

        entry = &chunk->mark;
        if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
                fsnotify_put_mark(entry);
                return -ENOSPC;
        }

        spin_lock(&entry->lock);
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&entry->lock);
                fsnotify_get_mark(entry);
                fsnotify_destroy_mark(entry);
                fsnotify_put_mark(entry);
                return 0;
        }
        chunk->owners[0].index = (1U << 31);
        chunk->owners[0].owner = tree;
        get_tree(tree);
        list_add(&chunk->owners[0].list, &tree->chunks);
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        insert_hash(chunk);
        spin_unlock(&hash_lock);
        spin_unlock(&entry->lock);
        return 0;
}

/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
        struct fsnotify_mark *old_entry, *chunk_entry;
        struct audit_tree *owner;
        struct audit_chunk *chunk, *old;
        struct node *p;
        int n;

        old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
        if (!old_entry)
                return create_chunk(inode, tree);

        old = container_of(old_entry, struct audit_chunk, mark);

        /* are we already there? */
        spin_lock(&hash_lock);
        for (n = 0; n < old->count; n++) {
                if (old->owners[n].owner == tree) {
                        spin_unlock(&hash_lock);
                        fsnotify_put_mark(old_entry);
                        return 0;
                }
        }
        spin_unlock(&hash_lock);

        chunk = alloc_chunk(old->count + 1);
        if (!chunk) {
                fsnotify_put_mark(old_entry);
                return -ENOMEM;
        }

        chunk_entry = &chunk->mark;

        spin_lock(&old_entry->lock);
        if (!old_entry->i.inode) {
                /* old_entry is being shot, let's just lie */
                spin_unlock(&old_entry->lock);
                fsnotify_put_mark(old_entry);
                free_chunk(chunk);
                return -ENOENT;
        }

        fsnotify_duplicate_mark(chunk_entry, old_entry);
        if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) {
                spin_unlock(&old_entry->lock);
                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return -ENOSPC;
        }

        /* even though we hold old_entry->lock, this is safe since chunk_entry->lock could NEVER have been grabbed before */
        spin_lock(&chunk_entry->lock);
        spin_lock(&hash_lock);

        /* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
        if (tree->goner) {
                spin_unlock(&hash_lock);
                chunk->dead = 1;
                spin_unlock(&chunk_entry->lock);
                spin_unlock(&old_entry->lock);

                fsnotify_get_mark(chunk_entry);
                fsnotify_destroy_mark(chunk_entry);

                fsnotify_put_mark(chunk_entry);
                fsnotify_put_mark(old_entry);
                return 0;
        }
        list_replace_init(&old->trees, &chunk->trees);
        for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
                struct audit_tree *s = old->owners[n].owner;
                p->owner = s;
                p->index = old->owners[n].index;
                if (!s) /* result of fallback in untag */
                        continue;
                get_tree(s);
                list_replace_init(&old->owners[n].list, &p->list);
        }
        p->index = (chunk->count - 1) | (1U<<31);
        p->owner = tree;
        get_tree(tree);
        list_add(&p->list, &tree->chunks);
        list_replace_rcu(&old->hash, &chunk->hash);
        list_for_each_entry(owner, &chunk->trees, same_root)
                owner->root = chunk;
        old->dead = 1;
        if (!tree->root) {
                tree->root = chunk;
                list_add(&tree->same_root, &chunk->trees);
        }
        spin_unlock(&hash_lock);
        spin_unlock(&chunk_entry->lock);
        spin_unlock(&old_entry->lock);
        fsnotify_destroy_mark(old_entry);
        fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */
        return 0;
}

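/*
 * Detach every rule hanging off the tree, emitting a config-change audit
 * record for each one; the rule entries themselves are freed via RCU.
 * Called with audit_filter_mutex held.
 */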
static void kill_rules(struct audit_tree *tree)
{
        struct audit_krule *rule, *next;
        struct audit_entry *entry;
        struct audit_buffer *ab;

        list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
                entry = container_of(rule, struct audit_entry, rule);

                list_del_init(&rule->rlist);
                if (rule->tree) {
                        /* not a half-baked one */
                        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
                        audit_log_format(ab, "op=");
                        audit_log_string(ab, "remove rule");
                        audit_log_format(ab, " dir=");
                        audit_log_untrustedstring(ab, rule->tree->pathname);
                        audit_log_key(ab, rule->filterkey);
                        audit_log_format(ab, " list=%d res=1", rule->listnr);
                        audit_log_end(ab);
                        rule->tree = NULL;
                        list_del_rcu(&entry->list);
                        list_del(&entry->rule.list);
                        call_rcu(&entry->rcu, audit_free_rule_rcu);
                }
        }
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
        spin_lock(&hash_lock);
        while (!list_empty(&victim->chunks)) {
                struct node *p;

                p = list_entry(victim->chunks.next, struct node, list);

                untag_chunk(p);
        }
        spin_unlock(&hash_lock);
        put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
        struct list_head *p, *q;
        spin_lock(&hash_lock);
        if (tree->goner) {
                spin_unlock(&hash_lock);
                return;
        }
        /* reorder */
        for (p = tree->chunks.next; p != &tree->chunks; p = q) {
                struct node *node = list_entry(p, struct node, list);
                q = p->next;
                if (node->index & (1U<<31)) {
                        list_del_init(p);
                        list_add(p, &tree->chunks);
                }
        }

        while (!list_empty(&tree->chunks)) {
                struct node *node;

                node = list_entry(tree->chunks.next, struct node, list);

                /* have we run out of marked? */
                if (!(node->index & (1U<<31)))
                        break;

                untag_chunk(node);
        }
        if (!tree->root && !tree->goner) {
                tree->goner = 1;
                spin_unlock(&hash_lock);
                mutex_lock(&audit_filter_mutex);
                kill_rules(tree);
                list_del_init(&tree->list);
                mutex_unlock(&audit_filter_mutex);
                prune_one(tree);
        } else {
                spin_unlock(&hash_lock);
        }
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *tree;
        tree = rule->tree;
        if (tree) {
                spin_lock(&hash_lock);
                list_del_init(&rule->rlist);
                if (list_empty(&tree->rules) && !tree->goner) {
                        tree->root = NULL;
                        list_del_init(&tree->same_root);
                        tree->goner = 1;
                        list_move(&tree->list, &prune_list);
                        rule->tree = NULL;
                        spin_unlock(&hash_lock);
                        audit_schedule_prune();
                        return 1;
                }
                rule->tree = NULL;
                spin_unlock(&hash_lock);
                return 1;
        }
        return 0;
}

static int compare_root(struct vfsmount *mnt, void *arg)
{
        return mnt->mnt_root->d_inode == arg;
}

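/*
 * Walk every tree on tree_list and drop the chunks whose inodes are no
 * longer reachable under any mount collected for the tree's pathname.
 */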
void audit_trim_trees(void)
{
        struct list_head cursor;

        mutex_lock(&audit_filter_mutex);
        list_add(&cursor, &tree_list);
        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                struct path path;
                struct vfsmount *root_mnt;
                struct node *node;
                int err;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path);
                if (err)
                        goto skip_it;

                root_mnt = collect_mounts(&path);
                path_put(&path);
                if (IS_ERR(root_mnt))
                        goto skip_it;

                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list) {
                        struct audit_chunk *chunk = find_chunk(node);
                        /* this could be NULL if the watch is dying elsewhere... */
                        struct inode *inode = chunk->mark.i.inode;
                        node->index |= 1U<<31;
                        if (iterate_mounts(compare_root, inode, root_mnt))
                                node->index &= ~(1U<<31);
                }
                spin_unlock(&hash_lock);
                trim_marked(tree);
                put_tree(tree);
                drop_collected_mounts(root_mnt);
skip_it:
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{

        if (pathname[0] != '/' ||
            rule->listnr != AUDIT_FILTER_EXIT ||
            op != Audit_equal ||
            rule->inode_f || rule->watch || rule->tree)
                return -EINVAL;
        rule->tree = alloc_tree(pathname);
        if (!rule->tree)
                return -ENOMEM;
        return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
        put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
        return tag_chunk(mnt->mnt_root->d_inode, arg);
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
        struct audit_tree *seed = rule->tree, *tree;
        struct path path;
        struct vfsmount *mnt;
        int err;

        list_for_each_entry(tree, &tree_list, list) {
                if (!strcmp(seed->pathname, tree->pathname)) {
                        put_tree(seed);
                        rule->tree = tree;
                        list_add(&rule->rlist, &tree->rules);
                        return 0;
                }
        }
        tree = seed;
        list_add(&tree->list, &tree_list);
        list_add(&rule->rlist, &tree->rules);
        /* do not set rule->tree yet */
        mutex_unlock(&audit_filter_mutex);

        err = kern_path(tree->pathname, 0, &path);
        if (err)
                goto Err;
        mnt = collect_mounts(&path);
        path_put(&path);
        if (IS_ERR(mnt)) {
                err = PTR_ERR(mnt);
                goto Err;
        }

        get_tree(tree);
        err = iterate_mounts(tag_mount, tree, mnt);
        drop_collected_mounts(mnt);

        if (!err) {
                struct node *node;
                spin_lock(&hash_lock);
                list_for_each_entry(node, &tree->chunks, list)
                        node->index &= ~(1U<<31);
                spin_unlock(&hash_lock);
        } else {
                trim_marked(tree);
                goto Err;
        }

        mutex_lock(&audit_filter_mutex);
        if (list_empty(&rule->rlist)) {
                put_tree(tree);
                return -ENOENT;
        }
        rule->tree = tree;
        put_tree(tree);

        return 0;
Err:
        mutex_lock(&audit_filter_mutex);
        list_del_init(&tree->list);
        list_del_init(&tree->rules);
        put_tree(tree);
        return err;
}

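/*
 * For every tree whose pathname contains @old, also tag the mounts
 * collected under @new, so the tree covers both locations.
 */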
int audit_tag_tree(char *old, char *new)
{
        struct list_head cursor, barrier;
        int failed = 0;
        struct path path1, path2;
        struct vfsmount *tagged;
        int err;

        err = kern_path(new, 0, &path2);
        if (err)
                return err;
        tagged = collect_mounts(&path2);
        path_put(&path2);
        if (IS_ERR(tagged))
                return PTR_ERR(tagged);

        err = kern_path(old, 0, &path1);
        if (err) {
                drop_collected_mounts(tagged);
                return err;
        }

        mutex_lock(&audit_filter_mutex);
        list_add(&barrier, &tree_list);
        list_add(&cursor, &barrier);

        while (cursor.next != &tree_list) {
                struct audit_tree *tree;
                int good_one = 0;

                tree = container_of(cursor.next, struct audit_tree, list);
                get_tree(tree);
                list_del(&cursor);
                list_add(&cursor, &tree->list);
                mutex_unlock(&audit_filter_mutex);

                err = kern_path(tree->pathname, 0, &path2);
                if (!err) {
                        good_one = path_is_under(&path1, &path2);
                        path_put(&path2);
                }

                if (!good_one) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        continue;
                }

                failed = iterate_mounts(tag_mount, tree, tagged);
                if (failed) {
                        put_tree(tree);
                        mutex_lock(&audit_filter_mutex);
                        break;
                }

                mutex_lock(&audit_filter_mutex);
                spin_lock(&hash_lock);
                if (!tree->goner) {
                        list_del(&tree->list);
                        list_add(&tree->list, &tree_list);
                }
                spin_unlock(&hash_lock);
                put_tree(tree);
        }

        while (barrier.prev != &tree_list) {
                struct audit_tree *tree;

                tree = container_of(barrier.prev, struct audit_tree, list);
                get_tree(tree);
                list_del(&tree->list);
                list_add(&tree->list, &barrier);
                mutex_unlock(&audit_filter_mutex);

                if (!failed) {
                        struct node *node;
                        spin_lock(&hash_lock);
                        list_for_each_entry(node, &tree->chunks, list)
                                node->index &= ~(1U<<31);
                        spin_unlock(&hash_lock);
                } else {
                        trim_marked(tree);
                }

                put_tree(tree);
                mutex_lock(&audit_filter_mutex);
        }
        list_del(&barrier);
        list_del(&cursor);
        mutex_unlock(&audit_filter_mutex);
        path_put(&path1);
        drop_collected_mounts(tagged);
        return failed;
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(&prune_list)) {
                struct audit_tree *victim;

                victim = list_entry(prune_list.next, struct audit_tree, list);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
        return 0;
}

static void audit_schedule_prune(void)
{
        kthread_run(prune_tree_thread, NULL, "audit_prune_tree");
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall.  Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
        mutex_lock(&audit_cmd_mutex);
        mutex_lock(&audit_filter_mutex);

        while (!list_empty(list)) {
                struct audit_tree *victim;

                victim = list_entry(list->next, struct audit_tree, list);
                kill_rules(victim);
                list_del_init(&victim->list);

                mutex_unlock(&audit_filter_mutex);

                prune_one(victim);

                mutex_lock(&audit_filter_mutex);
        }

        mutex_unlock(&audit_filter_mutex);
        mutex_unlock(&audit_cmd_mutex);
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

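/*
 * The watched inode is going away: take the chunk out of the hash, mark
 * every owning tree as a goner and either queue it on the caller's
 * postponed list or hand it to the prune thread.
 */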
static void evict_chunk(struct audit_chunk *chunk)
{
        struct audit_tree *owner;
        struct list_head *postponed = audit_killed_trees();
        int need_prune = 0;
        int n;

        if (chunk->dead)
                return;

        chunk->dead = 1;
        mutex_lock(&audit_filter_mutex);
        spin_lock(&hash_lock);
        while (!list_empty(&chunk->trees)) {
                owner = list_entry(chunk->trees.next,
                                   struct audit_tree, same_root);
                owner->goner = 1;
                owner->root = NULL;
                list_del_init(&owner->same_root);
                spin_unlock(&hash_lock);
                if (!postponed) {
                        kill_rules(owner);
                        list_move(&owner->list, &prune_list);
                        need_prune = 1;
                } else {
                        list_move(&owner->list, postponed);
                }
                spin_lock(&hash_lock);
        }
        list_del_rcu(&chunk->hash);
        for (n = 0; n < chunk->count; n++)
                list_del_init(&chunk->owners[n].list);
        spin_unlock(&hash_lock);
        if (need_prune)
                audit_schedule_prune();
        mutex_unlock(&audit_filter_mutex);
}

static int audit_tree_handle_event(struct fsnotify_group *group,
                                   struct fsnotify_mark *inode_mark,
                                   struct fsnotify_mark *vfsmount_mark,
                                   struct fsnotify_event *event)
{
        BUG();
        return -EOPNOTSUPP;
}

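/* fsnotify is tearing the mark down: evict the chunk that embeds it. */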
static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
        struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

        evict_chunk(chunk);
        fsnotify_put_mark(entry);
}

static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode,
                                  struct fsnotify_mark *inode_mark,
                                  struct fsnotify_mark *vfsmount_mark,
                                  __u32 mask, void *data, int data_type)
{
        return false;
}

static const struct fsnotify_ops audit_tree_ops = {
        .handle_event = audit_tree_handle_event,
        .should_send_event = audit_tree_send_event,
        .free_group_priv = NULL,
        .free_event_priv = NULL,
        .freeing_mark = audit_tree_freeing_mark,
};

static int __init audit_tree_init(void)
{
        int i;

        audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
        if (IS_ERR(audit_tree_group))
                audit_panic("cannot initialize fsnotify group for rectree watches");

        for (i = 0; i < HASH_SIZE; i++)
                INIT_LIST_HEAD(&chunk_hash_heads[i]);

        return 0;
}
__initcall(audit_tree_init);