// SPDX-License-Identifier: GPL-2.0
#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

struct audit_tree {
	refcount_t count;
	int goner;
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;
	struct rcu_head head;
	char pathname[];
};

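/*
 * A chunk is a single allocation: the fsnotify mark is embedded in it and
 * owners[] is a flexible array with ->count slots, one per tree currently
 * tagging the inode.  The low 31 bits of owners[i].index always equal i,
 * which is how find_chunk() gets from an owners[] slot back to the chunk.
 */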
struct audit_chunk {
	struct list_head hash;
	unsigned long key;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */
	int dead;
	int count;
	atomic_long_t refs;
	struct rcu_head head;
	struct node {
		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have a pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with a pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows getting from node.list to the containing chunk.
 * The MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logic, and
 * that makes a difference.  Some.
 */

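/*
 * Lock nesting as used in this file: hash_lock is the innermost lock.
 * It nests inside audit_filter_mutex (see evict_chunk()) and inside the
 * group's mark_mutex (see create_chunk() and tag_chunk()), and it is
 * always dropped before either mutex is taken (see untag_chunk() and
 * trim_marked()).
 */
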
static struct fsnotify_group *audit_tree_group;

static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		refcount_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

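/*
 * Tree lifetime: one reference is held for "some rules refer to it" and one
 * for each chunk slot that names the tree; the final put_tree() frees the
 * tree after an RCU grace period.
 */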
static inline void get_tree(struct audit_tree *tree)
{
	refcount_inc(&tree->count);
}

static inline void put_tree(struct audit_tree *tree)
{
	if (refcount_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

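/*
 * Chunk teardown chain: the last fsnotify reference on the embedded mark
 * triggers audit_tree_destroy_watch(), which defers to __put_chunk() via
 * call_rcu(); that drops the .refs count the mark contributed, and
 * free_chunk() finally releases the tree references and the allocation.
 */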
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}

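/*
 * Allocate a chunk with room for @count owner slots in one go; the MSB of
 * each slot's index is set later, while the tagging is still provisional.
 */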
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_group);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);

/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	/* Use address pointed to by connector->obj as the key */
	return (unsigned long)&inode->i_fsnotify_marks;
}

static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
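
/*
 * chunk_hash() treats the key as an address: dividing by L1_CACHE_BYTES
 * drops the low, mostly alignment-determined bits, and the remainder
 * modulo HASH_SIZE selects one of the 128 buckets.
 */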

/* hash_lock & entry->group->mark_mutex is held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	struct list_head *list;

	if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
		return;
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	WARN_ON_ONCE(!chunk->key);
	list = chunk_hash(chunk->key);
	list_add_rcu(&chunk->hash, list);
}

/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		/*
		 * We use a data dependency barrier in READ_ONCE() to make sure
		 * the chunk we see is fully initialized.
		 */
		if (READ_ONCE(p->key) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
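
/*
 * A successful lookup takes a reference on the chunk; callers drop it with
 * audit_put_chunk().  A sketch of the expected calling pattern follows
 * (illustrative only, with inode and tree supplied by the caller's context):
 */
#if 0
	struct audit_chunk *p;

	rcu_read_lock();
	p = audit_tree_lookup(inode);	/* takes a reference on success */
	rcu_read_unlock();
	if (p) {
		if (audit_tree_match(p, tree))
			/* ... record the match ... */;
		audit_put_chunk(p);	/* drop the lookup reference */
	}
#endif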

bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}

/* tagging and untagging inodes with trees */

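/*
 * Map an owners[] slot back to its chunk: the low 31 bits of node.index
 * are the slot number, so e.g. for &chunk->owners[3] we step back three
 * slots to owners[0] and use container_of() from there.
 */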
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}

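/*
 * Drop the tagging of p's owner: replace the chunk with a copy that has one
 * slot fewer, or remove it outright when this was the last owner.  Called
 * with hash_lock held; the lock is dropped and re-taken internally.
 */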
static void untag_chunk(struct node *p)
{
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;
	int i, j;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

	if (size)
		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		mutex_unlock(&entry->group->mark_mutex);
		if (new)
			fsnotify_put_mark(&new->mark);
		goto out;
	}

	owner = p->owner;

	if (!size) {
		chunk->dead = 1;
		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
			owner->root = NULL;
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		fsnotify_detach_mark(entry);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_free_mark(entry);
		goto out;
	}

	if (!new)
		goto Fallback;

	if (fsnotify_add_mark_locked(&new->mark, entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		fsnotify_put_mark(&new->mark);
		goto Fallback;
	}

	chunk->dead = 1;
	spin_lock(&hash_lock);
	new->key = chunk->key;
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
			i--;
			continue;
		}
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
			continue;
		get_tree(s);
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);
	}

	list_for_each_entry(owner, &new->trees, same_root)
		owner->root = new;
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	list_replace_rcu(&chunk->hash, &new->hash);
	spin_unlock(&hash_lock);
	fsnotify_detach_mark(entry);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_free_mark(entry);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */
	goto out;

Fallback:
	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
		owner->root = NULL;
	}
	list_del_init(&p->list);
	p->owner = NULL;
	put_tree(owner);
	spin_unlock(&hash_lock);
	mutex_unlock(&entry->group->mark_mutex);
out:
	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
}

/* Call with group->mark_mutex held, releases it */
static int create_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);

	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		return -ENOMEM;
	}

	entry = &chunk->mark;
	if (fsnotify_add_inode_mark_locked(entry, inode, 0)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(entry);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		fsnotify_detach_mark(entry);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(entry);
		fsnotify_put_mark(entry);
		return 0;
	}
	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	get_tree(tree);
	list_add(&chunk->owners[0].list, &tree->chunks);
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	chunk->key = inode_to_key(inode);
	/*
	 * Inserting into the hash table has to go last as once we do that RCU
	 * readers can see the chunk.
	 */
	insert_hash(chunk);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_put_mark(entry);	/* drop initial reference */
	return 0;
}

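/*
 * Add @tree as an owner of @inode's chunk: if the inode has no chunk yet
 * one is created; otherwise the existing chunk is replaced by a copy with
 * one extra owners[] slot and the old mark is torn down.
 */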
/* the first tagged inode becomes root of tree */
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
{
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;
	struct node *p;
	int n;

	mutex_lock(&audit_tree_group->mark_mutex);
	old_entry = fsnotify_find_mark(&inode->i_fsnotify_marks,
				       audit_tree_group);
	if (!old_entry)
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			mutex_unlock(&audit_tree_group->mark_mutex);
			fsnotify_put_mark(old_entry);
			return 0;
		}
	}
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
	if (!chunk) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(old_entry);
		return -ENOMEM;
	}

	chunk_entry = &chunk->mark;

	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->obj getting NULL.
	 */
	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		/* old_entry is being shot, let's just lie */
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(old_entry);
		fsnotify_put_mark(&chunk->mark);
		return -ENOENT;
	}

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->connector->obj,
				     FSNOTIFY_OBJ_TYPE_INODE, 1)) {
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return -ENOSPC;
	}

	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		chunk->dead = 1;
		fsnotify_detach_mark(chunk_entry);
		mutex_unlock(&audit_tree_group->mark_mutex);
		fsnotify_free_mark(chunk_entry);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);
		return 0;
	}
	chunk->key = old->key;
	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->owner = s;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
			continue;
		get_tree(s);
		list_replace_init(&old->owners[n].list, &p->list);
	}
	p->index = (chunk->count - 1) | (1U<<31);
	p->owner = tree;
	get_tree(tree);
	list_add(&p->list, &tree->chunks);
	list_for_each_entry(owner, &chunk->trees, same_root)
		owner->root = chunk;
	old->dead = 1;
	if (!tree->root) {
		tree->root = chunk;
		list_add(&tree->same_root, &chunk->trees);
	}
	/*
	 * Make sure chunk is fully initialized before making it visible in the
	 * hash. Pairs with a data dependency barrier in READ_ONCE() in
	 * audit_tree_lookup().
	 */
	smp_wmb();
	list_replace_rcu(&old->hash, &chunk->hash);
	spin_unlock(&hash_lock);
	fsnotify_detach_mark(old_entry);
	mutex_unlock(&audit_tree_group->mark_mutex);
	fsnotify_free_mark(old_entry);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find_mark */
	return 0;
}

static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	if (!audit_enabled)
		return;
	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}

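/*
 * Detach every rule on the tree, logging and freeing each one; called with
 * audit_filter_mutex held.
 */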
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}

/*
 * finish killing struct audit_tree
 */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}

/* trim the uncommitted chunks from tree */

static void trim_marked(struct audit_tree *tree)
{
	struct list_head *p, *q;
	spin_lock(&hash_lock);
	if (tree->goner) {
		spin_unlock(&hash_lock);
		return;
	}
	/* reorder: move marked (MSB set) nodes to the front of the list */
	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		q = p->next;
		if (node->index & (1U<<31)) {
			list_del_init(p);
			list_add(p, &tree->chunks);
		}
	}

	while (!list_empty(&tree->chunks)) {
		struct node *node;

		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))
			break;

		untag_chunk(node);
	}
	if (!tree->root && !tree->goner) {
		tree->goner = 1;
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		kill_rules(tree);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);
		prune_one(tree);
	} else {
		spin_unlock(&hash_lock);
	}
}

static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *tree;
	tree = rule->tree;
	if (tree) {
		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			tree->root = NULL;
			list_del_init(&tree->same_root);
			tree->goner = 1;
			list_move(&tree->list, &prune_list);
			rule->tree = NULL;
			spin_unlock(&hash_lock);
			audit_schedule_prune();
			return 1;
		}
		rule->tree = NULL;
		spin_unlock(&hash_lock);
		return 1;
	}
	return 0;
}

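/*
 * iterate_mounts() callback: does this mount's root inode hash to the key
 * we are looking for?
 */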
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}

void audit_trim_trees(void)
{
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct path path;
		struct vfsmount *root_mnt;
		struct node *node;
		int err;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);
		if (err)
			goto skip_it;

		root_mnt = collect_mounts(&path);
		path_put(&path);
		if (IS_ERR(root_mnt))
			goto skip_it;

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)(chunk->key),
					   root_mnt))
				node->index &= ~(1U<<31);
		}
		spin_unlock(&hash_lock);
		trim_marked(tree);
		drop_collected_mounts(root_mnt);
skip_it:
		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
}

int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}

void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}

static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}

/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
{
	for (;;) {
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		}

		audit_ctl_lock();
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			prune_one(victim);

			mutex_lock(&audit_filter_mutex);
		}

		mutex_unlock(&audit_filter_mutex);
		audit_ctl_unlock();
	}
	return 0;
}

static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				"audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}

/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
{
	struct audit_tree *seed = rule->tree, *tree;
	struct path path;
	struct vfsmount *mnt;
	int err;

	rule->tree = NULL;
	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			put_tree(seed);
			rule->tree = tree;
			list_add(&rule->rlist, &tree->rules);
			return 0;
		}
	}
	tree = seed;
	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();
		if (err)
			goto Err;
	}

	err = kern_path(tree->pathname, 0, &path);
	if (err)
		goto Err;
	mnt = collect_mounts(&path);
	path_put(&path);
	if (IS_ERR(mnt)) {
		err = PTR_ERR(mnt);
		goto Err;
	}

	get_tree(tree);
	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

	if (!err) {
		struct node *node;
		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
	} else {
		trim_marked(tree);
		goto Err;
	}

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {
		put_tree(tree);
		return -ENOENT;
	}
	rule->tree = tree;
	put_tree(tree);

	return 0;
Err:
	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
	put_tree(tree);
	return err;
}

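/*
 * For every tree whose watched directory contains @old, also tag the mounts
 * collected at @new into it.  Returns 0 or the first tagging failure.
 */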
int audit_tag_tree(char *old, char *new)
{
	struct list_head cursor, barrier;
	int failed = 0;
	struct path path1, path2;
	struct vfsmount *tagged;
	int err;

	err = kern_path(new, 0, &path2);
	if (err)
		return err;
	tagged = collect_mounts(&path2);
	path_put(&path2);
	if (IS_ERR(tagged))
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
	if (err) {
		drop_collected_mounts(tagged);
		return err;
	}

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		int good_one = 0;

		tree = container_of(cursor.next, struct audit_tree, list);
		get_tree(tree);
		list_del(&cursor);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
		if (!err) {
			good_one = path_is_under(&path1, &path2);
			path_put(&path2);
		}

		if (!good_one) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			continue;
		}

		failed = iterate_mounts(tag_mount, tree, tagged);
		if (failed) {
			put_tree(tree);
			mutex_lock(&audit_filter_mutex);
			break;
		}

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
		if (!tree->goner) {
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		}
		spin_unlock(&hash_lock);
		put_tree(tree);
	}

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		get_tree(tree);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

		if (!failed) {
			struct node *node;
			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);
		} else {
			trim_marked(tree);
		}

		put_tree(tree);
		mutex_lock(&audit_filter_mutex);
	}
	list_del(&barrier);
	list_del(&cursor);
	mutex_unlock(&audit_filter_mutex);
	path_put(&path1);
	drop_collected_mounts(tagged);
	return failed;
}

static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}

/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
{
	audit_ctl_lock();
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		kill_rules(victim);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		prune_one(victim);

		mutex_lock(&audit_filter_mutex);
	}

	mutex_unlock(&audit_filter_mutex);
	audit_ctl_unlock();
}

/*
 * Here comes the stuff asynchronous to auditctl operations
 */

static void evict_chunk(struct audit_chunk *chunk)
{
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();
	int need_prune = 0;
	int n;

	if (chunk->dead)
		return;

	chunk->dead = 1;
	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		owner->goner = 1;
		owner->root = NULL;
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
		if (!postponed) {
			kill_rules(owner);
			list_move(&owner->list, &prune_list);
			need_prune = 1;
		} else {
			list_move(&owner->list, postponed);
		}
		spin_lock(&hash_lock);
	}
	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	if (need_prune)
		audit_schedule_prune();
}

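/*
 * This group never processes events (the handler just returns 0); the marks
 * tie chunks to inodes, and their destruction callbacks drive chunk and
 * tree cleanup.
 */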
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie,
				   struct fsnotify_iter_info *iter_info)
{
	return 0;
}

static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(refcount_read(&entry->refcnt) < 1);
}

static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
	.free_mark = audit_tree_destroy_watch,
};

static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);