Commit | Line | Data |
---|---|---|
ae271c1b MS |
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* | |
3 | * Landlock LSM - Ruleset management | |
4 | * | |
5 | * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> | |
6 | * Copyright © 2018-2020 ANSSI | |
7 | */ | |
8 | ||
9 | #include <linux/bits.h> | |
10 | #include <linux/bug.h> | |
11 | #include <linux/compiler_types.h> | |
12 | #include <linux/err.h> | |
13 | #include <linux/errno.h> | |
14 | #include <linux/kernel.h> | |
15 | #include <linux/lockdep.h> | |
16 | #include <linux/overflow.h> | |
17 | #include <linux/rbtree.h> | |
18 | #include <linux/refcount.h> | |
19 | #include <linux/slab.h> | |
20 | #include <linux/spinlock.h> | |
21 | #include <linux/workqueue.h> | |
22 | ||
23 | #include "limits.h" | |
24 | #include "object.h" | |
25 | #include "ruleset.h" | |
26 | ||
27 | static struct landlock_ruleset *create_ruleset(const u32 num_layers) | |
28 | { | |
29 | struct landlock_ruleset *new_ruleset; | |
30 | ||
31 | new_ruleset = kzalloc(struct_size(new_ruleset, fs_access_masks, | |
32 | num_layers), GFP_KERNEL_ACCOUNT); | |
33 | if (!new_ruleset) | |
34 | return ERR_PTR(-ENOMEM); | |
35 | refcount_set(&new_ruleset->usage, 1); | |
36 | mutex_init(&new_ruleset->lock); | |
37 | new_ruleset->root = RB_ROOT; | |
38 | new_ruleset->num_layers = num_layers; | |
39 | /* | |
40 | * hierarchy = NULL | |
41 | * num_rules = 0 | |
42 | * fs_access_masks[] = 0 | |
43 | */ | |
44 | return new_ruleset; | |
45 | } | |
46 | ||
47 | struct landlock_ruleset *landlock_create_ruleset(const u32 fs_access_mask) | |
48 | { | |
49 | struct landlock_ruleset *new_ruleset; | |
50 | ||
51 | /* Informs about useless ruleset. */ | |
52 | if (!fs_access_mask) | |
53 | return ERR_PTR(-ENOMSG); | |
54 | new_ruleset = create_ruleset(1); | |
55 | if (!IS_ERR(new_ruleset)) | |
56 | new_ruleset->fs_access_masks[0] = fs_access_mask; | |
57 | return new_ruleset; | |
58 | } | |
59 | ||
/*
 * Compile-time check that the num_layers field of struct landlock_rule is
 * wide enough to hold LANDLOCK_MAX_NUM_LAYERS (setting it to ~0 yields its
 * maximum representable value).
 */
static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}
68 | ||
/**
 * create_rule - Create a new rule tied to @object
 *
 * @object: Object the rule refers to; this function takes its own reference
 *          on it (released by free_rule()).
 * @layers: Existing layer stack copied into the new rule.
 * @num_layers: Number of entries in @layers.
 * @new_layer: Optional extra layer appended after the copied stack; when
 *             non-NULL, @num_layers must be below LANDLOCK_MAX_NUM_LAYERS.
 *
 * Returns the new rule, or an ERR_PTR: -E2BIG on layer overflow, -ENOMEM on
 * allocation failure.
 */
static struct landlock_rule *create_rule(
		struct landlock_object *const object,
		const struct landlock_layer (*const layers)[],
		const u32 num_layers,
		const struct landlock_layer *const new_layer)
{
	struct landlock_rule *new_rule;
	u32 new_num_layers;

	build_check_rule();
	if (new_layer) {
		/* Should already be checked by landlock_merge_ruleset(). */
		if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS))
			return ERR_PTR(-E2BIG);
		new_num_layers = num_layers + 1;
	} else {
		new_num_layers = num_layers;
	}
	new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers),
			GFP_KERNEL_ACCOUNT);
	if (!new_rule)
		return ERR_PTR(-ENOMEM);
	RB_CLEAR_NODE(&new_rule->node);
	/* The rule holds its own reference on @object. */
	landlock_get_object(object);
	new_rule->object = object;
	new_rule->num_layers = new_num_layers;
	/* Copies the original layer stack. */
	memcpy(new_rule->layers, layers,
			flex_array_size(new_rule, layers, num_layers));
	if (new_layer)
		/* Adds a copy of @new_layer on the layer stack. */
		new_rule->layers[new_rule->num_layers - 1] = *new_layer;
	return new_rule;
}
103 | ||
104 | static void free_rule(struct landlock_rule *const rule) | |
105 | { | |
106 | might_sleep(); | |
107 | if (!rule) | |
108 | return; | |
109 | landlock_put_object(rule->object); | |
110 | kfree(rule); | |
111 | } | |
112 | ||
/*
 * Compile-time checks that the struct landlock_ruleset counters and the
 * per-layer access-mask type are wide enough for the Landlock limits.
 */
static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};
	typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0;

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS);
}
125 | ||
/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @object: The object to build the new rule with.  The underlying kernel
 *          object must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level.  In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset in a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 *
 * Returns: 0 on success, -E2BIG when @ruleset already holds
 * LANDLOCK_MAX_NUM_RULES rules, -ENOMEM on allocation failure, or
 * -ENOENT/-EINVAL on (warned) caller misuse.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		struct landlock_object *const object,
		const struct landlock_layer (*const layers)[],
		size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!object || !layers))
		return -ENOENT;
	/* Walks the red-black tree, ordered by object pointer value. */
	walker_node = &(ruleset->root.rb_node);
	while (*walker_node) {
		struct landlock_rule *const this = rb_entry(*walker_node,
				struct landlock_rule, node);

		if (this->object != object) {
			parent_node = *walker_node;
			if (this->object < object)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* Only a single-level layer should match an existing rule. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* If there is a matching rule, updates it. */
		if ((*layers)[0].level == 0) {
			/*
			 * Extends access rights when the request comes from
			 * landlock_add_rule(2), i.e. @ruleset is not a domain.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Intersects access rights when it is a merge between a
		 * ruleset and a domain.  The rule is reallocated because its
		 * layer stack grows by one entry, then swapped into the tree
		 * in place of the old node.
		 */
		new_rule = create_rule(object, &this->layers, this->num_layers,
				&(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, &ruleset->root);
		free_rule(this);
		return 0;
	}

	/* There is no match for @object. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(object, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, &ruleset->root);
	ruleset->num_rules++;
	return 0;
}
217 | ||
/*
 * Compile-time checks that struct landlock_layer fields are wide enough for
 * the maximum layer level and the full filesystem access mask.
 */
static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}
228 | ||
229 | /* @ruleset must be locked by the caller. */ | |
230 | int landlock_insert_rule(struct landlock_ruleset *const ruleset, | |
231 | struct landlock_object *const object, const u32 access) | |
232 | { | |
233 | struct landlock_layer layers[] = {{ | |
234 | .access = access, | |
235 | /* When @level is zero, insert_rule() extends @ruleset. */ | |
236 | .level = 0, | |
237 | }}; | |
238 | ||
239 | build_check_layer(); | |
240 | return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers)); | |
241 | } | |
242 | ||
243 | static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy) | |
244 | { | |
245 | if (hierarchy) | |
246 | refcount_inc(&hierarchy->usage); | |
247 | } | |
248 | ||
249 | static void put_hierarchy(struct landlock_hierarchy *hierarchy) | |
250 | { | |
251 | while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) { | |
252 | const struct landlock_hierarchy *const freeme = hierarchy; | |
253 | ||
254 | hierarchy = hierarchy->parent; | |
255 | kfree(freeme); | |
256 | } | |
257 | } | |
258 | ||
/*
 * merge_ruleset - Import @src's rules into the domain @dst
 *
 * @src must be a ruleset with exactly one layer; @dst must be a domain
 * (i.e. have a hierarchy).  @src's access mask is written into @dst's last
 * (top) layer slot, and each @src rule is inserted into @dst as a one-entry
 * layer stack carrying the new top layer level.
 *
 * Locking order: @dst is locked first because the caller is its only owner.
 *
 * Returns 0 on success, -EINVAL on (warned) inconsistent rulesets, or an
 * insert_rule() error (e.g. -ENOMEM, -E2BIG).
 */
static int merge_ruleset(struct landlock_ruleset *const dst,
		struct landlock_ruleset *const src)
{
	struct landlock_rule *walker_rule, *next_rule;
	int err = 0;

	might_sleep();
	/* Should already be checked by landlock_merge_ruleset() */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Only merge into a domain. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0];

	/* Merges the @src tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
			&src->root, node) {
		struct landlock_layer layers[] = {{
			/* The merged rules belong to the new top layer. */
			.level = dst->num_layers,
		}};

		if (WARN_ON_ONCE(walker_rule->num_layers != 1)) {
			err = -EINVAL;
			goto out_unlock;
		}
		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) {
			err = -EINVAL;
			goto out_unlock;
		}
		layers[0].access = walker_rule->layers[0].access;
		err = insert_rule(dst, walker_rule->object, &layers,
				ARRAY_SIZE(layers));
		if (err)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}
311 | ||
/*
 * inherit_ruleset - Copy @parent's rules, layer stack and hierarchy link
 * into the freshly created @child
 *
 * @child must have been created with more layers than @parent (checked
 * below); the extra top layer slot is left untouched for a subsequent
 * merge_ruleset().  A NULL @parent means there is nothing to inherit.
 *
 * Locking order: @child is locked first because the caller is its only
 * owner.
 *
 * Returns 0 on success, -EINVAL on (warned) inconsistent rulesets, or an
 * insert_rule() error.
 */
static int inherit_ruleset(struct landlock_ruleset *const parent,
		struct landlock_ruleset *const child)
{
	struct landlock_rule *walker_rule, *next_rule;
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
			&parent->root, node) {
		err = insert_rule(child, walker_rule->object,
				&walker_rule->layers, walker_rule->num_layers);
		if (err)
			goto out_unlock;
	}

	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer stack and leaves a space for the new layer. */
	memcpy(child->fs_access_masks, parent->fs_access_masks,
			flex_array_size(parent, fs_access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Links @child below @parent in the domain hierarchy. */
	get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}
355 | ||
356 | static void free_ruleset(struct landlock_ruleset *const ruleset) | |
357 | { | |
358 | struct landlock_rule *freeme, *next; | |
359 | ||
360 | might_sleep(); | |
361 | rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root, | |
362 | node) | |
363 | free_rule(freeme); | |
364 | put_hierarchy(ruleset->hierarchy); | |
365 | kfree(ruleset); | |
366 | } | |
367 | ||
368 | void landlock_put_ruleset(struct landlock_ruleset *const ruleset) | |
369 | { | |
370 | might_sleep(); | |
371 | if (ruleset && refcount_dec_and_test(&ruleset->usage)) | |
372 | free_ruleset(ruleset); | |
373 | } | |
374 | ||
375 | static void free_ruleset_work(struct work_struct *const work) | |
376 | { | |
377 | struct landlock_ruleset *ruleset; | |
378 | ||
379 | ruleset = container_of(work, struct landlock_ruleset, work_free); | |
380 | free_ruleset(ruleset); | |
381 | } | |
382 | ||
383 | void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset) | |
384 | { | |
385 | if (ruleset && refcount_dec_and_test(&ruleset->usage)) { | |
386 | INIT_WORK(&ruleset->work_free, free_ruleset_work); | |
387 | schedule_work(&ruleset->work_free); | |
388 | } | |
389 | } | |
390 | ||
/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain (may be NULL for the first domain).
 * @ruleset: New ruleset to be merged.
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 *
 * The new domain has one more layer than @parent.  On failure, returns an
 * ERR_PTR (-EINVAL, -E2BIG or -ENOMEM) and releases everything allocated so
 * far.
 */
struct landlock_ruleset *landlock_merge_ruleset(
		struct landlock_ruleset *const parent,
		struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	/* The new domain stacks one extra layer on top of @parent's. */
	if (parent) {
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates a new domain... */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return new_dom;
	new_dom->hierarchy = kzalloc(sizeof(*new_dom->hierarchy),
			GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy) {
		err = -ENOMEM;
		goto out_put_dom;
	}
	refcount_set(&new_dom->hierarchy->usage, 1);

	/* ...as a child of @parent... */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		goto out_put_dom;

	/* ...and including @ruleset. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		goto out_put_dom;

	return new_dom;

out_put_dom:
	/* Also frees the hierarchy (and drops the parent link) if set. */
	landlock_put_ruleset(new_dom);
	return ERR_PTR(err);
}
448 | ||
449 | /* | |
450 | * The returned access has the same lifetime as @ruleset. | |
451 | */ | |
452 | const struct landlock_rule *landlock_find_rule( | |
453 | const struct landlock_ruleset *const ruleset, | |
454 | const struct landlock_object *const object) | |
455 | { | |
456 | const struct rb_node *node; | |
457 | ||
458 | if (!object) | |
459 | return NULL; | |
460 | node = ruleset->root.rb_node; | |
461 | while (node) { | |
462 | struct landlock_rule *this = rb_entry(node, | |
463 | struct landlock_rule, node); | |
464 | ||
465 | if (this->object == object) | |
466 | return this; | |
467 | if (this->object < object) | |
468 | node = node->rb_right; | |
469 | else | |
470 | node = node->rb_left; | |
471 | } | |
472 | return NULL; | |
473 | } |