Commit | Line | Data |
---|---|---|
ae271c1b MS |
1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* | |
3 | * Landlock LSM - Ruleset management | |
4 | * | |
5 | * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net> | |
6 | * Copyright © 2018-2020 ANSSI | |
7 | */ | |
8 | ||
9 | #include <linux/bits.h> | |
10 | #include <linux/bug.h> | |
11 | #include <linux/compiler_types.h> | |
12 | #include <linux/err.h> | |
13 | #include <linux/errno.h> | |
14 | #include <linux/kernel.h> | |
15 | #include <linux/lockdep.h> | |
16 | #include <linux/overflow.h> | |
17 | #include <linux/rbtree.h> | |
18 | #include <linux/refcount.h> | |
19 | #include <linux/slab.h> | |
20 | #include <linux/spinlock.h> | |
21 | #include <linux/workqueue.h> | |
22 | ||
23 | #include "limits.h" | |
24 | #include "object.h" | |
25 | #include "ruleset.h" | |
26 | ||
27 | static struct landlock_ruleset *create_ruleset(const u32 num_layers) | |
28 | { | |
29 | struct landlock_ruleset *new_ruleset; | |
30 | ||
06a1c40a MS |
31 | new_ruleset = |
32 | kzalloc(struct_size(new_ruleset, fs_access_masks, num_layers), | |
33 | GFP_KERNEL_ACCOUNT); | |
ae271c1b MS |
34 | if (!new_ruleset) |
35 | return ERR_PTR(-ENOMEM); | |
36 | refcount_set(&new_ruleset->usage, 1); | |
37 | mutex_init(&new_ruleset->lock); | |
38 | new_ruleset->root = RB_ROOT; | |
39 | new_ruleset->num_layers = num_layers; | |
40 | /* | |
41 | * hierarchy = NULL | |
42 | * num_rules = 0 | |
43 | * fs_access_masks[] = 0 | |
44 | */ | |
45 | return new_ruleset; | |
46 | } | |
47 | ||
48 | struct landlock_ruleset *landlock_create_ruleset(const u32 fs_access_mask) | |
49 | { | |
50 | struct landlock_ruleset *new_ruleset; | |
51 | ||
52 | /* Informs about useless ruleset. */ | |
53 | if (!fs_access_mask) | |
54 | return ERR_PTR(-ENOMSG); | |
55 | new_ruleset = create_ruleset(1); | |
56 | if (!IS_ERR(new_ruleset)) | |
57 | new_ruleset->fs_access_masks[0] = fs_access_mask; | |
58 | return new_ruleset; | |
59 | } | |
60 | ||
/*
 * Compile-time check that the num_layers bit-field of struct landlock_rule
 * is wide enough to store LANDLOCK_MAX_NUM_LAYERS: filling it with all-ones
 * must yield a value at least equal to the limit.
 */
static void build_check_rule(void)
{
	const struct landlock_rule rule = {
		.num_layers = ~0,
	};

	BUILD_BUG_ON(rule.num_layers < LANDLOCK_MAX_NUM_LAYERS);
}
69 | ||
06a1c40a MS |
70 | static struct landlock_rule * |
71 | create_rule(struct landlock_object *const object, | |
72 | const struct landlock_layer (*const layers)[], const u32 num_layers, | |
73 | const struct landlock_layer *const new_layer) | |
ae271c1b MS |
74 | { |
75 | struct landlock_rule *new_rule; | |
76 | u32 new_num_layers; | |
77 | ||
78 | build_check_rule(); | |
79 | if (new_layer) { | |
80 | /* Should already be checked by landlock_merge_ruleset(). */ | |
81 | if (WARN_ON_ONCE(num_layers >= LANDLOCK_MAX_NUM_LAYERS)) | |
82 | return ERR_PTR(-E2BIG); | |
83 | new_num_layers = num_layers + 1; | |
84 | } else { | |
85 | new_num_layers = num_layers; | |
86 | } | |
87 | new_rule = kzalloc(struct_size(new_rule, layers, new_num_layers), | |
06a1c40a | 88 | GFP_KERNEL_ACCOUNT); |
ae271c1b MS |
89 | if (!new_rule) |
90 | return ERR_PTR(-ENOMEM); | |
91 | RB_CLEAR_NODE(&new_rule->node); | |
92 | landlock_get_object(object); | |
93 | new_rule->object = object; | |
94 | new_rule->num_layers = new_num_layers; | |
95 | /* Copies the original layer stack. */ | |
96 | memcpy(new_rule->layers, layers, | |
06a1c40a | 97 | flex_array_size(new_rule, layers, num_layers)); |
ae271c1b MS |
98 | if (new_layer) |
99 | /* Adds a copy of @new_layer on the layer stack. */ | |
100 | new_rule->layers[new_rule->num_layers - 1] = *new_layer; | |
101 | return new_rule; | |
102 | } | |
103 | ||
104 | static void free_rule(struct landlock_rule *const rule) | |
105 | { | |
106 | might_sleep(); | |
107 | if (!rule) | |
108 | return; | |
109 | landlock_put_object(rule->object); | |
110 | kfree(rule); | |
111 | } | |
112 | ||
/*
 * Compile-time checks that the counter fields of struct landlock_ruleset
 * (and the type of its access masks) are wide enough for the Landlock
 * limits: all-ones values must be at least equal to each limit.
 */
static void build_check_ruleset(void)
{
	const struct landlock_ruleset ruleset = {
		.num_rules = ~0,
		.num_layers = ~0,
	};
	typeof(ruleset.fs_access_masks[0]) fs_access_mask = ~0;

	BUILD_BUG_ON(ruleset.num_rules < LANDLOCK_MAX_NUM_RULES);
	BUILD_BUG_ON(ruleset.num_layers < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(fs_access_mask < LANDLOCK_MASK_ACCESS_FS);
}
125 | ||
/**
 * insert_rule - Create and insert a rule in a ruleset
 *
 * @ruleset: The ruleset to be updated.
 * @object: The object to build the new rule with.  The underlying kernel
 *          object must be held by the caller.
 * @layers: One or multiple layers to be copied into the new rule.
 * @num_layers: The number of @layers entries.
 *
 * When user space requests to add a new rule to a ruleset, @layers only
 * contains one entry and this entry is not assigned to any level.  In this
 * case, the new rule will extend @ruleset, similarly to a boolean OR between
 * access rights.
 *
 * When merging a ruleset in a domain, or copying a domain, @layers will be
 * added to @ruleset as new constraints, similarly to a boolean AND between
 * access rights.
 *
 * Returns: 0 on success, -ENOENT or -EINVAL on (warned) unexpected inputs,
 * -E2BIG when @ruleset is full, or -ENOMEM on allocation failure.
 */
static int insert_rule(struct landlock_ruleset *const ruleset,
		       struct landlock_object *const object,
		       const struct landlock_layer (*const layers)[],
		       size_t num_layers)
{
	struct rb_node **walker_node;
	struct rb_node *parent_node = NULL;
	struct landlock_rule *new_rule;

	might_sleep();
	lockdep_assert_held(&ruleset->lock);
	if (WARN_ON_ONCE(!object || !layers))
		return -ENOENT;
	/* Binary-search the rb-tree, ordered by object pointer value. */
	walker_node = &(ruleset->root.rb_node);
	while (*walker_node) {
		struct landlock_rule *const this =
			rb_entry(*walker_node, struct landlock_rule, node);

		if (this->object != object) {
			parent_node = *walker_node;
			if (this->object < object)
				walker_node = &((*walker_node)->rb_right);
			else
				walker_node = &((*walker_node)->rb_left);
			continue;
		}

		/* Only a single-level layer should match an existing rule. */
		if (WARN_ON_ONCE(num_layers != 1))
			return -EINVAL;

		/* If there is a matching rule, updates it. */
		if ((*layers)[0].level == 0) {
			/*
			 * Extends access rights when the request comes from
			 * landlock_add_rule(2), i.e. @ruleset is not a domain.
			 */
			if (WARN_ON_ONCE(this->num_layers != 1))
				return -EINVAL;
			if (WARN_ON_ONCE(this->layers[0].level != 0))
				return -EINVAL;
			/* Boolean OR of access rights (ruleset extension). */
			this->layers[0].access |= (*layers)[0].access;
			return 0;
		}

		/* A domain rule must always carry a non-zero level. */
		if (WARN_ON_ONCE(this->layers[0].level == 0))
			return -EINVAL;

		/*
		 * Intersects access rights when it is a merge between a
		 * ruleset and a domain.  The existing rule is replaced by a
		 * new one with the extra layer stacked on top.
		 */
		new_rule = create_rule(object, &this->layers, this->num_layers,
				       &(*layers)[0]);
		if (IS_ERR(new_rule))
			return PTR_ERR(new_rule);
		rb_replace_node(&this->node, &new_rule->node, &ruleset->root);
		free_rule(this);
		return 0;
	}

	/* There is no match for @object. */
	build_check_ruleset();
	if (ruleset->num_rules >= LANDLOCK_MAX_NUM_RULES)
		return -E2BIG;
	new_rule = create_rule(object, layers, num_layers, NULL);
	if (IS_ERR(new_rule))
		return PTR_ERR(new_rule);
	rb_link_node(&new_rule->node, parent_node, walker_node);
	rb_insert_color(&new_rule->node, &ruleset->root);
	ruleset->num_rules++;
	return 0;
}
217 | ||
/*
 * Compile-time checks that the fields of struct landlock_layer are wide
 * enough for the Landlock limits: filling them with all-ones must yield
 * values at least equal to each limit.
 */
static void build_check_layer(void)
{
	const struct landlock_layer layer = {
		.level = ~0,
		.access = ~0,
	};

	BUILD_BUG_ON(layer.level < LANDLOCK_MAX_NUM_LAYERS);
	BUILD_BUG_ON(layer.access < LANDLOCK_MASK_ACCESS_FS);
}
228 | ||
229 | /* @ruleset must be locked by the caller. */ | |
230 | int landlock_insert_rule(struct landlock_ruleset *const ruleset, | |
06a1c40a | 231 | struct landlock_object *const object, const u32 access) |
ae271c1b | 232 | { |
06a1c40a | 233 | struct landlock_layer layers[] = { { |
ae271c1b MS |
234 | .access = access, |
235 | /* When @level is zero, insert_rule() extends @ruleset. */ | |
236 | .level = 0, | |
06a1c40a | 237 | } }; |
ae271c1b MS |
238 | |
239 | build_check_layer(); | |
240 | return insert_rule(ruleset, object, &layers, ARRAY_SIZE(layers)); | |
241 | } | |
242 | ||
243 | static inline void get_hierarchy(struct landlock_hierarchy *const hierarchy) | |
244 | { | |
245 | if (hierarchy) | |
246 | refcount_inc(&hierarchy->usage); | |
247 | } | |
248 | ||
249 | static void put_hierarchy(struct landlock_hierarchy *hierarchy) | |
250 | { | |
251 | while (hierarchy && refcount_dec_and_test(&hierarchy->usage)) { | |
252 | const struct landlock_hierarchy *const freeme = hierarchy; | |
253 | ||
254 | hierarchy = hierarchy->parent; | |
255 | kfree(freeme); | |
256 | } | |
257 | } | |
258 | ||
/*
 * Merges the single-layer ruleset @src into the domain @dst: stacks @src's
 * access mask as @dst's topmost layer and inserts a copy of each @src rule
 * tagged with that new layer level.  Locks @dst then @src (nested).
 */
static int merge_ruleset(struct landlock_ruleset *const dst,
			 struct landlock_ruleset *const src)
{
	struct landlock_rule *walker_rule, *next_rule;
	int err = 0;

	might_sleep();
	/* Should already be checked by landlock_merge_ruleset() */
	if (WARN_ON_ONCE(!src))
		return 0;
	/* Only merge into a domain. */
	if (WARN_ON_ONCE(!dst || !dst->hierarchy))
		return -EINVAL;

	/* Locks @dst first because we are its only owner. */
	mutex_lock(&dst->lock);
	mutex_lock_nested(&src->lock, SINGLE_DEPTH_NESTING);

	/* Stacks the new layer. */
	if (WARN_ON_ONCE(src->num_layers != 1 || dst->num_layers < 1)) {
		err = -EINVAL;
		goto out_unlock;
	}
	dst->fs_access_masks[dst->num_layers - 1] = src->fs_access_masks[0];

	/* Merges the @src tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule, &src->root,
					     node) {
		struct landlock_layer layers[] = { {
			.level = dst->num_layers,
		} };

		/* An unmerged ruleset rule has exactly one zero-level layer. */
		if (WARN_ON_ONCE(walker_rule->num_layers != 1)) {
			err = -EINVAL;
			goto out_unlock;
		}
		if (WARN_ON_ONCE(walker_rule->layers[0].level != 0)) {
			err = -EINVAL;
			goto out_unlock;
		}
		layers[0].access = walker_rule->layers[0].access;
		err = insert_rule(dst, walker_rule->object, &layers,
				  ARRAY_SIZE(layers));
		if (err)
			goto out_unlock;
	}

out_unlock:
	mutex_unlock(&src->lock);
	mutex_unlock(&dst->lock);
	return err;
}
311 | ||
/*
 * Copies @parent's rules and layer stack into the new domain @child, and
 * links @child->hierarchy to @parent->hierarchy (taking a reference on it).
 * A NULL @parent is a no-op.  Locks @child then @parent (nested).
 */
static int inherit_ruleset(struct landlock_ruleset *const parent,
			   struct landlock_ruleset *const child)
{
	struct landlock_rule *walker_rule, *next_rule;
	int err = 0;

	might_sleep();
	if (!parent)
		return 0;

	/* Locks @child first because we are its only owner. */
	mutex_lock(&child->lock);
	mutex_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);

	/* Copies the @parent tree. */
	rbtree_postorder_for_each_entry_safe(walker_rule, next_rule,
					     &parent->root, node) {
		err = insert_rule(child, walker_rule->object,
				  &walker_rule->layers,
				  walker_rule->num_layers);
		if (err)
			goto out_unlock;
	}

	/* @child must have been sized with one more layer than @parent. */
	if (WARN_ON_ONCE(child->num_layers <= parent->num_layers)) {
		err = -EINVAL;
		goto out_unlock;
	}
	/* Copies the parent layer stack and leaves a space for the new layer. */
	memcpy(child->fs_access_masks, parent->fs_access_masks,
	       flex_array_size(parent, fs_access_masks, parent->num_layers));

	if (WARN_ON_ONCE(!parent->hierarchy)) {
		err = -EINVAL;
		goto out_unlock;
	}
	get_hierarchy(parent->hierarchy);
	child->hierarchy->parent = parent->hierarchy;

out_unlock:
	mutex_unlock(&parent->lock);
	mutex_unlock(&child->lock);
	return err;
}
356 | ||
357 | static void free_ruleset(struct landlock_ruleset *const ruleset) | |
358 | { | |
359 | struct landlock_rule *freeme, *next; | |
360 | ||
361 | might_sleep(); | |
06a1c40a | 362 | rbtree_postorder_for_each_entry_safe(freeme, next, &ruleset->root, node) |
ae271c1b MS |
363 | free_rule(freeme); |
364 | put_hierarchy(ruleset->hierarchy); | |
365 | kfree(ruleset); | |
366 | } | |
367 | ||
368 | void landlock_put_ruleset(struct landlock_ruleset *const ruleset) | |
369 | { | |
370 | might_sleep(); | |
371 | if (ruleset && refcount_dec_and_test(&ruleset->usage)) | |
372 | free_ruleset(ruleset); | |
373 | } | |
374 | ||
375 | static void free_ruleset_work(struct work_struct *const work) | |
376 | { | |
377 | struct landlock_ruleset *ruleset; | |
378 | ||
379 | ruleset = container_of(work, struct landlock_ruleset, work_free); | |
380 | free_ruleset(ruleset); | |
381 | } | |
382 | ||
383 | void landlock_put_ruleset_deferred(struct landlock_ruleset *const ruleset) | |
384 | { | |
385 | if (ruleset && refcount_dec_and_test(&ruleset->usage)) { | |
386 | INIT_WORK(&ruleset->work_free, free_ruleset_work); | |
387 | schedule_work(&ruleset->work_free); | |
388 | } | |
389 | } | |
390 | ||
/**
 * landlock_merge_ruleset - Merge a ruleset with a domain
 *
 * @parent: Parent domain.
 * @ruleset: New ruleset to be merged.
 *
 * Returns the intersection of @parent and @ruleset, or returns @parent if
 * @ruleset is empty, or returns a duplicate of @ruleset if @parent is empty.
 *
 * Returns: the new domain, or an ERR_PTR (-EINVAL, -E2BIG or -ENOMEM).
 */
struct landlock_ruleset *
landlock_merge_ruleset(struct landlock_ruleset *const parent,
		       struct landlock_ruleset *const ruleset)
{
	struct landlock_ruleset *new_dom;
	u32 num_layers;
	int err;

	might_sleep();
	if (WARN_ON_ONCE(!ruleset || parent == ruleset))
		return ERR_PTR(-EINVAL);

	if (parent) {
		/* One extra layer for @ruleset on top of @parent's stack. */
		if (parent->num_layers >= LANDLOCK_MAX_NUM_LAYERS)
			return ERR_PTR(-E2BIG);
		num_layers = parent->num_layers + 1;
	} else {
		num_layers = 1;
	}

	/* Creates a new domain... */
	new_dom = create_ruleset(num_layers);
	if (IS_ERR(new_dom))
		return new_dom;
	new_dom->hierarchy =
		kzalloc(sizeof(*new_dom->hierarchy), GFP_KERNEL_ACCOUNT);
	if (!new_dom->hierarchy) {
		err = -ENOMEM;
		goto out_put_dom;
	}
	refcount_set(&new_dom->hierarchy->usage, 1);

	/* ...as a child of @parent... */
	err = inherit_ruleset(parent, new_dom);
	if (err)
		goto out_put_dom;

	/* ...and including @ruleset. */
	err = merge_ruleset(new_dom, ruleset);
	if (err)
		goto out_put_dom;

	return new_dom;

out_put_dom:
	landlock_put_ruleset(new_dom);
	return ERR_PTR(err);
}
448 | ||
449 | /* | |
450 | * The returned access has the same lifetime as @ruleset. | |
451 | */ | |
06a1c40a MS |
452 | const struct landlock_rule * |
453 | landlock_find_rule(const struct landlock_ruleset *const ruleset, | |
454 | const struct landlock_object *const object) | |
ae271c1b MS |
455 | { |
456 | const struct rb_node *node; | |
457 | ||
458 | if (!object) | |
459 | return NULL; | |
460 | node = ruleset->root.rb_node; | |
461 | while (node) { | |
06a1c40a MS |
462 | struct landlock_rule *this = |
463 | rb_entry(node, struct landlock_rule, node); | |
ae271c1b MS |
464 | |
465 | if (this->object == object) | |
466 | return this; | |
467 | if (this->object < object) | |
468 | node = node->rb_right; | |
469 | else | |
470 | node = node->rb_left; | |
471 | } | |
472 | return NULL; | |
473 | } |