landlock: Move filesystem helpers and add a new one
[linux-block.git] / security / landlock / fs.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Landlock LSM - Filesystem management and hooks
 *
 * Copyright © 2016-2020 Mickaël Salaün <mic@digikod.net>
 * Copyright © 2018-2020 ANSSI
 */

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/bits.h>
#include <linux/compiler_types.h>
#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/limits.h>
#include <linux/list.h>
#include <linux/lsm_hooks.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/path.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/stat.h>
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/workqueue.h>
#include <uapi/linux/landlock.h>

#include "common.h"
#include "cred.h"
#include "fs.h"
#include "limits.h"
#include "object.h"
#include "ruleset.h"
#include "setup.h"

/* Underlying object management */

static void release_inode(struct landlock_object *const object)
        __releases(object->lock)
{
        struct inode *const inode = object->underobj;
        struct super_block *sb;

        if (!inode) {
                spin_unlock(&object->lock);
                return;
        }

        /*
         * Protects against concurrent use by hook_sb_delete() of the reference
         * to the underlying inode.
         */
        object->underobj = NULL;
        /*
         * Makes sure that if the filesystem is concurrently unmounted,
         * hook_sb_delete() will wait for us to finish iput().
         */
        sb = inode->i_sb;
        atomic_long_inc(&landlock_superblock(sb)->inode_refs);
        spin_unlock(&object->lock);
        /*
         * Because object->underobj was not NULL, hook_sb_delete() and
         * get_inode_object() guarantee that it is safe to reset
         * landlock_inode(inode)->object while it is not NULL. It is therefore
         * not necessary to lock inode->i_lock.
         */
        rcu_assign_pointer(landlock_inode(inode)->object, NULL);
        /*
         * Now, new rules can safely be tied to @inode with get_inode_object().
         */

        iput(inode);
        if (atomic_long_dec_and_test(&landlock_superblock(sb)->inode_refs))
                wake_up_var(&landlock_superblock(sb)->inode_refs);
}

static const struct landlock_object_underops landlock_fs_underops = {
        .release = release_inode
};

/* Ruleset management */

static struct landlock_object *get_inode_object(struct inode *const inode)
{
        struct landlock_object *object, *new_object;
        struct landlock_inode_security *inode_sec = landlock_inode(inode);

        rcu_read_lock();
retry:
        object = rcu_dereference(inode_sec->object);
        if (object) {
                if (likely(refcount_inc_not_zero(&object->usage))) {
                        rcu_read_unlock();
                        return object;
                }
                /*
                 * We are racing with release_inode(): the object is going
                 * away. Wait for release_inode(), then retry.
                 */
                spin_lock(&object->lock);
                spin_unlock(&object->lock);
                goto retry;
        }
        rcu_read_unlock();

        /*
         * If there is no object tied to @inode, then create a new one (without
         * holding any locks).
         */
        new_object = landlock_create_object(&landlock_fs_underops, inode);
        if (IS_ERR(new_object))
                return new_object;

        /*
         * Protects against concurrent calls to get_inode_object() or
         * hook_sb_delete().
         */
        spin_lock(&inode->i_lock);
        if (unlikely(rcu_access_pointer(inode_sec->object))) {
                /* Someone else just created the object; bail out and retry. */
                spin_unlock(&inode->i_lock);
                kfree(new_object);

                rcu_read_lock();
                goto retry;
        }

        /*
         * @inode will be released by hook_sb_delete() on its superblock
         * shutdown, or by release_inode() when no more rulesets reference the
         * related object.
         */
        ihold(inode);
        rcu_assign_pointer(inode_sec->object, new_object);
        spin_unlock(&inode->i_lock);
        return new_object;
}
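
/*
 * To summarize the lifetime rules implemented above: get_inode_object()
 * takes an inode reference with ihold() when it ties a new object to
 * @inode.  That reference is dropped on exactly one of two paths:
 * release_inode(), when the last ruleset using the object goes away, or
 * hook_sb_delete(), when the filesystem is unmounted first.  The
 * object->lock critical sections and the per-superblock inode_refs
 * counter order these two paths against each other.
 */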

/* All access rights that can be tied to files. */
/* clang-format off */
#define ACCESS_FILE ( \
        LANDLOCK_ACCESS_FS_EXECUTE | \
        LANDLOCK_ACCESS_FS_WRITE_FILE | \
        LANDLOCK_ACCESS_FS_READ_FILE)
/* clang-format on */

/*
 * @path: Should have been checked by get_path_from_fd().
 */
int landlock_append_fs_rule(struct landlock_ruleset *const ruleset,
                            const struct path *const path,
                            access_mask_t access_rights)
{
        int err;
        struct landlock_object *object;

        /* Files only get access rights that make sense. */
        if (!d_is_dir(path->dentry) &&
            (access_rights | ACCESS_FILE) != ACCESS_FILE)
                return -EINVAL;
        if (WARN_ON_ONCE(ruleset->num_layers != 1))
                return -EINVAL;

        /* Transforms relative access rights to absolute ones. */
        access_rights |= LANDLOCK_MASK_ACCESS_FS & ~ruleset->fs_access_masks[0];
        object = get_inode_object(d_backing_inode(path->dentry));
        if (IS_ERR(object))
                return PTR_ERR(object);
        mutex_lock(&ruleset->lock);
        err = landlock_insert_rule(ruleset, object, access_rights);
        mutex_unlock(&ruleset->lock);
        /*
         * No need to check for an error because landlock_insert_rule()
         * increments the refcount for the new object if needed.
         */
        landlock_put_object(object);
        return err;
}
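
/*
 * For context, this function is reached from the landlock_add_rule(2)
 * syscall.  A minimal user space sketch (informal; error handling and the
 * ruleset creation are omitted):
 *
 *	struct landlock_path_beneath_attr attr = {
 *		.allowed_access = LANDLOCK_ACCESS_FS_READ_FILE,
 *	};
 *
 *	attr.parent_fd = open("/usr", O_PATH | O_CLOEXEC);
 *	syscall(__NR_landlock_add_rule, ruleset_fd,
 *		LANDLOCK_RULE_PATH_BENEATH, &attr, 0);
 *
 * The syscall code translates parent_fd back to a struct path before
 * calling landlock_append_fs_rule().
 */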

/* Access-control management */

/*
 * The lifetime of the returned rule is tied to @domain.
 *
 * Returns NULL if no rule is found or if @dentry is negative.
 */
static inline const struct landlock_rule *
find_rule(const struct landlock_ruleset *const domain,
          const struct dentry *const dentry)
{
        const struct landlock_rule *rule;
        const struct inode *inode;

        /* Ignores nonexistent leaves. */
        if (d_is_negative(dentry))
                return NULL;

        inode = d_backing_inode(dentry);
        rcu_read_lock();
        rule = landlock_find_rule(
                domain, rcu_dereference(landlock_inode(inode)->object));
        rcu_read_unlock();
        return rule;
}

/*
 * @layer_masks is read and may be updated according to the access request and
 * the matching rule.
 *
 * Returns true if the request is allowed (i.e. relevant layer masks for the
 * request are empty).
 */
static inline bool
unmask_layers(const struct landlock_rule *const rule,
              const access_mask_t access_request,
              layer_mask_t (*const layer_masks)[LANDLOCK_NUM_ACCESS_FS])
{
        size_t layer_level;

        if (!access_request || !layer_masks)
                return true;
        if (!rule)
                return false;

        /*
         * An access is granted if, for each policy layer, at least one rule
         * encountered on the pathwalk grants the requested access,
         * regardless of its position in the layer stack. We must then check
         * the remaining layers for each inode, from the first added layer to
         * the last one. When there are multiple requested accesses, for each
         * policy layer, the full set of requested accesses may not be granted
         * by only one rule, but by the union (binary OR) of multiple rules.
         * E.g. /a/b <execute> + /a <read> => /a/b <execute + read>
         */
        for (layer_level = 0; layer_level < rule->num_layers; layer_level++) {
                const struct landlock_layer *const layer =
                        &rule->layers[layer_level];
                const layer_mask_t layer_bit = BIT_ULL(layer->level - 1);
                const unsigned long access_req = access_request;
                unsigned long access_bit;
                bool is_empty;

                /*
                 * Records in @layer_masks which layer grants access to each
                 * requested access.
                 */
                is_empty = true;
                for_each_set_bit(access_bit, &access_req,
                                 ARRAY_SIZE(*layer_masks)) {
                        if (layer->access & BIT_ULL(access_bit))
                                (*layer_masks)[access_bit] &= ~layer_bit;
                        is_empty = is_empty && !(*layer_masks)[access_bit];
                }
                if (is_empty)
                        return true;
        }
        return false;
}
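
/*
 * Worked example (informal, with made-up layer contents): suppose a domain
 * has two layers that both handle LANDLOCK_ACCESS_FS_READ_FILE, so
 * check_access_path() seeds the READ_FILE entry of @layer_masks with 0b11.
 * If a rule on /a grants READ_FILE for layer 1 only, unmask_layers() clears
 * bit 0, leaving 0b10: not yet allowed, so the pathwalk continues.  If a
 * rule higher up on / grants READ_FILE for layer 2, a later call clears
 * bit 1 as well; the mask becomes empty and the function returns true.
 */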

/*
 * Allows access to pseudo filesystems that will never be mountable (e.g.
 * sockfs, pipefs), but may still be reachable through
 * /proc/<pid>/fd/<file-descriptor>.
 */
static inline bool is_nouser_or_private(const struct dentry *dentry)
{
        return (dentry->d_sb->s_flags & SB_NOUSER) ||
               (d_is_positive(dentry) &&
                unlikely(IS_PRIVATE(d_backing_inode(dentry))));
}

static int check_access_path(const struct landlock_ruleset *const domain,
                             const struct path *const path,
                             const access_mask_t access_request)
{
        layer_mask_t layer_masks[LANDLOCK_NUM_ACCESS_FS] = {};
        bool allowed = false, has_access = false;
        struct path walker_path;
        size_t i;

        if (!access_request)
                return 0;
        if (WARN_ON_ONCE(!domain || !path))
                return 0;
        if (is_nouser_or_private(path->dentry))
                return 0;
        if (WARN_ON_ONCE(domain->num_layers < 1))
                return -EACCES;

        /* Saves all layers handling a subset of requested accesses. */
        for (i = 0; i < domain->num_layers; i++) {
                const unsigned long access_req = access_request;
                unsigned long access_bit;

                for_each_set_bit(access_bit, &access_req,
                                 ARRAY_SIZE(layer_masks)) {
                        if (domain->fs_access_masks[i] & BIT_ULL(access_bit)) {
                                layer_masks[access_bit] |= BIT_ULL(i);
                                has_access = true;
                        }
                }
        }
        /* An access request not handled by the domain is allowed. */
        if (!has_access)
                return 0;

        walker_path = *path;
        path_get(&walker_path);
        /*
         * We need to walk up the whole hierarchy to not miss any relevant
         * restriction.
         */
        while (true) {
                struct dentry *parent_dentry;

                allowed = unmask_layers(find_rule(domain, walker_path.dentry),
                                        access_request, &layer_masks);
                if (allowed)
                        /* Stops when a rule from each layer grants access. */
                        break;

jump_up:
                if (walker_path.dentry == walker_path.mnt->mnt_root) {
                        if (follow_up(&walker_path)) {
                                /* Ignores hidden mount points. */
                                goto jump_up;
                        } else {
                                /*
                                 * Stops at the real root. Denies access
                                 * because not all layers have granted access.
                                 */
                                allowed = false;
                                break;
                        }
                }
                if (unlikely(IS_ROOT(walker_path.dentry))) {
                        /*
                         * Stops at disconnected root directories. Only allows
                         * access to internal filesystems (e.g. nsfs, which is
                         * reachable through /proc/<pid>/ns/<namespace>).
                         */
                        allowed = !!(walker_path.mnt->mnt_flags & MNT_INTERNAL);
                        break;
                }
                parent_dentry = dget_parent(walker_path.dentry);
                dput(walker_path.dentry);
                walker_path.dentry = parent_dentry;
        }
        path_put(&walker_path);
        return allowed ? 0 : -EACCES;
}
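
/*
 * Illustration (informal): a process whose domain's only layer handles
 * LANDLOCK_ACCESS_FS_READ_FILE opens /mnt/usb/doc.txt for reading, with a
 * USB filesystem mounted on /mnt/usb.  The walk evaluates rules for
 * doc.txt, then for the root of the USB filesystem, then (after crossing
 * the mount with follow_up()) for /mnt and finally for /.  The walk stops
 * with success as soon as every requested layer mask is emptied; reaching
 * the real root without that happening returns -EACCES.
 */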

static inline int current_check_access_path(const struct path *const path,
                                            const access_mask_t access_request)
{
        const struct landlock_ruleset *const dom =
                landlock_get_current_domain();

        if (!dom)
                return 0;
        return check_access_path(dom, path, access_request);
}

static inline access_mask_t get_mode_access(const umode_t mode)
{
        switch (mode & S_IFMT) {
        case S_IFLNK:
                return LANDLOCK_ACCESS_FS_MAKE_SYM;
        case 0:
                /* A zero mode translates to S_IFREG. */
        case S_IFREG:
                return LANDLOCK_ACCESS_FS_MAKE_REG;
        case S_IFDIR:
                return LANDLOCK_ACCESS_FS_MAKE_DIR;
        case S_IFCHR:
                return LANDLOCK_ACCESS_FS_MAKE_CHAR;
        case S_IFBLK:
                return LANDLOCK_ACCESS_FS_MAKE_BLOCK;
        case S_IFIFO:
                return LANDLOCK_ACCESS_FS_MAKE_FIFO;
        case S_IFSOCK:
                return LANDLOCK_ACCESS_FS_MAKE_SOCK;
        default:
                WARN_ON_ONCE(1);
                return 0;
        }
}
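
/*
 * For instance, mknod("fifo", S_IFIFO | 0644, 0) maps to
 * LANDLOCK_ACCESS_FS_MAKE_FIFO, while mknod("file", 0644, 0), whose mode
 * has no file type bits set, is treated as creating a regular file and
 * maps to LANDLOCK_ACCESS_FS_MAKE_REG.
 */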

static inline access_mask_t maybe_remove(const struct dentry *const dentry)
{
        if (d_is_negative(dentry))
                return 0;
        return d_is_dir(dentry) ? LANDLOCK_ACCESS_FS_REMOVE_DIR :
                                  LANDLOCK_ACCESS_FS_REMOVE_FILE;
}

/* Inode hooks */

static void hook_inode_free_security(struct inode *const inode)
{
        /*
         * All inodes must already have been untied from their object by
         * release_inode() or hook_sb_delete().
         */
        WARN_ON_ONCE(landlock_inode(inode)->object);
}

/* Super-block hooks */

/*
 * Release the inodes used in a security policy.
 *
 * Cf. fsnotify_unmount_inodes() and invalidate_inodes()
 */
static void hook_sb_delete(struct super_block *const sb)
{
        struct inode *inode, *prev_inode = NULL;

        if (!landlock_initialized)
                return;

        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                struct landlock_object *object;

                /* Only handles referenced inodes. */
                if (!atomic_read(&inode->i_count))
                        continue;

                /*
                 * Protects against concurrent modification of inode (e.g.
                 * from get_inode_object()).
                 */
                spin_lock(&inode->i_lock);
                /*
                 * Checks I_FREEING and I_WILL_FREE to protect against a race
                 * condition when release_inode() just called iput(), which
                 * could lead to a NULL dereference of inode->security or a
                 * second call to iput() for the same Landlock object. Also
                 * checks I_NEW because such an inode cannot be tied to an
                 * object.
                 */
                if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                rcu_read_lock();
                object = rcu_dereference(landlock_inode(inode)->object);
                if (!object) {
                        rcu_read_unlock();
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                /* Keeps a reference to this inode until the next loop walk. */
                __iget(inode);
                spin_unlock(&inode->i_lock);

                /*
                 * If there is no concurrent release_inode() ongoing, then we
                 * are in charge of calling iput() on this inode, otherwise we
                 * will just wait for it to finish.
                 */
                spin_lock(&object->lock);
                if (object->underobj == inode) {
                        object->underobj = NULL;
                        spin_unlock(&object->lock);
                        rcu_read_unlock();

                        /*
                         * Because object->underobj was not NULL,
                         * release_inode() and get_inode_object() guarantee
                         * that it is safe to reset
                         * landlock_inode(inode)->object while it is not NULL.
                         * It is therefore not necessary to lock inode->i_lock.
                         */
                        rcu_assign_pointer(landlock_inode(inode)->object, NULL);
                        /*
                         * At this point, we own the ihold() reference that was
                         * originally set up by get_inode_object() and the
                         * __iget() reference that we just set in this loop
                         * walk. Therefore the following call to iput() will
                         * not sleep nor drop the inode because there are now
                         * at least two references to it.
                         */
                        iput(inode);
                } else {
                        spin_unlock(&object->lock);
                        rcu_read_unlock();
                }

                if (prev_inode) {
                        /*
                         * At this point, we still own the __iget() reference
                         * that we just set in this loop walk. Therefore we
                         * can drop the list lock and know that the inode won't
                         * disappear from under us until the next loop walk.
                         */
                        spin_unlock(&sb->s_inode_list_lock);
                        /*
                         * We can now actually put the inode reference from the
                         * previous loop walk, which is not needed anymore.
                         */
                        iput(prev_inode);
                        cond_resched();
                        spin_lock(&sb->s_inode_list_lock);
                }
                prev_inode = inode;
        }
        spin_unlock(&sb->s_inode_list_lock);

        /* Puts the inode reference from the last loop walk, if any. */
        if (prev_inode)
                iput(prev_inode);
        /* Waits for pending iput() in release_inode(). */
        wait_var_event(&landlock_superblock(sb)->inode_refs,
                       !atomic_long_read(&landlock_superblock(sb)->inode_refs));
}

/*
 * Because a Landlock security policy is defined according to the filesystem
 * topology (i.e. the mount namespace), changing it may grant access to files
 * not previously allowed.
 *
 * To make it simple, deny any filesystem topology modification by landlocked
 * processes. Non-landlocked processes may still change the namespace of a
 * landlocked process, but this kind of threat must be handled by a system-wide
 * access-control security policy.
 *
 * This could be lifted in the future if Landlock can safely handle mount
 * namespace updates requested by a landlocked process. Indeed, we could
 * update the current domain (which is currently read-only) by taking into
 * account the accesses of the source and the destination of a new mount point.
 * However, it would also require making all the child domains dynamically
 * inherit these new constraints. Anyway, for backward compatibility reasons,
 * a dedicated user space option would be required (e.g. as a ruleset flag).
 */
static int hook_sb_mount(const char *const dev_name,
                         const struct path *const path, const char *const type,
                         const unsigned long flags, void *const data)
{
        if (!landlock_get_current_domain())
                return 0;
        return -EPERM;
}

static int hook_move_mount(const struct path *const from_path,
                           const struct path *const to_path)
{
        if (!landlock_get_current_domain())
                return 0;
        return -EPERM;
}

/*
 * Removing a mount point may reveal a previously hidden file hierarchy, and
 * may therefore grant access to files that were previously forbidden.
 */
static int hook_sb_umount(struct vfsmount *const mnt, const int flags)
{
        if (!landlock_get_current_domain())
                return 0;
        return -EPERM;
}

static int hook_sb_remount(struct super_block *const sb, void *const mnt_opts)
{
        if (!landlock_get_current_domain())
                return 0;
        return -EPERM;
}

/*
 * pivot_root(2), like mount(2), changes the current mount namespace. It must
 * then be forbidden for a landlocked process.
 *
 * However, chroot(2) may be allowed because it only changes the relative root
 * directory of the current process. Moreover, it can be used to restrict the
 * view of the filesystem.
 */
static int hook_sb_pivotroot(const struct path *const old_path,
                             const struct path *const new_path)
{
        if (!landlock_get_current_domain())
                return 0;
        return -EPERM;
}

/* Path hooks */

/*
 * Creating multiple links or renaming may lead to privilege escalations if not
 * handled properly. Indeed, we must be sure that the source doesn't gain more
 * privileges by being accessible from the destination. This is getting more
 * complex when dealing with multiple layers. The whole picture can be seen as
 * a multilayer partial ordering problem. A future version of Landlock will
 * deal with that.
 */
static int hook_path_link(struct dentry *const old_dentry,
                          const struct path *const new_dir,
                          struct dentry *const new_dentry)
{
        const struct landlock_ruleset *const dom =
                landlock_get_current_domain();

        if (!dom)
                return 0;
        /* The mount points are the same for old and new paths, cf. EXDEV. */
        if (old_dentry->d_parent != new_dir->dentry)
                /* Gracefully forbids reparenting. */
                return -EXDEV;
        if (unlikely(d_is_negative(old_dentry)))
                return -ENOENT;
        return check_access_path(
                dom, new_dir,
                get_mode_access(d_backing_inode(old_dentry)->i_mode));
}
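
/*
 * As a consequence, in a landlocked process, link("/tmp/a", "/var/b")
 * fails with EXDEV because the new parent directory differs from the
 * source's parent, whereas link("/tmp/a", "/tmp/b") is checked against
 * the rules covering /tmp with the access right matching the file type
 * (LANDLOCK_ACCESS_FS_MAKE_REG for a regular file).
 */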

static int hook_path_rename(const struct path *const old_dir,
                            struct dentry *const old_dentry,
                            const struct path *const new_dir,
                            struct dentry *const new_dentry)
{
        const struct landlock_ruleset *const dom =
                landlock_get_current_domain();

        if (!dom)
                return 0;
        /* The mount points are the same for old and new paths, cf. EXDEV. */
        if (old_dir->dentry != new_dir->dentry)
                /* Gracefully forbids reparenting. */
                return -EXDEV;
        if (unlikely(d_is_negative(old_dentry)))
                return -ENOENT;
        /* RENAME_EXCHANGE is handled because directories are the same. */
        return check_access_path(
                dom, old_dir,
                maybe_remove(old_dentry) | maybe_remove(new_dentry) |
                        get_mode_access(d_backing_inode(old_dentry)->i_mode));
}

static int hook_path_mkdir(const struct path *const dir,
                           struct dentry *const dentry, const umode_t mode)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_DIR);
}

static int hook_path_mknod(const struct path *const dir,
                           struct dentry *const dentry, const umode_t mode,
                           const unsigned int dev)
{
        const struct landlock_ruleset *const dom =
                landlock_get_current_domain();

        if (!dom)
                return 0;
        return check_access_path(dom, dir, get_mode_access(mode));
}

static int hook_path_symlink(const struct path *const dir,
                             struct dentry *const dentry,
                             const char *const old_name)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_MAKE_SYM);
}

static int hook_path_unlink(const struct path *const dir,
                            struct dentry *const dentry)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_FILE);
}

static int hook_path_rmdir(const struct path *const dir,
                           struct dentry *const dentry)
{
        return current_check_access_path(dir, LANDLOCK_ACCESS_FS_REMOVE_DIR);
}

/* File hooks */

static inline access_mask_t get_file_access(const struct file *const file)
{
        access_mask_t access = 0;

        if (file->f_mode & FMODE_READ) {
                /* A directory can only be opened in read mode. */
                if (S_ISDIR(file_inode(file)->i_mode))
                        return LANDLOCK_ACCESS_FS_READ_DIR;
                access = LANDLOCK_ACCESS_FS_READ_FILE;
        }
        if (file->f_mode & FMODE_WRITE)
                access |= LANDLOCK_ACCESS_FS_WRITE_FILE;
        /* __FMODE_EXEC is indeed part of f_flags, not f_mode. */
        if (file->f_flags & __FMODE_EXEC)
                access |= LANDLOCK_ACCESS_FS_EXECUTE;
        return access;
}
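
/*
 * For example, open("/etc/hosts", O_RDWR) requests
 * LANDLOCK_ACCESS_FS_READ_FILE | LANDLOCK_ACCESS_FS_WRITE_FILE, while
 * opening a directory with O_RDONLY | O_DIRECTORY requests only
 * LANDLOCK_ACCESS_FS_READ_DIR.  An O_PATH open sets none of these modes,
 * which is why get_file_access() may return 0 (see hook_file_open() below).
 */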

static int hook_file_open(struct file *const file)
{
        const struct landlock_ruleset *const dom =
                landlock_get_current_domain();

        if (!dom)
                return 0;
        /*
         * Because a file may be opened with O_PATH, get_file_access() may
         * return 0. This case will be handled with a future Landlock
         * evolution.
         */
        return check_access_path(dom, &file->f_path, get_file_access(file));
}

static struct security_hook_list landlock_hooks[] __lsm_ro_after_init = {
        LSM_HOOK_INIT(inode_free_security, hook_inode_free_security),

        LSM_HOOK_INIT(sb_delete, hook_sb_delete),
        LSM_HOOK_INIT(sb_mount, hook_sb_mount),
        LSM_HOOK_INIT(move_mount, hook_move_mount),
        LSM_HOOK_INIT(sb_umount, hook_sb_umount),
        LSM_HOOK_INIT(sb_remount, hook_sb_remount),
        LSM_HOOK_INIT(sb_pivotroot, hook_sb_pivotroot),

        LSM_HOOK_INIT(path_link, hook_path_link),
        LSM_HOOK_INIT(path_rename, hook_path_rename),
        LSM_HOOK_INIT(path_mkdir, hook_path_mkdir),
        LSM_HOOK_INIT(path_mknod, hook_path_mknod),
        LSM_HOOK_INIT(path_symlink, hook_path_symlink),
        LSM_HOOK_INIT(path_unlink, hook_path_unlink),
        LSM_HOOK_INIT(path_rmdir, hook_path_rmdir),

        LSM_HOOK_INIT(file_open, hook_file_open),
};

__init void landlock_add_fs_hooks(void)
{
        security_add_hooks(landlock_hooks, ARRAY_SIZE(landlock_hooks),
                           LANDLOCK_NAME);
}