// SPDX-License-Identifier: GPL-2.0-only
/*
 * Minimal file system backend for holding eBPF maps and programs,
 * used by bpf(2) object pinning.
 *
 * Daniel Borkmann <daniel@iogearbox.net>
 */
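/*
 * Illustrative sketch (not part of the kernel sources): from user space,
 * pinning and re-opening objects goes through the BPF_OBJ_PIN and
 * BPF_OBJ_GET commands of bpf(2), e.g. via the libbpf wrappers. The map
 * fd and the "/sys/fs/bpf/my_map" path below are hypothetical.
 *
 *	#include <bpf/bpf.h>
 *
 *	int pin_and_reopen(int map_fd)
 *	{
 *		int fd;
 *
 *		if (bpf_obj_pin(map_fd, "/sys/fs/bpf/my_map"))
 *			return -1;	// e.g. bpffs not mounted at that path
 *		fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *		return fd;		// new fd to the same map, or < 0
 *	}
 */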
#include <linux/init.h>
#include <linux/magic.h>
#include <linux/major.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/kdev_t.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include "preload/bpf_preload.h"
static void *bpf_any_get(void *raw, enum bpf_type type)
		bpf_map_inc_with_uref(raw);

static void bpf_any_put(void *raw, enum bpf_type type)
		bpf_map_put_with_uref(raw);

static void *bpf_fd_probe_obj(u32 ufd, enum bpf_type *type)
	raw = bpf_map_get_with_uref(ufd);
	raw = bpf_prog_get(ufd);
		*type = BPF_TYPE_PROG;
	raw = bpf_link_get_from_fd(ufd);
		*type = BPF_TYPE_LINK;
	return ERR_PTR(-EINVAL);

static const struct inode_operations bpf_dir_iops;

static const struct inode_operations bpf_prog_iops = { };
static const struct inode_operations bpf_map_iops  = { };
static const struct inode_operations bpf_link_iops = { };

static struct inode *bpf_get_inode(struct super_block *sb,
				   const struct inode *dir,
	switch (mode & S_IFMT) {
		return ERR_PTR(-EINVAL);

	inode = new_inode(sb);
		return ERR_PTR(-ENOSPC);

	inode->i_ino = get_next_ino();
	simple_inode_init_ts(inode);
	inode_init_owner(&nop_mnt_idmap, inode, dir, mode);

static int bpf_inode_type(const struct inode *inode, enum bpf_type *type)
	*type = BPF_TYPE_UNSPEC;
	if (inode->i_op == &bpf_prog_iops)
		*type = BPF_TYPE_PROG;
	else if (inode->i_op == &bpf_map_iops)
		*type = BPF_TYPE_MAP;
	else if (inode->i_op == &bpf_link_iops)
		*type = BPF_TYPE_LINK;

static void bpf_dentry_finalize(struct dentry *dentry, struct inode *inode,
	d_instantiate(dentry, inode);
	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));

static int bpf_mkdir(struct mnt_idmap *idmap, struct inode *dir,
		     struct dentry *dentry, umode_t mode)
	inode = bpf_get_inode(dir->i_sb, dir, mode | S_IFDIR);
		return PTR_ERR(inode);

	inode->i_op = &bpf_dir_iops;
	inode->i_fop = &simple_dir_operations;
	bpf_dentry_finalize(dentry, inode, dir);

static struct map_iter *map_iter(struct seq_file *m)

static struct bpf_map *seq_file_to_map(struct seq_file *m)
	return file_inode(m->file)->i_private;

static void map_iter_free(struct map_iter *iter)

static struct map_iter *map_iter_alloc(struct bpf_map *map)
	struct map_iter *iter;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL | __GFP_NOWARN);
	iter->key = kzalloc(map->key_size, GFP_KERNEL | __GFP_NOWARN);

static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos)
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (map_iter(m)->done)
	if (unlikely(v == SEQ_START_TOKEN))
	if (map->ops->map_get_next_key(map, prev_key, key)) {
		map_iter(m)->done = true;

static void *map_seq_start(struct seq_file *m, loff_t *pos)
	if (map_iter(m)->done)
	return *pos ? map_iter(m)->key : SEQ_START_TOKEN;

static void map_seq_stop(struct seq_file *m, void *v)

static int map_seq_show(struct seq_file *m, void *v)
	struct bpf_map *map = seq_file_to_map(m);
	void *key = map_iter(m)->key;

	if (unlikely(v == SEQ_START_TOKEN)) {
		seq_puts(m, "# WARNING!! The output is for debug purpose only\n");
		seq_puts(m, "# WARNING!! The output format will change\n");
		map->ops->map_seq_show_elem(map, key, m);

static const struct seq_operations bpffs_map_seq_ops = {
	.start	= map_seq_start,
	.next	= map_seq_next,
	.show	= map_seq_show,
	.stop	= map_seq_stop,

static int bpffs_map_open(struct inode *inode, struct file *file)
	struct bpf_map *map = inode->i_private;
	struct map_iter *iter;

	iter = map_iter_alloc(map);
	err = seq_open(file, &bpffs_map_seq_ops);
	m = file->private_data;

static int bpffs_map_release(struct inode *inode, struct file *file)
	struct seq_file *m = file->private_data;

	map_iter_free(map_iter(m));
	return seq_release(inode, file);
/* bpffs_map_fops should only implement the basic
 * read operation for a BPF map.  The purpose is to
 * provide a simple, intuitive way for users to do
 * "cat bpffs/pathto/a-pinned-map".
 *
 * Other operations (e.g. write, lookup...) should be realized by
 * the userspace tools (e.g. bpftool) through the
 * BPF_OBJ_GET_INFO_BY_FD and the map's lookup/update
 * interface.
 */
static const struct file_operations bpffs_map_fops = {
	.open		= bpffs_map_open,
	.release	= bpffs_map_release,
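/*
 * Illustrative usage sketch (the pinned path is hypothetical): once a
 * map that implements map_seq_show_elem() is pinned, its contents can
 * simply be read, while inspection and modification go through bpftool
 * or the syscall interface:
 *
 *	# cat /sys/fs/bpf/my_map
 *	# bpftool map dump pinned /sys/fs/bpf/my_map
 *	# bpftool map update pinned /sys/fs/bpf/my_map key 0 0 0 0 value 1 0 0 0
 */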
static int bpffs_obj_open(struct inode *inode, struct file *file)

static const struct file_operations bpffs_obj_fops = {
	.open		= bpffs_obj_open,

static int bpf_mkobj_ops(struct dentry *dentry, umode_t mode, void *raw,
			 const struct inode_operations *iops,
			 const struct file_operations *fops)
	struct inode *dir = dentry->d_parent->d_inode;
	struct inode *inode = bpf_get_inode(dir->i_sb, dir, mode);
		return PTR_ERR(inode);

	inode->i_private = raw;
	bpf_dentry_finalize(dentry, inode, dir);

static int bpf_mkprog(struct dentry *dentry, umode_t mode, void *arg)
	return bpf_mkobj_ops(dentry, mode, arg, &bpf_prog_iops,

static int bpf_mkmap(struct dentry *dentry, umode_t mode, void *arg)
	struct bpf_map *map = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_map_iops,
			     bpf_map_support_seq_show(map) ?
			     &bpffs_map_fops : &bpffs_obj_fops);

static int bpf_mklink(struct dentry *dentry, umode_t mode, void *arg)
	struct bpf_link *link = arg;

	return bpf_mkobj_ops(dentry, mode, arg, &bpf_link_iops,
			     bpf_link_is_iter(link) ?
			     &bpf_iter_fops : &bpffs_obj_fops);

static struct dentry *
bpf_lookup(struct inode *dir, struct dentry *dentry, unsigned flags)
	/* Dots in names (e.g. "/sys/fs/bpf/foo.bar") are reserved for future
	 * extensions. That allows populate_bpffs() to create special files.
	 */
	if ((dir->i_mode & S_IALLUGO) &&
	    strchr(dentry->d_name.name, '.'))
		return ERR_PTR(-EPERM);

	return simple_lookup(dir, dentry, flags);
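/*
 * Consequence of the dot restriction above, sketched from user space
 * (fd and paths are hypothetical): pinning to a name containing '.'
 * fails, while kernel-created entries such as the preload iterators may
 * use dotted names.
 *
 *	bpf_obj_pin(map_fd, "/sys/fs/bpf/foo.bar");	// fails with -EPERM
 *	bpf_obj_pin(map_fd, "/sys/fs/bpf/foo_bar");	// succeeds
 */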
static int bpf_symlink(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, const char *target)
	char *link = kstrdup(target, GFP_USER | __GFP_NOWARN);

	inode = bpf_get_inode(dir->i_sb, dir, S_IRWXUGO | S_IFLNK);
		return PTR_ERR(inode);

	inode->i_op = &simple_symlink_inode_operations;
	inode->i_link = link;
	bpf_dentry_finalize(dentry, inode, dir);

static const struct inode_operations bpf_dir_iops = {
	.lookup		= bpf_lookup,
	.symlink	= bpf_symlink,
	.rmdir		= simple_rmdir,
	.rename		= simple_rename,
	.unlink		= simple_unlink,

/* pin iterator link into bpffs */
static int bpf_iter_link_pin_kernel(struct dentry *parent,
				    const char *name, struct bpf_link *link)
	umode_t mode = S_IFREG | S_IRUSR;
	struct dentry *dentry;

	inode_lock(parent->d_inode);
	dentry = lookup_one_len(name, parent, strlen(name));
	if (IS_ERR(dentry)) {
		inode_unlock(parent->d_inode);
		return PTR_ERR(dentry);
	ret = bpf_mkobj_ops(dentry, mode, link, &bpf_link_iops,
	inode_unlock(parent->d_inode);

static int bpf_obj_do_pin(int path_fd, const char __user *pathname, void *raw,
	struct dentry *dentry;

	dentry = user_path_create(path_fd, pathname, &path, 0);
		return PTR_ERR(dentry);

	dir = d_inode(path.dentry);
	if (dir->i_op != &bpf_dir_iops) {
	mode = S_IFREG | ((S_IRUSR | S_IWUSR) & ~current_umask());
	ret = security_path_mknod(&path, dentry, mode, 0);
		ret = vfs_mkobj(dentry, mode, bpf_mkprog, raw);
		ret = vfs_mkobj(dentry, mode, bpf_mkmap, raw);
		ret = vfs_mkobj(dentry, mode, bpf_mklink, raw);
	done_path_create(&path, dentry);

int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname)
	raw = bpf_fd_probe_obj(ufd, &type);
	ret = bpf_obj_do_pin(path_fd, pathname, raw, type);
		bpf_any_put(raw, type);

static void *bpf_obj_do_get(int path_fd, const char __user *pathname,
			    enum bpf_type *type, int flags)
	ret = user_path_at(path_fd, pathname, LOOKUP_FOLLOW, &path);
	inode = d_backing_inode(path.dentry);
	ret = path_permission(&path, ACC_MODE(flags));
	ret = bpf_inode_type(inode, type);
	raw = bpf_any_get(inode->i_private, *type);

int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags)
	enum bpf_type type = BPF_TYPE_UNSPEC;

	f_flags = bpf_get_file_flag(flags);
	raw = bpf_obj_do_get(path_fd, pathname, &type, f_flags);
	if (type == BPF_TYPE_PROG)
		ret = bpf_prog_new_fd(raw);
	else if (type == BPF_TYPE_MAP)
		ret = bpf_map_new_fd(raw, f_flags);
	else if (type == BPF_TYPE_LINK)
		ret = (f_flags != O_RDWR) ? -EINVAL : bpf_link_new_fd(raw);
		bpf_any_put(raw, type);
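/*
 * Illustrative sketch (not part of the kernel sources): BPF_OBJ_GET as
 * seen from user space; the pinned path is hypothetical. Maps honour
 * BPF_F_RDONLY / BPF_F_WRONLY via file_flags, while pinned links can
 * only be re-opened read-write (anything else yields -EINVAL above).
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int get_map_rdonly(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map";
 *		attr.file_flags = BPF_F_RDONLY;
 *		return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 *	}
 */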
static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type)
	struct bpf_prog *prog;
	int ret = inode_permission(&nop_mnt_idmap, inode, MAY_READ);

	if (inode->i_op == &bpf_map_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op == &bpf_link_iops)
		return ERR_PTR(-EINVAL);
	if (inode->i_op != &bpf_prog_iops)
		return ERR_PTR(-EACCES);

	prog = inode->i_private;

	ret = security_bpf_prog(prog);
	if (!bpf_prog_get_ok(prog, &type, false))
		return ERR_PTR(-EINVAL);

struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type)
	struct bpf_prog *prog;
	int ret = kern_path(name, LOOKUP_FOLLOW, &path);

	prog = __get_prog_inode(d_backing_inode(path.dentry), type);
EXPORT_SYMBOL(bpf_prog_get_type_path);
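/*
 * Illustrative in-kernel usage sketch (path and program type are
 * hypothetical): other kernel code can take a reference on a pinned
 * program by path and must drop it with bpf_prog_put() when done.
 *
 *	struct bpf_prog *prog;
 *
 *	prog = bpf_prog_get_type_path("/sys/fs/bpf/my_prog",
 *				      BPF_PROG_TYPE_SCHED_CLS);
 *	if (IS_ERR(prog))
 *		return PTR_ERR(prog);
 *	...
 *	bpf_prog_put(prog);
 */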
/*
 * Display the mount options in /proc/mounts.
 */
static int bpf_show_options(struct seq_file *m, struct dentry *root)
	umode_t mode = d_inode(root)->i_mode & S_IALLUGO & ~S_ISVTX;

	if (mode != S_IRWXUGO)
		seq_printf(m, ",mode=%o", mode);

static void bpf_free_inode(struct inode *inode)
	if (S_ISLNK(inode->i_mode))
		kfree(inode->i_link);
	if (!bpf_inode_type(inode, &type))
		bpf_any_put(inode->i_private, type);
	free_inode_nonrcu(inode);

static const struct super_operations bpf_super_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.show_options	= bpf_show_options,
	.free_inode	= bpf_free_inode,

static const struct fs_parameter_spec bpf_fs_parameters[] = {
	fsparam_u32oct	("mode",			OPT_MODE),

struct bpf_mount_opts {

static int bpf_parse_param(struct fs_context *fc, struct fs_parameter *param)
	struct bpf_mount_opts *opts = fc->fs_private;
	struct fs_parse_result result;

	opt = fs_parse(fc, bpf_fs_parameters, param, &result);
		/* We might like to report bad mount options here, but
		 * traditionally we've ignored all mount options, so we'd
		 * better continue to ignore non-existing options for bpf.
		 */
		if (opt == -ENOPARAM) {
			opt = vfs_parse_fs_param_source(fc, param);
			if (opt != -ENOPARAM)
		opts->mode = result.uint_32 & S_IALLUGO;
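/*
 * Illustrative usage sketch (mount point and mode value are just
 * examples): the only option bpffs parses is "mode", an octal
 * permission mask applied to the root directory. A non-default mode
 * shows up again via bpf_show_options() in /proc/mounts.
 *
 *	# mount -t bpf -o mode=0700 bpffs /sys/fs/bpf
 *	# grep bpf /proc/mounts
 *	bpffs /sys/fs/bpf bpf rw,relatime,mode=700 0 0
 */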
struct bpf_preload_ops *bpf_preload_ops;
EXPORT_SYMBOL_GPL(bpf_preload_ops);

static bool bpf_preload_mod_get(void)
	/* If bpf_preload.ko wasn't loaded earlier then load it now.
	 * When bpf_preload is built into vmlinux the module's __init
	 * function will populate it.
	 */
	if (!bpf_preload_ops) {
		request_module("bpf_preload");
		if (!bpf_preload_ops)
	/* And grab the reference, so the module doesn't disappear while the
	 * kernel is interacting with the kernel module and its UMD.
	 */
	if (!try_module_get(bpf_preload_ops->owner)) {
		pr_err("bpf_preload module get failed.\n");

static void bpf_preload_mod_put(void)
	/* now user can "rmmod bpf_preload" if necessary */
	module_put(bpf_preload_ops->owner);

static DEFINE_MUTEX(bpf_preload_lock);

static int populate_bpffs(struct dentry *parent)
	struct bpf_preload_info objs[BPF_PRELOAD_LINKS] = {};

	/* grab the mutex to make sure the kernel interactions with bpf_preload
	 * are serialized.
	 */
	mutex_lock(&bpf_preload_lock);

	/* if bpf_preload.ko wasn't built into vmlinux then load it */
	if (!bpf_preload_mod_get())
	err = bpf_preload_ops->preload(objs);
	for (i = 0; i < BPF_PRELOAD_LINKS; i++) {
		bpf_link_inc(objs[i].link);
		err = bpf_iter_link_pin_kernel(parent,
					       objs[i].link_name, objs[i].link);
		bpf_link_put(objs[i].link);
	bpf_preload_mod_put();
	mutex_unlock(&bpf_preload_lock);
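/*
 * Illustrative note (the file names depend on the preload
 * implementation; the default bpf_preload pins two iterator links):
 * after a successful populate_bpffs() the root of a new bpffs instance
 * carries kernel-created, read-only entries that can simply be read:
 *
 *	# mount -t bpf bpffs /sys/fs/bpf
 *	# cat /sys/fs/bpf/progs.debug
 *	# cat /sys/fs/bpf/maps.debug
 */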
static int bpf_fill_super(struct super_block *sb, struct fs_context *fc)
	static const struct tree_descr bpf_rfiles[] = { { "" } };
	struct bpf_mount_opts *opts = fc->fs_private;

	ret = simple_fill_super(sb, BPF_FS_MAGIC, bpf_rfiles);
	sb->s_op = &bpf_super_ops;

	inode = sb->s_root->d_inode;
	inode->i_op = &bpf_dir_iops;
	inode->i_mode &= ~S_IALLUGO;
	populate_bpffs(sb->s_root);
	inode->i_mode |= S_ISVTX | opts->mode;

static int bpf_get_tree(struct fs_context *fc)
	return get_tree_nodev(fc, bpf_fill_super);

static void bpf_free_fc(struct fs_context *fc)
	kfree(fc->fs_private);

static const struct fs_context_operations bpf_context_ops = {
	.parse_param	= bpf_parse_param,
	.get_tree	= bpf_get_tree,

/*
 * Set up the filesystem mount context.
 */
static int bpf_init_fs_context(struct fs_context *fc)
	struct bpf_mount_opts *opts;

	opts = kzalloc(sizeof(struct bpf_mount_opts), GFP_KERNEL);
	opts->mode = S_IRWXUGO;

	fc->fs_private = opts;
	fc->ops = &bpf_context_ops;

static struct file_system_type bpf_fs_type = {
	.owner		= THIS_MODULE,
	.init_fs_context = bpf_init_fs_context,
	.parameters	= bpf_fs_parameters,
	.kill_sb	= kill_litter_super,

static int __init bpf_init(void)
	ret = sysfs_create_mount_point(fs_kobj, "bpf");
	ret = register_filesystem(&bpf_fs_type);
		sysfs_remove_mount_point(fs_kobj, "bpf");

fs_initcall(bpf_init);