/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 *
 * This file is subject to the terms and conditions of version 2 of the GNU
 * General Public License.  See the file COPYING in the main directory of the
 * Linux distribution for more details.
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);
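
/* This static key backs the cgroup_bpf_enabled check used by the
 * BPF_CGROUP_RUN_PROG_*() wrappers in <linux/bpf-cgroup.h>: while no
 * cgroup-bpf program is attached anywhere on the system, the per-packet
 * and per-sock hooks cost only a single patched branch.
 */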

/**
 * cgroup_bpf_put() - put references of all bpf programs
 * @cgrp: the cgroup to modify
 */
void cgroup_bpf_put(struct cgroup *cgrp)
{
	unsigned int type;

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			bpf_prog_put(pl->prog);
			bpf_cgroup_storage_unlink(pl->storage);
			bpf_cgroup_storage_free(pl->storage);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		bpf_prog_array_free(cgrp->bpf.effective[type]);
	}
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!pl->prog)
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type,
				    u32 new_flags)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
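
/* Illustrative example (not from the original file): with a hierarchy
 * root -> A -> B, an attach to B is decided by the nearest ancestor that
 * holds a program of @type: BPF_F_ALLOW_MULTI there always permits it,
 * BPF_F_ALLOW_OVERRIDE permits it, and a program attached with neither
 * flag makes the attach fail with -EPERM (see __cgroup_bpf_attach()).
 */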

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that a parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array __rcu **array)
{
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			list_for_each_entry(pl,
					    &p->bpf.progs[type], node) {
				if (!pl->prog)
					continue;
				progs->progs[cnt++] = pl->prog;
			}
		p = cgroup_parent(p);
	} while (p);

	rcu_assign_pointer(*array, progs);
	return 0;
}
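
/* Illustrative walk-through (not from the original file, derived from the
 * loops above): for root -> A -> B, where root attached prog R with
 * BPF_F_ALLOW_MULTI and B attached prog X, B's effective array becomes
 * { X, R }. The array is filled starting at the cgroup itself, so programs
 * of the cgroup closest to the task/socket come first when
 * BPF_PROG_RUN_ARRAY() walks the array.
 */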

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array __rcu *array)
{
	struct bpf_prog_array __rcu *old_array;

	old_array = xchg(&cgrp->bpf.effective[type], array);
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array __rcu *arrays[NR] = {};
	int i;

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);
	return -ENOMEM;
}
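
/* Note (assumption about the caller, not stated in this file): this is
 * expected to run when a new cgroup is created (cgroup_mkdir() in
 * kernel/cgroup/cgroup.c), so a fresh cgroup starts with empty per-type
 * prog lists but with effective arrays already composed from its ancestors.
 */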

#define BPF_CGROUP_MAX_PROGS 64

/**
 * __cgroup_bpf_attach() - Attach the program to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to attach
 * @type: Type of attach operation
 * @flags: Attach flags (BPF_F_ALLOW_OVERRIDE, BPF_F_ALLOW_MULTI or none)
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage, *old_storage = NULL;
	struct cgroup_subsys_state *css;
	struct bpf_prog_list *pl;
	bool pl_was_allocated;
	int err;

	if ((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI))
		/* invalid combination */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type, flags))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	storage = bpf_cgroup_storage_alloc(prog);
	if (IS_ERR(storage))
		return -ENOMEM;

	if (flags & BPF_F_ALLOW_MULTI) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == prog) {
				/* disallow attaching the same prog twice */
				bpf_cgroup_storage_free(storage);
				return -EINVAL;
			}
		}

		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storage_free(storage);
			return -ENOMEM;
		}

		pl_was_allocated = true;
		pl->prog = prog;
		pl->storage = storage;
		list_add_tail(&pl->node, progs);
	} else {
		if (list_empty(progs)) {
			pl = kmalloc(sizeof(*pl), GFP_KERNEL);
			if (!pl) {
				bpf_cgroup_storage_free(storage);
				return -ENOMEM;
			}
			pl_was_allocated = true;
			list_add_tail(&pl->node, progs);
		} else {
			pl = list_first_entry(progs, typeof(*pl), node);
			old_prog = pl->prog;
			old_storage = pl->storage;
			bpf_cgroup_storage_unlink(old_storage);
			pl_was_allocated = false;
		}
		pl->prog = prog;
		pl->storage = storage;
	}

	cgrp->bpf.flags[type] = flags;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	static_branch_inc(&cgroup_bpf_enabled_key);
	if (old_storage)
		bpf_cgroup_storage_free(old_storage);
	if (old_prog) {
		bpf_prog_put(old_prog);
		static_branch_dec(&cgroup_bpf_enabled_key);
	}
	bpf_cgroup_storage_link(storage, cgrp, type);
	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	/* and cleanup the prog list */
	pl->prog = old_prog;
	bpf_cgroup_storage_free(pl->storage);
	pl->storage = old_storage;
	bpf_cgroup_storage_link(old_storage, cgrp, type);
	if (pl_was_allocated) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
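
/* Illustrative user-space sketch (not part of this file; cgroup_fd and
 * prog_fd are hypothetical descriptors): this path is reached from the
 * bpf(2) syscall via cgroup_bpf_prog_attach() below.
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;	// open("/sys/fs/cgroup/...")
 *	attr.attach_bpf_fd = prog_fd;	// fd returned by BPF_PROG_LOAD
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	if (syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr)) < 0)
 *		perror("BPF_PROG_ATTACH");
 */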

/**
 * __cgroup_bpf_detach() - Detach the program from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to detach or NULL
 * @type: Type of detach operation
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 unused_flags)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog *old_prog = NULL;
	struct cgroup_subsys_state *css;
	struct bpf_prog_list *pl;
	int err;

	if (flags & BPF_F_ALLOW_MULTI) {
		if (!prog)
			/* to detach MULTI prog the user has to specify valid FD
			 * of the program to be detached
			 */
			return -EINVAL;
	} else {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return -ENOENT;
	}

	if (flags & BPF_F_ALLOW_MULTI) {
		/* find the prog and detach it */
		list_for_each_entry(pl, progs, node) {
			if (pl->prog != prog)
				continue;
			old_prog = prog;
			/* mark it deleted, so it's ignored while
			 * recomputing effective
			 */
			pl->prog = NULL;
			break;
		}
		if (!old_prog)
			return -ENOENT;
	} else {
		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL)
		 */
		pl = list_first_entry(progs, typeof(*pl), node);
		old_prog = pl->prog;
		pl->prog = NULL;
	}

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	bpf_cgroup_storage_unlink(pl->storage);
	bpf_cgroup_storage_free(pl->storage);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;

	bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	/* and restore back old_prog */
	pl->prog = old_prog;
	return err;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	int cnt, ret = 0, i;

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(cgrp->bpf.effective[type]);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(cgrp->bpf.effective[type],
						   prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			id = pl->prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
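
/* Illustrative user-space sketch (not part of this file; cgroup_fd is a
 * hypothetical descriptor): querying attached program ids with
 * BPF_PROG_QUERY. On return, attr.query.prog_cnt holds the total number of
 * attached programs, and the syscall fails with -ENOSPC if the ids buffer
 * was too small to hold them all.
 *
 *	__u32 ids[64];
 *	union bpf_attr attr = {};
 *
 *	attr.query.target_fd   = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *	attr.query.prog_ids    = (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt    = 64;
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */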

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
				attr->attach_flags);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
				 bpf_prog_run_save_cb);
	__skb_pull(skb, offset);
	skb->sk = save_sk;
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
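
/* Note: this hook is typically reached through the
 * BPF_CGROUP_RUN_PROG_INET_INGRESS() and BPF_CGROUP_RUN_PROG_INET_EGRESS()
 * wrappers in <linux/bpf-cgroup.h>, invoked from the inet receive and
 * transmit paths respectively.
 */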

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and the
 *                                       sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent a network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.). May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);
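
/* Illustrative example (not from the original file): a mknod(2) of the
 * character device 1:3 (/dev/null) would reach this check roughly as
 *
 *	__cgroup_bpf_check_dev_permission(BPF_DEVCG_DEV_CHAR, 1, 3,
 *					  BPF_DEVCG_ACC_MKNOD,
 *					  BPF_CGROUP_DEVICE);
 *
 * where ctx.access_type packs the access bits into the high 16 bits and
 * the device type into the low 16, as the initializer above shows.
 */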

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
	default:
		return NULL;
	}
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};