/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table_header;

#ifdef CONFIG_CGROUP_BPF

#define CGROUP_ATYPE(type) \
	case BPF_##type: return type
static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
	CGROUP_ATYPE(CGROUP_SOCK_OPS);
	CGROUP_ATYPE(CGROUP_DEVICE);
	CGROUP_ATYPE(CGROUP_INET4_BIND);
	CGROUP_ATYPE(CGROUP_INET6_BIND);
	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
	CGROUP_ATYPE(CGROUP_SYSCTL);
	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}

#undef CGROUP_ATYPE
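/*
 * Example (illustrative, by construction of CGROUP_ATYPE above):
 * to_cgroup_bpf_attach_type(BPF_CGROUP_INET_INGRESS) yields
 * CGROUP_INET_INGRESS, while an attach type without a cgroup slot
 * (e.g. BPF_SK_LOOKUP) yields CGROUP_BPF_ATTACH_TYPE_INVALID.
 */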
extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])
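/*
 * Illustrative sketch (not a call site in this header): callers gate the
 * comparatively expensive program-array walk on the per-attach-type static
 * key first, e.g.
 *
 *	if (cgroup_bpf_enabled(CGROUP_INET_SOCK_CREATE))
 *		err = __cgroup_bpf_run_filter_sk(sk, CGROUP_INET_SOCK_CREATE);
 *
 * so a kernel with nothing attached only pays for a patched-out branch.
 * The BPF_CGROUP_RUN_* wrappers below encode exactly this pattern.
 */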
#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
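/*
 * Illustrative use (a minimal sketch, not taken from this header; "pl" is a
 * hypothetical struct bpf_prog_list pointer, see below):
 *
 *	enum bpf_cgroup_storage_type stype;
 *
 *	for_each_cgroup_storage_type(stype)
 *		bpf_cgroup_storage_free(pl->storage[stype]);
 */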
struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};
struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};
int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}
struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);
/* Opportunistic check to see whether we have any BPF program attached */
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
					   enum cgroup_bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_prog_array *array;

	array = rcu_access_pointer(cgrp->bpf.effective[type]);
	return array != &bpf_empty_prog_array.hdr;
}
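/*
 * Note: the comparison against &bpf_empty_prog_array.hdr only says the
 * effective array for this socket's cgroup is non-empty; callers still pair
 * it with cgroup_bpf_enabled(), as the wrappers below do.
 */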
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) &&			      \
	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS))		      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    CGROUP_INET_INGRESS);     \
									      \
	__ret;								      \
})
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) {  \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk) &&				       \
		    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS))	       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
							    CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})
#define BPF_CGROUP_RUN_SK_PROG(sk, atype)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, atype);		       \
	__ret;								       \
})
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
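/*
 * Illustrative call site (a sketch; the surrounding code and label are
 * hypothetical): socket-creation paths typically run the CREATE program
 * right after the socket is set up and abort on a non-zero return, e.g.
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
 *	if (err)
 *		goto out_release;
 */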
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))					       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,   \
							  NULL, NULL);	       \
	__ret;								       \
})
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,   \
							  t_ctx, NULL);	       \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})
/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via the upper bits of the return code. The only supported flag (at bit
 * position 0) indicates that the CAP_NET_BIND_SERVICE capability check
 * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags)      \
({									       \
	u32 __flags = 0;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype,   \
							  NULL, &__flags);     \
		release_sock(sk);					       \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)	       \
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;	       \
	}								       \
	__ret;								       \
})
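/*
 * Illustrative bind-path sketch (assumed caller, not part of this header):
 * the caller passes a local flags word and may skip the privileged-port
 * capability check when the program asked for it, e.g.
 *
 *	u32 flags = 0;
 *	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr,
 *						 CGROUP_INET4_BIND, &flags);
 *	if (err)
 *		return err;
 *
 * and later only requires CAP_NET_BIND_SERVICE for a low port when
 * (flags & BIND_NO_CAP_NET_BIND_SERVICE) is clear.
 */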
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)				       \
	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||			       \
	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&			       \
	 (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock and it is under syncookie mode.
 * Its listener-sk is not attached to the rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked),
 * sets sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" macro
 * with the listener-sk so that the cgroup-bpf progs of the
 * listener-sk will be run.
 *
 * Regardless of syncookie mode or not,
 * calling bpf_setsockopt on the listener-sk would not make sense anyway,
 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))			       \
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		       \
							 sock_ops,	       \
							 CGROUP_SOCK_OPS);     \
	__ret;								       \
})
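/*
 * Illustrative syncookie-path sketch (assumed caller; sock_ops, req and
 * listener_sk are hypothetical names): per the comment above, sock_ops->sk
 * is set to the request_sock while the listener is passed explicitly:
 *
 *	sock_ops.sk = req;
 *	err = BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, listener_sk);
 */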
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {	       \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
								 CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_DEVICE))				       \
		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access,	       \
							  CGROUP_DEVICE);      \
									       \
	__ret;								       \
})
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SYSCTL))				       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,    \
						       buf, count, pos,	       \
						       CGROUP_SYSCTL);	       \
	__ret;								       \
})
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,  \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT))		       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,       \
							   optname, optval,   \
							   optlen,	       \
							   kernel_optval);    \
	__ret;								       \
})
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		get_user(__ret, optlen);				       \
	__ret;								       \
})
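/*
 * Illustrative getsockopt-path sketch (assumed caller): the user-supplied
 * optlen is snapshotted up front and later handed to
 * BPF_CGROUP_RUN_PROG_GETSOCKOPT() as the writable buffer bound, e.g.
 *
 *	int max_optlen = BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen);
 *	...
 *	err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sk, level, optname, optval,
 *					     optlen, max_optlen, err);
 */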
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,  \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT))		       \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		       \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					  tcp_bpf_bypass_getsockopt,	       \
					  level, optname))		       \
			__ret = __cgroup_bpf_run_filter_getsockopt(	       \
				sock, level, optname, optval, optlen,	       \
				max_optlen, retval);			       \
	__ret;								       \
})
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,     \
					    optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	       \
			sock, level, optname, optval, optlen, retval);	       \
	__ret;								       \
})
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);
#else /* CONFIG_CGROUP_BPF */

static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value)
{
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value,
						   u64 flags)
{
	return 0;
}
#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )
#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */