/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/percpu-refcount.h>
#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;

#ifdef CONFIG_CGROUP_BPF
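
/* cgroup_bpf_enabled compiles to a static branch: until the first cgroup-bpf
 * program is attached, the hook wrappers below are run-time patched no-ops,
 * so workloads that never attach programs pay essentially nothing for them.
 */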
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

DECLARE_PER_CPU(struct bpf_cgroup_storage*,
		bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[0];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list;
	struct rb_node node;
	struct rcu_head rcu;
};
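
/* One entry in a cgroup's per-attach-type program list; each attached
 * program carries its own per-type cgroup storage pointers.
 */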
struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array *inactive;

	/* reference counter used to detach bpf programs after cgroup removal */
	struct percpu_ref refcnt;

	/* cgroup_bpf is released using a work queue */
	struct work_struct release_work;
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
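
/* An illustrative sketch (not verbatim kernel code) of how the bpf(2)
 * syscall path can reach the wrappers above:
 *
 *	struct cgroup *cgrp = cgroup_get_from_fd(attr->target_fd);
 *
 *	if (IS_ERR(cgrp))
 *		return PTR_ERR(cgrp);
 *	ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
 *				attr->attach_flags);
 *	cgroup_put(cgrp);
 */
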
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval);
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

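/* Expected to be called right before the attached programs run (with
 * preemption disabled), so the bpf_get_local_storage() helper finds this
 * CPU's storage for the currently executing program.
 */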
static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
					  *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
}

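/* Storage lifecycle (a summary inferred from these declarations): allocated
 * per (prog, storage type), linked to a cgroup while the program is attached
 * there, then unlinked and freed on detach.
 */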
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
						    BPF_CGROUP_INET_INGRESS); \
	__ret; \
})
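
/* For example, an ingress path that already holds a full socket is expected
 * to run the wrapper above roughly like this (sketch):
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *	if (err)
 *		return err;
 */
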
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk); \
		if (sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
						BPF_CGROUP_INET_EGRESS); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) { \
		__ret = __cgroup_bpf_run_filter_sk(sk, type); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  NULL); \
	__ret; \
})
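
/* Same as BPF_CGROUP_RUN_SA_PROG(), but the socket lock is taken around the
 * program run; intended for call sites where @sk is not already locked.
 */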
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type, t_ctx) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) { \
		lock_sock(sk); \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type, \
							  t_ctx); \
		release_sock(sk); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)
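
/* E.g. a bind() implementation runs the hook before committing the address
 * (sketch, assuming @sk and @uaddr come straight from the syscall):
 *
 *	err = BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr);
 *	if (err)
 *		return err;
 */
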
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_UDP6_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled && (sock_ops)->sk) { \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk); \
		if (__sk && sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
								 sock_ops, \
							BPF_CGROUP_SOCK_OPS); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access, \
							  BPF_CGROUP_DEVICE); \
	__ret; \
})
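
/* BPF_CGROUP_SYSCTL programs see every /proc/sys access made by tasks in the
 * cgroup; on a write the program may substitute a new value buffer through
 * @nbuf (the @new_buf argument of __cgroup_bpf_run_filter_sysctl() above).
 */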
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
						       buf, count, pos, nbuf, \
						       BPF_CGROUP_SYSCTL); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
							   optname, optval, \
							   optlen, \
							   kernel_optval); \
	__ret; \
})
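
/* Reads the user-supplied optlen ahead of the getsockopt hook so callers can
 * size the kernel buffer the hook is allowed to use; evaluates to 0 when
 * cgroup-bpf is disabled.
 */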
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled) \
		get_user(__ret, optlen); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
				       max_optlen, retval) \
({ \
	int __ret = retval; \
	if (cgroup_bpf_enabled) \
		__ret = __cgroup_bpf_run_filter_getsockopt(sock, level, \
							   optname, optval, \
							   optlen, max_optlen, \
							   retval); \
	__ret; \
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

#else /* CONFIG_CGROUP_BPF */

struct cgroup_bpf {};
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline void bpf_cgroup_storage_set(
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
					    struct bpf_map *map) { return 0; }
static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
					      struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value, u64 flags) {
	return 0;
}

#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */