/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_sock_ops_kern;

#ifdef CONFIG_CGROUP_BPF

extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)

struct bpf_prog_list {
	struct list_head node;
	struct bpf_prog *prog;
};

struct bpf_prog_array;

struct cgroup_bpf {
	/* array of effective progs in this cgroup */
	struct bpf_prog_array __rcu *effective[MAX_BPF_ATTACH_TYPE];

	/* attached progs to this cgroup and attach flags
	 * when flags == 0 or BPF_F_ALLOW_OVERRIDE the progs list will
	 * have either zero or one element
	 * when BPF_F_ALLOW_MULTI the list can have up to BPF_CGROUP_MAX_PROGS
	 */
	struct list_head progs[MAX_BPF_ATTACH_TYPE];
	u32 flags[MAX_BPF_ATTACH_TYPE];

	/* temp storage for effective prog array used by prog_attach/detach */
	struct bpf_prog_array __rcu *inactive;
};
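
/* Illustrative sketch (not part of this header): how the attach flags above
 * shape progs[type] from userspace. With BPF_F_ALLOW_MULTI, repeated
 * BPF_PROG_ATTACH calls append programs for the same cgroup and attach type.
 * cgroup_fd and prog_fd below are assumed to be an open cgroup directory fd
 * and a loaded BPF_PROG_TYPE_CGROUP_SKB program fd.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */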

void cgroup_bpf_put(struct cgroup *cgrp);
int cgroup_bpf_inherit(struct cgroup *cgrp);

int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr);

/* Wrappers for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
		      enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		     union bpf_attr __user *uattr);
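
/* Illustrative sketch (assumed; the real bodies live outside this header):
 * each cgroup_bpf_*() wrapper simply takes cgroup_mutex around the
 * corresponding __cgroup_bpf_*() call, roughly:
 *
 *	int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
 *			      enum bpf_attach_type type, u32 flags)
 *	{
 *		int ret;
 *
 *		mutex_lock(&cgroup_mutex);
 *		ret = __cgroup_bpf_attach(cgrp, prog, type, flags);
 *		mutex_unlock(&cgroup_mutex);
 *		return ret;
 *	}
 */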

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type);

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		      \
						    BPF_CGROUP_INET_INGRESS); \
									      \
	__ret;								      \
})
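
/* Illustrative call-site sketch (assumed, not from this header): on the
 * receive path a non-zero return from the ingress hook is treated as a
 * drop, roughly:
 *
 *	if (BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)) {
 *		kfree_skb(skb);
 *		return NET_RX_DROP;
 *	}
 */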

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && sk && sk == skb->sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk))					       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						      BPF_CGROUP_INET_EGRESS); \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		__ret = __cgroup_bpf_run_filter_sk(sk, type);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, BPF_CGROUP_INET6_POST_BIND)
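
/* Illustrative call-site sketch (assumed): the INET_SOCK hook runs once a
 * new inet socket has been set up, and a non-zero return aborts creation,
 * roughly:
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_SOCK(sk);
 *	if (err) {
 *		sk_common_release(sk);
 *		goto out;
 *	}
 */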

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, type)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled)						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type);   \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, type)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled) {					       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, type);   \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_BIND)

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (cgroup_bpf_enabled && \
					    sk->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr)			       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, BPF_CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr)		       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, BPF_CGROUP_INET6_CONNECT)
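
/* Illustrative call-site sketch (assumed): connect() paths that already hold
 * the socket lock use the plain _CONNECT variant, callers without the lock
 * use the _LOCK variant, and BPF_CGROUP_PRE_CONNECT_ENABLED() gates an
 * optional sk_prot->pre_connect() step, roughly:
 *
 *	if (BPF_CGROUP_PRE_CONNECT_ENABLED(sk)) {
 *		err = sk->sk_prot->pre_connect(sk, uaddr, addr_len);
 *		if (err)
 *			return err;
 *	}
 */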

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled && (sock_ops)->sk) {			       \
		typeof((sock_ops)->sk) __sk = sk_to_full_sk((sock_ops)->sk);  \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
							 BPF_CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
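
/* Illustrative call-site sketch (assumed): TCP builds a bpf_sock_ops_kern on
 * the stack for the current operation and runs the cgroup sock_ops programs
 * against it, roughly:
 *
 *	struct bpf_sock_ops_kern sock_ops;
 *
 *	memset(&sock_ops, 0, sizeof(sock_ops));
 *	sock_ops.sk = sk;
 *	sock_ops.op = BPF_SOCK_OPS_TIMEOUT_INIT;
 *	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 */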

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access)	      \
({									      \
	int __ret = 0;							      \
	if (cgroup_bpf_enabled)						      \
		__ret = __cgroup_bpf_check_dev_permission(type, major, minor, \
							  access,	      \
							  BPF_CGROUP_DEVICE); \
									      \
	__ret;								      \
})
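
/* Illustrative call-site sketch (assumed): the device cgroup layer maps a
 * non-zero return from the hook to -EPERM when a task opens a device node,
 * roughly:
 *
 *	if (BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(BPF_DEVCG_DEV_CHAR, MAJOR(dev),
 *					      MINOR(dev), BPF_DEVCG_ACC_READ))
 *		return -EPERM;
 */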

#else

struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_BIND(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */