/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <net/sock.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
				       const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
					 const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
					  const struct bpf_insn *insn);

#ifdef CONFIG_CGROUP_BPF

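/* Map a generic bpf_attach_type to the dense cgroup_bpf_attach_type
 * index used for the static keys and the cgroup's effective program
 * arrays.
 */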
#define CGROUP_ATYPE(type) \
	case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
	CGROUP_ATYPE(CGROUP_SOCK_OPS);
	CGROUP_ATYPE(CGROUP_DEVICE);
	CGROUP_ATYPE(CGROUP_INET4_BIND);
	CGROUP_ATYPE(CGROUP_INET6_BIND);
	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
	CGROUP_ATYPE(CGROUP_UNIX_CONNECT);
	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
	CGROUP_ATYPE(CGROUP_UNIX_SENDMSG);
	CGROUP_ATYPE(CGROUP_SYSCTL);
	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
	CGROUP_ATYPE(CGROUP_UNIX_RECVMSG);
	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_UNIX_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_UNIX_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}

#undef CGROUP_ATYPE

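/* One static key per attach type lets the run-time wrappers skip cgroup
 * BPF entirely while nothing is attached anywhere on the system.
 */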
extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

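/* RCU-protected buffer backing BPF_CGROUP_STORAGE_SHARED storage. */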
struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

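/* One cgroup storage instance, keyed by (cgroup, attach type) within a
 * map.  The union holds the shared buffer or the per-CPU allocation
 * depending on the map type; list_map and list_cg link the instance to
 * its map and its cgroup, and node sits in the map's rbtree.
 */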
struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

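/* bpf_link that pins a program to a cgroup at a given attach type. */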
struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

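/* Node in a cgroup's per-attach-type program list; the program is
 * referenced either directly (prog) or via a bpf_link (link).
 */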
struct bpf_prog_list {
	struct hlist_node node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	u32 flags;
};

void __init cgroup_bpf_lifetime_notifier_init(void);

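/* Hook implementations live in kernel/bpf/cgroup.c.  Callers normally go
 * through the BPF_CGROUP_RUN_* wrappers below, which test the
 * per-attach-type static key before taking the slow path.
 */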
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      int *uaddrlen,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   const struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, sockptr_t optval,
				       int *optlen, char **kernel_optval);

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, sockptr_t optval,
				       sockptr_t optlen, int max_optlen,
				       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);

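/* Pick the storage flavour used by a cgroup storage map from its map type. */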
static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);

/* Opportunistic check to see whether we have any BPF program attached
 * to this socket's cgroup for the given attach type: an empty effective
 * array means there is nothing to run.
 */
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
					   enum cgroup_bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_prog_array *array;

	array = rcu_access_pointer(cgrp->bpf.effective[type]);
	return array != &bpf_empty_prog_array.hdr;
}

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && sk && \
	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS) && \
	    sk_fullsock(sk)) \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb, \
						    CGROUP_INET_INGRESS); \
\
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \
		typeof(sk) __sk = sk_to_full_sk(sk); \
		if (__sk && __sk == skb_to_full_sk(skb) && \
		    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
							    CGROUP_INET_EGRESS); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(atype)) { \
		__ret = __cgroup_bpf_run_filter_sk(sk, atype); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(atype)) \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, NULL, NULL); \
	__ret; \
})

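/* As BPF_CGROUP_RUN_SA_PROG(), but runs the hook with the socket lock
 * held and forwards hook-specific context through t_ctx.
 */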
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(atype)) { \
		lock_sock(sk); \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, t_ctx, NULL); \
		release_sock(sk); \
	} \
	__ret; \
})

/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * in the upper bits of the return code.  The only supported flag (bit 0,
 * BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) indicates that the
 * CAP_NET_BIND_SERVICE capability check should be bypassed.
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
({ \
	u32 __flags = 0; \
	int __ret = 0; \
	if (cgroup_bpf_enabled(atype)) { \
		lock_sock(sk); \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, NULL, &__flags); \
		release_sock(sk); \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
	} \
	__ret; \
})

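/* True when a connect hook may be attached and the protocol has a
 * pre_connect callback (e.g. tcp_v4_pre_connect()), which gives the
 * hook a chance to rewrite the address before it is first used.
 */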
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
	 (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock under syncookie mode, so its
 * listener-sk is not attached via rsk_listener.  In this case, the
 * caller holds the listener-sk (unlocked), sets sock_ops->sk to the
 * req_sk, and calls this SOCK_OPS"_SK" variant with the listener-sk so
 * that the cgroup BPF programs of the listener-sk are run.
 *
 * Regardless of syncookie mode, calling bpf_setsockopt on a listener-sk
 * would not make sense anyway, so passing 'sock_ops->sk == req_sk' to
 * the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
		__ret = __cgroup_bpf_run_filter_sock_ops(sk, \
							 sock_ops, \
							 CGROUP_SOCK_OPS); \
	__ret; \
})

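/* Note: typeof(sk) below relies on the caller having a local 'sk' in
 * scope at the expansion site.
 */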
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
		typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
		if (__sk && sk_fullsock(__sk)) \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
								 sock_ops, \
								 CGROUP_SOCK_OPS); \
	} \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access, \
							  CGROUP_DEVICE); \
\
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
						       buf, count, pos, \
						       CGROUP_SYSCTL); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) \
({ \
	int __ret = 0; \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \
	    cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
							   optname, optval, \
							   optlen, \
							   kernel_optval); \
	__ret; \
})

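/* getsockopt hook; skipped when the protocol marks the (level, optname)
 * pair as handled without BPF (e.g. TCP_ZEROCOPY_RECEIVE via
 * tcp_bpf_bypass_getsockopt()).
 */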
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
				       max_optlen, retval) \
({ \
	int __ret = retval; \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \
	    cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					  tcp_bpf_bypass_getsockopt, \
					  level, optname)) \
			__ret = __cgroup_bpf_run_filter_getsockopt( \
				sock, level, optname, optval, optlen, \
				max_optlen, retval); \
	__ret; \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) \
({ \
	int __ret = retval; \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern( \
			sock, level, optname, optval, optlen, retval); \
	__ret; \
})

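/* Syscall-side entry points for attaching, detaching, and querying
 * cgroup programs and links; implemented in kernel/bpf/cgroup.c.
 */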
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else

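/* CONFIG_CGROUP_BPF=n stubs: attach/detach/query fail with -EINVAL and
 * the run-time wrappers compile down to constants.
 */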
static inline void cgroup_bpf_lifetime_notifier_init(void)
{
}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return NULL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value,
						   u64 flags) {
	return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */