bpf: Fix SO_RCVBUF/SO_SNDBUF handling in _bpf_setsockopt().
net/core/filter.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>
#include <net/transp_v6.h>
#include <linux/btf_ids.h>
#include <net/tls.h>
#include <net/xdp.h>

static const struct bpf_func_proto *
bpf_sk_base_func_proto(enum bpf_func_id func_id);

int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len)
{
	if (in_compat_syscall()) {
		struct compat_sock_fprog f32;

		if (len != sizeof(f32))
			return -EINVAL;
		if (copy_from_sockptr(&f32, src, sizeof(f32)))
			return -EFAULT;
		memset(dst, 0, sizeof(*dst));
		dst->len = f32.len;
		dst->filter = compat_ptr(f32.filter);
	} else {
		if (len != sizeof(*dst))
			return -EINVAL;
		if (copy_from_sockptr(dst, src, sizeof(*dst)))
			return -EFAULT;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(copy_bpf_fprog_from_user);

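/* Illustrative note, not part of the original file: on a 64-bit kernel a
 * 32-bit caller hands in the 8-byte compat layout below rather than the
 * native 16-byte sock_fprog, which is why the helper above switches on
 * in_compat_syscall() and re-widens the 32-bit user pointer with
 * compat_ptr(). Roughly, per include/linux/filter.h:
 *
 *	struct compat_sock_fprog {
 *		u16		len;
 *		compat_uptr_t	filter;		(32-bit user pointer)
 *	};
 */
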
/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to bpf_prog_run. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);

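/* Illustrative: most callers go through the sk_filter() wrapper (a static
 * inline in include/linux/filter.h), which runs the filter with a one-byte
 * trim cap so an accepted packet is never trimmed down to zero length:
 *
 *	static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return sk_filter_trim_cap(sk, skb, 1);
 *	}
 */
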
BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return *(u8 *)(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return tmp;
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return *(u8 *)ptr;
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
					 offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u16 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return get_unaligned_be16(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be16_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be16(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u32 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be32(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be32_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be32(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

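/* Illustrative: a negative offset in classic BPF selects an alternative
 * base rather than skb->data, e.g. SKF_NET_OFF for the network header or
 * SKF_LL_OFF for the link-layer header, which is why the helpers above
 * fall back to bpf_internal_load_pointer_neg_helper() instead of simply
 * rejecting offset < 0. For example, a filter load such as
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_NET_OFF + 9)
 *
 * reads the IPv4 protocol byte regardless of how much link-layer header
 * precedes it.
 */
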
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET);
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		break;
	case SKF_AD_VLAN_TAG_PRESENT:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET);
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		break;
	}

	return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

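/* Illustrative: a classic filter reaches these extensions through an
 * overloaded absolute load above SKF_AD_OFF, e.g.
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU),
 *
 * which convert_bpf_extensions() above rewrites into a real call to
 * bpf_get_raw_cpu_id() rather than a packet load.
 */
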
static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
	const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
	int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
	bool endian = BPF_SIZE(fp->code) == BPF_H ||
		      BPF_SIZE(fp->code) == BPF_W;
	bool indirect = BPF_MODE(fp->code) == BPF_IND;
	const int ip_align = NET_IP_ALIGN;
	struct bpf_insn *insn = *insnp;
	int offset = fp->k;

	if (!indirect &&
	    ((unaligned_ok && offset >= 0) ||
	     (!unaligned_ok && offset >= 0 &&
	      offset + ip_align >= 0 &&
	      offset + ip_align % size == 0))) {
		bool ldx_off_ok = offset <= S16_MAX;

		*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
		if (offset)
			*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
				      size, 2 + endian + (!ldx_off_ok * 2));
		if (ldx_off_ok) {
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_D, offset);
		} else {
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_TMP, 0);
		}
		if (endian)
			*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
		*insn++ = BPF_JMP_A(8);
	}

	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
		if (fp->k)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
	}

	switch (BPF_SIZE(fp->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
		break;
	default:
		return false;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
	*insn = BPF_EXIT_INSN();

	*insnp = insn;
	return true;
}

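/* Rough sketch (not the literal instructions) of what convert_bpf_ld_abs()
 * emits for a direct, in-bounds load, assuming the offset fits insn->off:
 *
 *	tmp = H - offset;		   headlen left past offset
 *	if (tmp s< size) goto slow_path;   not enough linear data
 *	A = *(size *)(D + offset);	   direct read from skb->data
 *	A = bswap(A);			   only for BPF_H/BPF_W loads
 *	goto done;
 * slow_path:
 *	A = bpf_skb_load_helper_{8,16,32}(CTX, D, H, offset);
 *	if (A s< 0) { A = 0; exit; }	   on error return 0, i.e. drop
 * done:
 */
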
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *	@seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    convert_bpf_ld_abs(fp, &insn)) {
				*seen_ld_abs = true;
				break;
			}

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
		s32 off;						\
									\
		if (target >= len || target < 0)			\
			goto err;					\
		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		off -= insn - tmp_insns;				\
		/* Reject anything not fitting into insn->off. */	\
		if (off < off_min || off > off_max)			\
			goto err;					\
		insn->off = off;					\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by a user never tries to
 * read a cell if not previously written, and we check all branches to be
 * sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

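/* Illustrative: a filter that check_load_and_stores() rejects, because
 * scratch cell M[3] is read before any instruction has written it:
 *
 *	struct sock_filter bad[] = {
 *		BPF_STMT(BPF_LD | BPF_MEM, 3),	   load M[3], never stored
 *		BPF_STMT(BPF_RET | BPF_A, 0),
 *	};
 *
 * Without this pass, such a load could expose stale stack contents.
 */
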
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

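/* Illustrative: the smallest program bpf_check_classic() accepts is a
 * single return, e.g. a filter that accepts up to 64 bytes of every
 * packet:
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 64),
 *	};
 */
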
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;
	bool seen_ld_abs = false;

	/* We are free to overwrite insns et al right here as it won't be used at
	 * this point in time anymore internally after the migration to the eBPF
	 * instruction representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the eBPF translation
	 * for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

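/* Illustrative in-kernel usage of bpf_prog_create(), assuming a context
 * where GFP_KERNEL allocations are fine: build an unattached accept-all
 * program from a kernel-resident instruction array:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	   accept whole pkt
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &fprog);
 *
 * The program is later released with bpf_prog_destroy(prog).
 */
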
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

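/* Illustrative: seccomp is the main caller of this variant, handing in its
 * own post-verifier transform, roughly:
 *
 *	err = bpf_prog_create_from_user(&sfilter->prog, fprog,
 *					seccomp_check_filter, save_orig);
 */
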
void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

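/* Illustrative userspace counterpart of sk_attach_filter(), reached via
 * setsockopt() on the socket:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },	   accept all
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 */
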
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		err = -ENOMEM;
	else
		err = reuseport_attach_prog(sk, prog);

	if (err)
		__bpf_prog_release(prog);

	return err;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (PTR_ERR(prog) == -EINVAL)
		prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
		/* Like other non-BPF_PROG_TYPE_SOCKET_FILTER bpf progs
		 * (e.g. sockmap), it relies on the limit imposed by
		 * bpf_prog_load(). Hence, sysctl_optmem_max is not
		 * checked here.
		 */
		if ((sk->sk_type != SOCK_STREAM &&
		     sk->sk_type != SOCK_DGRAM) ||
		    (sk->sk_protocol != IPPROTO_UDP &&
		     sk->sk_protocol != IPPROTO_TCP) ||
		    (sk->sk_family != AF_INET &&
		     sk->sk_family != AF_INET6)) {
			err = -ENOTSUPP;
			goto err_prog_put;
		}
	} else {
		/* BPF_PROG_TYPE_SOCKET_FILTER */
		if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
			err = -ENOMEM;
			goto err_prog_put;
		}
	}

	err = reuseport_attach_prog(sk, prog);
err_prog_put:
	if (err)
		bpf_prog_put(prog);

	return err;
}

void sk_reuseport_prog_free(struct bpf_prog *prog)
{
	if (!prog)
		return;

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
		bpf_prog_put(prog);
	else
		bpf_prog_destroy(prog);
}

struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

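/* Illustrative tc/BPF program fragment using the helper above: rewrite the
 * destination MAC while keeping skb->csum consistent (values made up):
 *
 *	__u8 dst[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	bpf_skb_store_bytes(skb, offsetof(struct ethhdr, h_dest),
 *			    dst, ETH_ALEN, BPF_F_RECOMPUTE_CSUM);
 */
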
f3694e00
DB
1720BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
1721 void *, to, u32, len)
05c74e5e 1722{
05c74e5e
DB
1723 void *ptr;
1724
0ed661d5 1725 if (unlikely(offset > 0xffff))
074f528e 1726 goto err_clear;
05c74e5e
DB
1727
1728 ptr = skb_header_pointer(skb, offset, len, to);
1729 if (unlikely(!ptr))
074f528e 1730 goto err_clear;
05c74e5e
DB
1731 if (ptr != to)
1732 memcpy(to, ptr, len);
1733
1734 return 0;
074f528e
DB
1735err_clear:
1736 memset(to, 0, len);
1737 return -EFAULT;
05c74e5e
DB
1738}
1739
577c50aa 1740static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
05c74e5e
DB
1741 .func = bpf_skb_load_bytes,
1742 .gpl_only = false,
1743 .ret_type = RET_INTEGER,
1744 .arg1_type = ARG_PTR_TO_CTX,
1745 .arg2_type = ARG_ANYTHING,
39f19ebb
AS
1746 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
1747 .arg4_type = ARG_CONST_SIZE,
05c74e5e
DB
1748};
1749
089b19a9
SF
1750BPF_CALL_4(bpf_flow_dissector_load_bytes,
1751 const struct bpf_flow_dissector *, ctx, u32, offset,
1752 void *, to, u32, len)
1753{
1754 void *ptr;
1755
1756 if (unlikely(offset > 0xffff))
1757 goto err_clear;
1758
1759 if (unlikely(!ctx->skb))
1760 goto err_clear;
1761
1762 ptr = skb_header_pointer(ctx->skb, offset, len, to);
1763 if (unlikely(!ptr))
1764 goto err_clear;
1765 if (ptr != to)
1766 memcpy(to, ptr, len);
1767
1768 return 0;
1769err_clear:
1770 memset(to, 0, len);
1771 return -EFAULT;
1772}
1773
1774static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
1775 .func = bpf_flow_dissector_load_bytes,
1776 .gpl_only = false,
1777 .ret_type = RET_INTEGER,
1778 .arg1_type = ARG_PTR_TO_CTX,
1779 .arg2_type = ARG_ANYTHING,
1780 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
1781 .arg4_type = ARG_CONST_SIZE,
1782};
1783
4e1ec56c
DB
1784BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
1785 u32, offset, void *, to, u32, len, u32, start_header)
1786{
3eee1f75 1787 u8 *end = skb_tail_pointer(skb);
0f5d82f1 1788 u8 *start, *ptr;
4e1ec56c 1789
0f5d82f1 1790 if (unlikely(offset > 0xffff))
4e1ec56c
DB
1791 goto err_clear;
1792
1793 switch (start_header) {
1794 case BPF_HDR_START_MAC:
0f5d82f1
YZ
1795 if (unlikely(!skb_mac_header_was_set(skb)))
1796 goto err_clear;
1797 start = skb_mac_header(skb);
4e1ec56c
DB
1798 break;
1799 case BPF_HDR_START_NET:
0f5d82f1 1800 start = skb_network_header(skb);
4e1ec56c
DB
1801 break;
1802 default:
1803 goto err_clear;
1804 }
1805
0f5d82f1
YZ
1806 ptr = start + offset;
1807
1808 if (likely(ptr + len <= end)) {
4e1ec56c
DB
1809 memcpy(to, ptr, len);
1810 return 0;
1811 }
1812
1813err_clear:
1814 memset(to, 0, len);
1815 return -EFAULT;
1816}
1817
1818static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
1819 .func = bpf_skb_load_bytes_relative,
1820 .gpl_only = false,
1821 .ret_type = RET_INTEGER,
1822 .arg1_type = ARG_PTR_TO_CTX,
1823 .arg2_type = ARG_ANYTHING,
1824 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
1825 .arg4_type = ARG_CONST_SIZE,
1826 .arg5_type = ARG_ANYTHING,
1827};
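
/* Usage sketch (not part of this file): unlike bpf_skb_load_bytes(), the
 * relative variant reads from the mac or network header base rather than
 * skb->data, which helps e.g. in cgroup/skb programs where skb->data
 * already sits at L3. A minimal sketch pulling the IPv4 header, assuming
 * an IPv4 packet (returning 1 lets the packet pass in cgroup/skb):
 *
 *	struct iphdr iph;
 *
 *	if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
 *					BPF_HDR_START_NET))
 *		return 1;
 */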
1828
36bbef52
DB
1829BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
1830{
1831 /* The idea is the following: should the needed direct read/write
1832 * test fail at runtime, we can pull in more data and redo the
1833 * test, since implicitly we invalidate the previous checks here.
1834 *
1835 * Or, since we know how much we need to make readable/writable,
1836 * this can be done once at the beginning of the program for the
1837 * direct access case. By this we overcome the limitation of only
1838 * the current headroom being accessible.
1839 */
1840 return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
1841}
1842
1843static const struct bpf_func_proto bpf_skb_pull_data_proto = {
1844 .func = bpf_skb_pull_data,
1845 .gpl_only = false,
1846 .ret_type = RET_INTEGER,
1847 .arg1_type = ARG_PTR_TO_CTX,
1848 .arg2_type = ARG_ANYTHING,
1849};
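
/* Usage sketch (not part of this file): the retry pattern the comment
 * above describes. The pull invalidates prior bounds checks (it ends in
 * bpf_compute_data_pointers() via bpf_try_make_writable()), so
 * skb->data/data_end must be re-derived after the helper. The 60-byte
 * length is an illustrative assumption:
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *
 *	if (data + 60 > data_end) {
 *		if (bpf_skb_pull_data(skb, 60))
 *			return TC_ACT_SHOT;
 *		data = (void *)(long)skb->data;
 *		data_end = (void *)(long)skb->data_end;
 *		if (data + 60 > data_end)
 *			return TC_ACT_SHOT;
 *	}
 */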
1850
46f8bc92
MKL
1851BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
1852{
46f8bc92
MKL
1853 return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
1854}
1855
1856static const struct bpf_func_proto bpf_sk_fullsock_proto = {
1857 .func = bpf_sk_fullsock,
1858 .gpl_only = false,
1859 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
1860 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
1861};
1862
0ea488ff
JF
1863static inline int sk_skb_try_make_writable(struct sk_buff *skb,
1864 unsigned int write_len)
1865{
16137b09 1866 return __bpf_try_make_writable(skb, write_len);
0ea488ff
JF
1867}
1868
1869BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
1870{
1871 /* The idea is the following: should the needed direct read/write
1872 * test fail at runtime, we can pull in more data and redo the
1873 * test, since implicitly we invalidate the previous checks here.
1874 *
1875 * Or, since we know how much we need to make readable/writable,
1876 * this can be done once at the beginning of the program for the
1877 * direct access case. By this we overcome the limitation of only
1878 * the current headroom being accessible.
1879 */
1880 return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
1881}
1882
1883static const struct bpf_func_proto sk_skb_pull_data_proto = {
1884 .func = sk_skb_pull_data,
1885 .gpl_only = false,
1886 .ret_type = RET_INTEGER,
1887 .arg1_type = ARG_PTR_TO_CTX,
1888 .arg2_type = ARG_ANYTHING,
1889};
1890
f3694e00
DB
1891BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
1892 u64, from, u64, to, u64, flags)
91bc4822 1893{
0ed661d5 1894 __sum16 *ptr;
91bc4822 1895
781c53bc
DB
1896 if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
1897 return -EINVAL;
0ed661d5 1898 if (unlikely(offset > 0xffff || offset & 1))
91bc4822 1899 return -EFAULT;
0ed661d5 1900 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
91bc4822
AS
1901 return -EFAULT;
1902
0ed661d5 1903 ptr = (__sum16 *)(skb->data + offset);
781c53bc 1904 switch (flags & BPF_F_HDR_FIELD_MASK) {
8050c0f0
DB
1905 case 0:
1906 if (unlikely(from != 0))
1907 return -EINVAL;
1908
1909 csum_replace_by_diff(ptr, to);
1910 break;
91bc4822
AS
1911 case 2:
1912 csum_replace2(ptr, from, to);
1913 break;
1914 case 4:
1915 csum_replace4(ptr, from, to);
1916 break;
1917 default:
1918 return -EINVAL;
1919 }
1920
91bc4822
AS
1921 return 0;
1922}
1923
577c50aa 1924static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
91bc4822
AS
1925 .func = bpf_l3_csum_replace,
1926 .gpl_only = false,
1927 .ret_type = RET_INTEGER,
1928 .arg1_type = ARG_PTR_TO_CTX,
1929 .arg2_type = ARG_ANYTHING,
1930 .arg3_type = ARG_ANYTHING,
1931 .arg4_type = ARG_ANYTHING,
1932 .arg5_type = ARG_ANYTHING,
1933};
1934
f3694e00
DB
1935BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
1936 u64, from, u64, to, u64, flags)
91bc4822 1937{
781c53bc 1938 bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
2f72959a 1939 bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
d1b662ad 1940 bool do_mforce = flags & BPF_F_MARK_ENFORCE;
0ed661d5 1941 __sum16 *ptr;
91bc4822 1942
d1b662ad
DB
1943 if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
1944 BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
781c53bc 1945 return -EINVAL;
0ed661d5 1946 if (unlikely(offset > 0xffff || offset & 1))
91bc4822 1947 return -EFAULT;
0ed661d5 1948 if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
91bc4822
AS
1949 return -EFAULT;
1950
0ed661d5 1951 ptr = (__sum16 *)(skb->data + offset);
d1b662ad 1952 if (is_mmzero && !do_mforce && !*ptr)
2f72959a 1953 return 0;
91bc4822 1954
781c53bc 1955 switch (flags & BPF_F_HDR_FIELD_MASK) {
7d672345
DB
1956 case 0:
1957 if (unlikely(from != 0))
1958 return -EINVAL;
1959
1960 inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
1961 break;
91bc4822
AS
1962 case 2:
1963 inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
1964 break;
1965 case 4:
1966 inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
1967 break;
1968 default:
1969 return -EINVAL;
1970 }
1971
2f72959a
DB
1972 if (is_mmzero && !*ptr)
1973 *ptr = CSUM_MANGLED_0;
91bc4822
AS
1974 return 0;
1975}
1976
577c50aa 1977static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
91bc4822
AS
1978 .func = bpf_l4_csum_replace,
1979 .gpl_only = false,
1980 .ret_type = RET_INTEGER,
1981 .arg1_type = ARG_PTR_TO_CTX,
1982 .arg2_type = ARG_ANYTHING,
1983 .arg3_type = ARG_ANYTHING,
1984 .arg4_type = ARG_ANYTHING,
1985 .arg5_type = ARG_ANYTHING,
608cd71a
AS
1986};
1987
f3694e00
DB
1988BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
1989 __be32 *, to, u32, to_size, __wsum, seed)
7d672345 1990{
21cafc1d 1991 struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
f3694e00 1992 u32 diff_size = from_size + to_size;
7d672345
DB
1993 int i, j = 0;
1994
1995 /* This is quite flexible, some examples:
1996 *
1997 * from_size == 0, to_size > 0, seed := csum --> pushing data
1998 * from_size > 0, to_size == 0, seed := csum --> pulling data
1999 * from_size > 0, to_size > 0, seed := 0 --> diffing data
2000 *
2001 * Even for diffing, from_size and to_size don't need to be equal.
2002 */
2003 if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
2004 diff_size > sizeof(sp->diff)))
2005 return -EINVAL;
2006
2007 for (i = 0; i < from_size / sizeof(__be32); i++, j++)
2008 sp->diff[j] = ~from[i];
2009 for (i = 0; i < to_size / sizeof(__be32); i++, j++)
2010 sp->diff[j] = to[i];
2011
2012 return csum_partial(sp->diff, diff_size, seed);
2013}
2014
577c50aa 2015static const struct bpf_func_proto bpf_csum_diff_proto = {
7d672345
DB
2016 .func = bpf_csum_diff,
2017 .gpl_only = false,
36bbef52 2018 .pkt_access = true,
7d672345 2019 .ret_type = RET_INTEGER,
216e3cd2 2020 .arg1_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
39f19ebb 2021 .arg2_type = ARG_CONST_SIZE_OR_ZERO,
216e3cd2 2022 .arg3_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
39f19ebb 2023 .arg4_type = ARG_CONST_SIZE_OR_ZERO,
7d672345
DB
2024 .arg5_type = ARG_ANYTHING,
2025};
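
/* Usage sketch (not part of this file): the "diffing data" mode from the
 * comment above, combined with the csum_replace helpers. The program
 * rewrites an IPv4 destination address while keeping the L3 and L4
 * checksums correct; IP_CSUM_OFF, TCP_CSUM_OFF and IP_DST_OFF are
 * assumed offset constants, and the new address is illustrative.
 * Passing a field size of 0 in the csum_replace flags selects the
 * replace-by-diff cases above; BPF_F_PSEUDO_HDR is needed since daddr
 * is part of the TCP pseudo header.
 *
 *	__be32 old_ip, new_ip = bpf_htonl(0x0a000001);
 *	__s64 diff;
 *
 *	if (bpf_skb_load_bytes(skb, IP_DST_OFF, &old_ip, 4))
 *		return TC_ACT_SHOT;
 *	diff = bpf_csum_diff(&old_ip, 4, &new_ip, 4, 0);
 *	if (diff < 0)
 *		return TC_ACT_SHOT;
 *	if (bpf_l3_csum_replace(skb, IP_CSUM_OFF, 0, diff, 0) ||
 *	    bpf_l4_csum_replace(skb, TCP_CSUM_OFF, 0, diff,
 *				BPF_F_PSEUDO_HDR) ||
 *	    bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, 4, 0))
 *		return TC_ACT_SHOT;
 */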
2026
36bbef52
DB
2027BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
2028{
2029 /* The interface is to be used in combination with bpf_csum_diff()
2030 * for direct packet writes. csum rotation for alignment as well
2031 * as emulating csum_sub() can be done from the eBPF program.
2032 */
2033 if (skb->ip_summed == CHECKSUM_COMPLETE)
2034 return (skb->csum = csum_add(skb->csum, csum));
2035
2036 return -ENOTSUPP;
2037}
2038
2039static const struct bpf_func_proto bpf_csum_update_proto = {
2040 .func = bpf_csum_update,
2041 .gpl_only = false,
2042 .ret_type = RET_INTEGER,
2043 .arg1_type = ARG_PTR_TO_CTX,
2044 .arg2_type = ARG_ANYTHING,
2045};
2046
7cdec54f
DB
2047BPF_CALL_2(bpf_csum_level, struct sk_buff *, skb, u64, level)
2048{
2049 /* The interface is to be used in combination with bpf_skb_adjust_room()
2050 * for encap/decap of packet headers when BPF_F_ADJ_ROOM_NO_CSUM_RESET
2051 * is passed as flags, for example.
2052 */
2053 switch (level) {
2054 case BPF_CSUM_LEVEL_INC:
2055 __skb_incr_checksum_unnecessary(skb);
2056 break;
2057 case BPF_CSUM_LEVEL_DEC:
2058 __skb_decr_checksum_unnecessary(skb);
2059 break;
2060 case BPF_CSUM_LEVEL_RESET:
2061 __skb_reset_checksum_unnecessary(skb);
2062 break;
2063 case BPF_CSUM_LEVEL_QUERY:
2064 return skb->ip_summed == CHECKSUM_UNNECESSARY ?
2065 skb->csum_level : -EACCES;
2066 default:
2067 return -EINVAL;
2068 }
2069
2070 return 0;
2071}
2072
2073static const struct bpf_func_proto bpf_csum_level_proto = {
2074 .func = bpf_csum_level,
2075 .gpl_only = false,
2076 .ret_type = RET_INTEGER,
2077 .arg1_type = ARG_PTR_TO_CTX,
2078 .arg2_type = ARG_ANYTHING,
2079};
2080
a70b506e
DB
2081static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
2082{
5f7d5728 2083 return dev_forward_skb_nomtu(dev, skb);
a70b506e
DB
2084}
2085
4e3264d2
MKL
2086static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
2087 struct sk_buff *skb)
2088{
5f7d5728 2089 int ret = ____dev_forward_skb(dev, skb, false);
4e3264d2
MKL
2090
2091 if (likely(!ret)) {
2092 skb->dev = dev;
2093 ret = netif_rx(skb);
2094 }
2095
2096 return ret;
2097}
2098
a70b506e
DB
2099static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
2100{
2101 int ret;
2102
97cdcf37 2103 if (dev_xmit_recursion()) {
a70b506e
DB
2104 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
2105 kfree_skb(skb);
2106 return -ENETDOWN;
2107 }
2108
2109 skb->dev = dev;
5133498f 2110 skb->tstamp = 0;
a70b506e 2111
97cdcf37 2112 dev_xmit_recursion_inc();
a70b506e 2113 ret = dev_queue_xmit(skb);
97cdcf37 2114 dev_xmit_recursion_dec();
a70b506e
DB
2115
2116 return ret;
2117}
2118
4e3264d2
MKL
2119static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
2120 u32 flags)
2121{
e7c87bd6 2122 unsigned int mlen = skb_network_offset(skb);
4e3264d2 2123
e7c87bd6
WB
2124 if (mlen) {
2125 __skb_pull(skb, mlen);
4e3264d2 2126
e7c87bd6
WB
2127 /* At ingress, the mac header has already been pulled once.
2128 * At egress, skb_postpull_rcsum() has to be done in case the
2129 * skb originates from ingress (i.e. a forwarded skb)
2130 * to ensure that rcsum starts at net header.
2131 */
2132 if (!skb_at_tc_ingress(skb))
2133 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
2134 }
4e3264d2
MKL
2135 skb_pop_mac_header(skb);
2136 skb_reset_mac_len(skb);
2137 return flags & BPF_F_INGRESS ?
2138 __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
2139}
2140
2141static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
2142 u32 flags)
2143{
3a0af8fd
TG
2144 /* Verify that a link layer header is carried */
2145 if (unlikely(skb->mac_header >= skb->network_header)) {
2146 kfree_skb(skb);
2147 return -ERANGE;
2148 }
2149
4e3264d2
MKL
2150 bpf_push_mac_rcsum(skb);
2151 return flags & BPF_F_INGRESS ?
2152 __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
2153}
2154
2155static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
2156 u32 flags)
2157{
c491680f 2158 if (dev_is_mac_header_xmit(dev))
4e3264d2 2159 return __bpf_redirect_common(skb, dev, flags);
c491680f
DB
2160 else
2161 return __bpf_redirect_no_mac(skb, dev, flags);
4e3264d2
MKL
2162}
2163
b4ab3141 2164#if IS_ENABLED(CONFIG_IPV6)
ba452c9e
THJ
2165static int bpf_out_neigh_v6(struct net *net, struct sk_buff *skb,
2166 struct net_device *dev, struct bpf_nh_params *nh)
b4ab3141 2167{
b4ab3141
DB
2168 u32 hh_len = LL_RESERVED_SPACE(dev);
2169 const struct in6_addr *nexthop;
ba452c9e 2170 struct dst_entry *dst = NULL;
b4ab3141
DB
2171 struct neighbour *neigh;
2172
2173 if (dev_xmit_recursion()) {
2174 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
2175 goto out_drop;
2176 }
2177
2178 skb->dev = dev;
2179 skb->tstamp = 0;
2180
2181 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
a1e975e1
VA
2182 skb = skb_expand_head(skb, hh_len);
2183 if (!skb)
b4ab3141 2184 return -ENOMEM;
b4ab3141
DB
2185 }
2186
2187 rcu_read_lock_bh();
ba452c9e
THJ
2188 if (!nh) {
2189 dst = skb_dst(skb);
2190 nexthop = rt6_nexthop(container_of(dst, struct rt6_info, dst),
2191 &ipv6_hdr(skb)->daddr);
2192 } else {
2193 nexthop = &nh->ipv6_nh;
2194 }
b4ab3141
DB
2195 neigh = ip_neigh_gw6(dev, nexthop);
2196 if (likely(!IS_ERR(neigh))) {
2197 int ret;
2198
2199 sock_confirm_neigh(skb, neigh);
2200 dev_xmit_recursion_inc();
2201 ret = neigh_output(neigh, skb, false);
2202 dev_xmit_recursion_dec();
2203 rcu_read_unlock_bh();
2204 return ret;
2205 }
2206 rcu_read_unlock_bh();
ba452c9e 2207 if (dst)
a1e975e1 2208 IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
b4ab3141
DB
2209out_drop:
2210 kfree_skb(skb);
2211 return -ENETDOWN;
2212}
2213
ba452c9e
THJ
2214static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
2215 struct bpf_nh_params *nh)
b4ab3141
DB
2216{
2217 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
2218 struct net *net = dev_net(dev);
2219 int err, ret = NET_XMIT_DROP;
b4ab3141 2220
ba452c9e
THJ
2221 if (!nh) {
2222 struct dst_entry *dst;
2223 struct flowi6 fl6 = {
2224 .flowi6_flags = FLOWI_FLAG_ANYSRC,
2225 .flowi6_mark = skb->mark,
2226 .flowlabel = ip6_flowinfo(ip6h),
2227 .flowi6_oif = dev->ifindex,
2228 .flowi6_proto = ip6h->nexthdr,
2229 .daddr = ip6h->daddr,
2230 .saddr = ip6h->saddr,
2231 };
2232
2233 dst = ipv6_stub->ipv6_dst_lookup_flow(net, NULL, &fl6, NULL);
2234 if (IS_ERR(dst))
2235 goto out_drop;
b4ab3141 2236
ba452c9e
THJ
2237 skb_dst_set(skb, dst);
2238 } else if (nh->nh_family != AF_INET6) {
2239 goto out_drop;
2240 }
b4ab3141 2241
ba452c9e 2242 err = bpf_out_neigh_v6(net, skb, dev, nh);
b4ab3141
DB
2243 if (unlikely(net_xmit_eval(err)))
2244 dev->stats.tx_errors++;
2245 else
2246 ret = NET_XMIT_SUCCESS;
2247 goto out_xmit;
2248out_drop:
2249 dev->stats.tx_errors++;
2250 kfree_skb(skb);
2251out_xmit:
2252 return ret;
2253}
2254#else
ba452c9e
THJ
2255static int __bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev,
2256 struct bpf_nh_params *nh)
b4ab3141
DB
2257{
2258 kfree_skb(skb);
2259 return NET_XMIT_DROP;
2260}
2261#endif /* CONFIG_IPV6 */
2262
2263#if IS_ENABLED(CONFIG_INET)
ba452c9e
THJ
2264static int bpf_out_neigh_v4(struct net *net, struct sk_buff *skb,
2265 struct net_device *dev, struct bpf_nh_params *nh)
b4ab3141 2266{
b4ab3141
DB
2267 u32 hh_len = LL_RESERVED_SPACE(dev);
2268 struct neighbour *neigh;
2269 bool is_v6gw = false;
2270
2271 if (dev_xmit_recursion()) {
2272 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
2273 goto out_drop;
2274 }
2275
2276 skb->dev = dev;
2277 skb->tstamp = 0;
2278
2279 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
a1e975e1
VA
2280 skb = skb_expand_head(skb, hh_len);
2281 if (!skb)
b4ab3141 2282 return -ENOMEM;
b4ab3141
DB
2283 }
2284
2285 rcu_read_lock_bh();
ba452c9e
THJ
2286 if (!nh) {
2287 struct dst_entry *dst = skb_dst(skb);
2288 struct rtable *rt = container_of(dst, struct rtable, dst);
2289
2290 neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
2291 } else if (nh->nh_family == AF_INET6) {
2292 neigh = ip_neigh_gw6(dev, &nh->ipv6_nh);
2293 is_v6gw = true;
2294 } else if (nh->nh_family == AF_INET) {
2295 neigh = ip_neigh_gw4(dev, nh->ipv4_nh);
2296 } else {
2297 rcu_read_unlock_bh();
2298 goto out_drop;
2299 }
2300
b4ab3141
DB
2301 if (likely(!IS_ERR(neigh))) {
2302 int ret;
2303
2304 sock_confirm_neigh(skb, neigh);
2305 dev_xmit_recursion_inc();
2306 ret = neigh_output(neigh, skb, is_v6gw);
2307 dev_xmit_recursion_dec();
2308 rcu_read_unlock_bh();
2309 return ret;
2310 }
2311 rcu_read_unlock_bh();
2312out_drop:
2313 kfree_skb(skb);
2314 return -ENETDOWN;
2315}
2316
ba452c9e
THJ
2317static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
2318 struct bpf_nh_params *nh)
b4ab3141
DB
2319{
2320 const struct iphdr *ip4h = ip_hdr(skb);
2321 struct net *net = dev_net(dev);
2322 int err, ret = NET_XMIT_DROP;
b4ab3141 2323
ba452c9e
THJ
2324 if (!nh) {
2325 struct flowi4 fl4 = {
2326 .flowi4_flags = FLOWI_FLAG_ANYSRC,
2327 .flowi4_mark = skb->mark,
2328 .flowi4_tos = RT_TOS(ip4h->tos),
2329 .flowi4_oif = dev->ifindex,
2330 .flowi4_proto = ip4h->protocol,
2331 .daddr = ip4h->daddr,
2332 .saddr = ip4h->saddr,
2333 };
2334 struct rtable *rt;
2335
2336 rt = ip_route_output_flow(net, &fl4, NULL);
2337 if (IS_ERR(rt))
2338 goto out_drop;
2339 if (rt->rt_type != RTN_UNICAST && rt->rt_type != RTN_LOCAL) {
2340 ip_rt_put(rt);
2341 goto out_drop;
2342 }
b4ab3141 2343
ba452c9e
THJ
2344 skb_dst_set(skb, &rt->dst);
2345 }
b4ab3141 2346
ba452c9e 2347 err = bpf_out_neigh_v4(net, skb, dev, nh);
b4ab3141
DB
2348 if (unlikely(net_xmit_eval(err)))
2349 dev->stats.tx_errors++;
2350 else
2351 ret = NET_XMIT_SUCCESS;
2352 goto out_xmit;
2353out_drop:
2354 dev->stats.tx_errors++;
2355 kfree_skb(skb);
2356out_xmit:
2357 return ret;
2358}
2359#else
ba452c9e
THJ
2360static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev,
2361 struct bpf_nh_params *nh)
b4ab3141
DB
2362{
2363 kfree_skb(skb);
2364 return NET_XMIT_DROP;
2365}
2366#endif /* CONFIG_INET */
2367
ba452c9e
THJ
2368static int __bpf_redirect_neigh(struct sk_buff *skb, struct net_device *dev,
2369 struct bpf_nh_params *nh)
b4ab3141
DB
2370{
2371 struct ethhdr *ethh = eth_hdr(skb);
2372
2373 if (unlikely(skb->mac_header >= skb->network_header))
2374 goto out;
2375 bpf_push_mac_rcsum(skb);
2376 if (is_multicast_ether_addr(ethh->h_dest))
2377 goto out;
2378
2379 skb_pull(skb, sizeof(*ethh));
2380 skb_unset_mac_header(skb);
2381 skb_reset_network_header(skb);
2382
2383 if (skb->protocol == htons(ETH_P_IP))
ba452c9e 2384 return __bpf_redirect_neigh_v4(skb, dev, nh);
b4ab3141 2385 else if (skb->protocol == htons(ETH_P_IPV6))
ba452c9e 2386 return __bpf_redirect_neigh_v6(skb, dev, nh);
b4ab3141
DB
2387out:
2388 kfree_skb(skb);
2389 return -ENOTSUPP;
2390}
2391
2392/* Internal, non-exposed redirect flags. */
2393enum {
9aa1206e
DB
2394 BPF_F_NEIGH = (1ULL << 1),
2395 BPF_F_PEER = (1ULL << 2),
ba452c9e
THJ
2396 BPF_F_NEXTHOP = (1ULL << 3),
2397#define BPF_F_REDIRECT_INTERNAL (BPF_F_NEIGH | BPF_F_PEER | BPF_F_NEXTHOP)
b4ab3141
DB
2398};
2399
f3694e00 2400BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
3896d655 2401{
3896d655 2402 struct net_device *dev;
36bbef52
DB
2403 struct sk_buff *clone;
2404 int ret;
3896d655 2405
b4ab3141 2406 if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
781c53bc
DB
2407 return -EINVAL;
2408
3896d655
AS
2409 dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
2410 if (unlikely(!dev))
2411 return -EINVAL;
2412
36bbef52
DB
2413 clone = skb_clone(skb, GFP_ATOMIC);
2414 if (unlikely(!clone))
3896d655
AS
2415 return -ENOMEM;
2416
36bbef52
DB
2417 /* For direct write, we need to keep the invariant that the skbs
2418 * we're dealing with are uncloned. Should uncloning fail here,
2419 * we need to free the just generated clone to be able to
2420 * unclone once again.
2421 */
2422 ret = bpf_try_make_head_writable(skb);
2423 if (unlikely(ret)) {
2424 kfree_skb(clone);
2425 return -ENOMEM;
2426 }
2427
4e3264d2 2428 return __bpf_redirect(clone, dev, flags);
3896d655
AS
2429}
2430
577c50aa 2431static const struct bpf_func_proto bpf_clone_redirect_proto = {
3896d655
AS
2432 .func = bpf_clone_redirect,
2433 .gpl_only = false,
2434 .ret_type = RET_INTEGER,
2435 .arg1_type = ARG_PTR_TO_CTX,
2436 .arg2_type = ARG_ANYTHING,
2437 .arg3_type = ARG_ANYTHING,
2438};
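
/* Usage sketch (not part of this file): mirroring. Unlike bpf_redirect()
 * below, bpf_clone_redirect() transmits a clone from within the helper
 * call itself, so the program keeps the original skb and still returns
 * its own verdict for it. Ifindex 7 is an illustrative capture device;
 * errors on the mirrored copy are deliberately ignored here.
 *
 *	SEC("tc")
 *	int mirror(struct __sk_buff *skb)
 *	{
 *		bpf_clone_redirect(skb, 7, 0);
 *		return TC_ACT_OK;
 *	}
 */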
2439
0b19cc0a
TM
2440DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
2441EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
781c53bc 2442
27b29f63
AS
2443int skb_do_redirect(struct sk_buff *skb)
2444{
0b19cc0a 2445 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
9aa1206e 2446 struct net *net = dev_net(skb->dev);
27b29f63 2447 struct net_device *dev;
b4ab3141 2448 u32 flags = ri->flags;
27b29f63 2449
9aa1206e 2450 dev = dev_get_by_index_rcu(net, ri->tgt_index);
4b55cf29 2451 ri->tgt_index = 0;
9aa1206e
DB
2452 ri->flags = 0;
2453 if (unlikely(!dev))
2454 goto out_drop;
2455 if (flags & BPF_F_PEER) {
2456 const struct net_device_ops *ops = dev->netdev_ops;
2457
2458 if (unlikely(!ops->ndo_get_peer_dev ||
2459 !skb_at_tc_ingress(skb)))
2460 goto out_drop;
2461 dev = ops->ndo_get_peer_dev(dev);
2462 if (unlikely(!dev ||
5f7d5728 2463 !(dev->flags & IFF_UP) ||
9aa1206e
DB
2464 net_eq(net, dev_net(dev))))
2465 goto out_drop;
2466 skb->dev = dev;
2467 return -EAGAIN;
27b29f63 2468 }
b4ab3141 2469 return flags & BPF_F_NEIGH ?
ba452c9e
THJ
2470 __bpf_redirect_neigh(skb, dev, flags & BPF_F_NEXTHOP ?
2471 &ri->nh : NULL) :
b4ab3141 2472 __bpf_redirect(skb, dev, flags);
9aa1206e
DB
2473out_drop:
2474 kfree_skb(skb);
2475 return -EINVAL;
b4ab3141
DB
2476}
2477
2478BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
2479{
2480 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
2481
2482 if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL)))
2483 return TC_ACT_SHOT;
2484
2485 ri->flags = flags;
2486 ri->tgt_index = ifindex;
2487
2488 return TC_ACT_REDIRECT;
27b29f63
AS
2489}
2490
577c50aa 2491static const struct bpf_func_proto bpf_redirect_proto = {
27b29f63
AS
2492 .func = bpf_redirect,
2493 .gpl_only = false,
2494 .ret_type = RET_INTEGER,
2495 .arg1_type = ARG_ANYTHING,
2496 .arg2_type = ARG_ANYTHING,
2497};
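
/* Usage sketch (not part of this file): bpf_redirect() only stashes the
 * target in the per-CPU bpf_redirect_info above; the actual transmit
 * happens in skb_do_redirect() after the program returns, so the
 * TC_ACT_REDIRECT return value must be passed through. UPLINK_IFINDEX
 * is an assumed constant:
 *
 *	SEC("tc")
 *	int to_uplink(struct __sk_buff *skb)
 *	{
 *		return bpf_redirect(UPLINK_IFINDEX, 0);
 *	}
 */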
2498
9aa1206e
DB
2499BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags)
2500{
2501 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
2502
2503 if (unlikely(flags))
2504 return TC_ACT_SHOT;
2505
2506 ri->flags = BPF_F_PEER;
2507 ri->tgt_index = ifindex;
2508
2509 return TC_ACT_REDIRECT;
2510}
2511
2512static const struct bpf_func_proto bpf_redirect_peer_proto = {
2513 .func = bpf_redirect_peer,
2514 .gpl_only = false,
2515 .ret_type = RET_INTEGER,
2516 .arg1_type = ARG_ANYTHING,
2517 .arg2_type = ARG_ANYTHING,
2518};
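
/* Usage sketch (not part of this file): bpf_redirect_peer() delivers to
 * the peer of the given local device (e.g. the host side of a veth pair,
 * with the peer inside a container netns), skipping a second per-netns
 * queueing round. Per skb_do_redirect() above it is only valid at tc
 * ingress and the peer must live in a foreign netns. HOST_VETH_IFINDEX
 * is an assumed constant:
 *
 *	SEC("tc")
 *	int phys_ingress(struct __sk_buff *skb)
 *	{
 *		return bpf_redirect_peer(HOST_VETH_IFINDEX, 0);
 *	}
 */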
2519
ba452c9e
THJ
2520BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params,
2521 int, plen, u64, flags)
b4ab3141
DB
2522{
2523 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
2524
ba452c9e 2525 if (unlikely((plen && plen < sizeof(*params)) || flags))
b4ab3141
DB
2526 return TC_ACT_SHOT;
2527
ba452c9e 2528 ri->flags = BPF_F_NEIGH | (plen ? BPF_F_NEXTHOP : 0);
b4ab3141
DB
2529 ri->tgt_index = ifindex;
2530
ba452c9e
THJ
2531 BUILD_BUG_ON(sizeof(struct bpf_redir_neigh) != sizeof(struct bpf_nh_params));
2532 if (plen)
2533 memcpy(&ri->nh, params, sizeof(ri->nh));
2534
b4ab3141
DB
2535 return TC_ACT_REDIRECT;
2536}
2537
2538static const struct bpf_func_proto bpf_redirect_neigh_proto = {
2539 .func = bpf_redirect_neigh,
2540 .gpl_only = false,
2541 .ret_type = RET_INTEGER,
2542 .arg1_type = ARG_ANYTHING,
216e3cd2 2543 .arg2_type = ARG_PTR_TO_MEM | PTR_MAYBE_NULL | MEM_RDONLY,
ba452c9e
THJ
2544 .arg3_type = ARG_CONST_SIZE_OR_ZERO,
2545 .arg4_type = ARG_ANYTHING,
b4ab3141
DB
2546};
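
/* Usage sketch (not part of this file): combining bpf_fib_lookup() with
 * bpf_redirect_neigh() so the kernel fills in the L2 addresses through
 * the neighbour subsystem. fib_params is assumed to hold the result of
 * a successful bpf_fib_lookup() done earlier in the program:
 *
 *	struct bpf_redir_neigh nh = {
 *		.nh_family = AF_INET,
 *		.ipv4_nh   = fib_params.ipv4_dst,
 *	};
 *
 *	return bpf_redirect_neigh(fib_params.ifindex, &nh, sizeof(nh), 0);
 *
 * With plen == 0 the helper performs its own route lookup instead, as
 * __bpf_redirect_neigh_v4/v6 above show.
 */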
2547
604326b4 2548BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
2a100317
JF
2549{
2550 msg->apply_bytes = bytes;
2551 return 0;
2552}
2553
2554static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
2555 .func = bpf_msg_apply_bytes,
2556 .gpl_only = false,
2557 .ret_type = RET_INTEGER,
2558 .arg1_type = ARG_PTR_TO_CTX,
2559 .arg2_type = ARG_ANYTHING,
2560};
2561
604326b4 2562BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
91843d54
JF
2563{
2564 msg->cork_bytes = bytes;
2565 return 0;
2566}
2567
2568static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
2569 .func = bpf_msg_cork_bytes,
2570 .gpl_only = false,
2571 .ret_type = RET_INTEGER,
2572 .arg1_type = ARG_PTR_TO_CTX,
2573 .arg2_type = ARG_ANYTHING,
2574};
2575
604326b4
DB
2576BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
2577 u32, end, u64, flags)
015632bb 2578{
604326b4
DB
2579 u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
2580 u32 first_sge, last_sge, i, shift, bytes_sg_total;
2581 struct scatterlist *sge;
2582 u8 *raw, *to, *from;
015632bb
JF
2583 struct page *page;
2584
2585 if (unlikely(flags || end <= start))
2586 return -EINVAL;
2587
2588 /* First find the starting scatterlist element */
604326b4 2589 i = msg->sg.start;
015632bb 2590 do {
6562e29c 2591 offset += len;
604326b4 2592 len = sk_msg_elem(msg, i)->length;
015632bb
JF
2593 if (start < offset + len)
2594 break;
604326b4
DB
2595 sk_msg_iter_var_next(i);
2596 } while (i != msg->sg.end);
015632bb
JF
2597
2598 if (unlikely(start >= offset + len))
2599 return -EINVAL;
2600
604326b4 2601 first_sge = i;
5b24109b
DB
2602 /* The start may point into the sg element so we need to also
2603 * account for the headroom.
2604 */
2605 bytes_sg_total = start - offset + bytes;
163ab96b 2606 if (!test_bit(i, &msg->sg.copy) && bytes_sg_total <= len)
015632bb 2607 goto out;
015632bb
JF
2608
2609 /* At this point we need to linearize multiple scatterlist
2610 * elements or a single shared page. Either way we need to
2611 * copy into a linear buffer exclusively owned by BPF. Then
2612 * place the buffer in the scatterlist and fixup the original
2613 * entries by removing the entries now in the linear buffer
2614 * and shifting the remaining entries. For now we do not try
2615 * to copy partial entries to avoid complexity of running out
2616 * of sg_entry slots. The downside is reading a single byte
2617 * will copy the entire sg entry.
2618 */
2619 do {
604326b4
DB
2620 copy += sk_msg_elem(msg, i)->length;
2621 sk_msg_iter_var_next(i);
5b24109b 2622 if (bytes_sg_total <= copy)
015632bb 2623 break;
604326b4
DB
2624 } while (i != msg->sg.end);
2625 last_sge = i;
015632bb 2626
5b24109b 2627 if (unlikely(bytes_sg_total > copy))
015632bb
JF
2628 return -EINVAL;
2629
4c3d795c
TD
2630 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2631 get_order(copy));
015632bb
JF
2632 if (unlikely(!page))
2633 return -ENOMEM;
015632bb 2634
604326b4
DB
2635 raw = page_address(page);
2636 i = first_sge;
015632bb 2637 do {
604326b4
DB
2638 sge = sk_msg_elem(msg, i);
2639 from = sg_virt(sge);
2640 len = sge->length;
2641 to = raw + poffset;
015632bb
JF
2642
2643 memcpy(to, from, len);
9db39f4d 2644 poffset += len;
604326b4
DB
2645 sge->length = 0;
2646 put_page(sg_page(sge));
015632bb 2647
604326b4
DB
2648 sk_msg_iter_var_next(i);
2649 } while (i != last_sge);
015632bb 2650
604326b4 2651 sg_set_page(&msg->sg.data[first_sge], page, copy, 0);
015632bb
JF
2652
2653 /* To repair sg ring we need to shift entries. If we only
2654 * had a single entry though we can just replace it and
2655 * be done. Otherwise walk the ring and shift the entries.
2656 */
604326b4
DB
2657 WARN_ON_ONCE(last_sge == first_sge);
2658 shift = last_sge > first_sge ?
2659 last_sge - first_sge - 1 :
031097d9 2660 NR_MSG_FRAG_IDS - first_sge + last_sge - 1;
015632bb
JF
2661 if (!shift)
2662 goto out;
2663
604326b4
DB
2664 i = first_sge;
2665 sk_msg_iter_var_next(i);
015632bb 2666 do {
604326b4 2667 u32 move_from;
015632bb 2668
031097d9
JK
2669 if (i + shift >= NR_MSG_FRAG_IDS)
2670 move_from = i + shift - NR_MSG_FRAG_IDS;
015632bb
JF
2671 else
2672 move_from = i + shift;
604326b4 2673 if (move_from == msg->sg.end)
015632bb
JF
2674 break;
2675
604326b4
DB
2676 msg->sg.data[i] = msg->sg.data[move_from];
2677 msg->sg.data[move_from].length = 0;
2678 msg->sg.data[move_from].page_link = 0;
2679 msg->sg.data[move_from].offset = 0;
2680 sk_msg_iter_var_next(i);
015632bb 2681 } while (1);
604326b4
DB
2682
2683 msg->sg.end = msg->sg.end - shift > msg->sg.end ?
031097d9 2684 msg->sg.end - shift + NR_MSG_FRAG_IDS :
604326b4 2685 msg->sg.end - shift;
015632bb 2686out:
604326b4 2687 msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
015632bb 2688 msg->data_end = msg->data + bytes;
015632bb
JF
2689 return 0;
2690}
2691
2692static const struct bpf_func_proto bpf_msg_pull_data_proto = {
2693 .func = bpf_msg_pull_data,
2694 .gpl_only = false,
2695 .ret_type = RET_INTEGER,
2696 .arg1_type = ARG_PTR_TO_CTX,
2697 .arg2_type = ARG_ANYTHING,
2698 .arg3_type = ARG_ANYTHING,
2699 .arg4_type = ARG_ANYTHING,
2700};
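
/* Usage sketch (not part of this file): sk_msg verdict programs only see
 * the first scatterlist element through msg->data/data_end by default;
 * bpf_msg_pull_data() linearizes [start, end) so it can be accessed
 * directly. A minimal sketch gating on an assumed 1-byte opcode:
 *
 *	SEC("sk_msg")
 *	int msg_filter(struct sk_msg_md *msg)
 *	{
 *		__u8 *d;
 *
 *		if (bpf_msg_pull_data(msg, 0, 1, 0))
 *			return SK_DROP;
 *		d = (__u8 *)(long)msg->data;
 *		if (d + 1 > (__u8 *)(long)msg->data_end)
 *			return SK_DROP;
 *		return d[0] == 0x01 ? SK_PASS : SK_DROP;
 *	}
 */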
2701
6fff607e
JF
2702BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
2703 u32, len, u64, flags)
2704{
2705 struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
6562e29c 2706 u32 new, i = 0, l = 0, space, copy = 0, offset = 0;
6fff607e
JF
2707 u8 *raw, *to, *from;
2708 struct page *page;
2709
2710 if (unlikely(flags))
2711 return -EINVAL;
2712
2713 /* First find the starting scatterlist element */
2714 i = msg->sg.start;
2715 do {
6562e29c 2716 offset += l;
6fff607e
JF
2717 l = sk_msg_elem(msg, i)->length;
2718
2719 if (start < offset + l)
2720 break;
6fff607e
JF
2721 sk_msg_iter_var_next(i);
2722 } while (i != msg->sg.end);
2723
2724 if (start >= offset + l)
2725 return -EINVAL;
2726
2727 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2728
2729 /* If no space is available we will fall back to copy. We need at
2730 * least one scatterlist elem available to push data into
2731 * when start aligns to the beginning of an element, or two
2732 * when it falls inside an element. We handle the start equals
2733 * offset case because it's the common case for inserting a
2734 * header.
2735 */
2736 if (!space || (space == 1 && start != offset))
2737 copy = msg->sg.data[i].length;
2738
2739 page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2740 get_order(copy + len));
2741 if (unlikely(!page))
2742 return -ENOMEM;
2743
2744 if (copy) {
2745 int front, back;
2746
2747 raw = page_address(page);
2748
2749 psge = sk_msg_elem(msg, i);
2750 front = start - offset;
2751 back = psge->length - front;
2752 from = sg_virt(psge);
2753
2754 if (front)
2755 memcpy(raw, from, front);
2756
2757 if (back) {
2758 from += front;
2759 to = raw + front + len;
2760
2761 memcpy(to, from, back);
2762 }
2763
2764 put_page(sg_page(psge));
2765 } else if (start - offset) {
2766 psge = sk_msg_elem(msg, i);
2767 rsge = sk_msg_elem_cpy(msg, i);
2768
2769 psge->length = start - offset;
2770 rsge.length -= psge->length;
2771 rsge.offset += start;
2772
2773 sk_msg_iter_var_next(i);
2774 sg_unmark_end(psge);
cf21e9ba 2775 sg_unmark_end(&rsge);
6fff607e
JF
2776 sk_msg_iter_next(msg, end);
2777 }
2778
2779 /* Slot(s) to place newly allocated data */
2780 new = i;
2781
2782 /* Shift one or two slots as needed */
2783 if (!copy) {
2784 sge = sk_msg_elem_cpy(msg, i);
2785
2786 sk_msg_iter_var_next(i);
2787 sg_unmark_end(&sge);
2788 sk_msg_iter_next(msg, end);
2789
2790 nsge = sk_msg_elem_cpy(msg, i);
2791 if (rsge.length) {
2792 sk_msg_iter_var_next(i);
2793 nnsge = sk_msg_elem_cpy(msg, i);
2794 }
2795
2796 while (i != msg->sg.end) {
2797 msg->sg.data[i] = sge;
2798 sge = nsge;
2799 sk_msg_iter_var_next(i);
2800 if (rsge.length) {
2801 nsge = nnsge;
2802 nnsge = sk_msg_elem_cpy(msg, i);
2803 } else {
2804 nsge = sk_msg_elem_cpy(msg, i);
2805 }
2806 }
2807 }
2808
2809 /* Place newly allocated data buffer */
2810 sk_mem_charge(msg->sk, len);
2811 msg->sg.size += len;
163ab96b 2812 __clear_bit(new, &msg->sg.copy);
6fff607e
JF
2813 sg_set_page(&msg->sg.data[new], page, len + copy, 0);
2814 if (rsge.length) {
2815 get_page(sg_page(&rsge));
2816 sk_msg_iter_var_next(new);
2817 msg->sg.data[new] = rsge;
2818 }
2819
2820 sk_msg_compute_data_pointers(msg);
2821 return 0;
2822}
2823
2824static const struct bpf_func_proto bpf_msg_push_data_proto = {
2825 .func = bpf_msg_push_data,
2826 .gpl_only = false,
2827 .ret_type = RET_INTEGER,
2828 .arg1_type = ARG_PTR_TO_CTX,
2829 .arg2_type = ARG_ANYTHING,
2830 .arg3_type = ARG_ANYTHING,
2831 .arg4_type = ARG_ANYTHING,
2832};
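
/* Usage sketch (not part of this file): reserving room for a new
 * application header in front of the payload, the start == offset case
 * the comment above calls the common one. As the code shows, the
 * inserted region is not zeroed, so the program still has to write it
 * (e.g. after a bpf_msg_pull_data() over the new region).
 * APP_HDR_LEN is an assumed constant:
 *
 *	if (bpf_msg_push_data(msg, 0, APP_HDR_LEN, 0))
 *		return SK_DROP;
 */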
2833
7246d8ed
JF
2834static void sk_msg_shift_left(struct sk_msg *msg, int i)
2835{
2836 int prev;
2837
2838 do {
2839 prev = i;
2840 sk_msg_iter_var_next(i);
2841 msg->sg.data[prev] = msg->sg.data[i];
2842 } while (i != msg->sg.end);
2843
2844 sk_msg_iter_prev(msg, end);
2845}
2846
2847static void sk_msg_shift_right(struct sk_msg *msg, int i)
2848{
2849 struct scatterlist tmp, sge;
2850
2851 sk_msg_iter_next(msg, end);
2852 sge = sk_msg_elem_cpy(msg, i);
2853 sk_msg_iter_var_next(i);
2854 tmp = sk_msg_elem_cpy(msg, i);
2855
2856 while (i != msg->sg.end) {
2857 msg->sg.data[i] = sge;
2858 sk_msg_iter_var_next(i);
2859 sge = tmp;
2860 tmp = sk_msg_elem_cpy(msg, i);
2861 }
2862}
2863
2864BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
2865 u32, len, u64, flags)
2866{
6562e29c 2867 u32 i = 0, l = 0, space, offset = 0;
7246d8ed
JF
2868 u64 last = start + len;
2869 int pop;
2870
2871 if (unlikely(flags))
2872 return -EINVAL;
2873
2874 /* First find the starting scatterlist element */
2875 i = msg->sg.start;
2876 do {
6562e29c 2877 offset += l;
7246d8ed
JF
2878 l = sk_msg_elem(msg, i)->length;
2879
2880 if (start < offset + l)
2881 break;
7246d8ed
JF
2882 sk_msg_iter_var_next(i);
2883 } while (i != msg->sg.end);
2884
2885 /* Bounds checks: start and pop must be inside message */
2886 if (start >= offset + l || last >= msg->sg.size)
2887 return -EINVAL;
2888
2889 space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2890
2891 pop = len;
2892 /* --------------| offset
2893 * -| start |-------- len -------|
2894 *
2895 * |----- a ----|-------- pop -------|----- b ----|
2896 * |______________________________________________| length
2897 *
2898 *
2899 * a: region at front of scatter element to save
2900 * b: region at back of scatter element to save when length > A + pop
2901 * pop: region to pop from element, same as input 'pop' here will be
2902 * decremented below per iteration.
2903 *
2904 * Two top-level cases to handle when start != offset, first B is non
2905 * zero and second B is zero corresponding to when a pop includes more
2906 * than one element.
2907 *
2908 * Then if B is non-zero AND there is no space, allocate space and
2909 * compact A, B regions into page. If there is space, shift ring to
2910 * the right, freeing the next element in ring to place B, leaving
2911 * A untouched except to reduce length.
2912 */
2913 if (start != offset) {
2914 struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
2915 int a = start;
2916 int b = sge->length - pop - a;
2917
2918 sk_msg_iter_var_next(i);
2919
2920 if (pop < sge->length - a) {
2921 if (space) {
2922 sge->length = a;
2923 sk_msg_shift_right(msg, i);
2924 nsge = sk_msg_elem(msg, i);
2925 get_page(sg_page(sge));
2926 sg_set_page(nsge,
2927 sg_page(sge),
2928 b, sge->offset + pop + a);
2929 } else {
2930 struct page *page, *orig;
2931 u8 *to, *from;
2932
2933 page = alloc_pages(__GFP_NOWARN |
2934 __GFP_COMP | GFP_ATOMIC,
2935 get_order(a + b));
2936 if (unlikely(!page))
2937 return -ENOMEM;
2938
2939 sge->length = a;
2940 orig = sg_page(sge);
2941 from = sg_virt(sge);
2942 to = page_address(page);
2943 memcpy(to, from, a);
2944 memcpy(to + a, from + a + pop, b);
2945 sg_set_page(sge, page, a + b, 0);
2946 put_page(orig);
2947 }
2948 pop = 0;
2949 } else if (pop >= sge->length - a) {
7246d8ed 2950 pop -= (sge->length - a);
3e104c23 2951 sge->length = a;
7246d8ed
JF
2952 }
2953 }
2954
2955 /* From above the current layout _must_ be as follows,
2956 *
2957 * -| offset
2958 * -| start
2959 *
2960 * |---- pop ---|---------------- b ------------|
2961 * |____________________________________________| length
2962 *
2963 * Offset and start of the current msg elem are equal because in the
2964 * previous case we handled offset != start and either consumed the
2965 * entire element and advanced to the next element OR pop == 0.
2966 *
2967 * Two cases to handle here: first, pop is less than the length,
2968 * leaving some remainder b above. Simply adjust the element's layout
2969 * in this case. Or, pop >= length of the element, so that b = 0. In
2970 * this case advance to the next element, decrementing pop.
2971 */
2972 while (pop) {
2973 struct scatterlist *sge = sk_msg_elem(msg, i);
2974
2975 if (pop < sge->length) {
2976 sge->length -= pop;
2977 sge->offset += pop;
2978 pop = 0;
2979 } else {
2980 pop -= sge->length;
2981 sk_msg_shift_left(msg, i);
2982 }
2983 sk_msg_iter_var_next(i);
2984 }
2985
2986 sk_mem_uncharge(msg->sk, len - pop);
2987 msg->sg.size -= (len - pop);
2988 sk_msg_compute_data_pointers(msg);
2989 return 0;
2990}
2991
2992static const struct bpf_func_proto bpf_msg_pop_data_proto = {
2993 .func = bpf_msg_pop_data,
2994 .gpl_only = false,
2995 .ret_type = RET_INTEGER,
2996 .arg1_type = ARG_PTR_TO_CTX,
2997 .arg2_type = ARG_ANYTHING,
2998 .arg3_type = ARG_ANYTHING,
2999 .arg4_type = ARG_ANYTHING,
3000};
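
/* Usage sketch (not part of this file): the inverse of the push above,
 * stripping an assumed APP_HDR_LEN bytes of application header before
 * the data reaches the receiving socket:
 *
 *	if (bpf_msg_pop_data(msg, 0, APP_HDR_LEN, 0))
 *		return SK_DROP;
 */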
3001
5a52ae4e
DB
3002#ifdef CONFIG_CGROUP_NET_CLASSID
3003BPF_CALL_0(bpf_get_cgroup_classid_curr)
3004{
3005 return __task_get_classid(current);
3006}
3007
3008static const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto = {
3009 .func = bpf_get_cgroup_classid_curr,
3010 .gpl_only = false,
3011 .ret_type = RET_INTEGER,
3012};
b426ce83
DB
3013
3014BPF_CALL_1(bpf_skb_cgroup_classid, const struct sk_buff *, skb)
3015{
3016 struct sock *sk = skb_to_full_sk(skb);
3017
3018 if (!sk || !sk_fullsock(sk))
3019 return 0;
3020
3021 return sock_cgroup_classid(&sk->sk_cgrp_data);
3022}
3023
3024static const struct bpf_func_proto bpf_skb_cgroup_classid_proto = {
3025 .func = bpf_skb_cgroup_classid,
3026 .gpl_only = false,
3027 .ret_type = RET_INTEGER,
3028 .arg1_type = ARG_PTR_TO_CTX,
3029};
5a52ae4e
DB
3030#endif
3031
f3694e00 3032BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
8d20aabe 3033{
f3694e00 3034 return task_get_classid(skb);
8d20aabe
DB
3035}
3036
3037static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
3038 .func = bpf_get_cgroup_classid,
3039 .gpl_only = false,
3040 .ret_type = RET_INTEGER,
3041 .arg1_type = ARG_PTR_TO_CTX,
3042};
3043
f3694e00 3044BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
c46646d0 3045{
f3694e00 3046 return dst_tclassid(skb);
c46646d0
DB
3047}
3048
3049static const struct bpf_func_proto bpf_get_route_realm_proto = {
3050 .func = bpf_get_route_realm,
3051 .gpl_only = false,
3052 .ret_type = RET_INTEGER,
3053 .arg1_type = ARG_PTR_TO_CTX,
3054};
3055
f3694e00 3056BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
13c5c240
DB
3057{
3058 /* If skb_clear_hash() was called due to mangling, we can
3059 * trigger SW recalculation here. Later access to hash
3060 * can then use the inline skb->hash via context directly
3061 * instead of calling this helper again.
3062 */
f3694e00 3063 return skb_get_hash(skb);
13c5c240
DB
3064}
3065
3066static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
3067 .func = bpf_get_hash_recalc,
3068 .gpl_only = false,
3069 .ret_type = RET_INTEGER,
3070 .arg1_type = ARG_PTR_TO_CTX,
3071};
3072
7a4b28c6
DB
3073BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
3074{
3075 /* After all direct packet write, this can be used once for
3076 * triggering a lazy recalc on next skb_get_hash() invocation.
3077 */
3078 skb_clear_hash(skb);
3079 return 0;
3080}
3081
3082static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
3083 .func = bpf_set_hash_invalid,
3084 .gpl_only = false,
3085 .ret_type = RET_INTEGER,
3086 .arg1_type = ARG_PTR_TO_CTX,
3087};
3088
ded092cd
DB
3089BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
3090{
3091 /* Set user specified hash as L4(+), so that it gets returned
3092 * on skb_get_hash() call unless BPF prog later on triggers a
3093 * skb_clear_hash().
3094 */
3095 __skb_set_sw_hash(skb, hash, true);
3096 return 0;
3097}
3098
3099static const struct bpf_func_proto bpf_set_hash_proto = {
3100 .func = bpf_set_hash,
3101 .gpl_only = false,
3102 .ret_type = RET_INTEGER,
3103 .arg1_type = ARG_PTR_TO_CTX,
3104 .arg2_type = ARG_ANYTHING,
3105};
3106
f3694e00
DB
3107BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
3108 u16, vlan_tci)
4e10df9a 3109{
db58ba45 3110 int ret;
4e10df9a
AS
3111
3112 if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
3113 vlan_proto != htons(ETH_P_8021AD)))
3114 vlan_proto = htons(ETH_P_8021Q);
3115
8065694e 3116 bpf_push_mac_rcsum(skb);
db58ba45 3117 ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
8065694e
DB
3118 bpf_pull_mac_rcsum(skb);
3119
6aaae2b6 3120 bpf_compute_data_pointers(skb);
db58ba45 3121 return ret;
4e10df9a
AS
3122}
3123
93731ef0 3124static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
4e10df9a
AS
3125 .func = bpf_skb_vlan_push,
3126 .gpl_only = false,
3127 .ret_type = RET_INTEGER,
3128 .arg1_type = ARG_PTR_TO_CTX,
3129 .arg2_type = ARG_ANYTHING,
3130 .arg3_type = ARG_ANYTHING,
3131};
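
/* Usage sketch (not part of this file): tagging with an illustrative
 * VID 100. As with the pop variant below, the helper invalidates
 * previously derived packet pointers via bpf_compute_data_pointers(),
 * so direct-access bounds checks must be redone afterwards. bpf_htons()
 * comes from libbpf's bpf_endian.h:
 *
 *	if (bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 100))
 *		return TC_ACT_SHOT;
 */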
3132
f3694e00 3133BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
4e10df9a 3134{
db58ba45 3135 int ret;
4e10df9a 3136
8065694e 3137 bpf_push_mac_rcsum(skb);
db58ba45 3138 ret = skb_vlan_pop(skb);
8065694e
DB
3139 bpf_pull_mac_rcsum(skb);
3140
6aaae2b6 3141 bpf_compute_data_pointers(skb);
db58ba45 3142 return ret;
4e10df9a
AS
3143}
3144
93731ef0 3145static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
4e10df9a
AS
3146 .func = bpf_skb_vlan_pop,
3147 .gpl_only = false,
3148 .ret_type = RET_INTEGER,
3149 .arg1_type = ARG_PTR_TO_CTX,
3150};
3151
6578171a
DB
3152static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
3153{
3154 /* Caller already did skb_cow() with len as headroom,
3155 * so no need to do it here.
3156 */
3157 skb_push(skb, len);
3158 memmove(skb->data, skb->data + len, off);
3159 memset(skb->data + off, 0, len);
3160
3161 /* No skb_postpush_rcsum(skb, skb->data + off, len)
3162 * needed here as it does not change the skb->csum
3163 * result for checksum complete when summing over
3164 * zeroed blocks.
3165 */
3166 return 0;
3167}
3168
3169static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
3170{
3171 /* skb_ensure_writable() is not needed here, as we're
3172 * already working on an uncloned skb.
3173 */
3174 if (unlikely(!pskb_may_pull(skb, off + len)))
3175 return -ENOMEM;
3176
3177 skb_postpull_rcsum(skb, skb->data + off, len);
3178 memmove(skb->data + len, skb->data, off);
3179 __skb_pull(skb, len);
3180
3181 return 0;
3182}
3183
3184static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
3185{
3186 bool trans_same = skb->transport_header == skb->network_header;
3187 int ret;
3188
3189 /* There's no need for __skb_push()/__skb_pull() pair to
3190 * get to the start of the mac header as we're guaranteed
3191 * to always start from here under eBPF.
3192 */
3193 ret = bpf_skb_generic_push(skb, off, len);
3194 if (likely(!ret)) {
3195 skb->mac_header -= len;
3196 skb->network_header -= len;
3197 if (trans_same)
3198 skb->transport_header = skb->network_header;
3199 }
3200
3201 return ret;
3202}
3203
3204static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
3205{
3206 bool trans_same = skb->transport_header == skb->network_header;
3207 int ret;
3208
3209 /* Same here, __skb_push()/__skb_pull() pair not needed. */
3210 ret = bpf_skb_generic_pop(skb, off, len);
3211 if (likely(!ret)) {
3212 skb->mac_header += len;
3213 skb->network_header += len;
3214 if (trans_same)
3215 skb->transport_header = skb->network_header;
3216 }
3217
3218 return ret;
3219}
3220
3221static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
3222{
3223 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
0daf4349 3224 u32 off = skb_mac_header_len(skb);
6578171a
DB
3225 int ret;
3226
3227 ret = skb_cow(skb, len_diff);
3228 if (unlikely(ret < 0))
3229 return ret;
3230
3231 ret = bpf_skb_net_hdr_push(skb, off, len_diff);
3232 if (unlikely(ret < 0))
3233 return ret;
3234
3235 if (skb_is_gso(skb)) {
d02f51cb
DA
3236 struct skb_shared_info *shinfo = skb_shinfo(skb);
3237
0bc919d3 3238 /* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. */
d02f51cb
DA
3239 if (shinfo->gso_type & SKB_GSO_TCPV4) {
3240 shinfo->gso_type &= ~SKB_GSO_TCPV4;
3241 shinfo->gso_type |= SKB_GSO_TCPV6;
6578171a 3242 }
6578171a
DB
3243 }
3244
3245 skb->protocol = htons(ETH_P_IPV6);
3246 skb_clear_hash(skb);
3247
3248 return 0;
3249}
3250
3251static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
3252{
3253 const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
0daf4349 3254 u32 off = skb_mac_header_len(skb);
6578171a
DB
3255 int ret;
3256
3257 ret = skb_unclone(skb, GFP_ATOMIC);
3258 if (unlikely(ret < 0))
3259 return ret;
3260
3261 ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
3262 if (unlikely(ret < 0))
3263 return ret;
3264
3265 if (skb_is_gso(skb)) {
d02f51cb
DA
3266 struct skb_shared_info *shinfo = skb_shinfo(skb);
3267
0bc919d3 3268 /* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. */
d02f51cb
DA
3269 if (shinfo->gso_type & SKB_GSO_TCPV6) {
3270 shinfo->gso_type &= ~SKB_GSO_TCPV6;
3271 shinfo->gso_type |= SKB_GSO_TCPV4;
6578171a 3272 }
6578171a
DB
3273 }
3274
3275 skb->protocol = htons(ETH_P_IP);
3276 skb_clear_hash(skb);
3277
3278 return 0;
3279}
3280
3281static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
3282{
3283 __be16 from_proto = skb->protocol;
3284
3285 if (from_proto == htons(ETH_P_IP) &&
3286 to_proto == htons(ETH_P_IPV6))
3287 return bpf_skb_proto_4_to_6(skb);
3288
3289 if (from_proto == htons(ETH_P_IPV6) &&
3290 to_proto == htons(ETH_P_IP))
3291 return bpf_skb_proto_6_to_4(skb);
3292
3293 return -ENOTSUPP;
3294}
3295
f3694e00
DB
3296BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
3297 u64, flags)
6578171a 3298{
6578171a
DB
3299 int ret;
3300
3301 if (unlikely(flags))
3302 return -EINVAL;
3303
3304 /* The general idea is that this helper does the basic groundwork
3305 * needed for changing the protocol, and the eBPF program fills in
3306 * the rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
3307 * and other helpers, rather than passing a raw buffer here.
3308 *
3309 * The rationale is to keep this minimal and without a need to
3310 * deal with raw packet data. F.e. even if we passed buffers here,
3311 * the program would still need to call the bpf_lX_csum_replace()
3312 * helpers anyway. Plus, this way we also keep separation of
3313 * concerns, since f.e. bpf_skb_store_bytes() should only take
3314 * care of stores.
3315 *
3316 * Currently, additional options and extension header space are
3317 * not supported, but flags register is reserved so we can adapt
3318 * that. For offloads, we mark packet as dodgy, so that headers
3319 * need to be verified first.
3320 */
3321 ret = bpf_skb_proto_xlat(skb, proto);
6aaae2b6 3322 bpf_compute_data_pointers(skb);
6578171a
DB
3323 return ret;
3324}
3325
3326static const struct bpf_func_proto bpf_skb_change_proto_proto = {
3327 .func = bpf_skb_change_proto,
3328 .gpl_only = false,
3329 .ret_type = RET_INTEGER,
3330 .arg1_type = ARG_PTR_TO_CTX,
3331 .arg2_type = ARG_ANYTHING,
3332 .arg3_type = ARG_ANYTHING,
3333};
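
/* Usage sketch (not part of this file): the NAT64-style translation flow
 * implied by the comment above. The helper only resizes and retags the
 * skb; the program still has to write a valid IPv6 header and fix the
 * transport checksum for the changed pseudo header afterwards:
 *
 *	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
 *		return TC_ACT_SHOT;
 *	(then bpf_skb_store_bytes() the new ipv6hdr and adjust the L4
 *	 checksum with bpf_l4_csum_replace(..., BPF_F_PSEUDO_HDR))
 */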
3334
f3694e00 3335BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
d2485c42 3336{
d2485c42 3337 /* We only allow a restricted subset to be changed for now. */
45c7fffa
DB
3338 if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
3339 !skb_pkt_type_ok(pkt_type)))
d2485c42
DB
3340 return -EINVAL;
3341
3342 skb->pkt_type = pkt_type;
3343 return 0;
3344}
3345
3346static const struct bpf_func_proto bpf_skb_change_type_proto = {
3347 .func = bpf_skb_change_type,
3348 .gpl_only = false,
3349 .ret_type = RET_INTEGER,
3350 .arg1_type = ARG_PTR_TO_CTX,
3351 .arg2_type = ARG_ANYTHING,
3352};
3353
2be7e212
DB
3354static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
3355{
3356 switch (skb->protocol) {
3357 case htons(ETH_P_IP):
3358 return sizeof(struct iphdr);
3359 case htons(ETH_P_IPV6):
3360 return sizeof(struct ipv6hdr);
3361 default:
3362 return ~0U;
3363 }
3364}
3365
868d5235
WB
3366#define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
3367 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3368
3369#define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \
3370 BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
3371 BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
58dfc900 3372 BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
d01b59c9 3373 BPF_F_ADJ_ROOM_ENCAP_L2_ETH | \
58dfc900
AM
3374 BPF_F_ADJ_ROOM_ENCAP_L2( \
3375 BPF_ADJ_ROOM_ENCAP_L2_MASK))
2278f6cc
WB
3376
3377static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
3378 u64 flags)
2be7e212 3379{
58dfc900 3380 u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
868d5235 3381 bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
62b31b42 3382 u16 mac_len = 0, inner_net = 0, inner_trans = 0;
868d5235 3383 unsigned int gso_type = SKB_GSO_DODGY;
2be7e212
DB
3384 int ret;
3385
2278f6cc
WB
3386 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
3387 /* udp gso_size delineates datagrams, only allow if fixed */
3388 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3389 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3390 return -ENOTSUPP;
3391 }
d02f51cb 3392
908adce6 3393 ret = skb_cow_head(skb, len_diff);
2be7e212
DB
3394 if (unlikely(ret < 0))
3395 return ret;
3396
868d5235
WB
3397 if (encap) {
3398 if (skb->protocol != htons(ETH_P_IP) &&
3399 skb->protocol != htons(ETH_P_IPV6))
3400 return -ENOTSUPP;
3401
3402 if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
3403 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3404 return -EINVAL;
3405
3406 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
3407 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3408 return -EINVAL;
3409
d01b59c9
XH
3410 if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH &&
3411 inner_mac_len < ETH_HLEN)
3412 return -EINVAL;
3413
868d5235
WB
3414 if (skb->encapsulation)
3415 return -EALREADY;
3416
3417 mac_len = skb->network_header - skb->mac_header;
3418 inner_net = skb->network_header;
58dfc900
AM
3419 if (inner_mac_len > len_diff)
3420 return -EINVAL;
868d5235
WB
3421 inner_trans = skb->transport_header;
3422 }
3423
2be7e212
DB
3424 ret = bpf_skb_net_hdr_push(skb, off, len_diff);
3425 if (unlikely(ret < 0))
3426 return ret;
3427
868d5235 3428 if (encap) {
58dfc900 3429 skb->inner_mac_header = inner_net - inner_mac_len;
868d5235
WB
3430 skb->inner_network_header = inner_net;
3431 skb->inner_transport_header = inner_trans;
d01b59c9
XH
3432
3433 if (flags & BPF_F_ADJ_ROOM_ENCAP_L2_ETH)
3434 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
3435 else
3436 skb_set_inner_protocol(skb, skb->protocol);
868d5235
WB
3437
3438 skb->encapsulation = 1;
3439 skb_set_network_header(skb, mac_len);
3440
3441 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3442 gso_type |= SKB_GSO_UDP_TUNNEL;
3443 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
3444 gso_type |= SKB_GSO_GRE;
3445 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3446 gso_type |= SKB_GSO_IPXIP6;
58dfc900 3447 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
868d5235
WB
3448 gso_type |= SKB_GSO_IPXIP4;
3449
3450 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
3451 flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
3452 int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
3453 sizeof(struct ipv6hdr) :
3454 sizeof(struct iphdr);
3455
3456 skb_set_transport_header(skb, mac_len + nh_len);
3457 }
1b00e0df
WB
3458
3459 /* Match skb->protocol to new outer l3 protocol */
3460 if (skb->protocol == htons(ETH_P_IP) &&
3461 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3462 skb->protocol = htons(ETH_P_IPV6);
3463 else if (skb->protocol == htons(ETH_P_IPV6) &&
3464 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
3465 skb->protocol = htons(ETH_P_IP);
868d5235
WB
3466 }
3467
2be7e212 3468 if (skb_is_gso(skb)) {
d02f51cb
DA
3469 struct skb_shared_info *shinfo = skb_shinfo(skb);
3470
2be7e212 3471 /* Due to header grow, MSS needs to be downgraded. */
2278f6cc
WB
3472 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3473 skb_decrease_gso_size(shinfo, len_diff);
3474
2be7e212 3475 /* Header must be checked, and gso_segs recomputed. */
868d5235 3476 shinfo->gso_type |= gso_type;
d02f51cb 3477 shinfo->gso_segs = 0;
2be7e212
DB
3478 }
3479
3480 return 0;
3481}
3482
2278f6cc
WB
3483static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
3484 u64 flags)
2be7e212 3485{
2be7e212
DB
3486 int ret;
3487
836e66c2
DB
3488 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_FIXED_GSO |
3489 BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
43537b8e
WB
3490 return -EINVAL;
3491
2278f6cc
WB
3492 if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
3493 /* udp gso_size delineates datagrams, only allow if fixed */
3494 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3495 !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3496 return -ENOTSUPP;
3497 }
d02f51cb 3498
2be7e212
DB
3499 ret = skb_unclone(skb, GFP_ATOMIC);
3500 if (unlikely(ret < 0))
3501 return ret;
3502
3503 ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
3504 if (unlikely(ret < 0))
3505 return ret;
3506
3507 if (skb_is_gso(skb)) {
d02f51cb
DA
3508 struct skb_shared_info *shinfo = skb_shinfo(skb);
3509
2be7e212 3510 /* Due to header shrink, MSS can be upgraded. */
2278f6cc
WB
3511 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3512 skb_increase_gso_size(shinfo, len_diff);
3513
2be7e212 3514 /* Header must be checked, and gso_segs recomputed. */
d02f51cb
DA
3515 shinfo->gso_type |= SKB_GSO_DODGY;
3516 shinfo->gso_segs = 0;
2be7e212
DB
3517 }
3518
3519 return 0;
3520}
3521
6306c118 3522#define BPF_SKB_MAX_LEN SKB_MAX_ALLOC
2be7e212 3523
18ebe16d
JF
3524BPF_CALL_4(sk_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
3525 u32, mode, u64, flags)
3526{
3527 u32 len_diff_abs = abs(len_diff);
3528 bool shrink = len_diff < 0;
3529 int ret = 0;
3530
3531 if (unlikely(flags || mode))
3532 return -EINVAL;
3533 if (unlikely(len_diff_abs > 0xfffU))
3534 return -EFAULT;
3535
3536 if (!shrink) {
3537 ret = skb_cow(skb, len_diff);
3538 if (unlikely(ret < 0))
3539 return ret;
3540 __skb_push(skb, len_diff_abs);
3541 memset(skb->data, 0, len_diff_abs);
3542 } else {
3543 if (unlikely(!pskb_may_pull(skb, len_diff_abs)))
3544 return -ENOMEM;
3545 __skb_pull(skb, len_diff_abs);
3546 }
18ebe16d
JF
3547 if (tls_sw_has_ctx_rx(skb->sk)) {
3548 struct strp_msg *rxm = strp_msg(skb);
3549
3550 rxm->full_len += len_diff;
3551 }
3552 return ret;
3553}
3554
3555static const struct bpf_func_proto sk_skb_adjust_room_proto = {
3556 .func = sk_skb_adjust_room,
3557 .gpl_only = false,
3558 .ret_type = RET_INTEGER,
3559 .arg1_type = ARG_PTR_TO_CTX,
3560 .arg2_type = ARG_ANYTHING,
3561 .arg3_type = ARG_ANYTHING,
3562 .arg4_type = ARG_ANYTHING,
3563};
3564
14aa3192
WB
3565BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
3566 u32, mode, u64, flags)
2be7e212 3567{
2be7e212
DB
3568 u32 len_cur, len_diff_abs = abs(len_diff);
3569 u32 len_min = bpf_skb_net_base_len(skb);
6306c118 3570 u32 len_max = BPF_SKB_MAX_LEN;
2be7e212
DB
3571 __be16 proto = skb->protocol;
3572 bool shrink = len_diff < 0;
14aa3192 3573 u32 off;
2be7e212
DB
3574 int ret;
3575
836e66c2
DB
3576 if (unlikely(flags & ~(BPF_F_ADJ_ROOM_MASK |
3577 BPF_F_ADJ_ROOM_NO_CSUM_RESET)))
14aa3192 3578 return -EINVAL;
2be7e212
DB
3579 if (unlikely(len_diff_abs > 0xfffU))
3580 return -EFAULT;
3581 if (unlikely(proto != htons(ETH_P_IP) &&
3582 proto != htons(ETH_P_IPV6)))
3583 return -ENOTSUPP;
3584
14aa3192
WB
3585 off = skb_mac_header_len(skb);
3586 switch (mode) {
3587 case BPF_ADJ_ROOM_NET:
3588 off += bpf_skb_net_base_len(skb);
3589 break;
3590 case BPF_ADJ_ROOM_MAC:
3591 break;
3592 default:
3593 return -ENOTSUPP;
3594 }
3595
2be7e212 3596 len_cur = skb->len - skb_network_offset(skb);
2be7e212
DB
3597 if ((shrink && (len_diff_abs >= len_cur ||
3598 len_cur - len_diff_abs < len_min)) ||
3599 (!shrink && (skb->len + len_diff_abs > len_max &&
3600 !skb_is_gso(skb))))
3601 return -ENOTSUPP;
3602
2278f6cc
WB
3603 ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
3604 bpf_skb_net_grow(skb, off, len_diff_abs, flags);
836e66c2
DB
3605 if (!ret && !(flags & BPF_F_ADJ_ROOM_NO_CSUM_RESET))
3606 __skb_reset_checksum_unnecessary(skb);
2be7e212 3607
6aaae2b6 3608 bpf_compute_data_pointers(skb);
e4a6a342 3609 return ret;
2be7e212
DB
3610}
3611
2be7e212
DB
3612static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
3613 .func = bpf_skb_adjust_room,
3614 .gpl_only = false,
3615 .ret_type = RET_INTEGER,
3616 .arg1_type = ARG_PTR_TO_CTX,
3617 .arg2_type = ARG_ANYTHING,
3618 .arg3_type = ARG_ANYTHING,
3619 .arg4_type = ARG_ANYTHING,
3620};
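/* Example (editor's sketch, not kernel code): a tc BPF program using
 * bpf_skb_adjust_room() to make room for an outer IPv4 header, the first
 * step of an IPIP encap, mirroring the selftests' usage. Assumes
 * <linux/ip.h>, <linux/pkt_cls.h> and libbpf's <bpf/bpf_helpers.h>;
 * section and function names are illustrative.
 */
SEC("tc")
int encap_room(struct __sk_buff *skb)
{
	/* Grow by one IPv4 header right after the mac header; the
	 * ENCAP_L3_IPV4 flag keeps the GSO bookkeeping consistent.
	 */
	if (bpf_skb_adjust_room(skb, sizeof(struct iphdr), BPF_ADJ_ROOM_MAC,
				BPF_F_ADJ_ROOM_ENCAP_L3_IPV4))
		return TC_ACT_SHOT;
	/* The outer header would now be written via bpf_skb_store_bytes(). */
	return TC_ACT_OK;
}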
3621
5293efe6
DB
3622static u32 __bpf_skb_min_len(const struct sk_buff *skb)
3623{
3624 u32 min_len = skb_network_offset(skb);
3625
3626 if (skb_transport_header_was_set(skb))
3627 min_len = skb_transport_offset(skb);
3628 if (skb->ip_summed == CHECKSUM_PARTIAL)
3629 min_len = skb_checksum_start_offset(skb) +
3630 skb->csum_offset + sizeof(__sum16);
3631 return min_len;
3632}
3633
5293efe6
DB
3634static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
3635{
3636 unsigned int old_len = skb->len;
3637 int ret;
3638
3639 ret = __skb_grow_rcsum(skb, new_len);
3640 if (!ret)
3641 memset(skb->data + old_len, 0, new_len - old_len);
3642 return ret;
3643}
3644
3645static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
3646{
3647 return __skb_trim_rcsum(skb, new_len);
3648}
3649
0ea488ff
JF
3650static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
3651 u64 flags)
5293efe6 3652{
6306c118 3653 u32 max_len = BPF_SKB_MAX_LEN;
5293efe6 3654 u32 min_len = __bpf_skb_min_len(skb);
5293efe6
DB
3655 int ret;
3656
3657 if (unlikely(flags || new_len > max_len || new_len < min_len))
3658 return -EINVAL;
3659 if (skb->encapsulation)
3660 return -ENOTSUPP;
3661
 3662 /* The basic idea of this helper is that it performs the
 3663 * work needed to either grow or trim an skb, and the eBPF
 3664 * program rewrites the rest via helpers like bpf_skb_store_bytes(),
 3665 * bpf_lX_csum_replace() and others rather than passing a raw
 3666 * buffer here. This is a slow-path helper, intended for
 3667 * replies with control messages.
 3668 *
 3669 * Like in bpf_skb_change_proto(), we want to keep this rather
 3670 * minimal and without protocol specifics so that concerns stay
 3671 * separated: bpf_skb_store_bytes() should remain the only
 3672 * helper responsible for writing buffers.
 3673 *
 3674 * This is really expected to be a slow-path operation for
 3675 * control message replies, so we implicitly linearize, unclone
 3676 * and drop offloads from the skb here.
 3677 */
3678 ret = __bpf_try_make_writable(skb, skb->len);
3679 if (!ret) {
3680 if (new_len > skb->len)
3681 ret = bpf_skb_grow_rcsum(skb, new_len);
3682 else if (new_len < skb->len)
3683 ret = bpf_skb_trim_rcsum(skb, new_len);
3684 if (!ret && skb_is_gso(skb))
3685 skb_gso_reset(skb);
3686 }
0ea488ff
JF
3687 return ret;
3688}
3689
3690BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3691 u64, flags)
3692{
3693 int ret = __bpf_skb_change_tail(skb, new_len, flags);
5293efe6 3694
6aaae2b6 3695 bpf_compute_data_pointers(skb);
5293efe6
DB
3696 return ret;
3697}
3698
3699static const struct bpf_func_proto bpf_skb_change_tail_proto = {
3700 .func = bpf_skb_change_tail,
3701 .gpl_only = false,
3702 .ret_type = RET_INTEGER,
3703 .arg1_type = ARG_PTR_TO_CTX,
3704 .arg2_type = ARG_ANYTHING,
3705 .arg3_type = ARG_ANYTHING,
3706};
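/* Example (editor's sketch, not kernel code): trimming a slow-path reply
 * with bpf_skb_change_tail(). As the comment above notes, the helper
 * linearizes, unclones and drops offloads, so it should be kept to
 * control-message style traffic. Names are illustrative.
 */
SEC("tc")
int trim_reply(struct __sk_buff *skb)
{
	__u32 keep = 128;	/* placeholder: keep headers only */

	if (skb->len > keep && bpf_skb_change_tail(skb, keep, 0))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}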
3707
0ea488ff 3708BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3a0af8fd 3709 u64, flags)
0ea488ff 3710{
16137b09 3711 return __bpf_skb_change_tail(skb, new_len, flags);
0ea488ff
JF
3712}
3713
3714static const struct bpf_func_proto sk_skb_change_tail_proto = {
3715 .func = sk_skb_change_tail,
3716 .gpl_only = false,
3717 .ret_type = RET_INTEGER,
3718 .arg1_type = ARG_PTR_TO_CTX,
3719 .arg2_type = ARG_ANYTHING,
3720 .arg3_type = ARG_ANYTHING,
3721};
3722
3723static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
3724 u64 flags)
3a0af8fd 3725{
6306c118 3726 u32 max_len = BPF_SKB_MAX_LEN;
3a0af8fd
TG
3727 u32 new_len = skb->len + head_room;
3728 int ret;
3729
3730 if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
3731 new_len < skb->len))
3732 return -EINVAL;
3733
3734 ret = skb_cow(skb, head_room);
3735 if (likely(!ret)) {
 3736 /* The idea for this helper is that we currently only
 3737 * allow expanding the mac header. This means that
 3738 * skb->protocol, the network header, etc. stay as is.
 3739 * Compared to bpf_skb_change_tail(), we're more
 3740 * flexible since we neither need to linearize nor
 3741 * reset GSO. The intent is for this helper to be used
 3742 * by an L3 skb that needs to push a mac header for
 3743 * redirection into an L2 device.
 3744 */
3745 __skb_push(skb, head_room);
3746 memset(skb->data, 0, head_room);
3747 skb_reset_mac_header(skb);
84316ca4 3748 skb_reset_mac_len(skb);
3a0af8fd
TG
3749 }
3750
0ea488ff
JF
3751 return ret;
3752}
3753
3754BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
3755 u64, flags)
3756{
3757 int ret = __bpf_skb_change_head(skb, head_room, flags);
3758
6aaae2b6 3759 bpf_compute_data_pointers(skb);
0ea488ff 3760 return ret;
3a0af8fd
TG
3761}
3762
3763static const struct bpf_func_proto bpf_skb_change_head_proto = {
3764 .func = bpf_skb_change_head,
3765 .gpl_only = false,
3766 .ret_type = RET_INTEGER,
3767 .arg1_type = ARG_PTR_TO_CTX,
3768 .arg2_type = ARG_ANYTHING,
3769 .arg3_type = ARG_ANYTHING,
3770};
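/* Example (editor's sketch, not kernel code): the intended use of
 * bpf_skb_change_head() from an lwt_xmit program: push room for a mac
 * header onto an L3 skb, fill it in, then redirect into an L2 device.
 * The ifindex and the empty addresses are placeholders; assumes
 * <bpf/bpf_helpers.h> and <bpf/bpf_endian.h>.
 */
SEC("lwt_xmit")
int push_mac(struct __sk_buff *skb)
{
	struct ethhdr eth = {
		.h_proto = bpf_htons(ETH_P_IP),
		/* h_dest/h_source would come from configuration */
	};

	if (bpf_skb_change_head(skb, sizeof(eth), 0) ||
	    bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0))
		return BPF_DROP;
	return bpf_redirect(2 /* placeholder ifindex */, 0);
}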
3771
0ea488ff
JF
3772BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
3773 u64, flags)
3774{
16137b09 3775 return __bpf_skb_change_head(skb, head_room, flags);
0ea488ff
JF
3776}
3777
3778static const struct bpf_func_proto sk_skb_change_head_proto = {
3779 .func = sk_skb_change_head,
3780 .gpl_only = false,
3781 .ret_type = RET_INTEGER,
3782 .arg1_type = ARG_PTR_TO_CTX,
3783 .arg2_type = ARG_ANYTHING,
3784 .arg3_type = ARG_ANYTHING,
3785};
de8f3a83
DB
3786static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
3787{
3788 return xdp_data_meta_unsupported(xdp) ? 0 :
3789 xdp->data - xdp->data_meta;
3790}
3791
17bedab2
MKL
3792BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
3793{
6dfb970d 3794 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
de8f3a83 3795 unsigned long metalen = xdp_get_metalen(xdp);
97e19cce 3796 void *data_start = xdp_frame_end + metalen;
17bedab2
MKL
3797 void *data = xdp->data + offset;
3798
de8f3a83 3799 if (unlikely(data < data_start ||
17bedab2
MKL
3800 data > xdp->data_end - ETH_HLEN))
3801 return -EINVAL;
3802
de8f3a83
DB
3803 if (metalen)
3804 memmove(xdp->data_meta + offset,
3805 xdp->data_meta, metalen);
3806 xdp->data_meta += offset;
17bedab2
MKL
3807 xdp->data = data;
3808
3809 return 0;
3810}
3811
3812static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
3813 .func = bpf_xdp_adjust_head,
3814 .gpl_only = false,
3815 .ret_type = RET_INTEGER,
3816 .arg1_type = ARG_PTR_TO_CTX,
3817 .arg2_type = ARG_ANYTHING,
3818};
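/* Example (editor's sketch, not kernel code): growing the packet at the
 * front with bpf_xdp_adjust_head(). A negative delta consumes driver
 * headroom (bounded by xdp_frame_end above); pointers must be
 * re-validated before any access. OUTER_HLEN is a placeholder.
 */
#define OUTER_HLEN 8

SEC("xdp")
int reserve_front(struct xdp_md *xdp)
{
	if (bpf_xdp_adjust_head(xdp, -OUTER_HLEN))
		return XDP_ABORTED;
	/* xdp->data..xdp->data_end must be re-checked before writing
	 * the new OUTER_HLEN bytes.
	 */
	return XDP_PASS;
}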
3819
b32cc5b9
NS
3820BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
3821{
c8741e2b 3822 void *data_hard_end = xdp_data_hard_end(xdp); /* use xdp->frame_sz */
b32cc5b9
NS
3823 void *data_end = xdp->data_end + offset;
3824
c8741e2b
JDB
 3825 /* Notice that xdp_data_hard_end has reserved some tailroom */
3826 if (unlikely(data_end > data_hard_end))
b32cc5b9
NS
3827 return -EINVAL;
3828
c8741e2b
JDB
3829 /* ALL drivers MUST init xdp->frame_sz, chicken check below */
3830 if (unlikely(xdp->frame_sz > PAGE_SIZE)) {
3831 WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz);
3832 return -EINVAL;
3833 }
3834
b32cc5b9
NS
3835 if (unlikely(data_end < xdp->data + ETH_HLEN))
3836 return -EINVAL;
3837
ddb47d51
JDB
3838 /* Clear memory area on grow, can contain uninit kernel memory */
3839 if (offset > 0)
3840 memset(xdp->data_end, 0, offset);
3841
b32cc5b9
NS
3842 xdp->data_end = data_end;
3843
3844 return 0;
3845}
3846
3847static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
3848 .func = bpf_xdp_adjust_tail,
3849 .gpl_only = false,
3850 .ret_type = RET_INTEGER,
3851 .arg1_type = ARG_PTR_TO_CTX,
3852 .arg2_type = ARG_ANYTHING,
3853};
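/* Example (editor's sketch, not kernel code): shrinking a frame with
 * bpf_xdp_adjust_tail(), e.g. truncating to a snap length before passing
 * it on. A positive delta instead grows into the reserved tailroom and,
 * as above, is zero-filled by the kernel.
 */
SEC("xdp")
int snap_frame(struct xdp_md *xdp)
{
	int len = xdp->data_end - xdp->data;
	int keep = 64;	/* placeholder snap length */

	if (len > keep && bpf_xdp_adjust_tail(xdp, keep - len))
		return XDP_ABORTED;
	return XDP_PASS;
}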
3854
de8f3a83
DB
3855BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
3856{
97e19cce 3857 void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
de8f3a83
DB
3858 void *meta = xdp->data_meta + offset;
3859 unsigned long metalen = xdp->data - meta;
3860
3861 if (xdp_data_meta_unsupported(xdp))
3862 return -ENOTSUPP;
97e19cce 3863 if (unlikely(meta < xdp_frame_end ||
de8f3a83
DB
3864 meta > xdp->data))
3865 return -EINVAL;
7445cf31 3866 if (unlikely(xdp_metalen_invalid(metalen)))
de8f3a83
DB
3867 return -EACCES;
3868
3869 xdp->data_meta = meta;
3870
3871 return 0;
3872}
3873
3874static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
3875 .func = bpf_xdp_adjust_meta,
3876 .gpl_only = false,
3877 .ret_type = RET_INTEGER,
3878 .arg1_type = ARG_PTR_TO_CTX,
3879 .arg2_type = ARG_ANYTHING,
3880};
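/* Example (editor's sketch, not kernel code): stashing a 32-bit tag in
 * the metadata area so a later consumer (e.g. a tc program looking at
 * data_meta) can read it. The delta must keep the alignment that
 * xdp_metalen_invalid() enforces.
 */
SEC("xdp")
int tag_packet(struct xdp_md *xdp)
{
	__u32 *tag;

	if (bpf_xdp_adjust_meta(xdp, -(int)sizeof(*tag)))
		return XDP_PASS;	/* driver lacks metadata support */

	tag = (void *)(long)xdp->data_meta;
	if ((void *)(tag + 1) > (void *)(long)xdp->data)
		return XDP_ABORTED;
	*tag = 0xcafe;	/* placeholder mark */
	return XDP_PASS;
}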
3881
782347b6
THJ
3882/* XDP_REDIRECT works by a three-step process, implemented in the functions
3883 * below:
3884 *
3885 * 1. The bpf_redirect() and bpf_redirect_map() helpers will lookup the target
3886 * of the redirect and store it (along with some other metadata) in a per-CPU
3887 * struct bpf_redirect_info.
3888 *
3889 * 2. When the program returns the XDP_REDIRECT return code, the driver will
3890 * call xdp_do_redirect() which will use the information in struct
3891 * bpf_redirect_info to actually enqueue the frame into a map type-specific
3892 * bulk queue structure.
3893 *
3894 * 3. Before exiting its NAPI poll loop, the driver will call xdp_do_flush(),
3895 * which will flush all the different bulk queues, thus completing the
3896 * redirect.
3897 *
3898 * Pointers to the map entries will be kept around for this whole sequence of
3899 * steps, protected by RCU. However, there is no top-level rcu_read_lock() in
3900 * the core code; instead, the RCU protection relies on everything happening
3901 * inside a single NAPI poll sequence, which means it's between a pair of calls
3902 * to local_bh_disable()/local_bh_enable().
3903 *
3904 * The map entries are marked as __rcu and the map code makes sure to
3905 * dereference those pointers with rcu_dereference_check() in a way that works
 3906 * for both sections that hold an rcu_read_lock() and sections that are
3907 * called from NAPI without a separate rcu_read_lock(). The code below does not
3908 * use RCU annotations, but relies on those in the map code.
3909 */
1d233886 3910void xdp_do_flush(void)
11393cc9 3911{
1d233886 3912 __dev_flush();
332f22a6
BT
3913 __cpu_map_flush();
3914 __xsk_map_flush();
11393cc9 3915}
1d233886 3916EXPORT_SYMBOL_GPL(xdp_do_flush);
11393cc9 3917
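/* Example (editor's sketch, not kernel code): how the three steps above
 * look from a driver's NAPI poll loop. Everything prefixed example_ is a
 * placeholder for driver-specific code; only bpf_prog_run_xdp(),
 * xdp_do_redirect() and xdp_do_flush() are real kernel APIs.
 */
static int example_poll_rx(struct example_ring *ring,
			   struct bpf_prog *xdp_prog, int budget)
{
	int done = 0;

	while (done < budget && example_ring_has_frames(ring)) {
		struct xdp_buff xdp;

		example_fill_xdp_buff(ring, &xdp);
		switch (bpf_prog_run_xdp(xdp_prog, &xdp)) {	/* step 1 ran bpf_redirect*() */
		case XDP_REDIRECT:
			/* step 2: enqueue into the map's bulk queue */
			if (xdp_do_redirect(ring->netdev, &xdp, xdp_prog))
				example_recycle_frame(ring, &xdp);
			break;
		default:
			example_handle_verdict(ring, &xdp);
		}
		done++;
	}
	xdp_do_flush();	/* step 3: drain bulk queues before leaving NAPI */
	return done;
}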
e624d4ed
HL
3918void bpf_clear_redirect_map(struct bpf_map *map)
3919{
3920 struct bpf_redirect_info *ri;
3921 int cpu;
3922
3923 for_each_possible_cpu(cpu) {
3924 ri = per_cpu_ptr(&bpf_redirect_info, cpu);
 3925 /* Avoid polluting the remote cacheline with writes if
 3926 * not needed. Once we pass this test, we need the
 3927 * cmpxchg() to make sure it hasn't been changed in
 3928 * the meantime by a remote CPU.
3929 */
3930 if (unlikely(READ_ONCE(ri->map) == map))
3931 cmpxchg(&ri->map, map, NULL);
3932 }
3933}
3934
879af96f
JM
3935DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);
3936EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key);
3937
3938u32 xdp_master_redirect(struct xdp_buff *xdp)
3939{
3940 struct net_device *master, *slave;
3941 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3942
3943 master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
3944 slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
3945 if (slave && slave != xdp->rxq->dev) {
 3946 /* The target device is different from the receiving device, so
 3947 * redirect the packet to the new device.
 3948 * Using XDP_REDIRECT gets the correct behaviour from XDP-enabled
 3949 * drivers: they unmap the packet from their rx ring.
3950 */
3951 ri->tgt_index = slave->ifindex;
3952 ri->map_id = INT_MAX;
3953 ri->map_type = BPF_MAP_TYPE_UNSPEC;
3954 return XDP_REDIRECT;
3955 }
3956 return XDP_TX;
3957}
3958EXPORT_SYMBOL_GPL(xdp_master_redirect);
3959
1d233886
THJ
3960int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
3961 struct bpf_prog *xdp_prog)
97f91a7c 3962{
1d233886 3963 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
ee75aef2 3964 enum bpf_map_type map_type = ri->map_type;
43e74c02 3965 void *fwd = ri->tgt_value;
ee75aef2 3966 u32 map_id = ri->map_id;
e624d4ed 3967 struct bpf_map *map;
4c03bdd7 3968 int err;
97f91a7c 3969
ee75aef2
BT
3970 ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
3971 ri->map_type = BPF_MAP_TYPE_UNSPEC;
97f91a7c 3972
ee75aef2
BT
3973 switch (map_type) {
3974 case BPF_MAP_TYPE_DEVMAP:
3975 fallthrough;
3976 case BPF_MAP_TYPE_DEVMAP_HASH:
e624d4ed
HL
3977 map = READ_ONCE(ri->map);
3978 if (unlikely(map)) {
3979 WRITE_ONCE(ri->map, NULL);
3980 err = dev_map_enqueue_multi(xdp, dev, map,
3981 ri->flags & BPF_F_EXCLUDE_INGRESS);
3982 } else {
3983 err = dev_map_enqueue(fwd, xdp, dev);
3984 }
ee75aef2
BT
3985 break;
3986 case BPF_MAP_TYPE_CPUMAP:
3987 err = cpu_map_enqueue(fwd, xdp, dev);
3988 break;
3989 case BPF_MAP_TYPE_XSKMAP:
3990 err = __xsk_map_redirect(fwd, xdp);
3991 break;
3992 case BPF_MAP_TYPE_UNSPEC:
3993 if (map_id == INT_MAX) {
3994 fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index);
3995 if (unlikely(!fwd)) {
3996 err = -EINVAL;
3997 break;
3998 }
3999 err = dev_xdp_enqueue(fwd, xdp, dev);
4000 break;
1d233886 4001 }
ee75aef2
BT
4002 fallthrough;
4003 default:
4004 err = -EBADRQC;
1d233886
THJ
4005 }
4006
f5836ca5
JDB
4007 if (unlikely(err))
4008 goto err;
4009
ee75aef2 4010 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
f5836ca5
JDB
4011 return 0;
4012err:
ee75aef2 4013 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
97f91a7c
JF
4014 return err;
4015}
814abfab
JF
4016EXPORT_SYMBOL_GPL(xdp_do_redirect);
4017
c060bc61
XS
4018static int xdp_do_generic_redirect_map(struct net_device *dev,
4019 struct sk_buff *skb,
02671e23 4020 struct xdp_buff *xdp,
f6069b9a 4021 struct bpf_prog *xdp_prog,
ee75aef2
BT
4022 void *fwd,
4023 enum bpf_map_type map_type, u32 map_id)
6103aa96 4024{
0b19cc0a 4025 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
e624d4ed 4026 struct bpf_map *map;
ee75aef2 4027 int err;
6d5fc195 4028
ee75aef2
BT
4029 switch (map_type) {
4030 case BPF_MAP_TYPE_DEVMAP:
4031 fallthrough;
4032 case BPF_MAP_TYPE_DEVMAP_HASH:
e624d4ed
HL
4033 map = READ_ONCE(ri->map);
4034 if (unlikely(map)) {
4035 WRITE_ONCE(ri->map, NULL);
4036 err = dev_map_redirect_multi(dev, skb, xdp_prog, map,
4037 ri->flags & BPF_F_EXCLUDE_INGRESS);
4038 } else {
4039 err = dev_map_generic_redirect(fwd, skb, xdp_prog);
4040 }
6d5fc195 4041 if (unlikely(err))
9c270af3 4042 goto err;
ee75aef2
BT
4043 break;
4044 case BPF_MAP_TYPE_XSKMAP:
4045 err = xsk_generic_rcv(fwd, xdp);
02671e23
BT
4046 if (err)
4047 goto err;
4048 consume_skb(skb);
ee75aef2 4049 break;
11941f8a
KKD
4050 case BPF_MAP_TYPE_CPUMAP:
4051 err = cpu_map_generic_redirect(fwd, skb);
4052 if (unlikely(err))
4053 goto err;
4054 break;
ee75aef2 4055 default:
9c270af3 4056 err = -EBADRQC;
f5836ca5 4057 goto err;
2facaad6 4058 }
6103aa96 4059
ee75aef2 4060 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
9c270af3
JDB
4061 return 0;
4062err:
ee75aef2 4063 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
9c270af3
JDB
4064 return err;
4065}
4066
4067int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
02671e23 4068 struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
9c270af3 4069{
0b19cc0a 4070 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
ee75aef2
BT
4071 enum bpf_map_type map_type = ri->map_type;
4072 void *fwd = ri->tgt_value;
4073 u32 map_id = ri->map_id;
4074 int err;
2facaad6 4075
ee75aef2
BT
4076 ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
4077 ri->map_type = BPF_MAP_TYPE_UNSPEC;
9c270af3 4078
ee75aef2
BT
4079 if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
4080 fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index);
4081 if (unlikely(!fwd)) {
4082 err = -EINVAL;
4083 goto err;
4084 }
4085
4086 err = xdp_ok_fwd_dev(fwd, skb->len);
4087 if (unlikely(err))
4088 goto err;
4089
4090 skb->dev = fwd;
4091 _trace_xdp_redirect(dev, xdp_prog, ri->tgt_index);
4092 generic_xdp_tx(skb, xdp_prog);
4093 return 0;
4094 }
4095
4096 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
f5836ca5 4097err:
ee75aef2 4098 _trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
2facaad6 4099 return err;
6103aa96 4100}
6103aa96 4101
814abfab
JF
4102BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
4103{
0b19cc0a 4104 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
814abfab
JF
4105
4106 if (unlikely(flags))
4107 return XDP_ABORTED;
4108
ee75aef2
BT
 4109 /* NB! Map type UNSPEC and map_id == INT_MAX (never generated
 4110 * by map_idr) are used for ifindex-based XDP redirect.
4111 */
4b55cf29 4112 ri->tgt_index = ifindex;
ee75aef2
BT
4113 ri->map_id = INT_MAX;
4114 ri->map_type = BPF_MAP_TYPE_UNSPEC;
e4a8e817 4115
814abfab
JF
4116 return XDP_REDIRECT;
4117}
4118
4119static const struct bpf_func_proto bpf_xdp_redirect_proto = {
4120 .func = bpf_xdp_redirect,
4121 .gpl_only = false,
4122 .ret_type = RET_INTEGER,
4123 .arg1_type = ARG_ANYTHING,
4124 .arg2_type = ARG_ANYTHING,
4125};
4126
f6069b9a
DB
4127BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
4128 u64, flags)
e4a8e817 4129{
e6a4750f 4130 return map->ops->map_redirect(map, ifindex, flags);
e4a8e817
DB
4131}
4132
4133static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
4134 .func = bpf_xdp_redirect_map,
4135 .gpl_only = false,
4136 .ret_type = RET_INTEGER,
4137 .arg1_type = ARG_CONST_MAP_PTR,
4138 .arg2_type = ARG_ANYTHING,
4139 .arg3_type = ARG_ANYTHING,
4140};
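/* Example (editor's sketch, not kernel code): the program side of an XDP
 * redirect through a DEVMAP. The lower bits of the flags argument serve
 * as the return code when the lookup fails, so unpopulated slots fall
 * through to XDP_PASS. The map name is illustrative.
 */
struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} tx_ports SEC(".maps");

SEC("xdp")
int redirect_port(struct xdp_md *xdp)
{
	__u32 port = 0;	/* placeholder key, e.g. derived from the header */

	return bpf_redirect_map(&tx_ports, port, XDP_PASS);
}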
4141
555c8a86 4142static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
aa7145c1 4143 unsigned long off, unsigned long len)
555c8a86 4144{
aa7145c1 4145 void *ptr = skb_header_pointer(skb, off, len, dst_buff);
555c8a86
DB
4146
4147 if (unlikely(!ptr))
4148 return len;
4149 if (ptr != dst_buff)
4150 memcpy(dst_buff, ptr, len);
4151
4152 return 0;
4153}
4154
f3694e00
DB
4155BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
4156 u64, flags, void *, meta, u64, meta_size)
555c8a86 4157{
555c8a86 4158 u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
555c8a86
DB
4159
4160 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
4161 return -EINVAL;
a7658e1a 4162 if (unlikely(!skb || skb_size > skb->len))
555c8a86
DB
4163 return -EFAULT;
4164
4165 return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
4166 bpf_skb_copy);
4167}
4168
4169static const struct bpf_func_proto bpf_skb_event_output_proto = {
4170 .func = bpf_skb_event_output,
4171 .gpl_only = true,
4172 .ret_type = RET_INTEGER,
4173 .arg1_type = ARG_PTR_TO_CTX,
4174 .arg2_type = ARG_CONST_MAP_PTR,
4175 .arg3_type = ARG_ANYTHING,
216e3cd2 4176 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1728a4f2 4177 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
555c8a86
DB
4178};
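/* Example (editor's sketch, not kernel code): emitting a perf event from
 * tc via bpf_perf_event_output(), which lands in bpf_skb_event_output()
 * above. The upper 32 bits of the flags ask the kernel to append the
 * first 64 packet bytes after the metadata. Map and struct names are
 * illustrative.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
} events SEC(".maps");

struct event_meta {
	__u32 ifindex;
	__u32 pkt_len;
};

SEC("tc")
int sample_skb(struct __sk_buff *skb)
{
	struct event_meta meta = {
		.ifindex = skb->ifindex,
		.pkt_len = skb->len,
	};
	__u64 flags = BPF_F_CURRENT_CPU | ((__u64)64 << 32);

	bpf_perf_event_output(skb, &events, flags, &meta, sizeof(meta));
	return TC_ACT_OK;
}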
4179
9436ef6e 4180BTF_ID_LIST_SINGLE(bpf_skb_output_btf_ids, struct, sk_buff)
c9a0f3b8 4181
a7658e1a
AS
4182const struct bpf_func_proto bpf_skb_output_proto = {
4183 .func = bpf_skb_event_output,
4184 .gpl_only = true,
4185 .ret_type = RET_INTEGER,
4186 .arg1_type = ARG_PTR_TO_BTF_ID,
9436ef6e 4187 .arg1_btf_id = &bpf_skb_output_btf_ids[0],
a7658e1a
AS
4188 .arg2_type = ARG_CONST_MAP_PTR,
4189 .arg3_type = ARG_ANYTHING,
216e3cd2 4190 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
a7658e1a 4191 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
a7658e1a
AS
4192};
4193
c6c33454
DB
4194static unsigned short bpf_tunnel_key_af(u64 flags)
4195{
4196 return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
4197}
4198
f3694e00
DB
4199BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
4200 u32, size, u64, flags)
d3aa45ce 4201{
c6c33454
DB
4202 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
4203 u8 compat[sizeof(struct bpf_tunnel_key)];
074f528e
DB
4204 void *to_orig = to;
4205 int err;
d3aa45ce 4206
074f528e
DB
4207 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
4208 err = -EINVAL;
4209 goto err_clear;
4210 }
4211 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
4212 err = -EPROTO;
4213 goto err_clear;
4214 }
c6c33454 4215 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
074f528e 4216 err = -EINVAL;
c6c33454 4217 switch (size) {
4018ab18 4218 case offsetof(struct bpf_tunnel_key, tunnel_label):
c0e760c9 4219 case offsetof(struct bpf_tunnel_key, tunnel_ext):
4018ab18 4220 goto set_compat;
c6c33454
DB
4221 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
4222 /* Fixup deprecated structure layouts here, so we have
4223 * a common path later on.
4224 */
4225 if (ip_tunnel_info_af(info) != AF_INET)
074f528e 4226 goto err_clear;
4018ab18 4227set_compat:
c6c33454
DB
4228 to = (struct bpf_tunnel_key *)compat;
4229 break;
4230 default:
074f528e 4231 goto err_clear;
c6c33454
DB
4232 }
4233 }
d3aa45ce
AS
4234
4235 to->tunnel_id = be64_to_cpu(info->key.tun_id);
c6c33454
DB
4236 to->tunnel_tos = info->key.tos;
4237 to->tunnel_ttl = info->key.ttl;
1fbc2e0c 4238 to->tunnel_ext = 0;
c6c33454 4239
4018ab18 4240 if (flags & BPF_F_TUNINFO_IPV6) {
c6c33454
DB
4241 memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
4242 sizeof(to->remote_ipv6));
4018ab18
DB
4243 to->tunnel_label = be32_to_cpu(info->key.label);
4244 } else {
c6c33454 4245 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
1fbc2e0c
DB
4246 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
4247 to->tunnel_label = 0;
4018ab18 4248 }
c6c33454
DB
4249
4250 if (unlikely(size != sizeof(struct bpf_tunnel_key)))
074f528e 4251 memcpy(to_orig, to, size);
d3aa45ce
AS
4252
4253 return 0;
074f528e
DB
4254err_clear:
4255 memset(to_orig, 0, size);
4256 return err;
d3aa45ce
AS
4257}
4258
577c50aa 4259static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
d3aa45ce
AS
4260 .func = bpf_skb_get_tunnel_key,
4261 .gpl_only = false,
4262 .ret_type = RET_INTEGER,
4263 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
4264 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
4265 .arg3_type = ARG_CONST_SIZE,
d3aa45ce
AS
4266 .arg4_type = ARG_ANYTHING,
4267};
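/* Example (editor's sketch, not kernel code): reading tunnel metadata on
 * the ingress of a collect_md ("external") tunnel device, as in the
 * samples/bpf tunnel tests. The tunnel id check is a placeholder.
 */
SEC("tc")
int tunnel_in(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key;

	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0))
		return TC_ACT_SHOT;
	if (key.tunnel_id != 42)	/* placeholder tunnel id */
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}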
4268
f3694e00 4269BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
14ca0751 4270{
14ca0751 4271 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
074f528e 4272 int err;
14ca0751
DB
4273
4274 if (unlikely(!info ||
074f528e
DB
4275 !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
4276 err = -ENOENT;
4277 goto err_clear;
4278 }
4279 if (unlikely(size < info->options_len)) {
4280 err = -ENOMEM;
4281 goto err_clear;
4282 }
14ca0751
DB
4283
4284 ip_tunnel_info_opts_get(to, info);
074f528e
DB
4285 if (size > info->options_len)
4286 memset(to + info->options_len, 0, size - info->options_len);
14ca0751
DB
4287
4288 return info->options_len;
074f528e
DB
4289err_clear:
4290 memset(to, 0, size);
4291 return err;
14ca0751
DB
4292}
4293
4294static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
4295 .func = bpf_skb_get_tunnel_opt,
4296 .gpl_only = false,
4297 .ret_type = RET_INTEGER,
4298 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
4299 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
4300 .arg3_type = ARG_CONST_SIZE,
14ca0751
DB
4301};
4302
d3aa45ce
AS
4303static struct metadata_dst __percpu *md_dst;
4304
f3694e00
DB
4305BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
4306 const struct bpf_tunnel_key *, from, u32, size, u64, flags)
d3aa45ce 4307{
d3aa45ce 4308 struct metadata_dst *md = this_cpu_ptr(md_dst);
c6c33454 4309 u8 compat[sizeof(struct bpf_tunnel_key)];
d3aa45ce
AS
4310 struct ip_tunnel_info *info;
4311
22080870 4312 if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
77a5196a 4313 BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
d3aa45ce 4314 return -EINVAL;
c6c33454
DB
4315 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
4316 switch (size) {
4018ab18 4317 case offsetof(struct bpf_tunnel_key, tunnel_label):
c0e760c9 4318 case offsetof(struct bpf_tunnel_key, tunnel_ext):
c6c33454
DB
4319 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
4320 /* Fixup deprecated structure layouts here, so we have
4321 * a common path later on.
4322 */
4323 memcpy(compat, from, size);
4324 memset(compat + size, 0, sizeof(compat) - size);
f3694e00 4325 from = (const struct bpf_tunnel_key *) compat;
c6c33454
DB
4326 break;
4327 default:
4328 return -EINVAL;
4329 }
4330 }
c0e760c9
DB
4331 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
4332 from->tunnel_ext))
4018ab18 4333 return -EINVAL;
d3aa45ce
AS
4334
4335 skb_dst_drop(skb);
4336 dst_hold((struct dst_entry *) md);
4337 skb_dst_set(skb, (struct dst_entry *) md);
4338
4339 info = &md->u.tun_info;
5540fbf4 4340 memset(info, 0, sizeof(*info));
d3aa45ce 4341 info->mode = IP_TUNNEL_INFO_TX;
c6c33454 4342
db3c6139 4343 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
22080870
DB
4344 if (flags & BPF_F_DONT_FRAGMENT)
4345 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
792f3dd6
WT
4346 if (flags & BPF_F_ZERO_CSUM_TX)
4347 info->key.tun_flags &= ~TUNNEL_CSUM;
77a5196a
WT
4348 if (flags & BPF_F_SEQ_NUMBER)
4349 info->key.tun_flags |= TUNNEL_SEQ;
22080870 4350
d3aa45ce 4351 info->key.tun_id = cpu_to_be64(from->tunnel_id);
c6c33454
DB
4352 info->key.tos = from->tunnel_tos;
4353 info->key.ttl = from->tunnel_ttl;
4354
4355 if (flags & BPF_F_TUNINFO_IPV6) {
4356 info->mode |= IP_TUNNEL_INFO_IPV6;
4357 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
4358 sizeof(from->remote_ipv6));
4018ab18
DB
4359 info->key.label = cpu_to_be32(from->tunnel_label) &
4360 IPV6_FLOWLABEL_MASK;
c6c33454
DB
4361 } else {
4362 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
4363 }
d3aa45ce
AS
4364
4365 return 0;
4366}
4367
577c50aa 4368static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
d3aa45ce
AS
4369 .func = bpf_skb_set_tunnel_key,
4370 .gpl_only = false,
4371 .ret_type = RET_INTEGER,
4372 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 4373 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
39f19ebb 4374 .arg3_type = ARG_CONST_SIZE,
d3aa45ce
AS
4375 .arg4_type = ARG_ANYTHING,
4376};
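/* Example (editor's sketch, not kernel code): the egress counterpart,
 * attaching tunnel metadata before the packet reaches a collect_md
 * device. remote_ipv4 is host byte order, as the cpu_to_be32() above
 * shows; the values are placeholders.
 */
SEC("tc")
int tunnel_out(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {
		.tunnel_id = 42,		/* placeholder tunnel id */
		.remote_ipv4 = 0xc0a80001,	/* 192.168.0.1 */
		.tunnel_ttl = 64,
	};

	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_ZERO_CSUM_TX))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}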
4377
f3694e00
DB
4378BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
4379 const u8 *, from, u32, size)
14ca0751 4380{
14ca0751
DB
4381 struct ip_tunnel_info *info = skb_tunnel_info(skb);
4382 const struct metadata_dst *md = this_cpu_ptr(md_dst);
4383
4384 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
4385 return -EINVAL;
fca5fdf6 4386 if (unlikely(size > IP_TUNNEL_OPTS_MAX))
14ca0751
DB
4387 return -ENOMEM;
4388
256c87c1 4389 ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
14ca0751
DB
4390
4391 return 0;
4392}
4393
4394static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
4395 .func = bpf_skb_set_tunnel_opt,
4396 .gpl_only = false,
4397 .ret_type = RET_INTEGER,
4398 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 4399 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
39f19ebb 4400 .arg3_type = ARG_CONST_SIZE,
14ca0751
DB
4401};
4402
4403static const struct bpf_func_proto *
4404bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
d3aa45ce
AS
4405{
4406 if (!md_dst) {
d66f2b91
JK
4407 struct metadata_dst __percpu *tmp;
4408
4409 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
4410 METADATA_IP_TUNNEL,
4411 GFP_KERNEL);
4412 if (!tmp)
d3aa45ce 4413 return NULL;
d66f2b91
JK
4414 if (cmpxchg(&md_dst, NULL, tmp))
4415 metadata_dst_free_percpu(tmp);
d3aa45ce 4416 }
14ca0751
DB
4417
4418 switch (which) {
4419 case BPF_FUNC_skb_set_tunnel_key:
4420 return &bpf_skb_set_tunnel_key_proto;
4421 case BPF_FUNC_skb_set_tunnel_opt:
4422 return &bpf_skb_set_tunnel_opt_proto;
4423 default:
4424 return NULL;
4425 }
d3aa45ce
AS
4426}
4427
f3694e00
DB
4428BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
4429 u32, idx)
4a482f34 4430{
4a482f34
MKL
4431 struct bpf_array *array = container_of(map, struct bpf_array, map);
4432 struct cgroup *cgrp;
4433 struct sock *sk;
4a482f34 4434
2d48c5f9 4435 sk = skb_to_full_sk(skb);
4a482f34
MKL
4436 if (!sk || !sk_fullsock(sk))
4437 return -ENOENT;
f3694e00 4438 if (unlikely(idx >= array->map.max_entries))
4a482f34
MKL
4439 return -E2BIG;
4440
f3694e00 4441 cgrp = READ_ONCE(array->ptrs[idx]);
4a482f34
MKL
4442 if (unlikely(!cgrp))
4443 return -EAGAIN;
4444
54fd9c2d 4445 return sk_under_cgroup_hierarchy(sk, cgrp);
4a482f34
MKL
4446}
4447
747ea55e
DB
4448static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
4449 .func = bpf_skb_under_cgroup,
4a482f34
MKL
4450 .gpl_only = false,
4451 .ret_type = RET_INTEGER,
4452 .arg1_type = ARG_PTR_TO_CTX,
4453 .arg2_type = ARG_CONST_MAP_PTR,
4454 .arg3_type = ARG_ANYTHING,
4455};
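/* Example (editor's sketch, not kernel code): gating traffic on cgroup
 * membership. Userspace stores a cgroup fd in slot 0 of the array; the
 * helper returns 1 when the skb's socket sits in that hierarchy, 0 when
 * it does not, and a negative error otherwise.
 */
struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} cgrp SEC(".maps");

SEC("tc")
int cgroup_gate(struct __sk_buff *skb)
{
	if (bpf_skb_under_cgroup(skb, &cgrp, 0) != 1)
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}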
4a482f34 4456
cb20b08e 4457#ifdef CONFIG_SOCK_CGROUP_DATA
f307fa2c
AI
4458static inline u64 __bpf_sk_cgroup_id(struct sock *sk)
4459{
4460 struct cgroup *cgrp;
4461
a5fa25ad
MKL
4462 sk = sk_to_full_sk(sk);
4463 if (!sk || !sk_fullsock(sk))
4464 return 0;
4465
f307fa2c
AI
4466 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4467 return cgroup_id(cgrp);
4468}
4469
cb20b08e
DB
4470BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
4471{
a5fa25ad 4472 return __bpf_sk_cgroup_id(skb->sk);
cb20b08e
DB
4473}
4474
4475static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
4476 .func = bpf_skb_cgroup_id,
4477 .gpl_only = false,
4478 .ret_type = RET_INTEGER,
4479 .arg1_type = ARG_PTR_TO_CTX,
4480};
77236281 4481
f307fa2c
AI
4482static inline u64 __bpf_sk_ancestor_cgroup_id(struct sock *sk,
4483 int ancestor_level)
77236281 4484{
77236281
AI
4485 struct cgroup *ancestor;
4486 struct cgroup *cgrp;
4487
a5fa25ad
MKL
4488 sk = sk_to_full_sk(sk);
4489 if (!sk || !sk_fullsock(sk))
4490 return 0;
4491
77236281
AI
4492 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4493 ancestor = cgroup_ancestor(cgrp, ancestor_level);
4494 if (!ancestor)
4495 return 0;
4496
74321038 4497 return cgroup_id(ancestor);
77236281
AI
4498}
4499
f307fa2c
AI
4500BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
4501 ancestor_level)
4502{
a5fa25ad 4503 return __bpf_sk_ancestor_cgroup_id(skb->sk, ancestor_level);
f307fa2c
AI
4504}
4505
77236281
AI
4506static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
4507 .func = bpf_skb_ancestor_cgroup_id,
4508 .gpl_only = false,
4509 .ret_type = RET_INTEGER,
4510 .arg1_type = ARG_PTR_TO_CTX,
4511 .arg2_type = ARG_ANYTHING,
4512};
f307fa2c
AI
4513
4514BPF_CALL_1(bpf_sk_cgroup_id, struct sock *, sk)
4515{
4516 return __bpf_sk_cgroup_id(sk);
4517}
4518
4519static const struct bpf_func_proto bpf_sk_cgroup_id_proto = {
4520 .func = bpf_sk_cgroup_id,
4521 .gpl_only = false,
4522 .ret_type = RET_INTEGER,
a5fa25ad 4523 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
f307fa2c
AI
4524};
4525
4526BPF_CALL_2(bpf_sk_ancestor_cgroup_id, struct sock *, sk, int, ancestor_level)
4527{
4528 return __bpf_sk_ancestor_cgroup_id(sk, ancestor_level);
4529}
4530
4531static const struct bpf_func_proto bpf_sk_ancestor_cgroup_id_proto = {
4532 .func = bpf_sk_ancestor_cgroup_id,
4533 .gpl_only = false,
4534 .ret_type = RET_INTEGER,
a5fa25ad 4535 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
f307fa2c
AI
4536 .arg2_type = ARG_ANYTHING,
4537};
cb20b08e
DB
4538#endif
4539
4de16969
DB
4540static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
4541 unsigned long off, unsigned long len)
4542{
4543 memcpy(dst_buff, src_buff + off, len);
4544 return 0;
4545}
4546
f3694e00
DB
4547BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
4548 u64, flags, void *, meta, u64, meta_size)
4de16969 4549{
4de16969 4550 u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4de16969
DB
4551
4552 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
4553 return -EINVAL;
d831ee84
EC
4554 if (unlikely(!xdp ||
4555 xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
4de16969
DB
4556 return -EFAULT;
4557
9c471370
MKL
4558 return bpf_event_output(map, flags, meta, meta_size, xdp->data,
4559 xdp_size, bpf_xdp_copy);
4de16969
DB
4560}
4561
4562static const struct bpf_func_proto bpf_xdp_event_output_proto = {
4563 .func = bpf_xdp_event_output,
4564 .gpl_only = true,
4565 .ret_type = RET_INTEGER,
4566 .arg1_type = ARG_PTR_TO_CTX,
4567 .arg2_type = ARG_CONST_MAP_PTR,
4568 .arg3_type = ARG_ANYTHING,
216e3cd2 4569 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
1728a4f2 4570 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4de16969
DB
4571};
4572
9436ef6e 4573BTF_ID_LIST_SINGLE(bpf_xdp_output_btf_ids, struct, xdp_buff)
c9a0f3b8 4574
d831ee84
EC
4575const struct bpf_func_proto bpf_xdp_output_proto = {
4576 .func = bpf_xdp_event_output,
4577 .gpl_only = true,
4578 .ret_type = RET_INTEGER,
4579 .arg1_type = ARG_PTR_TO_BTF_ID,
9436ef6e 4580 .arg1_btf_id = &bpf_xdp_output_btf_ids[0],
d831ee84
EC
4581 .arg2_type = ARG_CONST_MAP_PTR,
4582 .arg3_type = ARG_ANYTHING,
216e3cd2 4583 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
d831ee84 4584 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
d831ee84
EC
4585};
4586
91b8270f
CF
4587BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
4588{
92acdc58 4589 return skb->sk ? __sock_gen_cookie(skb->sk) : 0;
91b8270f
CF
4590}
4591
4592static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
4593 .func = bpf_get_socket_cookie,
4594 .gpl_only = false,
4595 .ret_type = RET_INTEGER,
4596 .arg1_type = ARG_PTR_TO_CTX,
4597};
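/* Example (editor's sketch, not kernel code): the socket cookie as a
 * stable per-socket map key, here counting bytes per socket from tc.
 * The map name is illustrative.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 4096);
	__type(key, __u64);	/* socket cookie */
	__type(value, __u64);	/* bytes seen */
} sock_bytes SEC(".maps");

SEC("tc")
int count_bytes(struct __sk_buff *skb)
{
	__u64 cookie = bpf_get_socket_cookie(skb);
	__u64 len = skb->len, *val;

	if (!cookie)	/* no socket attached to this skb */
		return TC_ACT_OK;
	val = bpf_map_lookup_elem(&sock_bytes, &cookie);
	if (val)
		__sync_fetch_and_add(val, len);
	else
		bpf_map_update_elem(&sock_bytes, &cookie, &len, BPF_ANY);
	return TC_ACT_OK;
}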
4598
d692f113
AI
4599BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
4600{
92acdc58 4601 return __sock_gen_cookie(ctx->sk);
d692f113
AI
4602}
4603
4604static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
4605 .func = bpf_get_socket_cookie_sock_addr,
4606 .gpl_only = false,
4607 .ret_type = RET_INTEGER,
4608 .arg1_type = ARG_PTR_TO_CTX,
4609};
4610
0e53d9e5
DB
4611BPF_CALL_1(bpf_get_socket_cookie_sock, struct sock *, ctx)
4612{
92acdc58 4613 return __sock_gen_cookie(ctx);
0e53d9e5
DB
4614}
4615
4616static const struct bpf_func_proto bpf_get_socket_cookie_sock_proto = {
4617 .func = bpf_get_socket_cookie_sock,
4618 .gpl_only = false,
4619 .ret_type = RET_INTEGER,
4620 .arg1_type = ARG_PTR_TO_CTX,
4621};
4622
c5dbb89f
FR
4623BPF_CALL_1(bpf_get_socket_ptr_cookie, struct sock *, sk)
4624{
4625 return sk ? sock_gen_cookie(sk) : 0;
4626}
4627
4628const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto = {
4629 .func = bpf_get_socket_ptr_cookie,
4630 .gpl_only = false,
4631 .ret_type = RET_INTEGER,
4632 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
4633};
4634
d692f113
AI
4635BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
4636{
92acdc58 4637 return __sock_gen_cookie(ctx->sk);
d692f113
AI
4638}
4639
4640static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
4641 .func = bpf_get_socket_cookie_sock_ops,
4642 .gpl_only = false,
4643 .ret_type = RET_INTEGER,
4644 .arg1_type = ARG_PTR_TO_CTX,
4645};
4646
f318903c
DB
4647static u64 __bpf_get_netns_cookie(struct sock *sk)
4648{
3d368ab8
ED
4649 const struct net *net = sk ? sock_net(sk) : &init_net;
4650
4651 return net->net_cookie;
f318903c
DB
4652}
4653
4654BPF_CALL_1(bpf_get_netns_cookie_sock, struct sock *, ctx)
4655{
4656 return __bpf_get_netns_cookie(ctx);
4657}
4658
4659static const struct bpf_func_proto bpf_get_netns_cookie_sock_proto = {
4660 .func = bpf_get_netns_cookie_sock,
4661 .gpl_only = false,
4662 .ret_type = RET_INTEGER,
4663 .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
4664};
4665
4666BPF_CALL_1(bpf_get_netns_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
4667{
4668 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL);
4669}
4670
4671static const struct bpf_func_proto bpf_get_netns_cookie_sock_addr_proto = {
4672 .func = bpf_get_netns_cookie_sock_addr,
4673 .gpl_only = false,
4674 .ret_type = RET_INTEGER,
4675 .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
4676};
4677
6cf1770d
XL
4678BPF_CALL_1(bpf_get_netns_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
4679{
4680 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL);
4681}
4682
4683static const struct bpf_func_proto bpf_get_netns_cookie_sock_ops_proto = {
4684 .func = bpf_get_netns_cookie_sock_ops,
4685 .gpl_only = false,
4686 .ret_type = RET_INTEGER,
4687 .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
4688};
4689
fab60e29
XL
4690BPF_CALL_1(bpf_get_netns_cookie_sk_msg, struct sk_msg *, ctx)
4691{
4692 return __bpf_get_netns_cookie(ctx ? ctx->sk : NULL);
4693}
4694
4695static const struct bpf_func_proto bpf_get_netns_cookie_sk_msg_proto = {
4696 .func = bpf_get_netns_cookie_sk_msg,
4697 .gpl_only = false,
4698 .ret_type = RET_INTEGER,
4699 .arg1_type = ARG_PTR_TO_CTX_OR_NULL,
4700};
4701
6acc5c29
CF
4702BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
4703{
4704 struct sock *sk = sk_to_full_sk(skb->sk);
4705 kuid_t kuid;
4706
4707 if (!sk || !sk_fullsock(sk))
4708 return overflowuid;
4709 kuid = sock_net_uid(sock_net(sk), sk);
4710 return from_kuid_munged(sock_net(sk)->user_ns, kuid);
4711}
4712
4713static const struct bpf_func_proto bpf_get_socket_uid_proto = {
4714 .func = bpf_get_socket_uid,
4715 .gpl_only = false,
4716 .ret_type = RET_INTEGER,
4717 .arg1_type = ARG_PTR_TO_CTX,
4718};
4719
beecf11b 4720static int _bpf_setsockopt(struct sock *sk, int level, int optname,
5cdc744c 4721 char *optval, int optlen)
8c4b4c7e 4722{
70c58997 4723 char devname[IFNAMSIZ];
f9bcf968 4724 int val, valbool;
70c58997
FF
4725 struct net *net;
4726 int ifindex;
8c4b4c7e 4727 int ret = 0;
8c4b4c7e
LB
4728
4729 if (!sk_fullsock(sk))
4730 return -EINVAL;
4731
beecf11b
SF
4732 sock_owned_by_me(sk);
4733
8c4b4c7e 4734 if (level == SOL_SOCKET) {
70c58997 4735 if (optlen != sizeof(int) && optname != SO_BINDTODEVICE)
8c4b4c7e
LB
4736 return -EINVAL;
4737 val = *((int *)optval);
f9bcf968 4738 valbool = val ? 1 : 0;
8c4b4c7e
LB
4739
4740 /* Only some socketops are supported */
4741 switch (optname) {
4742 case SO_RCVBUF:
c9e45767 4743 val = min_t(u32, val, sysctl_rmem_max);
04c350b1 4744 val = min_t(int, val, INT_MAX / 2);
8c4b4c7e 4745 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
ebb3b78d
ED
4746 WRITE_ONCE(sk->sk_rcvbuf,
4747 max_t(int, val * 2, SOCK_MIN_RCVBUF));
8c4b4c7e
LB
4748 break;
4749 case SO_SNDBUF:
c9e45767 4750 val = min_t(u32, val, sysctl_wmem_max);
04c350b1 4751 val = min_t(int, val, INT_MAX / 2);
8c4b4c7e 4752 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
e292f05e
ED
4753 WRITE_ONCE(sk->sk_sndbuf,
4754 max_t(int, val * 2, SOCK_MIN_SNDBUF));
8c4b4c7e 4755 break;
76a9ebe8 4756 case SO_MAX_PACING_RATE: /* 32bit version */
e224c390
YC
4757 if (val != ~0U)
4758 cmpxchg(&sk->sk_pacing_status,
4759 SK_PACING_NONE,
4760 SK_PACING_NEEDED);
700465fd
KL
4761 sk->sk_max_pacing_rate = (val == ~0U) ?
4762 ~0UL : (unsigned int)val;
8c4b4c7e
LB
4763 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4764 sk->sk_max_pacing_rate);
4765 break;
4766 case SO_PRIORITY:
4767 sk->sk_priority = val;
4768 break;
4769 case SO_RCVLOWAT:
4770 if (val < 0)
4771 val = INT_MAX;
eac66402 4772 WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
8c4b4c7e
LB
4773 break;
4774 case SO_MARK:
f4924f24
PO
4775 if (sk->sk_mark != val) {
4776 sk->sk_mark = val;
4777 sk_dst_reset(sk);
4778 }
8c4b4c7e 4779 break;
70c58997 4780 case SO_BINDTODEVICE:
70c58997
FF
4781 optlen = min_t(long, optlen, IFNAMSIZ - 1);
4782 strncpy(devname, optval, optlen);
4783 devname[optlen] = 0;
4784
4785 ifindex = 0;
4786 if (devname[0] != '\0') {
4787 struct net_device *dev;
4788
4789 ret = -ENODEV;
4790
4791 net = sock_net(sk);
4792 dev = dev_get_by_name(net, devname);
4793 if (!dev)
4794 break;
4795 ifindex = dev->ifindex;
4796 dev_put(dev);
4797 }
bcd6f4a8
DB
4798 fallthrough;
4799 case SO_BINDTOIFINDEX:
4800 if (optname == SO_BINDTOIFINDEX)
4801 ifindex = val;
70c58997 4802 ret = sock_bindtoindex(sk, ifindex, false);
70c58997 4803 break;
f9bcf968
DY
4804 case SO_KEEPALIVE:
4805 if (sk->sk_prot->keepalive)
4806 sk->sk_prot->keepalive(sk, valbool);
4807 sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
4808 break;
6503b9f2
MB
4809 case SO_REUSEPORT:
4810 sk->sk_reuseport = valbool;
4811 break;
8c4b4c7e
LB
4812 default:
4813 ret = -EINVAL;
4814 }
a5192c52 4815#ifdef CONFIG_INET
6f5c39fa
NS
4816 } else if (level == SOL_IP) {
4817 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4818 return -EINVAL;
4819
4820 val = *((int *)optval);
4821 /* Only some options are supported */
4822 switch (optname) {
4823 case IP_TOS:
4824 if (val < -1 || val > 0xff) {
4825 ret = -EINVAL;
4826 } else {
4827 struct inet_sock *inet = inet_sk(sk);
4828
4829 if (val == -1)
4830 val = 0;
4831 inet->tos = val;
4832 }
4833 break;
4834 default:
4835 ret = -EINVAL;
4836 }
6f9bd3d7
LB
4837#if IS_ENABLED(CONFIG_IPV6)
4838 } else if (level == SOL_IPV6) {
4839 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4840 return -EINVAL;
4841
4842 val = *((int *)optval);
4843 /* Only some options are supported */
4844 switch (optname) {
4845 case IPV6_TCLASS:
4846 if (val < -1 || val > 0xff) {
4847 ret = -EINVAL;
4848 } else {
4849 struct ipv6_pinfo *np = inet6_sk(sk);
4850
4851 if (val == -1)
4852 val = 0;
4853 np->tclass = val;
4854 }
4855 break;
4856 default:
4857 ret = -EINVAL;
4858 }
4859#endif
8c4b4c7e
LB
4860 } else if (level == SOL_TCP &&
4861 sk->sk_prot->setsockopt == tcp_setsockopt) {
91b5b21c
LB
4862 if (optname == TCP_CONGESTION) {
4863 char name[TCP_CA_NAME_MAX];
4864
4865 strncpy(name, optval, min_t(long, optlen,
4866 TCP_CA_NAME_MAX-1));
4867 name[TCP_CA_NAME_MAX-1] = 0;
29a94932 4868 ret = tcp_set_congestion_control(sk, name, false, true);
91b5b21c 4869 } else {
f9bcf968 4870 struct inet_connection_sock *icsk = inet_csk(sk);
fc747810 4871 struct tcp_sock *tp = tcp_sk(sk);
2b8ee4f0 4872 unsigned long timeout;
fc747810
LB
4873
4874 if (optlen != sizeof(int))
4875 return -EINVAL;
4876
4877 val = *((int *)optval);
4878 /* Only some options are supported */
4879 switch (optname) {
4880 case TCP_BPF_IW:
31aa6503 4881 if (val <= 0 || tp->data_segs_out > tp->syn_data)
fc747810
LB
4882 ret = -EINVAL;
4883 else
4884 tp->snd_cwnd = val;
4885 break;
13bf9641
LB
4886 case TCP_BPF_SNDCWND_CLAMP:
4887 if (val <= 0) {
4888 ret = -EINVAL;
4889 } else {
4890 tp->snd_cwnd_clamp = val;
4891 tp->snd_ssthresh = val;
4892 }
6d3f06a0 4893 break;
2b8ee4f0
MKL
4894 case TCP_BPF_DELACK_MAX:
4895 timeout = usecs_to_jiffies(val);
4896 if (timeout > TCP_DELACK_MAX ||
4897 timeout < TCP_TIMEOUT_MIN)
4898 return -EINVAL;
4899 inet_csk(sk)->icsk_delack_max = timeout;
4900 break;
ca584ba0
MKL
4901 case TCP_BPF_RTO_MIN:
4902 timeout = usecs_to_jiffies(val);
4903 if (timeout > TCP_RTO_MIN ||
4904 timeout < TCP_TIMEOUT_MIN)
4905 return -EINVAL;
4906 inet_csk(sk)->icsk_rto_min = timeout;
4907 break;
1e215300
NS
4908 case TCP_SAVE_SYN:
4909 if (val < 0 || val > 1)
4910 ret = -EINVAL;
4911 else
4912 tp->save_syn = val;
4913 break;
f9bcf968
DY
4914 case TCP_KEEPIDLE:
4915 ret = tcp_sock_set_keepidle_locked(sk, val);
4916 break;
4917 case TCP_KEEPINTVL:
4918 if (val < 1 || val > MAX_TCP_KEEPINTVL)
4919 ret = -EINVAL;
4920 else
4921 tp->keepalive_intvl = val * HZ;
4922 break;
4923 case TCP_KEEPCNT:
4924 if (val < 1 || val > MAX_TCP_KEEPCNT)
4925 ret = -EINVAL;
4926 else
4927 tp->keepalive_probes = val;
4928 break;
4929 case TCP_SYNCNT:
4930 if (val < 1 || val > MAX_TCP_SYNCNT)
4931 ret = -EINVAL;
4932 else
4933 icsk->icsk_syn_retries = val;
4934 break;
4935 case TCP_USER_TIMEOUT:
4936 if (val < 0)
4937 ret = -EINVAL;
4938 else
4939 icsk->icsk_user_timeout = val;
4940 break;
eca43ee6
NS
4941 case TCP_NOTSENT_LOWAT:
4942 tp->notsent_lowat = val;
4943 sk->sk_write_space(sk);
4944 break;
cb811109
P
4945 case TCP_WINDOW_CLAMP:
4946 ret = tcp_set_window_clamp(sk, val);
4947 break;
fc747810
LB
4948 default:
4949 ret = -EINVAL;
4950 }
91b5b21c 4951 }
91b5b21c 4952#endif
8c4b4c7e
LB
4953 } else {
4954 ret = -EINVAL;
4955 }
4956 return ret;
4957}
4958
beecf11b
SF
4959static int _bpf_getsockopt(struct sock *sk, int level, int optname,
4960 char *optval, int optlen)
cd86d1fd 4961{
cd86d1fd
LB
4962 if (!sk_fullsock(sk))
4963 goto err_clear;
beecf11b
SF
4964
4965 sock_owned_by_me(sk);
4966
bcd6f4a8
DB
4967 if (level == SOL_SOCKET) {
4968 if (optlen != sizeof(int))
4969 goto err_clear;
4970
4971 switch (optname) {
4972 case SO_MARK:
4973 *((int *)optval) = sk->sk_mark;
4974 break;
4975 case SO_PRIORITY:
4976 *((int *)optval) = sk->sk_priority;
4977 break;
4978 case SO_BINDTOIFINDEX:
4979 *((int *)optval) = sk->sk_bound_dev_if;
4980 break;
6503b9f2
MB
4981 case SO_REUSEPORT:
4982 *((int *)optval) = sk->sk_reuseport;
4983 break;
bcd6f4a8
DB
4984 default:
4985 goto err_clear;
4986 }
cd86d1fd 4987#ifdef CONFIG_INET
bcd6f4a8 4988 } else if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
1edb6e03
AR
4989 struct inet_connection_sock *icsk;
4990 struct tcp_sock *tp;
4991
1e215300
NS
4992 switch (optname) {
4993 case TCP_CONGESTION:
4994 icsk = inet_csk(sk);
cd86d1fd
LB
4995
4996 if (!icsk->icsk_ca_ops || optlen <= 1)
4997 goto err_clear;
4998 strncpy(optval, icsk->icsk_ca_ops->name, optlen);
4999 optval[optlen - 1] = 0;
1e215300
NS
5000 break;
5001 case TCP_SAVED_SYN:
5002 tp = tcp_sk(sk);
5003
5004 if (optlen <= 0 || !tp->saved_syn ||
70a217f1 5005 optlen > tcp_saved_syn_len(tp->saved_syn))
1e215300 5006 goto err_clear;
70a217f1 5007 memcpy(optval, tp->saved_syn->data, optlen);
1e215300
NS
5008 break;
5009 default:
cd86d1fd
LB
5010 goto err_clear;
5011 }
6f5c39fa
NS
5012 } else if (level == SOL_IP) {
5013 struct inet_sock *inet = inet_sk(sk);
5014
5015 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
5016 goto err_clear;
5017
5018 /* Only some options are supported */
5019 switch (optname) {
5020 case IP_TOS:
5021 *((int *)optval) = (int)inet->tos;
5022 break;
5023 default:
5024 goto err_clear;
5025 }
6f9bd3d7
LB
5026#if IS_ENABLED(CONFIG_IPV6)
5027 } else if (level == SOL_IPV6) {
5028 struct ipv6_pinfo *np = inet6_sk(sk);
5029
5030 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
5031 goto err_clear;
5032
5033 /* Only some options are supported */
5034 switch (optname) {
5035 case IPV6_TCLASS:
5036 *((int *)optval) = (int)np->tclass;
5037 break;
5038 default:
5039 goto err_clear;
5040 }
bcd6f4a8 5041#endif
6f9bd3d7 5042#endif
cd86d1fd
LB
5043 } else {
5044 goto err_clear;
5045 }
aa2bc739 5046 return 0;
cd86d1fd
LB
5047err_clear:
5048 memset(optval, 0, optlen);
5049 return -EINVAL;
5050}
5051
3cee6fb8
MKL
5052BPF_CALL_5(bpf_sk_setsockopt, struct sock *, sk, int, level,
5053 int, optname, char *, optval, int, optlen)
5054{
eb18b49e
MKL
5055 if (level == SOL_TCP && optname == TCP_CONGESTION) {
5056 if (optlen >= sizeof("cdg") - 1 &&
5057 !strncmp("cdg", optval, optlen))
5058 return -ENOTSUPP;
5059 }
5060
3cee6fb8
MKL
5061 return _bpf_setsockopt(sk, level, optname, optval, optlen);
5062}
5063
5064const struct bpf_func_proto bpf_sk_setsockopt_proto = {
5065 .func = bpf_sk_setsockopt,
5066 .gpl_only = false,
5067 .ret_type = RET_INTEGER,
5068 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
5069 .arg2_type = ARG_ANYTHING,
5070 .arg3_type = ARG_ANYTHING,
216e3cd2 5071 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
3cee6fb8
MKL
5072 .arg5_type = ARG_CONST_SIZE,
5073};
5074
5075BPF_CALL_5(bpf_sk_getsockopt, struct sock *, sk, int, level,
5076 int, optname, char *, optval, int, optlen)
5077{
5078 return _bpf_getsockopt(sk, level, optname, optval, optlen);
5079}
5080
5081const struct bpf_func_proto bpf_sk_getsockopt_proto = {
5082 .func = bpf_sk_getsockopt,
5083 .gpl_only = false,
5084 .ret_type = RET_INTEGER,
5085 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
5086 .arg2_type = ARG_ANYTHING,
5087 .arg3_type = ARG_ANYTHING,
5088 .arg4_type = ARG_PTR_TO_UNINIT_MEM,
5089 .arg5_type = ARG_CONST_SIZE,
5090};
5091
beecf11b
SF
5092BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
5093 int, level, int, optname, char *, optval, int, optlen)
5094{
5cdc744c 5095 return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen);
beecf11b
SF
5096}
5097
5098static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {
5099 .func = bpf_sock_addr_setsockopt,
5100 .gpl_only = false,
5101 .ret_type = RET_INTEGER,
5102 .arg1_type = ARG_PTR_TO_CTX,
5103 .arg2_type = ARG_ANYTHING,
5104 .arg3_type = ARG_ANYTHING,
216e3cd2 5105 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
beecf11b
SF
5106 .arg5_type = ARG_CONST_SIZE,
5107};
5108
5109BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx,
5110 int, level, int, optname, char *, optval, int, optlen)
5111{
5112 return _bpf_getsockopt(ctx->sk, level, optname, optval, optlen);
5113}
5114
5115static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
5116 .func = bpf_sock_addr_getsockopt,
5117 .gpl_only = false,
5118 .ret_type = RET_INTEGER,
5119 .arg1_type = ARG_PTR_TO_CTX,
5120 .arg2_type = ARG_ANYTHING,
5121 .arg3_type = ARG_ANYTHING,
5122 .arg4_type = ARG_PTR_TO_UNINIT_MEM,
5123 .arg5_type = ARG_CONST_SIZE,
5124};
5125
5126BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
5127 int, level, int, optname, char *, optval, int, optlen)
5128{
5cdc744c 5129 return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen);
beecf11b
SF
5130}
5131
5132static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
5133 .func = bpf_sock_ops_setsockopt,
5134 .gpl_only = false,
5135 .ret_type = RET_INTEGER,
5136 .arg1_type = ARG_PTR_TO_CTX,
5137 .arg2_type = ARG_ANYTHING,
5138 .arg3_type = ARG_ANYTHING,
216e3cd2 5139 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
beecf11b
SF
5140 .arg5_type = ARG_CONST_SIZE,
5141};
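/* Example (editor's sketch, not kernel code): a sockops program hitting
 * the SO_RCVBUF/SO_SNDBUF paths of _bpf_setsockopt() above. The value is
 * clamped against sysctl_rmem_max/sysctl_wmem_max and INT_MAX / 2, then
 * doubled, mirroring regular setsockopt() semantics.
 */
SEC("sockops")
int tune_bufs(struct bpf_sock_ops *skops)
{
	int bufsize = 1 << 20;	/* request ~1 MiB; kernel stores 2x */

	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB ||
	    skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) {
		bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
			       &bufsize, sizeof(bufsize));
		bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF,
			       &bufsize, sizeof(bufsize));
	}
	return 1;
}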
5142
0813a841
MKL
5143static int bpf_sock_ops_get_syn(struct bpf_sock_ops_kern *bpf_sock,
5144 int optname, const u8 **start)
5145{
5146 struct sk_buff *syn_skb = bpf_sock->syn_skb;
5147 const u8 *hdr_start;
5148 int ret;
5149
5150 if (syn_skb) {
5151 /* sk is a request_sock here */
5152
5153 if (optname == TCP_BPF_SYN) {
5154 hdr_start = syn_skb->data;
5155 ret = tcp_hdrlen(syn_skb);
267cf9fa 5156 } else if (optname == TCP_BPF_SYN_IP) {
0813a841
MKL
5157 hdr_start = skb_network_header(syn_skb);
5158 ret = skb_network_header_len(syn_skb) +
5159 tcp_hdrlen(syn_skb);
267cf9fa
MKL
5160 } else {
5161 /* optname == TCP_BPF_SYN_MAC */
5162 hdr_start = skb_mac_header(syn_skb);
5163 ret = skb_mac_header_len(syn_skb) +
5164 skb_network_header_len(syn_skb) +
5165 tcp_hdrlen(syn_skb);
0813a841
MKL
5166 }
5167 } else {
5168 struct sock *sk = bpf_sock->sk;
5169 struct saved_syn *saved_syn;
5170
5171 if (sk->sk_state == TCP_NEW_SYN_RECV)
5172 /* synack retransmit. bpf_sock->syn_skb will
5173 * not be available. It has to resort to
5174 * saved_syn (if it is saved).
5175 */
5176 saved_syn = inet_reqsk(sk)->saved_syn;
5177 else
5178 saved_syn = tcp_sk(sk)->saved_syn;
5179
5180 if (!saved_syn)
5181 return -ENOENT;
5182
5183 if (optname == TCP_BPF_SYN) {
5184 hdr_start = saved_syn->data +
267cf9fa 5185 saved_syn->mac_hdrlen +
0813a841
MKL
5186 saved_syn->network_hdrlen;
5187 ret = saved_syn->tcp_hdrlen;
267cf9fa
MKL
5188 } else if (optname == TCP_BPF_SYN_IP) {
5189 hdr_start = saved_syn->data +
5190 saved_syn->mac_hdrlen;
5191 ret = saved_syn->network_hdrlen +
5192 saved_syn->tcp_hdrlen;
0813a841 5193 } else {
267cf9fa
MKL
5194 /* optname == TCP_BPF_SYN_MAC */
5195
5196 /* TCP_SAVE_SYN may not have saved the mac hdr */
5197 if (!saved_syn->mac_hdrlen)
5198 return -ENOENT;
5199
0813a841 5200 hdr_start = saved_syn->data;
267cf9fa
MKL
5201 ret = saved_syn->mac_hdrlen +
5202 saved_syn->network_hdrlen +
0813a841
MKL
5203 saved_syn->tcp_hdrlen;
5204 }
5205 }
5206
5207 *start = hdr_start;
5208 return ret;
5209}
5210
beecf11b
SF
5211BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
5212 int, level, int, optname, char *, optval, int, optlen)
5213{
0813a841 5214 if (IS_ENABLED(CONFIG_INET) && level == SOL_TCP &&
267cf9fa 5215 optname >= TCP_BPF_SYN && optname <= TCP_BPF_SYN_MAC) {
0813a841
MKL
5216 int ret, copy_len = 0;
5217 const u8 *start;
5218
5219 ret = bpf_sock_ops_get_syn(bpf_sock, optname, &start);
5220 if (ret > 0) {
5221 copy_len = ret;
5222 if (optlen < copy_len) {
5223 copy_len = optlen;
5224 ret = -ENOSPC;
5225 }
5226
5227 memcpy(optval, start, copy_len);
5228 }
5229
5230 /* Zero out unused buffer at the end */
5231 memset(optval + copy_len, 0, optlen - copy_len);
5232
5233 return ret;
5234 }
5235
beecf11b
SF
5236 return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen);
5237}
5238
5239static const struct bpf_func_proto bpf_sock_ops_getsockopt_proto = {
5240 .func = bpf_sock_ops_getsockopt,
cd86d1fd
LB
5241 .gpl_only = false,
5242 .ret_type = RET_INTEGER,
5243 .arg1_type = ARG_PTR_TO_CTX,
5244 .arg2_type = ARG_ANYTHING,
5245 .arg3_type = ARG_ANYTHING,
5246 .arg4_type = ARG_PTR_TO_UNINIT_MEM,
5247 .arg5_type = ARG_CONST_SIZE,
5248};
5249
b13d8807
LB
5250BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
5251 int, argval)
5252{
5253 struct sock *sk = bpf_sock->sk;
5254 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
5255
a7dcdf6e 5256 if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
b13d8807
LB
5257 return -EINVAL;
5258
725721a6 5259 tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
b13d8807
LB
5260
5261 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
b13d8807
LB
5262}
5263
5264static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
5265 .func = bpf_sock_ops_cb_flags_set,
5266 .gpl_only = false,
5267 .ret_type = RET_INTEGER,
5268 .arg1_type = ARG_PTR_TO_CTX,
5269 .arg2_type = ARG_ANYTHING,
5270};
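/* Example (editor's sketch, not kernel code): opting in to additional
 * sockops callbacks once a connection is established. The helper returns
 * any requested bits it could not enable.
 */
SEC("sockops")
int enable_cbs(struct bpf_sock_ops *skops)
{
	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
		bpf_sock_ops_cb_flags_set(skops,
					  BPF_SOCK_OPS_RETRANS_CB_FLAG |
					  BPF_SOCK_OPS_STATE_CB_FLAG);
	return 1;
}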
5271
d74bad4e
AI
5272const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
5273EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
5274
5275BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
5276 int, addr_len)
5277{
5278#ifdef CONFIG_INET
5279 struct sock *sk = ctx->sk;
8086fbaf 5280 u32 flags = BIND_FROM_BPF;
d74bad4e
AI
5281 int err;
5282
d74bad4e 5283 err = -EINVAL;
ba024f25
TH
5284 if (addr_len < offsetofend(struct sockaddr, sa_family))
5285 return err;
d74bad4e
AI
5286 if (addr->sa_family == AF_INET) {
5287 if (addr_len < sizeof(struct sockaddr_in))
5288 return err;
8086fbaf
SF
5289 if (((struct sockaddr_in *)addr)->sin_port == htons(0))
5290 flags |= BIND_FORCE_ADDRESS_NO_PORT;
5291 return __inet_bind(sk, addr, addr_len, flags);
d74bad4e
AI
5292#if IS_ENABLED(CONFIG_IPV6)
5293 } else if (addr->sa_family == AF_INET6) {
5294 if (addr_len < SIN6_LEN_RFC2133)
5295 return err;
8086fbaf
SF
5296 if (((struct sockaddr_in6 *)addr)->sin6_port == htons(0))
5297 flags |= BIND_FORCE_ADDRESS_NO_PORT;
d74bad4e
AI
 5298 /* ipv6_bpf_stub cannot be NULL, since this is called from the
 5299 * bpf_cgroup_inet6_connect hook and ipv6 is already loaded
5300 */
8086fbaf 5301 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, flags);
d74bad4e
AI
5302#endif /* CONFIG_IPV6 */
5303 }
5304#endif /* CONFIG_INET */
5305
5306 return -EAFNOSUPPORT;
5307}
5308
5309static const struct bpf_func_proto bpf_bind_proto = {
5310 .func = bpf_bind,
5311 .gpl_only = false,
5312 .ret_type = RET_INTEGER,
5313 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 5314 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
d74bad4e
AI
5315 .arg3_type = ARG_CONST_SIZE,
5316};
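/* Example (editor's sketch, not kernel code): pinning the source address
 * from a cgroup/connect4 hook via bpf_bind(). Leaving sin_port at 0 sets
 * BIND_FORCE_ADDRESS_NO_PORT above, deferring port selection to
 * connect(). The address is a placeholder.
 */
SEC("cgroup/connect4")
int bind_src(struct bpf_sock_addr *ctx)
{
	struct sockaddr_in sa = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = bpf_htonl(0x0a000001),	/* 10.0.0.1 */
	};

	bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa));
	return 1;	/* allow the connect() to proceed */
}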
5317
12bed760
EB
5318#ifdef CONFIG_XFRM
5319BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
5320 struct bpf_xfrm_state *, to, u32, size, u64, flags)
5321{
5322 const struct sec_path *sp = skb_sec_path(skb);
5323 const struct xfrm_state *x;
5324
5325 if (!sp || unlikely(index >= sp->len || flags))
5326 goto err_clear;
5327
5328 x = sp->xvec[index];
5329
5330 if (unlikely(size != sizeof(struct bpf_xfrm_state)))
5331 goto err_clear;
5332
5333 to->reqid = x->props.reqid;
5334 to->spi = x->id.spi;
5335 to->family = x->props.family;
1fbc2e0c
DB
5336 to->ext = 0;
5337
12bed760
EB
5338 if (to->family == AF_INET6) {
5339 memcpy(to->remote_ipv6, x->props.saddr.a6,
5340 sizeof(to->remote_ipv6));
5341 } else {
5342 to->remote_ipv4 = x->props.saddr.a4;
1fbc2e0c 5343 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
12bed760
EB
5344 }
5345
5346 return 0;
5347err_clear:
5348 memset(to, 0, size);
5349 return -EINVAL;
5350}
5351
5352static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
5353 .func = bpf_skb_get_xfrm_state,
5354 .gpl_only = false,
5355 .ret_type = RET_INTEGER,
5356 .arg1_type = ARG_PTR_TO_CTX,
5357 .arg2_type = ARG_ANYTHING,
5358 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
5359 .arg4_type = ARG_CONST_SIZE,
5360 .arg5_type = ARG_ANYTHING,
5361};
5362#endif
5363
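/* Illustrative sketch (not part of the kernel source): a tc/clsact program
 * could read the first xfrm state of a decapsulated skb; the helper returns
 * 0 on success. Program and section names are hypothetical.
 *
 *	SEC("tc")
 *	int read_xfrm(struct __sk_buff *skb)
 *	{
 *		struct bpf_xfrm_state st;
 *
 *		if (!bpf_skb_get_xfrm_state(skb, 0, &st, sizeof(st), 0))
 *			bpf_printk("reqid=%d spi=0x%x", st.reqid,
 *				   bpf_ntohl(st.spi));
 *		return TC_ACT_OK;
 *	}
 */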
87f5fc7e
DA
5364#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
5365static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
5366 const struct neighbour *neigh,
e1850ea9 5367 const struct net_device *dev, u32 mtu)
87f5fc7e
DA
5368{
5369 memcpy(params->dmac, neigh->ha, ETH_ALEN);
5370 memcpy(params->smac, dev->dev_addr, ETH_ALEN);
5371 params->h_vlan_TCI = 0;
5372 params->h_vlan_proto = 0;
e1850ea9
JDB
5373 if (mtu)
5374 params->mtu_result = mtu; /* union with tot_len */
87f5fc7e 5375
4c79579b 5376 return 0;
87f5fc7e
DA
5377}
5378#endif
5379
5380#if IS_ENABLED(CONFIG_INET)
5381static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4f74fede 5382 u32 flags, bool check_mtu)
87f5fc7e 5383{
eba618ab 5384 struct fib_nh_common *nhc;
87f5fc7e
DA
5385 struct in_device *in_dev;
5386 struct neighbour *neigh;
5387 struct net_device *dev;
5388 struct fib_result res;
87f5fc7e 5389 struct flowi4 fl4;
e1850ea9 5390 u32 mtu = 0;
87f5fc7e
DA
5391 int err;
5392
5393 dev = dev_get_by_index_rcu(net, params->ifindex);
5394 if (unlikely(!dev))
5395 return -ENODEV;
5396
5397 /* verify forwarding is enabled on this interface */
5398 in_dev = __in_dev_get_rcu(dev);
5399 if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
4c79579b 5400 return BPF_FIB_LKUP_RET_FWD_DISABLED;
87f5fc7e
DA
5401
5402 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
5403 fl4.flowi4_iif = 1;
5404 fl4.flowi4_oif = params->ifindex;
5405 } else {
5406 fl4.flowi4_iif = params->ifindex;
5407 fl4.flowi4_oif = 0;
5408 }
5409 fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
5410 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
5411 fl4.flowi4_flags = 0;
5412
5413 fl4.flowi4_proto = params->l4_protocol;
5414 fl4.daddr = params->ipv4_dst;
5415 fl4.saddr = params->ipv4_src;
5416 fl4.fl4_sport = params->sport;
5417 fl4.fl4_dport = params->dport;
1869e226 5418 fl4.flowi4_multipath_hash = 0;
87f5fc7e
DA
5419
5420 if (flags & BPF_FIB_LOOKUP_DIRECT) {
5421 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
5422 struct fib_table *tb;
5423
5424 tb = fib_get_table(net, tbid);
5425 if (unlikely(!tb))
4c79579b 5426 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e
DA
5427
5428 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
5429 } else {
5430 fl4.flowi4_mark = 0;
5431 fl4.flowi4_secid = 0;
5432 fl4.flowi4_tun_key.tun_id = 0;
5433 fl4.flowi4_uid = sock_net_uid(net, NULL);
5434
5435 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
5436 }
5437
4c79579b
DA
5438 if (err) {
5439 /* map fib lookup errors to RTN_ type */
5440 if (err == -EINVAL)
5441 return BPF_FIB_LKUP_RET_BLACKHOLE;
5442 if (err == -EHOSTUNREACH)
5443 return BPF_FIB_LKUP_RET_UNREACHABLE;
5444 if (err == -EACCES)
5445 return BPF_FIB_LKUP_RET_PROHIBIT;
5446
5447 return BPF_FIB_LKUP_RET_NOT_FWDED;
5448 }
5449
5450 if (res.type != RTN_UNICAST)
5451 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e 5452
5481d73f 5453 if (fib_info_num_path(res.fi) > 1)
87f5fc7e
DA
5454 fib_select_path(net, &res, &fl4, NULL);
5455
4f74fede
DA
5456 if (check_mtu) {
5457 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
e1850ea9
JDB
5458 if (params->tot_len > mtu) {
5459 params->mtu_result = mtu; /* union with tot_len */
4c79579b 5460 return BPF_FIB_LKUP_RET_FRAG_NEEDED;
e1850ea9 5461 }
4f74fede
DA
5462 }
5463
eba618ab 5464 nhc = res.nhc;
87f5fc7e
DA
5465
5466 /* do not handle lwt encaps right now */
eba618ab 5467 if (nhc->nhc_lwtstate)
4c79579b 5468 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
87f5fc7e 5469
eba618ab 5470 dev = nhc->nhc_dev;
87f5fc7e
DA
5471
5472 params->rt_metric = res.fi->fib_priority;
d1c362e1 5473 params->ifindex = dev->ifindex;
87f5fc7e
DA
5474
 5475 /* xdp and cls_bpf programs are run in RCU-bh context, so
 5476 * rcu_read_lock_bh is not needed here
 5477 */
6f5f68d0
DA
5478 if (likely(nhc->nhc_gw_family != AF_INET6)) {
5479 if (nhc->nhc_gw_family)
5480 params->ipv4_dst = nhc->nhc_gw.ipv4;
5481
5482 neigh = __ipv4_neigh_lookup_noref(dev,
5483 (__force u32)params->ipv4_dst);
5484 } else {
5485 struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
5486
5487 params->family = AF_INET6;
5488 *dst = nhc->nhc_gw.ipv6;
5489 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
5490 }
5491
4c79579b
DA
5492 if (!neigh)
5493 return BPF_FIB_LKUP_RET_NO_NEIGH;
87f5fc7e 5494
e1850ea9 5495 return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
87f5fc7e
DA
5496}
5497#endif
5498
5499#if IS_ENABLED(CONFIG_IPV6)
5500static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4f74fede 5501 u32 flags, bool check_mtu)
87f5fc7e
DA
5502{
5503 struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
5504 struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
e55449e7 5505 struct fib6_result res = {};
87f5fc7e
DA
5506 struct neighbour *neigh;
5507 struct net_device *dev;
5508 struct inet6_dev *idev;
87f5fc7e
DA
5509 struct flowi6 fl6;
5510 int strict = 0;
effda4dd 5511 int oif, err;
e1850ea9 5512 u32 mtu = 0;
87f5fc7e
DA
5513
5514 /* link local addresses are never forwarded */
5515 if (rt6_need_strict(dst) || rt6_need_strict(src))
4c79579b 5516 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e
DA
5517
5518 dev = dev_get_by_index_rcu(net, params->ifindex);
5519 if (unlikely(!dev))
5520 return -ENODEV;
5521
5522 idev = __in6_dev_get_safely(dev);
56f0f84e 5523 if (unlikely(!idev || !idev->cnf.forwarding))
4c79579b 5524 return BPF_FIB_LKUP_RET_FWD_DISABLED;
87f5fc7e
DA
5525
5526 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
5527 fl6.flowi6_iif = 1;
5528 oif = fl6.flowi6_oif = params->ifindex;
5529 } else {
5530 oif = fl6.flowi6_iif = params->ifindex;
5531 fl6.flowi6_oif = 0;
5532 strict = RT6_LOOKUP_F_HAS_SADDR;
5533 }
bd3a08aa 5534 fl6.flowlabel = params->flowinfo;
87f5fc7e
DA
5535 fl6.flowi6_scope = 0;
5536 fl6.flowi6_flags = 0;
5537 fl6.mp_hash = 0;
5538
5539 fl6.flowi6_proto = params->l4_protocol;
5540 fl6.daddr = *dst;
5541 fl6.saddr = *src;
5542 fl6.fl6_sport = params->sport;
5543 fl6.fl6_dport = params->dport;
5544
5545 if (flags & BPF_FIB_LOOKUP_DIRECT) {
5546 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
5547 struct fib6_table *tb;
5548
5549 tb = ipv6_stub->fib6_get_table(net, tbid);
5550 if (unlikely(!tb))
4c79579b 5551 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e 5552
effda4dd
DA
5553 err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
5554 strict);
87f5fc7e
DA
5555 } else {
5556 fl6.flowi6_mark = 0;
5557 fl6.flowi6_secid = 0;
5558 fl6.flowi6_tun_key.tun_id = 0;
5559 fl6.flowi6_uid = sock_net_uid(net, NULL);
5560
effda4dd 5561 err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict);
87f5fc7e
DA
5562 }
5563
effda4dd 5564 if (unlikely(err || IS_ERR_OR_NULL(res.f6i) ||
b1d40991 5565 res.f6i == net->ipv6.fib6_null_entry))
4c79579b
DA
5566 return BPF_FIB_LKUP_RET_NOT_FWDED;
5567
7d21fec9
DA
5568 switch (res.fib6_type) {
5569 /* only unicast is forwarded */
5570 case RTN_UNICAST:
5571 break;
5572 case RTN_BLACKHOLE:
5573 return BPF_FIB_LKUP_RET_BLACKHOLE;
5574 case RTN_UNREACHABLE:
5575 return BPF_FIB_LKUP_RET_UNREACHABLE;
5576 case RTN_PROHIBIT:
5577 return BPF_FIB_LKUP_RET_PROHIBIT;
5578 default:
4c79579b 5579 return BPF_FIB_LKUP_RET_NOT_FWDED;
7d21fec9 5580 }
87f5fc7e 5581
b1d40991
DA
5582 ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif,
5583 fl6.flowi6_oif != 0, NULL, strict);
87f5fc7e 5584
4f74fede 5585 if (check_mtu) {
b748f260 5586 mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src);
e1850ea9
JDB
5587 if (params->tot_len > mtu) {
5588 params->mtu_result = mtu; /* union with tot_len */
4c79579b 5589 return BPF_FIB_LKUP_RET_FRAG_NEEDED;
e1850ea9 5590 }
4f74fede
DA
5591 }
5592
b1d40991 5593 if (res.nh->fib_nh_lws)
4c79579b 5594 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
87f5fc7e 5595
b1d40991
DA
5596 if (res.nh->fib_nh_gw_family)
5597 *dst = res.nh->fib_nh_gw6;
87f5fc7e 5598
b1d40991
DA
5599 dev = res.nh->fib_nh_dev;
5600 params->rt_metric = res.f6i->fib6_metric;
d1c362e1 5601 params->ifindex = dev->ifindex;
87f5fc7e
DA
5602
 5603 /* xdp and cls_bpf programs are run in RCU-bh context, so rcu_read_lock_bh is
71df5777 5604 * not needed here.
87f5fc7e 5605 */
71df5777 5606 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
4c79579b
DA
5607 if (!neigh)
5608 return BPF_FIB_LKUP_RET_NO_NEIGH;
87f5fc7e 5609
e1850ea9 5610 return bpf_fib_set_fwd_params(params, neigh, dev, mtu);
87f5fc7e
DA
5611}
5612#endif
5613
5614BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
5615 struct bpf_fib_lookup *, params, int, plen, u32, flags)
5616{
5617 if (plen < sizeof(*params))
5618 return -EINVAL;
5619
9ce64f19
DA
5620 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
5621 return -EINVAL;
5622
87f5fc7e
DA
5623 switch (params->family) {
5624#if IS_ENABLED(CONFIG_INET)
5625 case AF_INET:
5626 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
4f74fede 5627 flags, true);
87f5fc7e
DA
5628#endif
5629#if IS_ENABLED(CONFIG_IPV6)
5630 case AF_INET6:
5631 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
4f74fede 5632 flags, true);
87f5fc7e
DA
5633#endif
5634 }
bcece5dc 5635 return -EAFNOSUPPORT;
87f5fc7e
DA
5636}
5637
5638static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
5639 .func = bpf_xdp_fib_lookup,
5640 .gpl_only = true,
5641 .ret_type = RET_INTEGER,
5642 .arg1_type = ARG_PTR_TO_CTX,
5643 .arg2_type = ARG_PTR_TO_MEM,
5644 .arg3_type = ARG_CONST_SIZE,
5645 .arg4_type = ARG_ANYTHING,
5646};
5647
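/* Illustrative sketch (not part of the kernel source): a minimal XDP
 * forwarder built on bpf_fib_lookup(). Packet parsing and the rewrite of
 * the Ethernet dmac/smac from the returned params are elided; program and
 * section names are hypothetical.
 *
 *	SEC("xdp")
 *	int xdp_fwd(struct xdp_md *ctx)
 *	{
 *		struct bpf_fib_lookup fib = {};
 *
 *		fib.ifindex = ctx->ingress_ifindex;
 *		// fib.family/saddr/daddr/l4_protocol filled from the parsed
 *		// packet headers (omitted)
 *		if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) ==
 *		    BPF_FIB_LKUP_RET_SUCCESS)
 *			return bpf_redirect(fib.ifindex, 0);
 *		return XDP_PASS;
 *	}
 */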
5648BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
5649 struct bpf_fib_lookup *, params, int, plen, u32, flags)
5650{
4f74fede 5651 struct net *net = dev_net(skb->dev);
4c79579b 5652 int rc = -EAFNOSUPPORT;
2c0a10af 5653 bool check_mtu = false;
4f74fede 5654
87f5fc7e
DA
5655 if (plen < sizeof(*params))
5656 return -EINVAL;
5657
9ce64f19
DA
5658 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
5659 return -EINVAL;
5660
2c0a10af
JDB
5661 if (params->tot_len)
5662 check_mtu = true;
5663
87f5fc7e
DA
5664 switch (params->family) {
5665#if IS_ENABLED(CONFIG_INET)
5666 case AF_INET:
2c0a10af 5667 rc = bpf_ipv4_fib_lookup(net, params, flags, check_mtu);
4f74fede 5668 break;
87f5fc7e
DA
5669#endif
5670#if IS_ENABLED(CONFIG_IPV6)
5671 case AF_INET6:
2c0a10af 5672 rc = bpf_ipv6_fib_lookup(net, params, flags, check_mtu);
4f74fede 5673 break;
87f5fc7e
DA
5674#endif
5675 }
4f74fede 5676
2c0a10af 5677 if (rc == BPF_FIB_LKUP_RET_SUCCESS && !check_mtu) {
4f74fede
DA
5678 struct net_device *dev;
5679
2c0a10af
JDB
 5680 /* When tot_len isn't provided by the user, check the skb
 5681 * against the MTU of the net_device found by the FIB lookup
 5682 */
4c79579b 5683 dev = dev_get_by_index_rcu(net, params->ifindex);
4f74fede 5684 if (!is_skb_forwardable(dev, skb))
4c79579b 5685 rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
e1850ea9
JDB
5686
5687 params->mtu_result = dev->mtu; /* union with tot_len */
4f74fede
DA
5688 }
5689
4c79579b 5690 return rc;
87f5fc7e
DA
5691}
5692
5693static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
5694 .func = bpf_skb_fib_lookup,
5695 .gpl_only = true,
5696 .ret_type = RET_INTEGER,
5697 .arg1_type = ARG_PTR_TO_CTX,
5698 .arg2_type = ARG_PTR_TO_MEM,
5699 .arg3_type = ARG_CONST_SIZE,
5700 .arg4_type = ARG_ANYTHING,
5701};
5702
34b2021c
JDB
5703static struct net_device *__dev_via_ifindex(struct net_device *dev_curr,
5704 u32 ifindex)
5705{
5706 struct net *netns = dev_net(dev_curr);
5707
 5708 /* Non-redirect use-cases can use ifindex=0 and save the ifindex lookup */
5709 if (ifindex == 0)
5710 return dev_curr;
5711
5712 return dev_get_by_index_rcu(netns, ifindex);
5713}
5714
5715BPF_CALL_5(bpf_skb_check_mtu, struct sk_buff *, skb,
5716 u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags)
5717{
5718 int ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
5719 struct net_device *dev = skb->dev;
5720 int skb_len, dev_len;
5721 int mtu;
5722
5723 if (unlikely(flags & ~(BPF_MTU_CHK_SEGS)))
5724 return -EINVAL;
5725
e5e35e75 5726 if (unlikely(flags & BPF_MTU_CHK_SEGS && (len_diff || *mtu_len)))
34b2021c
JDB
5727 return -EINVAL;
5728
5729 dev = __dev_via_ifindex(dev, ifindex);
5730 if (unlikely(!dev))
5731 return -ENODEV;
5732
5733 mtu = READ_ONCE(dev->mtu);
5734
5735 dev_len = mtu + dev->hard_header_len;
e5e35e75
JDB
5736
 5737 /* If set, use *mtu_len as the input L3 length, like iph->tot_len (as in fib_lookup) */
5738 skb_len = *mtu_len ? *mtu_len + dev->hard_header_len : skb->len;
5739
 5740 skb_len += len_diff; /* a negative len_diff can let the check pass */
34b2021c
JDB
5741 if (skb_len <= dev_len) {
5742 ret = BPF_MTU_CHK_RET_SUCCESS;
5743 goto out;
5744 }
 5745 /* At this point, skb->len exceeds the MTU, but as it includes the length
 5746 * of all segments, it can still be below the MTU. The SKB can possibly
 5747 * get re-segmented in the transmit path (see validate_xmit_skb). Thus,
 5748 * the user must choose whether segs are to be MTU checked.
5749 */
5750 if (skb_is_gso(skb)) {
5751 ret = BPF_MTU_CHK_RET_SUCCESS;
5752
5753 if (flags & BPF_MTU_CHK_SEGS &&
5754 !skb_gso_validate_network_len(skb, mtu))
5755 ret = BPF_MTU_CHK_RET_SEGS_TOOBIG;
5756 }
5757out:
5758 /* BPF verifier guarantees valid pointer */
5759 *mtu_len = mtu;
5760
5761 return ret;
5762}
5763
5764BPF_CALL_5(bpf_xdp_check_mtu, struct xdp_buff *, xdp,
5765 u32, ifindex, u32 *, mtu_len, s32, len_diff, u64, flags)
5766{
5767 struct net_device *dev = xdp->rxq->dev;
5768 int xdp_len = xdp->data_end - xdp->data;
5769 int ret = BPF_MTU_CHK_RET_SUCCESS;
5770 int mtu, dev_len;
5771
5772 /* XDP variant doesn't support multi-buffer segment check (yet) */
5773 if (unlikely(flags))
5774 return -EINVAL;
5775
5776 dev = __dev_via_ifindex(dev, ifindex);
5777 if (unlikely(!dev))
5778 return -ENODEV;
5779
5780 mtu = READ_ONCE(dev->mtu);
5781
 5782 /* Add the L2 header length, as the dev MTU is an L3 size */
5783 dev_len = mtu + dev->hard_header_len;
5784
e5e35e75
JDB
 5785 /* Use *mtu_len, if set, as the input L3 length, like iph->tot_len (as in fib_lookup) */
5786 if (*mtu_len)
5787 xdp_len = *mtu_len + dev->hard_header_len;
5788
34b2021c
JDB
 5789 xdp_len += len_diff; /* a negative len_diff can let the check pass */
5790 if (xdp_len > dev_len)
5791 ret = BPF_MTU_CHK_RET_FRAG_NEEDED;
5792
5793 /* BPF verifier guarantees valid pointer */
5794 *mtu_len = mtu;
5795
5796 return ret;
5797}
5798
5799static const struct bpf_func_proto bpf_skb_check_mtu_proto = {
5800 .func = bpf_skb_check_mtu,
5801 .gpl_only = true,
5802 .ret_type = RET_INTEGER,
5803 .arg1_type = ARG_PTR_TO_CTX,
5804 .arg2_type = ARG_ANYTHING,
5805 .arg3_type = ARG_PTR_TO_INT,
5806 .arg4_type = ARG_ANYTHING,
5807 .arg5_type = ARG_ANYTHING,
5808};
5809
5810static const struct bpf_func_proto bpf_xdp_check_mtu_proto = {
5811 .func = bpf_xdp_check_mtu,
5812 .gpl_only = true,
5813 .ret_type = RET_INTEGER,
5814 .arg1_type = ARG_PTR_TO_CTX,
5815 .arg2_type = ARG_ANYTHING,
5816 .arg3_type = ARG_PTR_TO_INT,
5817 .arg4_type = ARG_ANYTHING,
5818 .arg5_type = ARG_ANYTHING,
5819};
5820
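/* Illustrative sketch (not part of the kernel source): a tc program can gate
 * a redirect on bpf_check_mtu(); a return of 0 is BPF_MTU_CHK_RET_SUCCESS.
 * Program and section names are hypothetical.
 *
 *	SEC("tc")
 *	int mtu_guard(struct __sk_buff *skb)
 *	{
 *		__u32 mtu_len = 0;
 *
 *		if (bpf_check_mtu(skb, 0, &mtu_len, 0, 0))
 *			return TC_ACT_SHOT;
 *		return TC_ACT_OK;
 *	}
 */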
fe94cc29
MX
5821#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5822static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
5823{
5824 int err;
5825 struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
5826
bb986a50 5827 if (!seg6_validate_srh(srh, len, false))
fe94cc29
MX
5828 return -EINVAL;
5829
5830 switch (type) {
5831 case BPF_LWT_ENCAP_SEG6_INLINE:
5832 if (skb->protocol != htons(ETH_P_IPV6))
5833 return -EBADMSG;
5834
5835 err = seg6_do_srh_inline(skb, srh);
5836 break;
5837 case BPF_LWT_ENCAP_SEG6:
5838 skb_reset_inner_headers(skb);
5839 skb->encapsulation = 1;
5840 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
5841 break;
5842 default:
5843 return -EINVAL;
5844 }
5845
5846 bpf_compute_data_pointers(skb);
5847 if (err)
5848 return err;
5849
5850 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
5851 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
5852
5853 return seg6_lookup_nexthop(skb, NULL, 0);
5854}
5855#endif /* CONFIG_IPV6_SEG6_BPF */
5856
3e0bd37c
PO
5857#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
5858static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
5859 bool ingress)
5860{
52f27877 5861 return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
3e0bd37c
PO
5862}
5863#endif
5864
5865BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
fe94cc29
MX
5866 u32, len)
5867{
5868 switch (type) {
5869#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5870 case BPF_LWT_ENCAP_SEG6:
5871 case BPF_LWT_ENCAP_SEG6_INLINE:
5872 return bpf_push_seg6_encap(skb, type, hdr, len);
3e0bd37c
PO
5873#endif
5874#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
5875 case BPF_LWT_ENCAP_IP:
5876 return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
fe94cc29
MX
5877#endif
5878 default:
5879 return -EINVAL;
5880 }
5881}
5882
3e0bd37c
PO
5883BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
5884 void *, hdr, u32, len)
5885{
5886 switch (type) {
5887#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
5888 case BPF_LWT_ENCAP_IP:
5889 return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
fe94cc29
MX
5890#endif
5891 default:
5892 return -EINVAL;
5893 }
5894}
5895
3e0bd37c
PO
5896static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
5897 .func = bpf_lwt_in_push_encap,
5898 .gpl_only = false,
5899 .ret_type = RET_INTEGER,
5900 .arg1_type = ARG_PTR_TO_CTX,
5901 .arg2_type = ARG_ANYTHING,
216e3cd2 5902 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
3e0bd37c
PO
5903 .arg4_type = ARG_CONST_SIZE
5904};
5905
5906static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
5907 .func = bpf_lwt_xmit_push_encap,
fe94cc29
MX
5908 .gpl_only = false,
5909 .ret_type = RET_INTEGER,
5910 .arg1_type = ARG_PTR_TO_CTX,
5911 .arg2_type = ARG_ANYTHING,
216e3cd2 5912 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
fe94cc29
MX
5913 .arg4_type = ARG_CONST_SIZE
5914};
5915
61d76980 5916#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
fe94cc29
MX
5917BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
5918 const void *, from, u32, len)
5919{
fe94cc29
MX
5920 struct seg6_bpf_srh_state *srh_state =
5921 this_cpu_ptr(&seg6_bpf_srh_states);
486cdf21 5922 struct ipv6_sr_hdr *srh = srh_state->srh;
fe94cc29 5923 void *srh_tlvs, *srh_end, *ptr;
fe94cc29
MX
5924 int srhoff = 0;
5925
486cdf21 5926 if (srh == NULL)
fe94cc29
MX
5927 return -EINVAL;
5928
fe94cc29
MX
5929 srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
5930 srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
5931
5932 ptr = skb->data + offset;
5933 if (ptr >= srh_tlvs && ptr + len <= srh_end)
486cdf21 5934 srh_state->valid = false;
fe94cc29
MX
5935 else if (ptr < (void *)&srh->flags ||
5936 ptr + len > (void *)&srh->segments)
5937 return -EFAULT;
5938
5939 if (unlikely(bpf_try_make_writable(skb, offset + len)))
5940 return -EFAULT;
486cdf21
MX
5941 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5942 return -EINVAL;
5943 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
fe94cc29
MX
5944
5945 memcpy(skb->data + offset, from, len);
5946 return 0;
fe94cc29
MX
5947}
5948
5949static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
5950 .func = bpf_lwt_seg6_store_bytes,
5951 .gpl_only = false,
5952 .ret_type = RET_INTEGER,
5953 .arg1_type = ARG_PTR_TO_CTX,
5954 .arg2_type = ARG_ANYTHING,
216e3cd2 5955 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
fe94cc29
MX
5956 .arg4_type = ARG_CONST_SIZE
5957};
5958
486cdf21 5959static void bpf_update_srh_state(struct sk_buff *skb)
fe94cc29 5960{
fe94cc29
MX
5961 struct seg6_bpf_srh_state *srh_state =
5962 this_cpu_ptr(&seg6_bpf_srh_states);
fe94cc29 5963 int srhoff = 0;
fe94cc29 5964
486cdf21
MX
5965 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
5966 srh_state->srh = NULL;
5967 } else {
5968 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5969 srh_state->hdrlen = srh_state->srh->hdrlen << 3;
5970 srh_state->valid = true;
fe94cc29 5971 }
486cdf21
MX
5972}
5973
5974BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
5975 u32, action, void *, param, u32, param_len)
5976{
5977 struct seg6_bpf_srh_state *srh_state =
5978 this_cpu_ptr(&seg6_bpf_srh_states);
5979 int hdroff = 0;
5980 int err;
fe94cc29
MX
5981
5982 switch (action) {
5983 case SEG6_LOCAL_ACTION_END_X:
486cdf21
MX
5984 if (!seg6_bpf_has_valid_srh(skb))
5985 return -EBADMSG;
fe94cc29
MX
5986 if (param_len != sizeof(struct in6_addr))
5987 return -EINVAL;
5988 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
5989 case SEG6_LOCAL_ACTION_END_T:
486cdf21
MX
5990 if (!seg6_bpf_has_valid_srh(skb))
5991 return -EBADMSG;
fe94cc29
MX
5992 if (param_len != sizeof(int))
5993 return -EINVAL;
5994 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
486cdf21
MX
5995 case SEG6_LOCAL_ACTION_END_DT6:
5996 if (!seg6_bpf_has_valid_srh(skb))
5997 return -EBADMSG;
fe94cc29
MX
5998 if (param_len != sizeof(int))
5999 return -EINVAL;
486cdf21
MX
6000
6001 if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
6002 return -EBADMSG;
6003 if (!pskb_pull(skb, hdroff))
6004 return -EBADMSG;
6005
6006 skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
6007 skb_reset_network_header(skb);
6008 skb_reset_transport_header(skb);
6009 skb->encapsulation = 0;
6010
6011 bpf_compute_data_pointers(skb);
6012 bpf_update_srh_state(skb);
fe94cc29
MX
6013 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
6014 case SEG6_LOCAL_ACTION_END_B6:
486cdf21
MX
6015 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
6016 return -EBADMSG;
fe94cc29
MX
6017 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
6018 param, param_len);
6019 if (!err)
486cdf21
MX
6020 bpf_update_srh_state(skb);
6021
fe94cc29
MX
6022 return err;
6023 case SEG6_LOCAL_ACTION_END_B6_ENCAP:
486cdf21
MX
6024 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
6025 return -EBADMSG;
fe94cc29
MX
6026 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
6027 param, param_len);
6028 if (!err)
486cdf21
MX
6029 bpf_update_srh_state(skb);
6030
fe94cc29
MX
6031 return err;
6032 default:
6033 return -EINVAL;
6034 }
fe94cc29
MX
6035}
6036
6037static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
6038 .func = bpf_lwt_seg6_action,
6039 .gpl_only = false,
6040 .ret_type = RET_INTEGER,
6041 .arg1_type = ARG_PTR_TO_CTX,
6042 .arg2_type = ARG_ANYTHING,
216e3cd2 6043 .arg3_type = ARG_PTR_TO_MEM | MEM_RDONLY,
fe94cc29
MX
6044 .arg4_type = ARG_CONST_SIZE
6045};
6046
6047BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
6048 s32, len)
6049{
fe94cc29
MX
6050 struct seg6_bpf_srh_state *srh_state =
6051 this_cpu_ptr(&seg6_bpf_srh_states);
486cdf21 6052 struct ipv6_sr_hdr *srh = srh_state->srh;
fe94cc29 6053 void *srh_end, *srh_tlvs, *ptr;
fe94cc29
MX
6054 struct ipv6hdr *hdr;
6055 int srhoff = 0;
6056 int ret;
6057
486cdf21 6058 if (unlikely(srh == NULL))
fe94cc29 6059 return -EINVAL;
fe94cc29
MX
6060
6061 srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
6062 ((srh->first_segment + 1) << 4));
6063 srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
6064 srh_state->hdrlen);
6065 ptr = skb->data + offset;
6066
6067 if (unlikely(ptr < srh_tlvs || ptr > srh_end))
6068 return -EFAULT;
6069 if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
6070 return -EFAULT;
6071
6072 if (len > 0) {
6073 ret = skb_cow_head(skb, len);
6074 if (unlikely(ret < 0))
6075 return ret;
6076
6077 ret = bpf_skb_net_hdr_push(skb, offset, len);
6078 } else {
6079 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
6080 }
6081
6082 bpf_compute_data_pointers(skb);
6083 if (unlikely(ret < 0))
6084 return ret;
6085
6086 hdr = (struct ipv6hdr *)skb->data;
6087 hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
6088
486cdf21
MX
6089 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
6090 return -EINVAL;
6091 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
fe94cc29 6092 srh_state->hdrlen += len;
486cdf21 6093 srh_state->valid = false;
fe94cc29 6094 return 0;
fe94cc29
MX
6095}
6096
6097static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
6098 .func = bpf_lwt_seg6_adjust_srh,
6099 .gpl_only = false,
6100 .ret_type = RET_INTEGER,
6101 .arg1_type = ARG_PTR_TO_CTX,
6102 .arg2_type = ARG_ANYTHING,
6103 .arg3_type = ARG_ANYTHING,
6104};
61d76980 6105#endif /* CONFIG_IPV6_SEG6_BPF */
fe94cc29 6106
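/* Illustrative sketch (not part of the kernel source): an lwt_seg6local
 * program performing an End.T action via bpf_lwt_seg6_action(). The routing
 * table id is a hypothetical value.
 *
 *	SEC("lwt_seg6local")
 *	int do_end_t(struct __sk_buff *skb)
 *	{
 *		int table = 100;
 *
 *		if (bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_T,
 *					&table, sizeof(table)) < 0)
 *			return BPF_DROP;
 *		return BPF_REDIRECT;
 *	}
 */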
df3f94a0
AB
6107#ifdef CONFIG_INET
6108static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
c8123ead 6109 int dif, int sdif, u8 family, u8 proto)
6acc9b43 6110{
6acc9b43
JS
6111 bool refcounted = false;
6112 struct sock *sk = NULL;
6113
6114 if (family == AF_INET) {
6115 __be32 src4 = tuple->ipv4.saddr;
6116 __be32 dst4 = tuple->ipv4.daddr;
6acc9b43
JS
6117
6118 if (proto == IPPROTO_TCP)
c8123ead 6119 sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
6acc9b43
JS
6120 src4, tuple->ipv4.sport,
6121 dst4, tuple->ipv4.dport,
6122 dif, sdif, &refcounted);
6123 else
6124 sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
6125 dst4, tuple->ipv4.dport,
c8123ead 6126 dif, sdif, &udp_table, NULL);
8a615c6b 6127#if IS_ENABLED(CONFIG_IPV6)
6acc9b43
JS
6128 } else {
6129 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
6130 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
6acc9b43
JS
6131
6132 if (proto == IPPROTO_TCP)
c8123ead 6133 sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
6acc9b43 6134 src6, tuple->ipv6.sport,
cac6cc2f 6135 dst6, ntohs(tuple->ipv6.dport),
6acc9b43 6136 dif, sdif, &refcounted);
8a615c6b
JS
6137 else if (likely(ipv6_bpf_stub))
6138 sk = ipv6_bpf_stub->udp6_lib_lookup(net,
6139 src6, tuple->ipv6.sport,
cac6cc2f 6140 dst6, tuple->ipv6.dport,
8a615c6b 6141 dif, sdif,
c8123ead 6142 &udp_table, NULL);
6acc9b43
JS
6143#endif
6144 }
6145
6146 if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
6147 WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
6148 sk = NULL;
6149 }
6150 return sk;
6151}
6152
edbf8c01 6153/* bpf_skc_lookup performs the core lookup for different types of sockets,
6acc9b43
JS
6154 * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
 6155 * The BPF_CALL wrappers cast the returned socket to an 'unsigned long'
 6156 * to satisfy their declarations.
6157 */
edbf8c01
LB
6158static struct sock *
6159__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
6160 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
6161 u64 flags)
6acc9b43 6162{
6acc9b43
JS
6163 struct sock *sk = NULL;
6164 u8 family = AF_UNSPEC;
6165 struct net *net;
c8123ead 6166 int sdif;
6acc9b43 6167
9b28ae24
LB
6168 if (len == sizeof(tuple->ipv4))
6169 family = AF_INET;
6170 else if (len == sizeof(tuple->ipv6))
6171 family = AF_INET6;
6172 else
6173 return NULL;
6174
f71c6143
JS
6175 if (unlikely(family == AF_UNSPEC || flags ||
6176 !((s32)netns_id < 0 || netns_id <= S32_MAX)))
6acc9b43
JS
6177 goto out;
6178
c8123ead
NH
6179 if (family == AF_INET)
6180 sdif = inet_sdif(skb);
6acc9b43 6181 else
c8123ead
NH
6182 sdif = inet6_sdif(skb);
6183
f71c6143
JS
6184 if ((s32)netns_id < 0) {
6185 net = caller_net;
4cc1feeb 6186 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
f71c6143 6187 } else {
6acc9b43
JS
6188 net = get_net_ns_by_id(caller_net, netns_id);
6189 if (unlikely(!net))
6190 goto out;
c8123ead 6191 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
6acc9b43 6192 put_net(net);
6acc9b43
JS
6193 }
6194
edbf8c01
LB
6195out:
6196 return sk;
6197}
6198
6199static struct sock *
6200__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
6201 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
6202 u64 flags)
6203{
6204 struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
6205 ifindex, proto, netns_id, flags);
6206
f7355a6c 6207 if (sk) {
6acc9b43 6208 sk = sk_to_full_sk(sk);
f7355a6c 6209 if (!sk_fullsock(sk)) {
2e012c74 6210 sock_gen_put(sk);
f7355a6c
MKL
6211 return NULL;
6212 }
6213 }
edbf8c01
LB
6214
6215 return sk;
6acc9b43
JS
6216}
6217
edbf8c01
LB
6218static struct sock *
6219bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
6220 u8 proto, u64 netns_id, u64 flags)
c8123ead
NH
6221{
6222 struct net *caller_net;
6223 int ifindex;
6224
6225 if (skb->dev) {
6226 caller_net = dev_net(skb->dev);
6227 ifindex = skb->dev->ifindex;
6228 } else {
6229 caller_net = sock_net(skb->sk);
6230 ifindex = 0;
6231 }
6232
edbf8c01
LB
6233 return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
6234 netns_id, flags);
c8123ead
NH
6235}
6236
edbf8c01
LB
6237static struct sock *
6238bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
6239 u8 proto, u64 netns_id, u64 flags)
6240{
6241 struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
6242 flags);
6243
f7355a6c 6244 if (sk) {
edbf8c01 6245 sk = sk_to_full_sk(sk);
f7355a6c 6246 if (!sk_fullsock(sk)) {
2e012c74 6247 sock_gen_put(sk);
f7355a6c
MKL
6248 return NULL;
6249 }
6250 }
edbf8c01
LB
6251
6252 return sk;
6253}
6254
6255BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
6256 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6257{
6258 return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
6259 netns_id, flags);
6260}
6261
6262static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
6263 .func = bpf_skc_lookup_tcp,
6264 .gpl_only = false,
6265 .pkt_access = true,
6266 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
6267 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 6268 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
edbf8c01
LB
6269 .arg3_type = ARG_CONST_SIZE,
6270 .arg4_type = ARG_ANYTHING,
6271 .arg5_type = ARG_ANYTHING,
6272};
6273
6acc9b43
JS
6274BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
6275 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6276{
edbf8c01
LB
6277 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
6278 netns_id, flags);
6acc9b43
JS
6279}
6280
6281static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
6282 .func = bpf_sk_lookup_tcp,
6283 .gpl_only = false,
6284 .pkt_access = true,
6285 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
6286 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 6287 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6acc9b43
JS
6288 .arg3_type = ARG_CONST_SIZE,
6289 .arg4_type = ARG_ANYTHING,
6290 .arg5_type = ARG_ANYTHING,
6291};
6292
6293BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
6294 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6295{
edbf8c01
LB
6296 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
6297 netns_id, flags);
6acc9b43
JS
6298}
6299
6300static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
6301 .func = bpf_sk_lookup_udp,
6302 .gpl_only = false,
6303 .pkt_access = true,
6304 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
6305 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 6306 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6acc9b43
JS
6307 .arg3_type = ARG_CONST_SIZE,
6308 .arg4_type = ARG_ANYTHING,
6309 .arg5_type = ARG_ANYTHING,
6310};
6311
6312BPF_CALL_1(bpf_sk_release, struct sock *, sk)
6313{
a5fa25ad 6314 if (sk && sk_is_refcounted(sk))
6acc9b43
JS
6315 sock_gen_put(sk);
6316 return 0;
6317}
6318
6319static const struct bpf_func_proto bpf_sk_release_proto = {
6320 .func = bpf_sk_release,
6321 .gpl_only = false,
6322 .ret_type = RET_INTEGER,
a5fa25ad 6323 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
6acc9b43 6324};
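/* Illustrative sketch (not part of the kernel source): the lookup/release
 * pair as a tc program would use it. Filling the tuple from the parsed
 * packet is elided; program and section names are hypothetical.
 *
 *	SEC("tc")
 *	int sk_peek(struct __sk_buff *skb)
 *	{
 *		struct bpf_sock_tuple tuple = {};
 *		struct bpf_sock *sk;
 *
 *		// tuple.ipv4.{saddr,daddr,sport,dport} filled from the
 *		// parsed headers (omitted)
 *		sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *				       BPF_F_CURRENT_NETNS, 0);
 *		if (sk)
 *			bpf_sk_release(sk);
 *		return TC_ACT_OK;
 *	}
 */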
c8123ead
NH
6325
6326BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
6327 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
6328{
6329 struct net *caller_net = dev_net(ctx->rxq->dev);
6330 int ifindex = ctx->rxq->dev->ifindex;
6331
edbf8c01
LB
6332 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
6333 ifindex, IPPROTO_UDP, netns_id,
6334 flags);
c8123ead
NH
6335}
6336
6337static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
6338 .func = bpf_xdp_sk_lookup_udp,
6339 .gpl_only = false,
6340 .pkt_access = true,
6341 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
6342 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 6343 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
c8123ead
NH
6344 .arg3_type = ARG_CONST_SIZE,
6345 .arg4_type = ARG_ANYTHING,
6346 .arg5_type = ARG_ANYTHING,
6347};
6348
edbf8c01
LB
6349BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
6350 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
6351{
6352 struct net *caller_net = dev_net(ctx->rxq->dev);
6353 int ifindex = ctx->rxq->dev->ifindex;
6354
6355 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
6356 ifindex, IPPROTO_TCP, netns_id,
6357 flags);
6358}
6359
6360static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
6361 .func = bpf_xdp_skc_lookup_tcp,
6362 .gpl_only = false,
6363 .pkt_access = true,
6364 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
6365 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 6366 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
edbf8c01
LB
6367 .arg3_type = ARG_CONST_SIZE,
6368 .arg4_type = ARG_ANYTHING,
6369 .arg5_type = ARG_ANYTHING,
6370};
6371
c8123ead
NH
6372BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
6373 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
6374{
6375 struct net *caller_net = dev_net(ctx->rxq->dev);
6376 int ifindex = ctx->rxq->dev->ifindex;
6377
edbf8c01
LB
6378 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
6379 ifindex, IPPROTO_TCP, netns_id,
6380 flags);
c8123ead
NH
6381}
6382
6383static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
6384 .func = bpf_xdp_sk_lookup_tcp,
6385 .gpl_only = false,
6386 .pkt_access = true,
6387 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
6388 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 6389 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
c8123ead
NH
6390 .arg3_type = ARG_CONST_SIZE,
6391 .arg4_type = ARG_ANYTHING,
6392 .arg5_type = ARG_ANYTHING,
6393};
6c49e65e 6394
edbf8c01
LB
6395BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
6396 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6397{
6398 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
6399 sock_net(ctx->sk), 0,
6400 IPPROTO_TCP, netns_id, flags);
6401}
6402
6403static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
6404 .func = bpf_sock_addr_skc_lookup_tcp,
6405 .gpl_only = false,
6406 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
6407 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 6408 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
edbf8c01
LB
6409 .arg3_type = ARG_CONST_SIZE,
6410 .arg4_type = ARG_ANYTHING,
6411 .arg5_type = ARG_ANYTHING,
6412};
6413
6c49e65e
AI
6414BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
6415 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6416{
edbf8c01
LB
6417 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
6418 sock_net(ctx->sk), 0, IPPROTO_TCP,
6419 netns_id, flags);
6c49e65e
AI
6420}
6421
6422static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
6423 .func = bpf_sock_addr_sk_lookup_tcp,
6424 .gpl_only = false,
6425 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
6426 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 6427 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6c49e65e
AI
6428 .arg3_type = ARG_CONST_SIZE,
6429 .arg4_type = ARG_ANYTHING,
6430 .arg5_type = ARG_ANYTHING,
6431};
6432
6433BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
6434 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
6435{
edbf8c01
LB
6436 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
6437 sock_net(ctx->sk), 0, IPPROTO_UDP,
6438 netns_id, flags);
6c49e65e
AI
6439}
6440
6441static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
6442 .func = bpf_sock_addr_sk_lookup_udp,
6443 .gpl_only = false,
6444 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
6445 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 6446 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
6c49e65e
AI
6447 .arg3_type = ARG_CONST_SIZE,
6448 .arg4_type = ARG_ANYTHING,
6449 .arg5_type = ARG_ANYTHING,
6450};
6451
655a51e5
MKL
6452bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
6453 struct bpf_insn_access_aux *info)
6454{
c2cb5e82
SF
6455 if (off < 0 || off >= offsetofend(struct bpf_tcp_sock,
6456 icsk_retransmits))
655a51e5
MKL
6457 return false;
6458
6459 if (off % size != 0)
6460 return false;
6461
6462 switch (off) {
6463 case offsetof(struct bpf_tcp_sock, bytes_received):
6464 case offsetof(struct bpf_tcp_sock, bytes_acked):
6465 return size == sizeof(__u64);
6466 default:
6467 return size == sizeof(__u32);
6468 }
6469}
6470
6471u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
6472 const struct bpf_insn *si,
6473 struct bpf_insn *insn_buf,
6474 struct bpf_prog *prog, u32 *target_size)
6475{
6476 struct bpf_insn *insn = insn_buf;
6477
6478#define BPF_TCP_SOCK_GET_COMMON(FIELD) \
6479 do { \
c593642c
PB
6480 BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) > \
6481 sizeof_field(struct bpf_tcp_sock, FIELD)); \
655a51e5
MKL
6482 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
6483 si->dst_reg, si->src_reg, \
6484 offsetof(struct tcp_sock, FIELD)); \
6485 } while (0)
6486
c2cb5e82
SF
6487#define BPF_INET_SOCK_GET_COMMON(FIELD) \
6488 do { \
c593642c 6489 BUILD_BUG_ON(sizeof_field(struct inet_connection_sock, \
c2cb5e82 6490 FIELD) > \
c593642c 6491 sizeof_field(struct bpf_tcp_sock, FIELD)); \
c2cb5e82
SF
6492 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
6493 struct inet_connection_sock, \
6494 FIELD), \
6495 si->dst_reg, si->src_reg, \
6496 offsetof( \
6497 struct inet_connection_sock, \
6498 FIELD)); \
6499 } while (0)
6500
655a51e5
MKL
6501 if (insn > insn_buf)
6502 return insn - insn_buf;
6503
6504 switch (si->off) {
6505 case offsetof(struct bpf_tcp_sock, rtt_min):
c593642c 6506 BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
655a51e5
MKL
6507 sizeof(struct minmax));
6508 BUILD_BUG_ON(sizeof(struct minmax) <
6509 sizeof(struct minmax_sample));
6510
6511 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
6512 offsetof(struct tcp_sock, rtt_min) +
6513 offsetof(struct minmax_sample, v));
6514 break;
2377b81d
SF
6515 case offsetof(struct bpf_tcp_sock, snd_cwnd):
6516 BPF_TCP_SOCK_GET_COMMON(snd_cwnd);
6517 break;
6518 case offsetof(struct bpf_tcp_sock, srtt_us):
6519 BPF_TCP_SOCK_GET_COMMON(srtt_us);
6520 break;
6521 case offsetof(struct bpf_tcp_sock, snd_ssthresh):
6522 BPF_TCP_SOCK_GET_COMMON(snd_ssthresh);
6523 break;
6524 case offsetof(struct bpf_tcp_sock, rcv_nxt):
6525 BPF_TCP_SOCK_GET_COMMON(rcv_nxt);
6526 break;
6527 case offsetof(struct bpf_tcp_sock, snd_nxt):
6528 BPF_TCP_SOCK_GET_COMMON(snd_nxt);
6529 break;
6530 case offsetof(struct bpf_tcp_sock, snd_una):
6531 BPF_TCP_SOCK_GET_COMMON(snd_una);
6532 break;
6533 case offsetof(struct bpf_tcp_sock, mss_cache):
6534 BPF_TCP_SOCK_GET_COMMON(mss_cache);
6535 break;
6536 case offsetof(struct bpf_tcp_sock, ecn_flags):
6537 BPF_TCP_SOCK_GET_COMMON(ecn_flags);
6538 break;
6539 case offsetof(struct bpf_tcp_sock, rate_delivered):
6540 BPF_TCP_SOCK_GET_COMMON(rate_delivered);
6541 break;
6542 case offsetof(struct bpf_tcp_sock, rate_interval_us):
6543 BPF_TCP_SOCK_GET_COMMON(rate_interval_us);
6544 break;
6545 case offsetof(struct bpf_tcp_sock, packets_out):
6546 BPF_TCP_SOCK_GET_COMMON(packets_out);
6547 break;
6548 case offsetof(struct bpf_tcp_sock, retrans_out):
6549 BPF_TCP_SOCK_GET_COMMON(retrans_out);
6550 break;
6551 case offsetof(struct bpf_tcp_sock, total_retrans):
6552 BPF_TCP_SOCK_GET_COMMON(total_retrans);
6553 break;
6554 case offsetof(struct bpf_tcp_sock, segs_in):
6555 BPF_TCP_SOCK_GET_COMMON(segs_in);
6556 break;
6557 case offsetof(struct bpf_tcp_sock, data_segs_in):
6558 BPF_TCP_SOCK_GET_COMMON(data_segs_in);
6559 break;
6560 case offsetof(struct bpf_tcp_sock, segs_out):
6561 BPF_TCP_SOCK_GET_COMMON(segs_out);
6562 break;
6563 case offsetof(struct bpf_tcp_sock, data_segs_out):
6564 BPF_TCP_SOCK_GET_COMMON(data_segs_out);
6565 break;
6566 case offsetof(struct bpf_tcp_sock, lost_out):
6567 BPF_TCP_SOCK_GET_COMMON(lost_out);
6568 break;
6569 case offsetof(struct bpf_tcp_sock, sacked_out):
6570 BPF_TCP_SOCK_GET_COMMON(sacked_out);
6571 break;
6572 case offsetof(struct bpf_tcp_sock, bytes_received):
6573 BPF_TCP_SOCK_GET_COMMON(bytes_received);
6574 break;
6575 case offsetof(struct bpf_tcp_sock, bytes_acked):
6576 BPF_TCP_SOCK_GET_COMMON(bytes_acked);
6577 break;
0357746d
SF
6578 case offsetof(struct bpf_tcp_sock, dsack_dups):
6579 BPF_TCP_SOCK_GET_COMMON(dsack_dups);
6580 break;
6581 case offsetof(struct bpf_tcp_sock, delivered):
6582 BPF_TCP_SOCK_GET_COMMON(delivered);
6583 break;
6584 case offsetof(struct bpf_tcp_sock, delivered_ce):
6585 BPF_TCP_SOCK_GET_COMMON(delivered_ce);
6586 break;
c2cb5e82
SF
6587 case offsetof(struct bpf_tcp_sock, icsk_retransmits):
6588 BPF_INET_SOCK_GET_COMMON(icsk_retransmits);
6589 break;
655a51e5
MKL
6590 }
6591
6592 return insn - insn_buf;
6593}
6594
6595BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
6596{
655a51e5
MKL
6597 if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
6598 return (unsigned long)sk;
6599
6600 return (unsigned long)NULL;
6601}
6602
0d01da6a 6603const struct bpf_func_proto bpf_tcp_sock_proto = {
655a51e5
MKL
6604 .func = bpf_tcp_sock,
6605 .gpl_only = false,
6606 .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL,
6607 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
6608};
6609
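/* Illustrative sketch (not part of the kernel source): a cgroup_skb egress
 * program reading snd_cwnd through bpf_tcp_sock(). The bpf_sk_fullsock()
 * step reflects what the verifier expects; program and section names are
 * hypothetical.
 *
 *	SEC("cgroup_skb/egress")
 *	int cwnd_probe(struct __sk_buff *skb)
 *	{
 *		struct bpf_sock *sk = skb->sk;
 *		struct bpf_tcp_sock *tp;
 *
 *		if (!sk || !(sk = bpf_sk_fullsock(sk)))
 *			return 1;
 *		tp = bpf_tcp_sock(sk);
 *		if (tp)
 *			bpf_printk("cwnd=%u", tp->snd_cwnd);
 *		return 1;
 *	}
 */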
dbafd7dd
MKL
6610BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
6611{
6612 sk = sk_to_full_sk(sk);
6613
6614 if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
6615 return (unsigned long)sk;
6616
6617 return (unsigned long)NULL;
6618}
6619
6620static const struct bpf_func_proto bpf_get_listener_sock_proto = {
6621 .func = bpf_get_listener_sock,
6622 .gpl_only = false,
6623 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
6624 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
6625};
6626
f7c917ba 6627BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
6628{
6629 unsigned int iphdr_len;
6630
d7bf2ebe
THJ
6631 switch (skb_protocol(skb, true)) {
6632 case cpu_to_be16(ETH_P_IP):
f7c917ba 6633 iphdr_len = sizeof(struct iphdr);
d7bf2ebe
THJ
6634 break;
6635 case cpu_to_be16(ETH_P_IPV6):
f7c917ba 6636 iphdr_len = sizeof(struct ipv6hdr);
d7bf2ebe
THJ
6637 break;
6638 default:
f7c917ba 6639 return 0;
d7bf2ebe 6640 }
f7c917ba 6641
6642 if (skb_headlen(skb) < iphdr_len)
6643 return 0;
6644
6645 if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
6646 return 0;
6647
6648 return INET_ECN_set_ce(skb);
6649}
6650
fada7fdc
JL
6651bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
6652 struct bpf_insn_access_aux *info)
6653{
6654 if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id))
6655 return false;
6656
6657 if (off % size != 0)
6658 return false;
6659
6660 switch (off) {
6661 default:
6662 return size == sizeof(__u32);
6663 }
6664}
6665
6666u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
6667 const struct bpf_insn *si,
6668 struct bpf_insn *insn_buf,
6669 struct bpf_prog *prog, u32 *target_size)
6670{
6671 struct bpf_insn *insn = insn_buf;
6672
6673#define BPF_XDP_SOCK_GET(FIELD) \
6674 do { \
c593642c
PB
6675 BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) > \
6676 sizeof_field(struct bpf_xdp_sock, FIELD)); \
fada7fdc
JL
6677 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
6678 si->dst_reg, si->src_reg, \
6679 offsetof(struct xdp_sock, FIELD)); \
6680 } while (0)
6681
6682 switch (si->off) {
6683 case offsetof(struct bpf_xdp_sock, queue_id):
6684 BPF_XDP_SOCK_GET(queue_id);
6685 break;
6686 }
6687
6688 return insn - insn_buf;
6689}
6690
f7c917ba 6691static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
6692 .func = bpf_skb_ecn_set_ce,
6693 .gpl_only = false,
6694 .ret_type = RET_INTEGER,
6695 .arg1_type = ARG_PTR_TO_CTX,
6696};
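/* Illustrative sketch (not part of the kernel source): a cgroup_skb egress
 * program could set CE on ECN-capable packets as a congestion signal. The
 * queue_is_congested() predicate and the names are hypothetical.
 *
 *	SEC("cgroup_skb/egress")
 *	int mark_ce(struct __sk_buff *skb)
 *	{
 *		if (queue_is_congested())	// hypothetical predicate
 *			bpf_skb_ecn_set_ce(skb);
 *		return 1;
 *	}
 */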
39904084
LB
6697
6698BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
6699 struct tcphdr *, th, u32, th_len)
6700{
6701#ifdef CONFIG_SYN_COOKIES
6702 u32 cookie;
6703 int ret;
6704
c0df236e 6705 if (unlikely(!sk || th_len < sizeof(*th)))
39904084
LB
6706 return -EINVAL;
6707
6708 /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
6709 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
6710 return -EINVAL;
6711
6712 if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
6713 return -EINVAL;
6714
6715 if (!th->ack || th->rst || th->syn)
6716 return -ENOENT;
6717
6718 if (tcp_synq_no_recent_overflow(sk))
6719 return -ENOENT;
6720
6721 cookie = ntohl(th->ack_seq) - 1;
6722
6723 switch (sk->sk_family) {
6724 case AF_INET:
6725 if (unlikely(iph_len < sizeof(struct iphdr)))
6726 return -EINVAL;
6727
6728 ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
6729 break;
6730
6731#if IS_BUILTIN(CONFIG_IPV6)
6732 case AF_INET6:
6733 if (unlikely(iph_len < sizeof(struct ipv6hdr)))
6734 return -EINVAL;
6735
6736 ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
6737 break;
6738#endif /* CONFIG_IPV6 */
6739
6740 default:
6741 return -EPROTONOSUPPORT;
6742 }
6743
6744 if (ret > 0)
6745 return 0;
6746
6747 return -ENOENT;
6748#else
6749 return -ENOTSUPP;
6750#endif
6751}
6752
6753static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
6754 .func = bpf_tcp_check_syncookie,
6755 .gpl_only = true,
6756 .pkt_access = true,
6757 .ret_type = RET_INTEGER,
c0df236e 6758 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
216e3cd2 6759 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
39904084 6760 .arg3_type = ARG_CONST_SIZE,
216e3cd2 6761 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
39904084
LB
6762 .arg5_type = ARG_CONST_SIZE,
6763};
6764
70d66244
PP
6765BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
6766 struct tcphdr *, th, u32, th_len)
6767{
6768#ifdef CONFIG_SYN_COOKIES
6769 u32 cookie;
6770 u16 mss;
6771
c0df236e 6772 if (unlikely(!sk || th_len < sizeof(*th) || th_len != th->doff * 4))
70d66244
PP
6773 return -EINVAL;
6774
6775 if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
6776 return -EINVAL;
6777
6778 if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
6779 return -ENOENT;
6780
6781 if (!th->syn || th->ack || th->fin || th->rst)
6782 return -EINVAL;
6783
6784 if (unlikely(iph_len < sizeof(struct iphdr)))
6785 return -EINVAL;
6786
6787 /* Both struct iphdr and struct ipv6hdr have the version field at the
6788 * same offset so we can cast to the shorter header (struct iphdr).
6789 */
6790 switch (((struct iphdr *)iph)->version) {
6791 case 4:
6792 if (sk->sk_family == AF_INET6 && sk->sk_ipv6only)
6793 return -EINVAL;
6794
6795 mss = tcp_v4_get_syncookie(sk, iph, th, &cookie);
6796 break;
6797
6798#if IS_BUILTIN(CONFIG_IPV6)
6799 case 6:
6800 if (unlikely(iph_len < sizeof(struct ipv6hdr)))
6801 return -EINVAL;
6802
6803 if (sk->sk_family != AF_INET6)
6804 return -EINVAL;
6805
6806 mss = tcp_v6_get_syncookie(sk, iph, th, &cookie);
6807 break;
6808#endif /* CONFIG_IPV6 */
6809
6810 default:
6811 return -EPROTONOSUPPORT;
6812 }
0741be35 6813 if (mss == 0)
70d66244
PP
6814 return -ENOENT;
6815
6816 return cookie | ((u64)mss << 32);
6817#else
6818 return -EOPNOTSUPP;
6819#endif /* CONFIG_SYN_COOKIES */
6820}
6821
6822static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
6823 .func = bpf_tcp_gen_syncookie,
6824 .gpl_only = true, /* __cookie_v*_init_sequence() is GPL */
6825 .pkt_access = true,
6826 .ret_type = RET_INTEGER,
c0df236e 6827 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
216e3cd2 6828 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
70d66244 6829 .arg3_type = ARG_CONST_SIZE,
216e3cd2 6830 .arg4_type = ARG_PTR_TO_MEM | MEM_RDONLY,
70d66244
PP
6831 .arg5_type = ARG_CONST_SIZE,
6832};
6833
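/* Illustrative sketch (not part of the kernel source): an XDP/tc SYN-flood
 * responder calls bpf_tcp_gen_syncookie() on a SYN aimed at a listening
 * socket and bpf_tcp_check_syncookie() on the returning ACK. Header parsing
 * and the socket lookup (e.g. via bpf_skc_lookup_tcp()) are elided.
 *
 *	s64 cookie = bpf_tcp_gen_syncookie(sk, iph, sizeof(*iph),
 *					   th, th->doff * 4);
 *	// low 32 bits: value to place in th->ack_seq; upper 32 bits: MSS
 *
 *	if (bpf_tcp_check_syncookie(sk, iph, sizeof(*iph),
 *				    th, sizeof(*th)) == 0) {
 *		// the ACK completes a valid syncookie handshake
 *	}
 */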
cf7fbe66
JS
6834BPF_CALL_3(bpf_sk_assign, struct sk_buff *, skb, struct sock *, sk, u64, flags)
6835{
27e5203b 6836 if (!sk || flags != 0)
cf7fbe66
JS
6837 return -EINVAL;
6838 if (!skb_at_tc_ingress(skb))
6839 return -EOPNOTSUPP;
6840 if (unlikely(dev_net(skb->dev) != sock_net(sk)))
6841 return -ENETUNREACH;
8e368dc7 6842 if (unlikely(sk_fullsock(sk) && sk->sk_reuseport))
cf7fbe66 6843 return -ESOCKTNOSUPPORT;
7ae215d2
JS
6844 if (sk_is_refcounted(sk) &&
6845 unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
cf7fbe66
JS
6846 return -ENOENT;
6847
6848 skb_orphan(skb);
6849 skb->sk = sk;
6850 skb->destructor = sock_pfree;
6851
6852 return 0;
6853}
6854
6855static const struct bpf_func_proto bpf_sk_assign_proto = {
6856 .func = bpf_sk_assign,
6857 .gpl_only = false,
6858 .ret_type = RET_INTEGER,
6859 .arg1_type = ARG_PTR_TO_CTX,
27e5203b 6860 .arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
cf7fbe66
JS
6861 .arg3_type = ARG_ANYTHING,
6862};
6863
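/* Illustrative sketch (not part of the kernel source): TPROXY-style steering
 * at tc ingress pairs a socket lookup with bpf_sk_assign(). Filling the
 * tuple from the parsed headers is elided; names are hypothetical.
 *
 *	SEC("tc")
 *	int steer(struct __sk_buff *skb)
 *	{
 *		struct bpf_sock_tuple tuple = {};	// filled from headers
 *		struct bpf_sock *sk;
 *
 *		sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *				       BPF_F_CURRENT_NETNS, 0);
 *		if (!sk)
 *			return TC_ACT_OK;
 *		bpf_sk_assign(skb, sk, 0);
 *		bpf_sk_release(sk);
 *		return TC_ACT_OK;
 *	}
 */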
0813a841
MKL
6864static const u8 *bpf_search_tcp_opt(const u8 *op, const u8 *opend,
6865 u8 search_kind, const u8 *magic,
6866 u8 magic_len, bool *eol)
6867{
6868 u8 kind, kind_len;
6869
6870 *eol = false;
6871
6872 while (op < opend) {
6873 kind = op[0];
6874
6875 if (kind == TCPOPT_EOL) {
6876 *eol = true;
6877 return ERR_PTR(-ENOMSG);
6878 } else if (kind == TCPOPT_NOP) {
6879 op++;
6880 continue;
6881 }
6882
6883 if (opend - op < 2 || opend - op < op[1] || op[1] < 2)
6884 /* Something is wrong in the received header.
6885 * Follow the TCP stack's tcp_parse_options()
6886 * and just bail here.
6887 */
6888 return ERR_PTR(-EFAULT);
6889
6890 kind_len = op[1];
6891 if (search_kind == kind) {
6892 if (!magic_len)
6893 return op;
6894
6895 if (magic_len > kind_len - 2)
6896 return ERR_PTR(-ENOMSG);
6897
6898 if (!memcmp(&op[2], magic, magic_len))
6899 return op;
6900 }
6901
6902 op += kind_len;
6903 }
6904
6905 return ERR_PTR(-ENOMSG);
6906}
6907
6908BPF_CALL_4(bpf_sock_ops_load_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
6909 void *, search_res, u32, len, u64, flags)
6910{
6911 bool eol, load_syn = flags & BPF_LOAD_HDR_OPT_TCP_SYN;
6912 const u8 *op, *opend, *magic, *search = search_res;
6913 u8 search_kind, search_len, copy_len, magic_len;
6914 int ret;
6915
 6916 /* 2 bytes is the minimal option len, except for TCPOPT_NOP and
 6917 * TCPOPT_EOL, which are useless for the bpf prog to learn
 6918 * and which this helper disallows loading as well.
 6919 */
6920 if (len < 2 || flags & ~BPF_LOAD_HDR_OPT_TCP_SYN)
6921 return -EINVAL;
6922
6923 search_kind = search[0];
6924 search_len = search[1];
6925
6926 if (search_len > len || search_kind == TCPOPT_NOP ||
6927 search_kind == TCPOPT_EOL)
6928 return -EINVAL;
6929
6930 if (search_kind == TCPOPT_EXP || search_kind == 253) {
6931 /* 16 or 32 bit magic. +2 for kind and kind length */
6932 if (search_len != 4 && search_len != 6)
6933 return -EINVAL;
6934 magic = &search[2];
6935 magic_len = search_len - 2;
6936 } else {
6937 if (search_len)
6938 return -EINVAL;
6939 magic = NULL;
6940 magic_len = 0;
6941 }
6942
6943 if (load_syn) {
6944 ret = bpf_sock_ops_get_syn(bpf_sock, TCP_BPF_SYN, &op);
6945 if (ret < 0)
6946 return ret;
6947
6948 opend = op + ret;
6949 op += sizeof(struct tcphdr);
6950 } else {
6951 if (!bpf_sock->skb ||
6952 bpf_sock->op == BPF_SOCK_OPS_HDR_OPT_LEN_CB)
6953 /* This bpf_sock->op cannot call this helper */
6954 return -EPERM;
6955
6956 opend = bpf_sock->skb_data_end;
6957 op = bpf_sock->skb->data + sizeof(struct tcphdr);
6958 }
6959
6960 op = bpf_search_tcp_opt(op, opend, search_kind, magic, magic_len,
6961 &eol);
6962 if (IS_ERR(op))
6963 return PTR_ERR(op);
6964
6965 copy_len = op[1];
6966 ret = copy_len;
6967 if (copy_len > len) {
6968 ret = -ENOSPC;
6969 copy_len = len;
6970 }
6971
6972 memcpy(search_res, op, copy_len);
6973 return ret;
6974}
6975
6976static const struct bpf_func_proto bpf_sock_ops_load_hdr_opt_proto = {
6977 .func = bpf_sock_ops_load_hdr_opt,
6978 .gpl_only = false,
6979 .ret_type = RET_INTEGER,
6980 .arg1_type = ARG_PTR_TO_CTX,
6981 .arg2_type = ARG_PTR_TO_MEM,
6982 .arg3_type = ARG_CONST_SIZE,
6983 .arg4_type = ARG_ANYTHING,
6984};
6985
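/* Illustrative sketch (not part of the kernel source): a sockops program
 * searching the saved SYN for an experimental option carrying a 2-byte
 * magic (0xeB9F here, a hypothetical value).
 *
 *	SEC("sockops")
 *	int parse_opt(struct bpf_sock_ops *skops)
 *	{
 *		__u8 opt[8] = { TCPOPT_EXP, 4, 0xeB, 0x9F };	// kind, len, magic
 *		int ret;
 *
 *		ret = bpf_load_hdr_opt(skops, opt, sizeof(opt),
 *				       BPF_LOAD_HDR_OPT_TCP_SYN);
 *		// ret > 0: total length of the option copied into opt[]
 *		return 1;
 *	}
 */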
6986BPF_CALL_4(bpf_sock_ops_store_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
6987 const void *, from, u32, len, u64, flags)
6988{
6989 u8 new_kind, new_kind_len, magic_len = 0, *opend;
6990 const u8 *op, *new_op, *magic = NULL;
6991 struct sk_buff *skb;
6992 bool eol;
6993
6994 if (bpf_sock->op != BPF_SOCK_OPS_WRITE_HDR_OPT_CB)
6995 return -EPERM;
6996
6997 if (len < 2 || flags)
6998 return -EINVAL;
6999
7000 new_op = from;
7001 new_kind = new_op[0];
7002 new_kind_len = new_op[1];
7003
7004 if (new_kind_len > len || new_kind == TCPOPT_NOP ||
7005 new_kind == TCPOPT_EOL)
7006 return -EINVAL;
7007
7008 if (new_kind_len > bpf_sock->remaining_opt_len)
7009 return -ENOSPC;
7010
7011 /* 253 is another experimental kind */
7012 if (new_kind == TCPOPT_EXP || new_kind == 253) {
7013 if (new_kind_len < 4)
7014 return -EINVAL;
 7015 /* Also match on the 2 byte magic.
 7016 * RFC 6994: the magic could be 2 or 4 bytes.
 7017 * Hence, matching by 2 bytes only is on the
 7018 * conservative side, but it is the right
 7019 * thing to do for the 'search-for-duplication'
 7020 * purpose.
 7021 */
7022 magic = &new_op[2];
7023 magic_len = 2;
7024 }
7025
7026 /* Check for duplication */
7027 skb = bpf_sock->skb;
7028 op = skb->data + sizeof(struct tcphdr);
7029 opend = bpf_sock->skb_data_end;
7030
7031 op = bpf_search_tcp_opt(op, opend, new_kind, magic, magic_len,
7032 &eol);
7033 if (!IS_ERR(op))
7034 return -EEXIST;
7035
7036 if (PTR_ERR(op) != -ENOMSG)
7037 return PTR_ERR(op);
7038
7039 if (eol)
 7040 /* The options have been ended by TCPOPT_EOL. Treat it
 7041 * as if no more header options can be written.
 7042 */
7043 return -ENOSPC;
7044
7045 /* No duplication found. Store the header option. */
7046 memcpy(opend, from, new_kind_len);
7047
7048 bpf_sock->remaining_opt_len -= new_kind_len;
7049 bpf_sock->skb_data_end += new_kind_len;
7050
7051 return 0;
7052}
7053
7054static const struct bpf_func_proto bpf_sock_ops_store_hdr_opt_proto = {
7055 .func = bpf_sock_ops_store_hdr_opt,
7056 .gpl_only = false,
7057 .ret_type = RET_INTEGER,
7058 .arg1_type = ARG_PTR_TO_CTX,
216e3cd2 7059 .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY,
0813a841
MKL
7060 .arg3_type = ARG_CONST_SIZE,
7061 .arg4_type = ARG_ANYTHING,
7062};
7063
7064BPF_CALL_3(bpf_sock_ops_reserve_hdr_opt, struct bpf_sock_ops_kern *, bpf_sock,
7065 u32, len, u64, flags)
7066{
7067 if (bpf_sock->op != BPF_SOCK_OPS_HDR_OPT_LEN_CB)
7068 return -EPERM;
7069
7070 if (flags || len < 2)
7071 return -EINVAL;
7072
7073 if (len > bpf_sock->remaining_opt_len)
7074 return -ENOSPC;
7075
7076 bpf_sock->remaining_opt_len -= len;
7077
7078 return 0;
7079}
7080
7081static const struct bpf_func_proto bpf_sock_ops_reserve_hdr_opt_proto = {
7082 .func = bpf_sock_ops_reserve_hdr_opt,
7083 .gpl_only = false,
7084 .ret_type = RET_INTEGER,
7085 .arg1_type = ARG_PTR_TO_CTX,
7086 .arg2_type = ARG_ANYTHING,
7087 .arg3_type = ARG_ANYTHING,
7088};
7089
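/* Illustrative sketch (not part of the kernel source): writing an option is
 * a two-step dance - reserve the space in BPF_SOCK_OPS_HDR_OPT_LEN_CB, then
 * store it in BPF_SOCK_OPS_WRITE_HDR_OPT_CB. The option bytes are the same
 * hypothetical experimental option as above.
 *
 *	SEC("sockops")
 *	int write_opt(struct bpf_sock_ops *skops)
 *	{
 *		__u8 opt[4] = { TCPOPT_EXP, 4, 0xeB, 0x9F };
 *
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_HDR_OPT_LEN_CB:
 *			bpf_reserve_hdr_opt(skops, sizeof(opt), 0);
 *			break;
 *		case BPF_SOCK_OPS_WRITE_HDR_OPT_CB:
 *			bpf_store_hdr_opt(skops, opt, sizeof(opt), 0);
 *			break;
 *		}
 *		return 1;
 *	}
 */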
df3f94a0 7090#endif /* CONFIG_INET */
6acc9b43 7091
fe94cc29
MX
7092bool bpf_helper_changes_pkt_data(void *func)
7093{
7094 if (func == bpf_skb_vlan_push ||
7095 func == bpf_skb_vlan_pop ||
7096 func == bpf_skb_store_bytes ||
7097 func == bpf_skb_change_proto ||
7098 func == bpf_skb_change_head ||
0ea488ff 7099 func == sk_skb_change_head ||
fe94cc29 7100 func == bpf_skb_change_tail ||
0ea488ff 7101 func == sk_skb_change_tail ||
fe94cc29 7102 func == bpf_skb_adjust_room ||
18ebe16d 7103 func == sk_skb_adjust_room ||
fe94cc29 7104 func == bpf_skb_pull_data ||
0ea488ff 7105 func == sk_skb_pull_data ||
fe94cc29
MX
7106 func == bpf_clone_redirect ||
7107 func == bpf_l3_csum_replace ||
7108 func == bpf_l4_csum_replace ||
7109 func == bpf_xdp_adjust_head ||
7110 func == bpf_xdp_adjust_meta ||
7111 func == bpf_msg_pull_data ||
6fff607e 7112 func == bpf_msg_push_data ||
7246d8ed 7113 func == bpf_msg_pop_data ||
fe94cc29 7114 func == bpf_xdp_adjust_tail ||
61d76980 7115#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
fe94cc29
MX
7116 func == bpf_lwt_seg6_store_bytes ||
7117 func == bpf_lwt_seg6_adjust_srh ||
61d76980 7118 func == bpf_lwt_seg6_action ||
0813a841
MKL
7119#endif
7120#ifdef CONFIG_INET
7121 func == bpf_sock_ops_store_hdr_opt ||
61d76980 7122#endif
3e0bd37c
PO
7123 func == bpf_lwt_in_push_encap ||
7124 func == bpf_lwt_xmit_push_encap)
fe94cc29
MX
7125 return true;
7126
7127 return false;
7128}
7129
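/* Illustrative sketch (not part of the kernel source): because the helpers
 * listed above may change packet data, the verifier invalidates all packet
 * pointers across such a call, so programs must re-load and re-check them:
 *
 *	bpf_skb_pull_data(skb, len);
 *	data = (void *)(long)skb->data;		// re-load after the call
 *	data_end = (void *)(long)skb->data_end;
 *	if (data + sizeof(struct ethhdr) > data_end)
 *		return TC_ACT_SHOT;
 */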
6890896b 7130const struct bpf_func_proto bpf_event_output_data_proto __weak;
f7c6cb1d 7131const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto __weak;
89aa0758 7132
static const struct bpf_func_proto *
sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	/* inet and inet6 sockets are created in a process
	 * context so there is always a valid uid/gid
	 */
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_sock_proto;
	case BPF_FUNC_get_netns_cookie:
		return &bpf_get_netns_cookie_sock_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_get_current_ancestor_cgroup_id:
		return &bpf_get_current_ancestor_cgroup_id_proto;
#endif
#ifdef CONFIG_CGROUP_NET_CLASSID
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_curr_proto;
#endif
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_cg_sock_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	/* inet and inet6 sockets are created in a process
	 * context so there is always a valid uid/gid
	 */
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_bind:
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
			return &bpf_bind_proto;
		default:
			return NULL;
		}
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_sock_addr_proto;
	case BPF_FUNC_get_netns_cookie:
		return &bpf_get_netns_cookie_sock_addr_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_get_current_comm:
		return &bpf_get_current_comm_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_get_current_ancestor_cgroup_id:
		return &bpf_get_current_ancestor_cgroup_id_proto;
#endif
#ifdef CONFIG_CGROUP_NET_CLASSID
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_curr_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_sk_lookup_tcp:
		return &bpf_sock_addr_sk_lookup_tcp_proto;
	case BPF_FUNC_sk_lookup_udp:
		return &bpf_sock_addr_sk_lookup_udp_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
	case BPF_FUNC_skc_lookup_tcp:
		return &bpf_sock_addr_skc_lookup_tcp_proto;
#endif /* CONFIG_INET */
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP6_SENDMSG:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
			return &bpf_sock_addr_setsockopt_proto;
		default:
			return NULL;
		}
	case BPF_FUNC_getsockopt:
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_UDP4_RECVMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP6_SENDMSG:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
			return &bpf_sock_addr_getsockopt_proto;
		default:
			return NULL;
		}
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

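/* Socket filters only inspect the packet, so their helper set is
 * limited to loads and socket metadata queries.
 */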
static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_load_bytes_relative:
		return &bpf_skb_load_bytes_relative_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

const struct bpf_func_proto bpf_sk_storage_get_proto __weak;
const struct bpf_func_proto bpf_sk_storage_delete_proto __weak;

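/* cgroup/skb programs extend the socket-filter set: ids not matched
 * below are delegated to sk_filter_func_proto().
 */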
static const struct bpf_func_proto *
cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_sk_fullsock:
		return &bpf_sk_fullsock_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
#ifdef CONFIG_SOCK_CGROUP_DATA
	case BPF_FUNC_skb_cgroup_id:
		return &bpf_skb_cgroup_id_proto;
	case BPF_FUNC_skb_ancestor_cgroup_id:
		return &bpf_skb_ancestor_cgroup_id_proto;
	case BPF_FUNC_sk_cgroup_id:
		return &bpf_sk_cgroup_id_proto;
	case BPF_FUNC_sk_ancestor_cgroup_id:
		return &bpf_sk_ancestor_cgroup_id_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_sk_lookup_tcp:
		return &bpf_sk_lookup_tcp_proto;
	case BPF_FUNC_sk_lookup_udp:
		return &bpf_sk_lookup_udp_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
	case BPF_FUNC_skc_lookup_tcp:
		return &bpf_skc_lookup_tcp_proto;
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
	case BPF_FUNC_get_listener_sock:
		return &bpf_get_listener_sock_proto;
	case BPF_FUNC_skb_ecn_set_ce:
		return &bpf_skb_ecn_set_ce_proto;
#endif
	default:
		return sk_filter_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_load_bytes_relative:
		return &bpf_skb_load_bytes_relative_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_csum_level:
		return &bpf_csum_level_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_skb_vlan_push:
		return &bpf_skb_vlan_push_proto;
	case BPF_FUNC_skb_vlan_pop:
		return &bpf_skb_vlan_pop_proto;
	case BPF_FUNC_skb_change_proto:
		return &bpf_skb_change_proto_proto;
	case BPF_FUNC_skb_change_type:
		return &bpf_skb_change_type_proto;
	case BPF_FUNC_skb_adjust_room:
		return &bpf_skb_adjust_room_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_redirect_neigh:
		return &bpf_redirect_neigh_proto;
	case BPF_FUNC_redirect_peer:
		return &bpf_redirect_peer_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	case BPF_FUNC_set_hash:
		return &bpf_set_hash_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	case BPF_FUNC_fib_lookup:
		return &bpf_skb_fib_lookup_proto;
	case BPF_FUNC_check_mtu:
		return &bpf_skb_check_mtu_proto;
	case BPF_FUNC_sk_fullsock:
		return &bpf_sk_fullsock_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#ifdef CONFIG_XFRM
	case BPF_FUNC_skb_get_xfrm_state:
		return &bpf_skb_get_xfrm_state_proto;
#endif
#ifdef CONFIG_CGROUP_NET_CLASSID
	case BPF_FUNC_skb_cgroup_classid:
		return &bpf_skb_cgroup_classid_proto;
#endif
#ifdef CONFIG_SOCK_CGROUP_DATA
	case BPF_FUNC_skb_cgroup_id:
		return &bpf_skb_cgroup_id_proto;
	case BPF_FUNC_skb_ancestor_cgroup_id:
		return &bpf_skb_ancestor_cgroup_id_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_sk_lookup_tcp:
		return &bpf_sk_lookup_tcp_proto;
	case BPF_FUNC_sk_lookup_udp:
		return &bpf_sk_lookup_udp_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
	case BPF_FUNC_get_listener_sock:
		return &bpf_get_listener_sock_proto;
	case BPF_FUNC_skc_lookup_tcp:
		return &bpf_skc_lookup_tcp_proto;
	case BPF_FUNC_tcp_check_syncookie:
		return &bpf_tcp_check_syncookie_proto;
	case BPF_FUNC_skb_ecn_set_ce:
		return &bpf_skb_ecn_set_ce_proto;
	case BPF_FUNC_tcp_gen_syncookie:
		return &bpf_tcp_gen_syncookie_proto;
	case BPF_FUNC_sk_assign:
		return &bpf_sk_assign_proto;
#endif
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_xdp_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_xdp_adjust_head:
		return &bpf_xdp_adjust_head_proto;
	case BPF_FUNC_xdp_adjust_meta:
		return &bpf_xdp_adjust_meta_proto;
	case BPF_FUNC_redirect:
		return &bpf_xdp_redirect_proto;
	case BPF_FUNC_redirect_map:
		return &bpf_xdp_redirect_map_proto;
	case BPF_FUNC_xdp_adjust_tail:
		return &bpf_xdp_adjust_tail_proto;
	case BPF_FUNC_fib_lookup:
		return &bpf_xdp_fib_lookup_proto;
	case BPF_FUNC_check_mtu:
		return &bpf_xdp_check_mtu_proto;
#ifdef CONFIG_INET
	case BPF_FUNC_sk_lookup_udp:
		return &bpf_xdp_sk_lookup_udp_proto;
	case BPF_FUNC_sk_lookup_tcp:
		return &bpf_xdp_sk_lookup_tcp_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
	case BPF_FUNC_skc_lookup_tcp:
		return &bpf_xdp_skc_lookup_tcp_proto;
	case BPF_FUNC_tcp_check_syncookie:
		return &bpf_tcp_check_syncookie_proto;
	case BPF_FUNC_tcp_gen_syncookie:
		return &bpf_tcp_gen_syncookie_proto;
#endif
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

const struct bpf_func_proto bpf_sock_map_update_proto __weak;
const struct bpf_func_proto bpf_sock_hash_update_proto __weak;

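/* sockops programs run from TCP event callbacks; under CONFIG_INET they
 * additionally get the header-option helpers implemented earlier in
 * this file.
 */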
static const struct bpf_func_proto *
sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_sock_ops_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_sock_ops_getsockopt_proto;
	case BPF_FUNC_sock_ops_cb_flags_set:
		return &bpf_sock_ops_cb_flags_set_proto;
	case BPF_FUNC_sock_map_update:
		return &bpf_sock_map_update_proto;
	case BPF_FUNC_sock_hash_update:
		return &bpf_sock_hash_update_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_sock_ops_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_get_netns_cookie:
		return &bpf_get_netns_cookie_sock_ops_proto;
#ifdef CONFIG_INET
	case BPF_FUNC_load_hdr_opt:
		return &bpf_sock_ops_load_hdr_opt_proto;
	case BPF_FUNC_store_hdr_opt:
		return &bpf_sock_ops_store_hdr_opt_proto;
	case BPF_FUNC_reserve_hdr_opt:
		return &bpf_sock_ops_reserve_hdr_opt_proto;
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif /* CONFIG_INET */
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;

static const struct bpf_func_proto *
sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_msg_redirect_map:
		return &bpf_msg_redirect_map_proto;
	case BPF_FUNC_msg_redirect_hash:
		return &bpf_msg_redirect_hash_proto;
	case BPF_FUNC_msg_apply_bytes:
		return &bpf_msg_apply_bytes_proto;
	case BPF_FUNC_msg_cork_bytes:
		return &bpf_msg_cork_bytes_proto;
	case BPF_FUNC_msg_pull_data:
		return &bpf_msg_pull_data_proto;
	case BPF_FUNC_msg_push_data:
		return &bpf_msg_push_data_proto;
	case BPF_FUNC_msg_pop_data:
		return &bpf_msg_pop_data_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_current_pid_tgid:
		return &bpf_get_current_pid_tgid_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_get_netns_cookie:
		return &bpf_get_netns_cookie_sk_msg_proto;
#ifdef CONFIG_CGROUPS
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_get_current_ancestor_cgroup_id:
		return &bpf_get_current_ancestor_cgroup_id_proto;
#endif
#ifdef CONFIG_CGROUP_NET_CLASSID
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_curr_proto;
#endif
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;

static const struct bpf_func_proto *
sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &sk_skb_pull_data_proto;
	case BPF_FUNC_skb_change_tail:
		return &sk_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &sk_skb_change_head_proto;
	case BPF_FUNC_skb_adjust_room:
		return &sk_skb_adjust_room_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	case BPF_FUNC_sk_redirect_map:
		return &bpf_sk_redirect_map_proto;
	case BPF_FUNC_sk_redirect_hash:
		return &bpf_sk_redirect_hash_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
#ifdef CONFIG_INET
	case BPF_FUNC_sk_lookup_tcp:
		return &bpf_sk_lookup_tcp_proto;
	case BPF_FUNC_sk_lookup_udp:
		return &bpf_sk_lookup_udp_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
	case BPF_FUNC_skc_lookup_tcp:
		return &bpf_skc_lookup_tcp_proto;
#endif
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_flow_dissector_load_bytes_proto;
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	default:
		return bpf_sk_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_lwt_push_encap:
		return &bpf_lwt_in_push_encap_proto;
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_csum_level:
		return &bpf_csum_level_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	case BPF_FUNC_lwt_push_encap:
		return &bpf_lwt_xmit_push_encap_proto;
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
	case BPF_FUNC_lwt_seg6_store_bytes:
		return &bpf_lwt_seg6_store_bytes_proto;
	case BPF_FUNC_lwt_seg6_action:
		return &bpf_lwt_seg6_action_proto;
	case BPF_FUNC_lwt_seg6_adjust_srh:
		return &bpf_lwt_seg6_adjust_srh_proto;
#endif
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}

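/* Common __sk_buff access checker shared by all skb-based program
 * types.  Most fields default to 4-byte wide accesses; narrower reads
 * may be allowed via bpf_ctx_narrow_access_ok(), while writes must
 * normally be exactly size_default wide.
 */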
static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
		if (off + size > offsetofend(struct __sk_buff, cb[4]))
			return false;
		break;
	case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
	case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
	case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
	case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, data_end):
		if (size != size_default)
			return false;
		break;
	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
		return false;
	case bpf_ctx_range(struct __sk_buff, hwtstamp):
		if (type == BPF_WRITE || size != sizeof(__u64))
			return false;
		break;
	case bpf_ctx_range(struct __sk_buff, tstamp):
		if (size != sizeof(__u64))
			return false;
		break;
	case offsetof(struct __sk_buff, sk):
		if (type == BPF_WRITE || size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
		break;
	case offsetofend(struct __sk_buff, gso_size) ... offsetof(struct __sk_buff, hwtstamp) - 1:
		/* Explicitly prohibit access to padding in __sk_buff. */
		return false;
	default:
		/* Only narrow read access allowed for now. */
		if (type == BPF_WRITE) {
			if (size != size_default)
				return false;
		} else {
			bpf_ctx_record_field_size(info, size_default);
			if (!bpf_ctx_narrow_access_ok(off, size, size_default))
				return false;
		}
	}

	return true;
}

static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, data_end):
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
	case bpf_ctx_range(struct __sk_buff, tstamp):
	case bpf_ctx_range(struct __sk_buff, wire_len):
	case bpf_ctx_range(struct __sk_buff, hwtstamp):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

static bool cg_skb_is_valid_access(int off, int size,
				   enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, wire_len):
		return false;
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_end):
		if (!bpf_capable())
			return false;
		break;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		case bpf_ctx_range(struct __sk_buff, tstamp):
			if (!bpf_capable())
				return false;
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

static bool lwt_is_valid_access(int off, int size,
				enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, tstamp):
	case bpf_ctx_range(struct __sk_buff, wire_len):
	case bpf_ctx_range(struct __sk_buff, hwtstamp):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

/* Attach type specific accesses */
static bool __sock_filter_check_attach_type(int off,
					    enum bpf_access_type access_type,
					    enum bpf_attach_type attach_type)
{
	switch (off) {
	case offsetof(struct bpf_sock, bound_dev_if):
	case offsetof(struct bpf_sock, mark):
	case offsetof(struct bpf_sock, priority):
		switch (attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
		case BPF_CGROUP_INET_SOCK_RELEASE:
			goto full_access;
		default:
			return false;
		}
	case bpf_ctx_range(struct bpf_sock, src_ip4):
		switch (attach_type) {
		case BPF_CGROUP_INET4_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
		switch (attach_type) {
		case BPF_CGROUP_INET6_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	case bpf_ctx_range(struct bpf_sock, src_port):
		switch (attach_type) {
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	}
read_only:
	return access_type == BPF_READ;
full_access:
	return true;
}

bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range_till(struct bpf_sock, type, priority):
		return false;
	default:
		return bpf_sock_is_valid_access(off, size, type, info);
	}
}

bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sock))
		return false;
	if (off % size != 0)
		return false;

	switch (off) {
	case offsetof(struct bpf_sock, state):
	case offsetof(struct bpf_sock, family):
	case offsetof(struct bpf_sock, type):
	case offsetof(struct bpf_sock, protocol):
	case offsetof(struct bpf_sock, dst_port):
	case offsetof(struct bpf_sock, src_port):
	case offsetof(struct bpf_sock, rx_queue_mapping):
	case bpf_ctx_range(struct bpf_sock, src_ip4):
	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
	case bpf_ctx_range(struct bpf_sock, dst_ip4):
	case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	}

	return size == size_default;
}

static bool sock_filter_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (!bpf_sock_is_valid_access(off, size, type, info))
		return false;
	return __sock_filter_check_attach_type(off, type,
					       prog->expected_attach_type);
}

static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
			     const struct bpf_prog *prog)
{
	/* Neither direct read nor direct write requires any preliminary
	 * action.
	 */
	return 0;
}

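/* Prologue for programs that may write directly into the packet.  A
 * cloned skb shares its data area, so the generated instructions call
 * bpf_skb_pull_data(skb, 0) to unclone it first and return
 * drop_verdict if that fails.
 */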
static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
				const struct bpf_prog *prog, int drop_verdict)
{
	struct bpf_insn *insn = insn_buf;

	if (!direct_write)
		return 0;

	/* if (!skb->cloned)
	 *       goto start;
	 *
	 * (Fast-path, otherwise approximation that we might be
	 *  a clone, do the rest in helper.)
	 */
	*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET);
	*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);

	/* ret = bpf_skb_pull_data(skb, 0); */
	*insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	*insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
	*insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			       BPF_FUNC_skb_pull_data);
	/* if (!ret)
	 *      goto restore;
	 * return TC_ACT_SHOT;
	 */
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
	*insn++ = BPF_EXIT_INSN();

	/* restore: */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
	/* start: */
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

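/* Translates the legacy LD_ABS/LD_IND instructions into calls to the
 * skb load helpers.  Classic BPF conversion guarantees the context in
 * R6; on a negative helper return the program exits with R0 == 0.
 */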
static int bpf_gen_ld_abs(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf)
{
	bool indirect = BPF_MODE(orig->code) == BPF_IND;
	struct bpf_insn *insn = insn_buf;

	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
		if (orig->imm)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
	}
	/* We're guaranteed here that CTX is in R6. */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);

	switch (BPF_SIZE(orig->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
		break;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}

static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
}

static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, tc_index):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range(struct __sk_buff, tc_classid):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
		case bpf_ctx_range(struct __sk_buff, tstamp):
		case bpf_ctx_range(struct __sk_buff, queue_mapping):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
		return false;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

static bool __is_valid_xdp_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct xdp_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static bool xdp_is_valid_access(int off, int size,
				enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info)
{
	if (prog->expected_attach_type != BPF_XDP_DEVMAP) {
		switch (off) {
		case offsetof(struct xdp_md, egress_ifindex):
			return false;
		}
	}

	if (type == BPF_WRITE) {
		if (bpf_prog_is_dev_bound(prog->aux)) {
			switch (off) {
			case offsetof(struct xdp_md, rx_queue_index):
				return __is_valid_xdp_access(off, size);
			}
		}
		return false;
	}

	switch (off) {
	case offsetof(struct xdp_md, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct xdp_md, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case offsetof(struct xdp_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_xdp_access(off, size);
}

void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act)
{
	const u32 act_max = XDP_REDIRECT;

	pr_warn_once("%s XDP return value %u on prog %s (id %d) dev %s, expect packet loss!\n",
		     act > act_max ? "Illegal" : "Driver unsupported",
		     act, prog->aux->name, prog->aux->id, dev ? dev->name : "N/A");
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);

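/* bpf_sock_addr context checker.  Which address fields are accessible
 * depends on the attach type, so IPv4-only hooks never see IPv6 fields
 * and vice versa.
 */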
static bool sock_addr_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sock_addr))
		return false;
	if (off % size != 0)
		return false;

	/* Disallow access to IPv6 fields from IPv4 context and vice
	 * versa.
	 */
	switch (off) {
	case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET4_GETPEERNAME:
		case BPF_CGROUP_INET4_GETSOCKNAME:
		case BPF_CGROUP_UDP4_SENDMSG:
		case BPF_CGROUP_UDP4_RECVMSG:
			break;
		default:
			return false;
		}
		break;
	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_INET6_GETPEERNAME:
		case BPF_CGROUP_INET6_GETSOCKNAME:
		case BPF_CGROUP_UDP6_SENDMSG:
		case BPF_CGROUP_UDP6_RECVMSG:
			break;
		default:
			return false;
		}
		break;
	case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_UDP4_SENDMSG:
			break;
		default:
			return false;
		}
		break;
	case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
				msg_src_ip6[3]):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_UDP6_SENDMSG:
			break;
		default:
			return false;
		}
		break;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
	case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
	case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
				msg_src_ip6[3]):
	case bpf_ctx_range(struct bpf_sock_addr, user_port):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);

			if (bpf_ctx_wide_access_ok(off, size,
						   struct bpf_sock_addr,
						   user_ip6))
				return true;

			if (bpf_ctx_wide_access_ok(off, size,
						   struct bpf_sock_addr,
						   msg_src_ip6))
				return true;

			if (!bpf_ctx_narrow_access_ok(off, size, size_default))
				return false;
		} else {
			if (bpf_ctx_wide_access_ok(off, size,
						   struct bpf_sock_addr,
						   user_ip6))
				return true;

			if (bpf_ctx_wide_access_ok(off, size,
						   struct bpf_sock_addr,
						   msg_src_ip6))
				return true;

			if (size != size_default)
				return false;
		}
		break;
	case offsetof(struct bpf_sock_addr, sk):
		if (type != BPF_READ)
			return false;
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	default:
		if (type == BPF_READ) {
			if (size != size_default)
				return false;
		} else {
			return false;
		}
	}

	return true;
}

static bool sock_ops_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     const struct bpf_prog *prog,
				     struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sock_ops))
		return false;

	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sock_ops, reply):
		case offsetof(struct bpf_sock_ops, sk_txhash):
			if (size != size_default)
				return false;
			break;
		default:
			return false;
		}
	} else {
		switch (off) {
		case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
					bytes_acked):
			if (size != sizeof(__u64))
				return false;
			break;
		case offsetof(struct bpf_sock_ops, sk):
			if (size != sizeof(__u64))
				return false;
			info->reg_type = PTR_TO_SOCKET_OR_NULL;
			break;
		case offsetof(struct bpf_sock_ops, skb_data):
			if (size != sizeof(__u64))
				return false;
			info->reg_type = PTR_TO_PACKET;
			break;
		case offsetof(struct bpf_sock_ops, skb_data_end):
			if (size != sizeof(__u64))
				return false;
			info->reg_type = PTR_TO_PACKET_END;
			break;
		case offsetof(struct bpf_sock_ops, skb_tcp_flags):
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size,
							size_default);
		default:
			if (size != size_default)
				return false;
			break;
		}
	}

	return true;
}

static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
			   const struct bpf_prog *prog)
{
	return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
}

static bool sk_skb_is_valid_access(int off, int size,
				   enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, tstamp):
	case bpf_ctx_range(struct __sk_buff, wire_len):
	case bpf_ctx_range(struct __sk_buff, hwtstamp):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, tc_index):
		case bpf_ctx_range(struct __sk_buff, priority):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, mark):
		return false;
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

static bool sk_msg_is_valid_access(int off, int size,
				   enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE)
		return false;

	if (off % size != 0)
		return false;

	switch (off) {
	case offsetof(struct sk_msg_md, data):
		info->reg_type = PTR_TO_PACKET;
		if (size != sizeof(__u64))
			return false;
		break;
	case offsetof(struct sk_msg_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		if (size != sizeof(__u64))
			return false;
		break;
	case offsetof(struct sk_msg_md, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case bpf_ctx_range(struct sk_msg_md, family):
	case bpf_ctx_range(struct sk_msg_md, remote_ip4):
	case bpf_ctx_range(struct sk_msg_md, local_ip4):
	case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
	case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
	case bpf_ctx_range(struct sk_msg_md, remote_port):
	case bpf_ctx_range(struct sk_msg_md, local_port):
	case bpf_ctx_range(struct sk_msg_md, size):
		if (size != sizeof(__u32))
			return false;
		break;
	default:
		return false;
	}
	return true;
}

static bool flow_dissector_is_valid_access(int off, int size,
					   enum bpf_access_type type,
					   const struct bpf_prog *prog,
					   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	if (type == BPF_WRITE)
		return false;

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		if (size != size_default)
			return false;
		info->reg_type = PTR_TO_PACKET;
		return true;
	case bpf_ctx_range(struct __sk_buff, data_end):
		if (size != size_default)
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		return true;
	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_FLOW_KEYS;
		return true;
	default:
		return false;
	}
}

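/* Flow dissector programs are written against __sk_buff, but their real
 * context is struct bpf_flow_dissector; rewrite the three permitted
 * fields to the corresponding members of that struct.
 */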
static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
					     const struct bpf_insn *si,
					     struct bpf_insn *insn_buf,
					     struct bpf_prog *prog,
					     u32 *target_size)

{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct __sk_buff, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_flow_dissector, data));
		break;

	case offsetof(struct __sk_buff, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_flow_dissector, data_end));
		break;

	case offsetof(struct __sk_buff, flow_keys):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_flow_dissector, flow_keys));
		break;
	}

	return insn - insn_buf;
}

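/* Emits "dst_reg = skb_shinfo(skb)".  With NET_SKBUFF_DATA_USES_OFFSET,
 * skb->end is an offset from skb->head rather than a pointer, so the
 * shared-info address must be computed as head + end.
 */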
static struct bpf_insn *bpf_convert_shinfo_access(const struct bpf_insn *si,
						  struct bpf_insn *insn)
{
	/* si->dst_reg = skb_shinfo(SKB); */
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
			      BPF_REG_AX, si->src_reg,
			      offsetof(struct sk_buff, end));
	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
			      si->dst_reg, si->src_reg,
			      offsetof(struct sk_buff, head));
	*insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
#else
	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
			      si->dst_reg, si->src_reg,
			      offsetof(struct sk_buff, end));
#endif

	return insn;
}

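/* The main __sk_buff conversion: at verifier fixup time every context
 * access is rewritten into loads/stores on the underlying sk_buff (or
 * on the control block stashed in skb->cb).  bpf_target_off() reports
 * the real field width through *target_size so that narrow accesses
 * get patched correctly.
 */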
static u32 bpf_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, len):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, len, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, protocol):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, protocol, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, vlan_proto):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, vlan_proto, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, priority):
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, priority, 4,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, priority, 4,
							     target_size));
		break;

	case offsetof(struct __sk_buff, ingress_ifindex):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, skb_iif, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct net_device, ifindex, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, hash):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, hash, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, mark):
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, mark, 4,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, mark, 4,
							     target_size));
		break;

	case offsetof(struct __sk_buff, pkt_type):
		*target_size = 1;
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
				      PKT_TYPE_OFFSET);
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
#endif
		break;

	case offsetof(struct __sk_buff, queue_mapping):
		if (type == BPF_WRITE) {
			*insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff,
							     queue_mapping,
							     2, target_size));
		} else {
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff,
							     queue_mapping,
							     2, target_size));
		}
		break;

	case offsetof(struct __sk_buff, vlan_present):
		*target_size = 1;
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
				      PKT_VLAN_PRESENT_OFFSET);
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
		break;

	case offsetof(struct __sk_buff, vlan_tci):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, vlan_tci, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetofend(struct __sk_buff, cb[4]) - 1:
		BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20);
		BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
			      offsetof(struct qdisc_skb_cb, data)) %
			     sizeof(__u64));

		prog->cb_access = 1;
		off = si->off;
		off -= offsetof(struct __sk_buff, cb[0]);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_classid):
		BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2);

		off = si->off;
		off -= offsetof(struct __sk_buff, tc_classid);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, tc_classid);
		*target_size = 2;
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, data));
		break;

	case offsetof(struct __sk_buff, data_meta):
		off = si->off;
		off -= offsetof(struct __sk_buff, data_meta);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct bpf_skb_data_end, data_meta);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, data_end):
		off = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct bpf_skb_data_end, data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_index):
#ifdef CONFIG_NET_SCHED
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, tc_index, 2,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, tc_index, 2,
							     target_size));
#else
		*target_size = 2;
		if (type == BPF_WRITE)
			*insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
		else
			*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct __sk_buff, napi_id):
#if defined(CONFIG_NET_RX_BUSY_POLL)
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, napi_id, 4,
						     target_size));
		*insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#else
		*target_size = 4;
		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
		break;
	case offsetof(struct __sk_buff, family):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_family,
						     2, target_size));
		break;
	case offsetof(struct __sk_buff, remote_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_daddr,
						     4, target_size));
		break;
	case offsetof(struct __sk_buff, local_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_rcv_saddr,
						     4, target_size));
		break;
	case offsetof(struct __sk_buff, remote_ip6[0]) ...
	     offsetof(struct __sk_buff, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct __sk_buff, remote_ip6[0]);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;
	case offsetof(struct __sk_buff, local_ip6[0]) ...
	     offsetof(struct __sk_buff, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct __sk_buff, local_ip6[0]);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct __sk_buff, remote_port):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
8831 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8832 bpf_target_off(struct sock_common,
8833 skc_dport,
8834 2, target_size));
8835#ifndef __BIG_ENDIAN_BITFIELD
8836 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
8837#endif
8838 break;
8839
8840 case offsetof(struct __sk_buff, local_port):
c593642c 8841 BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);
8a31db56
JF
8842
8843 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8844 si->dst_reg, si->src_reg,
8845 offsetof(struct sk_buff, sk));
8846 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8847 bpf_target_off(struct sock_common,
8848 skc_num, 2, target_size));
8849 break;
d58e468b 8850
f11216b2 8851 case offsetof(struct __sk_buff, tstamp):
c593642c 8852 BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8);
f11216b2
VD
8853
8854 if (type == BPF_WRITE)
8855 *insn++ = BPF_STX_MEM(BPF_DW,
8856 si->dst_reg, si->src_reg,
8857 bpf_target_off(struct sk_buff,
8858 tstamp, 8,
8859 target_size));
8860 else
8861 *insn++ = BPF_LDX_MEM(BPF_DW,
8862 si->dst_reg, si->src_reg,
8863 bpf_target_off(struct sk_buff,
8864 tstamp, 8,
8865 target_size));
e3da08d0
PP
8866 break;
8867
d9ff286a 8868 case offsetof(struct __sk_buff, gso_segs):
cf62089b 8869 insn = bpf_convert_shinfo_access(si, insn);
d9ff286a
ED
8870 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
8871 si->dst_reg, si->dst_reg,
8872 bpf_target_off(struct skb_shared_info,
8873 gso_segs, 2,
8874 target_size));
8875 break;
cf62089b
WB
8876 case offsetof(struct __sk_buff, gso_size):
8877 insn = bpf_convert_shinfo_access(si, insn);
8878 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_size),
8879 si->dst_reg, si->dst_reg,
8880 bpf_target_off(struct skb_shared_info,
8881 gso_size, 2,
8882 target_size));
8883 break;
e3da08d0 8884 case offsetof(struct __sk_buff, wire_len):
c593642c 8885 BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4);
e3da08d0
PP
8886
8887 off = si->off;
8888 off -= offsetof(struct __sk_buff, wire_len);
8889 off += offsetof(struct sk_buff, cb);
8890 off += offsetof(struct qdisc_skb_cb, pkt_len);
8891 *target_size = 4;
8892 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
46f8bc92
MKL
8893 break;
8894
8895 case offsetof(struct __sk_buff, sk):
8896 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
8897 si->dst_reg, si->src_reg,
8898 offsetof(struct sk_buff, sk));
8899 break;
f64c4ace
VF
8900 case offsetof(struct __sk_buff, hwtstamp):
8901 BUILD_BUG_ON(sizeof_field(struct skb_shared_hwtstamps, hwtstamp) != 8);
8902 BUILD_BUG_ON(offsetof(struct skb_shared_hwtstamps, hwtstamp) != 0);
8903
8904 insn = bpf_convert_shinfo_access(si, insn);
8905 *insn++ = BPF_LDX_MEM(BPF_DW,
8906 si->dst_reg, si->dst_reg,
8907 bpf_target_off(struct skb_shared_info,
8908 hwtstamps, 8,
8909 target_size));
8910 break;
9bac3d6d
AS
8911 }
8912
8913 return insn - insn_buf;
89aa0758
AS
8914}
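
/* Illustrative sketch, not part of this file: from the program side the
 * rewrites above are invisible. A tc classifier written in restricted C
 * simply dereferences __sk_buff, and each access is converted into the
 * native load/store emitted above (e.g. cb[] becomes qdisc_skb_cb.data).
 * Assumes a clang + libbpf toolchain; the section name follows libbpf
 * conventions.
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("tc")
 *	int count_cb(struct __sk_buff *skb)
 *	{
 *		skb->cb[0]++;			// load+store of qdisc_skb_cb.data
 *		if (skb->vlan_present)		// BPF_B load at PKT_VLAN_PRESENT_OFFSET
 *			return 0;		// TC_ACT_OK
 *		return 0;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */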

u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock, bound_dev_if):
		BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_bound_dev_if));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_bound_dev_if));
		break;

	case offsetof(struct bpf_sock, mark):
		BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_mark));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_mark));
		break;

	case offsetof(struct bpf_sock, priority):
		BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_priority));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_priority));
		break;

	case offsetof(struct bpf_sock, family):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock_common, skc_family),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common,
				       skc_family,
				       sizeof_field(struct sock_common,
						    skc_family),
				       target_size));
		break;

	case offsetof(struct bpf_sock, type):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock, sk_type),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock, sk_type,
				       sizeof_field(struct sock, sk_type),
				       target_size));
		break;

	case offsetof(struct bpf_sock, protocol):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock, sk_protocol),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock, sk_protocol,
				       sizeof_field(struct sock, sk_protocol),
				       target_size));
		break;

	case offsetof(struct bpf_sock, src_ip4):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_rcv_saddr,
				       sizeof_field(struct sock_common,
						    skc_rcv_saddr),
				       target_size));
		break;

	case offsetof(struct bpf_sock, dst_ip4):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_daddr,
				       sizeof_field(struct sock_common,
						    skc_daddr),
				       target_size));
		break;

	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		off = si->off;
		off -= offsetof(struct bpf_sock, src_ip6[0]);
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(
				struct sock_common,
				skc_v6_rcv_saddr.s6_addr32[0],
				sizeof_field(struct sock_common,
					     skc_v6_rcv_saddr.s6_addr32[0]),
				target_size) + off);
#else
		(void)off;
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		off = si->off;
		off -= offsetof(struct bpf_sock, dst_ip6[0]);
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common,
				       skc_v6_daddr.s6_addr32[0],
				       sizeof_field(struct sock_common,
						    skc_v6_daddr.s6_addr32[0]),
				       target_size) + off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
		*target_size = 4;
#endif
		break;

	case offsetof(struct bpf_sock, src_port):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock_common, skc_num),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_num,
				       sizeof_field(struct sock_common,
						    skc_num),
				       target_size));
		break;

	case offsetof(struct bpf_sock, dst_port):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_dport,
				       sizeof_field(struct sock_common,
						    skc_dport),
				       target_size));
		break;

	case offsetof(struct bpf_sock, state):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock_common, skc_state),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_state,
				       sizeof_field(struct sock_common,
						    skc_state),
				       target_size));
		break;
	case offsetof(struct bpf_sock, rx_queue_mapping):
#ifdef CONFIG_SOCK_RX_QUEUE_MAPPING
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock, sk_rx_queue_mapping),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock, sk_rx_queue_mapping,
				       sizeof_field(struct sock,
						    sk_rx_queue_mapping),
				       target_size));
		*insn++ = BPF_JMP_IMM(BPF_JNE, si->dst_reg, NO_QUEUE_MAPPING,
				      1);
		*insn++ = BPF_MOV64_IMM(si->dst_reg, -1);
#else
		*insn++ = BPF_MOV64_IMM(si->dst_reg, -1);
		*target_size = 2;
#endif
		break;
	}

	return insn - insn_buf;
}
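
/* Illustrative sketch, not part of this file: a BPF_PROG_TYPE_CGROUP_SOCK
 * program sees the bpf_sock fields handled above directly; writes to mark,
 * priority and bound_dev_if take the BPF_WRITE path. Assumes clang + libbpf;
 * the section name follows libbpf conventions.
 *
 *	SEC("cgroup/sock_create")
 *	int tag_new_socket(struct bpf_sock *sk)
 *	{
 *		sk->mark = 0x2a;	// rewritten to a store to sk_mark
 *		return 1;		// allow socket creation
 *	}
 */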

static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct __sk_buff, ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct net_device, ifindex, 4,
						     target_size));
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}

static u32 xdp_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct xdp_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data));
		break;
	case offsetof(struct xdp_md, data_meta):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_meta));
		break;
	case offsetof(struct xdp_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_end));
		break;
	case offsetof(struct xdp_md, ingress_ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, rxq));
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_rxq_info, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct net_device, ifindex));
		break;
	case offsetof(struct xdp_md, rx_queue_index):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, rxq));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_rxq_info,
					       queue_index));
		break;
	case offsetof(struct xdp_md, egress_ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, txq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, txq));
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_txq_info, dev),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_txq_info, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct net_device, ifindex));
		break;
	}

	return insn - insn_buf;
}
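
/* Illustrative sketch, not part of this file: the conversions above are what
 * let an XDP program treat xdp_md as a window onto xdp_buff. A minimal
 * bounds-checked program, assuming clang + libbpf:
 *
 *	#include <linux/bpf.h>
 *	#include <linux/if_ether.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("xdp")
 *	int xdp_min(struct xdp_md *ctx)
 *	{
 *		void *data     = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *
 *		if (data + sizeof(struct ethhdr) > data_end)
 *			return XDP_DROP;	// frame too short
 *		return XDP_PASS;
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */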

/* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF where S is type of
 * context Structure, F is Field in context structure that contains a pointer
 * to Nested Structure of type NS that has the field NF.
 *
 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to caller to make
 * sure that SIZE is not greater than actual size of S.F.NF.
 *
 * If offset OFF is provided, the load happens from that offset relative to
 * offset of NF.
 */
#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF)	\
	do {								\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
				      si->src_reg, offsetof(S, F));	\
		*insn++ = BPF_LDX_MEM(					\
			SIZE, si->dst_reg, si->dst_reg,			\
			bpf_target_off(NS, NF, sizeof_field(NS, NF),	\
				       target_size)			\
				+ OFF);					\
	} while (0)

#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF)			\
	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF,		\
					     BPF_FIELD_SIZEOF(NS, NF), 0)

/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for a store operation.
 *
 * In addition it uses Temporary Field TF (member of struct S) as the 3rd
 * "register" since the two registers available in convert_ctx_access are
 * not enough: we can't overwrite SRC, since it contains the value to store,
 * nor DST, since it contains the pointer to the context that may be used by
 * later instructions. But we need a temporary place to save the pointer to
 * the nested structure whose field we want to store to.
 */
#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF)	\
	do {								\
		int tmp_reg = BPF_REG_9;				\
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	\
			--tmp_reg;					\
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	\
			--tmp_reg;					\
		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg,	\
				      offsetof(S, TF));			\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,	\
				      si->dst_reg, offsetof(S, F));	\
		*insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg,	\
			bpf_target_off(NS, NF, sizeof_field(NS, NF),	\
				       target_size)			\
				+ OFF);					\
		*insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,	\
				      offsetof(S, TF));			\
	} while (0)

#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
						      TF)		\
	do {								\
		if (type == BPF_WRITE) {				\
			SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, \
							 OFF, TF);	\
		} else {						\
			SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(		\
				S, NS, F, NF, SIZE, OFF);		\
		}							\
	} while (0)

#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF)		\
	SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(			\
		S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
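
/* Illustrative sketch, not part of this file: for a BPF_WRITE to, say,
 * bpf_sock_addr.user_ip4, SOCK_ADDR_STORE_NESTED_FIELD_OFF() emits roughly
 * the following pseudo-instruction sequence, with tmp_reg chosen so it
 * collides with neither si->src_reg nor si->dst_reg:
 *
 *	ctx->TF = tmp_reg;		// spill the borrowed register
 *	tmp_reg = ctx->uaddr;		// pointer to the nested sockaddr_in
 *	tmp_reg->sin_addr = src_reg;	// the actual store
 *	tmp_reg = ctx->TF;		// restore the borrowed register
 */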

static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
					const struct bpf_insn *si,
					struct bpf_insn *insn_buf,
					struct bpf_prog *prog, u32 *target_size)
{
	int off, port_size = sizeof_field(struct sockaddr_in6, sin6_port);
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sock_addr, user_family):
		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
					    struct sockaddr, uaddr, sa_family);
		break;

	case offsetof(struct bpf_sock_addr, user_ip4):
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
			sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
		break;

	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
		off = si->off;
		off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
			sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
			tmp_reg);
		break;

	case offsetof(struct bpf_sock_addr, user_port):
		/* To get the port we need to know sa_family first and then
		 * treat sockaddr as either sockaddr_in or sockaddr_in6.
		 * We can simplify, though, since the port field has the same
		 * offset and size in both structures. Here we check this
		 * invariant and use just one of the structures if it holds.
		 */
		BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
			     offsetof(struct sockaddr_in6, sin6_port));
		BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) !=
			     sizeof_field(struct sockaddr_in6, sin6_port));
		/* Account for sin6_port being smaller than user_port. */
		port_size = min(port_size, BPF_LDST_BYTES(si));
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
			sin6_port, bytes_to_bpf_size(port_size), 0, tmp_reg);
		break;

	case offsetof(struct bpf_sock_addr, family):
		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
					    struct sock, sk, sk_family);
		break;

	case offsetof(struct bpf_sock_addr, type):
		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
					    struct sock, sk, sk_type);
		break;

	case offsetof(struct bpf_sock_addr, protocol):
		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
					    struct sock, sk, sk_protocol);
		break;

	case offsetof(struct bpf_sock_addr, msg_src_ip4):
		/* Treat t_ctx as struct in_addr for msg_src_ip4. */
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct in_addr, t_ctx,
			s_addr, BPF_SIZE(si->code), 0, tmp_reg);
		break;

	case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
				msg_src_ip6[3]):
		off = si->off;
		off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
		/* Treat t_ctx as struct in6_addr for msg_src_ip6. */
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
			s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
		break;
	case offsetof(struct bpf_sock_addr, sk):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_addr_kern, sk));
		break;
	}

	return insn - insn_buf;
}
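
/* Illustrative sketch, not part of this file: a cgroup/bind4 program sees
 * bpf_sock_addr through the conversions above; user_port is kept in network
 * byte order. Assumes clang + libbpf (bpf_endian.h provides bpf_ntohs).
 *
 *	#include <linux/bpf.h>
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_endian.h>
 *
 *	SEC("cgroup/bind4")
 *	int deny_privileged_bind(struct bpf_sock_addr *ctx)
 *	{
 *		if (bpf_ntohs(ctx->user_port) < 1024)
 *			return 0;	// reject the bind()
 *		return 1;		// allow
 *	}
 *
 *	char _license[] SEC("license") = "GPL";
 */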

static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
				       const struct bpf_insn *si,
				       struct bpf_insn *insn_buf,
				       struct bpf_prog *prog,
				       u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

/* Helper macro for adding read access to tcp_sock or sock fields. */
#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			\
	do {								\
		int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 2; \
		BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >		\
			     sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
		if (si->dst_reg == reg || si->src_reg == reg)		\
			reg--;						\
		if (si->dst_reg == reg || si->src_reg == reg)		\
			reg--;						\
		if (si->dst_reg == si->src_reg) {			\
			*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,	\
					      offsetof(struct bpf_sock_ops_kern, \
						       temp));		\
			fullsock_reg = reg;				\
			jmp += 2;					\
		}							\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			\
						struct bpf_sock_ops_kern, \
						is_fullsock),		\
				      fullsock_reg, si->src_reg,	\
				      offsetof(struct bpf_sock_ops_kern, \
					       is_fullsock));		\
		*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);	\
		if (si->dst_reg == si->src_reg)				\
			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	\
				      offsetof(struct bpf_sock_ops_kern, \
				      temp));				\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			\
						struct bpf_sock_ops_kern, sk),\
				      si->dst_reg, si->src_reg,		\
				      offsetof(struct bpf_sock_ops_kern, sk));\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,		\
						       OBJ_FIELD),	\
				      si->dst_reg, si->dst_reg,		\
				      offsetof(OBJ, OBJ_FIELD));	\
		if (si->dst_reg == si->src_reg)	{			\
			*insn++ = BPF_JMP_A(1);				\
			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	\
				      offsetof(struct bpf_sock_ops_kern, \
				      temp));				\
		}							\
	} while (0)

#define SOCK_OPS_GET_SK()						\
	do {								\
		int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
		if (si->dst_reg == reg || si->src_reg == reg)		\
			reg--;						\
		if (si->dst_reg == reg || si->src_reg == reg)		\
			reg--;						\
		if (si->dst_reg == si->src_reg) {			\
			*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg,	\
					      offsetof(struct bpf_sock_ops_kern, \
						       temp));		\
			fullsock_reg = reg;				\
			jmp += 2;					\
		}							\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			\
						struct bpf_sock_ops_kern, \
						is_fullsock),		\
				      fullsock_reg, si->src_reg,	\
				      offsetof(struct bpf_sock_ops_kern, \
					       is_fullsock));		\
		*insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp);	\
		if (si->dst_reg == si->src_reg)				\
			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	\
				      offsetof(struct bpf_sock_ops_kern, \
				      temp));				\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			\
						struct bpf_sock_ops_kern, sk),\
				      si->dst_reg, si->src_reg,		\
				      offsetof(struct bpf_sock_ops_kern, sk));\
		if (si->dst_reg == si->src_reg)	{			\
			*insn++ = BPF_JMP_A(1);				\
			*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg,	\
				      offsetof(struct bpf_sock_ops_kern, \
				      temp));				\
		}							\
	} while (0)

#define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
		SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)

/* Helper macro for adding write access to tcp_sock or sock fields.
 * The macro is called with two registers, dst_reg which contains a pointer
 * to ctx (context) and src_reg which contains the value that should be
 * stored. However, we need an additional register since we cannot overwrite
 * dst_reg because it may be used later in the program.
 * Instead we "borrow" one of the other registers. We first save its value
 * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
 * it at the end of the macro.
 */
#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			\
	do {								\
		int reg = BPF_REG_9;					\
		BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) >		\
			     sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \
		if (si->dst_reg == reg || si->src_reg == reg)		\
			reg--;						\
		if (si->dst_reg == reg || si->src_reg == reg)		\
			reg--;						\
		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg,		\
				      offsetof(struct bpf_sock_ops_kern, \
					       temp));			\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			\
						struct bpf_sock_ops_kern, \
						is_fullsock),		\
				      reg, si->dst_reg,			\
				      offsetof(struct bpf_sock_ops_kern, \
					       is_fullsock));		\
		*insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);		\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			\
						struct bpf_sock_ops_kern, sk),\
				      reg, si->dst_reg,			\
				      offsetof(struct bpf_sock_ops_kern, sk));\
		*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),	\
				      reg, si->src_reg,			\
				      offsetof(OBJ, OBJ_FIELD));	\
		*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg,		\
				      offsetof(struct bpf_sock_ops_kern, \
					       temp));			\
	} while (0)

#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE)	\
	do {								\
		if (TYPE == BPF_WRITE)					\
			SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	\
		else							\
			SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	\
	} while (0)

	if (insn > insn_buf)
		return insn - insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sock_ops, op):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       op),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, op));
		break;

	case offsetof(struct bpf_sock_ops, replylong[0]) ...
	     offsetof(struct bpf_sock_ops, replylong[3]):
		BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) !=
			     sizeof_field(struct bpf_sock_ops_kern, reply));
		BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) !=
			     sizeof_field(struct bpf_sock_ops_kern, replylong));
		off = si->off;
		off -= offsetof(struct bpf_sock_ops, replylong[0]);
		off += offsetof(struct bpf_sock_ops_kern, replylong[0]);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		break;

	case offsetof(struct bpf_sock_ops, family):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;

	case offsetof(struct bpf_sock_ops, local_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, remote_port):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_port):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;

	case offsetof(struct bpf_sock_ops, is_fullsock):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern,
						is_fullsock),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       is_fullsock));
		break;

	case offsetof(struct bpf_sock_ops, state):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_state));
		break;

	case offsetof(struct bpf_sock_ops, rtt_min):
		BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) !=
			     sizeof(struct minmax));
		BUILD_BUG_ON(sizeof(struct minmax) <
			     sizeof(struct minmax_sample));

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct tcp_sock, rtt_min) +
				      sizeof_field(struct minmax_sample, t));
		break;

	case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
		SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, sk_txhash):
		SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
					  struct sock, type);
		break;
	case offsetof(struct bpf_sock_ops, snd_cwnd):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd);
		break;
	case offsetof(struct bpf_sock_ops, srtt_us):
		SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us);
		break;
	case offsetof(struct bpf_sock_ops, snd_ssthresh):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh);
		break;
	case offsetof(struct bpf_sock_ops, rcv_nxt):
		SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt);
		break;
	case offsetof(struct bpf_sock_ops, snd_nxt):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt);
		break;
	case offsetof(struct bpf_sock_ops, snd_una):
		SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una);
		break;
	case offsetof(struct bpf_sock_ops, mss_cache):
		SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache);
		break;
	case offsetof(struct bpf_sock_ops, ecn_flags):
		SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags);
		break;
	case offsetof(struct bpf_sock_ops, rate_delivered):
		SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered);
		break;
	case offsetof(struct bpf_sock_ops, rate_interval_us):
		SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us);
		break;
	case offsetof(struct bpf_sock_ops, packets_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out);
		break;
	case offsetof(struct bpf_sock_ops, retrans_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out);
		break;
	case offsetof(struct bpf_sock_ops, total_retrans):
		SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans);
		break;
	case offsetof(struct bpf_sock_ops, segs_in):
		SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in);
		break;
	case offsetof(struct bpf_sock_ops, data_segs_in):
		SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in);
		break;
	case offsetof(struct bpf_sock_ops, segs_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out);
		break;
	case offsetof(struct bpf_sock_ops, data_segs_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out);
		break;
	case offsetof(struct bpf_sock_ops, lost_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out);
		break;
	case offsetof(struct bpf_sock_ops, sacked_out):
		SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out);
		break;
	case offsetof(struct bpf_sock_ops, bytes_received):
		SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received);
		break;
	case offsetof(struct bpf_sock_ops, bytes_acked):
		SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
		break;
	case offsetof(struct bpf_sock_ops, sk):
		SOCK_OPS_GET_SK();
		break;
	case offsetof(struct bpf_sock_ops, skb_data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb_data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb_data_end));
		break;
	case offsetof(struct bpf_sock_ops, skb_data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct sk_buff, data));
		break;
	case offsetof(struct bpf_sock_ops, skb_len):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct sk_buff, len));
		break;
	case offsetof(struct bpf_sock_ops, skb_tcp_flags):
		off = offsetof(struct sk_buff, cb);
		off += offsetof(struct tcp_skb_cb, tcp_flags);
		*target_size = sizeof_field(struct tcp_skb_cb, tcp_flags);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_ops_kern,
						       skb),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       skb));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_skb_cb,
						       tcp_flags),
				      si->dst_reg, si->dst_reg, off);
		break;
	}
	return insn - insn_buf;
}
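
/* Illustrative sketch, not part of this file: the tcp_sock fields exposed
 * above can be read from a BPF_PROG_TYPE_SOCK_OPS program like any struct
 * member; the is_fullsock guard emitted by the macros yields 0 when the
 * callback runs on a non-full socket. Assumes clang + libbpf.
 *
 *	SEC("sockops")
 *	int log_rtt(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_RTT_CB)
 *			bpf_printk("cwnd=%u srtt_us=%u",
 *				   skops->snd_cwnd, skops->srtt_us);
 *		return 1;
 *	}
 */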

/* data_end = skb->data + skb_headlen() */
static struct bpf_insn *bpf_convert_data_end_access(const struct bpf_insn *si,
						    struct bpf_insn *insn)
{
	int reg;
	int temp_reg_off = offsetof(struct sk_buff, cb) +
			   offsetof(struct sk_skb_cb, temp_reg);

	if (si->src_reg == si->dst_reg) {
		/* We need an extra register, choose and save a register. */
		reg = BPF_REG_9;
		if (si->src_reg == reg || si->dst_reg == reg)
			reg--;
		if (si->src_reg == reg || si->dst_reg == reg)
			reg--;
		*insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, temp_reg_off);
	} else {
		reg = si->dst_reg;
	}

	/* reg = skb->data */
	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
			      reg, si->src_reg,
			      offsetof(struct sk_buff, data));
	/* AX = skb->len */
	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, len),
			      BPF_REG_AX, si->src_reg,
			      offsetof(struct sk_buff, len));
	/* reg = skb->data + skb->len */
	*insn++ = BPF_ALU64_REG(BPF_ADD, reg, BPF_REG_AX);
	/* AX = skb->data_len */
	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data_len),
			      BPF_REG_AX, si->src_reg,
			      offsetof(struct sk_buff, data_len));

	/* reg = skb->data + skb->len - skb->data_len */
	*insn++ = BPF_ALU64_REG(BPF_SUB, reg, BPF_REG_AX);

	if (si->src_reg == si->dst_reg) {
		/* Restore the saved register */
		*insn++ = BPF_MOV64_REG(BPF_REG_AX, si->src_reg);
		*insn++ = BPF_MOV64_REG(si->dst_reg, reg);
		*insn++ = BPF_LDX_MEM(BPF_DW, reg, BPF_REG_AX, temp_reg_off);
	}

	return insn;
}
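
/* In eBPF terms the sequence above computes
 *
 *	reg  = skb->data;
 *	reg += skb->len;	// total length
 *	reg -= skb->data_len;	// minus paged (non-linear) length
 *
 * which matches skb->data + skb_headlen(skb): the direct packet window of
 * an sk_skb program ends at the linear data.
 */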

static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, data_end):
		insn = bpf_convert_data_end_access(si, insn);
		break;
	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetofend(struct __sk_buff, cb[4]) - 1:
		BUILD_BUG_ON(sizeof_field(struct sk_skb_cb, data) < 20);
		BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
			      offsetof(struct sk_skb_cb, data)) %
			     sizeof(__u64));

		prog->cb_access = 1;
		off  = si->off;
		off -= offsetof(struct __sk_buff, cb[0]);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct sk_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		break;

	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}
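
/* Illustrative sketch, not part of this file: a stream verdict program
 * attached to a sockmap uses the data/data_end window built above (linear
 * data only). Assumes clang + libbpf; SK_PASS/SK_DROP are the verdicts.
 *
 *	SEC("sk_skb/stream_verdict")
 *	int verdict(struct __sk_buff *skb)
 *	{
 *		void *data     = (void *)(long)skb->data;
 *		void *data_end = (void *)(long)skb->data_end;
 *
 *		if (data + 1 > data_end)
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 */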

static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
#if IS_ENABLED(CONFIG_IPV6)
	int off;
#endif

	/* ctx conversion relies on the sg element being first in the struct */
	BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);

	switch (si->off) {
	case offsetof(struct sk_msg_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data));
		break;
	case offsetof(struct sk_msg_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data_end));
		break;
	case offsetof(struct sk_msg_md, family):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;

	case offsetof(struct sk_msg_md, remote_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;

	case offsetof(struct sk_msg_md, local_ip4):
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;

	case offsetof(struct sk_msg_md, remote_ip6[0]) ...
	     offsetof(struct sk_msg_md, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct sk_msg_md, local_ip6[0]) ...
	     offsetof(struct sk_msg_md, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(sizeof_field(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct sk_msg_md, remote_port):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct sk_msg_md, local_port):
		BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;

	case offsetof(struct sk_msg_md, size):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg_sg, size));
		break;

	case offsetof(struct sk_msg_md, sk):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		break;
	}

	return insn - insn_buf;
}
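
/* Illustrative sketch, not part of this file: an sk_msg program attached to
 * a sockmap reads the sk_msg_md fields mapped above. Assumes clang + libbpf.
 *
 *	SEC("sk_msg")
 *	int msg_filter(struct sk_msg_md *msg)
 *	{
 *		if (msg->size > 4096)	// sk_msg_sg.size, per the case above
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 */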

const struct bpf_verifier_ops sk_filter_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops sk_filter_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
	.gen_ld_abs		= bpf_gen_ld_abs,
	.check_kfunc_call	= bpf_prog_test_check_kfunc_call,
};

const struct bpf_prog_ops tc_cls_act_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops xdp_verifier_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
	.gen_prologue		= bpf_noop_prologue,
};

const struct bpf_prog_ops xdp_prog_ops = {
	.test_run		= bpf_prog_test_run_xdp,
};

const struct bpf_verifier_ops cg_skb_verifier_ops = {
	.get_func_proto		= cg_skb_func_proto,
	.is_valid_access	= cg_skb_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops cg_skb_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_in_verifier_ops = {
	.get_func_proto		= lwt_in_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_in_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_out_verifier_ops = {
	.get_func_proto		= lwt_out_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_out_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops lwt_xmit_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
	.get_func_proto		= lwt_seg6local_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_seg6local_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops cg_sock_verifier_ops = {
	.get_func_proto		= sock_filter_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= bpf_sock_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_prog_ops = {
};

const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
	.get_func_proto		= sock_addr_func_proto,
	.is_valid_access	= sock_addr_is_valid_access,
	.convert_ctx_access	= sock_addr_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_addr_prog_ops = {
};

const struct bpf_verifier_ops sock_ops_verifier_ops = {
	.get_func_proto		= sock_ops_func_proto,
	.is_valid_access	= sock_ops_is_valid_access,
	.convert_ctx_access	= sock_ops_convert_ctx_access,
};

const struct bpf_prog_ops sock_ops_prog_ops = {
};

const struct bpf_verifier_ops sk_skb_verifier_ops = {
	.get_func_proto		= sk_skb_func_proto,
	.is_valid_access	= sk_skb_is_valid_access,
	.convert_ctx_access	= sk_skb_convert_ctx_access,
	.gen_prologue		= sk_skb_prologue,
};

const struct bpf_prog_ops sk_skb_prog_ops = {
};

const struct bpf_verifier_ops sk_msg_verifier_ops = {
	.get_func_proto		= sk_msg_func_proto,
	.is_valid_access	= sk_msg_is_valid_access,
	.convert_ctx_access	= sk_msg_convert_ctx_access,
	.gen_prologue		= bpf_noop_prologue,
};

const struct bpf_prog_ops sk_msg_prog_ops = {
};

const struct bpf_verifier_ops flow_dissector_verifier_ops = {
	.get_func_proto		= flow_dissector_func_proto,
	.is_valid_access	= flow_dissector_is_valid_access,
	.convert_ctx_access	= flow_dissector_convert_ctx_access,
};

const struct bpf_prog_ops flow_dissector_prog_ops = {
	.test_run		= bpf_prog_test_run_flow_dissector,
};
10131
8ced425e 10132int sk_detach_filter(struct sock *sk)
55b33325
PE
10133{
10134 int ret = -ENOENT;
10135 struct sk_filter *filter;
10136
d59577b6
VB
10137 if (sock_flag(sk, SOCK_FILTER_LOCKED))
10138 return -EPERM;
10139
8ced425e
HFS
10140 filter = rcu_dereference_protected(sk->sk_filter,
10141 lockdep_sock_is_held(sk));
55b33325 10142 if (filter) {
a9b3cd7f 10143 RCU_INIT_POINTER(sk->sk_filter, NULL);
46bcf14f 10144 sk_filter_uncharge(sk, filter);
55b33325
PE
10145 ret = 0;
10146 }
a3ea269b 10147
55b33325
PE
10148 return ret;
10149}
8ced425e 10150EXPORT_SYMBOL_GPL(sk_detach_filter);
a8fc9277 10151
a3ea269b
DB
10152int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
10153 unsigned int len)
a8fc9277 10154{
a3ea269b 10155 struct sock_fprog_kern *fprog;
a8fc9277 10156 struct sk_filter *filter;
a3ea269b 10157 int ret = 0;
a8fc9277
PE
10158
10159 lock_sock(sk);
10160 filter = rcu_dereference_protected(sk->sk_filter,
8ced425e 10161 lockdep_sock_is_held(sk));
a8fc9277
PE
10162 if (!filter)
10163 goto out;
a3ea269b
DB
10164
10165 /* We're copying the filter that has been originally attached,
93d08b69
DB
10166 * so no conversion/decode needed anymore. eBPF programs that
10167 * have no original program cannot be dumped through this.
a3ea269b 10168 */
93d08b69 10169 ret = -EACCES;
7ae457c1 10170 fprog = filter->prog->orig_prog;
93d08b69
DB
10171 if (!fprog)
10172 goto out;
a3ea269b
DB
10173
10174 ret = fprog->len;
a8fc9277 10175 if (!len)
a3ea269b 10176 /* User space only enquires number of filter blocks. */
a8fc9277 10177 goto out;
a3ea269b 10178
a8fc9277 10179 ret = -EINVAL;
a3ea269b 10180 if (len < fprog->len)
a8fc9277
PE
10181 goto out;
10182
10183 ret = -EFAULT;
009937e7 10184 if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
a3ea269b 10185 goto out;
a8fc9277 10186
a3ea269b
DB
10187 /* Instead of bytes, the API requests to return the number
10188 * of filter blocks.
10189 */
10190 ret = fprog->len;
a8fc9277
PE
10191out:
10192 release_sock(sk);
10193 return ret;
10194}
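
/* Illustrative sketch, not part of this file: user space reaches
 * sk_detach_filter() and sk_get_filter() through socket options. A minimal
 * sequence (error handling elided), assuming the classic BPF socket filter
 * API:
 *
 *	struct sock_filter insns[] = {
 *		{ 0x06, 0, 0, 0x0000ffff },	// BPF_RET|BPF_K: accept
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = insns };
 *	socklen_t optlen = 0;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *	// optlen == 0 queries the number of filter blocks (sk_get_filter)
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &optlen);
 *	// drops the filter (sk_detach_filter)
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0);
 */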
2dbb9b9e
MKL
10195
10196#ifdef CONFIG_INET
2dbb9b9e
MKL
10197static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
10198 struct sock_reuseport *reuse,
10199 struct sock *sk, struct sk_buff *skb,
d5e4ddae 10200 struct sock *migrating_sk,
2dbb9b9e
MKL
10201 u32 hash)
10202{
10203 reuse_kern->skb = skb;
10204 reuse_kern->sk = sk;
10205 reuse_kern->selected_sk = NULL;
d5e4ddae 10206 reuse_kern->migrating_sk = migrating_sk;
2dbb9b9e
MKL
10207 reuse_kern->data_end = skb->data + skb_headlen(skb);
10208 reuse_kern->hash = hash;
10209 reuse_kern->reuseport_id = reuse->reuseport_id;
10210 reuse_kern->bind_inany = reuse->bind_inany;
10211}
10212
10213struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
10214 struct bpf_prog *prog, struct sk_buff *skb,
d5e4ddae 10215 struct sock *migrating_sk,
2dbb9b9e
MKL
10216 u32 hash)
10217{
10218 struct sk_reuseport_kern reuse_kern;
10219 enum sk_action action;
10220
d5e4ddae 10221 bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, migrating_sk, hash);
fb7dd8bc 10222 action = bpf_prog_run(prog, &reuse_kern);
2dbb9b9e
MKL
10223
10224 if (action == SK_PASS)
10225 return reuse_kern.selected_sk;
10226 else
10227 return ERR_PTR(-ECONNREFUSED);
10228}
10229
10230BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
10231 struct bpf_map *, map, void *, key, u32, flags)
10232{
9fed9000 10233 bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
2dbb9b9e
MKL
10234 struct sock_reuseport *reuse;
10235 struct sock *selected_sk;
10236
10237 selected_sk = map->ops->map_lookup_elem(map, key);
10238 if (!selected_sk)
10239 return -ENOENT;
10240
10241 reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
9fed9000 10242 if (!reuse) {
64d85290
JS
10243 /* Lookup in sock_map can return TCP ESTABLISHED sockets. */
10244 if (sk_is_refcounted(selected_sk))
10245 sock_put(selected_sk);
10246
9fed9000
JS
10247 /* reuseport_array has only sk with non NULL sk_reuseport_cb.
10248 * The only (!reuse) case here is - the sk has already been
10249 * unhashed (e.g. by close()), so treat it as -ENOENT.
10250 *
10251 * Other maps (e.g. sock_map) do not provide this guarantee and
10252 * the sk may never be in the reuseport group to begin with.
2dbb9b9e 10253 */
9fed9000
JS
10254 return is_sockarray ? -ENOENT : -EINVAL;
10255 }
2dbb9b9e
MKL
10256
10257 if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
035ff358 10258 struct sock *sk = reuse_kern->sk;
2dbb9b9e 10259
2dbb9b9e
MKL
10260 if (sk->sk_protocol != selected_sk->sk_protocol)
10261 return -EPROTOTYPE;
10262 else if (sk->sk_family != selected_sk->sk_family)
10263 return -EAFNOSUPPORT;
10264
10265 /* Catch all. Likely bound to a different sockaddr. */
10266 return -EBADFD;
10267 }
10268
10269 reuse_kern->selected_sk = selected_sk;
10270
10271 return 0;
10272}
10273
10274static const struct bpf_func_proto sk_select_reuseport_proto = {
10275 .func = sk_select_reuseport,
10276 .gpl_only = false,
10277 .ret_type = RET_INTEGER,
10278 .arg1_type = ARG_PTR_TO_CTX,
10279 .arg2_type = ARG_CONST_MAP_PTR,
10280 .arg3_type = ARG_PTR_TO_MAP_KEY,
10281 .arg4_type = ARG_ANYTHING,
10282};
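/*
 * Example BPF program side, a minimal sketch; the map name "reuse_map"
 * and the 16-entry sizing are assumptions, and user space is expected
 * to have stored the reuseport sockets' fds in the map:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
 *		__uint(max_entries, 16);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} reuse_map SEC(".maps");
 *
 *	SEC("sk_reuseport")
 *	int select_by_hash(struct sk_reuseport_md *reuse_md)
 *	{
 *		__u32 key = reuse_md->hash % 16;
 *
 *		if (bpf_sk_select_reuseport(reuse_md, &reuse_map, &key, 0))
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 */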
10283
10284BPF_CALL_4(sk_reuseport_load_bytes,
10285 const struct sk_reuseport_kern *, reuse_kern, u32, offset,
10286 void *, to, u32, len)
10287{
10288 return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
10289}
10290
10291static const struct bpf_func_proto sk_reuseport_load_bytes_proto = {
10292 .func = sk_reuseport_load_bytes,
10293 .gpl_only = false,
10294 .ret_type = RET_INTEGER,
10295 .arg1_type = ARG_PTR_TO_CTX,
10296 .arg2_type = ARG_ANYTHING,
10297 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
10298 .arg4_type = ARG_CONST_SIZE,
10299};
10300
10301BPF_CALL_5(sk_reuseport_load_bytes_relative,
10302 const struct sk_reuseport_kern *, reuse_kern, u32, offset,
10303 void *, to, u32, len, u32, start_header)
10304{
10305 return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
10306 len, start_header);
10307}
10308
10309static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = {
10310 .func = sk_reuseport_load_bytes_relative,
10311 .gpl_only = false,
10312 .ret_type = RET_INTEGER,
10313 .arg1_type = ARG_PTR_TO_CTX,
10314 .arg2_type = ARG_ANYTHING,
10315 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
10316 .arg4_type = ARG_CONST_SIZE,
10317 .arg5_type = ARG_ANYTHING,
10318};
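/*
 * Unlike bpf_skb_load_bytes(), the relative variant takes its offset
 * from a chosen header start (BPF_HDR_START_MAC or BPF_HDR_START_NET)
 * rather than from wherever skb->data currently points.  A minimal
 * sketch reading the IPv4 TTL from a reuseport program (IPv4 traffic
 * and struct iphdr availability assumed):
 *
 *	__u8 ttl;
 *
 *	if (bpf_skb_load_bytes_relative(reuse_md,
 *					offsetof(struct iphdr, ttl),
 *					&ttl, sizeof(ttl),
 *					BPF_HDR_START_NET))
 *		return SK_DROP;
 */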
10319
10320static const struct bpf_func_proto *
10321sk_reuseport_func_proto(enum bpf_func_id func_id,
10322 const struct bpf_prog *prog)
10323{
10324 switch (func_id) {
10325 case BPF_FUNC_sk_select_reuseport:
10326 return &sk_select_reuseport_proto;
10327 case BPF_FUNC_skb_load_bytes:
10328 return &sk_reuseport_load_bytes_proto;
10329 case BPF_FUNC_skb_load_bytes_relative:
10330 return &sk_reuseport_load_bytes_relative_proto;
10331 case BPF_FUNC_get_socket_cookie:
10332 return &bpf_get_socket_ptr_cookie_proto;
10333 case BPF_FUNC_ktime_get_coarse_ns:
10334 return &bpf_ktime_get_coarse_ns_proto;
10335 default:
10336 return bpf_base_func_proto(func_id);
10337 }
10338}
10339
10340static bool
10341sk_reuseport_is_valid_access(int off, int size,
10342 enum bpf_access_type type,
10343 const struct bpf_prog *prog,
10344 struct bpf_insn_access_aux *info)
10345{
10346 const u32 size_default = sizeof(__u32);
10347
10348 if (off < 0 || off >= sizeof(struct sk_reuseport_md) ||
10349 off % size || type != BPF_READ)
10350 return false;
10351
10352 switch (off) {
10353 case offsetof(struct sk_reuseport_md, data):
10354 info->reg_type = PTR_TO_PACKET;
10355 return size == sizeof(__u64);
10356
10357 case offsetof(struct sk_reuseport_md, data_end):
10358 info->reg_type = PTR_TO_PACKET_END;
10359 return size == sizeof(__u64);
10360
10361 case offsetof(struct sk_reuseport_md, hash):
10362 return size == size_default;
10363
10364 case offsetof(struct sk_reuseport_md, sk):
10365 info->reg_type = PTR_TO_SOCKET;
10366 return size == sizeof(__u64);
10367
10368 case offsetof(struct sk_reuseport_md, migrating_sk):
10369 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
10370 return size == sizeof(__u64);
10371
2dbb9b9e 10372 /* Fields that allow narrowing */
2c238177 10373 case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
c593642c 10374 if (size < sizeof_field(struct sk_buff, protocol))
2dbb9b9e 10375 return false;
df561f66 10376 fallthrough;
10377 case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
10378 case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
10379 case bpf_ctx_range(struct sk_reuseport_md, len):
10380 bpf_ctx_record_field_size(info, size_default);
10381 return bpf_ctx_narrow_access_ok(off, size, size_default);
10382
10383 default:
10384 return false;
10385 }
10386}
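/*
 * "Narrowing" above means the verifier also accepts loads smaller than
 * the 4-byte context field; for eth_protocol the load must still cover
 * the 2-byte skb->protocol.  A minimal sketch of a 2-byte read (clang
 * emits a half-word load for the cast):
 *
 *	__u16 proto = *(__u16 *)((void *)reuse_md +
 *			offsetof(struct sk_reuseport_md, eth_protocol));
 */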
10387
10388#define SK_REUSEPORT_LOAD_FIELD(F) ({ \
10389 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
10390 si->dst_reg, si->src_reg, \
10391 bpf_target_off(struct sk_reuseport_kern, F, \
c593642c 10392 sizeof_field(struct sk_reuseport_kern, F), \
10393 target_size)); \
10394 })
10395
10396#define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD) \
10397 SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \
10398 struct sk_buff, \
10399 skb, \
10400 SKB_FIELD)
10401
10402#define SK_REUSEPORT_LOAD_SK_FIELD(SK_FIELD) \
10403 SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \
10404 struct sock, \
10405 sk, \
10406 SK_FIELD)
10407
10408static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
10409 const struct bpf_insn *si,
10410 struct bpf_insn *insn_buf,
10411 struct bpf_prog *prog,
10412 u32 *target_size)
10413{
10414 struct bpf_insn *insn = insn_buf;
10415
10416 switch (si->off) {
10417 case offsetof(struct sk_reuseport_md, data):
10418 SK_REUSEPORT_LOAD_SKB_FIELD(data);
10419 break;
10420
10421 case offsetof(struct sk_reuseport_md, len):
10422 SK_REUSEPORT_LOAD_SKB_FIELD(len);
10423 break;
10424
10425 case offsetof(struct sk_reuseport_md, eth_protocol):
10426 SK_REUSEPORT_LOAD_SKB_FIELD(protocol);
10427 break;
10428
10429 case offsetof(struct sk_reuseport_md, ip_protocol):
bf976514 10430 SK_REUSEPORT_LOAD_SK_FIELD(sk_protocol);
10431 break;
10432
10433 case offsetof(struct sk_reuseport_md, data_end):
10434 SK_REUSEPORT_LOAD_FIELD(data_end);
10435 break;
10436
10437 case offsetof(struct sk_reuseport_md, hash):
10438 SK_REUSEPORT_LOAD_FIELD(hash);
10439 break;
10440
10441 case offsetof(struct sk_reuseport_md, bind_inany):
10442 SK_REUSEPORT_LOAD_FIELD(bind_inany);
10443 break;
10444
10445 case offsetof(struct sk_reuseport_md, sk):
10446 SK_REUSEPORT_LOAD_FIELD(sk);
10447 break;
10448
10449 case offsetof(struct sk_reuseport_md, migrating_sk):
10450 SK_REUSEPORT_LOAD_FIELD(migrating_sk);
10451 break;
10452 }
10453
10454 return insn - insn_buf;
10455}
10456
10457const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
10458 .get_func_proto = sk_reuseport_func_proto,
10459 .is_valid_access = sk_reuseport_is_valid_access,
10460 .convert_ctx_access = sk_reuseport_convert_ctx_access,
10461};
10462
10463const struct bpf_prog_ops sk_reuseport_prog_ops = {
10464};
7e6897f9 10465
10466DEFINE_STATIC_KEY_FALSE(bpf_sk_lookup_enabled);
10467EXPORT_SYMBOL(bpf_sk_lookup_enabled);
7e6897f9 10468
10469BPF_CALL_3(bpf_sk_lookup_assign, struct bpf_sk_lookup_kern *, ctx,
10470 struct sock *, sk, u64, flags)
7e6897f9 10471{
10472 if (unlikely(flags & ~(BPF_SK_LOOKUP_F_REPLACE |
10473 BPF_SK_LOOKUP_F_NO_REUSEPORT)))
10474 return -EINVAL;
10475 if (unlikely(sk && sk_is_refcounted(sk)))
10476 return -ESOCKTNOSUPPORT; /* reject sockets that are not RCU-freed */
10477 if (unlikely(sk && sk_is_tcp(sk) && sk->sk_state != TCP_LISTEN))
10478 return -ESOCKTNOSUPPORT; /* only accept TCP socket in LISTEN */
10479 if (unlikely(sk && sk_is_udp(sk) && sk->sk_state != TCP_CLOSE))
10480 return -ESOCKTNOSUPPORT; /* only accept UDP socket in CLOSE */
10481
10482 /* Check whether the socket is suitable for the packet's L3/L4 protocol. */
10483 if (sk && sk->sk_protocol != ctx->protocol)
10484 return -EPROTOTYPE;
10485 if (sk && sk->sk_family != ctx->family &&
10486 (sk->sk_family == AF_INET || ipv6_only_sock(sk)))
10487 return -EAFNOSUPPORT;
10488
10489 if (ctx->selected_sk && !(flags & BPF_SK_LOOKUP_F_REPLACE))
10490 return -EEXIST;
10491
10492 /* Select socket as lookup result */
10493 ctx->selected_sk = sk;
10494 ctx->no_reuseport = flags & BPF_SK_LOOKUP_F_NO_REUSEPORT;
10495 return 0;
7e6897f9 10496}
af7ec138 10497
10498static const struct bpf_func_proto bpf_sk_lookup_assign_proto = {
10499 .func = bpf_sk_lookup_assign,
10500 .gpl_only = false,
10501 .ret_type = RET_INTEGER,
10502 .arg1_type = ARG_PTR_TO_CTX,
10503 .arg2_type = ARG_PTR_TO_SOCKET_OR_NULL,
10504 .arg3_type = ARG_ANYTHING,
10505};
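/*
 * Example BPF program side, a minimal sketch; the map name
 * "redir_map", the key and the port check are assumptions, and user
 * space is expected to have stored a listening socket at key 0:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SOCKMAP);
 *		__uint(max_entries, 1);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} redir_map SEC(".maps");
 *
 *	SEC("sk_lookup")
 *	int redir_port_8080(struct bpf_sk_lookup *ctx)
 *	{
 *		struct bpf_sock *sk;
 *		__u32 key = 0;
 *		long err;
 *
 *		if (ctx->local_port != 8080)
 *			return SK_PASS;
 *
 *		sk = bpf_map_lookup_elem(&redir_map, &key);
 *		if (!sk)
 *			return SK_PASS;
 *
 *		err = bpf_sk_assign(ctx, sk, 0);
 *		bpf_sk_release(sk);
 *		return err ? SK_DROP : SK_PASS;
 *	}
 */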
10506
10507static const struct bpf_func_proto *
10508sk_lookup_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
10509{
10510 switch (func_id) {
10511 case BPF_FUNC_perf_event_output:
10512 return &bpf_event_output_data_proto;
10513 case BPF_FUNC_sk_assign:
10514 return &bpf_sk_lookup_assign_proto;
10515 case BPF_FUNC_sk_release:
10516 return &bpf_sk_release_proto;
10517 default:
1df8f55a 10518 return bpf_sk_base_func_proto(func_id);
10519 }
10520}
af7ec138 10521
10522static bool sk_lookup_is_valid_access(int off, int size,
10523 enum bpf_access_type type,
10524 const struct bpf_prog *prog,
10525 struct bpf_insn_access_aux *info)
10526{
10527 if (off < 0 || off >= sizeof(struct bpf_sk_lookup))
10528 return false;
10529 if (off % size != 0)
10530 return false;
10531 if (type != BPF_READ)
10532 return false;
10533
10534 switch (off) {
10535 case offsetof(struct bpf_sk_lookup, sk):
10536 info->reg_type = PTR_TO_SOCKET_OR_NULL;
10537 return size == sizeof(__u64);
af7ec138 10538
10539 case bpf_ctx_range(struct bpf_sk_lookup, family):
10540 case bpf_ctx_range(struct bpf_sk_lookup, protocol):
10541 case bpf_ctx_range(struct bpf_sk_lookup, remote_ip4):
10542 case bpf_ctx_range(struct bpf_sk_lookup, local_ip4):
10543 case bpf_ctx_range_till(struct bpf_sk_lookup, remote_ip6[0], remote_ip6[3]):
10544 case bpf_ctx_range_till(struct bpf_sk_lookup, local_ip6[0], local_ip6[3]):
10545 case bpf_ctx_range(struct bpf_sk_lookup, remote_port):
10546 case bpf_ctx_range(struct bpf_sk_lookup, local_port):
f8931565 10547 case bpf_ctx_range(struct bpf_sk_lookup, ingress_ifindex):
10548 bpf_ctx_record_field_size(info, sizeof(__u32));
10549 return bpf_ctx_narrow_access_ok(off, size, sizeof(__u32));
10550
10551 default:
10552 return false;
10553 }
10554}
10555
10556static u32 sk_lookup_convert_ctx_access(enum bpf_access_type type,
10557 const struct bpf_insn *si,
10558 struct bpf_insn *insn_buf,
10559 struct bpf_prog *prog,
10560 u32 *target_size)
af7ec138 10561{
10562 struct bpf_insn *insn = insn_buf;
10563
10564 switch (si->off) {
10565 case offsetof(struct bpf_sk_lookup, sk):
10566 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
10567 offsetof(struct bpf_sk_lookup_kern, selected_sk));
10568 break;
af7ec138 10569
10570 case offsetof(struct bpf_sk_lookup, family):
10571 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
10572 bpf_target_off(struct bpf_sk_lookup_kern,
10573 family, 2, target_size));
10574 break;
10575
10576 case offsetof(struct bpf_sk_lookup, protocol):
10577 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
10578 bpf_target_off(struct bpf_sk_lookup_kern,
10579 protocol, 2, target_size));
10580 break;
10581
10582 case offsetof(struct bpf_sk_lookup, remote_ip4):
10583 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
10584 bpf_target_off(struct bpf_sk_lookup_kern,
10585 v4.saddr, 4, target_size));
10586 break;
10587
10588 case offsetof(struct bpf_sk_lookup, local_ip4):
10589 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
10590 bpf_target_off(struct bpf_sk_lookup_kern,
10591 v4.daddr, 4, target_size));
10592 break;
10593
10594 case bpf_ctx_range_till(struct bpf_sk_lookup,
10595 remote_ip6[0], remote_ip6[3]): {
10596#if IS_ENABLED(CONFIG_IPV6)
10597 int off = si->off;
10598
10599 off -= offsetof(struct bpf_sk_lookup, remote_ip6[0]);
10600 off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size);
10601 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
10602 offsetof(struct bpf_sk_lookup_kern, v6.saddr));
10603 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
10604 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off);
10605#else
10606 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
10607#endif
10608 break;
10609 }
10610 case bpf_ctx_range_till(struct bpf_sk_lookup,
10611 local_ip6[0], local_ip6[3]): {
10612#if IS_ENABLED(CONFIG_IPV6)
10613 int off = si->off;
10614
10615 off -= offsetof(struct bpf_sk_lookup, local_ip6[0]);
10616 off += bpf_target_off(struct in6_addr, s6_addr32[0], 4, target_size);
10617 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg,
10618 offsetof(struct bpf_sk_lookup_kern, v6.daddr));
10619 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
10620 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, off);
10621#else
10622 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
10623#endif
10624 break;
af7ec138 10625 }
10626 case offsetof(struct bpf_sk_lookup, remote_port):
10627 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
10628 bpf_target_off(struct bpf_sk_lookup_kern,
10629 sport, 2, target_size));
10630 break;
10631
10632 case offsetof(struct bpf_sk_lookup, local_port):
10633 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
10634 bpf_target_off(struct bpf_sk_lookup_kern,
10635 dport, 2, target_size));
10636 break;
10637
10638 case offsetof(struct bpf_sk_lookup, ingress_ifindex):
10639 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
10640 bpf_target_off(struct bpf_sk_lookup_kern,
10641 ingress_ifindex, 4, target_size));
10642 break;
10643 }
10644
10645 return insn - insn_buf;
af7ec138 10646}
10647
10648const struct bpf_prog_ops sk_lookup_prog_ops = {
7c32e8f8 10649 .test_run = bpf_prog_test_run_sk_lookup,
10650};
10651
10652const struct bpf_verifier_ops sk_lookup_verifier_ops = {
10653 .get_func_proto = sk_lookup_func_proto,
10654 .is_valid_access = sk_lookup_is_valid_access,
10655 .convert_ctx_access = sk_lookup_convert_ctx_access,
10656};
10657
2dbb9b9e 10658#endif /* CONFIG_INET */
7e6897f9 10659
6a64037d 10660DEFINE_BPF_DISPATCHER(xdp)
10661
10662void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog)
10663{
6a64037d 10664 bpf_dispatcher_change_prog(BPF_DISPATCHER_PTR(xdp), prev_prog, prog);
7e6897f9 10665}
af7ec138 10666
9e2ad638 10667BTF_ID_LIST_GLOBAL(btf_sock_ids, MAX_BTF_SOCK_TYPE)
bc4f0548 10668#define BTF_SOCK_TYPE(name, type) BTF_ID(struct, type)
10669BTF_SOCK_TYPE_xxx
10670#undef BTF_SOCK_TYPE
af7ec138 10671
10672BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk)
10673{
10674 /* The tcp6_sock type is not generated in DWARF and hence not in
10675  * BTF; trigger an explicit type generation here.
10676  */
10677 BTF_TYPE_EMIT(struct tcp6_sock);
8c33dadc 10678 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP &&
10679 sk->sk_family == AF_INET6)
10680 return (unsigned long)sk;
10681
10682 return (unsigned long)NULL;
10683}
10684
10685const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = {
10686 .func = bpf_skc_to_tcp6_sock,
10687 .gpl_only = false,
10688 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
1df8f55a 10689 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
10690 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP6],
10691};
10692
10693BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk)
10694{
8c33dadc 10695 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
10696 return (unsigned long)sk;
10697
10698 return (unsigned long)NULL;
10699}
10700
10701const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = {
10702 .func = bpf_skc_to_tcp_sock,
10703 .gpl_only = false,
10704 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
1df8f55a 10705 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
10706 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP],
10707};
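/*
 * Example use of the skc_to_* casts, a minimal sketch from a tracing
 * program that holds a struct sock pointer; the returned pointer is
 * BTF-typed and must be NULL-checked before any field access:
 *
 *	struct tcp_sock *tp = bpf_skc_to_tcp_sock(sk);
 *	__u32 cwnd = 0;
 *
 *	if (tp)
 *		cwnd = tp->snd_cwnd;
 */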
10708
10709BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk)
10710{
10711 /* BTF types for tcp_timewait_sock and inet_timewait_sock are not
10712 * generated if CONFIG_INET=n. Trigger an explicit generation here.
10713 */
10714 BTF_TYPE_EMIT(struct inet_timewait_sock);
10715 BTF_TYPE_EMIT(struct tcp_timewait_sock);
10716
6b207d66 10717#ifdef CONFIG_INET
8c33dadc 10718 if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT)
478cfbdf 10719 return (unsigned long)sk;
6b207d66 10720#endif
10721
10722#if IS_BUILTIN(CONFIG_IPV6)
8c33dadc 10723 if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT)
10724 return (unsigned long)sk;
10725#endif
10726
10727 return (unsigned long)NULL;
10728}
10729
10730const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = {
10731 .func = bpf_skc_to_tcp_timewait_sock,
10732 .gpl_only = false,
10733 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
1df8f55a 10734 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
10735 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_TW],
10736};
10737
10738BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk)
10739{
6b207d66 10740#ifdef CONFIG_INET
8c33dadc 10741 if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV)
478cfbdf 10742 return (unsigned long)sk;
6b207d66 10743#endif
10744
10745#if IS_BUILTIN(CONFIG_IPV6)
8c33dadc 10746 if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV)
10747 return (unsigned long)sk;
10748#endif
10749
10750 return (unsigned long)NULL;
10751}
10752
10753const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto = {
10754 .func = bpf_skc_to_tcp_request_sock,
10755 .gpl_only = false,
10756 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
1df8f55a 10757 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
10758 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_TCP_REQ],
10759};
10760
10761BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk)
10762{
10763 /* The udp6_sock type is not generated in DWARF and hence not in
10764  * BTF; trigger an explicit type generation here.
10765  */
10766 BTF_TYPE_EMIT(struct udp6_sock);
8c33dadc 10767 if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP &&
10768 sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6)
10769 return (unsigned long)sk;
10770
10771 return (unsigned long)NULL;
10772}
10773
10774const struct bpf_func_proto bpf_skc_to_udp6_sock_proto = {
10775 .func = bpf_skc_to_udp6_sock,
10776 .gpl_only = false,
10777 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
1df8f55a 10778 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
10779 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UDP6],
10780};
1df8f55a 10781
10782BPF_CALL_1(bpf_skc_to_unix_sock, struct sock *, sk)
10783{
10784 /* The unix_sock type is not generated in DWARF and hence not in
10785  * BTF; trigger an explicit type generation here.
10786  */
10787 BTF_TYPE_EMIT(struct unix_sock);
10788 if (sk && sk_fullsock(sk) && sk->sk_family == AF_UNIX)
10789 return (unsigned long)sk;
10790
10791 return (unsigned long)NULL;
10792}
10793
10794const struct bpf_func_proto bpf_skc_to_unix_sock_proto = {
10795 .func = bpf_skc_to_unix_sock,
10796 .gpl_only = false,
10797 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
10798 .arg1_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
10799 .ret_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_UNIX],
10800};
10801
10802BPF_CALL_1(bpf_sock_from_file, struct file *, file)
10803{
10804 return (unsigned long)sock_from_file(file);
10805}
10806
10807BTF_ID_LIST(bpf_sock_from_file_btf_ids)
10808BTF_ID(struct, socket)
10809BTF_ID(struct, file)
10810
10811const struct bpf_func_proto bpf_sock_from_file_proto = {
10812 .func = bpf_sock_from_file,
10813 .gpl_only = false,
10814 .ret_type = RET_PTR_TO_BTF_ID_OR_NULL,
10815 .ret_btf_id = &bpf_sock_from_file_btf_ids[0],
10816 .arg1_type = ARG_PTR_TO_BTF_ID,
10817 .arg1_btf_id = &bpf_sock_from_file_btf_ids[1],
10818};
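/*
 * Example use from a tracing program, a minimal sketch assuming an
 * fentry- or LSM-style program that receives a struct file pointer;
 * a NULL return means the file does not refer to a socket:
 *
 *	struct socket *sock = bpf_sock_from_file(file);
 *
 *	if (!sock)
 *		return 0;
 *	bpf_printk("family %d", sock->sk->sk_family);
 */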
10819
10820static const struct bpf_func_proto *
10821bpf_sk_base_func_proto(enum bpf_func_id func_id)
10822{
10823 const struct bpf_func_proto *func;
10824
10825 switch (func_id) {
10826 case BPF_FUNC_skc_to_tcp6_sock:
10827 func = &bpf_skc_to_tcp6_sock_proto;
10828 break;
10829 case BPF_FUNC_skc_to_tcp_sock:
10830 func = &bpf_skc_to_tcp_sock_proto;
10831 break;
10832 case BPF_FUNC_skc_to_tcp_timewait_sock:
10833 func = &bpf_skc_to_tcp_timewait_sock_proto;
10834 break;
10835 case BPF_FUNC_skc_to_tcp_request_sock:
10836 func = &bpf_skc_to_tcp_request_sock_proto;
10837 break;
10838 case BPF_FUNC_skc_to_udp6_sock:
10839 func = &bpf_skc_to_udp6_sock_proto;
10840 break;
10841 case BPF_FUNC_skc_to_unix_sock:
10842 func = &bpf_skc_to_unix_sock_proto;
10843 break;
10844 case BPF_FUNC_ktime_get_coarse_ns:
10845 return &bpf_ktime_get_coarse_ns_proto;
10846 default:
10847 return bpf_base_func_proto(func_id);
10848 }
10849
10850 if (!perfmon_capable())
10851 return NULL;
10852
10853 return func;
10854}