bpf: add skc_lookup_tcp helper
net/core/filter.c
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);

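/* Usage sketch (illustrative, not part of this file): callers that do not
 * need a custom trim floor go through the sk_filter() wrapper, which in
 * mainline is a static inline in <linux/filter.h> along the lines of:
 *
 *	static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return sk_filter_trim_cap(sk, skb, 1);
 *	}
 *
 * A cap of 1 ensures an accepting filter can never trim the packet below
 * one byte.
 */
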
BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return *(u8 *)(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return tmp;
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return *(u8 *)ptr;
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
					 offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u16 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return get_unaligned_be16(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be16_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be16(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u32 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be32(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be32_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be32(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

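/* Note (illustrative, not part of this file): a negative offset in the
 * helpers above is how classic BPF addresses data relative to special
 * bases, e.g. SKF_NET_OFF + n for the network header and SKF_LL_OFF + n
 * for the link-layer header; bpf_internal_load_pointer_neg_helper()
 * resolves those bases back to real skb offsets.
 */
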
BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		break;
	case SKF_AD_VLAN_TAG_PRESENT:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		break;
	}

	return insn - insn_buf;
}

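/* Example (illustrative): for SKF_AD_MARK with dst_reg == BPF_REG_A and
 * src_reg == BPF_REG_CTX, the function above emits a single eBPF load,
 * equivalent to:
 *
 *	A = *(u32 *)(CTX + offsetof(struct sk_buff, mark));
 *
 * and returns 1, the number of instructions written to insn_buf.
 */
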
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

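/* Example (illustrative): the classic BPF instruction
 *
 *	ldh [SKF_AD_OFF + SKF_AD_PROTOCOL]
 *
 * hits the SKF_AD_PROTOCOL case above and is rewritten into two eBPF
 * instructions: a half-word load of skb->protocol into A, followed by
 * a BPF_ENDIAN byte swap (a nop on big-endian hosts).
 */
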
static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
	const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
	int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
	bool endian = BPF_SIZE(fp->code) == BPF_H ||
		      BPF_SIZE(fp->code) == BPF_W;
	bool indirect = BPF_MODE(fp->code) == BPF_IND;
	const int ip_align = NET_IP_ALIGN;
	struct bpf_insn *insn = *insnp;
	int offset = fp->k;

	if (!indirect &&
	    ((unaligned_ok && offset >= 0) ||
	     (!unaligned_ok && offset >= 0 &&
	      offset + ip_align >= 0 &&
	      offset + ip_align % size == 0))) {
		bool ldx_off_ok = offset <= S16_MAX;

		*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
		if (offset)
			*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
				      size, 2 + endian + (!ldx_off_ok * 2));
		if (ldx_off_ok) {
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_D, offset);
		} else {
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_TMP, 0);
		}
		if (endian)
			*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
		*insn++ = BPF_JMP_A(8);
	}

	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
		if (fp->k)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
	}

	switch (BPF_SIZE(fp->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
		break;
	default:
		return false;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
	*insn = BPF_EXIT_INSN();

	*insnp = insn;
	return true;
}

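/* Note (illustrative): the function above emits a fast path that loads
 * directly via the cached skb->data pointer (BPF_REG_D) when the access
 * stays within the linear header, and falls back to a call to one of the
 * bpf_skb_load_helper_{8,16,32}() helpers otherwise. On a helper failure
 * (negative return), A is zeroed and the program exits, matching classic
 * BPF's drop-on-bad-load semantics.
 */
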
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *	@seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    convert_bpf_ld_abs(fp, &insn)) {
				*seen_ld_abs = true;
				break;
			}

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this always returned 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
		s32 off;						\
									\
		if (target >= len || target < 0)			\
			goto err;					\
		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		off -= insn - tmp_insns;				\
		/* Reject anything not fitting into insn->off. */	\
		if (off < off_min || off > off_max)			\
			goto err;					\
		insn->off = off;					\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

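/* Usage sketch (illustrative): callers run the conversion twice, as
 * bpf_migrate_filter() below does -- a first call with new_prog == NULL
 * that only computes new_len, an allocation of that size, then a second
 * call that actually emits the remapped instructions.
 */
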
/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by a user never tries to
 * read a cell if not previously written, and we check all branches to be
 * sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

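/* Example (illustrative): a two-instruction filter
 *
 *	{ BPF_LD | BPF_MEM, 0, 0, 3 },	// A = mem[3], never written
 *	{ BPF_RET | BPF_A, 0, 0, 0 },
 *
 * is rejected here with -EINVAL, because cell mem[3] is read before any
 * BPF_ST/BPF_STX has stored to it.
 */
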
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

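/* Example (illustrative): the smallest program that passes
 * bpf_check_classic() is a single accept-all return,
 *
 *	{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
 *
 * the opcode is allowed, no jumps or memory cells are involved, and the
 * last (only) instruction is a RET.
 */
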
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;
	bool seen_ld_abs = false;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

7ae457c1 1330 * bpf_prog_create - create an unattached filter
c6c4b97c 1331 * @pfp: the unattached filter that is created
677a9fd3 1332 * @fprog: the filter program
302d6637 1333 *
c6c4b97c 1334 * Create a filter independent of any socket. We first run some
302d6637
JP
1335 * sanity checks on it to make sure it does not explode on us later.
1336 * If an error occurs or there is insufficient memory for the filter
1337 * a negative errno code is returned. On success the return is zero.
1338 */
7ae457c1 1339int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
302d6637 1340{
009937e7 1341 unsigned int fsize = bpf_classic_proglen(fprog);
7ae457c1 1342 struct bpf_prog *fp;
302d6637
JP
1343
1344 /* Make sure new filter is there and in the right amounts. */
f7bd9e36 1345 if (!bpf_check_basics_ok(fprog->filter, fprog->len))
302d6637
JP
1346 return -EINVAL;
1347
60a3b225 1348 fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
302d6637
JP
1349 if (!fp)
1350 return -ENOMEM;
a3ea269b 1351
302d6637
JP
1352 memcpy(fp->insns, fprog->filter, fsize);
1353
302d6637 1354 fp->len = fprog->len;
a3ea269b
DB
1355 /* Since unattached filters are not copied back to user
1356 * space through sk_get_filter(), we do not need to hold
1357 * a copy here, and can spare us the work.
1358 */
1359 fp->orig_prog = NULL;
302d6637 1360
7ae457c1 1361 /* bpf_prepare_filter() already takes care of freeing
bd4cf0ed
AS
1362 * memory in case something goes wrong.
1363 */
4ae92bc7 1364 fp = bpf_prepare_filter(fp, NULL);
bd4cf0ed
AS
1365 if (IS_ERR(fp))
1366 return PTR_ERR(fp);
302d6637
JP
1367
1368 *pfp = fp;
1369 return 0;
302d6637 1370}
7ae457c1 1371EXPORT_SYMBOL_GPL(bpf_prog_create);
302d6637 1372
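/* Usage sketch (illustrative, e.g. from a driver): build an unattached
 * classic filter; "insns" and its length are the caller's own.
 *
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &fprog);
 *
 * On success the caller later releases it with bpf_prog_destroy(prog).
 */
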
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

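/* Userspace counterpart (illustrative sketch): programs reach
 * sk_attach_filter() via the SO_ATTACH_FILTER socket option, roughly:
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	// accept all
 *	};
 *	struct sock_fprog bpf = { .len = 1, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
 */
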
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		err = -ENOMEM;
	else
		err = reuseport_attach_prog(sk, prog);

	if (err)
		__bpf_prog_release(prog);

	return err;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL)
		prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
		/* Like other non BPF_PROG_TYPE_SOCKET_FILTER
		 * bpf prog (e.g. sockmap). It depends on the
		 * limitation imposed by bpf_prog_load().
		 * Hence, sysctl_optmem_max is not checked.
		 */
		if ((sk->sk_type != SOCK_STREAM &&
		     sk->sk_type != SOCK_DGRAM) ||
		    (sk->sk_protocol != IPPROTO_UDP &&
		     sk->sk_protocol != IPPROTO_TCP) ||
		    (sk->sk_family != AF_INET &&
		     sk->sk_family != AF_INET6)) {
			err = -ENOTSUPP;
			goto err_prog_put;
		}
	} else {
		/* BPF_PROG_TYPE_SOCKET_FILTER */
		if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
			err = -ENOMEM;
			goto err_prog_put;
		}
	}

	err = reuseport_attach_prog(sk, prog);
err_prog_put:
	if (err)
		bpf_prog_put(prog);

	return err;
}

void sk_reuseport_prog_free(struct bpf_prog *prog)
{
	if (!prog)
		return;

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
		bpf_prog_put(prog);
	else
		bpf_prog_destroy(prog);
}

struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

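/* Example (illustrative): from a BPF program's point of view, the helper
 * above reads packet bytes regardless of skb linearity:
 *
 *	char buf[4];
 *	if (bpf_skb_load_bytes(skb, offset, buf, sizeof(buf)) < 0)
 *		return 0;	// out of bounds: buf has been zeroed
 */
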
BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
	   u32, offset, void *, to, u32, len, u32, start_header)
{
	u8 *end = skb_tail_pointer(skb);
	u8 *net = skb_network_header(skb);
	u8 *mac = skb_mac_header(skb);
	u8 *ptr;

	if (unlikely(offset > 0xffff || len > (end - mac)))
		goto err_clear;

	switch (start_header) {
	case BPF_HDR_START_MAC:
		ptr = mac + offset;
		break;
	case BPF_HDR_START_NET:
		ptr = net + offset;
		break;
	default:
		goto err_clear;
	}

	if (likely(ptr >= mac && ptr + len <= end)) {
		memcpy(to, ptr, len);
		return 0;
	}

err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
	.func		= bpf_skb_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

36bbef52
DB
1775BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
1776{
1777 /* Idea is the following: should the needed direct read/write
1778 * test fail during runtime, we can pull in more data and redo
1779 * again, since implicitly, we invalidate previous checks here.
1780 *
1781 * Or, since we know how much we need to make read/writeable,
1782 * this can be done once at the program beginning for direct
1783 * access case. By this we overcome limitations of only current
1784 * headroom being accessible.
1785 */
1786 return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
1787}
1788
1789static const struct bpf_func_proto bpf_skb_pull_data_proto = {
1790 .func = bpf_skb_pull_data,
1791 .gpl_only = false,
1792 .ret_type = RET_INTEGER,
1793 .arg1_type = ARG_PTR_TO_CTX,
1794 .arg2_type = ARG_ANYTHING,
1795};
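
/* Illustrative aside, not part of filter.c: the pull-then-recheck pattern
 * the comment above describes, seen from the program side. The 64-byte
 * window is an arbitrary assumption.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int parse_head(struct __sk_buff *skb)
{
        void *data, *data_end;

        /* Linearize the first 64 bytes; this invalidates old bounds. */
        if (bpf_skb_pull_data(skb, 64))
                return TC_ACT_OK;

        data = (void *)(long)skb->data;
        data_end = (void *)(long)skb->data_end;
        if (data + 64 > data_end) /* the verifier still demands this check */
                return TC_ACT_OK;

        /* ... direct read/write on data[0..63] ... */
        return TC_ACT_OK;
}
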
BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
{
        return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
}

static const struct bpf_func_proto bpf_sk_fullsock_proto = {
        .func           = bpf_sk_fullsock,
        .gpl_only       = false,
        .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
        .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
};

static inline int sk_skb_try_make_writable(struct sk_buff *skb,
                                           unsigned int write_len)
{
        int err = __bpf_try_make_writable(skb, write_len);

        bpf_compute_data_end_sk_skb(skb);
        return err;
}

BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
{
        /* Idea is the following: should the needed direct read/write
         * test fail during runtime, we can pull in more data and redo
         * the test, since implicitly, we invalidate previous checks
         * here.
         *
         * Or, since we know how much we need to make read/writable,
         * this can be done once at the program beginning for the direct
         * access case. By this we overcome limitations of only current
         * headroom being accessible.
         */
        return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto sk_skb_pull_data_proto = {
        .func           = sk_skb_pull_data,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
};

BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
           u64, from, u64, to, u64, flags)
{
        __sum16 *ptr;

        if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
                return -EINVAL;
        if (unlikely(offset > 0xffff || offset & 1))
                return -EFAULT;
        if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
                return -EFAULT;

        ptr = (__sum16 *)(skb->data + offset);
        switch (flags & BPF_F_HDR_FIELD_MASK) {
        case 0:
                if (unlikely(from != 0))
                        return -EINVAL;

                csum_replace_by_diff(ptr, to);
                break;
        case 2:
                csum_replace2(ptr, from, to);
                break;
        case 4:
                csum_replace4(ptr, from, to);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
        .func           = bpf_l3_csum_replace,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_ANYTHING,
        .arg5_type      = ARG_ANYTHING,
};

BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
           u64, from, u64, to, u64, flags)
{
        bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
        bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
        bool do_mforce = flags & BPF_F_MARK_ENFORCE;
        __sum16 *ptr;

        if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
                               BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
                return -EINVAL;
        if (unlikely(offset > 0xffff || offset & 1))
                return -EFAULT;
        if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
                return -EFAULT;

        ptr = (__sum16 *)(skb->data + offset);
        if (is_mmzero && !do_mforce && !*ptr)
                return 0;

        switch (flags & BPF_F_HDR_FIELD_MASK) {
        case 0:
                if (unlikely(from != 0))
                        return -EINVAL;

                inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
                break;
        case 2:
                inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
                break;
        case 4:
                inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
                break;
        default:
                return -EINVAL;
        }

        if (is_mmzero && !*ptr)
                *ptr = CSUM_MANGLED_0;
        return 0;
}

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
        .func           = bpf_l4_csum_replace,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_ANYTHING,
        .arg5_type      = ARG_ANYTHING,
};
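
/* Illustrative aside, not part of filter.c: rewriting a TCP destination
 * port and repairing the L4 checksum with bpf_l4_csum_replace(). Assumes
 * an IPv4 packet without IP options; the new port value is made up.
 */
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int rewrite_dport(struct __sk_buff *skb)
{
        __u32 tcp_off = ETH_HLEN + sizeof(struct iphdr);
        __u32 dport_off = tcp_off + offsetof(struct tcphdr, dest);
        __u32 csum_off = tcp_off + offsetof(struct tcphdr, check);
        __be16 old_port, new_port = bpf_htons(8080);

        if (bpf_skb_load_bytes(skb, dport_off, &old_port, sizeof(old_port)))
                return TC_ACT_OK;

        /* The field size travels in the low flag bits: 2 bytes here. */
        bpf_l4_csum_replace(skb, csum_off, old_port, new_port,
                            sizeof(new_port));
        bpf_skb_store_bytes(skb, dport_off, &new_port, sizeof(new_port), 0);
        return TC_ACT_OK;
}
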
BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
           __be32 *, to, u32, to_size, __wsum, seed)
{
        struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
        u32 diff_size = from_size + to_size;
        int i, j = 0;

        /* This is quite flexible, some examples:
         *
         * from_size == 0, to_size > 0,  seed := csum --> pushing data
         * from_size > 0,  to_size == 0, seed := csum --> pulling data
         * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
         *
         * Even for diffing, from_size and to_size don't need to be equal.
         */
        if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
                     diff_size > sizeof(sp->diff)))
                return -EINVAL;

        for (i = 0; i < from_size / sizeof(__be32); i++, j++)
                sp->diff[j] = ~from[i];
        for (i = 0; i < to_size / sizeof(__be32); i++, j++)
                sp->diff[j] = to[i];

        return csum_partial(sp->diff, diff_size, seed);
}

static const struct bpf_func_proto bpf_csum_diff_proto = {
        .func           = bpf_csum_diff,
        .gpl_only       = false,
        .pkt_access     = true,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_MEM_OR_NULL,
        .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type      = ARG_PTR_TO_MEM_OR_NULL,
        .arg4_type      = ARG_CONST_SIZE_OR_ZERO,
        .arg5_type      = ARG_ANYTHING,
};
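
/* Illustrative aside, not part of filter.c: the "diffing data" mode from
 * the comment above, feeding the resulting delta into
 * bpf_l4_csum_replace() with field size 0. Addresses and offsets are
 * assumptions; an IPv4 TCP packet without options is presumed.
 */
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int rewrite_daddr_csum(struct __sk_buff *skb)
{
        __u32 csum_off = ETH_HLEN + sizeof(struct iphdr) +
                         offsetof(struct tcphdr, check);
        __be32 old_ip = bpf_htonl(0xc0a80001);  /* 192.168.0.1, made up */
        __be32 new_ip = bpf_htonl(0xc0a80002);  /* 192.168.0.2, made up */
        __s64 diff;

        /* from_size > 0, to_size > 0, seed == 0 --> diffing data */
        diff = bpf_csum_diff(&old_ip, sizeof(old_ip), &new_ip,
                             sizeof(new_ip), 0);
        if (diff < 0)
                return TC_ACT_OK;

        /* Field size 0 applies a precomputed delta; the address is part
         * of the pseudo header, hence BPF_F_PSEUDO_HDR.
         */
        bpf_l4_csum_replace(skb, csum_off, 0, diff, BPF_F_PSEUDO_HDR);
        return TC_ACT_OK;
}
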
BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
        /* The interface is to be used in combination with bpf_csum_diff()
         * for direct packet writes. csum rotation for alignment as well
         * as emulating csum_sub() can be done from the eBPF program.
         */
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                return (skb->csum = csum_add(skb->csum, csum));

        return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_csum_update_proto = {
        .func           = bpf_csum_update,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
};
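
/* Illustrative aside, not part of filter.c: folding a store's checksum
 * delta into skb->csum by hand, instead of passing BPF_F_RECOMPUTE_CSUM
 * to the store. The offset and value are made up; on skbs that are not
 * CHECKSUM_COMPLETE the update returns -ENOTSUPP and is a no-op.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int store_and_update(struct __sk_buff *skb)
{
        __u32 off = 100;        /* hypothetical payload offset */
        __be32 old, new = bpf_htonl(42);
        __s64 diff;

        if (bpf_skb_load_bytes(skb, off, &old, sizeof(old)))
                return TC_ACT_OK;
        bpf_skb_store_bytes(skb, off, &new, sizeof(new), 0);

        diff = bpf_csum_diff(&old, sizeof(old), &new, sizeof(new), 0);
        if (diff >= 0)
                bpf_csum_update(skb, diff);
        return TC_ACT_OK;
}
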
static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
        return dev_forward_skb(dev, skb);
}

static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
                                      struct sk_buff *skb)
{
        int ret = ____dev_forward_skb(dev, skb);

        if (likely(!ret)) {
                skb->dev = dev;
                ret = netif_rx(skb);
        }

        return ret;
}

static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
        int ret;

        if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
                net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
                kfree_skb(skb);
                return -ENETDOWN;
        }

        skb->dev = dev;

        __this_cpu_inc(xmit_recursion);
        ret = dev_queue_xmit(skb);
        __this_cpu_dec(xmit_recursion);

        return ret;
}

static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
                                 u32 flags)
{
        unsigned int mlen = skb_network_offset(skb);

        if (mlen) {
                __skb_pull(skb, mlen);

                /* At ingress, the mac header has already been pulled once.
                 * At egress, skb_postpull_rcsum has to be done in case that
                 * the skb is originated from ingress (i.e. a forwarded skb)
                 * to ensure that rcsum starts at net header.
                 */
                if (!skb_at_tc_ingress(skb))
                        skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
        }
        skb_pop_mac_header(skb);
        skb_reset_mac_len(skb);
        return flags & BPF_F_INGRESS ?
               __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
                                 u32 flags)
{
        /* Verify that a link layer header is carried */
        if (unlikely(skb->mac_header >= skb->network_header)) {
                kfree_skb(skb);
                return -ERANGE;
        }

        bpf_push_mac_rcsum(skb);
        return flags & BPF_F_INGRESS ?
               __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
                          u32 flags)
{
        if (dev_is_mac_header_xmit(dev))
                return __bpf_redirect_common(skb, dev, flags);
        else
                return __bpf_redirect_no_mac(skb, dev, flags);
}

BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
        struct net_device *dev;
        struct sk_buff *clone;
        int ret;

        if (unlikely(flags & ~(BPF_F_INGRESS)))
                return -EINVAL;

        dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
        if (unlikely(!dev))
                return -EINVAL;

        clone = skb_clone(skb, GFP_ATOMIC);
        if (unlikely(!clone))
                return -ENOMEM;

        /* For direct write, we need to keep the invariant that the skbs
         * we're dealing with need to be uncloned. Should uncloning fail
         * here, we need to free the just generated clone to unclone once
         * again.
         */
        ret = bpf_try_make_head_writable(skb);
        if (unlikely(ret)) {
                kfree_skb(clone);
                return -ENOMEM;
        }

        return __bpf_redirect(clone, dev, flags);
}

static const struct bpf_func_proto bpf_clone_redirect_proto = {
        .func           = bpf_clone_redirect,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
};

DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);

BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

        if (unlikely(flags & ~(BPF_F_INGRESS)))
                return TC_ACT_SHOT;

        ri->ifindex = ifindex;
        ri->flags = flags;

        return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
        struct net_device *dev;

        dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
        ri->ifindex = 0;
        if (unlikely(!dev)) {
                kfree_skb(skb);
                return -EINVAL;
        }

        return __bpf_redirect(skb, dev, ri->flags);
}

static const struct bpf_func_proto bpf_redirect_proto = {
        .func           = bpf_redirect,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_ANYTHING,
        .arg2_type      = ARG_ANYTHING,
};
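
/* Illustrative aside, not part of filter.c: the two redirect flavours
 * side by side from the tc program side. The target ifindex is a made-up
 * assumption.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int fwd(struct __sk_buff *skb)
{
        const __u32 dst_ifindex = 4;    /* hypothetical target device */

        /* bpf_clone_redirect() would transmit a clone immediately:
         *
         *      bpf_clone_redirect(skb, dst_ifindex, BPF_F_INGRESS);
         *
         * bpf_redirect() instead only records the target in the per-CPU
         * bpf_redirect_info and relies on the caller returning
         * TC_ACT_REDIRECT so skb_do_redirect() picks it up, avoiding
         * the clone.
         */
        return bpf_redirect(dst_ifindex, 0);
}
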
BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
{
        msg->apply_bytes = bytes;
        return 0;
}

static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
        .func           = bpf_msg_apply_bytes,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
};

BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
{
        msg->cork_bytes = bytes;
        return 0;
}

static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
        .func           = bpf_msg_cork_bytes,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
           u32, end, u64, flags)
{
        u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
        u32 first_sge, last_sge, i, shift, bytes_sg_total;
        struct scatterlist *sge;
        u8 *raw, *to, *from;
        struct page *page;

        if (unlikely(flags || end <= start))
                return -EINVAL;

        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
                len = sk_msg_elem(msg, i)->length;
                if (start < offset + len)
                        break;
                offset += len;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);

        if (unlikely(start >= offset + len))
                return -EINVAL;

        first_sge = i;
        /* The start may point into the sg element so we need to also
         * account for the headroom.
         */
        bytes_sg_total = start - offset + bytes;
        if (!msg->sg.copy[i] && bytes_sg_total <= len)
                goto out;

        /* At this point we need to linearize multiple scatterlist
         * elements or a single shared page. Either way we need to
         * copy into a linear buffer exclusively owned by BPF. Then
         * place the buffer in the scatterlist and fixup the original
         * entries by removing the entries now in the linear buffer
         * and shifting the remaining entries. For now we do not try
         * to copy partial entries to avoid complexity of running out
         * of sg_entry slots. The downside is reading a single byte
         * will copy the entire sg entry.
         */
        do {
                copy += sk_msg_elem(msg, i)->length;
                sk_msg_iter_var_next(i);
                if (bytes_sg_total <= copy)
                        break;
        } while (i != msg->sg.end);
        last_sge = i;

        if (unlikely(bytes_sg_total > copy))
                return -EINVAL;

        page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
                           get_order(copy));
        if (unlikely(!page))
                return -ENOMEM;

        raw = page_address(page);
        i = first_sge;
        do {
                sge = sk_msg_elem(msg, i);
                from = sg_virt(sge);
                len = sge->length;
                to = raw + poffset;

                memcpy(to, from, len);
                poffset += len;
                sge->length = 0;
                put_page(sg_page(sge));

                sk_msg_iter_var_next(i);
        } while (i != last_sge);

        sg_set_page(&msg->sg.data[first_sge], page, copy, 0);

        /* To repair sg ring we need to shift entries. If we only
         * had a single entry though we can just replace it and
         * be done. Otherwise walk the ring and shift the entries.
         */
        WARN_ON_ONCE(last_sge == first_sge);
        shift = last_sge > first_sge ?
                last_sge - first_sge - 1 :
                MAX_SKB_FRAGS - first_sge + last_sge - 1;
        if (!shift)
                goto out;

        i = first_sge;
        sk_msg_iter_var_next(i);
        do {
                u32 move_from;

                if (i + shift >= MAX_MSG_FRAGS)
                        move_from = i + shift - MAX_MSG_FRAGS;
                else
                        move_from = i + shift;
                if (move_from == msg->sg.end)
                        break;

                msg->sg.data[i] = msg->sg.data[move_from];
                msg->sg.data[move_from].length = 0;
                msg->sg.data[move_from].page_link = 0;
                msg->sg.data[move_from].offset = 0;
                sk_msg_iter_var_next(i);
        } while (1);

        msg->sg.end = msg->sg.end - shift > msg->sg.end ?
                      msg->sg.end - shift + MAX_MSG_FRAGS :
                      msg->sg.end - shift;
out:
        msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
        msg->data_end = msg->data + bytes;
        return 0;
}

static const struct bpf_func_proto bpf_msg_pull_data_proto = {
        .func           = bpf_msg_pull_data,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_ANYTHING,
};
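
/* Illustrative aside, not part of filter.c: an sk_msg verdict program
 * using the helper to make a hypothetical 20-byte application header
 * contiguous before parsing it via msg->data/data_end.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int msg_parse(struct sk_msg_md *msg)
{
        void *data, *data_end;

        if (bpf_msg_pull_data(msg, 0, 20, 0))
                return SK_DROP;

        data = msg->data;
        data_end = msg->data_end;
        if (data + 20 > data_end)
                return SK_DROP;

        /* ... inspect the 20 linear bytes via data ... */
        return SK_PASS;
}
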
BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
           u32, len, u64, flags)
{
        struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
        u32 new, i = 0, l, space, copy = 0, offset = 0;
        u8 *raw, *to, *from;
        struct page *page;

        if (unlikely(flags))
                return -EINVAL;

        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
                l = sk_msg_elem(msg, i)->length;

                if (start < offset + l)
                        break;
                offset += l;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);

        if (start >= offset + l)
                return -EINVAL;

        space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);

        /* If no space is available we fall back to a copy. We need at
         * least one scatterlist elem available to push data into when
         * start aligns to the beginning of an element, or two when it
         * falls inside an element. We handle the start equals offset
         * case because it's the common case for inserting a header.
         */
        if (!space || (space == 1 && start != offset))
                copy = msg->sg.data[i].length;

        page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
                           get_order(copy + len));
        if (unlikely(!page))
                return -ENOMEM;

        if (copy) {
                int front, back;

                raw = page_address(page);

                psge = sk_msg_elem(msg, i);
                front = start - offset;
                back = psge->length - front;
                from = sg_virt(psge);

                if (front)
                        memcpy(raw, from, front);

                if (back) {
                        from += front;
                        to = raw + front + len;

                        memcpy(to, from, back);
                }

                put_page(sg_page(psge));
        } else if (start - offset) {
                psge = sk_msg_elem(msg, i);
                rsge = sk_msg_elem_cpy(msg, i);

                psge->length = start - offset;
                rsge.length -= psge->length;
                rsge.offset += start;

                sk_msg_iter_var_next(i);
                sg_unmark_end(psge);
                sk_msg_iter_next(msg, end);
        }

        /* Slot(s) to place newly allocated data */
        new = i;

        /* Shift one or two slots as needed */
        if (!copy) {
                sge = sk_msg_elem_cpy(msg, i);

                sk_msg_iter_var_next(i);
                sg_unmark_end(&sge);
                sk_msg_iter_next(msg, end);

                nsge = sk_msg_elem_cpy(msg, i);
                if (rsge.length) {
                        sk_msg_iter_var_next(i);
                        nnsge = sk_msg_elem_cpy(msg, i);
                }

                while (i != msg->sg.end) {
                        msg->sg.data[i] = sge;
                        sge = nsge;
                        sk_msg_iter_var_next(i);
                        if (rsge.length) {
                                nsge = nnsge;
                                nnsge = sk_msg_elem_cpy(msg, i);
                        } else {
                                nsge = sk_msg_elem_cpy(msg, i);
                        }
                }
        }

        /* Place newly allocated data buffer */
        sk_mem_charge(msg->sk, len);
        msg->sg.size += len;
        msg->sg.copy[new] = false;
        sg_set_page(&msg->sg.data[new], page, len + copy, 0);
        if (rsge.length) {
                get_page(sg_page(&rsge));
                sk_msg_iter_var_next(new);
                msg->sg.data[new] = rsge;
        }

        sk_msg_compute_data_pointers(msg);
        return 0;
}

static const struct bpf_func_proto bpf_msg_push_data_proto = {
        .func           = bpf_msg_push_data,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_ANYTHING,
};

static void sk_msg_shift_left(struct sk_msg *msg, int i)
{
        int prev;

        do {
                prev = i;
                sk_msg_iter_var_next(i);
                msg->sg.data[prev] = msg->sg.data[i];
        } while (i != msg->sg.end);

        sk_msg_iter_prev(msg, end);
}

static void sk_msg_shift_right(struct sk_msg *msg, int i)
{
        struct scatterlist tmp, sge;

        sk_msg_iter_next(msg, end);
        sge = sk_msg_elem_cpy(msg, i);
        sk_msg_iter_var_next(i);
        tmp = sk_msg_elem_cpy(msg, i);

        while (i != msg->sg.end) {
                msg->sg.data[i] = sge;
                sk_msg_iter_var_next(i);
                sge = tmp;
                tmp = sk_msg_elem_cpy(msg, i);
        }
}

BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
           u32, len, u64, flags)
{
        u32 i = 0, l, space, offset = 0;
        u64 last = start + len;
        int pop;

        if (unlikely(flags))
                return -EINVAL;

        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
                l = sk_msg_elem(msg, i)->length;

                if (start < offset + l)
                        break;
                offset += l;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);

        /* Bounds checks: start and pop must be inside message */
        if (start >= offset + l || last >= msg->sg.size)
                return -EINVAL;

        space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);

        pop = len;
        /* --------------| offset
         * -| start      |-------- len -------|
         *
         *  |----- a ----|-------- pop -------|----- b ----|
         *  |______________________________________________| length
         *
         *
         * a:   region at front of scatter element to save
         * b:   region at back of scatter element to save when length > a + pop
         * pop: region to pop from element, same as input 'pop' here will be
         *      decremented below per iteration.
         *
         * Two top-level cases to handle when start != offset: first, b is
         * non-zero; second, b is zero, corresponding to when a pop includes
         * more than one element.
         *
         * Then if b is non-zero AND there is no space, allocate space and
         * compact the a, b regions into a page. If there is space, shift the
         * ring to the right, freeing the next element in the ring to place b,
         * leaving a untouched except to reduce its length.
         */
        if (start != offset) {
                struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
                int a = start;
                int b = sge->length - pop - a;

                sk_msg_iter_var_next(i);

                if (pop < sge->length - a) {
                        if (space) {
                                sge->length = a;
                                sk_msg_shift_right(msg, i);
                                nsge = sk_msg_elem(msg, i);
                                get_page(sg_page(sge));
                                sg_set_page(nsge,
                                            sg_page(sge),
                                            b, sge->offset + pop + a);
                        } else {
                                struct page *page, *orig;
                                u8 *to, *from;

                                page = alloc_pages(__GFP_NOWARN |
                                                   __GFP_COMP | GFP_ATOMIC,
                                                   get_order(a + b));
                                if (unlikely(!page))
                                        return -ENOMEM;

                                sge->length = a;
                                orig = sg_page(sge);
                                from = sg_virt(sge);
                                to = page_address(page);
                                memcpy(to, from, a);
                                memcpy(to + a, from + a + pop, b);
                                sg_set_page(sge, page, a + b, 0);
                                put_page(orig);
                        }
                        pop = 0;
                } else if (pop >= sge->length - a) {
                        pop -= (sge->length - a);
                        sge->length = a;
                }
        }

        /* From above the current layout _must_ be as follows,
         *
         * -| offset
         * -| start
         *
         *  |---- pop ---|---------------- b ------------|
         *  |____________________________________________| length
         *
         * Offset and start of the current msg elem are equal because in the
         * previous case we handled offset != start and either consumed the
         * entire element and advanced to the next element OR pop == 0.
         *
         * Two cases to handle here are first pop is less than the length
         * leaving some remainder b above. Simply adjust the element's layout
         * in this case. Or pop >= length of the element so that b = 0. In this
         * case advance to next element decrementing pop.
         */
        while (pop) {
                struct scatterlist *sge = sk_msg_elem(msg, i);

                if (pop < sge->length) {
                        sge->length -= pop;
                        sge->offset += pop;
                        pop = 0;
                } else {
                        pop -= sge->length;
                        sk_msg_shift_left(msg, i);
                }
                sk_msg_iter_var_next(i);
        }

        sk_mem_uncharge(msg->sk, len - pop);
        msg->sg.size -= (len - pop);
        sk_msg_compute_data_pointers(msg);
        return 0;
}

static const struct bpf_func_proto bpf_msg_pop_data_proto = {
        .func           = bpf_msg_pop_data,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_ANYTHING,
};
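
/* Illustrative aside, not part of filter.c: dropping a made-up 8-byte
 * framing header from the front of a message. Memory accounting and the
 * data pointers are fixed up by the helper itself.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int strip_frame_hdr(struct sk_msg_md *msg)
{
        if (bpf_msg_pop_data(msg, 0, 8, 0))
                return SK_DROP;
        return SK_PASS;
}
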
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
        return task_get_classid(skb);
}

static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
        .func           = bpf_get_cgroup_classid,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{
        return dst_tclassid(skb);
}

static const struct bpf_func_proto bpf_get_route_realm_proto = {
        .func           = bpf_get_route_realm,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{
        /* If skb_clear_hash() was called due to mangling, we can
         * trigger SW recalculation here. Later access to hash
         * can then use the inline skb->hash via context directly
         * instead of calling this helper again.
         */
        return skb_get_hash(skb);
}

static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
        .func           = bpf_get_hash_recalc,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
{
        /* After all direct packet writes, this can be used once for
         * triggering a lazy recalc on next skb_get_hash() invocation.
         */
        skb_clear_hash(skb);
        return 0;
}

static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
        .func           = bpf_set_hash_invalid,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
};

BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
{
        /* Set user specified hash as L4(+), so that it gets returned
         * on skb_get_hash() call unless BPF prog later on triggers a
         * skb_clear_hash().
         */
        __skb_set_sw_hash(skb, hash, true);
        return 0;
}

static const struct bpf_func_proto bpf_set_hash_proto = {
        .func           = bpf_set_hash,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
};

BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
           u16, vlan_tci)
{
        int ret;

        if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
                     vlan_proto != htons(ETH_P_8021AD)))
                vlan_proto = htons(ETH_P_8021Q);

        bpf_push_mac_rcsum(skb);
        ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
        bpf_pull_mac_rcsum(skb);

        bpf_compute_data_pointers(skb);
        return ret;
}

static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
        .func           = bpf_skb_vlan_push,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
};

BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
        int ret;

        bpf_push_mac_rcsum(skb);
        ret = skb_vlan_pop(skb);
        bpf_pull_mac_rcsum(skb);

        bpf_compute_data_pointers(skb);
        return ret;
}

static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
        .func           = bpf_skb_vlan_pop,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
};

static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{
        /* Caller already did skb_cow() with len as headroom,
         * so no need to do it here.
         */
        skb_push(skb, len);
        memmove(skb->data, skb->data + len, off);
        memset(skb->data + off, 0, len);

        /* No skb_postpush_rcsum(skb, skb->data + off, len)
         * needed here as it does not change the skb->csum
         * result for checksum complete when summing over
         * zeroed blocks.
         */
        return 0;
}

static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
        /* skb_ensure_writable() is not needed here, as we're
         * already working on an uncloned skb.
         */
        if (unlikely(!pskb_may_pull(skb, off + len)))
                return -ENOMEM;

        skb_postpull_rcsum(skb, skb->data + off, len);
        memmove(skb->data + len, skb->data, off);
        __skb_pull(skb, len);

        return 0;
}

static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
        bool trans_same = skb->transport_header == skb->network_header;
        int ret;

        /* There's no need for __skb_push()/__skb_pull() pair to
         * get to the start of the mac header as we're guaranteed
         * to always start from here under eBPF.
         */
        ret = bpf_skb_generic_push(skb, off, len);
        if (likely(!ret)) {
                skb->mac_header -= len;
                skb->network_header -= len;
                if (trans_same)
                        skb->transport_header = skb->network_header;
        }

        return ret;
}

static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
{
        bool trans_same = skb->transport_header == skb->network_header;
        int ret;

        /* Same here, __skb_push()/__skb_pull() pair not needed. */
        ret = bpf_skb_generic_pop(skb, off, len);
        if (likely(!ret)) {
                skb->mac_header += len;
                skb->network_header += len;
                if (trans_same)
                        skb->transport_header = skb->network_header;
        }

        return ret;
}

static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
{
        const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
        u32 off = skb_mac_header_len(skb);
        int ret;

        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
                return -ENOTSUPP;

        ret = skb_cow(skb, len_diff);
        if (unlikely(ret < 0))
                return ret;

        ret = bpf_skb_net_hdr_push(skb, off, len_diff);
        if (unlikely(ret < 0))
                return ret;

        if (skb_is_gso(skb)) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);

                /* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. */
                if (shinfo->gso_type & SKB_GSO_TCPV4) {
                        shinfo->gso_type &= ~SKB_GSO_TCPV4;
                        shinfo->gso_type |= SKB_GSO_TCPV6;
                }

                /* Due to IPv6 header, MSS needs to be downgraded. */
                skb_decrease_gso_size(shinfo, len_diff);
                /* Header must be checked, and gso_segs recomputed. */
                shinfo->gso_type |= SKB_GSO_DODGY;
                shinfo->gso_segs = 0;
        }

        skb->protocol = htons(ETH_P_IPV6);
        skb_clear_hash(skb);

        return 0;
}

static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
{
        const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
        u32 off = skb_mac_header_len(skb);
        int ret;

        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
                return -ENOTSUPP;

        ret = skb_unclone(skb, GFP_ATOMIC);
        if (unlikely(ret < 0))
                return ret;

        ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
        if (unlikely(ret < 0))
                return ret;

        if (skb_is_gso(skb)) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);

                /* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. */
                if (shinfo->gso_type & SKB_GSO_TCPV6) {
                        shinfo->gso_type &= ~SKB_GSO_TCPV6;
                        shinfo->gso_type |= SKB_GSO_TCPV4;
                }

                /* Due to IPv4 header, MSS can be upgraded. */
                skb_increase_gso_size(shinfo, len_diff);
                /* Header must be checked, and gso_segs recomputed. */
                shinfo->gso_type |= SKB_GSO_DODGY;
                shinfo->gso_segs = 0;
        }

        skb->protocol = htons(ETH_P_IP);
        skb_clear_hash(skb);

        return 0;
}

static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{
        __be16 from_proto = skb->protocol;

        if (from_proto == htons(ETH_P_IP) &&
            to_proto == htons(ETH_P_IPV6))
                return bpf_skb_proto_4_to_6(skb);

        if (from_proto == htons(ETH_P_IPV6) &&
            to_proto == htons(ETH_P_IP))
                return bpf_skb_proto_6_to_4(skb);

        return -ENOTSUPP;
}

BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
           u64, flags)
{
        int ret;

        if (unlikely(flags))
                return -EINVAL;

        /* General idea is that this helper does the basic groundwork
         * needed for changing the protocol, and the eBPF program fills
         * the rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
         * and other helpers, rather than passing a raw buffer here.
         *
         * The rationale is to keep this minimal and without a need to
         * deal with raw packet data. F.e. even if we would pass buffers
         * here, the program still needs to call the bpf_lX_csum_replace()
         * helpers anyway. Plus, this way we keep also separation of
         * concerns, since f.e. bpf_skb_store_bytes() should only take
         * care of stores.
         *
         * Currently, additional options and extension header space are
         * not supported, but the flags register is reserved so we can
         * adapt that. For offloads, we mark the packet as dodgy, so that
         * headers need to be verified first.
         */
        ret = bpf_skb_proto_xlat(skb, proto);
        bpf_compute_data_pointers(skb);
        return ret;
}

static const struct bpf_func_proto bpf_skb_change_proto_proto = {
        .func           = bpf_skb_change_proto,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
};
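
/* Illustrative aside, not part of filter.c: the division of labour the
 * comment above describes. The helper only converts the skb shell; the
 * program writes the new header afterwards. The sketch assumes an IPv4
 * input and leaves the IPv6 header contents out.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("tc")
int to_ipv6(struct __sk_buff *skb)
{
        if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
                return TC_ACT_SHOT;

        /* Now fill the fresh header room, e.g. with
         * bpf_skb_store_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h), 0),
         * and fix L4 checksums for the new pseudo header.
         */
        return TC_ACT_OK;
}
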
BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{
        /* We only allow a restricted subset to be changed for now. */
        if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
                     !skb_pkt_type_ok(pkt_type)))
                return -EINVAL;

        skb->pkt_type = pkt_type;
        return 0;
}

static const struct bpf_func_proto bpf_skb_change_type_proto = {
        .func           = bpf_skb_change_type,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
};

static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return sizeof(struct iphdr);
        case htons(ETH_P_IPV6):
                return sizeof(struct ipv6hdr);
        default:
                return ~0U;
        }
}

static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
{
        u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
        int ret;

        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
                return -ENOTSUPP;

        ret = skb_cow(skb, len_diff);
        if (unlikely(ret < 0))
                return ret;

        ret = bpf_skb_net_hdr_push(skb, off, len_diff);
        if (unlikely(ret < 0))
                return ret;

        if (skb_is_gso(skb)) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);

                /* Due to header grow, MSS needs to be downgraded. */
                skb_decrease_gso_size(shinfo, len_diff);
                /* Header must be checked, and gso_segs recomputed. */
                shinfo->gso_type |= SKB_GSO_DODGY;
                shinfo->gso_segs = 0;
        }

        return 0;
}

static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
{
        u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
        int ret;

        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
                return -ENOTSUPP;

        ret = skb_unclone(skb, GFP_ATOMIC);
        if (unlikely(ret < 0))
                return ret;

        ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
        if (unlikely(ret < 0))
                return ret;

        if (skb_is_gso(skb)) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);

                /* Due to header shrink, MSS can be upgraded. */
                skb_increase_gso_size(shinfo, len_diff);
                /* Header must be checked, and gso_segs recomputed. */
                shinfo->gso_type |= SKB_GSO_DODGY;
                shinfo->gso_segs = 0;
        }

        return 0;
}

static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
        return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
                          SKB_MAX_ALLOC;
}

static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
{
        bool trans_same = skb->transport_header == skb->network_header;
        u32 len_cur, len_diff_abs = abs(len_diff);
        u32 len_min = bpf_skb_net_base_len(skb);
        u32 len_max = __bpf_skb_max_len(skb);
        __be16 proto = skb->protocol;
        bool shrink = len_diff < 0;
        int ret;

        if (unlikely(len_diff_abs > 0xfffU))
                return -EFAULT;
        if (unlikely(proto != htons(ETH_P_IP) &&
                     proto != htons(ETH_P_IPV6)))
                return -ENOTSUPP;

        len_cur = skb->len - skb_network_offset(skb);
        if (skb_transport_header_was_set(skb) && !trans_same)
                len_cur = skb_network_header_len(skb);
        if ((shrink && (len_diff_abs >= len_cur ||
                        len_cur - len_diff_abs < len_min)) ||
            (!shrink && (skb->len + len_diff_abs > len_max &&
                         !skb_is_gso(skb))))
                return -ENOTSUPP;

        ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) :
                       bpf_skb_net_grow(skb, len_diff_abs);

        bpf_compute_data_pointers(skb);
        return ret;
}

BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
           u32, mode, u64, flags)
{
        if (unlikely(flags))
                return -EINVAL;
        if (likely(mode == BPF_ADJ_ROOM_NET))
                return bpf_skb_adjust_net(skb, len_diff);

        return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
        .func           = bpf_skb_adjust_room,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
        .arg4_type      = ARG_ANYTHING,
};
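
/* Illustrative aside, not part of filter.c: opening space between L2 and
 * L3 (mode BPF_ADJ_ROOM_NET) for an outer IPv4 header that the program
 * would write next, e.g. for hand-rolled encapsulation. Sketch only.
 */
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int grow_for_encap(struct __sk_buff *skb)
{
        if (bpf_skb_adjust_room(skb, sizeof(struct iphdr),
                                BPF_ADJ_ROOM_NET, 0))
                return TC_ACT_SHOT;

        /* ... write the outer header into the new room ... */
        return TC_ACT_OK;
}
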
static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
        u32 min_len = skb_network_offset(skb);

        if (skb_transport_header_was_set(skb))
                min_len = skb_transport_offset(skb);
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                min_len = skb_checksum_start_offset(skb) +
                          skb->csum_offset + sizeof(__sum16);
        return min_len;
}

static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
{
        unsigned int old_len = skb->len;
        int ret;

        ret = __skb_grow_rcsum(skb, new_len);
        if (!ret)
                memset(skb->data + old_len, 0, new_len - old_len);
        return ret;
}

static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
{
        return __skb_trim_rcsum(skb, new_len);
}

static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
                                        u64 flags)
{
        u32 max_len = __bpf_skb_max_len(skb);
        u32 min_len = __bpf_skb_min_len(skb);
        int ret;

        if (unlikely(flags || new_len > max_len || new_len < min_len))
                return -EINVAL;
        if (skb->encapsulation)
                return -ENOTSUPP;

        /* The basic idea of this helper is that it's performing the
         * needed work to either grow or trim an skb, and the eBPF
         * program rewrites the rest via helpers like
         * bpf_skb_store_bytes(), bpf_lX_csum_replace() and others rather
         * than passing a raw buffer here. This one is a slow path helper
         * and intended for replies with control messages.
         *
         * Like in bpf_skb_change_proto(), we want to keep this rather
         * minimal and without protocol specifics so that we are able
         * to separate concerns, as in bpf_skb_store_bytes() should only
         * be the one responsible for writing buffers.
         *
         * It's really expected to be a slow path operation here for
         * control message replies, so we're implicitly linearizing,
         * uncloning and dropping offloads from the skb by this.
         */
        ret = __bpf_try_make_writable(skb, skb->len);
        if (!ret) {
                if (new_len > skb->len)
                        ret = bpf_skb_grow_rcsum(skb, new_len);
                else if (new_len < skb->len)
                        ret = bpf_skb_trim_rcsum(skb, new_len);
                if (!ret && skb_is_gso(skb))
                        skb_gso_reset(skb);
        }
        return ret;
}

BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
           u64, flags)
{
        int ret = __bpf_skb_change_tail(skb, new_len, flags);

        bpf_compute_data_pointers(skb);
        return ret;
}

static const struct bpf_func_proto bpf_skb_change_tail_proto = {
        .func           = bpf_skb_change_tail,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
};
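
/* Illustrative aside, not part of filter.c: padding short replies. New
 * tail bytes arrive zeroed via bpf_skb_grow_rcsum() above, so checksum
 * complete stays correct; 64 is an arbitrary target length.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int pad_small(struct __sk_buff *skb)
{
        if (skb->len < 64)
                bpf_skb_change_tail(skb, 64, 0);
        return TC_ACT_OK;
}
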
BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
           u64, flags)
{
        int ret = __bpf_skb_change_tail(skb, new_len, flags);

        bpf_compute_data_end_sk_skb(skb);
        return ret;
}

static const struct bpf_func_proto sk_skb_change_tail_proto = {
        .func           = sk_skb_change_tail,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
};

static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
                                        u64 flags)
{
        u32 max_len = __bpf_skb_max_len(skb);
        u32 new_len = skb->len + head_room;
        int ret;

        if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
                     new_len < skb->len))
                return -EINVAL;

        ret = skb_cow(skb, head_room);
        if (likely(!ret)) {
                /* Idea for this helper is that we currently only
                 * allow to expand on the mac header. This means that
                 * skb->protocol, network header, etc, stay as is.
                 * Compared to bpf_skb_change_tail(), we're more
                 * flexible due to not needing to linearize or
                 * reset GSO. Intention for this helper is to be
                 * used by an L3 skb that needs to push a mac header
                 * for redirection into an L2 device.
                 */
                __skb_push(skb, head_room);
                memset(skb->data, 0, head_room);
                skb_reset_mac_header(skb);
        }

        return ret;
}

BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
           u64, flags)
{
        int ret = __bpf_skb_change_head(skb, head_room, flags);

        bpf_compute_data_pointers(skb);
        return ret;
}

static const struct bpf_func_proto bpf_skb_change_head_proto = {
        .func           = bpf_skb_change_head,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
};

BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
           u64, flags)
{
        int ret = __bpf_skb_change_head(skb, head_room, flags);

        bpf_compute_data_end_sk_skb(skb);
        return ret;
}

static const struct bpf_func_proto sk_skb_change_head_proto = {
        .func           = sk_skb_change_head,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
        .arg3_type      = ARG_ANYTHING,
};

static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
        return xdp_data_meta_unsupported(xdp) ? 0 :
               xdp->data - xdp->data_meta;
}

BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
        void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
        unsigned long metalen = xdp_get_metalen(xdp);
        void *data_start = xdp_frame_end + metalen;
        void *data = xdp->data + offset;

        if (unlikely(data < data_start ||
                     data > xdp->data_end - ETH_HLEN))
                return -EINVAL;

        if (metalen)
                memmove(xdp->data_meta + offset,
                        xdp->data_meta, metalen);
        xdp->data_meta += offset;
        xdp->data = data;

        return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
        .func           = bpf_xdp_adjust_head,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
};
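
/* Illustrative aside, not part of filter.c: growing XDP headroom by a
 * made-up 4 bytes. A negative delta moves xdp->data towards
 * data_hard_start; bounds must be re-derived and re-checked afterwards.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int push_four(struct xdp_md *xdp)
{
        void *data, *data_end;

        if (bpf_xdp_adjust_head(xdp, -4))
                return XDP_DROP;

        data = (void *)(long)xdp->data;
        data_end = (void *)(long)xdp->data_end;
        if (data + 4 > data_end)
                return XDP_DROP;

        /* ... write the 4 new bytes at data ... */
        return XDP_PASS;
}
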
BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
{
        void *data_end = xdp->data_end + offset;

        /* Only shrinking is allowed for now. */
        if (unlikely(offset >= 0))
                return -EINVAL;

        if (unlikely(data_end < xdp->data + ETH_HLEN))
                return -EINVAL;

        xdp->data_end = data_end;

        return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
        .func           = bpf_xdp_adjust_tail,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
};

BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
{
        void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
        void *meta = xdp->data_meta + offset;
        unsigned long metalen = xdp->data - meta;

        if (xdp_data_meta_unsupported(xdp))
                return -ENOTSUPP;
        if (unlikely(meta < xdp_frame_end ||
                     meta > xdp->data))
                return -EINVAL;
        if (unlikely((metalen & (sizeof(__u32) - 1)) ||
                     (metalen > 32)))
                return -EACCES;

        xdp->data_meta = meta;

        return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
        .func           = bpf_xdp_adjust_meta,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
        .arg1_type      = ARG_PTR_TO_CTX,
        .arg2_type      = ARG_ANYTHING,
};
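
/* Illustrative aside, not part of filter.c: reserving a 4-byte metadata
 * word in front of the frame, honouring the alignment and 32-byte cap
 * checked above. The stored value is a made-up mark for a later tc hook.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int tag_frame(struct xdp_md *xdp)
{
        __u32 *meta;

        if (bpf_xdp_adjust_meta(xdp, -(int)sizeof(*meta)))
                return XDP_PASS;

        meta = (void *)(long)xdp->data_meta;
        if ((void *)(meta + 1) > (void *)(long)xdp->data)
                return XDP_PASS;

        *meta = 0xcafe;
        return XDP_PASS;
}
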
3337
11393cc9
JF
3338static int __bpf_tx_xdp(struct net_device *dev,
3339 struct bpf_map *map,
3340 struct xdp_buff *xdp,
3341 u32 index)
814abfab 3342{
44fa2dbd 3343 struct xdp_frame *xdpf;
d8d7218a 3344 int err, sent;
11393cc9
JF
3345
3346 if (!dev->netdev_ops->ndo_xdp_xmit) {
11393cc9 3347 return -EOPNOTSUPP;
814abfab 3348 }
11393cc9 3349
d8d7218a
TM
3350 err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
3351 if (unlikely(err))
3352 return err;
3353
44fa2dbd
JDB
3354 xdpf = convert_to_xdp_frame(xdp);
3355 if (unlikely(!xdpf))
3356 return -EOVERFLOW;
3357
1e67575a 3358 sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, XDP_XMIT_FLUSH);
735fc405
JDB
3359 if (sent <= 0)
3360 return sent;
9c270af3
JDB
3361 return 0;
3362}
3363
47b123ed
JDB
3364static noinline int
3365xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
3366 struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
3367{
3368 struct net_device *fwd;
3369 u32 index = ri->ifindex;
3370 int err;
3371
3372 fwd = dev_get_by_index_rcu(dev_net(dev), index);
3373 ri->ifindex = 0;
3374 if (unlikely(!fwd)) {
3375 err = -EINVAL;
3376 goto err;
3377 }
3378
3379 err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
3380 if (unlikely(err))
3381 goto err;
3382
3383 _trace_xdp_redirect(dev, xdp_prog, index);
3384 return 0;
3385err:
3386 _trace_xdp_redirect_err(dev, xdp_prog, index, err);
3387 return err;
3388}
3389
9c270af3
JDB
3390static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
3391 struct bpf_map *map,
3392 struct xdp_buff *xdp,
3393 u32 index)
3394{
3395 int err;
3396
1b1a251c
BT
3397 switch (map->map_type) {
3398 case BPF_MAP_TYPE_DEVMAP: {
67f29e07 3399 struct bpf_dtab_netdev *dst = fwd;
9c270af3 3400
38edddb8 3401 err = dev_map_enqueue(dst, xdp, dev_rx);
e1302542 3402 if (unlikely(err))
9c270af3 3403 return err;
11393cc9 3404 __dev_map_insert_ctx(map, index);
1b1a251c
BT
3405 break;
3406 }
3407 case BPF_MAP_TYPE_CPUMAP: {
9c270af3
JDB
3408 struct bpf_cpu_map_entry *rcpu = fwd;
3409
3410 err = cpu_map_enqueue(rcpu, xdp, dev_rx);
e1302542 3411 if (unlikely(err))
9c270af3
JDB
3412 return err;
3413 __cpu_map_insert_ctx(map, index);
1b1a251c
BT
3414 break;
3415 }
3416 case BPF_MAP_TYPE_XSKMAP: {
3417 struct xdp_sock *xs = fwd;
3418
3419 err = __xsk_map_redirect(map, xdp, xs);
3420 return err;
3421 }
3422 default:
3423 break;
9c270af3 3424 }
e4a8e817 3425 return 0;
814abfab
JF
3426}
3427
11393cc9
JF
3428void xdp_do_flush_map(void)
3429{
0b19cc0a 3430 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
11393cc9
JF
3431 struct bpf_map *map = ri->map_to_flush;
3432
11393cc9 3433 ri->map_to_flush = NULL;
9c270af3
JDB
3434 if (map) {
3435 switch (map->map_type) {
3436 case BPF_MAP_TYPE_DEVMAP:
3437 __dev_map_flush(map);
3438 break;
3439 case BPF_MAP_TYPE_CPUMAP:
3440 __cpu_map_flush(map);
3441 break;
1b1a251c
BT
3442 case BPF_MAP_TYPE_XSKMAP:
3443 __xsk_map_flush(map);
3444 break;
9c270af3
JDB
3445 default:
3446 break;
3447 }
3448 }
11393cc9
JF
3449}
3450EXPORT_SYMBOL_GPL(xdp_do_flush_map);
3451
2a68d85f 3452static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
9c270af3
JDB
3453{
3454 switch (map->map_type) {
3455 case BPF_MAP_TYPE_DEVMAP:
3456 return __dev_map_lookup_elem(map, index);
3457 case BPF_MAP_TYPE_CPUMAP:
3458 return __cpu_map_lookup_elem(map, index);
1b1a251c
BT
3459 case BPF_MAP_TYPE_XSKMAP:
3460 return __xsk_map_lookup_elem(map, index);
9c270af3
JDB
3461 default:
3462 return NULL;
3463 }
3464}
3465
f6069b9a 3466void bpf_clear_redirect_map(struct bpf_map *map)
7c300131 3467{
f6069b9a
DB
3468 struct bpf_redirect_info *ri;
3469 int cpu;
3470
3471 for_each_possible_cpu(cpu) {
3472 ri = per_cpu_ptr(&bpf_redirect_info, cpu);
3473 /* Avoid polluting remote cacheline due to writes if
3474 * not needed. Once we pass this test, we need the
3475 * cmpxchg() to make sure it hasn't been changed in
3476 * the meantime by remote CPU.
3477 */
3478 if (unlikely(READ_ONCE(ri->map) == map))
3479 cmpxchg(&ri->map, map, NULL);
3480 }
7c300131
DB
3481}
3482
e4a8e817 3483static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
47b123ed
JDB
3484 struct bpf_prog *xdp_prog, struct bpf_map *map,
3485 struct bpf_redirect_info *ri)
97f91a7c 3486{
11393cc9 3487 u32 index = ri->ifindex;
9c270af3 3488 void *fwd = NULL;
4c03bdd7 3489 int err;
97f91a7c
JF
3490
3491 ri->ifindex = 0;
f6069b9a 3492 WRITE_ONCE(ri->map, NULL);
97f91a7c 3493
9c270af3 3494 fwd = __xdp_map_lookup_elem(map, index);
2a68d85f 3495 if (unlikely(!fwd)) {
4c03bdd7 3496 err = -EINVAL;
f5836ca5 3497 goto err;
4c03bdd7 3498 }
e1302542 3499 if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
11393cc9
JF
3500 xdp_do_flush_map();
3501
9c270af3 3502 err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
f5836ca5
JDB
3503 if (unlikely(err))
3504 goto err;
3505
3506 ri->map_to_flush = map;
59a30896 3507 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
f5836ca5
JDB
3508 return 0;
3509err:
59a30896 3510 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
97f91a7c
JF
3511 return err;
3512}
3513
5acaee0a
JF
3514int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
3515 struct bpf_prog *xdp_prog)
814abfab 3516{
0b19cc0a 3517 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
f6069b9a 3518 struct bpf_map *map = READ_ONCE(ri->map);
814abfab 3519
2a68d85f 3520 if (likely(map))
47b123ed 3521 return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
97f91a7c 3522
47b123ed 3523 return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
814abfab
JF
3524}
3525EXPORT_SYMBOL_GPL(xdp_do_redirect);
3526
c060bc61
XS
3527static int xdp_do_generic_redirect_map(struct net_device *dev,
3528 struct sk_buff *skb,
02671e23 3529 struct xdp_buff *xdp,
f6069b9a
DB
3530 struct bpf_prog *xdp_prog,
3531 struct bpf_map *map)
6103aa96 3532{
0b19cc0a 3533 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
eb48d682 3534 u32 index = ri->ifindex;
02671e23 3535 void *fwd = NULL;
2facaad6 3536 int err = 0;
6103aa96 3537
6103aa96 3538 ri->ifindex = 0;
f6069b9a 3539 WRITE_ONCE(ri->map, NULL);
96c5508e 3540
9c270af3 3541 fwd = __xdp_map_lookup_elem(map, index);
2facaad6
JDB
3542 if (unlikely(!fwd)) {
3543 err = -EINVAL;
f5836ca5 3544 goto err;
6103aa96
JF
3545 }
3546
9c270af3 3547 if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
6d5fc195
TM
3548 struct bpf_dtab_netdev *dst = fwd;
3549
3550 err = dev_map_generic_redirect(dst, skb, xdp_prog);
3551 if (unlikely(err))
9c270af3 3552 goto err;
02671e23
BT
3553 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
3554 struct xdp_sock *xs = fwd;
3555
3556 err = xsk_generic_rcv(xs, xdp);
3557 if (err)
3558 goto err;
3559 consume_skb(skb);
9c270af3
JDB
3560 } else {
3561 /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
3562 err = -EBADRQC;
f5836ca5 3563 goto err;
2facaad6 3564 }
6103aa96 3565
9c270af3
JDB
3566 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3567 return 0;
3568err:
3569 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3570 return err;
3571}
3572
3573int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
02671e23 3574 struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
9c270af3 3575{
0b19cc0a 3576 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
f6069b9a 3577 struct bpf_map *map = READ_ONCE(ri->map);
9c270af3
JDB
3578 u32 index = ri->ifindex;
3579 struct net_device *fwd;
3580 int err = 0;
3581
f6069b9a
DB
3582 if (map)
3583 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
3584 map);
9c270af3
JDB
3585 ri->ifindex = 0;
3586 fwd = dev_get_by_index_rcu(dev_net(dev), index);
3587 if (unlikely(!fwd)) {
3588 err = -EINVAL;
f5836ca5 3589 goto err;
2facaad6
JDB
3590 }
3591
d8d7218a
TM
3592 err = xdp_ok_fwd_dev(fwd, skb->len);
3593 if (unlikely(err))
9c270af3
JDB
3594 goto err;
3595
2facaad6 3596 skb->dev = fwd;
9c270af3 3597 _trace_xdp_redirect(dev, xdp_prog, index);
02671e23 3598 generic_xdp_tx(skb, xdp_prog);
f5836ca5
JDB
3599 return 0;
3600err:
9c270af3 3601 _trace_xdp_redirect_err(dev, xdp_prog, index, err);
2facaad6 3602 return err;
6103aa96
JF
3603}
3604EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
3605
814abfab
JF
3606BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
3607{
0b19cc0a 3608 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
814abfab
JF
3609
3610 if (unlikely(flags))
3611 return XDP_ABORTED;
3612
3613 ri->ifindex = ifindex;
3614 ri->flags = flags;
f6069b9a 3615 WRITE_ONCE(ri->map, NULL);
e4a8e817 3616
814abfab
JF
3617 return XDP_REDIRECT;
3618}
3619
3620static const struct bpf_func_proto bpf_xdp_redirect_proto = {
3621 .func = bpf_xdp_redirect,
3622 .gpl_only = false,
3623 .ret_type = RET_INTEGER,
3624 .arg1_type = ARG_ANYTHING,
3625 .arg2_type = ARG_ANYTHING,
3626};
3627
f6069b9a
DB
3628BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
3629 u64, flags)
e4a8e817 3630{
0b19cc0a 3631 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
e4a8e817
DB
3632
3633 if (unlikely(flags))
3634 return XDP_ABORTED;
3635
3636 ri->ifindex = ifindex;
3637 ri->flags = flags;
f6069b9a 3638 WRITE_ONCE(ri->map, map);
e4a8e817
DB
3639
3640 return XDP_REDIRECT;
3641}
3642
3643static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
3644 .func = bpf_xdp_redirect_map,
3645 .gpl_only = false,
3646 .ret_type = RET_INTEGER,
3647 .arg1_type = ARG_CONST_MAP_PTR,
3648 .arg2_type = ARG_ANYTHING,
3649 .arg3_type = ARG_ANYTHING,
3650};
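
/* Usage sketch (BPF program side, illustrative only; map name, section
 * name and sizes are assumptions, not part of this file):
 *
 *	struct bpf_map_def SEC("maps") tx_port = {
 *		.type        = BPF_MAP_TYPE_DEVMAP,
 *		.key_size    = sizeof(u32),
 *		.value_size  = sizeof(u32),
 *		.max_entries = 64,
 *	};
 *
 *	SEC("xdp")
 *	int xdp_redirect_prog(struct xdp_md *ctx)
 *	{
 *		// flags must be 0 here, anything else aborts the packet
 *		return bpf_redirect_map(&tx_port, 0, 0);
 *	}
 */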
3651
555c8a86 3652static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
aa7145c1 3653 unsigned long off, unsigned long len)
555c8a86 3654{
aa7145c1 3655 void *ptr = skb_header_pointer(skb, off, len, dst_buff);
555c8a86
DB
3656
3657 if (unlikely(!ptr))
3658 return len;
3659 if (ptr != dst_buff)
3660 memcpy(dst_buff, ptr, len);
3661
3662 return 0;
3663}
3664
f3694e00
DB
3665BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
3666 u64, flags, void *, meta, u64, meta_size)
555c8a86 3667{
555c8a86 3668 u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
555c8a86
DB
3669
3670 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
3671 return -EINVAL;
3672 if (unlikely(skb_size > skb->len))
3673 return -EFAULT;
3674
3675 return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
3676 bpf_skb_copy);
3677}
3678
3679static const struct bpf_func_proto bpf_skb_event_output_proto = {
3680 .func = bpf_skb_event_output,
3681 .gpl_only = true,
3682 .ret_type = RET_INTEGER,
3683 .arg1_type = ARG_PTR_TO_CTX,
3684 .arg2_type = ARG_CONST_MAP_PTR,
3685 .arg3_type = ARG_ANYTHING,
39f19ebb 3686 .arg4_type = ARG_PTR_TO_MEM,
1728a4f2 3687 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
555c8a86
DB
3688};
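
/* Usage sketch (BPF program side, illustrative): the upper 32 bits of
 * 'flags' carry how many packet bytes to append after the meta data,
 * the lower bits pick the perf ring index.  'events' is assumed to be
 * a BPF_MAP_TYPE_PERF_EVENT_ARRAY map:
 *
 *	struct meta { __u32 ifindex; } m = { .ifindex = skb->ifindex };
 *	__u64 flags = BPF_F_CURRENT_CPU | ((__u64)64 << 32); // + 64 pkt bytes
 *
 *	bpf_perf_event_output(skb, &events, flags, &m, sizeof(m));
 */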
3689
c6c33454
DB
3690static unsigned short bpf_tunnel_key_af(u64 flags)
3691{
3692 return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
3693}
3694
f3694e00
DB
3695BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
3696 u32, size, u64, flags)
d3aa45ce 3697{
c6c33454
DB
3698 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
3699 u8 compat[sizeof(struct bpf_tunnel_key)];
074f528e
DB
3700 void *to_orig = to;
3701 int err;
d3aa45ce 3702
074f528e
DB
3703 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
3704 err = -EINVAL;
3705 goto err_clear;
3706 }
3707 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
3708 err = -EPROTO;
3709 goto err_clear;
3710 }
c6c33454 3711 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
074f528e 3712 err = -EINVAL;
c6c33454 3713 switch (size) {
4018ab18 3714 case offsetof(struct bpf_tunnel_key, tunnel_label):
c0e760c9 3715 case offsetof(struct bpf_tunnel_key, tunnel_ext):
4018ab18 3716 goto set_compat;
c6c33454
DB
3717 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3718 /* Fixup deprecated structure layouts here, so we have
3719 * a common path later on.
3720 */
3721 if (ip_tunnel_info_af(info) != AF_INET)
074f528e 3722 goto err_clear;
4018ab18 3723set_compat:
c6c33454
DB
3724 to = (struct bpf_tunnel_key *)compat;
3725 break;
3726 default:
074f528e 3727 goto err_clear;
c6c33454
DB
3728 }
3729 }
d3aa45ce
AS
3730
3731 to->tunnel_id = be64_to_cpu(info->key.tun_id);
c6c33454
DB
3732 to->tunnel_tos = info->key.tos;
3733 to->tunnel_ttl = info->key.ttl;
1fbc2e0c 3734 to->tunnel_ext = 0;
c6c33454 3735
4018ab18 3736 if (flags & BPF_F_TUNINFO_IPV6) {
c6c33454
DB
3737 memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
3738 sizeof(to->remote_ipv6));
4018ab18
DB
3739 to->tunnel_label = be32_to_cpu(info->key.label);
3740 } else {
c6c33454 3741 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
1fbc2e0c
DB
3742 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
3743 to->tunnel_label = 0;
4018ab18 3744 }
c6c33454
DB
3745
3746 if (unlikely(size != sizeof(struct bpf_tunnel_key)))
074f528e 3747 memcpy(to_orig, to, size);
d3aa45ce
AS
3748
3749 return 0;
074f528e
DB
3750err_clear:
3751 memset(to_orig, 0, size);
3752 return err;
d3aa45ce
AS
3753}
3754
577c50aa 3755static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
d3aa45ce
AS
3756 .func = bpf_skb_get_tunnel_key,
3757 .gpl_only = false,
3758 .ret_type = RET_INTEGER,
3759 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3760 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
3761 .arg3_type = ARG_CONST_SIZE,
d3aa45ce
AS
3762 .arg4_type = ARG_ANYTHING,
3763};
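
/* Usage sketch (tc program on a collect_md tunnel device, illustrative):
 *
 *	struct bpf_tunnel_key key = {};
 *
 *	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
 *		return TC_ACT_SHOT;
 *	// key.tunnel_id and key.remote_ipv4 now describe the outer header;
 *	// pass BPF_F_TUNINFO_IPV6 in flags for the IPv6 variant instead.
 */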
3764
f3694e00 3765BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
14ca0751 3766{
14ca0751 3767 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
074f528e 3768 int err;
14ca0751
DB
3769
3770 if (unlikely(!info ||
074f528e
DB
3771 !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
3772 err = -ENOENT;
3773 goto err_clear;
3774 }
3775 if (unlikely(size < info->options_len)) {
3776 err = -ENOMEM;
3777 goto err_clear;
3778 }
14ca0751
DB
3779
3780 ip_tunnel_info_opts_get(to, info);
074f528e
DB
3781 if (size > info->options_len)
3782 memset(to + info->options_len, 0, size - info->options_len);
14ca0751
DB
3783
3784 return info->options_len;
074f528e
DB
3785err_clear:
3786 memset(to, 0, size);
3787 return err;
14ca0751
DB
3788}
3789
3790static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
3791 .func = bpf_skb_get_tunnel_opt,
3792 .gpl_only = false,
3793 .ret_type = RET_INTEGER,
3794 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3795 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
3796 .arg3_type = ARG_CONST_SIZE,
14ca0751
DB
3797};
3798
d3aa45ce
AS
3799static struct metadata_dst __percpu *md_dst;
3800
f3694e00
DB
3801BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
3802 const struct bpf_tunnel_key *, from, u32, size, u64, flags)
d3aa45ce 3803{
d3aa45ce 3804 struct metadata_dst *md = this_cpu_ptr(md_dst);
c6c33454 3805 u8 compat[sizeof(struct bpf_tunnel_key)];
d3aa45ce
AS
3806 struct ip_tunnel_info *info;
3807
22080870 3808 if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
77a5196a 3809 BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
d3aa45ce 3810 return -EINVAL;
c6c33454
DB
3811 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3812 switch (size) {
4018ab18 3813 case offsetof(struct bpf_tunnel_key, tunnel_label):
c0e760c9 3814 case offsetof(struct bpf_tunnel_key, tunnel_ext):
c6c33454
DB
3815 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3816 /* Fixup deprecated structure layouts here, so we have
3817 * a common path later on.
3818 */
3819 memcpy(compat, from, size);
3820 memset(compat + size, 0, sizeof(compat) - size);
f3694e00 3821 from = (const struct bpf_tunnel_key *) compat;
c6c33454
DB
3822 break;
3823 default:
3824 return -EINVAL;
3825 }
3826 }
c0e760c9
DB
3827 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
3828 from->tunnel_ext))
4018ab18 3829 return -EINVAL;
d3aa45ce
AS
3830
3831 skb_dst_drop(skb);
3832 dst_hold((struct dst_entry *) md);
3833 skb_dst_set(skb, (struct dst_entry *) md);
3834
3835 info = &md->u.tun_info;
5540fbf4 3836 memset(info, 0, sizeof(*info));
d3aa45ce 3837 info->mode = IP_TUNNEL_INFO_TX;
c6c33454 3838
db3c6139 3839 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
22080870
DB
3840 if (flags & BPF_F_DONT_FRAGMENT)
3841 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
792f3dd6
WT
3842 if (flags & BPF_F_ZERO_CSUM_TX)
3843 info->key.tun_flags &= ~TUNNEL_CSUM;
77a5196a
WT
3844 if (flags & BPF_F_SEQ_NUMBER)
3845 info->key.tun_flags |= TUNNEL_SEQ;
22080870 3846
d3aa45ce 3847 info->key.tun_id = cpu_to_be64(from->tunnel_id);
c6c33454
DB
3848 info->key.tos = from->tunnel_tos;
3849 info->key.ttl = from->tunnel_ttl;
3850
3851 if (flags & BPF_F_TUNINFO_IPV6) {
3852 info->mode |= IP_TUNNEL_INFO_IPV6;
3853 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
3854 sizeof(from->remote_ipv6));
4018ab18
DB
3855 info->key.label = cpu_to_be32(from->tunnel_label) &
3856 IPV6_FLOWLABEL_MASK;
c6c33454
DB
3857 } else {
3858 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
3859 }
d3aa45ce
AS
3860
3861 return 0;
3862}
3863
577c50aa 3864static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
d3aa45ce
AS
3865 .func = bpf_skb_set_tunnel_key,
3866 .gpl_only = false,
3867 .ret_type = RET_INTEGER,
3868 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3869 .arg2_type = ARG_PTR_TO_MEM,
3870 .arg3_type = ARG_CONST_SIZE,
d3aa45ce
AS
3871 .arg4_type = ARG_ANYTHING,
3872};
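
/* Usage sketch (egress side, illustrative): a key set here makes a
 * collect_md tunnel device build its outer header from it.  Note that
 * remote_ipv4 is taken in host byte order, see the cpu_to_be32() above:
 *
 *	struct bpf_tunnel_key key = {
 *		.tunnel_id   = 42,
 *		.remote_ipv4 = 0xac100164,	// 172.16.1.100, assumed
 *		.tunnel_ttl  = 64,
 *	};
 *
 *	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *				   BPF_F_ZERO_CSUM_TX) < 0)
 *		return TC_ACT_SHOT;
 */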
3873
f3694e00
DB
3874BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
3875 const u8 *, from, u32, size)
14ca0751 3876{
14ca0751
DB
3877 struct ip_tunnel_info *info = skb_tunnel_info(skb);
3878 const struct metadata_dst *md = this_cpu_ptr(md_dst);
3879
3880 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
3881 return -EINVAL;
fca5fdf6 3882 if (unlikely(size > IP_TUNNEL_OPTS_MAX))
14ca0751
DB
3883 return -ENOMEM;
3884
256c87c1 3885 ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
14ca0751
DB
3886
3887 return 0;
3888}
3889
3890static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
3891 .func = bpf_skb_set_tunnel_opt,
3892 .gpl_only = false,
3893 .ret_type = RET_INTEGER,
3894 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3895 .arg2_type = ARG_PTR_TO_MEM,
3896 .arg3_type = ARG_CONST_SIZE,
14ca0751
DB
3897};
3898
3899static const struct bpf_func_proto *
3900bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
d3aa45ce
AS
3901{
3902 if (!md_dst) {
d66f2b91
JK
3903 struct metadata_dst __percpu *tmp;
3904
3905 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
3906 METADATA_IP_TUNNEL,
3907 GFP_KERNEL);
3908 if (!tmp)
d3aa45ce 3909 return NULL;
d66f2b91
JK
3910 if (cmpxchg(&md_dst, NULL, tmp))
3911 metadata_dst_free_percpu(tmp);
d3aa45ce 3912 }
14ca0751
DB
3913
3914 switch (which) {
3915 case BPF_FUNC_skb_set_tunnel_key:
3916 return &bpf_skb_set_tunnel_key_proto;
3917 case BPF_FUNC_skb_set_tunnel_opt:
3918 return &bpf_skb_set_tunnel_opt_proto;
3919 default:
3920 return NULL;
3921 }
d3aa45ce
AS
3922}
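
/* The md_dst setup above is a lock-free one-time init: concurrent
 * attachers may race, the first cmpxchg() to observe NULL publishes its
 * per-CPU dst, and a loser frees its own copy again.  md_dst is never
 * freed afterwards, so the plain reads in the helpers are safe.
 */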
3923
f3694e00
DB
3924BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
3925 u32, idx)
4a482f34 3926{
4a482f34
MKL
3927 struct bpf_array *array = container_of(map, struct bpf_array, map);
3928 struct cgroup *cgrp;
3929 struct sock *sk;
4a482f34 3930
2d48c5f9 3931 sk = skb_to_full_sk(skb);
4a482f34
MKL
3932 if (!sk || !sk_fullsock(sk))
3933 return -ENOENT;
f3694e00 3934 if (unlikely(idx >= array->map.max_entries))
4a482f34
MKL
3935 return -E2BIG;
3936
f3694e00 3937 cgrp = READ_ONCE(array->ptrs[idx]);
4a482f34
MKL
3938 if (unlikely(!cgrp))
3939 return -EAGAIN;
3940
54fd9c2d 3941 return sk_under_cgroup_hierarchy(sk, cgrp);
4a482f34
MKL
3942}
3943
747ea55e
DB
3944static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
3945 .func = bpf_skb_under_cgroup,
4a482f34
MKL
3946 .gpl_only = false,
3947 .ret_type = RET_INTEGER,
3948 .arg1_type = ARG_PTR_TO_CTX,
3949 .arg2_type = ARG_CONST_MAP_PTR,
3950 .arg3_type = ARG_ANYTHING,
3951};
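
/* Usage sketch (illustrative): 'cgroup_map' is assumed to be a
 * BPF_MAP_TYPE_CGROUP_ARRAY populated with cgroup fds from user space;
 * the helper answers whether the skb's socket sits under the cgroup in
 * slot 'idx':
 *
 *	if (bpf_skb_under_cgroup(skb, &cgroup_map, 0) == 1)
 *		return TC_ACT_OK;	// traffic from the watched cgroup
 */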
4a482f34 3952
cb20b08e
DB
3953#ifdef CONFIG_SOCK_CGROUP_DATA
3954BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
3955{
3956 struct sock *sk = skb_to_full_sk(skb);
3957 struct cgroup *cgrp;
3958
3959 if (!sk || !sk_fullsock(sk))
3960 return 0;
3961
3962 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
3963 return cgrp->kn->id.id;
3964}
3965
3966static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
3967 .func = bpf_skb_cgroup_id,
3968 .gpl_only = false,
3969 .ret_type = RET_INTEGER,
3970 .arg1_type = ARG_PTR_TO_CTX,
3971};
77236281
AI
3972
3973BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
3974 ancestor_level)
3975{
3976 struct sock *sk = skb_to_full_sk(skb);
3977 struct cgroup *ancestor;
3978 struct cgroup *cgrp;
3979
3980 if (!sk || !sk_fullsock(sk))
3981 return 0;
3982
3983 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
3984 ancestor = cgroup_ancestor(cgrp, ancestor_level);
3985 if (!ancestor)
3986 return 0;
3987
3988 return ancestor->kn->id.id;
3989}
3990
3991static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
3992 .func = bpf_skb_ancestor_cgroup_id,
3993 .gpl_only = false,
3994 .ret_type = RET_INTEGER,
3995 .arg1_type = ARG_PTR_TO_CTX,
3996 .arg2_type = ARG_ANYTHING,
3997};
cb20b08e
DB
3998#endif
3999
4de16969
DB
4000static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
4001 unsigned long off, unsigned long len)
4002{
4003 memcpy(dst_buff, src_buff + off, len);
4004 return 0;
4005}
4006
f3694e00
DB
4007BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
4008 u64, flags, void *, meta, u64, meta_size)
4de16969 4009{
4de16969 4010 u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4de16969
DB
4011
4012 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
4013 return -EINVAL;
4014 if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
4015 return -EFAULT;
4016
9c471370
MKL
4017 return bpf_event_output(map, flags, meta, meta_size, xdp->data,
4018 xdp_size, bpf_xdp_copy);
4de16969
DB
4019}
4020
4021static const struct bpf_func_proto bpf_xdp_event_output_proto = {
4022 .func = bpf_xdp_event_output,
4023 .gpl_only = true,
4024 .ret_type = RET_INTEGER,
4025 .arg1_type = ARG_PTR_TO_CTX,
4026 .arg2_type = ARG_CONST_MAP_PTR,
4027 .arg3_type = ARG_ANYTHING,
39f19ebb 4028 .arg4_type = ARG_PTR_TO_MEM,
1728a4f2 4029 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4de16969
DB
4030};
4031
91b8270f
CF
4032BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
4033{
4034 return skb->sk ? sock_gen_cookie(skb->sk) : 0;
4035}
4036
4037static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
4038 .func = bpf_get_socket_cookie,
4039 .gpl_only = false,
4040 .ret_type = RET_INTEGER,
4041 .arg1_type = ARG_PTR_TO_CTX,
4042};
4043
d692f113
AI
4044BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
4045{
4046 return sock_gen_cookie(ctx->sk);
4047}
4048
4049static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
4050 .func = bpf_get_socket_cookie_sock_addr,
4051 .gpl_only = false,
4052 .ret_type = RET_INTEGER,
4053 .arg1_type = ARG_PTR_TO_CTX,
4054};
4055
4056BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
4057{
4058 return sock_gen_cookie(ctx->sk);
4059}
4060
4061static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
4062 .func = bpf_get_socket_cookie_sock_ops,
4063 .gpl_only = false,
4064 .ret_type = RET_INTEGER,
4065 .arg1_type = ARG_PTR_TO_CTX,
4066};
4067
6acc5c29
CF
4068BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
4069{
4070 struct sock *sk = sk_to_full_sk(skb->sk);
4071 kuid_t kuid;
4072
4073 if (!sk || !sk_fullsock(sk))
4074 return overflowuid;
4075 kuid = sock_net_uid(sock_net(sk), sk);
4076 return from_kuid_munged(sock_net(sk)->user_ns, kuid);
4077}
4078
4079static const struct bpf_func_proto bpf_get_socket_uid_proto = {
4080 .func = bpf_get_socket_uid,
4081 .gpl_only = false,
4082 .ret_type = RET_INTEGER,
4083 .arg1_type = ARG_PTR_TO_CTX,
4084};
4085
a5a3a828
SV
4086BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock,
4087 struct bpf_map *, map, u64, flags, void *, data, u64, size)
4088{
4089 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
4090 return -EINVAL;
4091
4092 return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
4093}
4094
4095static const struct bpf_func_proto bpf_sockopt_event_output_proto = {
4096 .func = bpf_sockopt_event_output,
4097 .gpl_only = true,
4098 .ret_type = RET_INTEGER,
4099 .arg1_type = ARG_PTR_TO_CTX,
4100 .arg2_type = ARG_CONST_MAP_PTR,
4101 .arg3_type = ARG_ANYTHING,
4102 .arg4_type = ARG_PTR_TO_MEM,
4103 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4104};
4105
8c4b4c7e
LB
4106BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4107 int, level, int, optname, char *, optval, int, optlen)
4108{
4109 struct sock *sk = bpf_sock->sk;
4110 int ret = 0;
4111 int val;
4112
4113 if (!sk_fullsock(sk))
4114 return -EINVAL;
4115
4116 if (level == SOL_SOCKET) {
4117 if (optlen != sizeof(int))
4118 return -EINVAL;
4119 val = *((int *)optval);
4120
4121 /* Only some socket options are supported */
4122 switch (optname) {
4123 case SO_RCVBUF:
c9e45767 4124 val = min_t(u32, val, sysctl_rmem_max);
8c4b4c7e
LB
4125 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4126 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
4127 break;
4128 case SO_SNDBUF:
c9e45767 4129 val = min_t(u32, val, sysctl_wmem_max);
8c4b4c7e
LB
4130 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4131 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
4132 break;
76a9ebe8 4133 case SO_MAX_PACING_RATE: /* 32bit version */
e224c390
YC
4134 if (val != ~0U)
4135 cmpxchg(&sk->sk_pacing_status,
4136 SK_PACING_NONE,
4137 SK_PACING_NEEDED);
76a9ebe8 4138 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
8c4b4c7e
LB
4139 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4140 sk->sk_max_pacing_rate);
4141 break;
4142 case SO_PRIORITY:
4143 sk->sk_priority = val;
4144 break;
4145 case SO_RCVLOWAT:
4146 if (val < 0)
4147 val = INT_MAX;
4148 sk->sk_rcvlowat = val ? : 1;
4149 break;
4150 case SO_MARK:
f4924f24
PO
4151 if (sk->sk_mark != val) {
4152 sk->sk_mark = val;
4153 sk_dst_reset(sk);
4154 }
8c4b4c7e
LB
4155 break;
4156 default:
4157 ret = -EINVAL;
4158 }
a5192c52 4159#ifdef CONFIG_INET
6f5c39fa
NS
4160 } else if (level == SOL_IP) {
4161 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4162 return -EINVAL;
4163
4164 val = *((int *)optval);
4165 /* Only some options are supported */
4166 switch (optname) {
4167 case IP_TOS:
4168 if (val < -1 || val > 0xff) {
4169 ret = -EINVAL;
4170 } else {
4171 struct inet_sock *inet = inet_sk(sk);
4172
4173 if (val == -1)
4174 val = 0;
4175 inet->tos = val;
4176 }
4177 break;
4178 default:
4179 ret = -EINVAL;
4180 }
6f9bd3d7
LB
4181#if IS_ENABLED(CONFIG_IPV6)
4182 } else if (level == SOL_IPV6) {
4183 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4184 return -EINVAL;
4185
4186 val = *((int *)optval);
4187 /* Only some options are supported */
4188 switch (optname) {
4189 case IPV6_TCLASS:
4190 if (val < -1 || val > 0xff) {
4191 ret = -EINVAL;
4192 } else {
4193 struct ipv6_pinfo *np = inet6_sk(sk);
4194
4195 if (val == -1)
4196 val = 0;
4197 np->tclass = val;
4198 }
4199 break;
4200 default:
4201 ret = -EINVAL;
4202 }
4203#endif
8c4b4c7e
LB
4204 } else if (level == SOL_TCP &&
4205 sk->sk_prot->setsockopt == tcp_setsockopt) {
91b5b21c
LB
4206 if (optname == TCP_CONGESTION) {
4207 char name[TCP_CA_NAME_MAX];
ebfa00c5 4208 bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
91b5b21c
LB
4209
4210 strncpy(name, optval, min_t(long, optlen,
4211 TCP_CA_NAME_MAX-1));
4212 name[TCP_CA_NAME_MAX-1] = 0;
6f9bd3d7
LB
4213 ret = tcp_set_congestion_control(sk, name, false,
4214 reinit);
91b5b21c 4215 } else {
fc747810
LB
4216 struct tcp_sock *tp = tcp_sk(sk);
4217
4218 if (optlen != sizeof(int))
4219 return -EINVAL;
4220
4221 val = *((int *)optval);
4222 /* Only some options are supported */
4223 switch (optname) {
4224 case TCP_BPF_IW:
31aa6503 4225 if (val <= 0 || tp->data_segs_out > tp->syn_data)
fc747810
LB
4226 ret = -EINVAL;
4227 else
4228 tp->snd_cwnd = val;
4229 break;
13bf9641
LB
4230 case TCP_BPF_SNDCWND_CLAMP:
4231 if (val <= 0) {
4232 ret = -EINVAL;
4233 } else {
4234 tp->snd_cwnd_clamp = val;
4235 tp->snd_ssthresh = val;
4236 }
6d3f06a0 4237 break;
1e215300
NS
4238 case TCP_SAVE_SYN:
4239 if (val < 0 || val > 1)
4240 ret = -EINVAL;
4241 else
4242 tp->save_syn = val;
4243 break;
fc747810
LB
4244 default:
4245 ret = -EINVAL;
4246 }
91b5b21c 4247 }
91b5b21c 4248#endif
8c4b4c7e
LB
4249 } else {
4250 ret = -EINVAL;
4251 }
4252 return ret;
4253}
4254
4255static const struct bpf_func_proto bpf_setsockopt_proto = {
4256 .func = bpf_setsockopt,
cd86d1fd 4257 .gpl_only = false,
8c4b4c7e
LB
4258 .ret_type = RET_INTEGER,
4259 .arg1_type = ARG_PTR_TO_CTX,
4260 .arg2_type = ARG_ANYTHING,
4261 .arg3_type = ARG_ANYTHING,
4262 .arg4_type = ARG_PTR_TO_MEM,
4263 .arg5_type = ARG_CONST_SIZE,
4264};
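
/* Usage sketch (sockops program side, illustrative): clamp the initial
 * congestion window once a passive connection is established, before
 * any data segments have gone out:
 *
 *	if (skops->op == BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB) {
 *		int iw = 10;
 *
 *		bpf_setsockopt(skops, SOL_TCP, TCP_BPF_IW,
 *			       &iw, sizeof(iw));
 *	}
 */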
4265
cd86d1fd
LB
4266BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4267 int, level, int, optname, char *, optval, int, optlen)
4268{
4269 struct sock *sk = bpf_sock->sk;
cd86d1fd
LB
4270
4271 if (!sk_fullsock(sk))
4272 goto err_clear;
cd86d1fd
LB
4273#ifdef CONFIG_INET
4274 if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
1edb6e03
AR
4275 struct inet_connection_sock *icsk;
4276 struct tcp_sock *tp;
4277
1e215300
NS
4278 switch (optname) {
4279 case TCP_CONGESTION:
4280 icsk = inet_csk(sk);
cd86d1fd
LB
4281
4282 if (!icsk->icsk_ca_ops || optlen <= 1)
4283 goto err_clear;
4284 strncpy(optval, icsk->icsk_ca_ops->name, optlen);
4285 optval[optlen - 1] = 0;
1e215300
NS
4286 break;
4287 case TCP_SAVED_SYN:
4288 tp = tcp_sk(sk);
4289
4290 if (optlen <= 0 || !tp->saved_syn ||
4291 optlen > tp->saved_syn[0])
4292 goto err_clear;
4293 memcpy(optval, tp->saved_syn + 1, optlen);
4294 break;
4295 default:
cd86d1fd
LB
4296 goto err_clear;
4297 }
6f5c39fa
NS
4298 } else if (level == SOL_IP) {
4299 struct inet_sock *inet = inet_sk(sk);
4300
4301 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4302 goto err_clear;
4303
4304 /* Only some options are supported */
4305 switch (optname) {
4306 case IP_TOS:
4307 *((int *)optval) = (int)inet->tos;
4308 break;
4309 default:
4310 goto err_clear;
4311 }
6f9bd3d7
LB
4312#if IS_ENABLED(CONFIG_IPV6)
4313 } else if (level == SOL_IPV6) {
4314 struct ipv6_pinfo *np = inet6_sk(sk);
4315
4316 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4317 goto err_clear;
4318
4319 /* Only some options are supported */
4320 switch (optname) {
4321 case IPV6_TCLASS:
4322 *((int *)optval) = (int)np->tclass;
4323 break;
4324 default:
4325 goto err_clear;
4326 }
4327#endif
cd86d1fd
LB
4328 } else {
4329 goto err_clear;
4330 }
aa2bc739 4331 return 0;
cd86d1fd
LB
4332#endif
4333err_clear:
4334 memset(optval, 0, optlen);
4335 return -EINVAL;
4336}
4337
4338static const struct bpf_func_proto bpf_getsockopt_proto = {
4339 .func = bpf_getsockopt,
4340 .gpl_only = false,
4341 .ret_type = RET_INTEGER,
4342 .arg1_type = ARG_PTR_TO_CTX,
4343 .arg2_type = ARG_ANYTHING,
4344 .arg3_type = ARG_ANYTHING,
4345 .arg4_type = ARG_PTR_TO_UNINIT_MEM,
4346 .arg5_type = ARG_CONST_SIZE,
4347};
4348
b13d8807
LB
4349BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
4350 int, argval)
4351{
4352 struct sock *sk = bpf_sock->sk;
4353 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
4354
a7dcdf6e 4355 if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
b13d8807
LB
4356 return -EINVAL;
4357
b13d8807
LB
4358 if (val)
4359 tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
4360
4361 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
b13d8807
LB
4362}
4363
4364static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
4365 .func = bpf_sock_ops_cb_flags_set,
4366 .gpl_only = false,
4367 .ret_type = RET_INTEGER,
4368 .arg1_type = ARG_PTR_TO_CTX,
4369 .arg2_type = ARG_ANYTHING,
4370};
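
/* Note on the return value above: only the supported bits are written
 * to the socket, and the unsupported remainder is handed back, so a
 * nonzero return tells the program which requested callbacks it did
 * not get.
 */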
4371
d74bad4e
AI
4372const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
4373EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
4374
4375BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
4376 int, addr_len)
4377{
4378#ifdef CONFIG_INET
4379 struct sock *sk = ctx->sk;
4380 int err;
4381
4382 /* Binding to port can be expensive so it's prohibited in the helper.
4383 * Only binding to IP is supported.
4384 */
4385 err = -EINVAL;
4386 if (addr->sa_family == AF_INET) {
4387 if (addr_len < sizeof(struct sockaddr_in))
4388 return err;
4389 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
4390 return err;
4391 return __inet_bind(sk, addr, addr_len, true, false);
4392#if IS_ENABLED(CONFIG_IPV6)
4393 } else if (addr->sa_family == AF_INET6) {
4394 if (addr_len < SIN6_LEN_RFC2133)
4395 return err;
4396 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
4397 return err;
4398 /* ipv6_bpf_stub cannot be NULL, since it's called from
4399 * bpf_cgroup_inet6_connect hook and ipv6 is already loaded
4400 */
4401 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
4402#endif /* CONFIG_IPV6 */
4403 }
4404#endif /* CONFIG_INET */
4405
4406 return -EAFNOSUPPORT;
4407}
4408
4409static const struct bpf_func_proto bpf_bind_proto = {
4410 .func = bpf_bind,
4411 .gpl_only = false,
4412 .ret_type = RET_INTEGER,
4413 .arg1_type = ARG_PTR_TO_CTX,
4414 .arg2_type = ARG_PTR_TO_MEM,
4415 .arg3_type = ARG_CONST_SIZE,
4416};
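
/* Usage sketch (cgroup/connect4 program side, illustrative): pin the
 * source address of outgoing connections while leaving port selection
 * to the stack (sin_port must stay 0, see the checks above):
 *
 *	struct sockaddr_in sa = {
 *		.sin_family      = AF_INET,
 *		.sin_addr.s_addr = bpf_htonl(0x0a000001), // 10.0.0.1, assumed
 *	};
 *
 *	if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) < 0)
 *		return 0;	// reject the connect
 */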
4417
12bed760
EB
4418#ifdef CONFIG_XFRM
4419BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
4420 struct bpf_xfrm_state *, to, u32, size, u64, flags)
4421{
4422 const struct sec_path *sp = skb_sec_path(skb);
4423 const struct xfrm_state *x;
4424
4425 if (!sp || unlikely(index >= sp->len || flags))
4426 goto err_clear;
4427
4428 x = sp->xvec[index];
4429
4430 if (unlikely(size != sizeof(struct bpf_xfrm_state)))
4431 goto err_clear;
4432
4433 to->reqid = x->props.reqid;
4434 to->spi = x->id.spi;
4435 to->family = x->props.family;
1fbc2e0c
DB
4436 to->ext = 0;
4437
12bed760
EB
4438 if (to->family == AF_INET6) {
4439 memcpy(to->remote_ipv6, x->props.saddr.a6,
4440 sizeof(to->remote_ipv6));
4441 } else {
4442 to->remote_ipv4 = x->props.saddr.a4;
1fbc2e0c 4443 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
12bed760
EB
4444 }
4445
4446 return 0;
4447err_clear:
4448 memset(to, 0, size);
4449 return -EINVAL;
4450}
4451
4452static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
4453 .func = bpf_skb_get_xfrm_state,
4454 .gpl_only = false,
4455 .ret_type = RET_INTEGER,
4456 .arg1_type = ARG_PTR_TO_CTX,
4457 .arg2_type = ARG_ANYTHING,
4458 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
4459 .arg4_type = ARG_CONST_SIZE,
4460 .arg5_type = ARG_ANYTHING,
4461};
4462#endif
4463
87f5fc7e
DA
4464#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
4465static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
4466 const struct neighbour *neigh,
4467 const struct net_device *dev)
4468{
4469 memcpy(params->dmac, neigh->ha, ETH_ALEN);
4470 memcpy(params->smac, dev->dev_addr, ETH_ALEN);
4471 params->h_vlan_TCI = 0;
4472 params->h_vlan_proto = 0;
4c79579b 4473 params->ifindex = dev->ifindex;
87f5fc7e 4474
4c79579b 4475 return 0;
87f5fc7e
DA
4476}
4477#endif
4478
4479#if IS_ENABLED(CONFIG_INET)
4480static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4f74fede 4481 u32 flags, bool check_mtu)
87f5fc7e
DA
4482{
4483 struct in_device *in_dev;
4484 struct neighbour *neigh;
4485 struct net_device *dev;
4486 struct fib_result res;
4487 struct fib_nh *nh;
4488 struct flowi4 fl4;
4489 int err;
4f74fede 4490 u32 mtu;
87f5fc7e
DA
4491
4492 dev = dev_get_by_index_rcu(net, params->ifindex);
4493 if (unlikely(!dev))
4494 return -ENODEV;
4495
4496 /* verify forwarding is enabled on this interface */
4497 in_dev = __in_dev_get_rcu(dev);
4498 if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
4c79579b 4499 return BPF_FIB_LKUP_RET_FWD_DISABLED;
87f5fc7e
DA
4500
4501 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4502 fl4.flowi4_iif = 1;
4503 fl4.flowi4_oif = params->ifindex;
4504 } else {
4505 fl4.flowi4_iif = params->ifindex;
4506 fl4.flowi4_oif = 0;
4507 }
4508 fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
4509 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
4510 fl4.flowi4_flags = 0;
4511
4512 fl4.flowi4_proto = params->l4_protocol;
4513 fl4.daddr = params->ipv4_dst;
4514 fl4.saddr = params->ipv4_src;
4515 fl4.fl4_sport = params->sport;
4516 fl4.fl4_dport = params->dport;
4517
4518 if (flags & BPF_FIB_LOOKUP_DIRECT) {
4519 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4520 struct fib_table *tb;
4521
4522 tb = fib_get_table(net, tbid);
4523 if (unlikely(!tb))
4c79579b 4524 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e
DA
4525
4526 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
4527 } else {
4528 fl4.flowi4_mark = 0;
4529 fl4.flowi4_secid = 0;
4530 fl4.flowi4_tun_key.tun_id = 0;
4531 fl4.flowi4_uid = sock_net_uid(net, NULL);
4532
4533 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
4534 }
4535
4c79579b
DA
4536 if (err) {
4537 /* map fib lookup errors to RTN_ type */
4538 if (err == -EINVAL)
4539 return BPF_FIB_LKUP_RET_BLACKHOLE;
4540 if (err == -EHOSTUNREACH)
4541 return BPF_FIB_LKUP_RET_UNREACHABLE;
4542 if (err == -EACCES)
4543 return BPF_FIB_LKUP_RET_PROHIBIT;
4544
4545 return BPF_FIB_LKUP_RET_NOT_FWDED;
4546 }
4547
4548 if (res.type != RTN_UNICAST)
4549 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e
DA
4550
4551 if (res.fi->fib_nhs > 1)
4552 fib_select_path(net, &res, &fl4, NULL);
4553
4f74fede
DA
4554 if (check_mtu) {
4555 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
4556 if (params->tot_len > mtu)
4c79579b 4557 return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4f74fede
DA
4558 }
4559
87f5fc7e
DA
4560 nh = &res.fi->fib_nh[res.nh_sel];
4561
4562 /* do not handle lwt encaps right now */
4563 if (nh->nh_lwtstate)
4c79579b 4564 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
87f5fc7e
DA
4565
4566 dev = nh->nh_dev;
87f5fc7e
DA
4567 if (nh->nh_gw)
4568 params->ipv4_dst = nh->nh_gw;
4569
4570 params->rt_metric = res.fi->fib_priority;
4571
4572 /* xdp and cls_bpf programs are run in RCU-bh so
4573 * rcu_read_lock_bh is not needed here
4574 */
4575 neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
4c79579b
DA
4576 if (!neigh)
4577 return BPF_FIB_LKUP_RET_NO_NEIGH;
87f5fc7e 4578
4c79579b 4579 return bpf_fib_set_fwd_params(params, neigh, dev);
87f5fc7e
DA
4580}
4581#endif
4582
4583#if IS_ENABLED(CONFIG_IPV6)
4584static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4f74fede 4585 u32 flags, bool check_mtu)
87f5fc7e
DA
4586{
4587 struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
4588 struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
4589 struct neighbour *neigh;
4590 struct net_device *dev;
4591 struct inet6_dev *idev;
4592 struct fib6_info *f6i;
4593 struct flowi6 fl6;
4594 int strict = 0;
4595 int oif;
4f74fede 4596 u32 mtu;
87f5fc7e
DA
4597
4598 /* link local addresses are never forwarded */
4599 if (rt6_need_strict(dst) || rt6_need_strict(src))
4c79579b 4600 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e
DA
4601
4602 dev = dev_get_by_index_rcu(net, params->ifindex);
4603 if (unlikely(!dev))
4604 return -ENODEV;
4605
4606 idev = __in6_dev_get_safely(dev);
4607 if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
4c79579b 4608 return BPF_FIB_LKUP_RET_FWD_DISABLED;
87f5fc7e
DA
4609
4610 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4611 fl6.flowi6_iif = 1;
4612 oif = fl6.flowi6_oif = params->ifindex;
4613 } else {
4614 oif = fl6.flowi6_iif = params->ifindex;
4615 fl6.flowi6_oif = 0;
4616 strict = RT6_LOOKUP_F_HAS_SADDR;
4617 }
bd3a08aa 4618 fl6.flowlabel = params->flowinfo;
87f5fc7e
DA
4619 fl6.flowi6_scope = 0;
4620 fl6.flowi6_flags = 0;
4621 fl6.mp_hash = 0;
4622
4623 fl6.flowi6_proto = params->l4_protocol;
4624 fl6.daddr = *dst;
4625 fl6.saddr = *src;
4626 fl6.fl6_sport = params->sport;
4627 fl6.fl6_dport = params->dport;
4628
4629 if (flags & BPF_FIB_LOOKUP_DIRECT) {
4630 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4631 struct fib6_table *tb;
4632
4633 tb = ipv6_stub->fib6_get_table(net, tbid);
4634 if (unlikely(!tb))
4c79579b 4635 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e
DA
4636
4637 f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
4638 } else {
4639 fl6.flowi6_mark = 0;
4640 fl6.flowi6_secid = 0;
4641 fl6.flowi6_tun_key.tun_id = 0;
4642 fl6.flowi6_uid = sock_net_uid(net, NULL);
4643
4644 f6i = ipv6_stub->fib6_lookup(net, oif, &fl6, strict);
4645 }
4646
4647 if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
4c79579b
DA
4648 return BPF_FIB_LKUP_RET_NOT_FWDED;
4649
4650 if (unlikely(f6i->fib6_flags & RTF_REJECT)) {
4651 switch (f6i->fib6_type) {
4652 case RTN_BLACKHOLE:
4653 return BPF_FIB_LKUP_RET_BLACKHOLE;
4654 case RTN_UNREACHABLE:
4655 return BPF_FIB_LKUP_RET_UNREACHABLE;
4656 case RTN_PROHIBIT:
4657 return BPF_FIB_LKUP_RET_PROHIBIT;
4658 default:
4659 return BPF_FIB_LKUP_RET_NOT_FWDED;
4660 }
4661 }
87f5fc7e 4662
4c79579b
DA
4663 if (f6i->fib6_type != RTN_UNICAST)
4664 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e
DA
4665
4666 if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
4667 f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
4668 fl6.flowi6_oif, NULL,
4669 strict);
4670
4f74fede
DA
4671 if (check_mtu) {
4672 mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src);
4673 if (params->tot_len > mtu)
4c79579b 4674 return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4f74fede
DA
4675 }
4676
87f5fc7e 4677 if (f6i->fib6_nh.nh_lwtstate)
4c79579b 4678 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
87f5fc7e
DA
4679
4680 if (f6i->fib6_flags & RTF_GATEWAY)
4681 *dst = f6i->fib6_nh.nh_gw;
4682
4683 dev = f6i->fib6_nh.nh_dev;
4684 params->rt_metric = f6i->fib6_metric;
4685
4686 /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
4687 * not needed here. We cannot use __ipv6_neigh_lookup_noref here
4688 * because we need to get nd_tbl via the stub.
4689 */
4690 neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
4691 ndisc_hashfn, dst, dev);
4c79579b
DA
4692 if (!neigh)
4693 return BPF_FIB_LKUP_RET_NO_NEIGH;
87f5fc7e 4694
4c79579b 4695 return bpf_fib_set_fwd_params(params, neigh, dev);
87f5fc7e
DA
4696}
4697#endif
4698
4699BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
4700 struct bpf_fib_lookup *, params, int, plen, u32, flags)
4701{
4702 if (plen < sizeof(*params))
4703 return -EINVAL;
4704
9ce64f19
DA
4705 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4706 return -EINVAL;
4707
87f5fc7e
DA
4708 switch (params->family) {
4709#if IS_ENABLED(CONFIG_INET)
4710 case AF_INET:
4711 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
4f74fede 4712 flags, true);
87f5fc7e
DA
4713#endif
4714#if IS_ENABLED(CONFIG_IPV6)
4715 case AF_INET6:
4716 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
4f74fede 4717 flags, true);
87f5fc7e
DA
4718#endif
4719 }
bcece5dc 4720 return -EAFNOSUPPORT;
87f5fc7e
DA
4721}
4722
4723static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
4724 .func = bpf_xdp_fib_lookup,
4725 .gpl_only = true,
4726 .ret_type = RET_INTEGER,
4727 .arg1_type = ARG_PTR_TO_CTX,
4728 .arg2_type = ARG_PTR_TO_MEM,
4729 .arg3_type = ARG_CONST_SIZE,
4730 .arg4_type = ARG_ANYTHING,
4731};
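
/* Usage sketch (XDP forwarding, illustrative; 'iph' and 'eth' are
 * assumed to be bounds-checked pointers into the packet):
 *
 *	struct bpf_fib_lookup fib = {};
 *
 *	fib.family   = AF_INET;
 *	fib.ipv4_src = iph->saddr;
 *	fib.ipv4_dst = iph->daddr;
 *	fib.ifindex  = ctx->ingress_ifindex;
 *
 *	if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) ==
 *	    BPF_FIB_LKUP_RET_SUCCESS) {
 *		__builtin_memcpy(eth->h_dest, fib.dmac, ETH_ALEN);
 *		__builtin_memcpy(eth->h_source, fib.smac, ETH_ALEN);
 *		return bpf_redirect(fib.ifindex, 0);
 *	}
 */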
4732
4733BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
4734 struct bpf_fib_lookup *, params, int, plen, u32, flags)
4735{
4f74fede 4736 struct net *net = dev_net(skb->dev);
4c79579b 4737 int rc = -EAFNOSUPPORT;
4f74fede 4738
87f5fc7e
DA
4739 if (plen < sizeof(*params))
4740 return -EINVAL;
4741
9ce64f19
DA
4742 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4743 return -EINVAL;
4744
87f5fc7e
DA
4745 switch (params->family) {
4746#if IS_ENABLED(CONFIG_INET)
4747 case AF_INET:
4c79579b 4748 rc = bpf_ipv4_fib_lookup(net, params, flags, false);
4f74fede 4749 break;
87f5fc7e
DA
4750#endif
4751#if IS_ENABLED(CONFIG_IPV6)
4752 case AF_INET6:
4c79579b 4753 rc = bpf_ipv6_fib_lookup(net, params, flags, false);
4f74fede 4754 break;
87f5fc7e
DA
4755#endif
4756 }
4f74fede 4757
4c79579b 4758 if (!rc) {
4f74fede
DA
4759 struct net_device *dev;
4760
4c79579b 4761 dev = dev_get_by_index_rcu(net, params->ifindex);
4f74fede 4762 if (!is_skb_forwardable(dev, skb))
4c79579b 4763 rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
4f74fede
DA
4764 }
4765
4c79579b 4766 return rc;
87f5fc7e
DA
4767}
4768
4769static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
4770 .func = bpf_skb_fib_lookup,
4771 .gpl_only = true,
4772 .ret_type = RET_INTEGER,
4773 .arg1_type = ARG_PTR_TO_CTX,
4774 .arg2_type = ARG_PTR_TO_MEM,
4775 .arg3_type = ARG_CONST_SIZE,
4776 .arg4_type = ARG_ANYTHING,
4777};
4778
fe94cc29
MX
4779#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4780static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
4781{
4782 int err;
4783 struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
4784
4785 if (!seg6_validate_srh(srh, len))
4786 return -EINVAL;
4787
4788 switch (type) {
4789 case BPF_LWT_ENCAP_SEG6_INLINE:
4790 if (skb->protocol != htons(ETH_P_IPV6))
4791 return -EBADMSG;
4792
4793 err = seg6_do_srh_inline(skb, srh);
4794 break;
4795 case BPF_LWT_ENCAP_SEG6:
4796 skb_reset_inner_headers(skb);
4797 skb->encapsulation = 1;
4798 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
4799 break;
4800 default:
4801 return -EINVAL;
4802 }
4803
4804 bpf_compute_data_pointers(skb);
4805 if (err)
4806 return err;
4807
4808 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
4809 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
4810
4811 return seg6_lookup_nexthop(skb, NULL, 0);
4812}
4813#endif /* CONFIG_IPV6_SEG6_BPF */
4814
3e0bd37c
PO
4815#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4816static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
4817 bool ingress)
4818{
52f27877 4819 return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
3e0bd37c
PO
4820}
4821#endif
4822
4823BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
fe94cc29
MX
4824 u32, len)
4825{
4826 switch (type) {
4827#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4828 case BPF_LWT_ENCAP_SEG6:
4829 case BPF_LWT_ENCAP_SEG6_INLINE:
4830 return bpf_push_seg6_encap(skb, type, hdr, len);
3e0bd37c
PO
4831#endif
4832#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4833 case BPF_LWT_ENCAP_IP:
4834 return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
fe94cc29
MX
4835#endif
4836 default:
4837 return -EINVAL;
4838 }
4839}
4840
3e0bd37c
PO
4841BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
4842 void *, hdr, u32, len)
4843{
4844 switch (type) {
4845#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4846 case BPF_LWT_ENCAP_IP:
4847 return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
fe94cc29
MX
4848#endif
4849 default:
4850 return -EINVAL;
4851 }
4852}
4853
3e0bd37c
PO
4854static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
4855 .func = bpf_lwt_in_push_encap,
4856 .gpl_only = false,
4857 .ret_type = RET_INTEGER,
4858 .arg1_type = ARG_PTR_TO_CTX,
4859 .arg2_type = ARG_ANYTHING,
4860 .arg3_type = ARG_PTR_TO_MEM,
4861 .arg4_type = ARG_CONST_SIZE
4862};
4863
4864static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
4865 .func = bpf_lwt_xmit_push_encap,
fe94cc29
MX
4866 .gpl_only = false,
4867 .ret_type = RET_INTEGER,
4868 .arg1_type = ARG_PTR_TO_CTX,
4869 .arg2_type = ARG_ANYTHING,
4870 .arg3_type = ARG_PTR_TO_MEM,
4871 .arg4_type = ARG_CONST_SIZE
4872};
4873
61d76980 4874#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
fe94cc29
MX
4875BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
4876 const void *, from, u32, len)
4877{
fe94cc29
MX
4878 struct seg6_bpf_srh_state *srh_state =
4879 this_cpu_ptr(&seg6_bpf_srh_states);
486cdf21 4880 struct ipv6_sr_hdr *srh = srh_state->srh;
fe94cc29 4881 void *srh_tlvs, *srh_end, *ptr;
fe94cc29
MX
4882 int srhoff = 0;
4883
486cdf21 4884 if (srh == NULL)
fe94cc29
MX
4885 return -EINVAL;
4886
fe94cc29
MX
4887 srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
4888 srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
4889
4890 ptr = skb->data + offset;
4891 if (ptr >= srh_tlvs && ptr + len <= srh_end)
486cdf21 4892 srh_state->valid = false;
fe94cc29
MX
4893 else if (ptr < (void *)&srh->flags ||
4894 ptr + len > (void *)&srh->segments)
4895 return -EFAULT;
4896
4897 if (unlikely(bpf_try_make_writable(skb, offset + len)))
4898 return -EFAULT;
486cdf21
MX
4899 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
4900 return -EINVAL;
4901 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
fe94cc29
MX
4902
4903 memcpy(skb->data + offset, from, len);
4904 return 0;
fe94cc29
MX
4905}
4906
4907static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
4908 .func = bpf_lwt_seg6_store_bytes,
4909 .gpl_only = false,
4910 .ret_type = RET_INTEGER,
4911 .arg1_type = ARG_PTR_TO_CTX,
4912 .arg2_type = ARG_ANYTHING,
4913 .arg3_type = ARG_PTR_TO_MEM,
4914 .arg4_type = ARG_CONST_SIZE
4915};
4916
486cdf21 4917static void bpf_update_srh_state(struct sk_buff *skb)
fe94cc29 4918{
fe94cc29
MX
4919 struct seg6_bpf_srh_state *srh_state =
4920 this_cpu_ptr(&seg6_bpf_srh_states);
fe94cc29 4921 int srhoff = 0;
fe94cc29 4922
486cdf21
MX
4923 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
4924 srh_state->srh = NULL;
4925 } else {
4926 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
4927 srh_state->hdrlen = srh_state->srh->hdrlen << 3;
4928 srh_state->valid = true;
fe94cc29 4929 }
486cdf21
MX
4930}
4931
4932BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
4933 u32, action, void *, param, u32, param_len)
4934{
4935 struct seg6_bpf_srh_state *srh_state =
4936 this_cpu_ptr(&seg6_bpf_srh_states);
4937 int hdroff = 0;
4938 int err;
fe94cc29
MX
4939
4940 switch (action) {
4941 case SEG6_LOCAL_ACTION_END_X:
486cdf21
MX
4942 if (!seg6_bpf_has_valid_srh(skb))
4943 return -EBADMSG;
fe94cc29
MX
4944 if (param_len != sizeof(struct in6_addr))
4945 return -EINVAL;
4946 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
4947 case SEG6_LOCAL_ACTION_END_T:
486cdf21
MX
4948 if (!seg6_bpf_has_valid_srh(skb))
4949 return -EBADMSG;
fe94cc29
MX
4950 if (param_len != sizeof(int))
4951 return -EINVAL;
4952 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
486cdf21
MX
4953 case SEG6_LOCAL_ACTION_END_DT6:
4954 if (!seg6_bpf_has_valid_srh(skb))
4955 return -EBADMSG;
fe94cc29
MX
4956 if (param_len != sizeof(int))
4957 return -EINVAL;
486cdf21
MX
4958
4959 if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
4960 return -EBADMSG;
4961 if (!pskb_pull(skb, hdroff))
4962 return -EBADMSG;
4963
4964 skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
4965 skb_reset_network_header(skb);
4966 skb_reset_transport_header(skb);
4967 skb->encapsulation = 0;
4968
4969 bpf_compute_data_pointers(skb);
4970 bpf_update_srh_state(skb);
fe94cc29
MX
4971 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
4972 case SEG6_LOCAL_ACTION_END_B6:
486cdf21
MX
4973 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
4974 return -EBADMSG;
fe94cc29
MX
4975 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
4976 param, param_len);
4977 if (!err)
486cdf21
MX
4978 bpf_update_srh_state(skb);
4979
fe94cc29
MX
4980 return err;
4981 case SEG6_LOCAL_ACTION_END_B6_ENCAP:
486cdf21
MX
4982 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
4983 return -EBADMSG;
fe94cc29
MX
4984 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
4985 param, param_len);
4986 if (!err)
486cdf21
MX
4987 bpf_update_srh_state(skb);
4988
fe94cc29
MX
4989 return err;
4990 default:
4991 return -EINVAL;
4992 }
fe94cc29
MX
4993}
4994
4995static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
4996 .func = bpf_lwt_seg6_action,
4997 .gpl_only = false,
4998 .ret_type = RET_INTEGER,
4999 .arg1_type = ARG_PTR_TO_CTX,
5000 .arg2_type = ARG_ANYTHING,
5001 .arg3_type = ARG_PTR_TO_MEM,
5002 .arg4_type = ARG_CONST_SIZE
5003};
5004
5005BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
5006 s32, len)
5007{
fe94cc29
MX
5008 struct seg6_bpf_srh_state *srh_state =
5009 this_cpu_ptr(&seg6_bpf_srh_states);
486cdf21 5010 struct ipv6_sr_hdr *srh = srh_state->srh;
fe94cc29 5011 void *srh_end, *srh_tlvs, *ptr;
fe94cc29
MX
5012 struct ipv6hdr *hdr;
5013 int srhoff = 0;
5014 int ret;
5015
486cdf21 5016 if (unlikely(srh == NULL))
fe94cc29 5017 return -EINVAL;
fe94cc29
MX
5018
5019 srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
5020 ((srh->first_segment + 1) << 4));
5021 srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
5022 srh_state->hdrlen);
5023 ptr = skb->data + offset;
5024
5025 if (unlikely(ptr < srh_tlvs || ptr > srh_end))
5026 return -EFAULT;
5027 if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
5028 return -EFAULT;
5029
5030 if (len > 0) {
5031 ret = skb_cow_head(skb, len);
5032 if (unlikely(ret < 0))
5033 return ret;
5034
5035 ret = bpf_skb_net_hdr_push(skb, offset, len);
5036 } else {
5037 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
5038 }
5039
5040 bpf_compute_data_pointers(skb);
5041 if (unlikely(ret < 0))
5042 return ret;
5043
5044 hdr = (struct ipv6hdr *)skb->data;
5045 hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
5046
486cdf21
MX
5047 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5048 return -EINVAL;
5049 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
fe94cc29 5050 srh_state->hdrlen += len;
486cdf21 5051 srh_state->valid = false;
fe94cc29 5052 return 0;
fe94cc29
MX
5053}
5054
5055static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
5056 .func = bpf_lwt_seg6_adjust_srh,
5057 .gpl_only = false,
5058 .ret_type = RET_INTEGER,
5059 .arg1_type = ARG_PTR_TO_CTX,
5060 .arg2_type = ARG_ANYTHING,
5061 .arg3_type = ARG_ANYTHING,
5062};
61d76980 5063#endif /* CONFIG_IPV6_SEG6_BPF */
fe94cc29 5064
9b1f3d6e
MKL
5065#define CONVERT_COMMON_TCP_SOCK_FIELDS(md_type, CONVERT) \
5066do { \
5067 switch (si->off) { \
5068 case offsetof(md_type, snd_cwnd): \
5069 CONVERT(snd_cwnd); break; \
5070 case offsetof(md_type, srtt_us): \
5071 CONVERT(srtt_us); break; \
5072 case offsetof(md_type, snd_ssthresh): \
5073 CONVERT(snd_ssthresh); break; \
5074 case offsetof(md_type, rcv_nxt): \
5075 CONVERT(rcv_nxt); break; \
5076 case offsetof(md_type, snd_nxt): \
5077 CONVERT(snd_nxt); break; \
5078 case offsetof(md_type, snd_una): \
5079 CONVERT(snd_una); break; \
5080 case offsetof(md_type, mss_cache): \
5081 CONVERT(mss_cache); break; \
5082 case offsetof(md_type, ecn_flags): \
5083 CONVERT(ecn_flags); break; \
5084 case offsetof(md_type, rate_delivered): \
5085 CONVERT(rate_delivered); break; \
5086 case offsetof(md_type, rate_interval_us): \
5087 CONVERT(rate_interval_us); break; \
5088 case offsetof(md_type, packets_out): \
5089 CONVERT(packets_out); break; \
5090 case offsetof(md_type, retrans_out): \
5091 CONVERT(retrans_out); break; \
5092 case offsetof(md_type, total_retrans): \
5093 CONVERT(total_retrans); break; \
5094 case offsetof(md_type, segs_in): \
5095 CONVERT(segs_in); break; \
5096 case offsetof(md_type, data_segs_in): \
5097 CONVERT(data_segs_in); break; \
5098 case offsetof(md_type, segs_out): \
5099 CONVERT(segs_out); break; \
5100 case offsetof(md_type, data_segs_out): \
5101 CONVERT(data_segs_out); break; \
5102 case offsetof(md_type, lost_out): \
5103 CONVERT(lost_out); break; \
5104 case offsetof(md_type, sacked_out): \
5105 CONVERT(sacked_out); break; \
5106 case offsetof(md_type, bytes_received): \
5107 CONVERT(bytes_received); break; \
5108 case offsetof(md_type, bytes_acked): \
5109 CONVERT(bytes_acked); break; \
5110 } \
5111} while (0)
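
/* The macro above expands, at ctx-rewrite time, into a switch on the
 * field offset within the BPF-visible struct and applies CONVERT() to
 * the tcp_sock member of the same name, so each mirrored field costs a
 * single line here instead of a hand-written load.
 */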
5112
df3f94a0
AB
5113#ifdef CONFIG_INET
5114static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
c8123ead 5115 int dif, int sdif, u8 family, u8 proto)
6acc9b43 5116{
6acc9b43
JS
5117 bool refcounted = false;
5118 struct sock *sk = NULL;
5119
5120 if (family == AF_INET) {
5121 __be32 src4 = tuple->ipv4.saddr;
5122 __be32 dst4 = tuple->ipv4.daddr;
6acc9b43
JS
5123
5124 if (proto == IPPROTO_TCP)
c8123ead 5125 sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
6acc9b43
JS
5126 src4, tuple->ipv4.sport,
5127 dst4, tuple->ipv4.dport,
5128 dif, sdif, &refcounted);
5129 else
5130 sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
5131 dst4, tuple->ipv4.dport,
c8123ead 5132 dif, sdif, &udp_table, NULL);
8a615c6b 5133#if IS_ENABLED(CONFIG_IPV6)
6acc9b43
JS
5134 } else {
5135 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
5136 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
6acc9b43
JS
5137
5138 if (proto == IPPROTO_TCP)
c8123ead 5139 sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
6acc9b43 5140 src6, tuple->ipv6.sport,
cac6cc2f 5141 dst6, ntohs(tuple->ipv6.dport),
6acc9b43 5142 dif, sdif, &refcounted);
8a615c6b
JS
5143 else if (likely(ipv6_bpf_stub))
5144 sk = ipv6_bpf_stub->udp6_lib_lookup(net,
5145 src6, tuple->ipv6.sport,
cac6cc2f 5146 dst6, tuple->ipv6.dport,
8a615c6b 5147 dif, sdif,
c8123ead 5148 &udp_table, NULL);
6acc9b43
JS
5149#endif
5150 }
5151
5152 if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
5153 WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
5154 sk = NULL;
5155 }
5156 return sk;
5157}
5158
edbf8c01 5159/* bpf_skc_lookup performs the core lookup for different types of sockets,
6acc9b43
JS
5160 * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
5161 * The BPF_CALL wrappers cast the returned socket to 'unsigned long' to
5162 * satisfy their declarations.
5163 */
edbf8c01
LB
5164static struct sock *
5165__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5166 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5167 u64 flags)
6acc9b43 5168{
6acc9b43
JS
5169 struct sock *sk = NULL;
5170 u8 family = AF_UNSPEC;
5171 struct net *net;
c8123ead 5172 int sdif;
6acc9b43
JS
5173
5174 family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
f71c6143
JS
5175 if (unlikely(family == AF_UNSPEC || flags ||
5176 !((s32)netns_id < 0 || netns_id <= S32_MAX)))
6acc9b43
JS
5177 goto out;
5178
c8123ead
NH
5179 if (family == AF_INET)
5180 sdif = inet_sdif(skb);
6acc9b43 5181 else
c8123ead
NH
5182 sdif = inet6_sdif(skb);
5183
f71c6143
JS
5184 if ((s32)netns_id < 0) {
5185 net = caller_net;
4cc1feeb 5186 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
f71c6143 5187 } else {
6acc9b43
JS
5188 net = get_net_ns_by_id(caller_net, netns_id);
5189 if (unlikely(!net))
5190 goto out;
c8123ead 5191 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
6acc9b43 5192 put_net(net);
6acc9b43
JS
5193 }
5194
edbf8c01
LB
5195out:
5196 return sk;
5197}
5198
5199static struct sock *
5200__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5201 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5202 u64 flags)
5203{
5204 struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
5205 ifindex, proto, netns_id, flags);
5206
6acc9b43
JS
5207 if (sk)
5208 sk = sk_to_full_sk(sk);
edbf8c01
LB
5209
5210 return sk;
6acc9b43
JS
5211}
5212
edbf8c01
LB
5213static struct sock *
5214bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5215 u8 proto, u64 netns_id, u64 flags)
c8123ead
NH
5216{
5217 struct net *caller_net;
5218 int ifindex;
5219
5220 if (skb->dev) {
5221 caller_net = dev_net(skb->dev);
5222 ifindex = skb->dev->ifindex;
5223 } else {
5224 caller_net = sock_net(skb->sk);
5225 ifindex = 0;
5226 }
5227
edbf8c01
LB
5228 return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
5229 netns_id, flags);
c8123ead
NH
5230}
5231
edbf8c01
LB
5232static struct sock *
5233bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5234 u8 proto, u64 netns_id, u64 flags)
5235{
5236 struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
5237 flags);
5238
5239 if (sk)
5240 sk = sk_to_full_sk(sk);
5241
5242 return sk;
5243}
5244
5245BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
5246 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5247{
5248 return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
5249 netns_id, flags);
5250}
5251
5252static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
5253 .func = bpf_skc_lookup_tcp,
5254 .gpl_only = false,
5255 .pkt_access = true,
5256 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5257 .arg1_type = ARG_PTR_TO_CTX,
5258 .arg2_type = ARG_PTR_TO_MEM,
5259 .arg3_type = ARG_CONST_SIZE,
5260 .arg4_type = ARG_ANYTHING,
5261 .arg5_type = ARG_ANYTHING,
5262};
5263
6acc9b43
JS
5264BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
5265 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5266{
edbf8c01
LB
5267 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
5268 netns_id, flags);
6acc9b43
JS
5269}
5270
5271static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
5272 .func = bpf_sk_lookup_tcp,
5273 .gpl_only = false,
5274 .pkt_access = true,
5275 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5276 .arg1_type = ARG_PTR_TO_CTX,
5277 .arg2_type = ARG_PTR_TO_MEM,
5278 .arg3_type = ARG_CONST_SIZE,
5279 .arg4_type = ARG_ANYTHING,
5280 .arg5_type = ARG_ANYTHING,
5281};
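
/* Usage sketch (tc program side, illustrative): sockets returned by the
 * lookup helpers hold a reference that must be dropped again before the
 * program exits; the verifier enforces this pairing:
 *
 *	struct bpf_sock_tuple tuple = {};	// filled from the packet
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *			       BPF_F_CURRENT_NETNS, 0);
 *	if (sk)
 *		bpf_sk_release(sk);
 */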
5282
5283BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
5284 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5285{
edbf8c01
LB
5286 return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
5287 netns_id, flags);
6acc9b43
JS
5288}
5289
5290static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
5291 .func = bpf_sk_lookup_udp,
5292 .gpl_only = false,
5293 .pkt_access = true,
5294 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5295 .arg1_type = ARG_PTR_TO_CTX,
5296 .arg2_type = ARG_PTR_TO_MEM,
5297 .arg3_type = ARG_CONST_SIZE,
5298 .arg4_type = ARG_ANYTHING,
5299 .arg5_type = ARG_ANYTHING,
5300};
5301
5302BPF_CALL_1(bpf_sk_release, struct sock *, sk)
5303{
5304 if (!sock_flag(sk, SOCK_RCU_FREE))
5305 sock_gen_put(sk);
5306 return 0;
5307}
5308
5309static const struct bpf_func_proto bpf_sk_release_proto = {
5310 .func = bpf_sk_release,
5311 .gpl_only = false,
5312 .ret_type = RET_INTEGER,
1b986589 5313 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
6acc9b43 5314};
c8123ead
NH
5315
5316BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
5317 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5318{
5319 struct net *caller_net = dev_net(ctx->rxq->dev);
5320 int ifindex = ctx->rxq->dev->ifindex;
5321
edbf8c01
LB
5322 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
5323 ifindex, IPPROTO_UDP, netns_id,
5324 flags);
c8123ead
NH
5325}
5326
5327static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
5328 .func = bpf_xdp_sk_lookup_udp,
5329 .gpl_only = false,
5330 .pkt_access = true,
5331 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5332 .arg1_type = ARG_PTR_TO_CTX,
5333 .arg2_type = ARG_PTR_TO_MEM,
5334 .arg3_type = ARG_CONST_SIZE,
5335 .arg4_type = ARG_ANYTHING,
5336 .arg5_type = ARG_ANYTHING,
5337};
5338
edbf8c01
LB
5339BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
5340 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5341{
5342 struct net *caller_net = dev_net(ctx->rxq->dev);
5343 int ifindex = ctx->rxq->dev->ifindex;
5344
5345 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
5346 ifindex, IPPROTO_TCP, netns_id,
5347 flags);
5348}
5349
5350static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
5351 .func = bpf_xdp_skc_lookup_tcp,
5352 .gpl_only = false,
5353 .pkt_access = true,
5354 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5355 .arg1_type = ARG_PTR_TO_CTX,
5356 .arg2_type = ARG_PTR_TO_MEM,
5357 .arg3_type = ARG_CONST_SIZE,
5358 .arg4_type = ARG_ANYTHING,
5359 .arg5_type = ARG_ANYTHING,
5360};
5361
c8123ead
NH
5362BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
5363 struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5364{
5365 struct net *caller_net = dev_net(ctx->rxq->dev);
5366 int ifindex = ctx->rxq->dev->ifindex;
5367
edbf8c01
LB
5368 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
5369 ifindex, IPPROTO_TCP, netns_id,
5370 flags);
c8123ead
NH
5371}
5372
5373static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
5374 .func = bpf_xdp_sk_lookup_tcp,
5375 .gpl_only = false,
5376 .pkt_access = true,
5377 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5378 .arg1_type = ARG_PTR_TO_CTX,
5379 .arg2_type = ARG_PTR_TO_MEM,
5380 .arg3_type = ARG_CONST_SIZE,
5381 .arg4_type = ARG_ANYTHING,
5382 .arg5_type = ARG_ANYTHING,
5383};
6c49e65e 5384
edbf8c01
LB
5385BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
5386 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5387{
5388 return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
5389 sock_net(ctx->sk), 0,
5390 IPPROTO_TCP, netns_id, flags);
5391}
5392
5393static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
5394 .func = bpf_sock_addr_skc_lookup_tcp,
5395 .gpl_only = false,
5396 .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5397 .arg1_type = ARG_PTR_TO_CTX,
5398 .arg2_type = ARG_PTR_TO_MEM,
5399 .arg3_type = ARG_CONST_SIZE,
5400 .arg4_type = ARG_ANYTHING,
5401 .arg5_type = ARG_ANYTHING,
5402};
5403
6c49e65e
AI
5404BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
5405 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5406{
edbf8c01
LB
5407 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
5408 sock_net(ctx->sk), 0, IPPROTO_TCP,
5409 netns_id, flags);
6c49e65e
AI
5410}
5411
5412static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
5413 .func = bpf_sock_addr_sk_lookup_tcp,
5414 .gpl_only = false,
5415 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5416 .arg1_type = ARG_PTR_TO_CTX,
5417 .arg2_type = ARG_PTR_TO_MEM,
5418 .arg3_type = ARG_CONST_SIZE,
5419 .arg4_type = ARG_ANYTHING,
5420 .arg5_type = ARG_ANYTHING,
5421};
5422
5423BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
5424 struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5425{
edbf8c01
LB
5426 return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
5427 sock_net(ctx->sk), 0, IPPROTO_UDP,
5428 netns_id, flags);
6c49e65e
AI
5429}
5430
5431static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
5432 .func = bpf_sock_addr_sk_lookup_udp,
5433 .gpl_only = false,
5434 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5435 .arg1_type = ARG_PTR_TO_CTX,
5436 .arg2_type = ARG_PTR_TO_MEM,
5437 .arg3_type = ARG_CONST_SIZE,
5438 .arg4_type = ARG_ANYTHING,
5439 .arg5_type = ARG_ANYTHING,
5440};
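/* Illustrative sketch: the sock_addr variants above resolve against
 * sock_net(ctx->sk) with ifindex 0, i.e. without device scoping. A
 * hypothetical cgroup/connect4 program could veto connects toward
 * destinations no local TCP socket is listening on:
 */
#if 0	/* example program, not kernel code */
SEC("cgroup/connect4")
int deny_unbacked_connect(struct bpf_sock_addr *ctx)
{
	struct bpf_sock_tuple tuple = {
		.ipv4.daddr = ctx->user_ip4,
		.ipv4.dport = (__u16)ctx->user_port, /* both network order */
	};
	struct bpf_sock *sk;

	sk = bpf_sk_lookup_tcp(ctx, &tuple, sizeof(tuple.ipv4),
			       BPF_F_CURRENT_NETNS, 0);
	if (!sk)
		return 0;	/* reject the connect() */
	bpf_sk_release(sk);
	return 1;		/* allow */
}
#endif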
5441
655a51e5
MKL
5442bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
5443 struct bpf_insn_access_aux *info)
5444{
5445 if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, bytes_acked))
5446 return false;
5447
5448 if (off % size != 0)
5449 return false;
5450
5451 switch (off) {
5452 case offsetof(struct bpf_tcp_sock, bytes_received):
5453 case offsetof(struct bpf_tcp_sock, bytes_acked):
5454 return size == sizeof(__u64);
5455 default:
5456 return size == sizeof(__u32);
5457 }
5458}
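/* Illustrative note on the size rules enforced above: bytes_received and
 * bytes_acked must be read with full 8-byte loads, every other
 * bpf_tcp_sock field with naturally aligned 4-byte loads.
 */
#if 0	/* example fragment, not kernel code */
	__u32 cwnd  = tp->snd_cwnd;	/* 4-byte read: accepted */
	__u64 acked = tp->bytes_acked;	/* must be an 8-byte read */
#endif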
5459
5460u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
5461 const struct bpf_insn *si,
5462 struct bpf_insn *insn_buf,
5463 struct bpf_prog *prog, u32 *target_size)
5464{
5465 struct bpf_insn *insn = insn_buf;
5466
5467#define BPF_TCP_SOCK_GET_COMMON(FIELD) \
5468 do { \
5469 BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) > \
5470 FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
5471 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
5472 si->dst_reg, si->src_reg, \
5473 offsetof(struct tcp_sock, FIELD)); \
5474 } while (0)
5475
5476 CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_tcp_sock,
5477 BPF_TCP_SOCK_GET_COMMON);
5478
5479 if (insn > insn_buf)
5480 return insn - insn_buf;
5481
5482 switch (si->off) {
5483 case offsetof(struct bpf_tcp_sock, rtt_min):
5484 BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
5485 sizeof(struct minmax));
5486 BUILD_BUG_ON(sizeof(struct minmax) <
5487 sizeof(struct minmax_sample));
5488
5489 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5490 offsetof(struct tcp_sock, rtt_min) +
5491 offsetof(struct minmax_sample, v));
5492 break;
5493 }
5494
5495 return insn - insn_buf;
5496}
5497
5498BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
5499{
655a51e5
MKL
5500 if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
5501 return (unsigned long)sk;
5502
5503 return (unsigned long)NULL;
5504}
5505
5506static const struct bpf_func_proto bpf_tcp_sock_proto = {
5507 .func = bpf_tcp_sock,
5508 .gpl_only = false,
5509 .ret_type = RET_PTR_TO_TCP_SOCK_OR_NULL,
5510 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
5511};
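/* Illustrative sketch: the typical chain in a cgroup skb program -- pull
 * the (possibly non-fullsock) socket off the skb, then narrow it to TCP
 * with bpf_tcp_sock() above, which returns NULL for anything that is not
 * a full TCP socket. bpf_printk is the usual libbpf convenience macro.
 */
#if 0	/* example program, not kernel code */
SEC("cgroup_skb/egress")
int log_cwnd(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	struct bpf_tcp_sock *tp;

	if (!sk)
		return 1;
	tp = bpf_tcp_sock(sk);
	if (!tp)
		return 1;
	bpf_printk("snd_cwnd=%u\n", tp->snd_cwnd);
	return 1;	/* allow the packet */
}
#endif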
5512
dbafd7dd
MKL
5513BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
5514{
5515 sk = sk_to_full_sk(sk);
5516
5517 if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
5518 return (unsigned long)sk;
5519
5520 return (unsigned long)NULL;
5521}
5522
5523static const struct bpf_func_proto bpf_get_listener_sock_proto = {
5524 .func = bpf_get_listener_sock,
5525 .gpl_only = false,
5526 .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
5527 .arg1_type = ARG_PTR_TO_SOCK_COMMON,
5528};
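/* Illustrative note: bpf_get_listener_sock() above walks back to the
 * TCP_LISTEN parent via sk_to_full_sk() and returns it without taking a
 * reference -- the SOCK_RCU_FREE test is what makes the bare pointer safe
 * for the RCU-protected lifetime of the program. Example use:
 */
#if 0	/* example fragment, not kernel code */
	struct bpf_sock *lsk = bpf_get_listener_sock(sk);

	if (lsk && lsk->src_port == 443)	/* host byte order */
		/* packet belongs to a socket accepted from :443 */;
#endif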
5529
f7c917ba 5530BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
5531{
5532 unsigned int iphdr_len;
5533
5534 if (skb->protocol == cpu_to_be16(ETH_P_IP))
5535 iphdr_len = sizeof(struct iphdr);
5536 else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
5537 iphdr_len = sizeof(struct ipv6hdr);
5538 else
5539 return 0;
5540
5541 if (skb_headlen(skb) < iphdr_len)
5542 return 0;
5543
5544 if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
5545 return 0;
5546
5547 return INET_ECN_set_ce(skb);
5548}
5549
5550static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
5551 .func = bpf_skb_ecn_set_ce,
5552 .gpl_only = false,
5553 .ret_type = RET_INTEGER,
5554 .arg1_type = ARG_PTR_TO_CTX,
5555};
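/* Illustrative sketch: a cgroup egress program can use the helper above
 * as a building block for ECN-based bandwidth management -- mark CE
 * instead of dropping when a policy says the flow is over budget.
 * flow_over_budget() is hypothetical; the helper itself is a no-op unless
 * the packet is ECT and writable.
 */
#if 0	/* example program, not kernel code */
SEC("cgroup_skb/egress")
int ecn_mark(struct __sk_buff *skb)
{
	if (flow_over_budget(skb))	/* hypothetical policy check */
		bpf_skb_ecn_set_ce(skb);
	return 1;	/* always allow; the CE mark replaces a drop */
}
#endif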
df3f94a0 5556#endif /* CONFIG_INET */
6acc9b43 5557
fe94cc29
MX
5558bool bpf_helper_changes_pkt_data(void *func)
5559{
5560 if (func == bpf_skb_vlan_push ||
5561 func == bpf_skb_vlan_pop ||
5562 func == bpf_skb_store_bytes ||
5563 func == bpf_skb_change_proto ||
5564 func == bpf_skb_change_head ||
0ea488ff 5565 func == sk_skb_change_head ||
fe94cc29 5566 func == bpf_skb_change_tail ||
0ea488ff 5567 func == sk_skb_change_tail ||
fe94cc29
MX
5568 func == bpf_skb_adjust_room ||
5569 func == bpf_skb_pull_data ||
0ea488ff 5570 func == sk_skb_pull_data ||
fe94cc29
MX
5571 func == bpf_clone_redirect ||
5572 func == bpf_l3_csum_replace ||
5573 func == bpf_l4_csum_replace ||
5574 func == bpf_xdp_adjust_head ||
5575 func == bpf_xdp_adjust_meta ||
5576 func == bpf_msg_pull_data ||
6fff607e 5577 func == bpf_msg_push_data ||
7246d8ed 5578 func == bpf_msg_pop_data ||
fe94cc29 5579 func == bpf_xdp_adjust_tail ||
61d76980 5580#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
fe94cc29
MX
5581 func == bpf_lwt_seg6_store_bytes ||
5582 func == bpf_lwt_seg6_adjust_srh ||
61d76980
MX
5583 func == bpf_lwt_seg6_action ||
5584#endif
3e0bd37c
PO
5585 func == bpf_lwt_in_push_encap ||
5586 func == bpf_lwt_xmit_push_encap)
fe94cc29
MX
5587 return true;
5588
5589 return false;
5590}
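/* Illustrative note: every helper listed above may reallocate or move
 * packet data, so the verifier invalidates previously loaded packet
 * pointers across the call. Programs must re-derive them afterwards:
 */
#if 0	/* example fragment, not kernel code */
	void *data     = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	bpf_skb_pull_data(skb, 0);	/* may move skb->data */

	data     = (void *)(long)skb->data;	/* mandatory re-load */
	data_end = (void *)(long)skb->data_end;
#endif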
5591
d4052c4a 5592static const struct bpf_func_proto *
2492d3b8 5593bpf_base_func_proto(enum bpf_func_id func_id)
89aa0758
AS
5594{
5595 switch (func_id) {
5596 case BPF_FUNC_map_lookup_elem:
5597 return &bpf_map_lookup_elem_proto;
5598 case BPF_FUNC_map_update_elem:
5599 return &bpf_map_update_elem_proto;
5600 case BPF_FUNC_map_delete_elem:
5601 return &bpf_map_delete_elem_proto;
f1a2e44a
MV
5602 case BPF_FUNC_map_push_elem:
5603 return &bpf_map_push_elem_proto;
5604 case BPF_FUNC_map_pop_elem:
5605 return &bpf_map_pop_elem_proto;
5606 case BPF_FUNC_map_peek_elem:
5607 return &bpf_map_peek_elem_proto;
03e69b50
DB
5608 case BPF_FUNC_get_prandom_u32:
5609 return &bpf_get_prandom_u32_proto;
c04167ce 5610 case BPF_FUNC_get_smp_processor_id:
80b48c44 5611 return &bpf_get_raw_smp_processor_id_proto;
2d0e30c3
DB
5612 case BPF_FUNC_get_numa_node_id:
5613 return &bpf_get_numa_node_id_proto;
04fd61ab
AS
5614 case BPF_FUNC_tail_call:
5615 return &bpf_tail_call_proto;
17ca8cbf
DB
5616 case BPF_FUNC_ktime_get_ns:
5617 return &bpf_ktime_get_ns_proto;
d83525ca
AS
5618 default:
5619 break;
5620 }
5621
5622 if (!capable(CAP_SYS_ADMIN))
5623 return NULL;
5624
5625 switch (func_id) {
5626 case BPF_FUNC_spin_lock:
5627 return &bpf_spin_lock_proto;
5628 case BPF_FUNC_spin_unlock:
5629 return &bpf_spin_unlock_proto;
0756ea3e 5630 case BPF_FUNC_trace_printk:
d83525ca 5631 return bpf_get_trace_printk_proto();
89aa0758
AS
5632 default:
5633 return NULL;
5634 }
5635}
5636
ae2cf1c4 5637static const struct bpf_func_proto *
5e43f899 5638sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
ae2cf1c4
DA
5639{
5640 switch (func_id) {
5641 /* inet and inet6 sockets are created in a process
5642 * context so there is always a valid uid/gid
5643 */
5644 case BPF_FUNC_get_current_uid_gid:
5645 return &bpf_get_current_uid_gid_proto;
cd339431
RG
5646 case BPF_FUNC_get_local_storage:
5647 return &bpf_get_local_storage_proto;
ae2cf1c4
DA
5648 default:
5649 return bpf_base_func_proto(func_id);
5650 }
5651}
5652
4fbac77d
AI
5653static const struct bpf_func_proto *
5654sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5655{
5656 switch (func_id) {
5657 /* inet and inet6 sockets are created in a process
5658 * context so there is always a valid uid/gid
5659 */
5660 case BPF_FUNC_get_current_uid_gid:
5661 return &bpf_get_current_uid_gid_proto;
d74bad4e
AI
5662 case BPF_FUNC_bind:
5663 switch (prog->expected_attach_type) {
5664 case BPF_CGROUP_INET4_CONNECT:
5665 case BPF_CGROUP_INET6_CONNECT:
5666 return &bpf_bind_proto;
5667 default:
5668 return NULL;
5669 }
d692f113
AI
5670 case BPF_FUNC_get_socket_cookie:
5671 return &bpf_get_socket_cookie_sock_addr_proto;
cd339431
RG
5672 case BPF_FUNC_get_local_storage:
5673 return &bpf_get_local_storage_proto;
6c49e65e
AI
5674#ifdef CONFIG_INET
5675 case BPF_FUNC_sk_lookup_tcp:
5676 return &bpf_sock_addr_sk_lookup_tcp_proto;
5677 case BPF_FUNC_sk_lookup_udp:
5678 return &bpf_sock_addr_sk_lookup_udp_proto;
5679 case BPF_FUNC_sk_release:
5680 return &bpf_sk_release_proto;
edbf8c01
LB
5681 case BPF_FUNC_skc_lookup_tcp:
5682 return &bpf_sock_addr_skc_lookup_tcp_proto;
6c49e65e 5683#endif /* CONFIG_INET */
4fbac77d
AI
5684 default:
5685 return bpf_base_func_proto(func_id);
5686 }
5687}
5688
2492d3b8 5689static const struct bpf_func_proto *
5e43f899 5690sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2492d3b8
DB
5691{
5692 switch (func_id) {
5693 case BPF_FUNC_skb_load_bytes:
5694 return &bpf_skb_load_bytes_proto;
4e1ec56c
DB
5695 case BPF_FUNC_skb_load_bytes_relative:
5696 return &bpf_skb_load_bytes_relative_proto;
91b8270f
CF
5697 case BPF_FUNC_get_socket_cookie:
5698 return &bpf_get_socket_cookie_proto;
6acc5c29
CF
5699 case BPF_FUNC_get_socket_uid:
5700 return &bpf_get_socket_uid_proto;
2492d3b8
DB
5701 default:
5702 return bpf_base_func_proto(func_id);
5703 }
5704}
5705
cd339431
RG
5706static const struct bpf_func_proto *
5707cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5708{
5709 switch (func_id) {
5710 case BPF_FUNC_get_local_storage:
5711 return &bpf_get_local_storage_proto;
46f8bc92
MKL
5712 case BPF_FUNC_sk_fullsock:
5713 return &bpf_sk_fullsock_proto;
655a51e5
MKL
5714#ifdef CONFIG_INET
5715 case BPF_FUNC_tcp_sock:
5716 return &bpf_tcp_sock_proto;
dbafd7dd
MKL
5717 case BPF_FUNC_get_listener_sock:
5718 return &bpf_get_listener_sock_proto;
f7c917ba 5719 case BPF_FUNC_skb_ecn_set_ce:
5720 return &bpf_skb_ecn_set_ce_proto;
655a51e5 5721#endif
cd339431
RG
5722 default:
5723 return sk_filter_func_proto(func_id, prog);
5724 }
5725}
5726
608cd71a 5727static const struct bpf_func_proto *
5e43f899 5728tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
608cd71a
AS
5729{
5730 switch (func_id) {
5731 case BPF_FUNC_skb_store_bytes:
5732 return &bpf_skb_store_bytes_proto;
05c74e5e
DB
5733 case BPF_FUNC_skb_load_bytes:
5734 return &bpf_skb_load_bytes_proto;
4e1ec56c
DB
5735 case BPF_FUNC_skb_load_bytes_relative:
5736 return &bpf_skb_load_bytes_relative_proto;
36bbef52
DB
5737 case BPF_FUNC_skb_pull_data:
5738 return &bpf_skb_pull_data_proto;
7d672345
DB
5739 case BPF_FUNC_csum_diff:
5740 return &bpf_csum_diff_proto;
36bbef52
DB
5741 case BPF_FUNC_csum_update:
5742 return &bpf_csum_update_proto;
91bc4822
AS
5743 case BPF_FUNC_l3_csum_replace:
5744 return &bpf_l3_csum_replace_proto;
5745 case BPF_FUNC_l4_csum_replace:
5746 return &bpf_l4_csum_replace_proto;
3896d655
AS
5747 case BPF_FUNC_clone_redirect:
5748 return &bpf_clone_redirect_proto;
8d20aabe
DB
5749 case BPF_FUNC_get_cgroup_classid:
5750 return &bpf_get_cgroup_classid_proto;
4e10df9a
AS
5751 case BPF_FUNC_skb_vlan_push:
5752 return &bpf_skb_vlan_push_proto;
5753 case BPF_FUNC_skb_vlan_pop:
5754 return &bpf_skb_vlan_pop_proto;
6578171a
DB
5755 case BPF_FUNC_skb_change_proto:
5756 return &bpf_skb_change_proto_proto;
d2485c42
DB
5757 case BPF_FUNC_skb_change_type:
5758 return &bpf_skb_change_type_proto;
2be7e212
DB
5759 case BPF_FUNC_skb_adjust_room:
5760 return &bpf_skb_adjust_room_proto;
5293efe6
DB
5761 case BPF_FUNC_skb_change_tail:
5762 return &bpf_skb_change_tail_proto;
d3aa45ce
AS
5763 case BPF_FUNC_skb_get_tunnel_key:
5764 return &bpf_skb_get_tunnel_key_proto;
5765 case BPF_FUNC_skb_set_tunnel_key:
14ca0751
DB
5766 return bpf_get_skb_set_tunnel_proto(func_id);
5767 case BPF_FUNC_skb_get_tunnel_opt:
5768 return &bpf_skb_get_tunnel_opt_proto;
5769 case BPF_FUNC_skb_set_tunnel_opt:
5770 return bpf_get_skb_set_tunnel_proto(func_id);
27b29f63
AS
5771 case BPF_FUNC_redirect:
5772 return &bpf_redirect_proto;
c46646d0
DB
5773 case BPF_FUNC_get_route_realm:
5774 return &bpf_get_route_realm_proto;
13c5c240
DB
5775 case BPF_FUNC_get_hash_recalc:
5776 return &bpf_get_hash_recalc_proto;
7a4b28c6
DB
5777 case BPF_FUNC_set_hash_invalid:
5778 return &bpf_set_hash_invalid_proto;
ded092cd
DB
5779 case BPF_FUNC_set_hash:
5780 return &bpf_set_hash_proto;
bd570ff9 5781 case BPF_FUNC_perf_event_output:
555c8a86 5782 return &bpf_skb_event_output_proto;
80b48c44
DB
5783 case BPF_FUNC_get_smp_processor_id:
5784 return &bpf_get_smp_processor_id_proto;
747ea55e
DB
5785 case BPF_FUNC_skb_under_cgroup:
5786 return &bpf_skb_under_cgroup_proto;
91b8270f
CF
5787 case BPF_FUNC_get_socket_cookie:
5788 return &bpf_get_socket_cookie_proto;
6acc5c29
CF
5789 case BPF_FUNC_get_socket_uid:
5790 return &bpf_get_socket_uid_proto;
cb20b08e
DB
5791 case BPF_FUNC_fib_lookup:
5792 return &bpf_skb_fib_lookup_proto;
46f8bc92
MKL
5793 case BPF_FUNC_sk_fullsock:
5794 return &bpf_sk_fullsock_proto;
12bed760
EB
5795#ifdef CONFIG_XFRM
5796 case BPF_FUNC_skb_get_xfrm_state:
5797 return &bpf_skb_get_xfrm_state_proto;
5798#endif
cb20b08e
DB
5799#ifdef CONFIG_SOCK_CGROUP_DATA
5800 case BPF_FUNC_skb_cgroup_id:
5801 return &bpf_skb_cgroup_id_proto;
77236281
AI
5802 case BPF_FUNC_skb_ancestor_cgroup_id:
5803 return &bpf_skb_ancestor_cgroup_id_proto;
cb20b08e 5804#endif
df3f94a0 5805#ifdef CONFIG_INET
6acc9b43
JS
5806 case BPF_FUNC_sk_lookup_tcp:
5807 return &bpf_sk_lookup_tcp_proto;
5808 case BPF_FUNC_sk_lookup_udp:
5809 return &bpf_sk_lookup_udp_proto;
5810 case BPF_FUNC_sk_release:
5811 return &bpf_sk_release_proto;
655a51e5
MKL
5812 case BPF_FUNC_tcp_sock:
5813 return &bpf_tcp_sock_proto;
dbafd7dd
MKL
5814 case BPF_FUNC_get_listener_sock:
5815 return &bpf_get_listener_sock_proto;
edbf8c01
LB
5816 case BPF_FUNC_skc_lookup_tcp:
5817 return &bpf_skc_lookup_tcp_proto;
df3f94a0 5818#endif
608cd71a 5819 default:
2492d3b8 5820 return bpf_base_func_proto(func_id);
608cd71a
AS
5821 }
5822}
5823
6a773a15 5824static const struct bpf_func_proto *
5e43f899 5825xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6a773a15 5826{
4de16969
DB
5827 switch (func_id) {
5828 case BPF_FUNC_perf_event_output:
5829 return &bpf_xdp_event_output_proto;
669dc4d7
DB
5830 case BPF_FUNC_get_smp_processor_id:
5831 return &bpf_get_smp_processor_id_proto;
205c3807
DB
5832 case BPF_FUNC_csum_diff:
5833 return &bpf_csum_diff_proto;
17bedab2
MKL
5834 case BPF_FUNC_xdp_adjust_head:
5835 return &bpf_xdp_adjust_head_proto;
de8f3a83
DB
5836 case BPF_FUNC_xdp_adjust_meta:
5837 return &bpf_xdp_adjust_meta_proto;
814abfab
JF
5838 case BPF_FUNC_redirect:
5839 return &bpf_xdp_redirect_proto;
97f91a7c 5840 case BPF_FUNC_redirect_map:
e4a8e817 5841 return &bpf_xdp_redirect_map_proto;
b32cc5b9
NS
5842 case BPF_FUNC_xdp_adjust_tail:
5843 return &bpf_xdp_adjust_tail_proto;
87f5fc7e
DA
5844 case BPF_FUNC_fib_lookup:
5845 return &bpf_xdp_fib_lookup_proto;
c8123ead
NH
5846#ifdef CONFIG_INET
5847 case BPF_FUNC_sk_lookup_udp:
5848 return &bpf_xdp_sk_lookup_udp_proto;
5849 case BPF_FUNC_sk_lookup_tcp:
5850 return &bpf_xdp_sk_lookup_tcp_proto;
5851 case BPF_FUNC_sk_release:
5852 return &bpf_sk_release_proto;
edbf8c01
LB
5853 case BPF_FUNC_skc_lookup_tcp:
5854 return &bpf_xdp_skc_lookup_tcp_proto;
c8123ead 5855#endif
4de16969 5856 default:
2492d3b8 5857 return bpf_base_func_proto(func_id);
4de16969 5858 }
6a773a15
BB
5859}
5860
604326b4
DB
5861const struct bpf_func_proto bpf_sock_map_update_proto __weak;
5862const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
5863
8c4b4c7e 5864static const struct bpf_func_proto *
5e43f899 5865sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
8c4b4c7e
LB
5866{
5867 switch (func_id) {
5868 case BPF_FUNC_setsockopt:
5869 return &bpf_setsockopt_proto;
cd86d1fd
LB
5870 case BPF_FUNC_getsockopt:
5871 return &bpf_getsockopt_proto;
b13d8807
LB
5872 case BPF_FUNC_sock_ops_cb_flags_set:
5873 return &bpf_sock_ops_cb_flags_set_proto;
174a79ff
JF
5874 case BPF_FUNC_sock_map_update:
5875 return &bpf_sock_map_update_proto;
81110384
JF
5876 case BPF_FUNC_sock_hash_update:
5877 return &bpf_sock_hash_update_proto;
d692f113
AI
5878 case BPF_FUNC_get_socket_cookie:
5879 return &bpf_get_socket_cookie_sock_ops_proto;
cd339431
RG
5880 case BPF_FUNC_get_local_storage:
5881 return &bpf_get_local_storage_proto;
a5a3a828
SV
5882 case BPF_FUNC_perf_event_output:
5883 return &bpf_sockopt_event_output_proto;
8c4b4c7e
LB
5884 default:
5885 return bpf_base_func_proto(func_id);
5886 }
5887}
5888
604326b4
DB
5889const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
5890const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;
5891
5e43f899
AI
5892static const struct bpf_func_proto *
5893sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4f738adb
JF
5894{
5895 switch (func_id) {
5896 case BPF_FUNC_msg_redirect_map:
5897 return &bpf_msg_redirect_map_proto;
81110384
JF
5898 case BPF_FUNC_msg_redirect_hash:
5899 return &bpf_msg_redirect_hash_proto;
2a100317
JF
5900 case BPF_FUNC_msg_apply_bytes:
5901 return &bpf_msg_apply_bytes_proto;
91843d54
JF
5902 case BPF_FUNC_msg_cork_bytes:
5903 return &bpf_msg_cork_bytes_proto;
015632bb
JF
5904 case BPF_FUNC_msg_pull_data:
5905 return &bpf_msg_pull_data_proto;
6fff607e
JF
5906 case BPF_FUNC_msg_push_data:
5907 return &bpf_msg_push_data_proto;
7246d8ed
JF
5908 case BPF_FUNC_msg_pop_data:
5909 return &bpf_msg_pop_data_proto;
4f738adb
JF
5910 default:
5911 return bpf_base_func_proto(func_id);
5912 }
5913}
5914
604326b4
DB
5915const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
5916const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;
5917
5e43f899
AI
5918static const struct bpf_func_proto *
5919sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
b005fd18
JF
5920{
5921 switch (func_id) {
8a31db56
JF
5922 case BPF_FUNC_skb_store_bytes:
5923 return &bpf_skb_store_bytes_proto;
b005fd18
JF
5924 case BPF_FUNC_skb_load_bytes:
5925 return &bpf_skb_load_bytes_proto;
8a31db56 5926 case BPF_FUNC_skb_pull_data:
0ea488ff 5927 return &sk_skb_pull_data_proto;
8a31db56 5928 case BPF_FUNC_skb_change_tail:
0ea488ff 5929 return &sk_skb_change_tail_proto;
8a31db56 5930 case BPF_FUNC_skb_change_head:
0ea488ff 5931 return &sk_skb_change_head_proto;
b005fd18
JF
5932 case BPF_FUNC_get_socket_cookie:
5933 return &bpf_get_socket_cookie_proto;
5934 case BPF_FUNC_get_socket_uid:
5935 return &bpf_get_socket_uid_proto;
174a79ff
JF
5936 case BPF_FUNC_sk_redirect_map:
5937 return &bpf_sk_redirect_map_proto;
81110384
JF
5938 case BPF_FUNC_sk_redirect_hash:
5939 return &bpf_sk_redirect_hash_proto;
df3f94a0 5940#ifdef CONFIG_INET
6acc9b43
JS
5941 case BPF_FUNC_sk_lookup_tcp:
5942 return &bpf_sk_lookup_tcp_proto;
5943 case BPF_FUNC_sk_lookup_udp:
5944 return &bpf_sk_lookup_udp_proto;
5945 case BPF_FUNC_sk_release:
5946 return &bpf_sk_release_proto;
edbf8c01
LB
5947 case BPF_FUNC_skc_lookup_tcp:
5948 return &bpf_skc_lookup_tcp_proto;
df3f94a0 5949#endif
b005fd18
JF
5950 default:
5951 return bpf_base_func_proto(func_id);
5952 }
5953}
5954
d58e468b
PP
5955static const struct bpf_func_proto *
5956flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5957{
5958 switch (func_id) {
5959 case BPF_FUNC_skb_load_bytes:
5960 return &bpf_skb_load_bytes_proto;
5961 default:
5962 return bpf_base_func_proto(func_id);
5963 }
5964}
5965
cd3092c7
MX
5966static const struct bpf_func_proto *
5967lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5968{
5969 switch (func_id) {
5970 case BPF_FUNC_skb_load_bytes:
5971 return &bpf_skb_load_bytes_proto;
5972 case BPF_FUNC_skb_pull_data:
5973 return &bpf_skb_pull_data_proto;
5974 case BPF_FUNC_csum_diff:
5975 return &bpf_csum_diff_proto;
5976 case BPF_FUNC_get_cgroup_classid:
5977 return &bpf_get_cgroup_classid_proto;
5978 case BPF_FUNC_get_route_realm:
5979 return &bpf_get_route_realm_proto;
5980 case BPF_FUNC_get_hash_recalc:
5981 return &bpf_get_hash_recalc_proto;
5982 case BPF_FUNC_perf_event_output:
5983 return &bpf_skb_event_output_proto;
5984 case BPF_FUNC_get_smp_processor_id:
5985 return &bpf_get_smp_processor_id_proto;
5986 case BPF_FUNC_skb_under_cgroup:
5987 return &bpf_skb_under_cgroup_proto;
5988 default:
5989 return bpf_base_func_proto(func_id);
5990 }
5991}
5992
5993static const struct bpf_func_proto *
5994lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
5995{
5996 switch (func_id) {
5997 case BPF_FUNC_lwt_push_encap:
3e0bd37c 5998 return &bpf_lwt_in_push_encap_proto;
cd3092c7
MX
5999 default:
6000 return lwt_out_func_proto(func_id, prog);
6001 }
6002}
6003
3a0af8fd 6004static const struct bpf_func_proto *
5e43f899 6005lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
3a0af8fd
TG
6006{
6007 switch (func_id) {
6008 case BPF_FUNC_skb_get_tunnel_key:
6009 return &bpf_skb_get_tunnel_key_proto;
6010 case BPF_FUNC_skb_set_tunnel_key:
6011 return bpf_get_skb_set_tunnel_proto(func_id);
6012 case BPF_FUNC_skb_get_tunnel_opt:
6013 return &bpf_skb_get_tunnel_opt_proto;
6014 case BPF_FUNC_skb_set_tunnel_opt:
6015 return bpf_get_skb_set_tunnel_proto(func_id);
6016 case BPF_FUNC_redirect:
6017 return &bpf_redirect_proto;
6018 case BPF_FUNC_clone_redirect:
6019 return &bpf_clone_redirect_proto;
6020 case BPF_FUNC_skb_change_tail:
6021 return &bpf_skb_change_tail_proto;
6022 case BPF_FUNC_skb_change_head:
6023 return &bpf_skb_change_head_proto;
6024 case BPF_FUNC_skb_store_bytes:
6025 return &bpf_skb_store_bytes_proto;
6026 case BPF_FUNC_csum_update:
6027 return &bpf_csum_update_proto;
6028 case BPF_FUNC_l3_csum_replace:
6029 return &bpf_l3_csum_replace_proto;
6030 case BPF_FUNC_l4_csum_replace:
6031 return &bpf_l4_csum_replace_proto;
6032 case BPF_FUNC_set_hash_invalid:
6033 return &bpf_set_hash_invalid_proto;
3e0bd37c
PO
6034 case BPF_FUNC_lwt_push_encap:
6035 return &bpf_lwt_xmit_push_encap_proto;
3a0af8fd 6036 default:
cd3092c7 6037 return lwt_out_func_proto(func_id, prog);
3a0af8fd
TG
6038 }
6039}
6040
004d4b27
MX
6041static const struct bpf_func_proto *
6042lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6043{
6044 switch (func_id) {
61d76980 6045#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
004d4b27
MX
6046 case BPF_FUNC_lwt_seg6_store_bytes:
6047 return &bpf_lwt_seg6_store_bytes_proto;
6048 case BPF_FUNC_lwt_seg6_action:
6049 return &bpf_lwt_seg6_action_proto;
6050 case BPF_FUNC_lwt_seg6_adjust_srh:
6051 return &bpf_lwt_seg6_adjust_srh_proto;
61d76980 6052#endif
004d4b27
MX
6053 default:
6054 return lwt_out_func_proto(func_id, prog);
3a0af8fd
TG
6055 }
6056}
6057
f96da094 6058static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 6059 const struct bpf_prog *prog,
f96da094 6060 struct bpf_insn_access_aux *info)
23994631 6061{
f96da094 6062 const int size_default = sizeof(__u32);
23994631 6063
9bac3d6d
AS
6064 if (off < 0 || off >= sizeof(struct __sk_buff))
6065 return false;
62c7989b 6066
4936e352 6067 /* The verifier guarantees that size > 0. */
9bac3d6d
AS
6068 if (off % size != 0)
6069 return false;
62c7989b
DB
6070
6071 switch (off) {
f96da094
DB
6072 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6073 if (off + size > offsetofend(struct __sk_buff, cb[4]))
62c7989b
DB
6074 return false;
6075 break;
8a31db56
JF
6076 case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
6077 case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
6078 case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
6079 case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
f96da094 6080 case bpf_ctx_range(struct __sk_buff, data):
de8f3a83 6081 case bpf_ctx_range(struct __sk_buff, data_meta):
f96da094
DB
6082 case bpf_ctx_range(struct __sk_buff, data_end):
6083 if (size != size_default)
23994631 6084 return false;
31fd8581 6085 break;
b7df9ada
DB
6086 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
6087 if (size != sizeof(__u64))
d58e468b
PP
6088 return false;
6089 break;
f11216b2
VD
6090 case bpf_ctx_range(struct __sk_buff, tstamp):
6091 if (size != sizeof(__u64))
6092 return false;
6093 break;
46f8bc92
MKL
6094 case offsetof(struct __sk_buff, sk):
6095 if (type == BPF_WRITE || size != sizeof(__u64))
6096 return false;
6097 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
6098 break;
31fd8581 6099 default:
f96da094 6100 /* Only narrow read access allowed for now. */
31fd8581 6101 if (type == BPF_WRITE) {
f96da094 6102 if (size != size_default)
31fd8581
YS
6103 return false;
6104 } else {
f96da094
DB
6105 bpf_ctx_record_field_size(info, size_default);
6106 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
23994631 6107 return false;
31fd8581 6108 }
62c7989b 6109 }
9bac3d6d
AS
6110
6111 return true;
6112}
6113
d691f9e8 6114static bool sk_filter_is_valid_access(int off, int size,
19de99f7 6115 enum bpf_access_type type,
5e43f899 6116 const struct bpf_prog *prog,
23994631 6117 struct bpf_insn_access_aux *info)
d691f9e8 6118{
db58ba45 6119 switch (off) {
f96da094
DB
6120 case bpf_ctx_range(struct __sk_buff, tc_classid):
6121 case bpf_ctx_range(struct __sk_buff, data):
de8f3a83 6122 case bpf_ctx_range(struct __sk_buff, data_meta):
f96da094 6123 case bpf_ctx_range(struct __sk_buff, data_end):
b7df9ada 6124 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
8a31db56 6125 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
f11216b2 6126 case bpf_ctx_range(struct __sk_buff, tstamp):
e3da08d0 6127 case bpf_ctx_range(struct __sk_buff, wire_len):
045efa82 6128 return false;
db58ba45 6129 }
045efa82 6130
d691f9e8
AS
6131 if (type == BPF_WRITE) {
6132 switch (off) {
f96da094 6133 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
d691f9e8
AS
6134 break;
6135 default:
6136 return false;
6137 }
6138 }
6139
5e43f899 6140 return bpf_skb_is_valid_access(off, size, type, prog, info);
d691f9e8
AS
6141}
6142
b39b5f41
SL
6143static bool cg_skb_is_valid_access(int off, int size,
6144 enum bpf_access_type type,
6145 const struct bpf_prog *prog,
6146 struct bpf_insn_access_aux *info)
6147{
6148 switch (off) {
6149 case bpf_ctx_range(struct __sk_buff, tc_classid):
6150 case bpf_ctx_range(struct __sk_buff, data_meta):
b7df9ada 6151 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
e3da08d0 6152 case bpf_ctx_range(struct __sk_buff, wire_len):
b39b5f41 6153 return false;
ab21c1b5
DB
6154 case bpf_ctx_range(struct __sk_buff, data):
6155 case bpf_ctx_range(struct __sk_buff, data_end):
6156 if (!capable(CAP_SYS_ADMIN))
6157 return false;
6158 break;
b39b5f41 6159 }
ab21c1b5 6160
b39b5f41
SL
6161 if (type == BPF_WRITE) {
6162 switch (off) {
6163 case bpf_ctx_range(struct __sk_buff, mark):
6164 case bpf_ctx_range(struct __sk_buff, priority):
6165 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6166 break;
f11216b2
VD
6167 case bpf_ctx_range(struct __sk_buff, tstamp):
6168 if (!capable(CAP_SYS_ADMIN))
6169 return false;
6170 break;
b39b5f41
SL
6171 default:
6172 return false;
6173 }
6174 }
6175
6176 switch (off) {
6177 case bpf_ctx_range(struct __sk_buff, data):
6178 info->reg_type = PTR_TO_PACKET;
6179 break;
6180 case bpf_ctx_range(struct __sk_buff, data_end):
6181 info->reg_type = PTR_TO_PACKET_END;
6182 break;
6183 }
6184
6185 return bpf_skb_is_valid_access(off, size, type, prog, info);
6186}
6187
3a0af8fd
TG
6188static bool lwt_is_valid_access(int off, int size,
6189 enum bpf_access_type type,
5e43f899 6190 const struct bpf_prog *prog,
23994631 6191 struct bpf_insn_access_aux *info)
3a0af8fd
TG
6192{
6193 switch (off) {
f96da094 6194 case bpf_ctx_range(struct __sk_buff, tc_classid):
8a31db56 6195 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
de8f3a83 6196 case bpf_ctx_range(struct __sk_buff, data_meta):
b7df9ada 6197 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
f11216b2 6198 case bpf_ctx_range(struct __sk_buff, tstamp):
e3da08d0 6199 case bpf_ctx_range(struct __sk_buff, wire_len):
3a0af8fd
TG
6200 return false;
6201 }
6202
6203 if (type == BPF_WRITE) {
6204 switch (off) {
f96da094
DB
6205 case bpf_ctx_range(struct __sk_buff, mark):
6206 case bpf_ctx_range(struct __sk_buff, priority):
6207 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
3a0af8fd
TG
6208 break;
6209 default:
6210 return false;
6211 }
6212 }
6213
f96da094
DB
6214 switch (off) {
6215 case bpf_ctx_range(struct __sk_buff, data):
6216 info->reg_type = PTR_TO_PACKET;
6217 break;
6218 case bpf_ctx_range(struct __sk_buff, data_end):
6219 info->reg_type = PTR_TO_PACKET_END;
6220 break;
6221 }
6222
5e43f899 6223 return bpf_skb_is_valid_access(off, size, type, prog, info);
3a0af8fd
TG
6224}
6225
aac3fc32
AI
6226/* Attach type specific accesses */
6227static bool __sock_filter_check_attach_type(int off,
6228 enum bpf_access_type access_type,
6229 enum bpf_attach_type attach_type)
61023658 6230{
aac3fc32
AI
6231 switch (off) {
6232 case offsetof(struct bpf_sock, bound_dev_if):
6233 case offsetof(struct bpf_sock, mark):
6234 case offsetof(struct bpf_sock, priority):
6235 switch (attach_type) {
6236 case BPF_CGROUP_INET_SOCK_CREATE:
6237 goto full_access;
6238 default:
6239 return false;
6240 }
6241 case bpf_ctx_range(struct bpf_sock, src_ip4):
6242 switch (attach_type) {
6243 case BPF_CGROUP_INET4_POST_BIND:
6244 goto read_only;
6245 default:
6246 return false;
6247 }
6248 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
6249 switch (attach_type) {
6250 case BPF_CGROUP_INET6_POST_BIND:
6251 goto read_only;
6252 default:
6253 return false;
6254 }
6255 case bpf_ctx_range(struct bpf_sock, src_port):
6256 switch (attach_type) {
6257 case BPF_CGROUP_INET4_POST_BIND:
6258 case BPF_CGROUP_INET6_POST_BIND:
6259 goto read_only;
61023658
DA
6260 default:
6261 return false;
6262 }
6263 }
aac3fc32
AI
6264read_only:
6265 return access_type == BPF_READ;
6266full_access:
6267 return true;
6268}
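/* Illustrative sketch of what the table above permits: src_ip4/src_port
 * are read-only from INET4_POST_BIND programs, so a policy like "no
 * binds to privileged ports" is expressible:
 */
#if 0	/* example program, not kernel code */
SEC("cgroup/post_bind4")
int bind_policy(struct bpf_sock *ctx)
{
	return ctx->src_port >= 1024;	/* 0 = reject bind, 1 = allow */
}
#endif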
6269
46f8bc92
MKL
6270bool bpf_sock_common_is_valid_access(int off, int size,
6271 enum bpf_access_type type,
aac3fc32
AI
6272 struct bpf_insn_access_aux *info)
6273{
aac3fc32 6274 switch (off) {
46f8bc92
MKL
6275 case bpf_ctx_range_till(struct bpf_sock, type, priority):
6276 return false;
6277 default:
6278 return bpf_sock_is_valid_access(off, size, type, info);
aac3fc32 6279 }
aac3fc32
AI
6280}
6281
c64b7983
JS
6282bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
6283 struct bpf_insn_access_aux *info)
aac3fc32 6284{
aa65d696
MKL
6285 const int size_default = sizeof(__u32);
6286
aac3fc32 6287 if (off < 0 || off >= sizeof(struct bpf_sock))
61023658 6288 return false;
61023658
DA
6289 if (off % size != 0)
6290 return false;
aa65d696
MKL
6291
6292 switch (off) {
6293 case offsetof(struct bpf_sock, state):
6294 case offsetof(struct bpf_sock, family):
6295 case offsetof(struct bpf_sock, type):
6296 case offsetof(struct bpf_sock, protocol):
6297 case offsetof(struct bpf_sock, dst_port):
6298 case offsetof(struct bpf_sock, src_port):
6299 case bpf_ctx_range(struct bpf_sock, src_ip4):
6300 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
6301 case bpf_ctx_range(struct bpf_sock, dst_ip4):
6302 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
6303 bpf_ctx_record_field_size(info, size_default);
6304 return bpf_ctx_narrow_access_ok(off, size, size_default);
6305 }
6306
6307 return size == size_default;
61023658
DA
6308}
6309
c64b7983
JS
6310static bool sock_filter_is_valid_access(int off, int size,
6311 enum bpf_access_type type,
6312 const struct bpf_prog *prog,
6313 struct bpf_insn_access_aux *info)
6314{
6315 if (!bpf_sock_is_valid_access(off, size, type, info))
6316 return false;
6317 return __sock_filter_check_attach_type(off, type,
6318 prog->expected_attach_type);
6319}
6320
b09928b9
DB
6321static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
6322 const struct bpf_prog *prog)
6323{
6324 /* Neither direct read nor direct write requires any preliminary
6325 * action.
6326 */
6327 return 0;
6328}
6329
047b0ecd
DB
6330static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
6331 const struct bpf_prog *prog, int drop_verdict)
36bbef52
DB
6332{
6333 struct bpf_insn *insn = insn_buf;
6334
6335 if (!direct_write)
6336 return 0;
6337
6338 /* if (!skb->cloned)
6339 * goto start;
6340 *
6341 * (Fast path. Otherwise we conservatively assume the skb may be
6342 * a clone and do the rest in the helper.)
6343 */
6344 *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
6345 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
6346 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
6347
6348 /* ret = bpf_skb_pull_data(skb, 0); */
6349 *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
6350 *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
6351 *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6352 BPF_FUNC_skb_pull_data);
6353 /* if (!ret)
6354 * goto restore;
6355 * return TC_ACT_SHOT;
6356 */
6357 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
047b0ecd 6358 *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
36bbef52
DB
6359 *insn++ = BPF_EXIT_INSN();
6360
6361 /* restore: */
6362 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
6363 /* start: */
6364 *insn++ = prog->insnsi[0];
6365
6366 return insn - insn_buf;
6367}
6368
e0cea7ce
DB
6369static int bpf_gen_ld_abs(const struct bpf_insn *orig,
6370 struct bpf_insn *insn_buf)
6371{
6372 bool indirect = BPF_MODE(orig->code) == BPF_IND;
6373 struct bpf_insn *insn = insn_buf;
6374
6375 /* We're guaranteed here that CTX is in R6. */
6376 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
6377 if (!indirect) {
6378 *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
6379 } else {
6380 *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
6381 if (orig->imm)
6382 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
6383 }
6384
6385 switch (BPF_SIZE(orig->code)) {
6386 case BPF_B:
6387 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
6388 break;
6389 case BPF_H:
6390 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
6391 break;
6392 case BPF_W:
6393 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
6394 break;
6395 }
6396
6397 *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
6398 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
6399 *insn++ = BPF_EXIT_INSN();
6400
6401 return insn - insn_buf;
6402}
6403
047b0ecd
DB
6404static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
6405 const struct bpf_prog *prog)
6406{
6407 return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
6408}
6409
d691f9e8 6410static bool tc_cls_act_is_valid_access(int off, int size,
19de99f7 6411 enum bpf_access_type type,
5e43f899 6412 const struct bpf_prog *prog,
23994631 6413 struct bpf_insn_access_aux *info)
d691f9e8
AS
6414{
6415 if (type == BPF_WRITE) {
6416 switch (off) {
f96da094
DB
6417 case bpf_ctx_range(struct __sk_buff, mark):
6418 case bpf_ctx_range(struct __sk_buff, tc_index):
6419 case bpf_ctx_range(struct __sk_buff, priority):
6420 case bpf_ctx_range(struct __sk_buff, tc_classid):
6421 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
f11216b2 6422 case bpf_ctx_range(struct __sk_buff, tstamp):
74e31ca8 6423 case bpf_ctx_range(struct __sk_buff, queue_mapping):
d691f9e8
AS
6424 break;
6425 default:
6426 return false;
6427 }
6428 }
19de99f7 6429
f96da094
DB
6430 switch (off) {
6431 case bpf_ctx_range(struct __sk_buff, data):
6432 info->reg_type = PTR_TO_PACKET;
6433 break;
de8f3a83
DB
6434 case bpf_ctx_range(struct __sk_buff, data_meta):
6435 info->reg_type = PTR_TO_PACKET_META;
6436 break;
f96da094
DB
6437 case bpf_ctx_range(struct __sk_buff, data_end):
6438 info->reg_type = PTR_TO_PACKET_END;
6439 break;
b7df9ada 6440 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
8a31db56
JF
6441 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6442 return false;
f96da094
DB
6443 }
6444
5e43f899 6445 return bpf_skb_is_valid_access(off, size, type, prog, info);
d691f9e8
AS
6446}
6447
1afaf661 6448static bool __is_valid_xdp_access(int off, int size)
6a773a15
BB
6449{
6450 if (off < 0 || off >= sizeof(struct xdp_md))
6451 return false;
6452 if (off % size != 0)
6453 return false;
6088b582 6454 if (size != sizeof(__u32))
6a773a15
BB
6455 return false;
6456
6457 return true;
6458}
6459
6460static bool xdp_is_valid_access(int off, int size,
6461 enum bpf_access_type type,
5e43f899 6462 const struct bpf_prog *prog,
23994631 6463 struct bpf_insn_access_aux *info)
6a773a15 6464{
0d830032
JK
6465 if (type == BPF_WRITE) {
6466 if (bpf_prog_is_dev_bound(prog->aux)) {
6467 switch (off) {
6468 case offsetof(struct xdp_md, rx_queue_index):
6469 return __is_valid_xdp_access(off, size);
6470 }
6471 }
6a773a15 6472 return false;
0d830032 6473 }
6a773a15
BB
6474
6475 switch (off) {
6476 case offsetof(struct xdp_md, data):
23994631 6477 info->reg_type = PTR_TO_PACKET;
6a773a15 6478 break;
de8f3a83
DB
6479 case offsetof(struct xdp_md, data_meta):
6480 info->reg_type = PTR_TO_PACKET_META;
6481 break;
6a773a15 6482 case offsetof(struct xdp_md, data_end):
23994631 6483 info->reg_type = PTR_TO_PACKET_END;
6a773a15
BB
6484 break;
6485 }
6486
1afaf661 6487 return __is_valid_xdp_access(off, size);
6a773a15
BB
6488}
6489
6490void bpf_warn_invalid_xdp_action(u32 act)
6491{
9beb8bed
DB
6492 const u32 act_max = XDP_REDIRECT;
6493
6494 WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
6495 act > act_max ? "Illegal" : "Driver unsupported",
6496 act);
6a773a15
BB
6497}
6498EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
6499
4fbac77d
AI
6500static bool sock_addr_is_valid_access(int off, int size,
6501 enum bpf_access_type type,
6502 const struct bpf_prog *prog,
6503 struct bpf_insn_access_aux *info)
6504{
6505 const int size_default = sizeof(__u32);
6506
6507 if (off < 0 || off >= sizeof(struct bpf_sock_addr))
6508 return false;
6509 if (off % size != 0)
6510 return false;
6511
6512 /* Disallow access to IPv6 fields from IPv4 context and vice
6513 * versa.
6514 */
6515 switch (off) {
6516 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
6517 switch (prog->expected_attach_type) {
6518 case BPF_CGROUP_INET4_BIND:
d74bad4e 6519 case BPF_CGROUP_INET4_CONNECT:
1cedee13 6520 case BPF_CGROUP_UDP4_SENDMSG:
4fbac77d
AI
6521 break;
6522 default:
6523 return false;
6524 }
6525 break;
6526 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
6527 switch (prog->expected_attach_type) {
6528 case BPF_CGROUP_INET6_BIND:
d74bad4e 6529 case BPF_CGROUP_INET6_CONNECT:
1cedee13
AI
6530 case BPF_CGROUP_UDP6_SENDMSG:
6531 break;
6532 default:
6533 return false;
6534 }
6535 break;
6536 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
6537 switch (prog->expected_attach_type) {
6538 case BPF_CGROUP_UDP4_SENDMSG:
6539 break;
6540 default:
6541 return false;
6542 }
6543 break;
6544 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
6545 msg_src_ip6[3]):
6546 switch (prog->expected_attach_type) {
6547 case BPF_CGROUP_UDP6_SENDMSG:
4fbac77d
AI
6548 break;
6549 default:
6550 return false;
6551 }
6552 break;
6553 }
6554
6555 switch (off) {
6556 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
6557 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
1cedee13
AI
6558 case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
6559 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
6560 msg_src_ip6[3]):
4fbac77d
AI
6561 /* Only narrow read access allowed for now. */
6562 if (type == BPF_READ) {
6563 bpf_ctx_record_field_size(info, size_default);
6564 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
6565 return false;
6566 } else {
6567 if (size != size_default)
6568 return false;
6569 }
6570 break;
6571 case bpf_ctx_range(struct bpf_sock_addr, user_port):
6572 if (size != size_default)
6573 return false;
6574 break;
6575 default:
6576 if (type == BPF_READ) {
6577 if (size != size_default)
6578 return false;
6579 } else {
6580 return false;
6581 }
6582 }
6583
6584 return true;
6585}
6586
44f0e430
LB
6587static bool sock_ops_is_valid_access(int off, int size,
6588 enum bpf_access_type type,
5e43f899 6589 const struct bpf_prog *prog,
44f0e430 6590 struct bpf_insn_access_aux *info)
40304b2a 6591{
44f0e430
LB
6592 const int size_default = sizeof(__u32);
6593
40304b2a
LB
6594 if (off < 0 || off >= sizeof(struct bpf_sock_ops))
6595 return false;
44f0e430 6596
40304b2a
LB
6597 /* The verifier guarantees that size > 0. */
6598 if (off % size != 0)
6599 return false;
40304b2a 6600
40304b2a
LB
6601 if (type == BPF_WRITE) {
6602 switch (off) {
2585cd62 6603 case offsetof(struct bpf_sock_ops, reply):
6f9bd3d7 6604 case offsetof(struct bpf_sock_ops, sk_txhash):
44f0e430
LB
6605 if (size != size_default)
6606 return false;
40304b2a
LB
6607 break;
6608 default:
6609 return false;
6610 }
44f0e430
LB
6611 } else {
6612 switch (off) {
6613 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
6614 bytes_acked):
6615 if (size != sizeof(__u64))
6616 return false;
6617 break;
6618 default:
6619 if (size != size_default)
6620 return false;
6621 break;
6622 }
40304b2a
LB
6623 }
6624
44f0e430 6625 return true;
40304b2a
LB
6626}
6627
8a31db56
JF
6628static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
6629 const struct bpf_prog *prog)
6630{
047b0ecd 6631 return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
8a31db56
JF
6632}
6633
b005fd18
JF
6634static bool sk_skb_is_valid_access(int off, int size,
6635 enum bpf_access_type type,
5e43f899 6636 const struct bpf_prog *prog,
b005fd18
JF
6637 struct bpf_insn_access_aux *info)
6638{
de8f3a83
DB
6639 switch (off) {
6640 case bpf_ctx_range(struct __sk_buff, tc_classid):
6641 case bpf_ctx_range(struct __sk_buff, data_meta):
b7df9ada 6642 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
f11216b2 6643 case bpf_ctx_range(struct __sk_buff, tstamp):
e3da08d0 6644 case bpf_ctx_range(struct __sk_buff, wire_len):
de8f3a83
DB
6645 return false;
6646 }
6647
8a31db56
JF
6648 if (type == BPF_WRITE) {
6649 switch (off) {
8a31db56
JF
6650 case bpf_ctx_range(struct __sk_buff, tc_index):
6651 case bpf_ctx_range(struct __sk_buff, priority):
6652 break;
6653 default:
6654 return false;
6655 }
6656 }
6657
b005fd18 6658 switch (off) {
f7e9cb1e 6659 case bpf_ctx_range(struct __sk_buff, mark):
8a31db56 6660 return false;
b005fd18
JF
6661 case bpf_ctx_range(struct __sk_buff, data):
6662 info->reg_type = PTR_TO_PACKET;
6663 break;
6664 case bpf_ctx_range(struct __sk_buff, data_end):
6665 info->reg_type = PTR_TO_PACKET_END;
6666 break;
6667 }
6668
5e43f899 6669 return bpf_skb_is_valid_access(off, size, type, prog, info);
b005fd18
JF
6670}
6671
4f738adb
JF
6672static bool sk_msg_is_valid_access(int off, int size,
6673 enum bpf_access_type type,
5e43f899 6674 const struct bpf_prog *prog,
4f738adb
JF
6675 struct bpf_insn_access_aux *info)
6676{
6677 if (type == BPF_WRITE)
6678 return false;
6679
bc1b4f01
JF
6680 if (off % size != 0)
6681 return false;
6682
4f738adb
JF
6683 switch (off) {
6684 case offsetof(struct sk_msg_md, data):
6685 info->reg_type = PTR_TO_PACKET;
303def35
JF
6686 if (size != sizeof(__u64))
6687 return false;
4f738adb
JF
6688 break;
6689 case offsetof(struct sk_msg_md, data_end):
6690 info->reg_type = PTR_TO_PACKET_END;
303def35
JF
6691 if (size != sizeof(__u64))
6692 return false;
4f738adb 6693 break;
bc1b4f01
JF
6694 case bpf_ctx_range(struct sk_msg_md, family):
6695 case bpf_ctx_range(struct sk_msg_md, remote_ip4):
6696 case bpf_ctx_range(struct sk_msg_md, local_ip4):
6697 case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
6698 case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
6699 case bpf_ctx_range(struct sk_msg_md, remote_port):
6700 case bpf_ctx_range(struct sk_msg_md, local_port):
6701 case bpf_ctx_range(struct sk_msg_md, size):
303def35
JF
6702 if (size != sizeof(__u32))
6703 return false;
bc1b4f01
JF
6704 break;
6705 default:
4f738adb 6706 return false;
bc1b4f01 6707 }
4f738adb
JF
6708 return true;
6709}
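/* Illustrative sketch of the rules checked above: data/data_end must be
 * loaded as 8-byte pointer values, the remaining sk_msg_md fields as
 * 4-byte reads.
 */
#if 0	/* example program, not kernel code */
SEC("sk_msg")
int msg_filter(struct sk_msg_md *msg)
{
	void *data     = (void *)(long)msg->data;
	void *data_end = (void *)(long)msg->data_end;
	__u32 magic;

	if (data + sizeof(magic) > data_end)
		return SK_PASS;
	magic = *(__u32 *)data;
	return magic == 0xcafe ? SK_DROP : SK_PASS;
}
#endif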
6710
d58e468b
PP
6711static bool flow_dissector_is_valid_access(int off, int size,
6712 enum bpf_access_type type,
6713 const struct bpf_prog *prog,
6714 struct bpf_insn_access_aux *info)
6715{
6716 if (type == BPF_WRITE) {
6717 switch (off) {
6718 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6719 break;
6720 default:
6721 return false;
6722 }
6723 }
6724
6725 switch (off) {
6726 case bpf_ctx_range(struct __sk_buff, data):
6727 info->reg_type = PTR_TO_PACKET;
6728 break;
6729 case bpf_ctx_range(struct __sk_buff, data_end):
6730 info->reg_type = PTR_TO_PACKET_END;
6731 break;
b7df9ada 6732 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
d58e468b
PP
6733 info->reg_type = PTR_TO_FLOW_KEYS;
6734 break;
6735 case bpf_ctx_range(struct __sk_buff, tc_classid):
6736 case bpf_ctx_range(struct __sk_buff, data_meta):
6737 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
f11216b2 6738 case bpf_ctx_range(struct __sk_buff, tstamp):
e3da08d0 6739 case bpf_ctx_range(struct __sk_buff, wire_len):
d58e468b
PP
6740 return false;
6741 }
6742
6743 return bpf_skb_is_valid_access(off, size, type, prog, info);
6744}
6745
2492d3b8
DB
6746static u32 bpf_convert_ctx_access(enum bpf_access_type type,
6747 const struct bpf_insn *si,
6748 struct bpf_insn *insn_buf,
f96da094 6749 struct bpf_prog *prog, u32 *target_size)
9bac3d6d
AS
6750{
6751 struct bpf_insn *insn = insn_buf;
6b8cc1d1 6752 int off;
9bac3d6d 6753
6b8cc1d1 6754 switch (si->off) {
9bac3d6d 6755 case offsetof(struct __sk_buff, len):
6b8cc1d1 6756 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
6757 bpf_target_off(struct sk_buff, len, 4,
6758 target_size));
9bac3d6d
AS
6759 break;
6760
0b8c707d 6761 case offsetof(struct __sk_buff, protocol):
6b8cc1d1 6762 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
6763 bpf_target_off(struct sk_buff, protocol, 2,
6764 target_size));
0b8c707d
DB
6765 break;
6766
27cd5452 6767 case offsetof(struct __sk_buff, vlan_proto):
6b8cc1d1 6768 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
6769 bpf_target_off(struct sk_buff, vlan_proto, 2,
6770 target_size));
27cd5452
MS
6771 break;
6772
bcad5718 6773 case offsetof(struct __sk_buff, priority):
754f1e6a 6774 if (type == BPF_WRITE)
6b8cc1d1 6775 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
6776 bpf_target_off(struct sk_buff, priority, 4,
6777 target_size));
754f1e6a 6778 else
6b8cc1d1 6779 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
6780 bpf_target_off(struct sk_buff, priority, 4,
6781 target_size));
bcad5718
DB
6782 break;
6783
37e82c2f 6784 case offsetof(struct __sk_buff, ingress_ifindex):
6b8cc1d1 6785 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
6786 bpf_target_off(struct sk_buff, skb_iif, 4,
6787 target_size));
37e82c2f
AS
6788 break;
6789
6790 case offsetof(struct __sk_buff, ifindex):
f035a515 6791 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
6b8cc1d1 6792 si->dst_reg, si->src_reg,
37e82c2f 6793 offsetof(struct sk_buff, dev));
6b8cc1d1
DB
6794 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
6795 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
f96da094
DB
6796 bpf_target_off(struct net_device, ifindex, 4,
6797 target_size));
37e82c2f
AS
6798 break;
6799
ba7591d8 6800 case offsetof(struct __sk_buff, hash):
6b8cc1d1 6801 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
6802 bpf_target_off(struct sk_buff, hash, 4,
6803 target_size));
ba7591d8
DB
6804 break;
6805
9bac3d6d 6806 case offsetof(struct __sk_buff, mark):
d691f9e8 6807 if (type == BPF_WRITE)
6b8cc1d1 6808 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
6809 bpf_target_off(struct sk_buff, mark, 4,
6810 target_size));
d691f9e8 6811 else
6b8cc1d1 6812 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
6813 bpf_target_off(struct sk_buff, mark, 4,
6814 target_size));
d691f9e8 6815 break;
9bac3d6d
AS
6816
6817 case offsetof(struct __sk_buff, pkt_type):
f96da094
DB
6818 *target_size = 1;
6819 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
6820 PKT_TYPE_OFFSET());
6821 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
6822#ifdef __BIG_ENDIAN_BITFIELD
6823 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
6824#endif
6825 break;
9bac3d6d
AS
6826
6827 case offsetof(struct __sk_buff, queue_mapping):
74e31ca8
JDB
6828 if (type == BPF_WRITE) {
6829 *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
6830 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
6831 bpf_target_off(struct sk_buff,
6832 queue_mapping,
6833 2, target_size));
6834 } else {
6835 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
6836 bpf_target_off(struct sk_buff,
6837 queue_mapping,
6838 2, target_size));
6839 }
f96da094 6840 break;
c2497395 6841
c2497395 6842 case offsetof(struct __sk_buff, vlan_present):
9c212255
MM
6843 *target_size = 1;
6844 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
6845 PKT_VLAN_PRESENT_OFFSET());
6846 if (PKT_VLAN_PRESENT_BIT)
6847 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
6848 if (PKT_VLAN_PRESENT_BIT < 7)
6849 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
6850 break;
f96da094 6851
9c212255 6852 case offsetof(struct __sk_buff, vlan_tci):
f96da094
DB
6853 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
6854 bpf_target_off(struct sk_buff, vlan_tci, 2,
6855 target_size));
f96da094 6856 break;
d691f9e8
AS
6857
6858 case offsetof(struct __sk_buff, cb[0]) ...
f96da094 6859 offsetofend(struct __sk_buff, cb[4]) - 1:
d691f9e8 6860 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
62c7989b
DB
6861 BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
6862 offsetof(struct qdisc_skb_cb, data)) %
6863 sizeof(__u64));
d691f9e8 6864
ff936a04 6865 prog->cb_access = 1;
6b8cc1d1
DB
6866 off = si->off;
6867 off -= offsetof(struct __sk_buff, cb[0]);
6868 off += offsetof(struct sk_buff, cb);
6869 off += offsetof(struct qdisc_skb_cb, data);
d691f9e8 6870 if (type == BPF_WRITE)
62c7989b 6871 *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
6b8cc1d1 6872 si->src_reg, off);
d691f9e8 6873 else
62c7989b 6874 *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
6b8cc1d1 6875 si->src_reg, off);
d691f9e8
AS
6876 break;
6877
045efa82 6878 case offsetof(struct __sk_buff, tc_classid):
6b8cc1d1
DB
6879 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
6880
6881 off = si->off;
6882 off -= offsetof(struct __sk_buff, tc_classid);
6883 off += offsetof(struct sk_buff, cb);
6884 off += offsetof(struct qdisc_skb_cb, tc_classid);
f96da094 6885 *target_size = 2;
09c37a2c 6886 if (type == BPF_WRITE)
6b8cc1d1
DB
6887 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
6888 si->src_reg, off);
09c37a2c 6889 else
6b8cc1d1
DB
6890 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
6891 si->src_reg, off);
045efa82
DB
6892 break;
6893
db58ba45 6894 case offsetof(struct __sk_buff, data):
f035a515 6895 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
6b8cc1d1 6896 si->dst_reg, si->src_reg,
db58ba45
AS
6897 offsetof(struct sk_buff, data));
6898 break;
6899
de8f3a83
DB
6900 case offsetof(struct __sk_buff, data_meta):
6901 off = si->off;
6902 off -= offsetof(struct __sk_buff, data_meta);
6903 off += offsetof(struct sk_buff, cb);
6904 off += offsetof(struct bpf_skb_data_end, data_meta);
6905 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
6906 si->src_reg, off);
6907 break;
6908
db58ba45 6909 case offsetof(struct __sk_buff, data_end):
6b8cc1d1
DB
6910 off = si->off;
6911 off -= offsetof(struct __sk_buff, data_end);
6912 off += offsetof(struct sk_buff, cb);
6913 off += offsetof(struct bpf_skb_data_end, data_end);
6914 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
6915 si->src_reg, off);
db58ba45
AS
6916 break;
6917
d691f9e8
AS
6918 case offsetof(struct __sk_buff, tc_index):
6919#ifdef CONFIG_NET_SCHED
d691f9e8 6920 if (type == BPF_WRITE)
6b8cc1d1 6921 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
6922 bpf_target_off(struct sk_buff, tc_index, 2,
6923 target_size));
d691f9e8 6924 else
6b8cc1d1 6925 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
6926 bpf_target_off(struct sk_buff, tc_index, 2,
6927 target_size));
d691f9e8 6928#else
2ed46ce4 6929 *target_size = 2;
d691f9e8 6930 if (type == BPF_WRITE)
6b8cc1d1 6931 *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
d691f9e8 6932 else
6b8cc1d1 6933 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
b1d9fc41
DB
6934#endif
6935 break;
6936
6937 case offsetof(struct __sk_buff, napi_id):
6938#if defined(CONFIG_NET_RX_BUSY_POLL)
b1d9fc41 6939 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
6940 bpf_target_off(struct sk_buff, napi_id, 4,
6941 target_size));
b1d9fc41
DB
6942 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
6943 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
6944#else
2ed46ce4 6945 *target_size = 4;
b1d9fc41 6946 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
d691f9e8 6947#endif
6b8cc1d1 6948 break;
8a31db56
JF
6949 case offsetof(struct __sk_buff, family):
6950 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
6951
6952 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
6953 si->dst_reg, si->src_reg,
6954 offsetof(struct sk_buff, sk));
6955 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
6956 bpf_target_off(struct sock_common,
6957 skc_family,
6958 2, target_size));
6959 break;
6960 case offsetof(struct __sk_buff, remote_ip4):
6961 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
6962
6963 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
6964 si->dst_reg, si->src_reg,
6965 offsetof(struct sk_buff, sk));
6966 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6967 bpf_target_off(struct sock_common,
6968 skc_daddr,
6969 4, target_size));
6970 break;
6971 case offsetof(struct __sk_buff, local_ip4):
6972 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6973 skc_rcv_saddr) != 4);
6974
6975 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
6976 si->dst_reg, si->src_reg,
6977 offsetof(struct sk_buff, sk));
6978 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6979 bpf_target_off(struct sock_common,
6980 skc_rcv_saddr,
6981 4, target_size));
6982 break;
6983 case offsetof(struct __sk_buff, remote_ip6[0]) ...
6984 offsetof(struct __sk_buff, remote_ip6[3]):
6985#if IS_ENABLED(CONFIG_IPV6)
6986 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
6987 skc_v6_daddr.s6_addr32[0]) != 4);
6988
6989 off = si->off;
6990 off -= offsetof(struct __sk_buff, remote_ip6[0]);
6991
6992 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
6993 si->dst_reg, si->src_reg,
6994 offsetof(struct sk_buff, sk));
6995 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
6996 offsetof(struct sock_common,
6997 skc_v6_daddr.s6_addr32[0]) +
6998 off);
6999#else
7000 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7001#endif
7002 break;
7003 case offsetof(struct __sk_buff, local_ip6[0]) ...
7004 offsetof(struct __sk_buff, local_ip6[3]):
7005#if IS_ENABLED(CONFIG_IPV6)
7006 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7007 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
7008
7009 off = si->off;
7010 off -= offsetof(struct __sk_buff, local_ip6[0]);
7011
7012 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7013 si->dst_reg, si->src_reg,
7014 offsetof(struct sk_buff, sk));
7015 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7016 offsetof(struct sock_common,
7017 skc_v6_rcv_saddr.s6_addr32[0]) +
7018 off);
7019#else
7020 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7021#endif
7022 break;
7023
7024 case offsetof(struct __sk_buff, remote_port):
7025 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
7026
7027 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7028 si->dst_reg, si->src_reg,
7029 offsetof(struct sk_buff, sk));
7030 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7031 bpf_target_off(struct sock_common,
7032 skc_dport,
7033 2, target_size));
7034#ifndef __BIG_ENDIAN_BITFIELD
7035 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
7036#endif
7037 break;
7038
7039 case offsetof(struct __sk_buff, local_port):
7040 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
7041
7042 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7043 si->dst_reg, si->src_reg,
7044 offsetof(struct sk_buff, sk));
7045 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7046 bpf_target_off(struct sock_common,
7047 skc_num, 2, target_size));
7048 break;
d58e468b
PP
7049
7050 case offsetof(struct __sk_buff, flow_keys):
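		/* flow_keys lives in the skb control block
		 * (qdisc_skb_cb->flow_keys); translate the ctx offset into a
		 * cb-relative offset and emit a single pointer load.
		 */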
7051 off = si->off;
7052 off -= offsetof(struct __sk_buff, flow_keys);
7053 off += offsetof(struct sk_buff, cb);
7054 off += offsetof(struct qdisc_skb_cb, flow_keys);
7055 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
7056 si->src_reg, off);
7057 break;
f11216b2
VD
7058
7059 case offsetof(struct __sk_buff, tstamp):
7060 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8);
7061
7062 if (type == BPF_WRITE)
7063 *insn++ = BPF_STX_MEM(BPF_DW,
7064 si->dst_reg, si->src_reg,
7065 bpf_target_off(struct sk_buff,
7066 tstamp, 8,
7067 target_size));
7068 else
7069 *insn++ = BPF_LDX_MEM(BPF_DW,
7070 si->dst_reg, si->src_reg,
7071 bpf_target_off(struct sk_buff,
7072 tstamp, 8,
7073 target_size));
e3da08d0
PP
7074 break;
7075
d9ff286a
ED
7076 case offsetof(struct __sk_buff, gso_segs):
7077 /* si->dst_reg = skb_shinfo(SKB); */
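		/* skb_shinfo() is skb_end_pointer(): with
		 * NET_SKBUFF_DATA_USES_OFFSET, skb->end is an offset from
		 * skb->head and the two must be added; otherwise skb->end is
		 * already the pointer to the shared info area.
		 */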
7078#ifdef NET_SKBUFF_DATA_USES_OFFSET
7079 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
7080 si->dst_reg, si->src_reg,
7081 offsetof(struct sk_buff, head));
7082 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7083 BPF_REG_AX, si->src_reg,
7084 offsetof(struct sk_buff, end));
7085 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
7086#else
7087 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7088 si->dst_reg, si->src_reg,
7089 offsetof(struct sk_buff, end));
7090#endif
7091 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
7092 si->dst_reg, si->dst_reg,
7093 bpf_target_off(struct skb_shared_info,
7094 gso_segs, 2,
7095 target_size));
7096 break;
e3da08d0
PP
7097 case offsetof(struct __sk_buff, wire_len):
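		/* wire_len is read from qdisc_skb_cb->pkt_len in skb->cb;
		 * translate the ctx offset into a cb-relative 4-byte load.
		 */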
7098 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
7099
7100 off = si->off;
7101 off -= offsetof(struct __sk_buff, wire_len);
7102 off += offsetof(struct sk_buff, cb);
7103 off += offsetof(struct qdisc_skb_cb, pkt_len);
7104 *target_size = 4;
7105 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
46f8bc92
MKL
7106 break;
7107
7108 case offsetof(struct __sk_buff, sk):
7109 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7110 si->dst_reg, si->src_reg,
7111 offsetof(struct sk_buff, sk));
7112 break;
9bac3d6d
AS
7113 }
7114
7115 return insn - insn_buf;
89aa0758
AS
7116}
7117
c64b7983
JS
7118u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
7119 const struct bpf_insn *si,
7120 struct bpf_insn *insn_buf,
7121 struct bpf_prog *prog, u32 *target_size)
61023658
DA
7122{
7123 struct bpf_insn *insn = insn_buf;
aac3fc32 7124 int off;
61023658 7125
6b8cc1d1 7126 switch (si->off) {
61023658
DA
7127 case offsetof(struct bpf_sock, bound_dev_if):
7128 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);
7129
7130 if (type == BPF_WRITE)
6b8cc1d1 7131 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
61023658
DA
7132 offsetof(struct sock, sk_bound_dev_if));
7133 else
6b8cc1d1 7134 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
61023658
DA
7135 offsetof(struct sock, sk_bound_dev_if));
7136 break;
aa4c1037 7137
482dca93
DA
7138 case offsetof(struct bpf_sock, mark):
7139 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);
7140
7141 if (type == BPF_WRITE)
7142 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7143 offsetof(struct sock, sk_mark));
7144 else
7145 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7146 offsetof(struct sock, sk_mark));
7147 break;
7148
7149 case offsetof(struct bpf_sock, priority):
7150 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);
7151
7152 if (type == BPF_WRITE)
7153 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7154 offsetof(struct sock, sk_priority));
7155 else
7156 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7157 offsetof(struct sock, sk_priority));
7158 break;
7159
aa4c1037 7160 case offsetof(struct bpf_sock, family):
aa65d696
MKL
7161 *insn++ = BPF_LDX_MEM(
7162 BPF_FIELD_SIZEOF(struct sock_common, skc_family),
7163 si->dst_reg, si->src_reg,
7164 bpf_target_off(struct sock_common,
7165 skc_family,
7166 FIELD_SIZEOF(struct sock_common,
7167 skc_family),
7168 target_size));
aa4c1037
DA
7169 break;
7170
7171 case offsetof(struct bpf_sock, type):
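		/* sk_type and sk_protocol are packed into a bitfield word at
		 * __sk_flags_offset; load the whole word, then mask and shift
		 * the requested field out (same pattern for protocol below).
		 */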
aa65d696 7172 BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2);
6b8cc1d1 7173 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
aa4c1037 7174 offsetof(struct sock, __sk_flags_offset));
6b8cc1d1
DB
7175 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
7176 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
aa65d696 7177 *target_size = 2;
aa4c1037
DA
7178 break;
7179
7180 case offsetof(struct bpf_sock, protocol):
aa65d696 7181 BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
6b8cc1d1 7182 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
aa4c1037 7183 offsetof(struct sock, __sk_flags_offset));
6b8cc1d1
DB
7184 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
7185 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
aa65d696 7186 *target_size = 1;
aa4c1037 7187 break;
aac3fc32
AI
7188
7189 case offsetof(struct bpf_sock, src_ip4):
7190 *insn++ = BPF_LDX_MEM(
7191 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7192 bpf_target_off(struct sock_common, skc_rcv_saddr,
7193 FIELD_SIZEOF(struct sock_common,
7194 skc_rcv_saddr),
7195 target_size));
7196 break;
7197
aa65d696
MKL
7198 case offsetof(struct bpf_sock, dst_ip4):
7199 *insn++ = BPF_LDX_MEM(
7200 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7201 bpf_target_off(struct sock_common, skc_daddr,
7202 FIELD_SIZEOF(struct sock_common,
7203 skc_daddr),
7204 target_size));
7205 break;
7206
aac3fc32
AI
7207 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
7208#if IS_ENABLED(CONFIG_IPV6)
7209 off = si->off;
7210 off -= offsetof(struct bpf_sock, src_ip6[0]);
7211 *insn++ = BPF_LDX_MEM(
7212 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7213 bpf_target_off(
7214 struct sock_common,
7215 skc_v6_rcv_saddr.s6_addr32[0],
7216 FIELD_SIZEOF(struct sock_common,
7217 skc_v6_rcv_saddr.s6_addr32[0]),
7218 target_size) + off);
7219#else
7220 (void)off;
7221 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7222#endif
7223 break;
7224
aa65d696
MKL
7225 case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
7226#if IS_ENABLED(CONFIG_IPV6)
7227 off = si->off;
7228 off -= offsetof(struct bpf_sock, dst_ip6[0]);
7229 *insn++ = BPF_LDX_MEM(
7230 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7231 bpf_target_off(struct sock_common,
7232 skc_v6_daddr.s6_addr32[0],
7233 FIELD_SIZEOF(struct sock_common,
7234 skc_v6_daddr.s6_addr32[0]),
7235 target_size) + off);
7236#else
7237 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7238 *target_size = 4;
7239#endif
7240 break;
7241
aac3fc32
AI
7242 case offsetof(struct bpf_sock, src_port):
7243 *insn++ = BPF_LDX_MEM(
7244 BPF_FIELD_SIZEOF(struct sock_common, skc_num),
7245 si->dst_reg, si->src_reg,
7246 bpf_target_off(struct sock_common, skc_num,
7247 FIELD_SIZEOF(struct sock_common,
7248 skc_num),
7249 target_size));
7250 break;
aa65d696
MKL
7251
7252 case offsetof(struct bpf_sock, dst_port):
7253 *insn++ = BPF_LDX_MEM(
7254 BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
7255 si->dst_reg, si->src_reg,
7256 bpf_target_off(struct sock_common, skc_dport,
7257 FIELD_SIZEOF(struct sock_common,
7258 skc_dport),
7259 target_size));
7260 break;
7261
7262 case offsetof(struct bpf_sock, state):
7263 *insn++ = BPF_LDX_MEM(
7264 BPF_FIELD_SIZEOF(struct sock_common, skc_state),
7265 si->dst_reg, si->src_reg,
7266 bpf_target_off(struct sock_common, skc_state,
7267 FIELD_SIZEOF(struct sock_common,
7268 skc_state),
7269 target_size));
7270 break;
61023658
DA
7271 }
7272
7273 return insn - insn_buf;
7274}
7275
6b8cc1d1
DB
7276static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
7277 const struct bpf_insn *si,
374fb54e 7278 struct bpf_insn *insn_buf,
f96da094 7279 struct bpf_prog *prog, u32 *target_size)
374fb54e
DB
7280{
7281 struct bpf_insn *insn = insn_buf;
7282
6b8cc1d1 7283 switch (si->off) {
374fb54e 7284 case offsetof(struct __sk_buff, ifindex):
374fb54e 7285 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
6b8cc1d1 7286 si->dst_reg, si->src_reg,
374fb54e 7287 offsetof(struct sk_buff, dev));
6b8cc1d1 7288 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
f96da094
DB
7289 bpf_target_off(struct net_device, ifindex, 4,
7290 target_size));
374fb54e
DB
7291 break;
7292 default:
f96da094
DB
7293 return bpf_convert_ctx_access(type, si, insn_buf, prog,
7294 target_size);
374fb54e
DB
7295 }
7296
7297 return insn - insn_buf;
7298}
7299
6b8cc1d1
DB
7300static u32 xdp_convert_ctx_access(enum bpf_access_type type,
7301 const struct bpf_insn *si,
6a773a15 7302 struct bpf_insn *insn_buf,
f96da094 7303 struct bpf_prog *prog, u32 *target_size)
6a773a15
BB
7304{
7305 struct bpf_insn *insn = insn_buf;
7306
6b8cc1d1 7307 switch (si->off) {
6a773a15 7308 case offsetof(struct xdp_md, data):
f035a515 7309 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
6b8cc1d1 7310 si->dst_reg, si->src_reg,
6a773a15
BB
7311 offsetof(struct xdp_buff, data));
7312 break;
de8f3a83
DB
7313 case offsetof(struct xdp_md, data_meta):
7314 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
7315 si->dst_reg, si->src_reg,
7316 offsetof(struct xdp_buff, data_meta));
7317 break;
6a773a15 7318 case offsetof(struct xdp_md, data_end):
f035a515 7319 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
6b8cc1d1 7320 si->dst_reg, si->src_reg,
6a773a15
BB
7321 offsetof(struct xdp_buff, data_end));
7322 break;
02dd3291
JDB
7323 case offsetof(struct xdp_md, ingress_ifindex):
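		/* ingress_ifindex is reached as xdp_buff->rxq->dev->ifindex:
		 * two pointer loads followed by a 32-bit load.
		 */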
7324 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
7325 si->dst_reg, si->src_reg,
7326 offsetof(struct xdp_buff, rxq));
7327 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
7328 si->dst_reg, si->dst_reg,
7329 offsetof(struct xdp_rxq_info, dev));
7330 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
daaf24c6 7331 offsetof(struct net_device, ifindex));
02dd3291
JDB
7332 break;
7333 case offsetof(struct xdp_md, rx_queue_index):
7334 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
7335 si->dst_reg, si->src_reg,
7336 offsetof(struct xdp_buff, rxq));
7337 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
daaf24c6
JDB
7338 offsetof(struct xdp_rxq_info,
7339 queue_index));
02dd3291 7340 break;
6a773a15
BB
7341 }
7342
7343 return insn - insn_buf;
7344}
7345
4fbac77d
AI
7346/* SOCK_ADDR_LOAD_NESTED_FIELD() loads the Nested Field S.F.NF, where S is the
7347 * type of the context Structure, F is the Field in the context structure that
7348 * contains a pointer to a Nested Structure of type NS that has the field NF.
7349 *
7350 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to the caller to
7351 * make sure that SIZE is not greater than the actual size of S.F.NF.
7352 *
7353 * If an offset OFF is provided, the load happens at that offset relative to
7354 * the offset of NF.
7355 */
7356#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF) \
7357 do { \
7358 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
7359 si->src_reg, offsetof(S, F)); \
7360 *insn++ = BPF_LDX_MEM( \
7361 SIZE, si->dst_reg, si->dst_reg, \
7362 bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \
7363 target_size) \
7364 + OFF); \
7365 } while (0)
7366
7367#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF) \
7368 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, \
7369 BPF_FIELD_SIZEOF(NS, NF), 0)
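
/* Illustrative sketch only (not authoritative): for the user_family case
 * below, SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
 * struct sockaddr, uaddr, sa_family) expands to roughly:
 *
 *	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern,
 *					       uaddr),
 *			      si->dst_reg, si->src_reg,
 *			      offsetof(struct bpf_sock_addr_kern, uaddr));
 *	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sockaddr, sa_family),
 *			      si->dst_reg, si->dst_reg,
 *			      bpf_target_off(struct sockaddr, sa_family,
 *					     FIELD_SIZEOF(struct sockaddr,
 *							  sa_family),
 *					     target_size));
 *
 * i.e. first dereference ctx->uaddr, then load sa_family through the
 * resulting pointer.
 */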
7370
7371/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
7372 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for a store operation.
7373 *
7374 * It doesn't support a SIZE argument, though, since narrow stores are not
7375 * supported for now.
7376 *
7377 * In addition it uses the Temporary Field TF (a member of struct S) as a 3rd
7378 * "register", since the two registers available in convert_ctx_access are not
7379 * enough: we can't overwrite SRC, since it contains the value to store, nor
7380 * DST, since it contains the pointer to the context that may be used by later
7381 * instructions. But we need a temporary place to save the pointer to the
7382 * nested structure whose field we want to store to.
7383 */
7384#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF) \
7385 do { \
7386 int tmp_reg = BPF_REG_9; \
7387 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
7388 --tmp_reg; \
7389 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
7390 --tmp_reg; \
7391 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \
7392 offsetof(S, TF)); \
7393 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \
7394 si->dst_reg, offsetof(S, F)); \
7395 *insn++ = BPF_STX_MEM( \
7396 BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg, \
7397 bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \
7398 target_size) \
7399 + OFF); \
7400 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \
7401 offsetof(S, TF)); \
7402 } while (0)
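
/* The emitted sequence is therefore: spill tmp_reg into the scratch slot
 * S.TF, load the nested-structure pointer S.F into tmp_reg, store src_reg
 * through it into NS.NF, and finally restore tmp_reg from S.TF.
 */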
7403
7404#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
7405 TF) \
7406 do { \
7407 if (type == BPF_WRITE) { \
7408 SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, \
7409 TF); \
7410 } else { \
7411 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \
7412 S, NS, F, NF, SIZE, OFF); \
7413 } \
7414 } while (0)
7415
7416#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \
7417 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \
7418 S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
7419
7420static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
7421 const struct bpf_insn *si,
7422 struct bpf_insn *insn_buf,
7423 struct bpf_prog *prog, u32 *target_size)
7424{
7425 struct bpf_insn *insn = insn_buf;
7426 int off;
7427
7428 switch (si->off) {
7429 case offsetof(struct bpf_sock_addr, user_family):
7430 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
7431 struct sockaddr, uaddr, sa_family);
7432 break;
7433
7434 case offsetof(struct bpf_sock_addr, user_ip4):
7435 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7436 struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
7437 sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
7438 break;
7439
7440 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
7441 off = si->off;
7442 off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
7443 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7444 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
7445 sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
7446 tmp_reg);
7447 break;
7448
7449 case offsetof(struct bpf_sock_addr, user_port):
7450		/* To get the port we would need to know sa_family first and
7451		 * then treat sockaddr as either sockaddr_in or sockaddr_in6.
7452		 * We can simplify, though, since the port field has the same
7453		 * offset and size in both structures.
7454		 * Here we check this invariant and then use just one of the
7455		 * structures.
7456		 */
7457 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
7458 offsetof(struct sockaddr_in6, sin6_port));
7459 BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
7460 FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
7461 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
7462 struct sockaddr_in6, uaddr,
7463 sin6_port, tmp_reg);
7464 break;
7465
7466 case offsetof(struct bpf_sock_addr, family):
7467 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
7468 struct sock, sk, sk_family);
7469 break;
7470
7471 case offsetof(struct bpf_sock_addr, type):
7472 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
7473 struct bpf_sock_addr_kern, struct sock, sk,
7474 __sk_flags_offset, BPF_W, 0);
7475 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
7476 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
7477 break;
7478
7479 case offsetof(struct bpf_sock_addr, protocol):
7480 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
7481 struct bpf_sock_addr_kern, struct sock, sk,
7482 __sk_flags_offset, BPF_W, 0);
7483 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
7484 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
7485 SK_FL_PROTO_SHIFT);
7486 break;
1cedee13
AI
7487
7488 case offsetof(struct bpf_sock_addr, msg_src_ip4):
7489 /* Treat t_ctx as struct in_addr for msg_src_ip4. */
7490 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7491 struct bpf_sock_addr_kern, struct in_addr, t_ctx,
7492 s_addr, BPF_SIZE(si->code), 0, tmp_reg);
7493 break;
7494
7495 case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
7496 msg_src_ip6[3]):
7497 off = si->off;
7498 off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
7499 /* Treat t_ctx as struct in6_addr for msg_src_ip6. */
7500 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7501 struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
7502 s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
7503 break;
4fbac77d
AI
7504 }
7505
7506 return insn - insn_buf;
7507}
7508
40304b2a
LB
7509static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
7510 const struct bpf_insn *si,
7511 struct bpf_insn *insn_buf,
f96da094
DB
7512 struct bpf_prog *prog,
7513 u32 *target_size)
40304b2a
LB
7514{
7515 struct bpf_insn *insn = insn_buf;
7516 int off;
7517
9b1f3d6e
MKL
7518/* Helper macro for adding read access to tcp_sock or sock fields. */
7519#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
7520 do { \
7521 BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
7522 FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
7523 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
7524 struct bpf_sock_ops_kern, \
7525 is_fullsock), \
7526 si->dst_reg, si->src_reg, \
7527 offsetof(struct bpf_sock_ops_kern, \
7528 is_fullsock)); \
7529 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
7530 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
7531 struct bpf_sock_ops_kern, sk),\
7532 si->dst_reg, si->src_reg, \
7533 offsetof(struct bpf_sock_ops_kern, sk));\
7534 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
7535 OBJ_FIELD), \
7536 si->dst_reg, si->dst_reg, \
7537 offsetof(OBJ, OBJ_FIELD)); \
7538 } while (0)
7539
7540#define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
7541 SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)
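
/* If is_fullsock is zero, the BPF_JEQ above skips both dereferences and
 * dst_reg is left holding the just-loaded 0, so reads on non-full sockets
 * (e.g. request sockets) safely yield 0 instead of chasing an invalid sk.
 */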
7542
7543/* Helper macro for adding write access to tcp_sock or sock fields.
7544 * The macro is called with two registers, dst_reg which contains a pointer
7545 * to ctx (context) and src_reg which contains the value that should be
7546 * stored. However, we need an additional register since we cannot overwrite
7547 * dst_reg because it may be used later in the program.
7548 * Instead we "borrow" one of the other registers. We first save its value
7549 * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
7550 * it at the end of the macro.
7551 */
7552#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
7553 do { \
7554 int reg = BPF_REG_9; \
7555 BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
7556 FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
7557 if (si->dst_reg == reg || si->src_reg == reg) \
7558 reg--; \
7559 if (si->dst_reg == reg || si->src_reg == reg) \
7560 reg--; \
7561 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \
7562 offsetof(struct bpf_sock_ops_kern, \
7563 temp)); \
7564 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
7565 struct bpf_sock_ops_kern, \
7566 is_fullsock), \
7567 reg, si->dst_reg, \
7568 offsetof(struct bpf_sock_ops_kern, \
7569 is_fullsock)); \
7570 *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \
7571 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
7572 struct bpf_sock_ops_kern, sk),\
7573 reg, si->dst_reg, \
7574 offsetof(struct bpf_sock_ops_kern, sk));\
7575 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \
7576 reg, si->src_reg, \
7577 offsetof(OBJ, OBJ_FIELD)); \
7578 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \
7579 offsetof(struct bpf_sock_ops_kern, \
7580 temp)); \
7581 } while (0)
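
/* As in the read macro, the BPF_JEQ guard skips the sk dereference and the
 * store when is_fullsock is zero, so writes on non-full sockets are silently
 * discarded; the borrowed register is restored either way.
 */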
7582
7583#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \
7584 do { \
7585 if (TYPE == BPF_WRITE) \
7586 SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
7587 else \
7588 SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
7589 } while (0)
7590
7591 CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_sock_ops,
7592 SOCK_OPS_GET_TCP_SOCK_FIELD);
7593
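	/* If si->off matched one of the common tcp_sock fields above,
	 * instructions have already been emitted and we are done.
	 */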
7594 if (insn > insn_buf)
7595 return insn - insn_buf;
7596
40304b2a
LB
7597 switch (si->off) {
7598 case offsetof(struct bpf_sock_ops, op) ...
7599 offsetof(struct bpf_sock_ops, replylong[3]):
7600 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
7601 FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
7602 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
7603 FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
7604 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
7605 FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
7606 off = si->off;
7607 off -= offsetof(struct bpf_sock_ops, op);
7608 off += offsetof(struct bpf_sock_ops_kern, op);
7609 if (type == BPF_WRITE)
7610 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7611 off);
7612 else
7613 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7614 off);
7615 break;
7616
7617 case offsetof(struct bpf_sock_ops, family):
7618 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
7619
7620 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
7621 struct bpf_sock_ops_kern, sk),
7622 si->dst_reg, si->src_reg,
7623 offsetof(struct bpf_sock_ops_kern, sk));
7624 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7625 offsetof(struct sock_common, skc_family));
7626 break;
7627
7628 case offsetof(struct bpf_sock_ops, remote_ip4):
7629 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
7630
7631 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
7632 struct bpf_sock_ops_kern, sk),
7633 si->dst_reg, si->src_reg,
7634 offsetof(struct bpf_sock_ops_kern, sk));
7635 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7636 offsetof(struct sock_common, skc_daddr));
7637 break;
7638
7639 case offsetof(struct bpf_sock_ops, local_ip4):
303def35
JF
7640 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7641 skc_rcv_saddr) != 4);
40304b2a
LB
7642
7643 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
7644 struct bpf_sock_ops_kern, sk),
7645 si->dst_reg, si->src_reg,
7646 offsetof(struct bpf_sock_ops_kern, sk));
7647 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7648 offsetof(struct sock_common,
7649 skc_rcv_saddr));
7650 break;
7651
7652 case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
7653 offsetof(struct bpf_sock_ops, remote_ip6[3]):
7654#if IS_ENABLED(CONFIG_IPV6)
7655 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7656 skc_v6_daddr.s6_addr32[0]) != 4);
7657
7658 off = si->off;
7659 off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
7660 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
7661 struct bpf_sock_ops_kern, sk),
7662 si->dst_reg, si->src_reg,
7663 offsetof(struct bpf_sock_ops_kern, sk));
7664 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7665 offsetof(struct sock_common,
7666 skc_v6_daddr.s6_addr32[0]) +
7667 off);
7668#else
7669 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7670#endif
7671 break;
7672
7673 case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
7674 offsetof(struct bpf_sock_ops, local_ip6[3]):
7675#if IS_ENABLED(CONFIG_IPV6)
7676 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7677 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
7678
7679 off = si->off;
7680 off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
7681 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
7682 struct bpf_sock_ops_kern, sk),
7683 si->dst_reg, si->src_reg,
7684 offsetof(struct bpf_sock_ops_kern, sk));
7685 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7686 offsetof(struct sock_common,
7687 skc_v6_rcv_saddr.s6_addr32[0]) +
7688 off);
7689#else
7690 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7691#endif
7692 break;
7693
7694 case offsetof(struct bpf_sock_ops, remote_port):
7695 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
7696
7697 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
7698 struct bpf_sock_ops_kern, sk),
7699 si->dst_reg, si->src_reg,
7700 offsetof(struct bpf_sock_ops_kern, sk));
7701 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7702 offsetof(struct sock_common, skc_dport));
7703#ifndef __BIG_ENDIAN_BITFIELD
7704 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
7705#endif
7706 break;
7707
7708 case offsetof(struct bpf_sock_ops, local_port):
7709 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
7710
7711 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
7712 struct bpf_sock_ops_kern, sk),
7713 si->dst_reg, si->src_reg,
7714 offsetof(struct bpf_sock_ops_kern, sk));
7715 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7716 offsetof(struct sock_common, skc_num));
7717 break;
f19397a5
LB
7718
7719 case offsetof(struct bpf_sock_ops, is_fullsock):
7720 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
7721 struct bpf_sock_ops_kern,
7722 is_fullsock),
7723 si->dst_reg, si->src_reg,
7724 offsetof(struct bpf_sock_ops_kern,
7725 is_fullsock));
7726 break;
7727
44f0e430
LB
7728 case offsetof(struct bpf_sock_ops, state):
7729 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);
7730
7731 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
7732 struct bpf_sock_ops_kern, sk),
7733 si->dst_reg, si->src_reg,
7734 offsetof(struct bpf_sock_ops_kern, sk));
7735 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
7736 offsetof(struct sock_common, skc_state));
7737 break;
7738
7739 case offsetof(struct bpf_sock_ops, rtt_min):
7740 BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
7741 sizeof(struct minmax));
7742 BUILD_BUG_ON(sizeof(struct minmax) <
7743 sizeof(struct minmax_sample));
7744
7745 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
7746 struct bpf_sock_ops_kern, sk),
7747 si->dst_reg, si->src_reg,
7748 offsetof(struct bpf_sock_ops_kern, sk));
7749 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7750 offsetof(struct tcp_sock, rtt_min) +
7751 FIELD_SIZEOF(struct minmax_sample, t));
7752 break;
7753
b13d8807
LB
7754 case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
7755 SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
7756 struct tcp_sock);
7757 break;
44f0e430 7758
44f0e430 7759 case offsetof(struct bpf_sock_ops, sk_txhash):
6f9bd3d7
LB
7760 SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
7761 struct sock, type);
44f0e430 7762 break;
40304b2a
LB
7763 }
7764 return insn - insn_buf;
7765}
7766
8108a775
JF
7767static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
7768 const struct bpf_insn *si,
7769 struct bpf_insn *insn_buf,
7770 struct bpf_prog *prog, u32 *target_size)
7771{
7772 struct bpf_insn *insn = insn_buf;
7773 int off;
7774
7775 switch (si->off) {
7776 case offsetof(struct __sk_buff, data_end):
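		/* For SK_SKB programs, data_end is stashed in the skb control
		 * block (tcp_skb_cb.bpf.data_end) before the program runs;
		 * translate the ctx offset into a cb-relative pointer load.
		 */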
7777 off = si->off;
7778 off -= offsetof(struct __sk_buff, data_end);
7779 off += offsetof(struct sk_buff, cb);
7780 off += offsetof(struct tcp_skb_cb, bpf.data_end);
7781 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
7782 si->src_reg, off);
7783 break;
7784 default:
7785 return bpf_convert_ctx_access(type, si, insn_buf, prog,
7786 target_size);
7787 }
7788
7789 return insn - insn_buf;
7790}
7791
4f738adb
JF
7792static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
7793 const struct bpf_insn *si,
7794 struct bpf_insn *insn_buf,
7795 struct bpf_prog *prog, u32 *target_size)
7796{
7797 struct bpf_insn *insn = insn_buf;
720e7f38 7798#if IS_ENABLED(CONFIG_IPV6)
303def35 7799 int off;
720e7f38 7800#endif
4f738adb 7801
7a69c0f2
JF
7802	/* convert_ctx_access relies on the sg element being first in the struct */
7803 BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);
7804
4f738adb
JF
7805 switch (si->off) {
7806 case offsetof(struct sk_msg_md, data):
604326b4 7807 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
4f738adb 7808 si->dst_reg, si->src_reg,
604326b4 7809 offsetof(struct sk_msg, data));
4f738adb
JF
7810 break;
7811 case offsetof(struct sk_msg_md, data_end):
604326b4 7812 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end),
4f738adb 7813 si->dst_reg, si->src_reg,
604326b4 7814 offsetof(struct sk_msg, data_end));
4f738adb 7815 break;
303def35
JF
7816 case offsetof(struct sk_msg_md, family):
7817 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
7818
7819 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
604326b4 7820 struct sk_msg, sk),
303def35 7821 si->dst_reg, si->src_reg,
604326b4 7822 offsetof(struct sk_msg, sk));
303def35
JF
7823 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7824 offsetof(struct sock_common, skc_family));
7825 break;
7826
7827 case offsetof(struct sk_msg_md, remote_ip4):
7828 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
7829
7830 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
604326b4 7831 struct sk_msg, sk),
303def35 7832 si->dst_reg, si->src_reg,
604326b4 7833 offsetof(struct sk_msg, sk));
303def35
JF
7834 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7835 offsetof(struct sock_common, skc_daddr));
7836 break;
7837
7838 case offsetof(struct sk_msg_md, local_ip4):
7839 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7840 skc_rcv_saddr) != 4);
7841
7842 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
604326b4 7843 struct sk_msg, sk),
303def35 7844 si->dst_reg, si->src_reg,
604326b4 7845 offsetof(struct sk_msg, sk));
303def35
JF
7846 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7847 offsetof(struct sock_common,
7848 skc_rcv_saddr));
7849 break;
7850
7851 case offsetof(struct sk_msg_md, remote_ip6[0]) ...
7852 offsetof(struct sk_msg_md, remote_ip6[3]):
7853#if IS_ENABLED(CONFIG_IPV6)
7854 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7855 skc_v6_daddr.s6_addr32[0]) != 4);
7856
7857 off = si->off;
7858 off -= offsetof(struct sk_msg_md, remote_ip6[0]);
7859 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
604326b4 7860 struct sk_msg, sk),
303def35 7861 si->dst_reg, si->src_reg,
604326b4 7862 offsetof(struct sk_msg, sk));
303def35
JF
7863 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7864 offsetof(struct sock_common,
7865 skc_v6_daddr.s6_addr32[0]) +
7866 off);
7867#else
7868 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7869#endif
7870 break;
7871
7872 case offsetof(struct sk_msg_md, local_ip6[0]) ...
7873 offsetof(struct sk_msg_md, local_ip6[3]):
7874#if IS_ENABLED(CONFIG_IPV6)
7875 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7876 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
7877
7878 off = si->off;
7879 off -= offsetof(struct sk_msg_md, local_ip6[0]);
7880 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
604326b4 7881 struct sk_msg, sk),
303def35 7882 si->dst_reg, si->src_reg,
604326b4 7883 offsetof(struct sk_msg, sk));
303def35
JF
7884 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7885 offsetof(struct sock_common,
7886 skc_v6_rcv_saddr.s6_addr32[0]) +
7887 off);
7888#else
7889 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7890#endif
7891 break;
7892
7893 case offsetof(struct sk_msg_md, remote_port):
7894 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
7895
7896 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
604326b4 7897 struct sk_msg, sk),
303def35 7898 si->dst_reg, si->src_reg,
604326b4 7899 offsetof(struct sk_msg, sk));
303def35
JF
7900 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7901 offsetof(struct sock_common, skc_dport));
7902#ifndef __BIG_ENDIAN_BITFIELD
7903 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
7904#endif
7905 break;
7906
7907 case offsetof(struct sk_msg_md, local_port):
7908 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
7909
7910 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
604326b4 7911 struct sk_msg, sk),
303def35 7912 si->dst_reg, si->src_reg,
604326b4 7913 offsetof(struct sk_msg, sk));
303def35
JF
7914 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7915 offsetof(struct sock_common, skc_num));
7916 break;
3bdbd022
JF
7917
7918 case offsetof(struct sk_msg_md, size):
7919 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
7920 si->dst_reg, si->src_reg,
7921 offsetof(struct sk_msg_sg, size));
7922 break;
4f738adb
JF
7923 }
7924
7925 return insn - insn_buf;
7926}
7927
7de16e3a 7928const struct bpf_verifier_ops sk_filter_verifier_ops = {
4936e352
DB
7929 .get_func_proto = sk_filter_func_proto,
7930 .is_valid_access = sk_filter_is_valid_access,
2492d3b8 7931 .convert_ctx_access = bpf_convert_ctx_access,
e0cea7ce 7932 .gen_ld_abs = bpf_gen_ld_abs,
89aa0758
AS
7933};
7934
7de16e3a 7935const struct bpf_prog_ops sk_filter_prog_ops = {
61f3c964 7936 .test_run = bpf_prog_test_run_skb,
7de16e3a
JK
7937};
7938
7939const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
4936e352
DB
7940 .get_func_proto = tc_cls_act_func_proto,
7941 .is_valid_access = tc_cls_act_is_valid_access,
374fb54e 7942 .convert_ctx_access = tc_cls_act_convert_ctx_access,
36bbef52 7943 .gen_prologue = tc_cls_act_prologue,
e0cea7ce 7944 .gen_ld_abs = bpf_gen_ld_abs,
7de16e3a
JK
7945};
7946
7947const struct bpf_prog_ops tc_cls_act_prog_ops = {
1cf1cae9 7948 .test_run = bpf_prog_test_run_skb,
608cd71a
AS
7949};
7950
7de16e3a 7951const struct bpf_verifier_ops xdp_verifier_ops = {
6a773a15
BB
7952 .get_func_proto = xdp_func_proto,
7953 .is_valid_access = xdp_is_valid_access,
7954 .convert_ctx_access = xdp_convert_ctx_access,
b09928b9 7955 .gen_prologue = bpf_noop_prologue,
7de16e3a
JK
7956};
7957
7958const struct bpf_prog_ops xdp_prog_ops = {
1cf1cae9 7959 .test_run = bpf_prog_test_run_xdp,
6a773a15
BB
7960};
7961
7de16e3a 7962const struct bpf_verifier_ops cg_skb_verifier_ops = {
cd339431 7963 .get_func_proto = cg_skb_func_proto,
b39b5f41 7964 .is_valid_access = cg_skb_is_valid_access,
2492d3b8 7965 .convert_ctx_access = bpf_convert_ctx_access,
7de16e3a
JK
7966};
7967
7968const struct bpf_prog_ops cg_skb_prog_ops = {
1cf1cae9 7969 .test_run = bpf_prog_test_run_skb,
0e33661d
DM
7970};
7971
cd3092c7
MX
7972const struct bpf_verifier_ops lwt_in_verifier_ops = {
7973 .get_func_proto = lwt_in_func_proto,
3a0af8fd 7974 .is_valid_access = lwt_is_valid_access,
2492d3b8 7975 .convert_ctx_access = bpf_convert_ctx_access,
7de16e3a
JK
7976};
7977
cd3092c7
MX
7978const struct bpf_prog_ops lwt_in_prog_ops = {
7979 .test_run = bpf_prog_test_run_skb,
7980};
7981
7982const struct bpf_verifier_ops lwt_out_verifier_ops = {
7983 .get_func_proto = lwt_out_func_proto,
3a0af8fd 7984 .is_valid_access = lwt_is_valid_access,
2492d3b8 7985 .convert_ctx_access = bpf_convert_ctx_access,
7de16e3a
JK
7986};
7987
cd3092c7 7988const struct bpf_prog_ops lwt_out_prog_ops = {
1cf1cae9 7989 .test_run = bpf_prog_test_run_skb,
3a0af8fd
TG
7990};
7991
7de16e3a 7992const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
3a0af8fd
TG
7993 .get_func_proto = lwt_xmit_func_proto,
7994 .is_valid_access = lwt_is_valid_access,
2492d3b8 7995 .convert_ctx_access = bpf_convert_ctx_access,
3a0af8fd 7996 .gen_prologue = tc_cls_act_prologue,
7de16e3a
JK
7997};
7998
7999const struct bpf_prog_ops lwt_xmit_prog_ops = {
1cf1cae9 8000 .test_run = bpf_prog_test_run_skb,
3a0af8fd
TG
8001};
8002
004d4b27
MX
8003const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
8004 .get_func_proto = lwt_seg6local_func_proto,
8005 .is_valid_access = lwt_is_valid_access,
8006 .convert_ctx_access = bpf_convert_ctx_access,
8007};
8008
8009const struct bpf_prog_ops lwt_seg6local_prog_ops = {
8010 .test_run = bpf_prog_test_run_skb,
8011};
8012
7de16e3a 8013const struct bpf_verifier_ops cg_sock_verifier_ops = {
ae2cf1c4 8014 .get_func_proto = sock_filter_func_proto,
61023658 8015 .is_valid_access = sock_filter_is_valid_access,
c64b7983 8016 .convert_ctx_access = bpf_sock_convert_ctx_access,
61023658
DA
8017};
8018
7de16e3a
JK
8019const struct bpf_prog_ops cg_sock_prog_ops = {
8020};
8021
4fbac77d
AI
8022const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
8023 .get_func_proto = sock_addr_func_proto,
8024 .is_valid_access = sock_addr_is_valid_access,
8025 .convert_ctx_access = sock_addr_convert_ctx_access,
8026};
8027
8028const struct bpf_prog_ops cg_sock_addr_prog_ops = {
8029};
8030
7de16e3a 8031const struct bpf_verifier_ops sock_ops_verifier_ops = {
8c4b4c7e 8032 .get_func_proto = sock_ops_func_proto,
40304b2a
LB
8033 .is_valid_access = sock_ops_is_valid_access,
8034 .convert_ctx_access = sock_ops_convert_ctx_access,
8035};
8036
7de16e3a
JK
8037const struct bpf_prog_ops sock_ops_prog_ops = {
8038};
8039
8040const struct bpf_verifier_ops sk_skb_verifier_ops = {
b005fd18
JF
8041 .get_func_proto = sk_skb_func_proto,
8042 .is_valid_access = sk_skb_is_valid_access,
8108a775 8043 .convert_ctx_access = sk_skb_convert_ctx_access,
8a31db56 8044 .gen_prologue = sk_skb_prologue,
b005fd18
JF
8045};
8046
7de16e3a
JK
8047const struct bpf_prog_ops sk_skb_prog_ops = {
8048};
8049
4f738adb
JF
8050const struct bpf_verifier_ops sk_msg_verifier_ops = {
8051 .get_func_proto = sk_msg_func_proto,
8052 .is_valid_access = sk_msg_is_valid_access,
8053 .convert_ctx_access = sk_msg_convert_ctx_access,
b09928b9 8054 .gen_prologue = bpf_noop_prologue,
4f738adb
JF
8055};
8056
8057const struct bpf_prog_ops sk_msg_prog_ops = {
8058};
8059
d58e468b
PP
8060const struct bpf_verifier_ops flow_dissector_verifier_ops = {
8061 .get_func_proto = flow_dissector_func_proto,
8062 .is_valid_access = flow_dissector_is_valid_access,
8063 .convert_ctx_access = bpf_convert_ctx_access,
8064};
8065
8066const struct bpf_prog_ops flow_dissector_prog_ops = {
b7a1848e 8067 .test_run = bpf_prog_test_run_flow_dissector,
d58e468b
PP
8068};
8069
8ced425e 8070int sk_detach_filter(struct sock *sk)
55b33325
PE
8071{
8072 int ret = -ENOENT;
8073 struct sk_filter *filter;
8074
d59577b6
VB
8075 if (sock_flag(sk, SOCK_FILTER_LOCKED))
8076 return -EPERM;
8077
8ced425e
HFS
8078 filter = rcu_dereference_protected(sk->sk_filter,
8079 lockdep_sock_is_held(sk));
55b33325 8080 if (filter) {
a9b3cd7f 8081 RCU_INIT_POINTER(sk->sk_filter, NULL);
46bcf14f 8082 sk_filter_uncharge(sk, filter);
55b33325
PE
8083 ret = 0;
8084 }
a3ea269b 8085
55b33325
PE
8086 return ret;
8087}
8ced425e 8088EXPORT_SYMBOL_GPL(sk_detach_filter);
a8fc9277 8089
a3ea269b
DB
8090int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
8091 unsigned int len)
a8fc9277 8092{
a3ea269b 8093 struct sock_fprog_kern *fprog;
a8fc9277 8094 struct sk_filter *filter;
a3ea269b 8095 int ret = 0;
a8fc9277
PE
8096
8097 lock_sock(sk);
8098 filter = rcu_dereference_protected(sk->sk_filter,
8ced425e 8099 lockdep_sock_is_held(sk));
a8fc9277
PE
8100 if (!filter)
8101 goto out;
a3ea269b
DB
8102
8103	/* We're copying the filter that was originally attached,
93d08b69
DB
8104	 * so no conversion/decode is needed anymore. eBPF programs that
8105 * have no original program cannot be dumped through this.
a3ea269b 8106 */
93d08b69 8107 ret = -EACCES;
7ae457c1 8108 fprog = filter->prog->orig_prog;
93d08b69
DB
8109 if (!fprog)
8110 goto out;
a3ea269b
DB
8111
8112 ret = fprog->len;
a8fc9277 8113 if (!len)
a3ea269b 8114		/* User space only asks for the number of filter blocks. */
a8fc9277 8115 goto out;
a3ea269b 8116
a8fc9277 8117 ret = -EINVAL;
a3ea269b 8118 if (len < fprog->len)
a8fc9277
PE
8119 goto out;
8120
8121 ret = -EFAULT;
009937e7 8122 if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
a3ea269b 8123 goto out;
a8fc9277 8124
a3ea269b
DB
8125	/* Instead of bytes, the API requires us to return the number
8126	 * of filter blocks.
8127 */
8128 ret = fprog->len;
a8fc9277
PE
8129out:
8130 release_sock(sk);
8131 return ret;
8132}
2dbb9b9e
MKL
8133
8134#ifdef CONFIG_INET
8135struct sk_reuseport_kern {
8136 struct sk_buff *skb;
8137 struct sock *sk;
8138 struct sock *selected_sk;
8139 void *data_end;
8140 u32 hash;
8141 u32 reuseport_id;
8142 bool bind_inany;
8143};
8144
8145static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
8146 struct sock_reuseport *reuse,
8147 struct sock *sk, struct sk_buff *skb,
8148 u32 hash)
8149{
8150 reuse_kern->skb = skb;
8151 reuse_kern->sk = sk;
8152 reuse_kern->selected_sk = NULL;
8153 reuse_kern->data_end = skb->data + skb_headlen(skb);
8154 reuse_kern->hash = hash;
8155 reuse_kern->reuseport_id = reuse->reuseport_id;
8156 reuse_kern->bind_inany = reuse->bind_inany;
8157}
8158
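/* Run the attached SO_REUSEPORT BPF program. On SK_PASS the socket the
 * program selected (possibly NULL if it made no selection) is returned;
 * any other verdict drops the connection with -ECONNREFUSED.
 */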
8159struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
8160 struct bpf_prog *prog, struct sk_buff *skb,
8161 u32 hash)
8162{
8163 struct sk_reuseport_kern reuse_kern;
8164 enum sk_action action;
8165
8166 bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
8167 action = BPF_PROG_RUN(prog, &reuse_kern);
8168
8169 if (action == SK_PASS)
8170 return reuse_kern.selected_sk;
8171 else
8172 return ERR_PTR(-ECONNREFUSED);
8173}
8174
8175BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
8176 struct bpf_map *, map, void *, key, u32, flags)
8177{
8178 struct sock_reuseport *reuse;
8179 struct sock *selected_sk;
8180
8181 selected_sk = map->ops->map_lookup_elem(map, key);
8182 if (!selected_sk)
8183 return -ENOENT;
8184
8185 reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
8186 if (!reuse)
8187 /* selected_sk is unhashed (e.g. by close()) after the
8188		 * above map_lookup_elem(). Treat selected_sk as if it has
8189		 * already been removed from the map.
8190 */
8191 return -ENOENT;
8192
8193 if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
8194 struct sock *sk;
8195
8196 if (unlikely(!reuse_kern->reuseport_id))
8197 /* There is a small race between adding the
8198 * sk to the map and setting the
8199 * reuse_kern->reuseport_id.
8200			 * Treat it as if the sk has not been added to
8201 * the bpf map yet.
8202 */
8203 return -ENOENT;
8204
8205 sk = reuse_kern->sk;
8206 if (sk->sk_protocol != selected_sk->sk_protocol)
8207 return -EPROTOTYPE;
8208 else if (sk->sk_family != selected_sk->sk_family)
8209 return -EAFNOSUPPORT;
8210
8211 /* Catch all. Likely bound to a different sockaddr. */
8212 return -EBADFD;
8213 }
8214
8215 reuse_kern->selected_sk = selected_sk;
8216
8217 return 0;
8218}
8219
8220static const struct bpf_func_proto sk_select_reuseport_proto = {
8221 .func = sk_select_reuseport,
8222 .gpl_only = false,
8223 .ret_type = RET_INTEGER,
8224 .arg1_type = ARG_PTR_TO_CTX,
8225 .arg2_type = ARG_CONST_MAP_PTR,
8226 .arg3_type = ARG_PTR_TO_MAP_KEY,
8227 .arg4_type = ARG_ANYTHING,
8228};
8229
8230BPF_CALL_4(sk_reuseport_load_bytes,
8231 const struct sk_reuseport_kern *, reuse_kern, u32, offset,
8232 void *, to, u32, len)
8233{
8234 return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
8235}
8236
8237static const struct bpf_func_proto sk_reuseport_load_bytes_proto = {
8238 .func = sk_reuseport_load_bytes,
8239 .gpl_only = false,
8240 .ret_type = RET_INTEGER,
8241 .arg1_type = ARG_PTR_TO_CTX,
8242 .arg2_type = ARG_ANYTHING,
8243 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
8244 .arg4_type = ARG_CONST_SIZE,
8245};
8246
8247BPF_CALL_5(sk_reuseport_load_bytes_relative,
8248 const struct sk_reuseport_kern *, reuse_kern, u32, offset,
8249 void *, to, u32, len, u32, start_header)
8250{
8251 return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
8252 len, start_header);
8253}
8254
8255static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = {
8256 .func = sk_reuseport_load_bytes_relative,
8257 .gpl_only = false,
8258 .ret_type = RET_INTEGER,
8259 .arg1_type = ARG_PTR_TO_CTX,
8260 .arg2_type = ARG_ANYTHING,
8261 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
8262 .arg4_type = ARG_CONST_SIZE,
8263 .arg5_type = ARG_ANYTHING,
8264};
8265
8266static const struct bpf_func_proto *
8267sk_reuseport_func_proto(enum bpf_func_id func_id,
8268 const struct bpf_prog *prog)
8269{
8270 switch (func_id) {
8271 case BPF_FUNC_sk_select_reuseport:
8272 return &sk_select_reuseport_proto;
8273 case BPF_FUNC_skb_load_bytes:
8274 return &sk_reuseport_load_bytes_proto;
8275 case BPF_FUNC_skb_load_bytes_relative:
8276 return &sk_reuseport_load_bytes_relative_proto;
8277 default:
8278 return bpf_base_func_proto(func_id);
8279 }
8280}
8281
8282static bool
8283sk_reuseport_is_valid_access(int off, int size,
8284 enum bpf_access_type type,
8285 const struct bpf_prog *prog,
8286 struct bpf_insn_access_aux *info)
8287{
8288 const u32 size_default = sizeof(__u32);
8289
8290 if (off < 0 || off >= sizeof(struct sk_reuseport_md) ||
8291 off % size || type != BPF_READ)
8292 return false;
8293
8294 switch (off) {
8295 case offsetof(struct sk_reuseport_md, data):
8296 info->reg_type = PTR_TO_PACKET;
8297 return size == sizeof(__u64);
8298
8299 case offsetof(struct sk_reuseport_md, data_end):
8300 info->reg_type = PTR_TO_PACKET_END;
8301 return size == sizeof(__u64);
8302
8303 case offsetof(struct sk_reuseport_md, hash):
8304 return size == size_default;
8305
8306 /* Fields that allow narrowing */
8307 case offsetof(struct sk_reuseport_md, eth_protocol):
8308 if (size < FIELD_SIZEOF(struct sk_buff, protocol))
8309 return false;
4597b62f 8310 /* fall through */
2dbb9b9e
MKL
8311 case offsetof(struct sk_reuseport_md, ip_protocol):
8312 case offsetof(struct sk_reuseport_md, bind_inany):
8313 case offsetof(struct sk_reuseport_md, len):
8314 bpf_ctx_record_field_size(info, size_default);
8315 return bpf_ctx_narrow_access_ok(off, size, size_default);
8316
8317 default:
8318 return false;
8319 }
8320}
8321
8322#define SK_REUSEPORT_LOAD_FIELD(F) ({ \
8323 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
8324 si->dst_reg, si->src_reg, \
8325 bpf_target_off(struct sk_reuseport_kern, F, \
8326 FIELD_SIZEOF(struct sk_reuseport_kern, F), \
8327 target_size)); \
8328 })
8329
8330#define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD) \
8331 SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \
8332 struct sk_buff, \
8333 skb, \
8334 SKB_FIELD)
8335
8336#define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \
8337 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern, \
8338 struct sock, \
8339 sk, \
8340 SK_FIELD, BPF_SIZE, EXTRA_OFF)
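
/* SK_REUSEPORT_LOAD_FIELD() reads directly from sk_reuseport_kern, while the
 * two wrappers above reuse the sock_addr nested-field helpers to dereference
 * the embedded skb and sk pointers first.
 */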
8341
8342static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
8343 const struct bpf_insn *si,
8344 struct bpf_insn *insn_buf,
8345 struct bpf_prog *prog,
8346 u32 *target_size)
8347{
8348 struct bpf_insn *insn = insn_buf;
8349
8350 switch (si->off) {
8351 case offsetof(struct sk_reuseport_md, data):
8352 SK_REUSEPORT_LOAD_SKB_FIELD(data);
8353 break;
8354
8355 case offsetof(struct sk_reuseport_md, len):
8356 SK_REUSEPORT_LOAD_SKB_FIELD(len);
8357 break;
8358
8359 case offsetof(struct sk_reuseport_md, eth_protocol):
8360 SK_REUSEPORT_LOAD_SKB_FIELD(protocol);
8361 break;
8362
8363 case offsetof(struct sk_reuseport_md, ip_protocol):
3f6e138d 8364 BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
2dbb9b9e
MKL
8365 SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
8366 BPF_W, 0);
8367 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
8368 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
8369 SK_FL_PROTO_SHIFT);
8370 /* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian
8371 * aware. No further narrowing or masking is needed.
8372 */
8373 *target_size = 1;
8374 break;
8375
8376 case offsetof(struct sk_reuseport_md, data_end):
8377 SK_REUSEPORT_LOAD_FIELD(data_end);
8378 break;
8379
8380 case offsetof(struct sk_reuseport_md, hash):
8381 SK_REUSEPORT_LOAD_FIELD(hash);
8382 break;
8383
8384 case offsetof(struct sk_reuseport_md, bind_inany):
8385 SK_REUSEPORT_LOAD_FIELD(bind_inany);
8386 break;
8387 }
8388
8389 return insn - insn_buf;
8390}
8391
8392const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
8393 .get_func_proto = sk_reuseport_func_proto,
8394 .is_valid_access = sk_reuseport_is_valid_access,
8395 .convert_ctx_access = sk_reuseport_convert_ctx_access,
8396};
8397
8398const struct bpf_prog_ops sk_reuseport_prog_ops = {
8399};
8400#endif /* CONFIG_INET */