bpf: export bpf_sock for BPF_PROG_TYPE_CGROUP_SOCK_ADDR prog type
net/core/filter.c
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);

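/* Editor's note: a minimal sketch of the common caller, assuming the
 * sk_filter() inline wrapper in include/linux/filter.h still has this
 * shape in this tree. A cap of 1 means "reject, or accept and trim",
 * since the pkt_len returned by the program is clamped so at least one
 * byte of the packet is kept:
 *
 *	static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return sk_filter_trim_cap(sk, skb, 1);
 *	}
 */
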
BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return *(u8 *)(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return tmp;
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return *(u8 *)ptr;
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
					 offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u16 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return get_unaligned_be16(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be16_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be16(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u32 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be32(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be32_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be32(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

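/* Editor's note: in the *_no_cache variants above, the headlen argument
 * is derived as skb->len - skb->data_len, i.e. the number of bytes in the
 * linear head. Worked example: for skb->len = 1500 with 1000 bytes held
 * in paged fragments (skb->data_len), headlen is 500, so reads below
 * offset 500 are served straight from the data pointer and anything
 * beyond falls back to skb_copy_bits().
 */
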
BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		break;
	case SKF_AD_VLAN_TAG_PRESENT:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		break;
	}

	return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

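/* Editor's note: as a concrete illustration of the mapping above, a
 * hypothetical classic filter word such as
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU)
 *
 * would be rewritten into the eBPF sequence
 *
 *	BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
 *	BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
 *	BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
 *	BPF_EMIT_CALL(bpf_get_raw_cpu_id);
 *
 * leaving the current CPU id in A (which maps to BPF_REG_0).
 */
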
static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
	const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
	int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
	bool endian = BPF_SIZE(fp->code) == BPF_H ||
		      BPF_SIZE(fp->code) == BPF_W;
	bool indirect = BPF_MODE(fp->code) == BPF_IND;
	const int ip_align = NET_IP_ALIGN;
	struct bpf_insn *insn = *insnp;
	int offset = fp->k;

	if (!indirect &&
	    ((unaligned_ok && offset >= 0) ||
	     (!unaligned_ok && offset >= 0 &&
	      offset + ip_align >= 0 &&
	      offset + ip_align % size == 0))) {
		bool ldx_off_ok = offset <= S16_MAX;

		*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
		if (offset)
			*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
				      size, 2 + endian + (!ldx_off_ok * 2));
		if (ldx_off_ok) {
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_D, offset);
		} else {
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_TMP, 0);
		}
		if (endian)
			*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
		*insn++ = BPF_JMP_A(8);
	}

	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
		if (fp->k)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
	}

	switch (BPF_SIZE(fp->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
		break;
	default:
		return false;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
	*insn = BPF_EXIT_INSN();

	*insnp = insn;
	return true;
}

/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *	@seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    convert_bpf_ld_abs(fp, &insn)) {
				*seen_ld_abs = true;
				break;
			}

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
		s32 off;						\
									\
		if (target >= len || target < 0)			\
			goto err;					\
		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		off -= insn - tmp_insns;				\
		/* Reject anything not fitting into insn->off. */	\
		if (off < off_min || off > off_max)			\
			goto err;					\
		insn->off = off;					\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4  + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4  + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD  ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell that was not previously written, and we check all
 * branches to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

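/* Editor's note: a hypothetical two-insn filter that would fail this
 * check, because mem[0] is read before any ST/STX ever wrote it:
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 0),		- load mem[0]: rejected
 *	BPF_STMT(BPF_RET | BPF_K, 0),
 *
 * whereas prefixing it with BPF_STMT(BPF_ST, 0) (store A into mem[0])
 * would make the same load pass.
 */
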
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

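/* Editor's note: hypothetical one-liners the above rejects, for
 * illustration:
 *
 *	BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0)	- divide by constant zero
 *	BPF_STMT(BPF_ALU | BPF_LSH | BPF_K, 33)	- shift wider than 32 bits
 *	BPF_STMT(BPF_LD | BPF_IMM, 42)		- as last insn: no RET at end
 *
 * each of which returns -EINVAL before conversion is ever attempted.
 */
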
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;
	bool seen_ld_abs = false;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

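/* Editor's note: a minimal in-kernel usage sketch (hypothetical caller),
 * building an accept-everything classic filter and converting it into an
 * unattached eBPF program:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	- accept packet
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *
 *	if (!bpf_prog_create(&prog, &fprog))
 *		bpf_prog_destroy(prog);	- release when done
 */
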
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

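/* Editor's note: userspace reaches sk_attach_filter() through the classic
 * SO_ATTACH_FILTER socket option (and sk_attach_bpf() below through
 * SO_ATTACH_BPF, passing an fd obtained from bpf(BPF_PROG_LOAD)). A
 * minimal sketch of such an application-side caller:
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	- accept all
 *	};
 *	struct sock_fprog bpf = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf));
 */
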
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		err = -ENOMEM;
	else
		err = reuseport_attach_prog(sk, prog);

	if (err)
		__bpf_prog_release(prog);

	return err;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL)
		prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
		/* Like other non BPF_PROG_TYPE_SOCKET_FILTER
		 * bpf prog (e.g. sockmap). It depends on the
		 * limitation imposed by bpf_prog_load().
		 * Hence, sysctl_optmem_max is not checked.
		 */
		if ((sk->sk_type != SOCK_STREAM &&
		     sk->sk_type != SOCK_DGRAM) ||
		    (sk->sk_protocol != IPPROTO_UDP &&
		     sk->sk_protocol != IPPROTO_TCP) ||
		    (sk->sk_family != AF_INET &&
		     sk->sk_family != AF_INET6)) {
			err = -ENOTSUPP;
			goto err_prog_put;
		}
	} else {
		/* BPF_PROG_TYPE_SOCKET_FILTER */
		if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
			err = -ENOMEM;
			goto err_prog_put;
		}
	}

	err = reuseport_attach_prog(sk, prog);
err_prog_put:
	if (err)
		bpf_prog_put(prog);

	return err;
}

void sk_reuseport_prog_free(struct bpf_prog *prog)
{
	if (!prog)
		return;

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
		bpf_prog_put(prog);
	else
		bpf_prog_destroy(prog);
}

struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

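/* Editor's note: from a BPF program's point of view (e.g. at tc), a
 * hypothetical call rewriting a range of packet bytes would look like:
 *
 *	__u8 new_bytes[4] = { ... };
 *
 *	bpf_skb_store_bytes(skb, offset, new_bytes, sizeof(new_bytes),
 *			    BPF_F_RECOMPUTE_CSUM);
 *
 * where BPF_F_RECOMPUTE_CSUM keeps skb->csum consistent across the
 * overwrite, per the pull/push rcsum pair above.
 */
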
BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_flow_dissector_load_bytes,
	   const struct bpf_flow_dissector *, ctx, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	if (unlikely(!ctx->skb))
		goto err_clear;

	ptr = skb_header_pointer(ctx->skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
	.func		= bpf_flow_dissector_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

4e1ec56c
DB
1768BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
1769 u32, offset, void *, to, u32, len, u32, start_header)
1770{
3eee1f75
DB
1771 u8 *end = skb_tail_pointer(skb);
1772 u8 *net = skb_network_header(skb);
1773 u8 *mac = skb_mac_header(skb);
4e1ec56c
DB
1774 u8 *ptr;
1775
3eee1f75 1776 if (unlikely(offset > 0xffff || len > (end - mac)))
4e1ec56c
DB
1777 goto err_clear;
1778
1779 switch (start_header) {
1780 case BPF_HDR_START_MAC:
3eee1f75 1781 ptr = mac + offset;
4e1ec56c
DB
1782 break;
1783 case BPF_HDR_START_NET:
3eee1f75 1784 ptr = net + offset;
4e1ec56c
DB
1785 break;
1786 default:
1787 goto err_clear;
1788 }
1789
3eee1f75 1790 if (likely(ptr >= mac && ptr + len <= end)) {
4e1ec56c
DB
1791 memcpy(to, ptr, len);
1792 return 0;
1793 }
1794
1795err_clear:
1796 memset(to, 0, len);
1797 return -EFAULT;
1798}
1799
1800static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
1801 .func = bpf_skb_load_bytes_relative,
1802 .gpl_only = false,
1803 .ret_type = RET_INTEGER,
1804 .arg1_type = ARG_PTR_TO_CTX,
1805 .arg2_type = ARG_ANYTHING,
1806 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
1807 .arg4_type = ARG_CONST_SIZE,
1808 .arg5_type = ARG_ANYTHING,
1809};
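
/* Usage sketch (BPF side): unlike bpf_skb_load_bytes(), offsets here are
 * relative to a chosen header, which is what programs without a mac
 * header (e.g. cgroup/skb) need. Illustrative only.
 *
 *      struct iphdr iph;
 *
 *      if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
 *                                      BPF_HDR_START_NET) < 0)
 *              return 1;
 */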

BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
        /* Idea is the following: should the needed direct read/write
         * test fail during runtime, we can pull in more data and redo
         * the test, since implicitly, we invalidate previous checks
         * here.
         *
         * Or, since we know how much we need to make read/writable,
         * this can be done once at the program beginning for direct
         * access case. By this we overcome limitations of only current
         * headroom being accessible.
         */
        return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto bpf_skb_pull_data_proto = {
        .func = bpf_skb_pull_data,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
};
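
/* Usage sketch (BPF side): the canonical direct packet access pattern
 * around this helper. Illustrative only.
 *
 *      void *data = (void *)(long)skb->data;
 *      void *data_end = (void *)(long)skb->data_end;
 *
 *      if (data + ETH_HLEN > data_end) {
 *              if (bpf_skb_pull_data(skb, ETH_HLEN) < 0)
 *                      return TC_ACT_OK;
 *              // reload: the helper invalidated data/data_end
 *              data = (void *)(long)skb->data;
 *              data_end = (void *)(long)skb->data_end;
 *              if (data + ETH_HLEN > data_end)
 *                      return TC_ACT_OK;
 *      }
 */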

BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
{
        return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
}

static const struct bpf_func_proto bpf_sk_fullsock_proto = {
        .func = bpf_sk_fullsock,
        .gpl_only = false,
        .ret_type = RET_PTR_TO_SOCKET_OR_NULL,
        .arg1_type = ARG_PTR_TO_SOCK_COMMON,
};

static inline int sk_skb_try_make_writable(struct sk_buff *skb,
                                           unsigned int write_len)
{
        int err = __bpf_try_make_writable(skb, write_len);

        bpf_compute_data_end_sk_skb(skb);
        return err;
}

BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
{
        /* Idea is the following: should the needed direct read/write
         * test fail during runtime, we can pull in more data and redo
         * the test, since implicitly, we invalidate previous checks
         * here.
         *
         * Or, since we know how much we need to make read/writable,
         * this can be done once at the program beginning for direct
         * access case. By this we overcome limitations of only current
         * headroom being accessible.
         */
        return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto sk_skb_pull_data_proto = {
        .func = sk_skb_pull_data,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
};

BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
           u64, from, u64, to, u64, flags)
{
        __sum16 *ptr;

        if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
                return -EINVAL;
        if (unlikely(offset > 0xffff || offset & 1))
                return -EFAULT;
        if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
                return -EFAULT;

        ptr = (__sum16 *)(skb->data + offset);
        switch (flags & BPF_F_HDR_FIELD_MASK) {
        case 0:
                if (unlikely(from != 0))
                        return -EINVAL;

                csum_replace_by_diff(ptr, to);
                break;
        case 2:
                csum_replace2(ptr, from, to);
                break;
        case 4:
                csum_replace4(ptr, from, to);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
        .func = bpf_l3_csum_replace,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_ANYTHING,
        .arg5_type = ARG_ANYTHING,
};

BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
           u64, from, u64, to, u64, flags)
{
        bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
        bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
        bool do_mforce = flags & BPF_F_MARK_ENFORCE;
        __sum16 *ptr;

        if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
                               BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
                return -EINVAL;
        if (unlikely(offset > 0xffff || offset & 1))
                return -EFAULT;
        if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
                return -EFAULT;

        ptr = (__sum16 *)(skb->data + offset);
        if (is_mmzero && !do_mforce && !*ptr)
                return 0;

        switch (flags & BPF_F_HDR_FIELD_MASK) {
        case 0:
                if (unlikely(from != 0))
                        return -EINVAL;

                inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
                break;
        case 2:
                inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
                break;
        case 4:
                inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
                break;
        default:
                return -EINVAL;
        }

        if (is_mmzero && !*ptr)
                *ptr = CSUM_MANGLED_0;
        return 0;
}

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
        .func = bpf_l4_csum_replace,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_ANYTHING,
        .arg5_type = ARG_ANYTHING,
};
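
/* Usage sketch (BPF side): rewriting an IPv4 destination address while
 * keeping the TCP and IP checksums correct combines the store and csum
 * helpers. IP_DST_OFF, IP_CSUM_OFF and TCP_CSUM_OFF are illustrative
 * offset constants the program would define itself.
 *
 *      bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip,
 *                          BPF_F_PSEUDO_HDR | sizeof(new_ip));
 *      bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip,
 *                          sizeof(new_ip));
 *      bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, sizeof(new_ip), 0);
 */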

BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
           __be32 *, to, u32, to_size, __wsum, seed)
{
        struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
        u32 diff_size = from_size + to_size;
        int i, j = 0;

        /* This is quite flexible, some examples:
         *
         * from_size == 0, to_size > 0,  seed := csum --> pushing data
         * from_size > 0,  to_size == 0, seed := csum --> pulling data
         * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
         *
         * Even for diffing, from_size and to_size don't need to be equal.
         */
        if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
                     diff_size > sizeof(sp->diff)))
                return -EINVAL;

        for (i = 0; i < from_size / sizeof(__be32); i++, j++)
                sp->diff[j] = ~from[i];
        for (i = 0; i < to_size / sizeof(__be32); i++, j++)
                sp->diff[j] = to[i];

        return csum_partial(sp->diff, diff_size, seed);
}

static const struct bpf_func_proto bpf_csum_diff_proto = {
        .func = bpf_csum_diff,
        .gpl_only = false,
        .pkt_access = true,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
        .arg2_type = ARG_CONST_SIZE_OR_ZERO,
        .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
        .arg4_type = ARG_CONST_SIZE_OR_ZERO,
        .arg5_type = ARG_ANYTHING,
};
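
/* Usage sketch (BPF side): for rewrites wider than 4 bytes (e.g. an
 * IPv6 address), compute a folded diff first and feed it to the csum
 * replace helpers with a zero "from" value, hitting the
 * csum_replace_by_diff() paths above. Illustrative only.
 *
 *      __be32 old[4], new[4];
 *      __s64 diff;
 *
 *      diff = bpf_csum_diff(old, sizeof(old), new, sizeof(new), 0);
 *      bpf_l4_csum_replace(skb, TCP_CSUM_OFF, 0, diff, BPF_F_PSEUDO_HDR);
 */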

BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
        /* The interface is to be used in combination with bpf_csum_diff()
         * for direct packet writes. csum rotation for alignment as well
         * as emulating csum_sub() can be done from the eBPF program.
         */
        if (skb->ip_summed == CHECKSUM_COMPLETE)
                return (skb->csum = csum_add(skb->csum, csum));

        return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_csum_update_proto = {
        .func = bpf_csum_update,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
};

static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
        return dev_forward_skb(dev, skb);
}

static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
                                      struct sk_buff *skb)
{
        int ret = ____dev_forward_skb(dev, skb);

        if (likely(!ret)) {
                skb->dev = dev;
                ret = netif_rx(skb);
        }

        return ret;
}

static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
        int ret;

        if (dev_xmit_recursion()) {
                net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
                kfree_skb(skb);
                return -ENETDOWN;
        }

        skb->dev = dev;

        dev_xmit_recursion_inc();
        ret = dev_queue_xmit(skb);
        dev_xmit_recursion_dec();

        return ret;
}

static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
                                 u32 flags)
{
        unsigned int mlen = skb_network_offset(skb);

        if (mlen) {
                __skb_pull(skb, mlen);

                /* At ingress, the mac header has already been pulled once.
                 * At egress, skb_postpull_rcsum() has to be done in case
                 * the skb originated from ingress (i.e. a forwarded skb)
                 * to ensure that rcsum starts at the net header.
                 */
                if (!skb_at_tc_ingress(skb))
                        skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
        }
        skb_pop_mac_header(skb);
        skb_reset_mac_len(skb);
        return flags & BPF_F_INGRESS ?
               __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
                                 u32 flags)
{
        /* Verify that a link layer header is carried */
        if (unlikely(skb->mac_header >= skb->network_header)) {
                kfree_skb(skb);
                return -ERANGE;
        }

        bpf_push_mac_rcsum(skb);
        return flags & BPF_F_INGRESS ?
               __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
                          u32 flags)
{
        if (dev_is_mac_header_xmit(dev))
                return __bpf_redirect_common(skb, dev, flags);
        else
                return __bpf_redirect_no_mac(skb, dev, flags);
}

BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
        struct net_device *dev;
        struct sk_buff *clone;
        int ret;

        if (unlikely(flags & ~(BPF_F_INGRESS)))
                return -EINVAL;

        dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
        if (unlikely(!dev))
                return -EINVAL;

        clone = skb_clone(skb, GFP_ATOMIC);
        if (unlikely(!clone))
                return -ENOMEM;

        /* For direct write, we need to keep the invariant that the skbs
         * we're dealing with need to be uncloned. Should uncloning fail
         * here, we need to free the just generated clone to unclone once
         * again.
         */
        ret = bpf_try_make_head_writable(skb);
        if (unlikely(ret)) {
                kfree_skb(clone);
                return -ENOMEM;
        }

        return __bpf_redirect(clone, dev, flags);
}

static const struct bpf_func_proto bpf_clone_redirect_proto = {
        .func = bpf_clone_redirect,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
};

DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);

BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

        if (unlikely(flags & ~(BPF_F_INGRESS)))
                return TC_ACT_SHOT;

        ri->ifindex = ifindex;
        ri->flags = flags;

        return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
        struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
        struct net_device *dev;

        dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
        ri->ifindex = 0;
        if (unlikely(!dev)) {
                kfree_skb(skb);
                return -EINVAL;
        }

        return __bpf_redirect(skb, dev, ri->flags);
}

static const struct bpf_func_proto bpf_redirect_proto = {
        .func = bpf_redirect,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_ANYTHING,
        .arg2_type = ARG_ANYTHING,
};
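
/* Usage sketch (BPF side): bpf_redirect() only records the target in the
 * per-CPU bpf_redirect_info; the actual transmit happens in
 * skb_do_redirect() once the program returns TC_ACT_REDIRECT. The
 * ifindex constant is an illustrative assumption.
 *
 *      SEC("classifier")
 *      int redir(struct __sk_buff *skb)
 *      {
 *              return bpf_redirect(TARGET_IFINDEX, 0);
 *      }
 *
 * Unlike bpf_clone_redirect(), no clone is made, so this is the cheaper
 * option when the original skb does not need to continue up the stack.
 */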

BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
{
        msg->apply_bytes = bytes;
        return 0;
}

static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
        .func = bpf_msg_apply_bytes,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
{
        msg->cork_bytes = bytes;
        return 0;
}

static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
        .func = bpf_msg_cork_bytes,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
           u32, end, u64, flags)
{
        u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
        u32 first_sge, last_sge, i, shift, bytes_sg_total;
        struct scatterlist *sge;
        u8 *raw, *to, *from;
        struct page *page;

        if (unlikely(flags || end <= start))
                return -EINVAL;

        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
                len = sk_msg_elem(msg, i)->length;
                if (start < offset + len)
                        break;
                offset += len;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);

        if (unlikely(start >= offset + len))
                return -EINVAL;

        first_sge = i;
        /* The start may point into the sg element so we need to also
         * account for the headroom.
         */
        bytes_sg_total = start - offset + bytes;
        if (!msg->sg.copy[i] && bytes_sg_total <= len)
                goto out;

        /* At this point we need to linearize multiple scatterlist
         * elements or a single shared page. Either way we need to
         * copy into a linear buffer exclusively owned by BPF. Then
         * place the buffer in the scatterlist and fixup the original
         * entries by removing the entries now in the linear buffer
         * and shifting the remaining entries. For now we do not try
         * to copy partial entries to avoid complexity of running out
         * of sg_entry slots. The downside is reading a single byte
         * will copy the entire sg entry.
         */
        do {
                copy += sk_msg_elem(msg, i)->length;
                sk_msg_iter_var_next(i);
                if (bytes_sg_total <= copy)
                        break;
        } while (i != msg->sg.end);
        last_sge = i;

        if (unlikely(bytes_sg_total > copy))
                return -EINVAL;

        page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
                           get_order(copy));
        if (unlikely(!page))
                return -ENOMEM;

        raw = page_address(page);
        i = first_sge;
        do {
                sge = sk_msg_elem(msg, i);
                from = sg_virt(sge);
                len = sge->length;
                to = raw + poffset;

                memcpy(to, from, len);
                poffset += len;
                sge->length = 0;
                put_page(sg_page(sge));

                sk_msg_iter_var_next(i);
        } while (i != last_sge);

        sg_set_page(&msg->sg.data[first_sge], page, copy, 0);

        /* To repair sg ring we need to shift entries. If we only
         * had a single entry though we can just replace it and
         * be done. Otherwise walk the ring and shift the entries.
         */
        WARN_ON_ONCE(last_sge == first_sge);
        shift = last_sge > first_sge ?
                last_sge - first_sge - 1 :
                MAX_SKB_FRAGS - first_sge + last_sge - 1;
        if (!shift)
                goto out;

        i = first_sge;
        sk_msg_iter_var_next(i);
        do {
                u32 move_from;

                if (i + shift >= MAX_MSG_FRAGS)
                        move_from = i + shift - MAX_MSG_FRAGS;
                else
                        move_from = i + shift;
                if (move_from == msg->sg.end)
                        break;

                msg->sg.data[i] = msg->sg.data[move_from];
                msg->sg.data[move_from].length = 0;
                msg->sg.data[move_from].page_link = 0;
                msg->sg.data[move_from].offset = 0;
                sk_msg_iter_var_next(i);
        } while (1);

        msg->sg.end = msg->sg.end - shift > msg->sg.end ?
                      msg->sg.end - shift + MAX_MSG_FRAGS :
                      msg->sg.end - shift;
out:
        msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
        msg->data_end = msg->data + bytes;
        return 0;
}

static const struct bpf_func_proto bpf_msg_pull_data_proto = {
        .func = bpf_msg_pull_data,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_ANYTHING,
};
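
/* Usage sketch (BPF side): an sk_msg verdict program that needs direct
 * access to the first bytes of a message pulls them into a linear
 * buffer first. Illustrative only.
 *
 *      SEC("sk_msg")
 *      int msg_parser(struct sk_msg_md *msg)
 *      {
 *              if (bpf_msg_pull_data(msg, 0, 8, 0) < 0)
 *                      return SK_PASS;
 *              if (msg->data + 8 > msg->data_end)
 *                      return SK_PASS;
 *              // first 8 bytes now readable via msg->data
 *              return SK_PASS;
 *      }
 */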

BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
           u32, len, u64, flags)
{
        struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
        u32 new, i = 0, l, space, copy = 0, offset = 0;
        u8 *raw, *to, *from;
        struct page *page;

        if (unlikely(flags))
                return -EINVAL;

        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
                l = sk_msg_elem(msg, i)->length;

                if (start < offset + l)
                        break;
                offset += l;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);

        if (start >= offset + l)
                return -EINVAL;

        space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);

        /* If no space is available we will fallback to copy; we need at
         * least one scatterlist elem available to push data into
         * when start aligns to the beginning of an element, or two
         * when it falls inside an element. We handle the start equals
         * offset case because it's the common case for inserting a
         * header.
         */
        if (!space || (space == 1 && start != offset))
                copy = msg->sg.data[i].length;

        page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
                           get_order(copy + len));
        if (unlikely(!page))
                return -ENOMEM;

        if (copy) {
                int front, back;

                raw = page_address(page);

                psge = sk_msg_elem(msg, i);
                front = start - offset;
                back = psge->length - front;
                from = sg_virt(psge);

                if (front)
                        memcpy(raw, from, front);

                if (back) {
                        from += front;
                        to = raw + front + len;

                        memcpy(to, from, back);
                }

                put_page(sg_page(psge));
        } else if (start - offset) {
                psge = sk_msg_elem(msg, i);
                rsge = sk_msg_elem_cpy(msg, i);

                psge->length = start - offset;
                rsge.length -= psge->length;
                rsge.offset += start;

                sk_msg_iter_var_next(i);
                sg_unmark_end(psge);
                sk_msg_iter_next(msg, end);
        }

        /* Slot(s) to place newly allocated data */
        new = i;

        /* Shift one or two slots as needed */
        if (!copy) {
                sge = sk_msg_elem_cpy(msg, i);

                sk_msg_iter_var_next(i);
                sg_unmark_end(&sge);
                sk_msg_iter_next(msg, end);

                nsge = sk_msg_elem_cpy(msg, i);
                if (rsge.length) {
                        sk_msg_iter_var_next(i);
                        nnsge = sk_msg_elem_cpy(msg, i);
                }

                while (i != msg->sg.end) {
                        msg->sg.data[i] = sge;
                        sge = nsge;
                        sk_msg_iter_var_next(i);
                        if (rsge.length) {
                                nsge = nnsge;
                                nnsge = sk_msg_elem_cpy(msg, i);
                        } else {
                                nsge = sk_msg_elem_cpy(msg, i);
                        }
                }
        }

        /* Place newly allocated data buffer */
        sk_mem_charge(msg->sk, len);
        msg->sg.size += len;
        msg->sg.copy[new] = false;
        sg_set_page(&msg->sg.data[new], page, len + copy, 0);
        if (rsge.length) {
                get_page(sg_page(&rsge));
                sk_msg_iter_var_next(new);
                msg->sg.data[new] = rsge;
        }

        sk_msg_compute_data_pointers(msg);
        return 0;
}

static const struct bpf_func_proto bpf_msg_push_data_proto = {
        .func = bpf_msg_push_data,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_ANYTHING,
};

static void sk_msg_shift_left(struct sk_msg *msg, int i)
{
        int prev;

        do {
                prev = i;
                sk_msg_iter_var_next(i);
                msg->sg.data[prev] = msg->sg.data[i];
        } while (i != msg->sg.end);

        sk_msg_iter_prev(msg, end);
}

static void sk_msg_shift_right(struct sk_msg *msg, int i)
{
        struct scatterlist tmp, sge;

        sk_msg_iter_next(msg, end);
        sge = sk_msg_elem_cpy(msg, i);
        sk_msg_iter_var_next(i);
        tmp = sk_msg_elem_cpy(msg, i);

        while (i != msg->sg.end) {
                msg->sg.data[i] = sge;
                sk_msg_iter_var_next(i);
                sge = tmp;
                tmp = sk_msg_elem_cpy(msg, i);
        }
}

BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
           u32, len, u64, flags)
{
        u32 i = 0, l, space, offset = 0;
        u64 last = start + len;
        int pop;

        if (unlikely(flags))
                return -EINVAL;

        /* First find the starting scatterlist element */
        i = msg->sg.start;
        do {
                l = sk_msg_elem(msg, i)->length;

                if (start < offset + l)
                        break;
                offset += l;
                sk_msg_iter_var_next(i);
        } while (i != msg->sg.end);

        /* Bounds checks: start and pop must be inside message */
        if (start >= offset + l || last >= msg->sg.size)
                return -EINVAL;

        space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);

        pop = len;
        /* --------------| offset
         * -| start      |--------- len -------|
         *
         *  |----- a ----|-------- pop -------|----- b ----|
         *  |______________________________________________| length
         *
         *
         * a:   region at front of scatter element to save
         * b:   region at back of scatter element to save when length > A + pop
         * pop: region to pop from element, same as input 'pop' here will be
         *      decremented below per iteration.
         *
         * Two top-level cases to handle when start != offset: first, B is
         * non-zero, and second, B is zero, corresponding to when a pop
         * includes more than one element.
         *
         * Then if B is non-zero AND there is no space, allocate space and
         * compact A, B regions into page. If there is space, shift the ring
         * to the right, freeing the next element in the ring to place B,
         * leaving A untouched except to reduce length.
         */
        if (start != offset) {
                struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
                int a = start;
                int b = sge->length - pop - a;

                sk_msg_iter_var_next(i);

                if (pop < sge->length - a) {
                        if (space) {
                                sge->length = a;
                                sk_msg_shift_right(msg, i);
                                nsge = sk_msg_elem(msg, i);
                                get_page(sg_page(sge));
                                sg_set_page(nsge,
                                            sg_page(sge),
                                            b, sge->offset + pop + a);
                        } else {
                                struct page *page, *orig;
                                u8 *to, *from;

                                page = alloc_pages(__GFP_NOWARN |
                                                   __GFP_COMP | GFP_ATOMIC,
                                                   get_order(a + b));
                                if (unlikely(!page))
                                        return -ENOMEM;

                                sge->length = a;
                                orig = sg_page(sge);
                                from = sg_virt(sge);
                                to = page_address(page);
                                memcpy(to, from, a);
                                memcpy(to + a, from + a + pop, b);
                                sg_set_page(sge, page, a + b, 0);
                                put_page(orig);
                        }
                        pop = 0;
                } else if (pop >= sge->length - a) {
                        pop -= (sge->length - a);
                        sge->length = a;
                }
        }

        /* From above the current layout _must_ be as follows,
         *
         * -| offset
         * -| start
         *
         *  |---- pop ---|---------------- b ------------|
         *  |____________________________________________| length
         *
         * Offset and start of the current msg elem are equal because in the
         * previous case we handled offset != start and either consumed the
         * entire element and advanced to the next element OR pop == 0.
         *
         * Two cases to handle here are first pop is less than the length
         * leaving some remainder b above. Simply adjust the element's layout
         * in this case. Or pop >= length of the element so that b = 0. In this
         * case advance to next element decrementing pop.
         */
        while (pop) {
                struct scatterlist *sge = sk_msg_elem(msg, i);

                if (pop < sge->length) {
                        sge->length -= pop;
                        sge->offset += pop;
                        pop = 0;
                } else {
                        pop -= sge->length;
                        sk_msg_shift_left(msg, i);
                }
                sk_msg_iter_var_next(i);
        }

        sk_mem_uncharge(msg->sk, len - pop);
        msg->sg.size -= (len - pop);
        sk_msg_compute_data_pointers(msg);
        return 0;
}

static const struct bpf_func_proto bpf_msg_pop_data_proto = {
        .func = bpf_msg_pop_data,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_ANYTHING,
};
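
/* Usage sketch (BPF side): push and pop are symmetric, so a program can
 * reserve room for a small application header at the front of a message
 * and strip it again later. The 4-byte size is illustrative; note that
 * bpf_msg_push_data() only reserves the room, the contents still need
 * to be written (e.g. after bpf_msg_pull_data()).
 *
 *      if (bpf_msg_push_data(msg, 0, 4, 0))
 *              return SK_DROP;
 *      ...
 *      if (bpf_msg_pop_data(msg, 0, 4, 0))
 *              return SK_DROP;
 */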

BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
        return task_get_classid(skb);
}

static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
        .func = bpf_get_cgroup_classid,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{
        return dst_tclassid(skb);
}

static const struct bpf_func_proto bpf_get_route_realm_proto = {
        .func = bpf_get_route_realm,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{
        /* If skb_clear_hash() was called due to mangling, we can
         * trigger SW recalculation here. Later access to hash
         * can then use the inline skb->hash via context directly
         * instead of calling this helper again.
         */
        return skb_get_hash(skb);
}

static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
        .func = bpf_get_hash_recalc,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
{
        /* After all direct packet write, this can be used once for
         * triggering a lazy recalc on next skb_get_hash() invocation.
         */
        skb_clear_hash(skb);
        return 0;
}

static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
        .func = bpf_set_hash_invalid,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
{
        /* Set user specified hash as L4(+), so that it gets returned
         * on skb_get_hash() call unless BPF prog later on triggers a
         * skb_clear_hash().
         */
        __skb_set_sw_hash(skb, hash, true);
        return 0;
}

static const struct bpf_func_proto bpf_set_hash_proto = {
        .func = bpf_set_hash,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
           u16, vlan_tci)
{
        int ret;

        if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
                     vlan_proto != htons(ETH_P_8021AD)))
                vlan_proto = htons(ETH_P_8021Q);

        bpf_push_mac_rcsum(skb);
        ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
        bpf_pull_mac_rcsum(skb);

        bpf_compute_data_pointers(skb);
        return ret;
}

static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
        .func = bpf_skb_vlan_push,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
};

BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
        int ret;

        bpf_push_mac_rcsum(skb);
        ret = skb_vlan_pop(skb);
        bpf_pull_mac_rcsum(skb);

        bpf_compute_data_pointers(skb);
        return ret;
}

static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
        .func = bpf_skb_vlan_pop,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
};
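
/* Usage sketch (BPF side): tag egress traffic with VLAN 42. Any direct
 * packet pointers held by the program must be re-validated afterwards,
 * since the helpers above recompute the data pointers. Illustrative only.
 *
 *      if (bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 42))
 *              return TC_ACT_SHOT;
 */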

static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{
        /* Caller already did skb_cow() with len as headroom,
         * so no need to do it here.
         */
        skb_push(skb, len);
        memmove(skb->data, skb->data + len, off);
        memset(skb->data + off, 0, len);

        /* No skb_postpush_rcsum(skb, skb->data + off, len)
         * needed here as it does not change the skb->csum
         * result for checksum complete when summing over
         * zeroed blocks.
         */
        return 0;
}

static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
        /* skb_ensure_writable() is not needed here, as we're
         * already working on an uncloned skb.
         */
        if (unlikely(!pskb_may_pull(skb, off + len)))
                return -ENOMEM;

        skb_postpull_rcsum(skb, skb->data + off, len);
        memmove(skb->data + len, skb->data, off);
        __skb_pull(skb, len);

        return 0;
}

static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
        bool trans_same = skb->transport_header == skb->network_header;
        int ret;

        /* There's no need for __skb_push()/__skb_pull() pair to
         * get to the start of the mac header as we're guaranteed
         * to always start from here under eBPF.
         */
        ret = bpf_skb_generic_push(skb, off, len);
        if (likely(!ret)) {
                skb->mac_header -= len;
                skb->network_header -= len;
                if (trans_same)
                        skb->transport_header = skb->network_header;
        }

        return ret;
}

static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
{
        bool trans_same = skb->transport_header == skb->network_header;
        int ret;

        /* Same here, __skb_push()/__skb_pull() pair not needed. */
        ret = bpf_skb_generic_pop(skb, off, len);
        if (likely(!ret)) {
                skb->mac_header += len;
                skb->network_header += len;
                if (trans_same)
                        skb->transport_header = skb->network_header;
        }

        return ret;
}

static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
{
        const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
        u32 off = skb_mac_header_len(skb);
        int ret;

        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
                return -ENOTSUPP;

        ret = skb_cow(skb, len_diff);
        if (unlikely(ret < 0))
                return ret;

        ret = bpf_skb_net_hdr_push(skb, off, len_diff);
        if (unlikely(ret < 0))
                return ret;

        if (skb_is_gso(skb)) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);

                /* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. */
                if (shinfo->gso_type & SKB_GSO_TCPV4) {
                        shinfo->gso_type &= ~SKB_GSO_TCPV4;
                        shinfo->gso_type |= SKB_GSO_TCPV6;
                }

                /* Due to IPv6 header, MSS needs to be downgraded. */
                skb_decrease_gso_size(shinfo, len_diff);
                /* Header must be checked, and gso_segs recomputed. */
                shinfo->gso_type |= SKB_GSO_DODGY;
                shinfo->gso_segs = 0;
        }

        skb->protocol = htons(ETH_P_IPV6);
        skb_clear_hash(skb);

        return 0;
}

static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
{
        const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
        u32 off = skb_mac_header_len(skb);
        int ret;

        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
                return -ENOTSUPP;

        ret = skb_unclone(skb, GFP_ATOMIC);
        if (unlikely(ret < 0))
                return ret;

        ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
        if (unlikely(ret < 0))
                return ret;

        if (skb_is_gso(skb)) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);

                /* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. */
                if (shinfo->gso_type & SKB_GSO_TCPV6) {
                        shinfo->gso_type &= ~SKB_GSO_TCPV6;
                        shinfo->gso_type |= SKB_GSO_TCPV4;
                }

                /* Due to IPv4 header, MSS can be upgraded. */
                skb_increase_gso_size(shinfo, len_diff);
                /* Header must be checked, and gso_segs recomputed. */
                shinfo->gso_type |= SKB_GSO_DODGY;
                shinfo->gso_segs = 0;
        }

        skb->protocol = htons(ETH_P_IP);
        skb_clear_hash(skb);

        return 0;
}

static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{
        __be16 from_proto = skb->protocol;

        if (from_proto == htons(ETH_P_IP) &&
            to_proto == htons(ETH_P_IPV6))
                return bpf_skb_proto_4_to_6(skb);

        if (from_proto == htons(ETH_P_IPV6) &&
            to_proto == htons(ETH_P_IP))
                return bpf_skb_proto_6_to_4(skb);

        return -ENOTSUPP;
}

BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
           u64, flags)
{
        int ret;

        if (unlikely(flags))
                return -EINVAL;

        /* General idea is that this helper does the basic groundwork
         * needed for changing the protocol, and eBPF program fills the
         * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
         * and other helpers, rather than passing a raw buffer here.
         *
         * The rationale is to keep this minimal and without a need to
         * deal with raw packet data. F.e. even if we would pass buffers
         * here, the program still needs to call the bpf_lX_csum_replace()
         * helpers anyway. Plus, this way we keep also separation of
         * concerns, since f.e. bpf_skb_store_bytes() should only take
         * care of stores.
         *
         * Currently, additional options and extension header space are
         * not supported, but flags register is reserved so we can adapt
         * that. For offloads, we mark packet as dodgy, so that headers
         * need to be verified first.
         */
        ret = bpf_skb_proto_xlat(skb, proto);
        bpf_compute_data_pointers(skb);
        return ret;
}

static const struct bpf_func_proto bpf_skb_change_proto_proto = {
        .func = bpf_skb_change_proto,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
};
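
/* Usage sketch (BPF side): a NAT64-style translator first switches the
 * protocol, then fills in the new IPv6 header with the store and csum
 * helpers, as the comment above prescribes. Illustrative only.
 *
 *      if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0) < 0)
 *              return TC_ACT_SHOT;
 *      // now write the IPv6 header via bpf_skb_store_bytes() etc.
 */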

BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{
        /* We only allow a restricted subset to be changed for now. */
        if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
                     !skb_pkt_type_ok(pkt_type)))
                return -EINVAL;

        skb->pkt_type = pkt_type;
        return 0;
}

static const struct bpf_func_proto bpf_skb_change_type_proto = {
        .func = bpf_skb_change_type,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
};

static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return sizeof(struct iphdr);
        case htons(ETH_P_IPV6):
                return sizeof(struct ipv6hdr);
        default:
                return ~0U;
        }
}

#define BPF_F_ADJ_ROOM_ENCAP_L3_MASK (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
                                      BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)

#define BPF_F_ADJ_ROOM_MASK (BPF_F_ADJ_ROOM_FIXED_GSO | \
                             BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
                             BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
                             BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
                             BPF_F_ADJ_ROOM_ENCAP_L2( \
                              BPF_ADJ_ROOM_ENCAP_L2_MASK))

static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
                            u64 flags)
{
        u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
        bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
        u16 mac_len = 0, inner_net = 0, inner_trans = 0;
        unsigned int gso_type = SKB_GSO_DODGY;
        int ret;

        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
                /* udp gso_size delineates datagrams, only allow if fixed */
                if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
                    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
                        return -ENOTSUPP;
        }

        ret = skb_cow_head(skb, len_diff);
        if (unlikely(ret < 0))
                return ret;

        if (encap) {
                if (skb->protocol != htons(ETH_P_IP) &&
                    skb->protocol != htons(ETH_P_IPV6))
                        return -ENOTSUPP;

                if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
                    flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
                        return -EINVAL;

                if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
                    flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
                        return -EINVAL;

                if (skb->encapsulation)
                        return -EALREADY;

                mac_len = skb->network_header - skb->mac_header;
                inner_net = skb->network_header;
                if (inner_mac_len > len_diff)
                        return -EINVAL;
                inner_trans = skb->transport_header;
        }

        ret = bpf_skb_net_hdr_push(skb, off, len_diff);
        if (unlikely(ret < 0))
                return ret;

        if (encap) {
                skb->inner_mac_header = inner_net - inner_mac_len;
                skb->inner_network_header = inner_net;
                skb->inner_transport_header = inner_trans;
                skb_set_inner_protocol(skb, skb->protocol);

                skb->encapsulation = 1;
                skb_set_network_header(skb, mac_len);

                if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
                        gso_type |= SKB_GSO_UDP_TUNNEL;
                else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
                        gso_type |= SKB_GSO_GRE;
                else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
                        gso_type |= SKB_GSO_IPXIP6;
                else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
                        gso_type |= SKB_GSO_IPXIP4;

                if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
                    flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
                        int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
                                     sizeof(struct ipv6hdr) :
                                     sizeof(struct iphdr);

                        skb_set_transport_header(skb, mac_len + nh_len);
                }

                /* Match skb->protocol to new outer l3 protocol */
                if (skb->protocol == htons(ETH_P_IP) &&
                    flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
                        skb->protocol = htons(ETH_P_IPV6);
                else if (skb->protocol == htons(ETH_P_IPV6) &&
                         flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
                        skb->protocol = htons(ETH_P_IP);
        }

        if (skb_is_gso(skb)) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);

                /* Due to header grow, MSS needs to be downgraded. */
                if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
                        skb_decrease_gso_size(shinfo, len_diff);

                /* Header must be checked, and gso_segs recomputed. */
                shinfo->gso_type |= gso_type;
                shinfo->gso_segs = 0;
        }

        return 0;
}

static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
                              u64 flags)
{
        int ret;

        if (flags & ~BPF_F_ADJ_ROOM_FIXED_GSO)
                return -EINVAL;

        if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
                /* udp gso_size delineates datagrams, only allow if fixed */
                if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
                    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
                        return -ENOTSUPP;
        }

        ret = skb_unclone(skb, GFP_ATOMIC);
        if (unlikely(ret < 0))
                return ret;

        ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
        if (unlikely(ret < 0))
                return ret;

        if (skb_is_gso(skb)) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);

                /* Due to header shrink, MSS can be upgraded. */
                if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
                        skb_increase_gso_size(shinfo, len_diff);

                /* Header must be checked, and gso_segs recomputed. */
                shinfo->gso_type |= SKB_GSO_DODGY;
                shinfo->gso_segs = 0;
        }

        return 0;
}

static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
        return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
                          SKB_MAX_ALLOC;
}

BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
           u32, mode, u64, flags)
{
        u32 len_cur, len_diff_abs = abs(len_diff);
        u32 len_min = bpf_skb_net_base_len(skb);
        u32 len_max = __bpf_skb_max_len(skb);
        __be16 proto = skb->protocol;
        bool shrink = len_diff < 0;
        u32 off;
        int ret;

        if (unlikely(flags & ~BPF_F_ADJ_ROOM_MASK))
                return -EINVAL;
        if (unlikely(len_diff_abs > 0xfffU))
                return -EFAULT;
        if (unlikely(proto != htons(ETH_P_IP) &&
                     proto != htons(ETH_P_IPV6)))
                return -ENOTSUPP;

        off = skb_mac_header_len(skb);
        switch (mode) {
        case BPF_ADJ_ROOM_NET:
                off += bpf_skb_net_base_len(skb);
                break;
        case BPF_ADJ_ROOM_MAC:
                break;
        default:
                return -ENOTSUPP;
        }

        len_cur = skb->len - skb_network_offset(skb);
        if ((shrink && (len_diff_abs >= len_cur ||
                        len_cur - len_diff_abs < len_min)) ||
            (!shrink && (skb->len + len_diff_abs > len_max &&
                         !skb_is_gso(skb))))
                return -ENOTSUPP;

        ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
                       bpf_skb_net_grow(skb, off, len_diff_abs, flags);

        bpf_compute_data_pointers(skb);
        return ret;
}

static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
        .func = bpf_skb_adjust_room,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
        .arg4_type = ARG_ANYTHING,
};
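
/* Usage sketch (BPF side): make room for an outer IPv4 + GRE header in
 * front of the inner network header, flagging the encapsulation so GSO
 * state stays consistent. The 4-byte size stands for a flag-less GRE
 * header and is an illustrative assumption.
 *
 *      __u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
 *                    BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
 *
 *      if (bpf_skb_adjust_room(skb, sizeof(struct iphdr) + 4,
 *                              BPF_ADJ_ROOM_MAC, flags) < 0)
 *              return TC_ACT_SHOT;
 */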

static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
        u32 min_len = skb_network_offset(skb);

        if (skb_transport_header_was_set(skb))
                min_len = skb_transport_offset(skb);
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                min_len = skb_checksum_start_offset(skb) +
                          skb->csum_offset + sizeof(__sum16);
        return min_len;
}

static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
{
        unsigned int old_len = skb->len;
        int ret;

        ret = __skb_grow_rcsum(skb, new_len);
        if (!ret)
                memset(skb->data + old_len, 0, new_len - old_len);
        return ret;
}

static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
{
        return __skb_trim_rcsum(skb, new_len);
}

static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
                                        u64 flags)
{
        u32 max_len = __bpf_skb_max_len(skb);
        u32 min_len = __bpf_skb_min_len(skb);
        int ret;

        if (unlikely(flags || new_len > max_len || new_len < min_len))
                return -EINVAL;
        if (skb->encapsulation)
                return -ENOTSUPP;

        /* The basic idea of this helper is that it's performing the
         * needed work to either grow or trim an skb, and eBPF program
         * rewrites the rest via helpers like bpf_skb_store_bytes(),
         * bpf_lX_csum_replace() and others rather than passing a raw
         * buffer here. This one is a slow path helper and intended
         * for replies with control messages.
         *
         * Like in bpf_skb_change_proto(), we want to keep this rather
         * minimal and without protocol specifics so that we are able
         * to separate concerns as in bpf_skb_store_bytes() should only
         * be the one responsible for writing buffers.
         *
         * It's really expected to be a slow path operation here for
         * control message replies, so we're implicitly linearizing,
         * uncloning and dropping offloads from the skb by this.
         */
        ret = __bpf_try_make_writable(skb, skb->len);
        if (!ret) {
                if (new_len > skb->len)
                        ret = bpf_skb_grow_rcsum(skb, new_len);
                else if (new_len < skb->len)
                        ret = bpf_skb_trim_rcsum(skb, new_len);
                if (!ret && skb_is_gso(skb))
                        skb_gso_reset(skb);
        }
        return ret;
}

BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
           u64, flags)
{
        int ret = __bpf_skb_change_tail(skb, new_len, flags);

        bpf_compute_data_pointers(skb);
        return ret;
}

static const struct bpf_func_proto bpf_skb_change_tail_proto = {
        .func = bpf_skb_change_tail,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
           u64, flags)
{
        int ret = __bpf_skb_change_tail(skb, new_len, flags);

        bpf_compute_data_end_sk_skb(skb);
        return ret;
}

static const struct bpf_func_proto sk_skb_change_tail_proto = {
        .func = sk_skb_change_tail,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
};

static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
                                        u64 flags)
{
        u32 max_len = __bpf_skb_max_len(skb);
        u32 new_len = skb->len + head_room;
        int ret;

        if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
                     new_len < skb->len))
                return -EINVAL;

        ret = skb_cow(skb, head_room);
        if (likely(!ret)) {
                /* Idea for this helper is that we currently only
                 * allow expanding on the mac header. This means that
                 * skb->protocol, network header, etc, stay as is.
                 * Compared to bpf_skb_change_tail(), we're more
                 * flexible due to not needing to linearize or
                 * reset GSO. Intention for this helper is to be
                 * used by an L3 skb that needs to push mac header
                 * for redirection into L2 device.
                 */
                __skb_push(skb, head_room);
                memset(skb->data, 0, head_room);
                skb_reset_mac_header(skb);
        }

        return ret;
}

BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
           u64, flags)
{
        int ret = __bpf_skb_change_head(skb, head_room, flags);

        bpf_compute_data_pointers(skb);
        return ret;
}

static const struct bpf_func_proto bpf_skb_change_head_proto = {
        .func = bpf_skb_change_head,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
           u64, flags)
{
        int ret = __bpf_skb_change_head(skb, head_room, flags);

        bpf_compute_data_end_sk_skb(skb);
        return ret;
}

static const struct bpf_func_proto sk_skb_change_head_proto = {
        .func = sk_skb_change_head,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
        .arg3_type = ARG_ANYTHING,
};

static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
        return xdp_data_meta_unsupported(xdp) ? 0 :
               xdp->data - xdp->data_meta;
}

BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
        void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
        unsigned long metalen = xdp_get_metalen(xdp);
        void *data_start = xdp_frame_end + metalen;
        void *data = xdp->data + offset;

        if (unlikely(data < data_start ||
                     data > xdp->data_end - ETH_HLEN))
                return -EINVAL;

        if (metalen)
                memmove(xdp->data_meta + offset,
                        xdp->data_meta, metalen);
        xdp->data_meta += offset;
        xdp->data = data;

        return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
        .func = bpf_xdp_adjust_head,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
};
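
/* Usage sketch (XDP program side): reserve 8 bytes of custom header in
 * front of the packet, then re-validate the data pointers the helper
 * above just moved. Illustrative only.
 *
 *      if (bpf_xdp_adjust_head(xdp, -8))
 *              return XDP_ABORTED;
 *      data = (void *)(long)xdp->data;
 *      data_end = (void *)(long)xdp->data_end;
 *      if (data + 8 > data_end)
 *              return XDP_ABORTED;
 */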

BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
{
        void *data_end = xdp->data_end + offset;

        /* only shrinking is allowed for now. */
        if (unlikely(offset >= 0))
                return -EINVAL;

        if (unlikely(data_end < xdp->data + ETH_HLEN))
                return -EINVAL;

        xdp->data_end = data_end;

        return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
        .func = bpf_xdp_adjust_tail,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
{
        void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
        void *meta = xdp->data_meta + offset;
        unsigned long metalen = xdp->data - meta;

        if (xdp_data_meta_unsupported(xdp))
                return -ENOTSUPP;
        if (unlikely(meta < xdp_frame_end ||
                     meta > xdp->data))
                return -EINVAL;
        if (unlikely((metalen & (sizeof(__u32) - 1)) ||
                     (metalen > 32)))
                return -EACCES;

        xdp->data_meta = meta;

        return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
        .func = bpf_xdp_adjust_meta,
        .gpl_only = false,
        .ret_type = RET_INTEGER,
        .arg1_type = ARG_PTR_TO_CTX,
        .arg2_type = ARG_ANYTHING,
};
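
/* Usage sketch (XDP side): stash a 4-byte mark in the metadata area so a
 * later tc program can read it via skb->data_meta. Illustrative only.
 *
 *      __u32 mark = 1;
 *      void *data, *meta;
 *
 *      if (bpf_xdp_adjust_meta(xdp, -(int)sizeof(mark)))
 *              return XDP_PASS;
 *      data = (void *)(long)xdp->data;
 *      meta = (void *)(long)xdp->data_meta;
 *      if (meta + sizeof(mark) > data)
 *              return XDP_PASS;
 *      *(__u32 *)meta = mark;
 */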
3462
11393cc9
JF
3463static int __bpf_tx_xdp(struct net_device *dev,
3464 struct bpf_map *map,
3465 struct xdp_buff *xdp,
3466 u32 index)
814abfab 3467{
44fa2dbd 3468 struct xdp_frame *xdpf;
d8d7218a 3469 int err, sent;
11393cc9
JF
3470
3471 if (!dev->netdev_ops->ndo_xdp_xmit) {
11393cc9 3472 return -EOPNOTSUPP;
814abfab 3473 }
11393cc9 3474
d8d7218a
TM
3475 err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
3476 if (unlikely(err))
3477 return err;
3478
44fa2dbd
JDB
3479 xdpf = convert_to_xdp_frame(xdp);
3480 if (unlikely(!xdpf))
3481 return -EOVERFLOW;
3482
1e67575a 3483 sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, XDP_XMIT_FLUSH);
735fc405
JDB
3484 if (sent <= 0)
3485 return sent;
9c270af3
JDB
3486 return 0;
3487}
3488
47b123ed
JDB
3489static noinline int
3490xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
3491 struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
3492{
3493 struct net_device *fwd;
3494 u32 index = ri->ifindex;
3495 int err;
3496
3497 fwd = dev_get_by_index_rcu(dev_net(dev), index);
3498 ri->ifindex = 0;
3499 if (unlikely(!fwd)) {
3500 err = -EINVAL;
3501 goto err;
3502 }
3503
3504 err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
3505 if (unlikely(err))
3506 goto err;
3507
3508 _trace_xdp_redirect(dev, xdp_prog, index);
3509 return 0;
3510err:
3511 _trace_xdp_redirect_err(dev, xdp_prog, index, err);
3512 return err;
3513}
3514
9c270af3
JDB
3515static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
3516 struct bpf_map *map,
3517 struct xdp_buff *xdp,
3518 u32 index)
3519{
3520 int err;
3521
1b1a251c
BT
3522 switch (map->map_type) {
3523 case BPF_MAP_TYPE_DEVMAP: {
67f29e07 3524 struct bpf_dtab_netdev *dst = fwd;
9c270af3 3525
38edddb8 3526 err = dev_map_enqueue(dst, xdp, dev_rx);
e1302542 3527 if (unlikely(err))
9c270af3 3528 return err;
11393cc9 3529 __dev_map_insert_ctx(map, index);
1b1a251c
BT
3530 break;
3531 }
3532 case BPF_MAP_TYPE_CPUMAP: {
9c270af3
JDB
3533 struct bpf_cpu_map_entry *rcpu = fwd;
3534
3535 err = cpu_map_enqueue(rcpu, xdp, dev_rx);
e1302542 3536 if (unlikely(err))
9c270af3
JDB
3537 return err;
3538 __cpu_map_insert_ctx(map, index);
1b1a251c
BT
3539 break;
3540 }
3541 case BPF_MAP_TYPE_XSKMAP: {
3542 struct xdp_sock *xs = fwd;
3543
3544 err = __xsk_map_redirect(map, xdp, xs);
3545 return err;
3546 }
3547 default:
3548 break;
9c270af3 3549 }
e4a8e817 3550 return 0;
814abfab
JF
3551}
3552
11393cc9
JF
3553void xdp_do_flush_map(void)
3554{
0b19cc0a 3555 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
11393cc9
JF
3556 struct bpf_map *map = ri->map_to_flush;
3557
11393cc9 3558 ri->map_to_flush = NULL;
9c270af3
JDB
3559 if (map) {
3560 switch (map->map_type) {
3561 case BPF_MAP_TYPE_DEVMAP:
3562 __dev_map_flush(map);
3563 break;
3564 case BPF_MAP_TYPE_CPUMAP:
3565 __cpu_map_flush(map);
3566 break;
1b1a251c
BT
3567 case BPF_MAP_TYPE_XSKMAP:
3568 __xsk_map_flush(map);
3569 break;
9c270af3
JDB
3570 default:
3571 break;
3572 }
3573 }
11393cc9
JF
3574}
3575EXPORT_SYMBOL_GPL(xdp_do_flush_map);
3576
2a68d85f 3577static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
9c270af3
JDB
3578{
3579 switch (map->map_type) {
3580 case BPF_MAP_TYPE_DEVMAP:
3581 return __dev_map_lookup_elem(map, index);
3582 case BPF_MAP_TYPE_CPUMAP:
3583 return __cpu_map_lookup_elem(map, index);
1b1a251c
BT
3584 case BPF_MAP_TYPE_XSKMAP:
3585 return __xsk_map_lookup_elem(map, index);
9c270af3
JDB
3586 default:
3587 return NULL;
3588 }
3589}
3590
f6069b9a 3591void bpf_clear_redirect_map(struct bpf_map *map)
7c300131 3592{
f6069b9a
DB
3593 struct bpf_redirect_info *ri;
3594 int cpu;
3595
3596 for_each_possible_cpu(cpu) {
3597 ri = per_cpu_ptr(&bpf_redirect_info, cpu);
 3598		/* Avoid polluting a remote cacheline with writes if
 3599		 * not needed. Once we pass this test, we still need the
 3600		 * cmpxchg() to make sure the map pointer hasn't been
 3601		 * changed in the meantime by a remote CPU.
 3602		 */
3603 if (unlikely(READ_ONCE(ri->map) == map))
3604 cmpxchg(&ri->map, map, NULL);
3605 }
7c300131
DB
3606}
3607
e4a8e817 3608static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
47b123ed
JDB
3609 struct bpf_prog *xdp_prog, struct bpf_map *map,
3610 struct bpf_redirect_info *ri)
97f91a7c 3611{
11393cc9 3612 u32 index = ri->ifindex;
9c270af3 3613 void *fwd = NULL;
4c03bdd7 3614 int err;
97f91a7c
JF
3615
3616 ri->ifindex = 0;
f6069b9a 3617 WRITE_ONCE(ri->map, NULL);
97f91a7c 3618
9c270af3 3619 fwd = __xdp_map_lookup_elem(map, index);
2a68d85f 3620 if (unlikely(!fwd)) {
4c03bdd7 3621 err = -EINVAL;
f5836ca5 3622 goto err;
4c03bdd7 3623 }
e1302542 3624 if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
11393cc9
JF
3625 xdp_do_flush_map();
3626
9c270af3 3627 err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
f5836ca5
JDB
3628 if (unlikely(err))
3629 goto err;
3630
3631 ri->map_to_flush = map;
59a30896 3632 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
f5836ca5
JDB
3633 return 0;
3634err:
59a30896 3635 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
97f91a7c
JF
3636 return err;
3637}
3638
5acaee0a
JF
3639int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
3640 struct bpf_prog *xdp_prog)
814abfab 3641{
0b19cc0a 3642 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
f6069b9a 3643 struct bpf_map *map = READ_ONCE(ri->map);
814abfab 3644
2a68d85f 3645 if (likely(map))
47b123ed 3646 return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
97f91a7c 3647
47b123ed 3648 return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
814abfab
JF
3649}
3650EXPORT_SYMBOL_GPL(xdp_do_redirect);
3651
c060bc61
XS
3652static int xdp_do_generic_redirect_map(struct net_device *dev,
3653 struct sk_buff *skb,
02671e23 3654 struct xdp_buff *xdp,
f6069b9a
DB
3655 struct bpf_prog *xdp_prog,
3656 struct bpf_map *map)
6103aa96 3657{
0b19cc0a 3658 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
eb48d682 3659 u32 index = ri->ifindex;
02671e23 3660 void *fwd = NULL;
2facaad6 3661 int err = 0;
6103aa96 3662
6103aa96 3663 ri->ifindex = 0;
f6069b9a 3664 WRITE_ONCE(ri->map, NULL);
96c5508e 3665
9c270af3 3666 fwd = __xdp_map_lookup_elem(map, index);
2facaad6
JDB
3667 if (unlikely(!fwd)) {
3668 err = -EINVAL;
f5836ca5 3669 goto err;
6103aa96
JF
3670 }
3671
9c270af3 3672 if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
6d5fc195
TM
3673 struct bpf_dtab_netdev *dst = fwd;
3674
3675 err = dev_map_generic_redirect(dst, skb, xdp_prog);
3676 if (unlikely(err))
9c270af3 3677 goto err;
02671e23
BT
3678 } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
3679 struct xdp_sock *xs = fwd;
3680
3681 err = xsk_generic_rcv(xs, xdp);
3682 if (err)
3683 goto err;
3684 consume_skb(skb);
9c270af3
JDB
3685 } else {
3686 /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
3687 err = -EBADRQC;
f5836ca5 3688 goto err;
2facaad6 3689 }
6103aa96 3690
9c270af3
JDB
3691 _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3692 return 0;
3693err:
3694 _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3695 return err;
3696}
3697
3698int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
02671e23 3699 struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
9c270af3 3700{
0b19cc0a 3701 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
f6069b9a 3702 struct bpf_map *map = READ_ONCE(ri->map);
9c270af3
JDB
3703 u32 index = ri->ifindex;
3704 struct net_device *fwd;
3705 int err = 0;
3706
f6069b9a
DB
3707 if (map)
3708 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
3709 map);
9c270af3
JDB
3710 ri->ifindex = 0;
3711 fwd = dev_get_by_index_rcu(dev_net(dev), index);
3712 if (unlikely(!fwd)) {
3713 err = -EINVAL;
f5836ca5 3714 goto err;
2facaad6
JDB
3715 }
3716
d8d7218a
TM
3717 err = xdp_ok_fwd_dev(fwd, skb->len);
3718 if (unlikely(err))
9c270af3
JDB
3719 goto err;
3720
2facaad6 3721 skb->dev = fwd;
9c270af3 3722 _trace_xdp_redirect(dev, xdp_prog, index);
02671e23 3723 generic_xdp_tx(skb, xdp_prog);
f5836ca5
JDB
3724 return 0;
3725err:
9c270af3 3726 _trace_xdp_redirect_err(dev, xdp_prog, index, err);
2facaad6 3727 return err;
6103aa96
JF
3728}
3729EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
3730
814abfab
JF
3731BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
3732{
0b19cc0a 3733 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
814abfab
JF
3734
3735 if (unlikely(flags))
3736 return XDP_ABORTED;
3737
3738 ri->ifindex = ifindex;
3739 ri->flags = flags;
f6069b9a 3740 WRITE_ONCE(ri->map, NULL);
e4a8e817 3741
814abfab
JF
3742 return XDP_REDIRECT;
3743}
3744
3745static const struct bpf_func_proto bpf_xdp_redirect_proto = {
3746 .func = bpf_xdp_redirect,
3747 .gpl_only = false,
3748 .ret_type = RET_INTEGER,
3749 .arg1_type = ARG_ANYTHING,
3750 .arg2_type = ARG_ANYTHING,
3751};
3752
f6069b9a
DB
3753BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
3754 u64, flags)
e4a8e817 3755{
0b19cc0a 3756 struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
e4a8e817
DB
3757
3758 if (unlikely(flags))
3759 return XDP_ABORTED;
3760
3761 ri->ifindex = ifindex;
3762 ri->flags = flags;
f6069b9a 3763 WRITE_ONCE(ri->map, map);
e4a8e817
DB
3764
3765 return XDP_REDIRECT;
3766}
3767
3768static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
3769 .func = bpf_xdp_redirect_map,
3770 .gpl_only = false,
3771 .ret_type = RET_INTEGER,
3772 .arg1_type = ARG_CONST_MAP_PTR,
3773 .arg2_type = ARG_ANYTHING,
3774 .arg3_type = ARG_ANYTHING,
3775};
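/* Usage sketch (editorial addition, not part of filter.c): how an XDP
 * program drives the two redirect helpers wired up above. Assumes libbpf's
 * <bpf/bpf_helpers.h>; the map name "tx_ports" and the literal ifindex are
 * illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);		/* ifindex of the egress device */
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
	/* Fast path: sets ri->map, later consumed by xdp_do_redirect_map() */
	return bpf_redirect_map(&tx_ports, ctx->rx_queue_index, 0);
}

SEC("xdp")
int xdp_redirect_prog(struct xdp_md *ctx)
{
	/* Slow path: only ri->ifindex is set, xdp_do_redirect_slow() runs */
	return bpf_redirect(3 /* illustrative ifindex */, 0);
}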
3776
555c8a86 3777static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
aa7145c1 3778 unsigned long off, unsigned long len)
555c8a86 3779{
aa7145c1 3780 void *ptr = skb_header_pointer(skb, off, len, dst_buff);
555c8a86
DB
3781
3782 if (unlikely(!ptr))
3783 return len;
3784 if (ptr != dst_buff)
3785 memcpy(dst_buff, ptr, len);
3786
3787 return 0;
3788}
3789
f3694e00
DB
3790BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
3791 u64, flags, void *, meta, u64, meta_size)
555c8a86 3792{
555c8a86 3793 u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
555c8a86
DB
3794
3795 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
3796 return -EINVAL;
3797 if (unlikely(skb_size > skb->len))
3798 return -EFAULT;
3799
3800 return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
3801 bpf_skb_copy);
3802}
3803
3804static const struct bpf_func_proto bpf_skb_event_output_proto = {
3805 .func = bpf_skb_event_output,
3806 .gpl_only = true,
3807 .ret_type = RET_INTEGER,
3808 .arg1_type = ARG_PTR_TO_CTX,
3809 .arg2_type = ARG_CONST_MAP_PTR,
3810 .arg3_type = ARG_ANYTHING,
39f19ebb 3811 .arg4_type = ARG_PTR_TO_MEM,
1728a4f2 3812 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
555c8a86
DB
3813};
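/* Usage sketch (editorial addition, not part of filter.c): emitting
 * metadata plus the first packet bytes through this helper (exposed to
 * programs as bpf_perf_event_output()). The upper 32 bits of the flags
 * word carry how many skb bytes to append, which is exactly what the
 * BPF_F_CTXLEN_MASK checks above enforce. Struct and map names are
 * illustrative.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(__u32));
} events SEC(".maps");

struct event_meta {
	__u32 ifindex;
	__u32 pkt_len;
};

SEC("classifier")
int sample_packet(struct __sk_buff *skb)
{
	struct event_meta meta = {
		.ifindex = skb->ifindex,
		.pkt_len = skb->len,
	};
	__u64 sample_len = skb->len < 64 ? skb->len : 64;

	/* append sample_len bytes of payload after the metadata */
	bpf_perf_event_output(skb, &events,
			      (sample_len << 32) | BPF_F_CURRENT_CPU,
			      &meta, sizeof(meta));
	return TC_ACT_OK;
}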
3814
c6c33454
DB
3815static unsigned short bpf_tunnel_key_af(u64 flags)
3816{
3817 return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
3818}
3819
f3694e00
DB
3820BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
3821 u32, size, u64, flags)
d3aa45ce 3822{
c6c33454
DB
3823 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
3824 u8 compat[sizeof(struct bpf_tunnel_key)];
074f528e
DB
3825 void *to_orig = to;
3826 int err;
d3aa45ce 3827
074f528e
DB
3828 if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
3829 err = -EINVAL;
3830 goto err_clear;
3831 }
3832 if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
3833 err = -EPROTO;
3834 goto err_clear;
3835 }
c6c33454 3836 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
074f528e 3837 err = -EINVAL;
c6c33454 3838 switch (size) {
4018ab18 3839 case offsetof(struct bpf_tunnel_key, tunnel_label):
c0e760c9 3840 case offsetof(struct bpf_tunnel_key, tunnel_ext):
4018ab18 3841 goto set_compat;
c6c33454
DB
3842 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3843 /* Fixup deprecated structure layouts here, so we have
3844 * a common path later on.
3845 */
3846 if (ip_tunnel_info_af(info) != AF_INET)
074f528e 3847 goto err_clear;
4018ab18 3848set_compat:
c6c33454
DB
3849 to = (struct bpf_tunnel_key *)compat;
3850 break;
3851 default:
074f528e 3852 goto err_clear;
c6c33454
DB
3853 }
3854 }
d3aa45ce
AS
3855
3856 to->tunnel_id = be64_to_cpu(info->key.tun_id);
c6c33454
DB
3857 to->tunnel_tos = info->key.tos;
3858 to->tunnel_ttl = info->key.ttl;
1fbc2e0c 3859 to->tunnel_ext = 0;
c6c33454 3860
4018ab18 3861 if (flags & BPF_F_TUNINFO_IPV6) {
c6c33454
DB
3862 memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
3863 sizeof(to->remote_ipv6));
4018ab18
DB
3864 to->tunnel_label = be32_to_cpu(info->key.label);
3865 } else {
c6c33454 3866 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
1fbc2e0c
DB
3867 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
3868 to->tunnel_label = 0;
4018ab18 3869 }
c6c33454
DB
3870
3871 if (unlikely(size != sizeof(struct bpf_tunnel_key)))
074f528e 3872 memcpy(to_orig, to, size);
d3aa45ce
AS
3873
3874 return 0;
074f528e
DB
3875err_clear:
3876 memset(to_orig, 0, size);
3877 return err;
d3aa45ce
AS
3878}
3879
577c50aa 3880static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
d3aa45ce
AS
3881 .func = bpf_skb_get_tunnel_key,
3882 .gpl_only = false,
3883 .ret_type = RET_INTEGER,
3884 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3885 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
3886 .arg3_type = ARG_CONST_SIZE,
d3aa45ce
AS
3887 .arg4_type = ARG_ANYTHING,
3888};
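/* Usage sketch (editorial addition, not part of filter.c): reading
 * receive-side tunnel metadata on a collect_md tunnel device from a tc
 * program. The VNI value compared against is illustrative.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int read_tunnel_key(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key;

	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0) < 0)
		return TC_ACT_SHOT;	/* no tunnel metadata attached */

	/* key.tunnel_id (VNI) and key.remote_ipv4 are now usable */
	return key.tunnel_id == 42 ? TC_ACT_OK : TC_ACT_SHOT;
}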
3889
f3694e00 3890BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
14ca0751 3891{
14ca0751 3892 const struct ip_tunnel_info *info = skb_tunnel_info(skb);
074f528e 3893 int err;
14ca0751
DB
3894
3895 if (unlikely(!info ||
074f528e
DB
3896 !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
3897 err = -ENOENT;
3898 goto err_clear;
3899 }
3900 if (unlikely(size < info->options_len)) {
3901 err = -ENOMEM;
3902 goto err_clear;
3903 }
14ca0751
DB
3904
3905 ip_tunnel_info_opts_get(to, info);
074f528e
DB
3906 if (size > info->options_len)
3907 memset(to + info->options_len, 0, size - info->options_len);
14ca0751
DB
3908
3909 return info->options_len;
074f528e
DB
3910err_clear:
3911 memset(to, 0, size);
3912 return err;
14ca0751
DB
3913}
3914
3915static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
3916 .func = bpf_skb_get_tunnel_opt,
3917 .gpl_only = false,
3918 .ret_type = RET_INTEGER,
3919 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3920 .arg2_type = ARG_PTR_TO_UNINIT_MEM,
3921 .arg3_type = ARG_CONST_SIZE,
14ca0751
DB
3922};
3923
d3aa45ce
AS
3924static struct metadata_dst __percpu *md_dst;
3925
f3694e00
DB
3926BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
3927 const struct bpf_tunnel_key *, from, u32, size, u64, flags)
d3aa45ce 3928{
d3aa45ce 3929 struct metadata_dst *md = this_cpu_ptr(md_dst);
c6c33454 3930 u8 compat[sizeof(struct bpf_tunnel_key)];
d3aa45ce
AS
3931 struct ip_tunnel_info *info;
3932
22080870 3933 if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
77a5196a 3934 BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
d3aa45ce 3935 return -EINVAL;
c6c33454
DB
3936 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3937 switch (size) {
4018ab18 3938 case offsetof(struct bpf_tunnel_key, tunnel_label):
c0e760c9 3939 case offsetof(struct bpf_tunnel_key, tunnel_ext):
c6c33454
DB
3940 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3941 /* Fixup deprecated structure layouts here, so we have
3942 * a common path later on.
3943 */
3944 memcpy(compat, from, size);
3945 memset(compat + size, 0, sizeof(compat) - size);
f3694e00 3946 from = (const struct bpf_tunnel_key *) compat;
c6c33454
DB
3947 break;
3948 default:
3949 return -EINVAL;
3950 }
3951 }
c0e760c9
DB
3952 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
3953 from->tunnel_ext))
4018ab18 3954 return -EINVAL;
d3aa45ce
AS
3955
3956 skb_dst_drop(skb);
3957 dst_hold((struct dst_entry *) md);
3958 skb_dst_set(skb, (struct dst_entry *) md);
3959
3960 info = &md->u.tun_info;
5540fbf4 3961 memset(info, 0, sizeof(*info));
d3aa45ce 3962 info->mode = IP_TUNNEL_INFO_TX;
c6c33454 3963
db3c6139 3964 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
22080870
DB
3965 if (flags & BPF_F_DONT_FRAGMENT)
3966 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
792f3dd6
WT
3967 if (flags & BPF_F_ZERO_CSUM_TX)
3968 info->key.tun_flags &= ~TUNNEL_CSUM;
77a5196a
WT
3969 if (flags & BPF_F_SEQ_NUMBER)
3970 info->key.tun_flags |= TUNNEL_SEQ;
22080870 3971
d3aa45ce 3972 info->key.tun_id = cpu_to_be64(from->tunnel_id);
c6c33454
DB
3973 info->key.tos = from->tunnel_tos;
3974 info->key.ttl = from->tunnel_ttl;
3975
3976 if (flags & BPF_F_TUNINFO_IPV6) {
3977 info->mode |= IP_TUNNEL_INFO_IPV6;
3978 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
3979 sizeof(from->remote_ipv6));
4018ab18
DB
3980 info->key.label = cpu_to_be32(from->tunnel_label) &
3981 IPV6_FLOWLABEL_MASK;
c6c33454
DB
3982 } else {
3983 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
3984 }
d3aa45ce
AS
3985
3986 return 0;
3987}
3988
577c50aa 3989static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
d3aa45ce
AS
3990 .func = bpf_skb_set_tunnel_key,
3991 .gpl_only = false,
3992 .ret_type = RET_INTEGER,
3993 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3994 .arg2_type = ARG_PTR_TO_MEM,
3995 .arg3_type = ARG_CONST_SIZE,
d3aa45ce
AS
3996 .arg4_type = ARG_ANYTHING,
3997};
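/* Usage sketch (editorial addition, not part of filter.c): the transmit
 * side counterpart, installing tunnel metadata before the packet reaches a
 * collect_md tunnel device. Addresses and the VNI are illustrative; note
 * remote_ipv4 is given in host byte order, the helper converts it.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int set_tunnel_key(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {
		.tunnel_id   = 42,		/* VNI */
		.remote_ipv4 = 0x0a000001,	/* 10.0.0.1 */
		.tunnel_ttl  = 64,
	};

	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_ZERO_CSUM_TX) < 0)
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}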
3998
f3694e00
DB
3999BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
4000 const u8 *, from, u32, size)
14ca0751 4001{
14ca0751
DB
4002 struct ip_tunnel_info *info = skb_tunnel_info(skb);
4003 const struct metadata_dst *md = this_cpu_ptr(md_dst);
4004
4005 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
4006 return -EINVAL;
fca5fdf6 4007 if (unlikely(size > IP_TUNNEL_OPTS_MAX))
14ca0751
DB
4008 return -ENOMEM;
4009
256c87c1 4010 ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
14ca0751
DB
4011
4012 return 0;
4013}
4014
4015static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
4016 .func = bpf_skb_set_tunnel_opt,
4017 .gpl_only = false,
4018 .ret_type = RET_INTEGER,
4019 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
4020 .arg2_type = ARG_PTR_TO_MEM,
4021 .arg3_type = ARG_CONST_SIZE,
14ca0751
DB
4022};
4023
4024static const struct bpf_func_proto *
4025bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
d3aa45ce
AS
4026{
4027 if (!md_dst) {
d66f2b91
JK
4028 struct metadata_dst __percpu *tmp;
4029
4030 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
4031 METADATA_IP_TUNNEL,
4032 GFP_KERNEL);
4033 if (!tmp)
d3aa45ce 4034 return NULL;
d66f2b91
JK
4035 if (cmpxchg(&md_dst, NULL, tmp))
4036 metadata_dst_free_percpu(tmp);
d3aa45ce 4037 }
14ca0751
DB
4038
4039 switch (which) {
4040 case BPF_FUNC_skb_set_tunnel_key:
4041 return &bpf_skb_set_tunnel_key_proto;
4042 case BPF_FUNC_skb_set_tunnel_opt:
4043 return &bpf_skb_set_tunnel_opt_proto;
4044 default:
4045 return NULL;
4046 }
d3aa45ce
AS
4047}
4048
f3694e00
DB
4049BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
4050 u32, idx)
4a482f34 4051{
4a482f34
MKL
4052 struct bpf_array *array = container_of(map, struct bpf_array, map);
4053 struct cgroup *cgrp;
4054 struct sock *sk;
4a482f34 4055
2d48c5f9 4056 sk = skb_to_full_sk(skb);
4a482f34
MKL
4057 if (!sk || !sk_fullsock(sk))
4058 return -ENOENT;
f3694e00 4059 if (unlikely(idx >= array->map.max_entries))
4a482f34
MKL
4060 return -E2BIG;
4061
f3694e00 4062 cgrp = READ_ONCE(array->ptrs[idx]);
4a482f34
MKL
4063 if (unlikely(!cgrp))
4064 return -EAGAIN;
4065
54fd9c2d 4066 return sk_under_cgroup_hierarchy(sk, cgrp);
4a482f34
MKL
4067}
4068
747ea55e
DB
4069static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
4070 .func = bpf_skb_under_cgroup,
4a482f34
MKL
4071 .gpl_only = false,
4072 .ret_type = RET_INTEGER,
4073 .arg1_type = ARG_PTR_TO_CTX,
4074 .arg2_type = ARG_CONST_MAP_PTR,
4075 .arg3_type = ARG_ANYTHING,
4076};
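/* Usage sketch (editorial addition, not part of filter.c): testing whether
 * the socket behind an skb sits under a cgroup2 hierarchy pinned into a
 * BPF_MAP_TYPE_CGROUP_ARRAY (populated from userspace with a cgroup
 * directory fd). Map name and index are illustrative.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} cgroup_map SEC(".maps");

SEC("classifier")
int drop_outside_cgroup(struct __sk_buff *skb)
{
	/* 1: under the hierarchy, 0: not, < 0: error (e.g. no full socket) */
	if (bpf_skb_under_cgroup(skb, &cgroup_map, 0) != 1)
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}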
4a482f34 4077
cb20b08e
DB
4078#ifdef CONFIG_SOCK_CGROUP_DATA
4079BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
4080{
4081 struct sock *sk = skb_to_full_sk(skb);
4082 struct cgroup *cgrp;
4083
4084 if (!sk || !sk_fullsock(sk))
4085 return 0;
4086
4087 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4088 return cgrp->kn->id.id;
4089}
4090
4091static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
4092 .func = bpf_skb_cgroup_id,
4093 .gpl_only = false,
4094 .ret_type = RET_INTEGER,
4095 .arg1_type = ARG_PTR_TO_CTX,
4096};
77236281
AI
4097
4098BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
4099 ancestor_level)
4100{
4101 struct sock *sk = skb_to_full_sk(skb);
4102 struct cgroup *ancestor;
4103 struct cgroup *cgrp;
4104
4105 if (!sk || !sk_fullsock(sk))
4106 return 0;
4107
4108 cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4109 ancestor = cgroup_ancestor(cgrp, ancestor_level);
4110 if (!ancestor)
4111 return 0;
4112
4113 return ancestor->kn->id.id;
4114}
4115
4116static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
4117 .func = bpf_skb_ancestor_cgroup_id,
4118 .gpl_only = false,
4119 .ret_type = RET_INTEGER,
4120 .arg1_type = ARG_PTR_TO_CTX,
4121 .arg2_type = ARG_ANYTHING,
4122};
cb20b08e
DB
4123#endif
4124
4de16969
DB
4125static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
4126 unsigned long off, unsigned long len)
4127{
4128 memcpy(dst_buff, src_buff + off, len);
4129 return 0;
4130}
4131
f3694e00
DB
4132BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
4133 u64, flags, void *, meta, u64, meta_size)
4de16969 4134{
4de16969 4135 u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4de16969
DB
4136
4137 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
4138 return -EINVAL;
4139 if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
4140 return -EFAULT;
4141
9c471370
MKL
4142 return bpf_event_output(map, flags, meta, meta_size, xdp->data,
4143 xdp_size, bpf_xdp_copy);
4de16969
DB
4144}
4145
4146static const struct bpf_func_proto bpf_xdp_event_output_proto = {
4147 .func = bpf_xdp_event_output,
4148 .gpl_only = true,
4149 .ret_type = RET_INTEGER,
4150 .arg1_type = ARG_PTR_TO_CTX,
4151 .arg2_type = ARG_CONST_MAP_PTR,
4152 .arg3_type = ARG_ANYTHING,
39f19ebb 4153 .arg4_type = ARG_PTR_TO_MEM,
1728a4f2 4154 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4de16969
DB
4155};
4156
91b8270f
CF
4157BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
4158{
4159 return skb->sk ? sock_gen_cookie(skb->sk) : 0;
4160}
4161
4162static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
4163 .func = bpf_get_socket_cookie,
4164 .gpl_only = false,
4165 .ret_type = RET_INTEGER,
4166 .arg1_type = ARG_PTR_TO_CTX,
4167};
4168
d692f113
AI
4169BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
4170{
4171 return sock_gen_cookie(ctx->sk);
4172}
4173
4174static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
4175 .func = bpf_get_socket_cookie_sock_addr,
4176 .gpl_only = false,
4177 .ret_type = RET_INTEGER,
4178 .arg1_type = ARG_PTR_TO_CTX,
4179};
4180
4181BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
4182{
4183 return sock_gen_cookie(ctx->sk);
4184}
4185
4186static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
4187 .func = bpf_get_socket_cookie_sock_ops,
4188 .gpl_only = false,
4189 .ret_type = RET_INTEGER,
4190 .arg1_type = ARG_PTR_TO_CTX,
4191};
4192
6acc5c29
CF
4193BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
4194{
4195 struct sock *sk = sk_to_full_sk(skb->sk);
4196 kuid_t kuid;
4197
4198 if (!sk || !sk_fullsock(sk))
4199 return overflowuid;
4200 kuid = sock_net_uid(sock_net(sk), sk);
4201 return from_kuid_munged(sock_net(sk)->user_ns, kuid);
4202}
4203
4204static const struct bpf_func_proto bpf_get_socket_uid_proto = {
4205 .func = bpf_get_socket_uid,
4206 .gpl_only = false,
4207 .ret_type = RET_INTEGER,
4208 .arg1_type = ARG_PTR_TO_CTX,
4209};
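/* Usage sketch (editorial addition, not part of filter.c): per-socket
 * accounting keyed by the stable socket cookie, with the owning UID as the
 * value; both helpers are implemented above. Map name is illustrative.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 4096);
	__type(key, __u64);		/* socket cookie */
	__type(value, __u32);		/* owning uid */
} sock_owner SEC(".maps");

SEC("cgroup_skb/egress")
int note_socket_owner(struct __sk_buff *skb)
{
	__u64 cookie = bpf_get_socket_cookie(skb);
	__u32 uid = bpf_get_socket_uid(skb);

	if (cookie)
		bpf_map_update_elem(&sock_owner, &cookie, &uid, BPF_ANY);
	return 1;	/* allow the packet */
}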
4210
a5a3a828
SV
4211BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock,
4212 struct bpf_map *, map, u64, flags, void *, data, u64, size)
4213{
4214 if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
4215 return -EINVAL;
4216
4217 return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
4218}
4219
4220static const struct bpf_func_proto bpf_sockopt_event_output_proto = {
4221 .func = bpf_sockopt_event_output,
4222 .gpl_only = true,
4223 .ret_type = RET_INTEGER,
4224 .arg1_type = ARG_PTR_TO_CTX,
4225 .arg2_type = ARG_CONST_MAP_PTR,
4226 .arg3_type = ARG_ANYTHING,
4227 .arg4_type = ARG_PTR_TO_MEM,
4228 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4229};
4230
8c4b4c7e
LB
4231BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4232 int, level, int, optname, char *, optval, int, optlen)
4233{
4234 struct sock *sk = bpf_sock->sk;
4235 int ret = 0;
4236 int val;
4237
4238 if (!sk_fullsock(sk))
4239 return -EINVAL;
4240
4241 if (level == SOL_SOCKET) {
4242 if (optlen != sizeof(int))
4243 return -EINVAL;
4244 val = *((int *)optval);
4245
 4246		/* Only some socket options are supported */
4247 switch (optname) {
4248 case SO_RCVBUF:
c9e45767 4249 val = min_t(u32, val, sysctl_rmem_max);
8c4b4c7e
LB
4250 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4251 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
4252 break;
4253 case SO_SNDBUF:
c9e45767 4254 val = min_t(u32, val, sysctl_wmem_max);
8c4b4c7e
LB
4255 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4256 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
4257 break;
76a9ebe8 4258 case SO_MAX_PACING_RATE: /* 32bit version */
e224c390
YC
4259 if (val != ~0U)
4260 cmpxchg(&sk->sk_pacing_status,
4261 SK_PACING_NONE,
4262 SK_PACING_NEEDED);
76a9ebe8 4263 sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
8c4b4c7e
LB
4264 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4265 sk->sk_max_pacing_rate);
4266 break;
4267 case SO_PRIORITY:
4268 sk->sk_priority = val;
4269 break;
4270 case SO_RCVLOWAT:
4271 if (val < 0)
4272 val = INT_MAX;
4273 sk->sk_rcvlowat = val ? : 1;
4274 break;
4275 case SO_MARK:
f4924f24
PO
4276 if (sk->sk_mark != val) {
4277 sk->sk_mark = val;
4278 sk_dst_reset(sk);
4279 }
8c4b4c7e
LB
4280 break;
4281 default:
4282 ret = -EINVAL;
4283 }
a5192c52 4284#ifdef CONFIG_INET
6f5c39fa
NS
4285 } else if (level == SOL_IP) {
4286 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4287 return -EINVAL;
4288
4289 val = *((int *)optval);
4290 /* Only some options are supported */
4291 switch (optname) {
4292 case IP_TOS:
4293 if (val < -1 || val > 0xff) {
4294 ret = -EINVAL;
4295 } else {
4296 struct inet_sock *inet = inet_sk(sk);
4297
4298 if (val == -1)
4299 val = 0;
4300 inet->tos = val;
4301 }
4302 break;
4303 default:
4304 ret = -EINVAL;
4305 }
6f9bd3d7
LB
4306#if IS_ENABLED(CONFIG_IPV6)
4307 } else if (level == SOL_IPV6) {
4308 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4309 return -EINVAL;
4310
4311 val = *((int *)optval);
4312 /* Only some options are supported */
4313 switch (optname) {
4314 case IPV6_TCLASS:
4315 if (val < -1 || val > 0xff) {
4316 ret = -EINVAL;
4317 } else {
4318 struct ipv6_pinfo *np = inet6_sk(sk);
4319
4320 if (val == -1)
4321 val = 0;
4322 np->tclass = val;
4323 }
4324 break;
4325 default:
4326 ret = -EINVAL;
4327 }
4328#endif
8c4b4c7e
LB
4329 } else if (level == SOL_TCP &&
4330 sk->sk_prot->setsockopt == tcp_setsockopt) {
91b5b21c
LB
4331 if (optname == TCP_CONGESTION) {
4332 char name[TCP_CA_NAME_MAX];
ebfa00c5 4333 bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
91b5b21c
LB
4334
4335 strncpy(name, optval, min_t(long, optlen,
4336 TCP_CA_NAME_MAX-1));
4337 name[TCP_CA_NAME_MAX-1] = 0;
6f9bd3d7
LB
4338 ret = tcp_set_congestion_control(sk, name, false,
4339 reinit);
91b5b21c 4340 } else {
fc747810
LB
4341 struct tcp_sock *tp = tcp_sk(sk);
4342
4343 if (optlen != sizeof(int))
4344 return -EINVAL;
4345
4346 val = *((int *)optval);
4347 /* Only some options are supported */
4348 switch (optname) {
4349 case TCP_BPF_IW:
31aa6503 4350 if (val <= 0 || tp->data_segs_out > tp->syn_data)
fc747810
LB
4351 ret = -EINVAL;
4352 else
4353 tp->snd_cwnd = val;
4354 break;
13bf9641
LB
4355 case TCP_BPF_SNDCWND_CLAMP:
4356 if (val <= 0) {
4357 ret = -EINVAL;
4358 } else {
4359 tp->snd_cwnd_clamp = val;
4360 tp->snd_ssthresh = val;
4361 }
6d3f06a0 4362 break;
1e215300
NS
4363 case TCP_SAVE_SYN:
4364 if (val < 0 || val > 1)
4365 ret = -EINVAL;
4366 else
4367 tp->save_syn = val;
4368 break;
fc747810
LB
4369 default:
4370 ret = -EINVAL;
4371 }
91b5b21c 4372 }
91b5b21c 4373#endif
8c4b4c7e
LB
4374 } else {
4375 ret = -EINVAL;
4376 }
4377 return ret;
4378}
4379
4380static const struct bpf_func_proto bpf_setsockopt_proto = {
4381 .func = bpf_setsockopt,
cd86d1fd 4382 .gpl_only = false,
8c4b4c7e
LB
4383 .ret_type = RET_INTEGER,
4384 .arg1_type = ARG_PTR_TO_CTX,
4385 .arg2_type = ARG_ANYTHING,
4386 .arg3_type = ARG_ANYTHING,
4387 .arg4_type = ARG_PTR_TO_MEM,
4388 .arg5_type = ARG_CONST_SIZE,
4389};
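/* Usage sketch (editorial addition, not part of filter.c): a sockops
 * program switching established connections to another congestion control
 * and clamping the receive buffer through the helper above. The constants
 * are spelled out to keep the sketch self-contained (values for common
 * architectures); "cubic" must be built in or already loaded.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SOL_SOCKET	1
#define SO_RCVBUF	8
#define SOL_TCP		6
#define TCP_CONGESTION	13

SEC("sockops")
int tune_sock(struct bpf_sock_ops *skops)
{
	char cc[] = "cubic";
	int rcvbuf = 1 << 20;

	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION, cc, sizeof(cc));
		bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
			       &rcvbuf, sizeof(rcvbuf));
		break;
	}
	return 1;
}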
4390
cd86d1fd
LB
4391BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4392 int, level, int, optname, char *, optval, int, optlen)
4393{
4394 struct sock *sk = bpf_sock->sk;
cd86d1fd
LB
4395
4396 if (!sk_fullsock(sk))
4397 goto err_clear;
cd86d1fd
LB
4398#ifdef CONFIG_INET
4399 if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
1edb6e03
AR
4400 struct inet_connection_sock *icsk;
4401 struct tcp_sock *tp;
4402
1e215300
NS
4403 switch (optname) {
4404 case TCP_CONGESTION:
4405 icsk = inet_csk(sk);
cd86d1fd
LB
4406
4407 if (!icsk->icsk_ca_ops || optlen <= 1)
4408 goto err_clear;
4409 strncpy(optval, icsk->icsk_ca_ops->name, optlen);
4410 optval[optlen - 1] = 0;
1e215300
NS
4411 break;
4412 case TCP_SAVED_SYN:
4413 tp = tcp_sk(sk);
4414
4415 if (optlen <= 0 || !tp->saved_syn ||
4416 optlen > tp->saved_syn[0])
4417 goto err_clear;
4418 memcpy(optval, tp->saved_syn + 1, optlen);
4419 break;
4420 default:
cd86d1fd
LB
4421 goto err_clear;
4422 }
6f5c39fa
NS
4423 } else if (level == SOL_IP) {
4424 struct inet_sock *inet = inet_sk(sk);
4425
4426 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4427 goto err_clear;
4428
4429 /* Only some options are supported */
4430 switch (optname) {
4431 case IP_TOS:
4432 *((int *)optval) = (int)inet->tos;
4433 break;
4434 default:
4435 goto err_clear;
4436 }
6f9bd3d7
LB
4437#if IS_ENABLED(CONFIG_IPV6)
4438 } else if (level == SOL_IPV6) {
4439 struct ipv6_pinfo *np = inet6_sk(sk);
4440
4441 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4442 goto err_clear;
4443
4444 /* Only some options are supported */
4445 switch (optname) {
4446 case IPV6_TCLASS:
4447 *((int *)optval) = (int)np->tclass;
4448 break;
4449 default:
4450 goto err_clear;
4451 }
4452#endif
cd86d1fd
LB
4453 } else {
4454 goto err_clear;
4455 }
aa2bc739 4456 return 0;
cd86d1fd
LB
4457#endif
4458err_clear:
4459 memset(optval, 0, optlen);
4460 return -EINVAL;
4461}
4462
4463static const struct bpf_func_proto bpf_getsockopt_proto = {
4464 .func = bpf_getsockopt,
4465 .gpl_only = false,
4466 .ret_type = RET_INTEGER,
4467 .arg1_type = ARG_PTR_TO_CTX,
4468 .arg2_type = ARG_ANYTHING,
4469 .arg3_type = ARG_ANYTHING,
4470 .arg4_type = ARG_PTR_TO_UNINIT_MEM,
4471 .arg5_type = ARG_CONST_SIZE,
4472};
4473
b13d8807
LB
4474BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
4475 int, argval)
4476{
4477 struct sock *sk = bpf_sock->sk;
4478 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
4479
a7dcdf6e 4480 if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
b13d8807
LB
4481 return -EINVAL;
4482
725721a6 4483 tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
b13d8807
LB
4484
4485 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
b13d8807
LB
4486}
4487
4488static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
4489 .func = bpf_sock_ops_cb_flags_set,
4490 .gpl_only = false,
4491 .ret_type = RET_INTEGER,
4492 .arg1_type = ARG_PTR_TO_CTX,
4493 .arg2_type = ARG_ANYTHING,
4494};
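/* Usage sketch (editorial addition, not part of filter.c): opting in to
 * RTO and retransmission callbacks once a connection is established; later
 * events re-enter the program with op set to BPF_SOCK_OPS_RTO_CB or
 * BPF_SOCK_OPS_RETRANS_CB.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sockops")
int enable_cb(struct bpf_sock_ops *skops)
{
	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
		bpf_sock_ops_cb_flags_set(skops,
					  BPF_SOCK_OPS_RTO_CB_FLAG |
					  BPF_SOCK_OPS_RETRANS_CB_FLAG);
	return 1;
}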
4495
d74bad4e
AI
4496const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
4497EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
4498
4499BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
4500 int, addr_len)
4501{
4502#ifdef CONFIG_INET
4503 struct sock *sk = ctx->sk;
4504 int err;
4505
 4506	/* Binding to a port can be expensive, so it's prohibited in the helper.
 4507	 * Only binding to an IP address is supported.
 4508	 */
4509 err = -EINVAL;
ba024f25
TH
4510 if (addr_len < offsetofend(struct sockaddr, sa_family))
4511 return err;
d74bad4e
AI
4512 if (addr->sa_family == AF_INET) {
4513 if (addr_len < sizeof(struct sockaddr_in))
4514 return err;
4515 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
4516 return err;
4517 return __inet_bind(sk, addr, addr_len, true, false);
4518#if IS_ENABLED(CONFIG_IPV6)
4519 } else if (addr->sa_family == AF_INET6) {
4520 if (addr_len < SIN6_LEN_RFC2133)
4521 return err;
4522 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
4523 return err;
 4524		/* ipv6_bpf_stub cannot be NULL here, since this helper is
 4525		 * called from the bpf_cgroup_inet6_connect hook and ipv6 is
 4526		 * already loaded.
 4527		 */
4527 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
4528#endif /* CONFIG_IPV6 */
4529 }
4530#endif /* CONFIG_INET */
4531
4532 return -EAFNOSUPPORT;
4533}
4534
4535static const struct bpf_func_proto bpf_bind_proto = {
4536 .func = bpf_bind,
4537 .gpl_only = false,
4538 .ret_type = RET_INTEGER,
4539 .arg1_type = ARG_PTR_TO_CTX,
4540 .arg2_type = ARG_PTR_TO_MEM,
4541 .arg3_type = ARG_CONST_SIZE,
4542};
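/* Usage sketch (editorial addition, not part of filter.c): forcing a
 * source address from a cgroup connect4 hook via the helper above. Note
 * sin_port stays 0: as the comment in bpf_bind() explains, binding to a
 * specific port is rejected. The address is illustrative.
 */
#include <sys/socket.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("cgroup/connect4")
int bind_src_addr(struct bpf_sock_addr *ctx)
{
	struct sockaddr_in sa = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = bpf_htonl(0x0a000002),	/* 10.0.0.2 */
	};

	/* on failure the connect simply proceeds with the default source */
	bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa));
	return 1;	/* allow the connect() */
}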
4543
12bed760
EB
4544#ifdef CONFIG_XFRM
4545BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
4546 struct bpf_xfrm_state *, to, u32, size, u64, flags)
4547{
4548 const struct sec_path *sp = skb_sec_path(skb);
4549 const struct xfrm_state *x;
4550
4551 if (!sp || unlikely(index >= sp->len || flags))
4552 goto err_clear;
4553
4554 x = sp->xvec[index];
4555
4556 if (unlikely(size != sizeof(struct bpf_xfrm_state)))
4557 goto err_clear;
4558
4559 to->reqid = x->props.reqid;
4560 to->spi = x->id.spi;
4561 to->family = x->props.family;
1fbc2e0c
DB
4562 to->ext = 0;
4563
12bed760
EB
4564 if (to->family == AF_INET6) {
4565 memcpy(to->remote_ipv6, x->props.saddr.a6,
4566 sizeof(to->remote_ipv6));
4567 } else {
4568 to->remote_ipv4 = x->props.saddr.a4;
1fbc2e0c 4569 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
12bed760
EB
4570 }
4571
4572 return 0;
4573err_clear:
4574 memset(to, 0, size);
4575 return -EINVAL;
4576}
4577
4578static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
4579 .func = bpf_skb_get_xfrm_state,
4580 .gpl_only = false,
4581 .ret_type = RET_INTEGER,
4582 .arg1_type = ARG_PTR_TO_CTX,
4583 .arg2_type = ARG_ANYTHING,
4584 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
4585 .arg4_type = ARG_CONST_SIZE,
4586 .arg5_type = ARG_ANYTHING,
4587};
4588#endif
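/* Usage sketch (editorial addition, not part of filter.c): inspecting the
 * IPsec state a decrypted skb came from; index 0 is the first state on the
 * sec_path. The reqid being matched is illustrative.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int check_xfrm(struct __sk_buff *skb)
{
	struct bpf_xfrm_state st = {};

	if (bpf_skb_get_xfrm_state(skb, 0, &st, sizeof(st), 0) < 0)
		return TC_ACT_OK;	/* not IPsec traffic */

	return st.reqid == 1 ? TC_ACT_OK : TC_ACT_SHOT;
}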
4589
87f5fc7e
DA
4590#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
4591static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
4592 const struct neighbour *neigh,
4593 const struct net_device *dev)
4594{
4595 memcpy(params->dmac, neigh->ha, ETH_ALEN);
4596 memcpy(params->smac, dev->dev_addr, ETH_ALEN);
4597 params->h_vlan_TCI = 0;
4598 params->h_vlan_proto = 0;
4c79579b 4599 params->ifindex = dev->ifindex;
87f5fc7e 4600
4c79579b 4601 return 0;
87f5fc7e
DA
4602}
4603#endif
4604
4605#if IS_ENABLED(CONFIG_INET)
4606static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4f74fede 4607 u32 flags, bool check_mtu)
87f5fc7e 4608{
eba618ab 4609 struct fib_nh_common *nhc;
87f5fc7e
DA
4610 struct in_device *in_dev;
4611 struct neighbour *neigh;
4612 struct net_device *dev;
4613 struct fib_result res;
87f5fc7e
DA
4614 struct flowi4 fl4;
4615 int err;
4f74fede 4616 u32 mtu;
87f5fc7e
DA
4617
4618 dev = dev_get_by_index_rcu(net, params->ifindex);
4619 if (unlikely(!dev))
4620 return -ENODEV;
4621
4622 /* verify forwarding is enabled on this interface */
4623 in_dev = __in_dev_get_rcu(dev);
4624 if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
4c79579b 4625 return BPF_FIB_LKUP_RET_FWD_DISABLED;
87f5fc7e
DA
4626
4627 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4628 fl4.flowi4_iif = 1;
4629 fl4.flowi4_oif = params->ifindex;
4630 } else {
4631 fl4.flowi4_iif = params->ifindex;
4632 fl4.flowi4_oif = 0;
4633 }
4634 fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
4635 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
4636 fl4.flowi4_flags = 0;
4637
4638 fl4.flowi4_proto = params->l4_protocol;
4639 fl4.daddr = params->ipv4_dst;
4640 fl4.saddr = params->ipv4_src;
4641 fl4.fl4_sport = params->sport;
4642 fl4.fl4_dport = params->dport;
4643
4644 if (flags & BPF_FIB_LOOKUP_DIRECT) {
4645 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4646 struct fib_table *tb;
4647
4648 tb = fib_get_table(net, tbid);
4649 if (unlikely(!tb))
4c79579b 4650 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e
DA
4651
4652 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
4653 } else {
4654 fl4.flowi4_mark = 0;
4655 fl4.flowi4_secid = 0;
4656 fl4.flowi4_tun_key.tun_id = 0;
4657 fl4.flowi4_uid = sock_net_uid(net, NULL);
4658
4659 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
4660 }
4661
4c79579b
DA
4662 if (err) {
4663 /* map fib lookup errors to RTN_ type */
4664 if (err == -EINVAL)
4665 return BPF_FIB_LKUP_RET_BLACKHOLE;
4666 if (err == -EHOSTUNREACH)
4667 return BPF_FIB_LKUP_RET_UNREACHABLE;
4668 if (err == -EACCES)
4669 return BPF_FIB_LKUP_RET_PROHIBIT;
4670
4671 return BPF_FIB_LKUP_RET_NOT_FWDED;
4672 }
4673
4674 if (res.type != RTN_UNICAST)
4675 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e
DA
4676
4677 if (res.fi->fib_nhs > 1)
4678 fib_select_path(net, &res, &fl4, NULL);
4679
4f74fede
DA
4680 if (check_mtu) {
4681 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
4682 if (params->tot_len > mtu)
4c79579b 4683 return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4f74fede
DA
4684 }
4685
eba618ab 4686 nhc = res.nhc;
87f5fc7e
DA
4687
4688 /* do not handle lwt encaps right now */
eba618ab 4689 if (nhc->nhc_lwtstate)
4c79579b 4690 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
87f5fc7e 4691
eba618ab 4692 dev = nhc->nhc_dev;
87f5fc7e
DA
4693
4694 params->rt_metric = res.fi->fib_priority;
4695
4696 /* xdp and cls_bpf programs are run in RCU-bh so
4697 * rcu_read_lock_bh is not needed here
4698 */
6f5f68d0
DA
4699 if (likely(nhc->nhc_gw_family != AF_INET6)) {
4700 if (nhc->nhc_gw_family)
4701 params->ipv4_dst = nhc->nhc_gw.ipv4;
4702
4703 neigh = __ipv4_neigh_lookup_noref(dev,
4704 (__force u32)params->ipv4_dst);
4705 } else {
4706 struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
4707
4708 params->family = AF_INET6;
4709 *dst = nhc->nhc_gw.ipv6;
4710 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
4711 }
4712
4c79579b
DA
4713 if (!neigh)
4714 return BPF_FIB_LKUP_RET_NO_NEIGH;
87f5fc7e 4715
4c79579b 4716 return bpf_fib_set_fwd_params(params, neigh, dev);
87f5fc7e
DA
4717}
4718#endif
4719
4720#if IS_ENABLED(CONFIG_IPV6)
4721static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4f74fede 4722 u32 flags, bool check_mtu)
87f5fc7e
DA
4723{
4724 struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
4725 struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
e55449e7 4726 struct fib6_result res = {};
87f5fc7e
DA
4727 struct neighbour *neigh;
4728 struct net_device *dev;
4729 struct inet6_dev *idev;
87f5fc7e
DA
4730 struct flowi6 fl6;
4731 int strict = 0;
effda4dd 4732 int oif, err;
4f74fede 4733 u32 mtu;
87f5fc7e
DA
4734
4735 /* link local addresses are never forwarded */
4736 if (rt6_need_strict(dst) || rt6_need_strict(src))
4c79579b 4737 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e
DA
4738
4739 dev = dev_get_by_index_rcu(net, params->ifindex);
4740 if (unlikely(!dev))
4741 return -ENODEV;
4742
4743 idev = __in6_dev_get_safely(dev);
4744 if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
4c79579b 4745 return BPF_FIB_LKUP_RET_FWD_DISABLED;
87f5fc7e
DA
4746
4747 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4748 fl6.flowi6_iif = 1;
4749 oif = fl6.flowi6_oif = params->ifindex;
4750 } else {
4751 oif = fl6.flowi6_iif = params->ifindex;
4752 fl6.flowi6_oif = 0;
4753 strict = RT6_LOOKUP_F_HAS_SADDR;
4754 }
bd3a08aa 4755 fl6.flowlabel = params->flowinfo;
87f5fc7e
DA
4756 fl6.flowi6_scope = 0;
4757 fl6.flowi6_flags = 0;
4758 fl6.mp_hash = 0;
4759
4760 fl6.flowi6_proto = params->l4_protocol;
4761 fl6.daddr = *dst;
4762 fl6.saddr = *src;
4763 fl6.fl6_sport = params->sport;
4764 fl6.fl6_dport = params->dport;
4765
4766 if (flags & BPF_FIB_LOOKUP_DIRECT) {
4767 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4768 struct fib6_table *tb;
4769
4770 tb = ipv6_stub->fib6_get_table(net, tbid);
4771 if (unlikely(!tb))
4c79579b 4772 return BPF_FIB_LKUP_RET_NOT_FWDED;
87f5fc7e 4773
effda4dd
DA
4774 err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
4775 strict);
87f5fc7e
DA
4776 } else {
4777 fl6.flowi6_mark = 0;
4778 fl6.flowi6_secid = 0;
4779 fl6.flowi6_tun_key.tun_id = 0;
4780 fl6.flowi6_uid = sock_net_uid(net, NULL);
4781
effda4dd 4782 err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict);
87f5fc7e
DA
4783 }
4784
effda4dd 4785 if (unlikely(err || IS_ERR_OR_NULL(res.f6i) ||
b1d40991 4786 res.f6i == net->ipv6.fib6_null_entry))
4c79579b
DA
4787 return BPF_FIB_LKUP_RET_NOT_FWDED;
4788
7d21fec9
DA
4789 switch (res.fib6_type) {
4790 /* only unicast is forwarded */
4791 case RTN_UNICAST:
4792 break;
4793 case RTN_BLACKHOLE:
4794 return BPF_FIB_LKUP_RET_BLACKHOLE;
4795 case RTN_UNREACHABLE:
4796 return BPF_FIB_LKUP_RET_UNREACHABLE;
4797 case RTN_PROHIBIT:
4798 return BPF_FIB_LKUP_RET_PROHIBIT;
4799 default:
4c79579b 4800 return BPF_FIB_LKUP_RET_NOT_FWDED;
7d21fec9 4801 }
87f5fc7e 4802
b1d40991
DA
4803 ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif,
4804 fl6.flowi6_oif != 0, NULL, strict);
87f5fc7e 4805
4f74fede 4806 if (check_mtu) {
b748f260 4807 mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src);
4f74fede 4808 if (params->tot_len > mtu)
4c79579b 4809 return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4f74fede
DA
4810 }
4811
b1d40991 4812 if (res.nh->fib_nh_lws)
4c79579b 4813 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
87f5fc7e 4814
b1d40991
DA
4815 if (res.nh->fib_nh_gw_family)
4816 *dst = res.nh->fib_nh_gw6;
87f5fc7e 4817
b1d40991
DA
4818 dev = res.nh->fib_nh_dev;
4819 params->rt_metric = res.f6i->fib6_metric;
87f5fc7e
DA
4820
4821 /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
71df5777 4822 * not needed here.
87f5fc7e 4823 */
71df5777 4824 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
4c79579b
DA
4825 if (!neigh)
4826 return BPF_FIB_LKUP_RET_NO_NEIGH;
87f5fc7e 4827
4c79579b 4828 return bpf_fib_set_fwd_params(params, neigh, dev);
87f5fc7e
DA
4829}
4830#endif
4831
4832BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
4833 struct bpf_fib_lookup *, params, int, plen, u32, flags)
4834{
4835 if (plen < sizeof(*params))
4836 return -EINVAL;
4837
9ce64f19
DA
4838 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4839 return -EINVAL;
4840
87f5fc7e
DA
4841 switch (params->family) {
4842#if IS_ENABLED(CONFIG_INET)
4843 case AF_INET:
4844 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
4f74fede 4845 flags, true);
87f5fc7e
DA
4846#endif
4847#if IS_ENABLED(CONFIG_IPV6)
4848 case AF_INET6:
4849 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
4f74fede 4850 flags, true);
87f5fc7e
DA
4851#endif
4852 }
bcece5dc 4853 return -EAFNOSUPPORT;
87f5fc7e
DA
4854}
4855
4856static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
4857 .func = bpf_xdp_fib_lookup,
4858 .gpl_only = true,
4859 .ret_type = RET_INTEGER,
4860 .arg1_type = ARG_PTR_TO_CTX,
4861 .arg2_type = ARG_PTR_TO_MEM,
4862 .arg3_type = ARG_CONST_SIZE,
4863 .arg4_type = ARG_ANYTHING,
4864};
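/* Usage sketch (editorial addition, not part of filter.c): the canonical
 * XDP forwarding pattern built on the helper above: fill the tuple from
 * the packet, consult the FIB, rewrite MACs on success and redirect out
 * the returned ifindex. Header parsing is elided.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_fwd(struct xdp_md *ctx)
{
	struct bpf_fib_lookup fib = {};

	/* ... parse Ethernet/IP headers, then fill fib.family, the
	 * addresses, tos and tot_len ...
	 */
	fib.ifindex = ctx->ingress_ifindex;

	switch (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0)) {
	case BPF_FIB_LKUP_RET_SUCCESS:
		/* rewrite eth->h_dest/h_source from fib.dmac/fib.smac,
		 * then forward out the nexthop device reported back in
		 * fib.ifindex (set by bpf_fib_set_fwd_params() above):
		 */
		return bpf_redirect(fib.ifindex, 0);
	case BPF_FIB_LKUP_RET_BLACKHOLE:
	case BPF_FIB_LKUP_RET_UNREACHABLE:
	case BPF_FIB_LKUP_RET_PROHIBIT:
		return XDP_DROP;
	default:
		return XDP_PASS;	/* let the stack handle it */
	}
}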
4865
4866BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
4867 struct bpf_fib_lookup *, params, int, plen, u32, flags)
4868{
4f74fede 4869 struct net *net = dev_net(skb->dev);
4c79579b 4870 int rc = -EAFNOSUPPORT;
4f74fede 4871
87f5fc7e
DA
4872 if (plen < sizeof(*params))
4873 return -EINVAL;
4874
9ce64f19
DA
4875 if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4876 return -EINVAL;
4877
87f5fc7e
DA
4878 switch (params->family) {
4879#if IS_ENABLED(CONFIG_INET)
4880 case AF_INET:
4c79579b 4881 rc = bpf_ipv4_fib_lookup(net, params, flags, false);
4f74fede 4882 break;
87f5fc7e
DA
4883#endif
4884#if IS_ENABLED(CONFIG_IPV6)
4885 case AF_INET6:
4c79579b 4886 rc = bpf_ipv6_fib_lookup(net, params, flags, false);
4f74fede 4887 break;
87f5fc7e
DA
4888#endif
4889 }
4f74fede 4890
4c79579b 4891 if (!rc) {
4f74fede
DA
4892 struct net_device *dev;
4893
4c79579b 4894 dev = dev_get_by_index_rcu(net, params->ifindex);
4f74fede 4895 if (!is_skb_forwardable(dev, skb))
4c79579b 4896 rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
4f74fede
DA
4897 }
4898
4c79579b 4899 return rc;
87f5fc7e
DA
4900}
4901
4902static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
4903 .func = bpf_skb_fib_lookup,
4904 .gpl_only = true,
4905 .ret_type = RET_INTEGER,
4906 .arg1_type = ARG_PTR_TO_CTX,
4907 .arg2_type = ARG_PTR_TO_MEM,
4908 .arg3_type = ARG_CONST_SIZE,
4909 .arg4_type = ARG_ANYTHING,
4910};
4911
fe94cc29
MX
4912#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4913static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
4914{
4915 int err;
4916 struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
4917
4918 if (!seg6_validate_srh(srh, len))
4919 return -EINVAL;
4920
4921 switch (type) {
4922 case BPF_LWT_ENCAP_SEG6_INLINE:
4923 if (skb->protocol != htons(ETH_P_IPV6))
4924 return -EBADMSG;
4925
4926 err = seg6_do_srh_inline(skb, srh);
4927 break;
4928 case BPF_LWT_ENCAP_SEG6:
4929 skb_reset_inner_headers(skb);
4930 skb->encapsulation = 1;
4931 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
4932 break;
4933 default:
4934 return -EINVAL;
4935 }
4936
4937 bpf_compute_data_pointers(skb);
4938 if (err)
4939 return err;
4940
4941 ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
4942 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
4943
4944 return seg6_lookup_nexthop(skb, NULL, 0);
4945}
4946#endif /* CONFIG_IPV6_SEG6_BPF */
4947
3e0bd37c
PO
4948#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4949static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
4950 bool ingress)
4951{
52f27877 4952 return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
3e0bd37c
PO
4953}
4954#endif
4955
4956BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
fe94cc29
MX
4957 u32, len)
4958{
4959 switch (type) {
4960#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4961 case BPF_LWT_ENCAP_SEG6:
4962 case BPF_LWT_ENCAP_SEG6_INLINE:
4963 return bpf_push_seg6_encap(skb, type, hdr, len);
3e0bd37c
PO
4964#endif
4965#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4966 case BPF_LWT_ENCAP_IP:
4967 return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
fe94cc29
MX
4968#endif
4969 default:
4970 return -EINVAL;
4971 }
4972}
4973
3e0bd37c
PO
4974BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
4975 void *, hdr, u32, len)
4976{
4977 switch (type) {
4978#if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4979 case BPF_LWT_ENCAP_IP:
4980 return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
fe94cc29
MX
4981#endif
4982 default:
4983 return -EINVAL;
4984 }
4985}
4986
3e0bd37c
PO
4987static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
4988 .func = bpf_lwt_in_push_encap,
4989 .gpl_only = false,
4990 .ret_type = RET_INTEGER,
4991 .arg1_type = ARG_PTR_TO_CTX,
4992 .arg2_type = ARG_ANYTHING,
4993 .arg3_type = ARG_PTR_TO_MEM,
4994 .arg4_type = ARG_CONST_SIZE
4995};
4996
4997static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
4998 .func = bpf_lwt_xmit_push_encap,
fe94cc29
MX
4999 .gpl_only = false,
5000 .ret_type = RET_INTEGER,
5001 .arg1_type = ARG_PTR_TO_CTX,
5002 .arg2_type = ARG_ANYTHING,
5003 .arg3_type = ARG_PTR_TO_MEM,
5004 .arg4_type = ARG_CONST_SIZE
5005};
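/* Usage sketch (editorial addition, not part of filter.c): an lwt_in
 * program pushing an SRv6 encapsulation through bpf_lwt_push_encap(), the
 * BPF-facing name of the helper above. A minimal one-segment SRH; the
 * segment address is left as a placeholder.
 */
#include <linux/bpf.h>
#include <linux/in6.h>
#include <linux/seg6.h>
#include <bpf/bpf_helpers.h>

SEC("lwt_in")
int encap_srv6(struct __sk_buff *skb)
{
	struct {
		struct ipv6_sr_hdr srh;
		struct in6_addr seg;
	} __attribute__((packed)) hdr = {};

	hdr.srh.hdrlen = 2;		/* (8 + 16) / 8 - 1 */
	hdr.srh.type = 4;		/* routing type 4: segment routing */
	hdr.srh.segments_left = 0;
	hdr.srh.first_segment = 0;
	/* hdr.seg = <segment address>, elided */

	if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_SEG6, &hdr,
			       sizeof(hdr)) < 0)
		return BPF_DROP;
	return BPF_OK;
}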
5006
61d76980 5007#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
fe94cc29
MX
5008BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
5009 const void *, from, u32, len)
5010{
fe94cc29
MX
5011 struct seg6_bpf_srh_state *srh_state =
5012 this_cpu_ptr(&seg6_bpf_srh_states);
486cdf21 5013 struct ipv6_sr_hdr *srh = srh_state->srh;
fe94cc29 5014 void *srh_tlvs, *srh_end, *ptr;
fe94cc29
MX
5015 int srhoff = 0;
5016
486cdf21 5017 if (srh == NULL)
fe94cc29
MX
5018 return -EINVAL;
5019
fe94cc29
MX
5020 srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
5021 srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
5022
5023 ptr = skb->data + offset;
5024 if (ptr >= srh_tlvs && ptr + len <= srh_end)
486cdf21 5025 srh_state->valid = false;
fe94cc29
MX
5026 else if (ptr < (void *)&srh->flags ||
5027 ptr + len > (void *)&srh->segments)
5028 return -EFAULT;
5029
5030 if (unlikely(bpf_try_make_writable(skb, offset + len)))
5031 return -EFAULT;
486cdf21
MX
5032 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5033 return -EINVAL;
5034 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
fe94cc29
MX
5035
5036 memcpy(skb->data + offset, from, len);
5037 return 0;
fe94cc29
MX
5038}
5039
5040static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
5041 .func = bpf_lwt_seg6_store_bytes,
5042 .gpl_only = false,
5043 .ret_type = RET_INTEGER,
5044 .arg1_type = ARG_PTR_TO_CTX,
5045 .arg2_type = ARG_ANYTHING,
5046 .arg3_type = ARG_PTR_TO_MEM,
5047 .arg4_type = ARG_CONST_SIZE
5048};
5049
486cdf21 5050static void bpf_update_srh_state(struct sk_buff *skb)
fe94cc29 5051{
fe94cc29
MX
5052 struct seg6_bpf_srh_state *srh_state =
5053 this_cpu_ptr(&seg6_bpf_srh_states);
fe94cc29 5054 int srhoff = 0;
fe94cc29 5055
486cdf21
MX
5056 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
5057 srh_state->srh = NULL;
5058 } else {
5059 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5060 srh_state->hdrlen = srh_state->srh->hdrlen << 3;
5061 srh_state->valid = true;
fe94cc29 5062 }
486cdf21
MX
5063}
5064
5065BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
5066 u32, action, void *, param, u32, param_len)
5067{
5068 struct seg6_bpf_srh_state *srh_state =
5069 this_cpu_ptr(&seg6_bpf_srh_states);
5070 int hdroff = 0;
5071 int err;
fe94cc29
MX
5072
5073 switch (action) {
5074 case SEG6_LOCAL_ACTION_END_X:
486cdf21
MX
5075 if (!seg6_bpf_has_valid_srh(skb))
5076 return -EBADMSG;
fe94cc29
MX
5077 if (param_len != sizeof(struct in6_addr))
5078 return -EINVAL;
5079 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
5080 case SEG6_LOCAL_ACTION_END_T:
486cdf21
MX
5081 if (!seg6_bpf_has_valid_srh(skb))
5082 return -EBADMSG;
fe94cc29
MX
5083 if (param_len != sizeof(int))
5084 return -EINVAL;
5085 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
486cdf21
MX
5086 case SEG6_LOCAL_ACTION_END_DT6:
5087 if (!seg6_bpf_has_valid_srh(skb))
5088 return -EBADMSG;
fe94cc29
MX
5089 if (param_len != sizeof(int))
5090 return -EINVAL;
486cdf21
MX
5091
5092 if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
5093 return -EBADMSG;
5094 if (!pskb_pull(skb, hdroff))
5095 return -EBADMSG;
5096
5097 skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
5098 skb_reset_network_header(skb);
5099 skb_reset_transport_header(skb);
5100 skb->encapsulation = 0;
5101
5102 bpf_compute_data_pointers(skb);
5103 bpf_update_srh_state(skb);
fe94cc29
MX
5104 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
5105 case SEG6_LOCAL_ACTION_END_B6:
486cdf21
MX
5106 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5107 return -EBADMSG;
fe94cc29
MX
5108 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
5109 param, param_len);
5110 if (!err)
486cdf21
MX
5111 bpf_update_srh_state(skb);
5112
fe94cc29
MX
5113 return err;
5114 case SEG6_LOCAL_ACTION_END_B6_ENCAP:
486cdf21
MX
5115 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5116 return -EBADMSG;
fe94cc29
MX
5117 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
5118 param, param_len);
5119 if (!err)
486cdf21
MX
5120 bpf_update_srh_state(skb);
5121
fe94cc29
MX
5122 return err;
5123 default:
5124 return -EINVAL;
5125 }
fe94cc29
MX
5126}
5127
5128static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
5129 .func = bpf_lwt_seg6_action,
5130 .gpl_only = false,
5131 .ret_type = RET_INTEGER,
5132 .arg1_type = ARG_PTR_TO_CTX,
5133 .arg2_type = ARG_ANYTHING,
5134 .arg3_type = ARG_PTR_TO_MEM,
5135 .arg4_type = ARG_CONST_SIZE
5136};
5137
5138BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
5139 s32, len)
5140{
fe94cc29
MX
5141 struct seg6_bpf_srh_state *srh_state =
5142 this_cpu_ptr(&seg6_bpf_srh_states);
486cdf21 5143 struct ipv6_sr_hdr *srh = srh_state->srh;
fe94cc29 5144 void *srh_end, *srh_tlvs, *ptr;
fe94cc29
MX
5145 struct ipv6hdr *hdr;
5146 int srhoff = 0;
5147 int ret;
5148
486cdf21 5149 if (unlikely(srh == NULL))
fe94cc29 5150 return -EINVAL;
fe94cc29
MX
5151
5152 srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
5153 ((srh->first_segment + 1) << 4));
5154 srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
5155 srh_state->hdrlen);
5156 ptr = skb->data + offset;
5157
5158 if (unlikely(ptr < srh_tlvs || ptr > srh_end))
5159 return -EFAULT;
5160 if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
5161 return -EFAULT;
5162
5163 if (len > 0) {
5164 ret = skb_cow_head(skb, len);
5165 if (unlikely(ret < 0))
5166 return ret;
5167
5168 ret = bpf_skb_net_hdr_push(skb, offset, len);
5169 } else {
5170 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
5171 }
5172
5173 bpf_compute_data_pointers(skb);
5174 if (unlikely(ret < 0))
5175 return ret;
5176
5177 hdr = (struct ipv6hdr *)skb->data;
5178 hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
5179
486cdf21
MX
5180 if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5181 return -EINVAL;
5182 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
fe94cc29 5183 srh_state->hdrlen += len;
486cdf21 5184 srh_state->valid = false;
fe94cc29 5185 return 0;
fe94cc29
MX
5186}
5187
5188static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
5189 .func = bpf_lwt_seg6_adjust_srh,
5190 .gpl_only = false,
5191 .ret_type = RET_INTEGER,
5192 .arg1_type = ARG_PTR_TO_CTX,
5193 .arg2_type = ARG_ANYTHING,
5194 .arg3_type = ARG_ANYTHING,
5195};
61d76980 5196#endif /* CONFIG_IPV6_SEG6_BPF */
fe94cc29 5197
9b1f3d6e
MKL
5198#define CONVERT_COMMON_TCP_SOCK_FIELDS(md_type, CONVERT) \
5199do { \
5200 switch (si->off) { \
5201 case offsetof(md_type, snd_cwnd): \
5202 CONVERT(snd_cwnd); break; \
5203 case offsetof(md_type, srtt_us): \
5204 CONVERT(srtt_us); break; \
5205 case offsetof(md_type, snd_ssthresh): \
5206 CONVERT(snd_ssthresh); break; \
5207 case offsetof(md_type, rcv_nxt): \
5208 CONVERT(rcv_nxt); break; \
5209 case offsetof(md_type, snd_nxt): \
5210 CONVERT(snd_nxt); break; \
5211 case offsetof(md_type, snd_una): \
5212 CONVERT(snd_una); break; \
5213 case offsetof(md_type, mss_cache): \
5214 CONVERT(mss_cache); break; \
5215 case offsetof(md_type, ecn_flags): \
5216 CONVERT(ecn_flags); break; \
5217 case offsetof(md_type, rate_delivered): \
5218 CONVERT(rate_delivered); break; \
5219 case offsetof(md_type, rate_interval_us): \
5220 CONVERT(rate_interval_us); break; \
5221 case offsetof(md_type, packets_out): \
5222 CONVERT(packets_out); break; \
5223 case offsetof(md_type, retrans_out): \
5224 CONVERT(retrans_out); break; \
5225 case offsetof(md_type, total_retrans): \
5226 CONVERT(total_retrans); break; \
5227 case offsetof(md_type, segs_in): \
5228 CONVERT(segs_in); break; \
5229 case offsetof(md_type, data_segs_in): \
5230 CONVERT(data_segs_in); break; \
5231 case offsetof(md_type, segs_out): \
5232 CONVERT(segs_out); break; \
5233 case offsetof(md_type, data_segs_out): \
5234 CONVERT(data_segs_out); break; \
5235 case offsetof(md_type, lost_out): \
5236 CONVERT(lost_out); break; \
5237 case offsetof(md_type, sacked_out): \
5238 CONVERT(sacked_out); break; \
5239 case offsetof(md_type, bytes_received): \
5240 CONVERT(bytes_received); break; \
5241 case offsetof(md_type, bytes_acked): \
5242 CONVERT(bytes_acked); break; \
5243 } \
5244} while (0)
5245
df3f94a0
AB
5246#ifdef CONFIG_INET
5247static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
c8123ead 5248 int dif, int sdif, u8 family, u8 proto)
6acc9b43 5249{
6acc9b43
JS
5250 bool refcounted = false;
5251 struct sock *sk = NULL;
5252
5253 if (family == AF_INET) {
5254 __be32 src4 = tuple->ipv4.saddr;
5255 __be32 dst4 = tuple->ipv4.daddr;
6acc9b43
JS
5256
5257 if (proto == IPPROTO_TCP)
c8123ead 5258 sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
6acc9b43
JS
5259 src4, tuple->ipv4.sport,
5260 dst4, tuple->ipv4.dport,
5261 dif, sdif, &refcounted);
5262 else
5263 sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
5264 dst4, tuple->ipv4.dport,
c8123ead 5265 dif, sdif, &udp_table, NULL);
8a615c6b 5266#if IS_ENABLED(CONFIG_IPV6)
6acc9b43
JS
5267 } else {
5268 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
5269 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
6acc9b43
JS
5270
5271 if (proto == IPPROTO_TCP)
c8123ead 5272 sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
6acc9b43 5273 src6, tuple->ipv6.sport,
cac6cc2f 5274 dst6, ntohs(tuple->ipv6.dport),
6acc9b43 5275 dif, sdif, &refcounted);
8a615c6b
JS
5276 else if (likely(ipv6_bpf_stub))
5277 sk = ipv6_bpf_stub->udp6_lib_lookup(net,
5278 src6, tuple->ipv6.sport,
cac6cc2f 5279 dst6, tuple->ipv6.dport,
8a615c6b 5280 dif, sdif,
c8123ead 5281 &udp_table, NULL);
6acc9b43
JS
5282#endif
5283 }
5284
5285 if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
5286 WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
5287 sk = NULL;
5288 }
5289 return sk;
5290}
5291
edbf8c01 5292/* bpf_skc_lookup performs the core lookup for different types of sockets,
6acc9b43
JS
5293 * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
 5294 * The socket is returned as a 'struct sock *'; the BPF_CALL wrappers
 5295 * cast it to an 'unsigned long' to satisfy their declarations.
5296 */
edbf8c01
LB
5297static struct sock *
5298__bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5299 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5300 u64 flags)
6acc9b43 5301{
6acc9b43
JS
5302 struct sock *sk = NULL;
5303 u8 family = AF_UNSPEC;
5304 struct net *net;
c8123ead 5305 int sdif;
6acc9b43
JS
5306
5307 family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6;
f71c6143
JS
5308 if (unlikely(family == AF_UNSPEC || flags ||
5309 !((s32)netns_id < 0 || netns_id <= S32_MAX)))
6acc9b43
JS
5310 goto out;
5311
c8123ead
NH
5312 if (family == AF_INET)
5313 sdif = inet_sdif(skb);
6acc9b43 5314 else
c8123ead
NH
5315 sdif = inet6_sdif(skb);
5316
f71c6143
JS
5317 if ((s32)netns_id < 0) {
5318 net = caller_net;
4cc1feeb 5319 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
f71c6143 5320 } else {
6acc9b43
JS
5321 net = get_net_ns_by_id(caller_net, netns_id);
5322 if (unlikely(!net))
5323 goto out;
c8123ead 5324 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
6acc9b43 5325 put_net(net);
6acc9b43
JS
5326 }
5327
edbf8c01
LB
5328out:
5329 return sk;
5330}
5331
5332static struct sock *
5333__bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5334 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5335 u64 flags)
5336{
5337 struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
5338 ifindex, proto, netns_id, flags);
5339
6acc9b43
JS
5340 if (sk)
5341 sk = sk_to_full_sk(sk);
edbf8c01
LB
5342
5343 return sk;
6acc9b43
JS
5344}
5345
static struct sock *
bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
	       u8 proto, u64 netns_id, u64 flags)
{
	struct net *caller_net;
	int ifindex;

	if (skb->dev) {
		caller_net = dev_net(skb->dev);
		ifindex = skb->dev->ifindex;
	} else {
		caller_net = sock_net(skb->sk);
		ifindex = 0;
	}

	return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
				netns_id, flags);
}

static struct sock *
bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
	      u8 proto, u64 netns_id, u64 flags)
{
	struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
					 flags);

	if (sk)
		sk = sk_to_full_sk(sk);

	return sk;
}

BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
	return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
					     netns_id, flags);
}

static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
	.func		= bpf_skc_lookup_tcp,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_SOCK_COMMON_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
	return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
					    netns_id, flags);
}

static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
	.func		= bpf_sk_lookup_tcp,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
	return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
					    netns_id, flags);
}

static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
	.func		= bpf_sk_lookup_udp,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

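/* Release the reference taken by the sk_lookup helpers above. Sockets
 * flagged SOCK_RCU_FREE were never refcounted by the lookup, so only the
 * others are put via sock_gen_put().
 */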
BPF_CALL_1(bpf_sk_release, struct sock *, sk)
{
	if (!sock_flag(sk, SOCK_RCU_FREE))
		sock_gen_put(sk);
	return 0;
}

static const struct bpf_func_proto bpf_sk_release_proto = {
	.func		= bpf_sk_release,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
};

BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
{
	struct net *caller_net = dev_net(ctx->rxq->dev);
	int ifindex = ctx->rxq->dev->ifindex;

	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
					      ifindex, IPPROTO_UDP, netns_id,
					      flags);
}

static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
	.func		= bpf_xdp_sk_lookup_udp,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
{
	struct net *caller_net = dev_net(ctx->rxq->dev);
	int ifindex = ctx->rxq->dev->ifindex;

	return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
					       ifindex, IPPROTO_TCP, netns_id,
					       flags);
}

static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
	.func		= bpf_xdp_skc_lookup_tcp,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_SOCK_COMMON_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
	   struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
{
	struct net *caller_net = dev_net(ctx->rxq->dev);
	int ifindex = ctx->rxq->dev->ifindex;

	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
					      ifindex, IPPROTO_TCP, netns_id,
					      flags);
}

static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
	.func		= bpf_xdp_sk_lookup_tcp,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
	return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
					       sock_net(ctx->sk), 0,
					       IPPROTO_TCP, netns_id, flags);
}

static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
	.func		= bpf_sock_addr_skc_lookup_tcp,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_SOCK_COMMON_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
					      sock_net(ctx->sk), 0, IPPROTO_TCP,
					      netns_id, flags);
}

static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
	.func		= bpf_sock_addr_sk_lookup_tcp,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
	   struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
{
	return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
					      sock_net(ctx->sk), 0, IPPROTO_UDP,
					      netns_id, flags);
}

static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
	.func		= bpf_sock_addr_sk_lookup_udp,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

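/* Access checks and context rewriting for struct bpf_tcp_sock: all fields
 * are read as __u32 except the 64-bit bytes_received/bytes_acked, and each
 * read is rewritten into a direct load from struct tcp_sock.
 */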
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= offsetofend(struct bpf_tcp_sock, bytes_acked))
		return false;

	if (off % size != 0)
		return false;

	switch (off) {
	case offsetof(struct bpf_tcp_sock, bytes_received):
	case offsetof(struct bpf_tcp_sock, bytes_acked):
		return size == sizeof(__u64);
	default:
		return size == sizeof(__u32);
	}
}

u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

#define BPF_TCP_SOCK_GET_COMMON(FIELD)					\
	do {								\
		BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) >	\
			     FIELD_SIZEOF(struct bpf_tcp_sock, FIELD));	\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
				      si->dst_reg, si->src_reg,		\
				      offsetof(struct tcp_sock, FIELD)); \
	} while (0)

	CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_tcp_sock,
				       BPF_TCP_SOCK_GET_COMMON);

	if (insn > insn_buf)
		return insn - insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_tcp_sock, rtt_min):
		BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
			     sizeof(struct minmax));
		BUILD_BUG_ON(sizeof(struct minmax) <
			     sizeof(struct minmax_sample));

		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct tcp_sock, rtt_min) +
				      offsetof(struct minmax_sample, v));
		break;
	}

	return insn - insn_buf;
}

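/* Return a bpf_tcp_sock pointer for full TCP sockets only, NULL otherwise. */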
BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
{
	if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
		return (unsigned long)sk;

	return (unsigned long)NULL;
}

static const struct bpf_func_proto bpf_tcp_sock_proto = {
	.func		= bpf_tcp_sock,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_TCP_SOCK_OR_NULL,
	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
};

BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
{
	sk = sk_to_full_sk(sk);

	if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
		return (unsigned long)sk;

	return (unsigned long)NULL;
}

static const struct bpf_func_proto bpf_get_listener_sock_proto = {
	.func		= bpf_get_listener_sock,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
};

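/* Set the ECN CE (congestion experienced) code point on an IPv4/IPv6
 * packet; returns the result of INET_ECN_set_ce(), or 0 when the IP
 * header is not linear or not writable.
 */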
BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
{
	unsigned int iphdr_len;

	if (skb->protocol == cpu_to_be16(ETH_P_IP))
		iphdr_len = sizeof(struct iphdr);
	else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
		iphdr_len = sizeof(struct ipv6hdr);
	else
		return 0;

	if (skb_headlen(skb) < iphdr_len)
		return 0;

	if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
		return 0;

	return INET_ECN_set_ce(skb);
}

bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
				  struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id))
		return false;

	if (off % size != 0)
		return false;

	switch (off) {
	default:
		return size == sizeof(__u32);
	}
}

u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
				    const struct bpf_insn *si,
				    struct bpf_insn *insn_buf,
				    struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

#define BPF_XDP_SOCK_GET(FIELD)						\
	do {								\
		BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_sock, FIELD) >	\
			     FIELD_SIZEOF(struct bpf_xdp_sock, FIELD));	\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
				      si->dst_reg, si->src_reg,		\
				      offsetof(struct xdp_sock, FIELD)); \
	} while (0)

	switch (si->off) {
	case offsetof(struct bpf_xdp_sock, queue_id):
		BPF_XDP_SOCK_GET(queue_id);
		break;
	}

	return insn - insn_buf;
}

static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
	.func		= bpf_skb_ecn_set_ce,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

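/* Check whether iph/th describe a valid SYN cookie ACK for the listening
 * socket sk: 0 means the cookie is valid, a negative error otherwise.
 * The cookie itself is the acknowledged sequence number minus one.
 */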
BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
	   struct tcphdr *, th, u32, th_len)
{
#ifdef CONFIG_SYN_COOKIES
	u32 cookie;
	int ret;

	if (unlikely(th_len < sizeof(*th)))
		return -EINVAL;

	/* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
	if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
		return -EINVAL;

	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
		return -EINVAL;

	if (!th->ack || th->rst || th->syn)
		return -ENOENT;

	if (tcp_synq_no_recent_overflow(sk))
		return -ENOENT;

	cookie = ntohl(th->ack_seq) - 1;

	switch (sk->sk_family) {
	case AF_INET:
		if (unlikely(iph_len < sizeof(struct iphdr)))
			return -EINVAL;

		ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
		break;

#if IS_BUILTIN(CONFIG_IPV6)
	case AF_INET6:
		if (unlikely(iph_len < sizeof(struct ipv6hdr)))
			return -EINVAL;

		ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
		break;
#endif /* CONFIG_IPV6 */

	default:
		return -EPROTONOSUPPORT;
	}

	if (ret > 0)
		return 0;

	return -ENOENT;
#else
	return -ENOTSUPP;
#endif
}

static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
	.func		= bpf_tcp_check_syncookie,
	.gpl_only	= true,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE,
};

#endif /* CONFIG_INET */

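/* Helpers listed here may write to or reallocate packet data, so any
 * packet pointers held across a call to one of them must be considered
 * invalid; the verifier relies on this to force them to be reloaded.
 */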
bool bpf_helper_changes_pkt_data(void *func)
{
	if (func == bpf_skb_vlan_push ||
	    func == bpf_skb_vlan_pop ||
	    func == bpf_skb_store_bytes ||
	    func == bpf_skb_change_proto ||
	    func == bpf_skb_change_head ||
	    func == sk_skb_change_head ||
	    func == bpf_skb_change_tail ||
	    func == sk_skb_change_tail ||
	    func == bpf_skb_adjust_room ||
	    func == bpf_skb_pull_data ||
	    func == sk_skb_pull_data ||
	    func == bpf_clone_redirect ||
	    func == bpf_l3_csum_replace ||
	    func == bpf_l4_csum_replace ||
	    func == bpf_xdp_adjust_head ||
	    func == bpf_xdp_adjust_meta ||
	    func == bpf_msg_pull_data ||
	    func == bpf_msg_push_data ||
	    func == bpf_msg_pop_data ||
	    func == bpf_xdp_adjust_tail ||
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
	    func == bpf_lwt_seg6_store_bytes ||
	    func == bpf_lwt_seg6_adjust_srh ||
	    func == bpf_lwt_seg6_action ||
#endif
	    func == bpf_lwt_in_push_encap ||
	    func == bpf_lwt_xmit_push_encap)
		return true;

	return false;
}

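/* Base set of helpers available to every program type; the entries past
 * the capability check are additionally restricted to CAP_SYS_ADMIN.
 */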
static const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_prandom_u32:
		return &bpf_get_prandom_u32_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_raw_smp_processor_id_proto;
	case BPF_FUNC_get_numa_node_id:
		return &bpf_get_numa_node_id_proto;
	case BPF_FUNC_tail_call:
		return &bpf_tail_call_proto;
	case BPF_FUNC_ktime_get_ns:
		return &bpf_ktime_get_ns_proto;
	default:
		break;
	}

	if (!capable(CAP_SYS_ADMIN))
		return NULL;

	switch (func_id) {
	case BPF_FUNC_spin_lock:
		return &bpf_spin_lock_proto;
	case BPF_FUNC_spin_unlock:
		return &bpf_spin_unlock_proto;
	case BPF_FUNC_trace_printk:
		return bpf_get_trace_printk_proto();
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	/* inet and inet6 sockets are created in a process
	 * context so there is always a valid uid/gid
	 */
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	/* inet and inet6 sockets are created in a process
	 * context so there is always a valid uid/gid
	 */
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_bind:
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_INET6_CONNECT:
			return &bpf_bind_proto;
		default:
			return NULL;
		}
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_sock_addr_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
#ifdef CONFIG_INET
	case BPF_FUNC_sk_lookup_tcp:
		return &bpf_sock_addr_sk_lookup_tcp_proto;
	case BPF_FUNC_sk_lookup_udp:
		return &bpf_sock_addr_sk_lookup_udp_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
	case BPF_FUNC_skc_lookup_tcp:
		return &bpf_sock_addr_skc_lookup_tcp_proto;
#endif /* CONFIG_INET */
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_load_bytes_relative:
		return &bpf_skb_load_bytes_relative_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

const struct bpf_func_proto bpf_sk_storage_get_proto __weak;
const struct bpf_func_proto bpf_sk_storage_delete_proto __weak;

static const struct bpf_func_proto *
cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_sk_fullsock:
		return &bpf_sk_fullsock_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#ifdef CONFIG_SOCK_CGROUP_DATA
	case BPF_FUNC_skb_cgroup_id:
		return &bpf_skb_cgroup_id_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
	case BPF_FUNC_get_listener_sock:
		return &bpf_get_listener_sock_proto;
	case BPF_FUNC_skb_ecn_set_ce:
		return &bpf_skb_ecn_set_ce_proto;
#endif
	default:
		return sk_filter_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_load_bytes_relative:
		return &bpf_skb_load_bytes_relative_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_skb_vlan_push:
		return &bpf_skb_vlan_push_proto;
	case BPF_FUNC_skb_vlan_pop:
		return &bpf_skb_vlan_pop_proto;
	case BPF_FUNC_skb_change_proto:
		return &bpf_skb_change_proto_proto;
	case BPF_FUNC_skb_change_type:
		return &bpf_skb_change_type_proto;
	case BPF_FUNC_skb_adjust_room:
		return &bpf_skb_adjust_room_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	case BPF_FUNC_set_hash:
		return &bpf_set_hash_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	case BPF_FUNC_fib_lookup:
		return &bpf_skb_fib_lookup_proto;
	case BPF_FUNC_sk_fullsock:
		return &bpf_sk_fullsock_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#ifdef CONFIG_XFRM
	case BPF_FUNC_skb_get_xfrm_state:
		return &bpf_skb_get_xfrm_state_proto;
#endif
#ifdef CONFIG_SOCK_CGROUP_DATA
	case BPF_FUNC_skb_cgroup_id:
		return &bpf_skb_cgroup_id_proto;
	case BPF_FUNC_skb_ancestor_cgroup_id:
		return &bpf_skb_ancestor_cgroup_id_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_sk_lookup_tcp:
		return &bpf_sk_lookup_tcp_proto;
	case BPF_FUNC_sk_lookup_udp:
		return &bpf_sk_lookup_udp_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
	case BPF_FUNC_get_listener_sock:
		return &bpf_get_listener_sock_proto;
	case BPF_FUNC_skc_lookup_tcp:
		return &bpf_skc_lookup_tcp_proto;
	case BPF_FUNC_tcp_check_syncookie:
		return &bpf_tcp_check_syncookie_proto;
	case BPF_FUNC_skb_ecn_set_ce:
		return &bpf_skb_ecn_set_ce_proto;
#endif
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_output:
		return &bpf_xdp_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_xdp_adjust_head:
		return &bpf_xdp_adjust_head_proto;
	case BPF_FUNC_xdp_adjust_meta:
		return &bpf_xdp_adjust_meta_proto;
	case BPF_FUNC_redirect:
		return &bpf_xdp_redirect_proto;
	case BPF_FUNC_redirect_map:
		return &bpf_xdp_redirect_map_proto;
	case BPF_FUNC_xdp_adjust_tail:
		return &bpf_xdp_adjust_tail_proto;
	case BPF_FUNC_fib_lookup:
		return &bpf_xdp_fib_lookup_proto;
#ifdef CONFIG_INET
	case BPF_FUNC_sk_lookup_udp:
		return &bpf_xdp_sk_lookup_udp_proto;
	case BPF_FUNC_sk_lookup_tcp:
		return &bpf_xdp_sk_lookup_tcp_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
	case BPF_FUNC_skc_lookup_tcp:
		return &bpf_xdp_skc_lookup_tcp_proto;
	case BPF_FUNC_tcp_check_syncookie:
		return &bpf_tcp_check_syncookie_proto;
#endif
	default:
		return bpf_base_func_proto(func_id);
	}
}

const struct bpf_func_proto bpf_sock_map_update_proto __weak;
const struct bpf_func_proto bpf_sock_hash_update_proto __weak;

static const struct bpf_func_proto *
sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_getsockopt_proto;
	case BPF_FUNC_sock_ops_cb_flags_set:
		return &bpf_sock_ops_cb_flags_set_proto;
	case BPF_FUNC_sock_map_update:
		return &bpf_sock_map_update_proto;
	case BPF_FUNC_sock_hash_update:
		return &bpf_sock_hash_update_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_sock_ops_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_sockopt_event_output_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;

static const struct bpf_func_proto *
sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_msg_redirect_map:
		return &bpf_msg_redirect_map_proto;
	case BPF_FUNC_msg_redirect_hash:
		return &bpf_msg_redirect_hash_proto;
	case BPF_FUNC_msg_apply_bytes:
		return &bpf_msg_apply_bytes_proto;
	case BPF_FUNC_msg_cork_bytes:
		return &bpf_msg_cork_bytes_proto;
	case BPF_FUNC_msg_pull_data:
		return &bpf_msg_pull_data_proto;
	case BPF_FUNC_msg_push_data:
		return &bpf_msg_push_data_proto;
	case BPF_FUNC_msg_pop_data:
		return &bpf_msg_pop_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;

static const struct bpf_func_proto *
sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &sk_skb_pull_data_proto;
	case BPF_FUNC_skb_change_tail:
		return &sk_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &sk_skb_change_head_proto;
	case BPF_FUNC_get_socket_cookie:
		return &bpf_get_socket_cookie_proto;
	case BPF_FUNC_get_socket_uid:
		return &bpf_get_socket_uid_proto;
	case BPF_FUNC_sk_redirect_map:
		return &bpf_sk_redirect_map_proto;
	case BPF_FUNC_sk_redirect_hash:
		return &bpf_sk_redirect_hash_proto;
#ifdef CONFIG_INET
	case BPF_FUNC_sk_lookup_tcp:
		return &bpf_sk_lookup_tcp_proto;
	case BPF_FUNC_sk_lookup_udp:
		return &bpf_sk_lookup_udp_proto;
	case BPF_FUNC_sk_release:
		return &bpf_sk_release_proto;
	case BPF_FUNC_skc_lookup_tcp:
		return &bpf_skc_lookup_tcp_proto;
#endif
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_flow_dissector_load_bytes_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_load_bytes:
		return &bpf_skb_load_bytes_proto;
	case BPF_FUNC_skb_pull_data:
		return &bpf_skb_pull_data_proto;
	case BPF_FUNC_csum_diff:
		return &bpf_csum_diff_proto;
	case BPF_FUNC_get_cgroup_classid:
		return &bpf_get_cgroup_classid_proto;
	case BPF_FUNC_get_route_realm:
		return &bpf_get_route_realm_proto;
	case BPF_FUNC_get_hash_recalc:
		return &bpf_get_hash_recalc_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_skb_event_output_proto;
	case BPF_FUNC_get_smp_processor_id:
		return &bpf_get_smp_processor_id_proto;
	case BPF_FUNC_skb_under_cgroup:
		return &bpf_skb_under_cgroup_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_lwt_push_encap:
		return &bpf_lwt_in_push_encap_proto;
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_skb_get_tunnel_key:
		return &bpf_skb_get_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_key:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_skb_get_tunnel_opt:
		return &bpf_skb_get_tunnel_opt_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return bpf_get_skb_set_tunnel_proto(func_id);
	case BPF_FUNC_redirect:
		return &bpf_redirect_proto;
	case BPF_FUNC_clone_redirect:
		return &bpf_clone_redirect_proto;
	case BPF_FUNC_skb_change_tail:
		return &bpf_skb_change_tail_proto;
	case BPF_FUNC_skb_change_head:
		return &bpf_skb_change_head_proto;
	case BPF_FUNC_skb_store_bytes:
		return &bpf_skb_store_bytes_proto;
	case BPF_FUNC_csum_update:
		return &bpf_csum_update_proto;
	case BPF_FUNC_l3_csum_replace:
		return &bpf_l3_csum_replace_proto;
	case BPF_FUNC_l4_csum_replace:
		return &bpf_l4_csum_replace_proto;
	case BPF_FUNC_set_hash_invalid:
		return &bpf_set_hash_invalid_proto;
	case BPF_FUNC_lwt_push_encap:
		return &bpf_lwt_xmit_push_encap_proto;
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}

static const struct bpf_func_proto *
lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
	case BPF_FUNC_lwt_seg6_store_bytes:
		return &bpf_lwt_seg6_store_bytes_proto;
	case BPF_FUNC_lwt_seg6_action:
		return &bpf_lwt_seg6_action_proto;
	case BPF_FUNC_lwt_seg6_adjust_srh:
		return &bpf_lwt_seg6_adjust_srh_proto;
#endif
	default:
		return lwt_out_func_proto(func_id, prog);
	}
}

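/* Common __sk_buff access checks shared by all skb-based program types:
 * bounds and alignment, per-field size rules, and the special pointer
 * fields (data, data_end, sk). The per-type callbacks below first filter
 * out fields they do not expose, then fall through to this one.
 */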
static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
				    const struct bpf_prog *prog,
				    struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct __sk_buff))
		return false;

	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
		if (off + size > offsetofend(struct __sk_buff, cb[4]))
			return false;
		break;
	case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
	case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
	case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
	case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, data_end):
		if (size != size_default)
			return false;
		break;
	case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
		return false;
	case bpf_ctx_range(struct __sk_buff, tstamp):
		if (size != sizeof(__u64))
			return false;
		break;
	case offsetof(struct __sk_buff, sk):
		if (type == BPF_WRITE || size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
		break;
	default:
		/* Only narrow read access allowed for now. */
		if (type == BPF_WRITE) {
			if (size != size_default)
				return false;
		} else {
			bpf_ctx_record_field_size(info, size_default);
			if (!bpf_ctx_narrow_access_ok(off, size, size_default))
				return false;
		}
	}

	return true;
}

static bool sk_filter_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, data_end):
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
	case bpf_ctx_range(struct __sk_buff, tstamp):
	case bpf_ctx_range(struct __sk_buff, wire_len):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

static bool cg_skb_is_valid_access(int off, int size,
				   enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, wire_len):
		return false;
	case bpf_ctx_range(struct __sk_buff, data):
	case bpf_ctx_range(struct __sk_buff, data_end):
		if (!capable(CAP_SYS_ADMIN))
			return false;
		break;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		case bpf_ctx_range(struct __sk_buff, tstamp):
			if (!capable(CAP_SYS_ADMIN))
				return false;
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

static bool lwt_is_valid_access(int off, int size,
				enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range(struct __sk_buff, tc_classid):
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
	case bpf_ctx_range(struct __sk_buff, data_meta):
	case bpf_ctx_range(struct __sk_buff, tstamp):
	case bpf_ctx_range(struct __sk_buff, wire_len):
		return false;
	}

	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

/* Attach type specific accesses */
static bool __sock_filter_check_attach_type(int off,
					    enum bpf_access_type access_type,
					    enum bpf_attach_type attach_type)
{
	switch (off) {
	case offsetof(struct bpf_sock, bound_dev_if):
	case offsetof(struct bpf_sock, mark):
	case offsetof(struct bpf_sock, priority):
		switch (attach_type) {
		case BPF_CGROUP_INET_SOCK_CREATE:
			goto full_access;
		default:
			return false;
		}
	case bpf_ctx_range(struct bpf_sock, src_ip4):
		switch (attach_type) {
		case BPF_CGROUP_INET4_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
		switch (attach_type) {
		case BPF_CGROUP_INET6_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	case bpf_ctx_range(struct bpf_sock, src_port):
		switch (attach_type) {
		case BPF_CGROUP_INET4_POST_BIND:
		case BPF_CGROUP_INET6_POST_BIND:
			goto read_only;
		default:
			return false;
		}
	}
read_only:
	return access_type == BPF_READ;
full_access:
	return true;
}

bool bpf_sock_common_is_valid_access(int off, int size,
				     enum bpf_access_type type,
				     struct bpf_insn_access_aux *info)
{
	switch (off) {
	case bpf_ctx_range_till(struct bpf_sock, type, priority):
		return false;
	default:
		return bpf_sock_is_valid_access(off, size, type, info);
	}
}

bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
			      struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sock))
		return false;
	if (off % size != 0)
		return false;

	switch (off) {
	case offsetof(struct bpf_sock, state):
	case offsetof(struct bpf_sock, family):
	case offsetof(struct bpf_sock, type):
	case offsetof(struct bpf_sock, protocol):
	case offsetof(struct bpf_sock, dst_port):
	case offsetof(struct bpf_sock, src_port):
	case bpf_ctx_range(struct bpf_sock, src_ip4):
	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
	case bpf_ctx_range(struct bpf_sock, dst_ip4):
	case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	}

	return size == size_default;
}

static bool sock_filter_is_valid_access(int off, int size,
					enum bpf_access_type type,
					const struct bpf_prog *prog,
					struct bpf_insn_access_aux *info)
{
	if (!bpf_sock_is_valid_access(off, size, type, info))
		return false;
	return __sock_filter_check_attach_type(off, type,
					       prog->expected_attach_type);
}

static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
			     const struct bpf_prog *prog)
{
	/* Neither direct read nor direct write requires any preliminary
	 * action.
	 */
	return 0;
}

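/* Prologue emitted in front of programs that may write packet data
 * directly: if the skb might be a clone, pull (unclone) its data first
 * via bpf_skb_pull_data(), and return drop_verdict when that fails.
 */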
static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
				const struct bpf_prog *prog, int drop_verdict)
{
	struct bpf_insn *insn = insn_buf;

	if (!direct_write)
		return 0;

	/* if (!skb->cloned)
	 *       goto start;
	 *
	 * (Fast-path, otherwise approximation that we might be
	 *  a clone, do the rest in helper.)
	 */
	*insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
	*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);

	/* ret = bpf_skb_pull_data(skb, 0); */
	*insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
	*insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
	*insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
			       BPF_FUNC_skb_pull_data);
	/* if (!ret)
	 *      goto restore;
	 * return TC_ACT_SHOT;
	 */
	*insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
	*insn++ = BPF_EXIT_INSN();

	/* restore: */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
	/* start: */
	*insn++ = prog->insnsi[0];

	return insn - insn_buf;
}

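/* Emit the replacement sequence for classic BPF LD_ABS/LD_IND: compute
 * the offset, call the size-specific load helper, and terminate the
 * program with return value 0 if the load faults.
 */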
static int bpf_gen_ld_abs(const struct bpf_insn *orig,
			  struct bpf_insn *insn_buf)
{
	bool indirect = BPF_MODE(orig->code) == BPF_IND;
	struct bpf_insn *insn = insn_buf;

	/* We're guaranteed here that CTX is in R6. */
	*insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
		if (orig->imm)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
	}

	switch (BPF_SIZE(orig->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
		break;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
	*insn++ = BPF_EXIT_INSN();

	return insn - insn_buf;
}

static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
			       const struct bpf_prog *prog)
{
	return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
}

static bool tc_cls_act_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		switch (off) {
		case bpf_ctx_range(struct __sk_buff, mark):
		case bpf_ctx_range(struct __sk_buff, tc_index):
		case bpf_ctx_range(struct __sk_buff, priority):
		case bpf_ctx_range(struct __sk_buff, tc_classid):
		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
		case bpf_ctx_range(struct __sk_buff, tstamp):
		case bpf_ctx_range(struct __sk_buff, queue_mapping):
			break;
		default:
			return false;
		}
	}

	switch (off) {
	case bpf_ctx_range(struct __sk_buff, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case bpf_ctx_range(struct __sk_buff, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case bpf_ctx_range(struct __sk_buff, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
		return false;
	}

	return bpf_skb_is_valid_access(off, size, type, prog, info);
}

static bool __is_valid_xdp_access(int off, int size)
{
	if (off < 0 || off >= sizeof(struct xdp_md))
		return false;
	if (off % size != 0)
		return false;
	if (size != sizeof(__u32))
		return false;

	return true;
}

static bool xdp_is_valid_access(int off, int size,
				enum bpf_access_type type,
				const struct bpf_prog *prog,
				struct bpf_insn_access_aux *info)
{
	if (type == BPF_WRITE) {
		if (bpf_prog_is_dev_bound(prog->aux)) {
			switch (off) {
			case offsetof(struct xdp_md, rx_queue_index):
				return __is_valid_xdp_access(off, size);
			}
		}
		return false;
	}

	switch (off) {
	case offsetof(struct xdp_md, data):
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct xdp_md, data_meta):
		info->reg_type = PTR_TO_PACKET_META;
		break;
	case offsetof(struct xdp_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		break;
	}

	return __is_valid_xdp_access(off, size);
}

void bpf_warn_invalid_xdp_action(u32 act)
{
	const u32 act_max = XDP_REDIRECT;

	WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
		  act > act_max ? "Illegal" : "Driver unsupported",
		  act);
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);

static bool sock_addr_is_valid_access(int off, int size,
				      enum bpf_access_type type,
				      const struct bpf_prog *prog,
				      struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sock_addr))
		return false;
	if (off % size != 0)
		return false;

	/* Disallow access to IPv6 fields from IPv4 context and vice
	 * versa.
	 */
	switch (off) {
	case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET4_BIND:
		case BPF_CGROUP_INET4_CONNECT:
		case BPF_CGROUP_UDP4_SENDMSG:
			break;
		default:
			return false;
		}
		break;
	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_INET6_BIND:
		case BPF_CGROUP_INET6_CONNECT:
		case BPF_CGROUP_UDP6_SENDMSG:
			break;
		default:
			return false;
		}
		break;
	case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_UDP4_SENDMSG:
			break;
		default:
			return false;
		}
		break;
	case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
				msg_src_ip6[3]):
		switch (prog->expected_attach_type) {
		case BPF_CGROUP_UDP6_SENDMSG:
			break;
		default:
			return false;
		}
		break;
	}

	switch (off) {
	case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
	case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
	case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
				msg_src_ip6[3]):
		/* Only narrow read access allowed for now. */
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			if (!bpf_ctx_narrow_access_ok(off, size, size_default))
				return false;
		} else {
			if (size != size_default)
				return false;
		}
		break;
	case bpf_ctx_range(struct bpf_sock_addr, user_port):
		if (size != size_default)
			return false;
		break;
	case offsetof(struct bpf_sock_addr, sk):
		if (type != BPF_READ)
			return false;
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	default:
		if (type == BPF_READ) {
			if (size != size_default)
				return false;
		} else {
			return false;
		}
	}

	return true;
}

6853
44f0e430
LB
6854static bool sock_ops_is_valid_access(int off, int size,
6855 enum bpf_access_type type,
5e43f899 6856 const struct bpf_prog *prog,
44f0e430 6857 struct bpf_insn_access_aux *info)
40304b2a 6858{
44f0e430
LB
6859 const int size_default = sizeof(__u32);
6860
40304b2a
LB
6861 if (off < 0 || off >= sizeof(struct bpf_sock_ops))
6862 return false;
44f0e430 6863
40304b2a
LB
6864 /* The verifier guarantees that size > 0. */
6865 if (off % size != 0)
6866 return false;
40304b2a 6867
40304b2a
LB
6868 if (type == BPF_WRITE) {
6869 switch (off) {
2585cd62 6870 case offsetof(struct bpf_sock_ops, reply):
6f9bd3d7 6871 case offsetof(struct bpf_sock_ops, sk_txhash):
44f0e430
LB
6872 if (size != size_default)
6873 return false;
40304b2a
LB
6874 break;
6875 default:
6876 return false;
6877 }
44f0e430
LB
6878 } else {
6879 switch (off) {
6880 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
6881 bytes_acked):
6882 if (size != sizeof(__u64))
6883 return false;
6884 break;
6885 default:
6886 if (size != size_default)
6887 return false;
6888 break;
6889 }
40304b2a
LB
6890 }
6891
44f0e430 6892 return true;
40304b2a
LB
6893}
6894
8a31db56
JF
6895static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
6896 const struct bpf_prog *prog)
6897{
047b0ecd 6898 return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
8a31db56
JF
6899}
6900
b005fd18
JF
6901static bool sk_skb_is_valid_access(int off, int size,
6902 enum bpf_access_type type,
5e43f899 6903 const struct bpf_prog *prog,
b005fd18
JF
6904 struct bpf_insn_access_aux *info)
6905{
de8f3a83
DB
6906 switch (off) {
6907 case bpf_ctx_range(struct __sk_buff, tc_classid):
6908 case bpf_ctx_range(struct __sk_buff, data_meta):
f11216b2 6909 case bpf_ctx_range(struct __sk_buff, tstamp):
e3da08d0 6910 case bpf_ctx_range(struct __sk_buff, wire_len):
de8f3a83
DB
6911 return false;
6912 }
6913
8a31db56
JF
6914 if (type == BPF_WRITE) {
6915 switch (off) {
8a31db56
JF
6916 case bpf_ctx_range(struct __sk_buff, tc_index):
6917 case bpf_ctx_range(struct __sk_buff, priority):
6918 break;
6919 default:
6920 return false;
6921 }
6922 }
6923
b005fd18 6924 switch (off) {
f7e9cb1e 6925 case bpf_ctx_range(struct __sk_buff, mark):
8a31db56 6926 return false;
b005fd18
JF
6927 case bpf_ctx_range(struct __sk_buff, data):
6928 info->reg_type = PTR_TO_PACKET;
6929 break;
6930 case bpf_ctx_range(struct __sk_buff, data_end):
6931 info->reg_type = PTR_TO_PACKET_END;
6932 break;
6933 }
6934
5e43f899 6935 return bpf_skb_is_valid_access(off, size, type, prog, info);
b005fd18
JF
6936}
6937
4f738adb
JF
6938static bool sk_msg_is_valid_access(int off, int size,
6939 enum bpf_access_type type,
5e43f899 6940 const struct bpf_prog *prog,
4f738adb
JF
6941 struct bpf_insn_access_aux *info)
6942{
6943 if (type == BPF_WRITE)
6944 return false;
6945
bc1b4f01
JF
6946 if (off % size != 0)
6947 return false;
6948
4f738adb
JF
6949 switch (off) {
6950 case offsetof(struct sk_msg_md, data):
6951 info->reg_type = PTR_TO_PACKET;
303def35
JF
6952 if (size != sizeof(__u64))
6953 return false;
4f738adb
JF
6954 break;
6955 case offsetof(struct sk_msg_md, data_end):
6956 info->reg_type = PTR_TO_PACKET_END;
303def35
JF
6957 if (size != sizeof(__u64))
6958 return false;
4f738adb 6959 break;
bc1b4f01
JF
6960 case bpf_ctx_range(struct sk_msg_md, family):
6961 case bpf_ctx_range(struct sk_msg_md, remote_ip4):
6962 case bpf_ctx_range(struct sk_msg_md, local_ip4):
6963 case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
6964 case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
6965 case bpf_ctx_range(struct sk_msg_md, remote_port):
6966 case bpf_ctx_range(struct sk_msg_md, local_port):
6967 case bpf_ctx_range(struct sk_msg_md, size):
303def35
JF
6968 if (size != sizeof(__u32))
6969 return false;
bc1b4f01
JF
6970 break;
6971 default:
4f738adb 6972 return false;
bc1b4f01 6973 }
4f738adb
JF
6974 return true;
6975}
6976
d58e468b
PP
6977static bool flow_dissector_is_valid_access(int off, int size,
6978 enum bpf_access_type type,
6979 const struct bpf_prog *prog,
6980 struct bpf_insn_access_aux *info)
6981{
089b19a9
SF
6982 const int size_default = sizeof(__u32);
6983
6984 if (off < 0 || off >= sizeof(struct __sk_buff))
6985 return false;
6986
2ee7fba0
SF
6987 if (type == BPF_WRITE)
6988 return false;
d58e468b
PP
6989
6990 switch (off) {
6991 case bpf_ctx_range(struct __sk_buff, data):
089b19a9
SF
6992 if (size != size_default)
6993 return false;
d58e468b 6994 info->reg_type = PTR_TO_PACKET;
089b19a9 6995 return true;
d58e468b 6996 case bpf_ctx_range(struct __sk_buff, data_end):
089b19a9
SF
6997 if (size != size_default)
6998 return false;
d58e468b 6999 info->reg_type = PTR_TO_PACKET_END;
089b19a9 7000 return true;
b7df9ada 7001 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
089b19a9
SF
7002 if (size != sizeof(__u64))
7003 return false;
d58e468b 7004 info->reg_type = PTR_TO_FLOW_KEYS;
089b19a9 7005 return true;
2ee7fba0 7006 default:
d58e468b
PP
7007 return false;
7008 }
089b19a9 7009}
d58e468b 7010
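/* Flow dissector programs run on struct bpf_flow_dissector rather than
 * on an skb, so data, data_end and flow_keys are loaded from there.
 */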
static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
					     const struct bpf_insn *si,
					     struct bpf_insn *insn_buf,
					     struct bpf_prog *prog,
					     u32 *target_size)

{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct __sk_buff, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_flow_dissector, data));
		break;

	case offsetof(struct __sk_buff, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_flow_dissector, data_end));
		break;

	case offsetof(struct __sk_buff, flow_keys):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_flow_dissector, flow_keys));
		break;
	}

	return insn - insn_buf;
}

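/* Rewrite __sk_buff field accesses into loads/stores on the underlying
 * struct sk_buff (and on qdisc_skb_cb for cb[] and tc_classid), recording
 * the target field size via bpf_target_off() for narrow accesses.
 */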
static u32 bpf_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, len):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, len, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, protocol):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, protocol, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, vlan_proto):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, vlan_proto, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, priority):
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, priority, 4,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, priority, 4,
							     target_size));
		break;

	case offsetof(struct __sk_buff, ingress_ifindex):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, skb_iif, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct net_device, ifindex, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, hash):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, hash, 4,
						     target_size));
		break;

	case offsetof(struct __sk_buff, mark):
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, mark, 4,
							     target_size));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff, mark, 4,
							     target_size));
		break;

	case offsetof(struct __sk_buff, pkt_type):
		*target_size = 1;
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
				      PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
#endif
		break;

	case offsetof(struct __sk_buff, queue_mapping):
		if (type == BPF_WRITE) {
			*insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff,
							     queue_mapping,
							     2, target_size));
		} else {
			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
					      bpf_target_off(struct sk_buff,
							     queue_mapping,
							     2, target_size));
		}
		break;

	case offsetof(struct __sk_buff, vlan_present):
		*target_size = 1;
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
				      PKT_VLAN_PRESENT_OFFSET());
		if (PKT_VLAN_PRESENT_BIT)
			*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
		if (PKT_VLAN_PRESENT_BIT < 7)
			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
		break;

	case offsetof(struct __sk_buff, vlan_tci):
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      bpf_target_off(struct sk_buff, vlan_tci, 2,
						     target_size));
		break;

	case offsetof(struct __sk_buff, cb[0]) ...
	     offsetofend(struct __sk_buff, cb[4]) - 1:
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
		BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
			      offsetof(struct qdisc_skb_cb, data)) %
			     sizeof(__u64));

		prog->cb_access = 1;
		off  = si->off;
		off -= offsetof(struct __sk_buff, cb[0]);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct qdisc_skb_cb, data);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		else
			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
					      si->src_reg, off);
		break;

	case offsetof(struct __sk_buff, tc_classid):
		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);

		off  = si->off;
		off -= offsetof(struct __sk_buff, tc_classid);
		off += offsetof(struct sk_buff, cb);
7181 off += offsetof(struct qdisc_skb_cb, tc_classid);
f96da094 7182 *target_size = 2;
09c37a2c 7183 if (type == BPF_WRITE)
6b8cc1d1
DB
7184 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
7185 si->src_reg, off);
09c37a2c 7186 else
6b8cc1d1
DB
7187 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
7188 si->src_reg, off);
045efa82
DB
7189 break;
7190
db58ba45 7191 case offsetof(struct __sk_buff, data):
f035a515 7192 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
6b8cc1d1 7193 si->dst_reg, si->src_reg,
db58ba45
AS
7194 offsetof(struct sk_buff, data));
7195 break;
7196
de8f3a83
DB
7197 case offsetof(struct __sk_buff, data_meta):
7198 off = si->off;
7199 off -= offsetof(struct __sk_buff, data_meta);
7200 off += offsetof(struct sk_buff, cb);
7201 off += offsetof(struct bpf_skb_data_end, data_meta);
7202 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
7203 si->src_reg, off);
7204 break;
7205
db58ba45 7206 case offsetof(struct __sk_buff, data_end):
6b8cc1d1
DB
7207 off = si->off;
7208 off -= offsetof(struct __sk_buff, data_end);
7209 off += offsetof(struct sk_buff, cb);
7210 off += offsetof(struct bpf_skb_data_end, data_end);
7211 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
7212 si->src_reg, off);
db58ba45
AS
7213 break;
7214
d691f9e8
AS
7215 case offsetof(struct __sk_buff, tc_index):
7216#ifdef CONFIG_NET_SCHED
d691f9e8 7217 if (type == BPF_WRITE)
6b8cc1d1 7218 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
7219 bpf_target_off(struct sk_buff, tc_index, 2,
7220 target_size));
d691f9e8 7221 else
6b8cc1d1 7222 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
7223 bpf_target_off(struct sk_buff, tc_index, 2,
7224 target_size));
d691f9e8 7225#else
2ed46ce4 7226 *target_size = 2;
d691f9e8 7227 if (type == BPF_WRITE)
6b8cc1d1 7228 *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
d691f9e8 7229 else
6b8cc1d1 7230 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
b1d9fc41
DB
7231#endif
7232 break;
7233
7234 case offsetof(struct __sk_buff, napi_id):
7235#if defined(CONFIG_NET_RX_BUSY_POLL)
b1d9fc41 7236 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
7237 bpf_target_off(struct sk_buff, napi_id, 4,
7238 target_size));
b1d9fc41
DB
7239 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
7240 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
7241#else
2ed46ce4 7242 *target_size = 4;
b1d9fc41 7243 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
d691f9e8 7244#endif
6b8cc1d1 7245 break;
8a31db56
JF
7246 case offsetof(struct __sk_buff, family):
7247 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
7248
7249 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7250 si->dst_reg, si->src_reg,
7251 offsetof(struct sk_buff, sk));
7252 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7253 bpf_target_off(struct sock_common,
7254 skc_family,
7255 2, target_size));
7256 break;
7257 case offsetof(struct __sk_buff, remote_ip4):
7258 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
7259
7260 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7261 si->dst_reg, si->src_reg,
7262 offsetof(struct sk_buff, sk));
7263 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7264 bpf_target_off(struct sock_common,
7265 skc_daddr,
7266 4, target_size));
7267 break;
7268 case offsetof(struct __sk_buff, local_ip4):
7269 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7270 skc_rcv_saddr) != 4);
7271
7272 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7273 si->dst_reg, si->src_reg,
7274 offsetof(struct sk_buff, sk));
7275 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7276 bpf_target_off(struct sock_common,
7277 skc_rcv_saddr,
7278 4, target_size));
7279 break;
7280 case offsetof(struct __sk_buff, remote_ip6[0]) ...
7281 offsetof(struct __sk_buff, remote_ip6[3]):
7282#if IS_ENABLED(CONFIG_IPV6)
7283 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7284 skc_v6_daddr.s6_addr32[0]) != 4);
7285
7286 off = si->off;
7287 off -= offsetof(struct __sk_buff, remote_ip6[0]);
7288
7289 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7290 si->dst_reg, si->src_reg,
7291 offsetof(struct sk_buff, sk));
7292 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7293 offsetof(struct sock_common,
7294 skc_v6_daddr.s6_addr32[0]) +
7295 off);
7296#else
7297 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7298#endif
7299 break;
7300 case offsetof(struct __sk_buff, local_ip6[0]) ...
7301 offsetof(struct __sk_buff, local_ip6[3]):
7302#if IS_ENABLED(CONFIG_IPV6)
7303 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7304 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
7305
7306 off = si->off;
7307 off -= offsetof(struct __sk_buff, local_ip6[0]);
7308
7309 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7310 si->dst_reg, si->src_reg,
7311 offsetof(struct sk_buff, sk));
7312 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7313 offsetof(struct sock_common,
7314 skc_v6_rcv_saddr.s6_addr32[0]) +
7315 off);
7316#else
7317 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7318#endif
7319 break;
7320
7321 case offsetof(struct __sk_buff, remote_port):
7322 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
7323
7324 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7325 si->dst_reg, si->src_reg,
7326 offsetof(struct sk_buff, sk));
7327 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7328 bpf_target_off(struct sock_common,
7329 skc_dport,
7330 2, target_size));
7331#ifndef __BIG_ENDIAN_BITFIELD
7332 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
7333#endif
7334 break;
7335
7336 case offsetof(struct __sk_buff, local_port):
7337 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
7338
7339 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7340 si->dst_reg, si->src_reg,
7341 offsetof(struct sk_buff, sk));
7342 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7343 bpf_target_off(struct sock_common,
7344 skc_num, 2, target_size));
7345 break;
d58e468b 7346
f11216b2
VD
7347 case offsetof(struct __sk_buff, tstamp):
7348 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8);
7349
7350 if (type == BPF_WRITE)
7351 *insn++ = BPF_STX_MEM(BPF_DW,
7352 si->dst_reg, si->src_reg,
7353 bpf_target_off(struct sk_buff,
7354 tstamp, 8,
7355 target_size));
7356 else
7357 *insn++ = BPF_LDX_MEM(BPF_DW,
7358 si->dst_reg, si->src_reg,
7359 bpf_target_off(struct sk_buff,
7360 tstamp, 8,
7361 target_size));
e3da08d0
PP
7362 break;
7363
d9ff286a
ED
7364 case offsetof(struct __sk_buff, gso_segs):
7365 /* si->dst_reg = skb_shinfo(SKB); */
7366#ifdef NET_SKBUFF_DATA_USES_OFFSET
7367 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
7368 si->dst_reg, si->src_reg,
7369 offsetof(struct sk_buff, head));
7370 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7371 BPF_REG_AX, si->src_reg,
7372 offsetof(struct sk_buff, end));
7373 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
7374#else
7375 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7376 si->dst_reg, si->src_reg,
7377 offsetof(struct sk_buff, end));
7378#endif
7379 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
7380 si->dst_reg, si->dst_reg,
7381 bpf_target_off(struct skb_shared_info,
7382 gso_segs, 2,
7383 target_size));
7384 break;
e3da08d0
PP
7385 case offsetof(struct __sk_buff, wire_len):
7386 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
7387
7388 off = si->off;
7389 off -= offsetof(struct __sk_buff, wire_len);
7390 off += offsetof(struct sk_buff, cb);
7391 off += offsetof(struct qdisc_skb_cb, pkt_len);
7392 *target_size = 4;
7393 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
46f8bc92
MKL
7394 break;
7395
7396 case offsetof(struct __sk_buff, sk):
7397 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7398 si->dst_reg, si->src_reg,
7399 offsetof(struct sk_buff, sk));
7400 break;
9bac3d6d
AS
7401 }
7402
7403 return insn - insn_buf;
89aa0758
AS
7404}
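
/* As an illustrative aside (not from the original file): after this
 * conversion, a program's read of __sk_buff->mark, i.e.
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, mark));
 *
 * executes as a plain load of sk_buff->mark, with bpf_target_off()
 * fixing up the offset for narrow loads on big-endian hosts.
 */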

u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
				const struct bpf_insn *si,
				struct bpf_insn *insn_buf,
				struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock, bound_dev_if):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_bound_dev_if));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_bound_dev_if));
		break;

	case offsetof(struct bpf_sock, mark):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_mark));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_mark));
		break;

	case offsetof(struct bpf_sock, priority):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_priority));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_priority));
		break;

	case offsetof(struct bpf_sock, family):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock_common, skc_family),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common,
				       skc_family,
				       FIELD_SIZEOF(struct sock_common,
						    skc_family),
				       target_size));
		break;

	case offsetof(struct bpf_sock, type):
		BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
		*target_size = 2;
		break;

	case offsetof(struct bpf_sock, protocol):
		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
		*target_size = 1;
		break;

	case offsetof(struct bpf_sock, src_ip4):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_rcv_saddr,
				       FIELD_SIZEOF(struct sock_common,
						    skc_rcv_saddr),
				       target_size));
		break;

	case offsetof(struct bpf_sock, dst_ip4):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_daddr,
				       FIELD_SIZEOF(struct sock_common,
						    skc_daddr),
				       target_size));
		break;

	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		off = si->off;
		off -= offsetof(struct bpf_sock, src_ip6[0]);
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(
				struct sock_common,
				skc_v6_rcv_saddr.s6_addr32[0],
				FIELD_SIZEOF(struct sock_common,
					     skc_v6_rcv_saddr.s6_addr32[0]),
				target_size) + off);
#else
		(void)off;
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		off = si->off;
		off -= offsetof(struct bpf_sock, dst_ip6[0]);
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common,
				       skc_v6_daddr.s6_addr32[0],
				       FIELD_SIZEOF(struct sock_common,
						    skc_v6_daddr.s6_addr32[0]),
				       target_size) + off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
		*target_size = 4;
#endif
		break;

	case offsetof(struct bpf_sock, src_port):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock_common, skc_num),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_num,
				       FIELD_SIZEOF(struct sock_common,
						    skc_num),
				       target_size));
		break;

	case offsetof(struct bpf_sock, dst_port):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_dport,
				       FIELD_SIZEOF(struct sock_common,
						    skc_dport),
				       target_size));
		break;

	case offsetof(struct bpf_sock, state):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock_common, skc_state),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_state,
				       FIELD_SIZEOF(struct sock_common,
						    skc_state),
				       target_size));
		break;
	}

	return insn - insn_buf;
}
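
/* Hedged sketch of the bitfield handling above: sk_type and sk_protocol
 * share the bitfield word at __sk_flags_offset, so a read of
 * bpf_sock->protocol is lowered to roughly
 *
 *	dst = *(u32 *)(sk + offsetof(struct sock, __sk_flags_offset));
 *	dst &= SK_FL_PROTO_MASK;
 *	dst >>= SK_FL_PROTO_SHIFT;
 *
 * Mask and shift are endianness-aware, so no extra fixup is needed.
 */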

static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct __sk_buff, ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct net_device, ifindex, 4,
						     target_size));
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}

static u32 xdp_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct xdp_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data));
		break;
	case offsetof(struct xdp_md, data_meta):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_meta));
		break;
	case offsetof(struct xdp_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_end));
		break;
	case offsetof(struct xdp_md, ingress_ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, rxq));
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_rxq_info, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct net_device, ifindex));
		break;
	case offsetof(struct xdp_md, rx_queue_index):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, rxq));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_rxq_info,
					       queue_index));
		break;
	}

	return insn - insn_buf;
}
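
/* Illustrative only (sample program, not part of this file): an XDP
 * bounds check such as
 *
 *	void *data     = (void *)(long)ctx->data;
 *	void *data_end = (void *)(long)ctx->data_end;
 *
 *	if (data + sizeof(struct ethhdr) > data_end)
 *		return XDP_DROP;
 *
 * ends up, after the conversion above, comparing the real
 * xdp_buff->data and xdp_buff->data_end pointers.
 */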

/* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF where S is type of
 * context Structure, F is Field in context structure that contains a pointer
 * to Nested Structure of type NS that has the field NF.
 *
 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to the caller to
 * make sure that SIZE is not greater than the actual size of S.F.NF.
 *
 * If offset OFF is provided, the load happens from that offset relative to
 * the offset of NF.
 */
#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF)	       \
	do {								       \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg,    \
				      si->src_reg, offsetof(S, F));	       \
		*insn++ = BPF_LDX_MEM(					       \
			SIZE, si->dst_reg, si->dst_reg,			       \
			bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),	       \
				       target_size)			       \
				+ OFF);					       \
	} while (0)

#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF)			       \
	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF,		       \
					     BPF_FIELD_SIZEOF(NS, NF), 0)

/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operations.
 *
 * It doesn't support the SIZE argument though, since narrow stores are not
 * supported for now.
 *
 * In addition it uses Temporary Field TF (member of struct S) as the 3rd
 * "register" since the two registers available in convert_ctx_access are not
 * enough: we can't overwrite either SRC, since it contains the value to
 * store, or DST, since it contains the pointer to context that may be used
 * by later instructions. But we need a temporary place to save the pointer
 * to the nested structure whose field we want to store to.
 */
#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF)		       \
	do {								       \
		int tmp_reg = BPF_REG_9;				       \
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	       \
			--tmp_reg;					       \
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	       \
			--tmp_reg;					       \
		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg,	       \
				      offsetof(S, TF));			       \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,	       \
				      si->dst_reg, offsetof(S, F));	       \
		*insn++ = BPF_STX_MEM(					       \
			BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg,	       \
			bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),	       \
				       target_size)			       \
				+ OFF);					       \
		*insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,	       \
				      offsetof(S, TF));			       \
	} while (0)

#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
						      TF)		       \
	do {								       \
		if (type == BPF_WRITE) {				       \
			SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF,   \
							 TF);		       \
		} else {						       \
			SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(		       \
				S, NS, F, NF, SIZE, OFF);		       \
		}							       \
	} while (0)

#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF)		       \
	SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(			       \
		S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
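
/* As a sketch of what these generate (illustrative, not emitted
 * verbatim): SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
 * struct sockaddr, uaddr, sa_family) expands to roughly
 *
 *	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, uaddr),
 *			      si->dst_reg, si->src_reg,
 *			      offsetof(struct bpf_sock_addr_kern, uaddr));
 *	*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
 *			      bpf_target_off(struct sockaddr, sa_family, 2,
 *					     target_size));
 *
 * i.e. dereference the uaddr pointer first, then load sa_family through
 * it (BPF_H stands in for BPF_FIELD_SIZEOF() since sa_family_t is two
 * bytes). The store variant brackets the same sequence with a
 * spill/fill of tmp_reg through the TF slot.
 */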

static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
					const struct bpf_insn *si,
					struct bpf_insn *insn_buf,
					struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock_addr, user_family):
		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
					    struct sockaddr, uaddr, sa_family);
		break;

	case offsetof(struct bpf_sock_addr, user_ip4):
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
			sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
		break;

	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
		off = si->off;
		off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
			sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
			tmp_reg);
		break;

	case offsetof(struct bpf_sock_addr, user_port):
		/* To get a port we need to know the sa_family first and then
		 * treat sockaddr as either sockaddr_in or sockaddr_in6.
		 * We can simplify this, though, since the port field has the
		 * same offset and size in both structures. Here we check this
		 * invariant and use just one of the structures if it's true.
		 */
		BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
			     offsetof(struct sockaddr_in6, sin6_port));
		BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
			     FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
						     struct sockaddr_in6, uaddr,
						     sin6_port, tmp_reg);
		break;

	case offsetof(struct bpf_sock_addr, family):
		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
					    struct sock, sk, sk_family);
		break;

	case offsetof(struct bpf_sock_addr, type):
		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sock, sk,
			__sk_flags_offset, BPF_W, 0);
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
		break;

	case offsetof(struct bpf_sock_addr, protocol):
		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sock, sk,
			__sk_flags_offset, BPF_W, 0);
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
					SK_FL_PROTO_SHIFT);
		break;

	case offsetof(struct bpf_sock_addr, msg_src_ip4):
		/* Treat t_ctx as struct in_addr for msg_src_ip4. */
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct in_addr, t_ctx,
			s_addr, BPF_SIZE(si->code), 0, tmp_reg);
		break;

	case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
				msg_src_ip6[3]):
		off = si->off;
		off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
		/* Treat t_ctx as struct in6_addr for msg_src_ip6. */
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
			s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
		break;
	case offsetof(struct bpf_sock_addr, sk):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_addr_kern, sk));
		break;
	}

	return insn - insn_buf;
}
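
/* Illustrative sample (hypothetical program, not from this file): with
 * the conversion above, a cgroup sock_addr program can inspect and
 * rewrite the address passed to connect() transparently, e.g.
 *
 *	SEC("cgroup/connect4")
 *	int connect4(struct bpf_sock_addr *ctx)
 *	{
 *		if (ctx->user_ip4 == bpf_htonl(0x7f000001))	// 127.0.0.1
 *			ctx->user_port = bpf_htons(4040);
 *		return 1;
 *	}
 *
 * Loads and stores of user_ip4/user_port land on the user-supplied
 * sockaddr via the uaddr pointer handled above; both fields are kept in
 * network byte order.
 */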

static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
				       const struct bpf_insn *si,
				       struct bpf_insn *insn_buf,
				       struct bpf_prog *prog,
				       u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

/* Helper macro for adding read access to tcp_sock or sock fields. */
#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
	do {								      \
		BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >		      \
			     FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern,     \
						is_fullsock),		      \
				      si->dst_reg, si->src_reg,		      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       is_fullsock));		      \
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);	      \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern, sk),\
				      si->dst_reg, si->src_reg,		      \
				      offsetof(struct bpf_sock_ops_kern, sk));\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,		      \
						       OBJ_FIELD),	      \
				      si->dst_reg, si->dst_reg,		      \
				      offsetof(OBJ, OBJ_FIELD));	      \
	} while (0)

#define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
	SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)

/* Helper macro for adding write access to tcp_sock or sock fields.
 * The macro is called with two registers, dst_reg which contains a pointer
 * to ctx (context) and src_reg which contains the value that should be
 * stored. However, we need an additional register since we cannot overwrite
 * dst_reg because it may be used later in the program.
 * Instead we "borrow" one of the other registers. We first save its value
 * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
 * it at the end of the macro.
 */
#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			      \
	do {								      \
		int reg = BPF_REG_9;					      \
		BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >		      \
			     FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
		if (si->dst_reg == reg || si->src_reg == reg)		      \
			reg--;						      \
		if (si->dst_reg == reg || si->src_reg == reg)		      \
			reg--;						      \
		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg,		      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       temp));			      \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern,     \
						is_fullsock),		      \
				      reg, si->dst_reg,			      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       is_fullsock));		      \
		*insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);		      \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			      \
						struct bpf_sock_ops_kern, sk),\
				      reg, si->dst_reg,			      \
				      offsetof(struct bpf_sock_ops_kern, sk));\
		*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),	      \
				      reg, si->src_reg,			      \
				      offsetof(OBJ, OBJ_FIELD));	      \
		*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg,		      \
				      offsetof(struct bpf_sock_ops_kern,      \
					       temp));			      \
	} while (0)

#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE)	      \
	do {								      \
		if (TYPE == BPF_WRITE)					      \
			SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	      \
		else							      \
			SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	      \
	} while (0)

	CONVERT_COMMON_TCP_SOCK_FIELDS(struct bpf_sock_ops,
				       SOCK_OPS_GET_TCP_SOCK_FIELD);

	if (insn > insn_buf)
		return insn - insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sock_ops, op) ...
	     offsetof(struct bpf_sock_ops, replylong[3]):
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
		off = si->off;
		off -= offsetof(struct bpf_sock_ops, op);
		off += offsetof(struct bpf_sock_ops_kern, op);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		break;

	case offsetof(struct bpf_sock_ops, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;

	case offsetof(struct bpf_sock_ops, local_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, remote_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;

	case offsetof(struct bpf_sock_ops, is_fullsock):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern,
						is_fullsock),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       is_fullsock));
		break;

	case offsetof(struct bpf_sock_ops, state):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_state));
		break;

	case offsetof(struct bpf_sock_ops, rtt_min):
		BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
			     sizeof(struct minmax));
		BUILD_BUG_ON(sizeof(struct minmax) <
			     sizeof(struct minmax_sample));

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct tcp_sock, rtt_min) +
				      FIELD_SIZEOF(struct minmax_sample, t));
		break;

	case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
		SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, sk_txhash):
		SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
					  struct sock, type);
		break;
	}
	return insn - insn_buf;
}
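
/* Hedged sketch of the read path above: for a tcp_sock field such as
 * snd_cwnd (illustrative), SOCK_OPS_GET_TCP_SOCK_FIELD() guards the
 * dereference behind is_fullsock, roughly
 *
 *	dst = bpf_sock_ops_kern->is_fullsock;
 *	if (dst == 0)
 *		goto done;			// dst_reg already reads 0
 *	sk  = bpf_sock_ops_kern->sk;
 *	dst = ((struct tcp_sock *)sk)->snd_cwnd;
 *
 * so request and timewait sockets read back zero instead of
 * dereferencing tcp_sock fields they do not have.
 */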

static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, data_end):
		off = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct tcp_skb_cb, bpf.data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}

static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
#if IS_ENABLED(CONFIG_IPV6)
	int off;
#endif

	/* The ctx conversion relies on the sg element being first in the struct. */
	BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);

	switch (si->off) {
	case offsetof(struct sk_msg_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data));
		break;
	case offsetof(struct sk_msg_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, data_end));
		break;
	case offsetof(struct sk_msg_md, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;

	case offsetof(struct sk_msg_md, remote_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;

	case offsetof(struct sk_msg_md, local_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;

	case offsetof(struct sk_msg_md, remote_ip6[0]) ...
	     offsetof(struct sk_msg_md, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct sk_msg_md, local_ip6[0]) ...
	     offsetof(struct sk_msg_md, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct sk_msg_md, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct sk_msg_md, remote_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct sk_msg_md, local_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
						struct sk_msg, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;

	case offsetof(struct sk_msg_md, size):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg_sg, size));
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops sk_filter_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops sk_filter_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops tc_cls_act_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops xdp_verifier_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
	.gen_prologue		= bpf_noop_prologue,
};

const struct bpf_prog_ops xdp_prog_ops = {
	.test_run		= bpf_prog_test_run_xdp,
};

const struct bpf_verifier_ops cg_skb_verifier_ops = {
	.get_func_proto		= cg_skb_func_proto,
	.is_valid_access	= cg_skb_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops cg_skb_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_in_verifier_ops = {
	.get_func_proto		= lwt_in_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_in_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_out_verifier_ops = {
	.get_func_proto		= lwt_out_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_out_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops lwt_xmit_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
	.get_func_proto		= lwt_seg6local_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_seg6local_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops cg_sock_verifier_ops = {
	.get_func_proto		= sock_filter_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= bpf_sock_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_prog_ops = {
};

const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
	.get_func_proto		= sock_addr_func_proto,
	.is_valid_access	= sock_addr_is_valid_access,
	.convert_ctx_access	= sock_addr_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_addr_prog_ops = {
};

const struct bpf_verifier_ops sock_ops_verifier_ops = {
	.get_func_proto		= sock_ops_func_proto,
	.is_valid_access	= sock_ops_is_valid_access,
	.convert_ctx_access	= sock_ops_convert_ctx_access,
};

const struct bpf_prog_ops sock_ops_prog_ops = {
};

const struct bpf_verifier_ops sk_skb_verifier_ops = {
	.get_func_proto		= sk_skb_func_proto,
	.is_valid_access	= sk_skb_is_valid_access,
	.convert_ctx_access	= sk_skb_convert_ctx_access,
	.gen_prologue		= sk_skb_prologue,
};

const struct bpf_prog_ops sk_skb_prog_ops = {
};

const struct bpf_verifier_ops sk_msg_verifier_ops = {
	.get_func_proto		= sk_msg_func_proto,
	.is_valid_access	= sk_msg_is_valid_access,
	.convert_ctx_access	= sk_msg_convert_ctx_access,
	.gen_prologue		= bpf_noop_prologue,
};

const struct bpf_prog_ops sk_msg_prog_ops = {
};

const struct bpf_verifier_ops flow_dissector_verifier_ops = {
	.get_func_proto		= flow_dissector_func_proto,
	.is_valid_access	= flow_dissector_is_valid_access,
	.convert_ctx_access	= flow_dissector_convert_ctx_access,
};

const struct bpf_prog_ops flow_dissector_prog_ops = {
	.test_run		= bpf_prog_test_run_flow_dissector,
};

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that was originally attached, so no
	 * conversion/decode is needed anymore. eBPF programs that have
	 * no original classic program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only enquires about the number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}
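
/* Rough user-space usage sketch (illustrative, not part of this file):
 * because the return value counts filter blocks rather than bytes, a
 * dump is typically done in two steps,
 *
 *	socklen_t len = 0;
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len);	// learn block count
 *	struct sock_filter *prog = calloc(len, sizeof(*prog));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, prog, &len);	// copy the program
 *
 * assuming the caller checks both calls for errors.
 */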

#ifdef CONFIG_INET
struct sk_reuseport_kern {
	struct sk_buff *skb;
	struct sock *sk;
	struct sock *selected_sk;
	void *data_end;
	u32 hash;
	u32 reuseport_id;
	bool bind_inany;
};

static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
				    struct sock_reuseport *reuse,
				    struct sock *sk, struct sk_buff *skb,
				    u32 hash)
{
	reuse_kern->skb = skb;
	reuse_kern->sk = sk;
	reuse_kern->selected_sk = NULL;
	reuse_kern->data_end = skb->data + skb_headlen(skb);
	reuse_kern->hash = hash;
	reuse_kern->reuseport_id = reuse->reuseport_id;
	reuse_kern->bind_inany = reuse->bind_inany;
}

struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
				  struct bpf_prog *prog, struct sk_buff *skb,
				  u32 hash)
{
	struct sk_reuseport_kern reuse_kern;
	enum sk_action action;

	bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
	action = BPF_PROG_RUN(prog, &reuse_kern);

	if (action == SK_PASS)
		return reuse_kern.selected_sk;
	else
		return ERR_PTR(-ECONNREFUSED);
}

BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
	   struct bpf_map *, map, void *, key, u32, flags)
{
	struct sock_reuseport *reuse;
	struct sock *selected_sk;

	selected_sk = map->ops->map_lookup_elem(map, key);
	if (!selected_sk)
		return -ENOENT;

	reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
	if (!reuse)
		/* selected_sk is unhashed (e.g. by close()) after the
		 * above map_lookup_elem(). Treat selected_sk as if it
		 * has already been removed from the map.
		 */
		return -ENOENT;

	if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
		struct sock *sk;

		if (unlikely(!reuse_kern->reuseport_id))
			/* There is a small race between adding the
			 * sk to the map and setting the
			 * reuse_kern->reuseport_id.
			 * Treat it as if the sk has not been added to
			 * the bpf map yet.
			 */
			return -ENOENT;

		sk = reuse_kern->sk;
		if (sk->sk_protocol != selected_sk->sk_protocol)
			return -EPROTOTYPE;
		else if (sk->sk_family != selected_sk->sk_family)
			return -EAFNOSUPPORT;

		/* Catch all. Likely bound to a different sockaddr. */
		return -EBADFD;
	}

	reuse_kern->selected_sk = selected_sk;

	return 0;
}

static const struct bpf_func_proto sk_select_reuseport_proto = {
	.func		= sk_select_reuseport,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(sk_reuseport_load_bytes,
	   const struct sk_reuseport_kern *, reuse_kern, u32, offset,
	   void *, to, u32, len)
{
	return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
}

static const struct bpf_func_proto sk_reuseport_load_bytes_proto = {
	.func		= sk_reuseport_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_5(sk_reuseport_load_bytes_relative,
	   const struct sk_reuseport_kern *, reuse_kern, u32, offset,
	   void *, to, u32, len, u32, start_header)
{
	return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
					       len, start_header);
}

static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = {
	.func		= sk_reuseport_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
sk_reuseport_func_proto(enum bpf_func_id func_id,
			const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_sk_select_reuseport:
		return &sk_select_reuseport_proto;
	case BPF_FUNC_skb_load_bytes:
		return &sk_reuseport_load_bytes_proto;
	case BPF_FUNC_skb_load_bytes_relative:
		return &sk_reuseport_load_bytes_relative_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static bool
sk_reuseport_is_valid_access(int off, int size,
			     enum bpf_access_type type,
			     const struct bpf_prog *prog,
			     struct bpf_insn_access_aux *info)
{
	const u32 size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct sk_reuseport_md) ||
	    off % size || type != BPF_READ)
		return false;

	switch (off) {
	case offsetof(struct sk_reuseport_md, data):
		info->reg_type = PTR_TO_PACKET;
		return size == sizeof(__u64);

	case offsetof(struct sk_reuseport_md, data_end):
		info->reg_type = PTR_TO_PACKET_END;
		return size == sizeof(__u64);

	case offsetof(struct sk_reuseport_md, hash):
		return size == size_default;

	/* Fields that allow narrowing */
	case offsetof(struct sk_reuseport_md, eth_protocol):
		if (size < FIELD_SIZEOF(struct sk_buff, protocol))
			return false;
		/* fall through */
	case offsetof(struct sk_reuseport_md, ip_protocol):
	case offsetof(struct sk_reuseport_md, bind_inany):
	case offsetof(struct sk_reuseport_md, len):
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);

	default:
		return false;
	}
}

#define SK_REUSEPORT_LOAD_FIELD(F) ({					       \
	*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F),  \
			      si->dst_reg, si->src_reg,			       \
			      bpf_target_off(struct sk_reuseport_kern, F,      \
					     FIELD_SIZEOF(struct sk_reuseport_kern, F), \
					     target_size));		       \
	})

#define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD)				       \
	SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,		       \
				    struct sk_buff,			       \
				    skb,				       \
				    SKB_FIELD)

#define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF)     \
	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern,	       \
					     struct sock,		       \
					     sk,			       \
					     SK_FIELD, BPF_SIZE, EXTRA_OFF)

static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
					   const struct bpf_insn *si,
					   struct bpf_insn *insn_buf,
					   struct bpf_prog *prog,
					   u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct sk_reuseport_md, data):
		SK_REUSEPORT_LOAD_SKB_FIELD(data);
		break;

	case offsetof(struct sk_reuseport_md, len):
		SK_REUSEPORT_LOAD_SKB_FIELD(len);
		break;

	case offsetof(struct sk_reuseport_md, eth_protocol):
		SK_REUSEPORT_LOAD_SKB_FIELD(protocol);
		break;

	case offsetof(struct sk_reuseport_md, ip_protocol):
		BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
		SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
						    BPF_W, 0);
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
					SK_FL_PROTO_SHIFT);
		/* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian
		 * aware. No further narrowing or masking is needed.
		 */
		*target_size = 1;
		break;

	case offsetof(struct sk_reuseport_md, data_end):
		SK_REUSEPORT_LOAD_FIELD(data_end);
		break;

	case offsetof(struct sk_reuseport_md, hash):
		SK_REUSEPORT_LOAD_FIELD(hash);
		break;

	case offsetof(struct sk_reuseport_md, bind_inany):
		SK_REUSEPORT_LOAD_FIELD(bind_inany);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
	.get_func_proto		= sk_reuseport_func_proto,
	.is_valid_access	= sk_reuseport_is_valid_access,
	.convert_ctx_access	= sk_reuseport_convert_ctx_access,
};

const struct bpf_prog_ops sk_reuseport_prog_ops = {
};
#endif /* CONFIG_INET */
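
/* Illustrative sample (hypothetical program and map names, not from this
 * file): selecting a socket out of a REUSEPORT_SOCKARRAY from BPF:
 *
 *	SEC("sk_reuseport")
 *	int select_sock(struct sk_reuseport_md *md)
 *	{
 *		__u32 key = md->hash % NR_SOCKS;	// NR_SOCKS: sample constant
 *
 *		if (bpf_sk_select_reuseport(md, &sock_map, &key, 0) == 0)
 *			return SK_PASS;
 *		return SK_DROP;
 *	}
 *
 * On SK_PASS, bpf_run_sk_reuseport() above hands the selected socket
 * back to the stack; on SK_DROP it returns -ECONNREFUSED.
 */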