Merge branch 'bpf-nfp-shift-insns'
[linux-2.6-block.git] / net / core / filter.c
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/ip_fib.h>
#include <net/flow.h>
#include <net/arp.h>

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);

BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

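/* Helpers backing the eBPF translation of classic BPF LD_ABS/LD_IND.
 * A non-negative offset is read from the linear head when possible and
 * via skb_copy_bits() otherwise; a negative offset selects the classic
 * SKF_LL_OFF/SKF_NET_OFF ancillary areas. On failure -EFAULT is
 * returned, which the emitted code translates into a return value of 0.
 */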
BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return *(u8 *)(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return tmp;
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return *(u8 *)ptr;
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
					 offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u16 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return get_unaligned_be16(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be16_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be16(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u32 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be32(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be32_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be32(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

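/* Emit eBPF instructions that load the sk_buff field behind the given
 * SKF_AD_* ancillary into dst_reg, with src_reg holding the skb pointer.
 * Returns the number of instructions written to insn_buf.
 */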
static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}

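/* Translate a classic BPF ancillary load (LD/LDX with k >= SKF_AD_OFF)
 * into native eBPF. Simple sk_buff fields are inlined via
 * convert_skb_access(), the remaining extensions become calls to the
 * helpers above. Returns false if fp is not an overloaded extension.
 */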
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

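/* Translate classic BPF LD_ABS/LD_IND into native eBPF. For a constant,
 * non-negative and suitably aligned offset, a fast path loads directly
 * from the cached packet head (BPF_REG_D holds skb->data, BPF_REG_H the
 * linear headlen); everything else goes through
 * bpf_skb_load_helper_{8,16,32}(), whose negative error result makes the
 * program exit with return value 0.
 */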
static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
	const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
	int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
	bool endian = BPF_SIZE(fp->code) == BPF_H ||
		      BPF_SIZE(fp->code) == BPF_W;
	bool indirect = BPF_MODE(fp->code) == BPF_IND;
	const int ip_align = NET_IP_ALIGN;
	struct bpf_insn *insn = *insnp;
	int offset = fp->k;

	if (!indirect &&
	    ((unaligned_ok && offset >= 0) ||
	     (!unaligned_ok && offset >= 0 &&
	      offset + ip_align >= 0 &&
	      offset + ip_align % size == 0))) {
		*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
		*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian);
		*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D,
				      offset);
		if (endian)
			*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
		*insn++ = BPF_JMP_A(8);
	}

	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
		if (fp->k)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
	}

	switch (BPF_SIZE(fp->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
		break;
	default:
		return false;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
	*insn = BPF_EXIT_INSN();

	*insnp = insn;
	return true;
}

/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *	@seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *	bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *	bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    convert_bpf_ld_abs(fp, &insn)) {
				*seen_ld_abs = true;
				break;
			}

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len || target < 0)			\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
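			/* Classic BPF mem[K] cells live in the eBPF stack
			 * frame at FP - (K * 4 + 4), one 32-bit slot per cell.
			 */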
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell if not previously written, and we check all branches to
 * be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

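/* Keep a kmemdup()'ed copy of the user's classic filter around, so that
 * it can later be handed back through sk_get_filter().
 */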
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}

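/* Translate the classic BPF program in fp->insns into eBPF, running
 * bpf_convert_filter() twice: a first pass to size the new image, a
 * second one to emit it into the reallocated bpf_prog. Only used when
 * the classic JIT did not pick up the filter.
 */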
static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;
	bool seen_ld_abs = false;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

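/* Publish prog as sk's socket filter under the socket lock, charging it
 * against the socket's optmem limit and releasing a previously attached
 * filter, if any.
 */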
static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

1434
538950a1
CG
1435static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
1436{
1437 struct bpf_prog *old_prog;
1438 int err;
1439
1440 if (bpf_prog_size(prog->len) > sysctl_optmem_max)
1441 return -ENOMEM;
1442
fa463497 1443 if (sk_unhashed(sk) && sk->sk_reuseport) {
538950a1
CG
1444 err = reuseport_alloc(sk);
1445 if (err)
1446 return err;
1447 } else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
1448 /* The socket wasn't bound with SO_REUSEPORT */
1449 return -EINVAL;
1450 }
1451
1452 old_prog = reuseport_attach_prog(sk, prog);
1453 if (old_prog)
1454 bpf_prog_destroy(old_prog);
1455
1456 return 0;
1457}
1458
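/* Build a bpf_prog from a classic sock_fprog handed in from user space:
 * copy the insns in, keep the original around for sk_get_filter() and
 * run bpf_prepare_filter() on the result.
 */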
static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

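/* Per-CPU scratch memory for bpf_csum_diff(), sized like the BPF stack
 * so that from_size + to_size can never exceed it.
 */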
struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

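/* Like __bpf_try_make_writable(), but also recomputes the direct packet
 * access pointers, since skb_ensure_writable() may have reallocated the
 * head.
 */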
static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

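/* Write len bytes from 'from' into the linear area at 'offset', making
 * the head private and writable first. BPF_F_RECOMPUTE_CSUM keeps
 * skb->csum of CHECKSUM_COMPLETE packets in sync across the store,
 * BPF_F_INVALIDATE_HASH drops a now stale skb->hash.
 */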
BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

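/* Like bpf_skb_load_bytes(), but 'offset' is relative to the MAC or
 * network header, and only data within the linear head can be read.
 */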
BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
	   u32, offset, void *, to, u32, len, u32, start_header)
{
	u8 *ptr;

	if (unlikely(offset > 0xffff || len > skb_headlen(skb)))
		goto err_clear;

	switch (start_header) {
	case BPF_HDR_START_MAC:
		ptr = skb_mac_header(skb) + offset;
		break;
	case BPF_HDR_START_NET:
		ptr = skb_network_header(skb) + offset;
		break;
	default:
		goto err_clear;
	}

	if (likely(ptr >= skb_mac_header(skb) &&
		   ptr + len <= skb_tail_pointer(skb))) {
		memcpy(to, ptr, len);
		return 0;
	}

err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
	.func		= bpf_skb_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

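/* Fix up an IP style (16-bit folded) checksum at 'offset' after a field
 * changed from 'from' to 'to'. The low flag bits encode the field size
 * (2 or 4 bytes); a size of 0 treats 'to' as a ready csum diff and
 * requires 'from' to be 0.
 */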
BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)
		return 0;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;
	return 0;
}

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

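/* Usage sketch (illustrative): a NAT-style rewrite pairs the store with
 * both checksum helpers so L3 and L4 checksums stay valid. The *_OFF
 * offsets below are assumed to be computed by the program, they are not
 * defined in this file:
 *
 *	__be32 old_ip, new_ip = ...;	// new address chosen by program
 *
 *	bpf_skb_load_bytes(skb, IP_DST_OFF, &old_ip, 4);
 *	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip,
 *			    BPF_F_PSEUDO_HDR | sizeof(new_ip));
 *	bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip,
 *			    sizeof(new_ip));
 *	bpf_skb_store_bytes(skb, IP_DST_OFF, &new_ip, 4, 0);
 */
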
BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i < to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}

static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};

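/* Usage sketch (illustrative): for a direct packet write, a program can
 * compute the checksum delta of the changed words and feed it into
 * bpf_l4_csum_replace() with field size 0 (diff mode, see above):
 *
 *	__be32 old_word, new_word = ...;	// program-chosen rewrite
 *	s64 diff;
 *
 *	diff = bpf_csum_diff(&old_word, 4, &new_word, 4, 0);
 *	if (diff >= 0)
 *		bpf_l4_csum_replace(skb, csum_off, 0, diff, 0);
 *
 * csum_off is an assumed, program-computed offset of the L4 checksum.
 */
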
BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
	/* The interface is to be used in combination with bpf_csum_diff()
	 * for direct packet writes. csum rotation for alignment as well
	 * as emulating csum_sub() can be done from the eBPF program.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		return (skb->csum = csum_add(skb->csum, csum));

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_csum_update_proto = {
	.func		= bpf_csum_update,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
	return dev_forward_skb(dev, skb);
}

static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
				      struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->dev = dev;
		ret = netif_rx(skb);
	}

	return ret;
}

static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->dev = dev;

	__this_cpu_inc(xmit_recursion);
	ret = dev_queue_xmit(skb);
	__this_cpu_dec(xmit_recursion);

	return ret;
}

static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* skb->mac_len is not set on normal egress */
	unsigned int mlen = skb->network_header - skb->mac_header;

	__skb_pull(skb, mlen);

	/* At ingress, the mac header has already been pulled once.
	 * At egress, skb_postpull_rcsum has to be done in case that
	 * the skb originated from ingress (i.e. is a forwarded skb)
	 * to ensure that rcsum starts at net header.
	 */
	if (!skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
	skb_pop_mac_header(skb);
	skb_reset_mac_len(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* Verify that a link layer header is carried */
	if (unlikely(skb->mac_header >= skb->network_header)) {
		kfree_skb(skb);
		return -ERANGE;
	}

	bpf_push_mac_rcsum(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
			  u32 flags)
{
	if (dev_is_mac_header_xmit(dev))
		return __bpf_redirect_common(skb, dev, flags);
	else
		return __bpf_redirect_no_mac(skb, dev, flags);
}

BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
	struct net_device *dev;
	struct sk_buff *clone;
	int ret;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return -EINVAL;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!clone))
		return -ENOMEM;

	/* For direct write, we need to keep the invariant that the skbs
	 * we're dealing with are uncloned. Should uncloning fail here,
	 * we need to free the just generated clone to unclone once
	 * again.
	 */
	ret = bpf_try_make_head_writable(skb);
	if (unlikely(ret)) {
		kfree_skb(clone);
		return -ENOMEM;
	}

	return __bpf_redirect(clone, dev, flags);
}

static const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func		= bpf_clone_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

struct redirect_info {
	u32 ifindex;
	u32 flags;
	struct bpf_map *map;
	struct bpf_map *map_to_flush;
	unsigned long map_owner;
};

static DEFINE_PER_CPU(struct redirect_info, redirect_info);

BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return TC_ACT_SHOT;

	ri->ifindex = ifindex;
	ri->flags = flags;

	return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
	ri->ifindex = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return __bpf_redirect(skb, dev, ri->flags);
}

static const struct bpf_func_proto bpf_redirect_proto = {
	.func		= bpf_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};

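/* Usage sketch (illustrative): in contrast to bpf_clone_redirect(),
 * this helper only records the target in the per-CPU redirect_info
 * above; the actual redirect is done by the caller acting on the
 * TC_ACT_REDIRECT return code, so no clone is taken:
 *
 *	SEC("classifier")		// section name is an assumption
 *	int tc_redirect(struct __sk_buff *skb)
 *	{
 *		return bpf_redirect(IFINDEX, 0);  // IFINDEX: target netdev
 *	}
 */
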
BPF_CALL_4(bpf_sk_redirect_hash, struct sk_buff *, skb,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	/* If user passes invalid input drop the packet. */
	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	tcb->bpf.flags = flags;
	tcb->bpf.sk_redir = __sock_hash_lookup_elem(map, key);
	if (!tcb->bpf.sk_redir)
		return SK_DROP;

	return SK_PASS;
}

static const struct bpf_func_proto bpf_sk_redirect_hash_proto = {
	.func		= bpf_sk_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	/* If user passes invalid input drop the packet. */
	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	tcb->bpf.flags = flags;
	tcb->bpf.sk_redir = __sock_map_lookup_elem(map, key);
	if (!tcb->bpf.sk_redir)
		return SK_DROP;

	return SK_PASS;
}

struct sock *do_sk_redirect_map(struct sk_buff *skb)
{
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	return tcb->bpf.sk_redir;
}

static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func		= bpf_sk_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_hash, struct sk_msg_buff *, msg,
	   struct bpf_map *, map, void *, key, u64, flags)
{
	/* If user passes invalid input drop the packet. */
	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = __sock_hash_lookup_elem(map, key);
	if (!msg->sk_redir)
		return SK_DROP;

	return SK_PASS;
}

static const struct bpf_func_proto bpf_msg_redirect_hash_proto = {
	.func		= bpf_msg_redirect_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_PTR_TO_MAP_KEY,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg_buff *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	/* If user passes invalid input drop the packet. */
	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	msg->flags = flags;
	msg->sk_redir = __sock_map_lookup_elem(map, key);
	if (!msg->sk_redir)
		return SK_DROP;

	return SK_PASS;
}

struct sock *do_msg_redirect_map(struct sk_msg_buff *msg)
{
	return msg->sk_redir;
}

static const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func		= bpf_msg_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg_buff *, msg, u32, bytes)
{
	msg->apply_bytes = bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
	.func		= bpf_msg_apply_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg_buff *, msg, u32, bytes)
{
	msg->cork_bytes = bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
	.func		= bpf_msg_cork_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

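/* Usage sketch (illustrative): in an SK_MSG program these two helpers
 * let a verdict cover the next N bytes, or cork data until at least N
 * bytes are queued before the program runs again; 512 is an arbitrary
 * example threshold:
 *
 *	SEC("sk_msg")			// section name is an assumption
 *	int msg_prog(struct sk_msg_md *msg)
 *	{
 *		bpf_msg_cork_bytes(msg, 512);
 *		bpf_msg_apply_bytes(msg, 512);
 *		return SK_PASS;
 *	}
 */
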
BPF_CALL_4(bpf_msg_pull_data,
	   struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
{
	unsigned int len = 0, offset = 0, copy = 0;
	struct scatterlist *sg = msg->sg_data;
	int first_sg, last_sg, i, shift;
	unsigned char *p, *to, *from;
	int bytes = end - start;
	struct page *page;

	if (unlikely(flags || end <= start))
		return -EINVAL;

	/* First find the starting scatterlist element */
	i = msg->sg_start;
	do {
		len = sg[i].length;
		offset += len;
		if (start < offset + len)
			break;
		i++;
		if (i == MAX_SKB_FRAGS)
			i = 0;
	} while (i != msg->sg_end);

	if (unlikely(start >= offset + len))
		return -EINVAL;

	if (!msg->sg_copy[i] && bytes <= len)
		goto out;

	first_sg = i;

	/* At this point we need to linearize multiple scatterlist
	 * elements or a single shared page. Either way we need to
	 * copy into a linear buffer exclusively owned by BPF. Then
	 * place the buffer in the scatterlist and fixup the original
	 * entries by removing the entries now in the linear buffer
	 * and shifting the remaining entries. For now we do not try
	 * to copy partial entries to avoid complexity of running out
	 * of sg_entry slots. The downside is reading a single byte
	 * will copy the entire sg entry.
	 */
	do {
		copy += sg[i].length;
		i++;
		if (i == MAX_SKB_FRAGS)
			i = 0;
		if (bytes < copy)
			break;
	} while (i != msg->sg_end);
	last_sg = i;

	if (unlikely(copy < end - start))
		return -EINVAL;

	page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
	if (unlikely(!page))
		return -ENOMEM;
	p = page_address(page);
	offset = 0;

	i = first_sg;
	do {
		from = sg_virt(&sg[i]);
		len = sg[i].length;
		to = p + offset;

		memcpy(to, from, len);
		offset += len;
		sg[i].length = 0;
		put_page(sg_page(&sg[i]));

		i++;
		if (i == MAX_SKB_FRAGS)
			i = 0;
	} while (i != last_sg);

	sg[first_sg].length = copy;
	sg_set_page(&sg[first_sg], page, copy, 0);

	/* To repair sg ring we need to shift entries. If we only
	 * had a single entry though we can just replace it and
	 * be done. Otherwise walk the ring and shift the entries.
	 */
	shift = last_sg - first_sg - 1;
	if (!shift)
		goto out;

	i = first_sg + 1;
	do {
		int move_from;

		if (i + shift >= MAX_SKB_FRAGS)
			move_from = i + shift - MAX_SKB_FRAGS;
		else
			move_from = i + shift;

		if (move_from == msg->sg_end)
			break;

		sg[i] = sg[move_from];
		sg[move_from].length = 0;
		sg[move_from].page_link = 0;
		sg[move_from].offset = 0;

		i++;
		if (i == MAX_SKB_FRAGS)
			i = 0;
	} while (1);
	msg->sg_end -= shift;
	if (msg->sg_end < 0)
		msg->sg_end += MAX_SKB_FRAGS;
out:
	msg->data = sg_virt(&sg[i]) + start - offset;
	msg->data_end = msg->data + bytes;

	return 0;
}

static const struct bpf_func_proto bpf_msg_pull_data_proto = {
	.func		= bpf_msg_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

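/* Usage sketch (illustrative): sk_msg data can be spread over several
 * scatterlist entries, so msg->data/msg->data_end initially cover only
 * the first one. A program that must parse bytes [start, end) pulls
 * them into one linear region first and then re-checks bounds:
 *
 *	if (bpf_msg_pull_data(msg, 0, 20, 0) < 0)
 *		return SK_DROP;
 *	if (msg->data + 20 > msg->data_end)
 *		return SK_DROP;		// verifier still requires this
 */
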
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
	return task_get_classid(skb);
}

static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
	.func		= bpf_get_cgroup_classid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{
	return dst_tclassid(skb);
}

static const struct bpf_func_proto bpf_get_route_realm_proto = {
	.func		= bpf_get_route_realm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{
	/* If skb_clear_hash() was called due to mangling, we can
	 * trigger SW recalculation here. Later access to hash
	 * can then use the inline skb->hash via context directly
	 * instead of calling this helper again.
	 */
	return skb_get_hash(skb);
}

static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
	.func		= bpf_get_hash_recalc,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
{
	/* After all direct packet writes, this can be used once for
	 * triggering a lazy recalc on next skb_get_hash() invocation.
	 */
	skb_clear_hash(skb);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
	.func		= bpf_set_hash_invalid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
{
	/* Set user specified hash as L4(+), so that it gets returned
	 * on skb_get_hash() call unless BPF prog later on triggers a
	 * skb_clear_hash().
	 */
	__skb_set_sw_hash(skb, hash, true);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_proto = {
	.func		= bpf_set_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
	   u16, vlan_tci)
{
	int ret;

	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
		     vlan_proto != htons(ETH_P_8021AD)))
		vlan_proto = htons(ETH_P_8021Q);

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
	.func		= bpf_skb_vlan_push,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
	int ret;

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_pop(skb);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
	.func		= bpf_skb_vlan_pop,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

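/* Usage sketch (illustrative): both VLAN helpers invalidate direct
 * packet pointers (hence the bpf_compute_data_pointers() calls above),
 * so a program reloads them afterwards; tag value 42 is an example:
 *
 *	if (bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 42))
 *		return TC_ACT_SHOT;
 *	data = (void *)(long)skb->data;		// refresh pointers
 *	data_end = (void *)(long)skb->data_end;
 */
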
static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{
	/* Caller already did skb_cow() with len as headroom,
	 * so no need to do it here.
	 */
	skb_push(skb, len);
	memmove(skb->data, skb->data + len, off);
	memset(skb->data + off, 0, len);

	/* No skb_postpush_rcsum(skb, skb->data + off, len)
	 * needed here as it does not change the skb->csum
	 * result for checksum complete when summing over
	 * zeroed blocks.
	 */
	return 0;
}

static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
	/* skb_ensure_writable() is not needed here, as we're
	 * already working on an uncloned skb.
	 */
	if (unlikely(!pskb_may_pull(skb, off + len)))
		return -ENOMEM;

	skb_postpull_rcsum(skb, skb->data + off, len);
	memmove(skb->data + len, skb->data, off);
	__skb_pull(skb, len);

	return 0;
}

static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* There's no need for __skb_push()/__skb_pull() pair to
	 * get to the start of the mac header as we're guaranteed
	 * to always start from here under eBPF.
	 */
	ret = bpf_skb_generic_push(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header -= len;
		skb->network_header -= len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}

static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* Same here, __skb_push()/__skb_pull() pair not needed. */
	ret = bpf_skb_generic_pop(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header += len;
		skb->network_header += len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}

static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb_mac_header_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* SKB_GSO_TCPV4 needs to be changed into
		 * SKB_GSO_TCPV6.
		 */
		if (shinfo->gso_type & SKB_GSO_TCPV4) {
			shinfo->gso_type &= ~SKB_GSO_TCPV4;
			shinfo->gso_type |= SKB_GSO_TCPV6;
		}

		/* Due to IPv6 header, MSS needs to be downgraded. */
		skb_decrease_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IPV6);
	skb_clear_hash(skb);

	return 0;
}

static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb_mac_header_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* SKB_GSO_TCPV6 needs to be changed into
		 * SKB_GSO_TCPV4.
		 */
		if (shinfo->gso_type & SKB_GSO_TCPV6) {
			shinfo->gso_type &= ~SKB_GSO_TCPV6;
			shinfo->gso_type |= SKB_GSO_TCPV4;
		}

		/* Due to IPv4 header, MSS can be upgraded. */
		skb_increase_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IP);
	skb_clear_hash(skb);

	return 0;
}

static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{
	__be16 from_proto = skb->protocol;

	if (from_proto == htons(ETH_P_IP) &&
	    to_proto == htons(ETH_P_IPV6))
		return bpf_skb_proto_4_to_6(skb);

	if (from_proto == htons(ETH_P_IPV6) &&
	    to_proto == htons(ETH_P_IP))
		return bpf_skb_proto_6_to_4(skb);

	return -ENOTSUPP;
}

BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
	   u64, flags)
{
	int ret;

	if (unlikely(flags))
		return -EINVAL;

	/* General idea is that this helper does the basic groundwork
	 * needed for changing the protocol, and eBPF program fills the
	 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
	 * and other helpers, rather than passing a raw buffer here.
	 *
	 * The rationale is to keep this minimal and without a need to
	 * deal with raw packet data. F.e. even if we would pass buffers
	 * here, the program still needs to call the bpf_lX_csum_replace()
	 * helpers anyway. Plus, this way we keep also separation of
	 * concerns, since f.e. bpf_skb_store_bytes() should only take
	 * care of stores.
	 *
	 * Currently, additional options and extension header space are
	 * not supported, but flags register is reserved so we can adapt
	 * that. For offloads, we mark packet as dodgy, so that headers
	 * need to be verified first.
	 */
	ret = bpf_skb_proto_xlat(skb, proto);
	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_proto_proto = {
	.func		= bpf_skb_change_proto,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{
	/* We only allow a restricted subset to be changed for now. */
	if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
		     !skb_pkt_type_ok(pkt_type)))
		return -EINVAL;

	skb->pkt_type = pkt_type;
	return 0;
}

static const struct bpf_func_proto bpf_skb_change_type_proto = {
	.func		= bpf_skb_change_type,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return sizeof(struct iphdr);
	case htons(ETH_P_IPV6):
		return sizeof(struct ipv6hdr);
	default:
		return ~0U;
	}
}

static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
{
	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* Due to header grow, MSS needs to be downgraded. */
		skb_decrease_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	return 0;
}

static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
{
	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* Due to header shrink, MSS can be upgraded. */
		skb_increase_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	return 0;
}

static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
	return skb->dev->mtu + skb->dev->hard_header_len;
}

static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
{
	bool trans_same = skb->transport_header == skb->network_header;
	u32 len_cur, len_diff_abs = abs(len_diff);
	u32 len_min = bpf_skb_net_base_len(skb);
	u32 len_max = __bpf_skb_max_len(skb);
	__be16 proto = skb->protocol;
	bool shrink = len_diff < 0;
	int ret;

	if (unlikely(len_diff_abs > 0xfffU))
		return -EFAULT;
	if (unlikely(proto != htons(ETH_P_IP) &&
		     proto != htons(ETH_P_IPV6)))
		return -ENOTSUPP;

	len_cur = skb->len - skb_network_offset(skb);
	if (skb_transport_header_was_set(skb) && !trans_same)
		len_cur = skb_network_header_len(skb);
	if ((shrink && (len_diff_abs >= len_cur ||
			len_cur - len_diff_abs < len_min)) ||
	    (!shrink && (skb->len + len_diff_abs > len_max &&
			 !skb_is_gso(skb))))
		return -ENOTSUPP;

	ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) :
		       bpf_skb_net_grow(skb, len_diff_abs);

	bpf_compute_data_pointers(skb);
	return ret;
}

BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
	   u32, mode, u64, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	if (likely(mode == BPF_ADJ_ROOM_NET))
		return bpf_skb_adjust_net(skb, len_diff);

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
	.func		= bpf_skb_adjust_room,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

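/* Usage sketch (illustrative): growing room right behind the network
 * header, e.g. to make space for an encapsulation header that the
 * program then fills in itself; the size is an example value:
 *
 *	if (bpf_skb_adjust_room(skb, sizeof(struct iphdr),
 *				BPF_ADJ_ROOM_NET, 0) < 0)
 *		return TC_ACT_SHOT;
 *	// then write the new header and fix up checksums
 */
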
static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
	u32 min_len = skb_network_offset(skb);

	if (skb_transport_header_was_set(skb))
		min_len = skb_transport_offset(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		min_len = skb_checksum_start_offset(skb) +
			  skb->csum_offset + sizeof(__sum16);
	return min_len;
}

static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	unsigned int old_len = skb->len;
	int ret;

	ret = __skb_grow_rcsum(skb, new_len);
	if (!ret)
		memset(skb->data + old_len, 0, new_len - old_len);
	return ret;
}

static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	return __skb_trim_rcsum(skb, new_len);
}

BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 min_len = __bpf_skb_min_len(skb);
	int ret;

	if (unlikely(flags || new_len > max_len || new_len < min_len))
		return -EINVAL;
	if (skb->encapsulation)
		return -ENOTSUPP;

	/* The basic idea of this helper is that it's performing the
	 * needed work to either grow or trim an skb, and eBPF program
	 * rewrites the rest via helpers like bpf_skb_store_bytes(),
	 * bpf_lX_csum_replace() and others rather than passing a raw
	 * buffer here. This one is a slow path helper and intended
	 * for replies with control messages.
	 *
	 * Like in bpf_skb_change_proto(), we want to keep this rather
	 * minimal and without protocol specifics so that we are able
	 * to separate concerns as in bpf_skb_store_bytes() should only
	 * be the one responsible for writing buffers.
	 *
	 * It's really expected to be a slow path operation here for
	 * control message replies, so we're implicitly linearizing,
	 * uncloning and dropping offloads from the skb by this.
	 */
	ret = __bpf_try_make_writable(skb, skb->len);
	if (!ret) {
		if (new_len > skb->len)
			ret = bpf_skb_grow_rcsum(skb, new_len);
		else if (new_len < skb->len)
			ret = bpf_skb_trim_rcsum(skb, new_len);
		if (!ret && skb_is_gso(skb))
			skb_gso_reset(skb);
	}

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_tail_proto = {
	.func		= bpf_skb_change_tail,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

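/* Usage sketch (illustrative): trimming a packet down before turning it
 * into a minimal control reply; the 64-byte target is an arbitrary
 * example and must stay within the min/max bounds computed above:
 *
 *	if (bpf_skb_change_tail(skb, 64, 0) < 0)
 *		return TC_ACT_SHOT;
 *	// rewrite the remaining bytes, then e.g. bpf_redirect()
 */
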
BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 new_len = skb->len + head_room;
	int ret;

	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
		     new_len < skb->len))
		return -EINVAL;

	ret = skb_cow(skb, head_room);
	if (likely(!ret)) {
		/* Idea for this helper is that we currently only
		 * allow to expand on mac header. This means that
		 * skb->protocol, network header, etc, stay as is.
		 * Compared to bpf_skb_change_tail(), we're more
		 * flexible due to not needing to linearize or
		 * reset GSO. Intention for this helper is to be
		 * used by an L3 skb that needs to push mac header
		 * for redirection into L2 device.
		 */
		__skb_push(skb, head_room);
		memset(skb->data, 0, head_room);
		skb_reset_mac_header(skb);
	}

	bpf_compute_data_pointers(skb);
	return 0;
}

static const struct bpf_func_proto bpf_skb_change_head_proto = {
	.func		= bpf_skb_change_head,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
	return xdp_data_meta_unsupported(xdp) ? 0 :
	       xdp->data - xdp->data_meta;
}

BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
	void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
	unsigned long metalen = xdp_get_metalen(xdp);
	void *data_start = xdp_frame_end + metalen;
	void *data = xdp->data + offset;

	if (unlikely(data < data_start ||
		     data > xdp->data_end - ETH_HLEN))
		return -EINVAL;

	if (metalen)
		memmove(xdp->data_meta + offset,
			xdp->data_meta, metalen);
	xdp->data_meta += offset;
	xdp->data = data;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
	.func		= bpf_xdp_adjust_head,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

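/* Usage sketch (illustrative): a negative offset grows headroom in
 * front of the frame, e.g. to prepend an outer Ethernet header; the
 * packet pointers must be reloaded and re-checked afterwards:
 *
 *	if (bpf_xdp_adjust_head(xdp, -(int)sizeof(struct ethhdr)))
 *		return XDP_DROP;
 *	void *data = (void *)(long)xdp->data;
 *	void *data_end = (void *)(long)xdp->data_end;
 *	if (data + sizeof(struct ethhdr) > data_end)
 *		return XDP_DROP;
 */
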
BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
{
	void *data_end = xdp->data_end + offset;

	/* only shrinking is allowed for now. */
	if (unlikely(offset >= 0))
		return -EINVAL;

	if (unlikely(data_end < xdp->data + ETH_HLEN))
		return -EINVAL;

	xdp->data_end = data_end;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
	.func		= bpf_xdp_adjust_tail,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
{
	void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
	void *meta = xdp->data_meta + offset;
	unsigned long metalen = xdp->data - meta;

	if (xdp_data_meta_unsupported(xdp))
		return -ENOTSUPP;
	if (unlikely(meta < xdp_frame_end ||
		     meta > xdp->data))
		return -EINVAL;
	if (unlikely((metalen & (sizeof(__u32) - 1)) ||
		     (metalen > 32)))
		return -EACCES;

	xdp->data_meta = meta;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
	.func		= bpf_xdp_adjust_meta,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

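/* Usage sketch (illustrative): the metadata area sits directly in front
 * of xdp->data and can carry a small, 4-byte-aligned blob (at most 32
 * bytes, per the checks above) from XDP to a later tc program:
 *
 *	if (bpf_xdp_adjust_meta(xdp, -(int)sizeof(__u32)))
 *		return XDP_PASS;
 *	__u32 *meta = (void *)(long)xdp->data_meta;
 *	if ((void *)(meta + 1) > (void *)(long)xdp->data)
 *		return XDP_PASS;
 *	*meta = 0xcafe;			// example mark for later stages
 */
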
static int __bpf_tx_xdp(struct net_device *dev,
			struct bpf_map *map,
			struct xdp_buff *xdp,
			u32 index)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
	if (err)
		return err;
	dev->netdev_ops->ndo_xdp_flush(dev);
	return 0;
}

static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
			    struct bpf_map *map,
			    struct xdp_buff *xdp,
			    u32 index)
{
	int err;

	switch (map->map_type) {
	case BPF_MAP_TYPE_DEVMAP: {
		struct net_device *dev = fwd;
		struct xdp_frame *xdpf;

		if (!dev->netdev_ops->ndo_xdp_xmit)
			return -EOPNOTSUPP;

		xdpf = convert_to_xdp_frame(xdp);
		if (unlikely(!xdpf))
			return -EOVERFLOW;

		/* TODO: move to inside map code instead, for bulk support
		 * err = dev_map_enqueue(dev, xdp);
		 */
		err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
		if (err)
			return err;
		__dev_map_insert_ctx(map, index);
		break;
	}
	case BPF_MAP_TYPE_CPUMAP: {
		struct bpf_cpu_map_entry *rcpu = fwd;

		err = cpu_map_enqueue(rcpu, xdp, dev_rx);
		if (err)
			return err;
		__cpu_map_insert_ctx(map, index);
		break;
	}
	case BPF_MAP_TYPE_XSKMAP: {
		struct xdp_sock *xs = fwd;

		err = __xsk_map_redirect(map, xdp, xs);
		return err;
	}
	default:
		break;
	}
	return 0;
}

void xdp_do_flush_map(void)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct bpf_map *map = ri->map_to_flush;

	ri->map_to_flush = NULL;
	if (map) {
		switch (map->map_type) {
		case BPF_MAP_TYPE_DEVMAP:
			__dev_map_flush(map);
			break;
		case BPF_MAP_TYPE_CPUMAP:
			__cpu_map_flush(map);
			break;
		case BPF_MAP_TYPE_XSKMAP:
			__xsk_map_flush(map);
			break;
		default:
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(xdp_do_flush_map);

static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_DEVMAP:
		return __dev_map_lookup_elem(map, index);
	case BPF_MAP_TYPE_CPUMAP:
		return __cpu_map_lookup_elem(map, index);
	case BPF_MAP_TYPE_XSKMAP:
		return __xsk_map_lookup_elem(map, index);
	default:
		return NULL;
	}
}

static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
				   unsigned long aux)
{
	return (unsigned long)xdp_prog->aux != aux;
}

static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
			       struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	unsigned long map_owner = ri->map_owner;
	struct bpf_map *map = ri->map;
	u32 index = ri->ifindex;
	void *fwd = NULL;
	int err;

	ri->ifindex = 0;
	ri->map = NULL;
	ri->map_owner = 0;

	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
		err = -EFAULT;
		map = NULL;
		goto err;
	}

	fwd = __xdp_map_lookup_elem(map, index);
	if (!fwd) {
		err = -EINVAL;
		goto err;
	}
	if (ri->map_to_flush && ri->map_to_flush != map)
		xdp_do_flush_map();

	err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
	if (unlikely(err))
		goto err;

	ri->map_to_flush = map;
	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
	return 0;
err:
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
	return err;
}

int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
		    struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *fwd;
	u32 index = ri->ifindex;
	int err;

	if (ri->map)
		return xdp_do_redirect_map(dev, xdp, xdp_prog);

	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	ri->ifindex = 0;
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
	if (unlikely(err))
		goto err;

	_trace_xdp_redirect(dev, xdp_prog, index);
	return 0;
err:
	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
	return err;
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);

static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
{
	unsigned int len;

	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (skb->len > len)
		return -EMSGSIZE;

	return 0;
}

static int xdp_do_generic_redirect_map(struct net_device *dev,
				       struct sk_buff *skb,
				       struct xdp_buff *xdp,
				       struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	unsigned long map_owner = ri->map_owner;
	struct bpf_map *map = ri->map;
	u32 index = ri->ifindex;
	void *fwd = NULL;
	int err = 0;

	ri->ifindex = 0;
	ri->map = NULL;
	ri->map_owner = 0;

	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
		err = -EFAULT;
		map = NULL;
		goto err;
	}
	fwd = __xdp_map_lookup_elem(map, index);
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
			goto err;
		skb->dev = fwd;
		generic_xdp_tx(skb, xdp_prog);
	} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
		struct xdp_sock *xs = fwd;

		err = xsk_generic_rcv(xs, xdp);
		if (err)
			goto err;
		consume_skb(skb);
	} else {
		/* TODO: Handle BPF_MAP_TYPE_CPUMAP */
		err = -EBADRQC;
		goto err;
	}

	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
	return 0;
err:
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
	return err;
}

int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	u32 index = ri->ifindex;
	struct net_device *fwd;
	int err = 0;

	if (ri->map)
		return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog);

	ri->ifindex = 0;
	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
		goto err;

	skb->dev = fwd;
	_trace_xdp_redirect(dev, xdp_prog, index);
	generic_xdp_tx(skb, xdp_prog);
	return 0;
err:
	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
	return err;
}
EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);

BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	ri->map = NULL;
	ri->map_owner = 0;

	return XDP_REDIRECT;
}

static const struct bpf_func_proto bpf_xdp_redirect_proto = {
	.func		= bpf_xdp_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
	   unsigned long, map_owner)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	ri->map = map;
	ri->map_owner = map_owner;

	return XDP_REDIRECT;
}

/* Note, arg4 is hidden from users and populated by the verifier
 * with the right pointer.
 */
static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
	.func		= bpf_xdp_redirect_map,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

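/* Usage sketch (illustrative): from the program side this is called as
 * bpf_redirect_map(); with a devmap the frame is forwarded to the
 * netdev stored at the given key, enabling the batched flush path
 * above. Map definition and key are assumptions of the example:
 *
 *	struct bpf_map_def SEC("maps") tx_ports = {
 *		.type		= BPF_MAP_TYPE_DEVMAP,
 *		.key_size	= sizeof(__u32),
 *		.value_size	= sizeof(__u32),
 *		.max_entries	= 64,
 *	};
 *
 *	return bpf_redirect_map(&tx_ports, key, 0);
 */
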
bool bpf_helper_changes_pkt_data(void *func)
{
	if (func == bpf_skb_vlan_push ||
	    func == bpf_skb_vlan_pop ||
	    func == bpf_skb_store_bytes ||
	    func == bpf_skb_change_proto ||
	    func == bpf_skb_change_head ||
	    func == bpf_skb_change_tail ||
	    func == bpf_skb_adjust_room ||
	    func == bpf_skb_pull_data ||
	    func == bpf_clone_redirect ||
	    func == bpf_l3_csum_replace ||
	    func == bpf_l4_csum_replace ||
	    func == bpf_xdp_adjust_head ||
	    func == bpf_xdp_adjust_meta ||
	    func == bpf_msg_pull_data ||
	    func == bpf_xdp_adjust_tail)
		return true;

	return false;
}

static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
				  unsigned long off, unsigned long len)
{
	void *ptr = skb_header_pointer(skb, off, len, dst_buff);

	if (unlikely(!ptr))
		return len;
	if (ptr != dst_buff)
		memcpy(dst_buff, ptr, len);

	return 0;
}

BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(skb_size > skb->len))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
				bpf_skb_copy);
}

static const struct bpf_func_proto bpf_skb_event_output_proto = {
	.func		= bpf_skb_event_output,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_PTR_TO_MEM,
	.arg5_type	= ARG_CONST_SIZE_OR_ZERO,
};

static unsigned short bpf_tunnel_key_af(u64 flags)
{
	return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}

BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
	   u32, size, u64, flags)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	void *to_orig = to;
	int err;

	if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
		err = -EINVAL;
		goto err_clear;
	}
	if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
		err = -EPROTO;
		goto err_clear;
	}
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		err = -EINVAL;
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
			goto set_compat;
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			if (ip_tunnel_info_af(info) != AF_INET)
				goto err_clear;
set_compat:
			to = (struct bpf_tunnel_key *)compat;
			break;
		default:
			goto err_clear;
		}
	}

	to->tunnel_id = be64_to_cpu(info->key.tun_id);
	to->tunnel_tos = info->key.tos;
	to->tunnel_ttl = info->key.ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
		       sizeof(to->remote_ipv6));
		to->tunnel_label = be32_to_cpu(info->key.label);
	} else {
		to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
	}

	if (unlikely(size != sizeof(struct bpf_tunnel_key)))
		memcpy(to_orig, to, size);

	return 0;
err_clear:
	memset(to_orig, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
	.func		= bpf_skb_get_tunnel_key,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	int err;

	if (unlikely(!info ||
		     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
		err = -ENOENT;
		goto err_clear;
	}
	if (unlikely(size < info->options_len)) {
		err = -ENOMEM;
		goto err_clear;
	}

	ip_tunnel_info_opts_get(to, info);
	if (size > info->options_len)
		memset(to + info->options_len, 0, size - info->options_len);

	return info->options_len;
err_clear:
	memset(to, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
	.func		= bpf_skb_get_tunnel_opt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

AS
3532static struct metadata_dst __percpu *md_dst;
3533
f3694e00
DB
3534BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
3535 const struct bpf_tunnel_key *, from, u32, size, u64, flags)
d3aa45ce 3536{
d3aa45ce 3537 struct metadata_dst *md = this_cpu_ptr(md_dst);
c6c33454 3538 u8 compat[sizeof(struct bpf_tunnel_key)];
d3aa45ce
AS
3539 struct ip_tunnel_info *info;
3540
22080870 3541 if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
77a5196a 3542 BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
d3aa45ce 3543 return -EINVAL;
c6c33454
DB
3544 if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3545 switch (size) {
4018ab18 3546 case offsetof(struct bpf_tunnel_key, tunnel_label):
c0e760c9 3547 case offsetof(struct bpf_tunnel_key, tunnel_ext):
c6c33454
DB
3548 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3549 /* Fixup deprecated structure layouts here, so we have
3550 * a common path later on.
3551 */
3552 memcpy(compat, from, size);
3553 memset(compat + size, 0, sizeof(compat) - size);
f3694e00 3554 from = (const struct bpf_tunnel_key *) compat;
c6c33454
DB
3555 break;
3556 default:
3557 return -EINVAL;
3558 }
3559 }
c0e760c9
DB
3560 if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
3561 from->tunnel_ext))
4018ab18 3562 return -EINVAL;
d3aa45ce
AS
3563
3564 skb_dst_drop(skb);
3565 dst_hold((struct dst_entry *) md);
3566 skb_dst_set(skb, (struct dst_entry *) md);
3567
3568 info = &md->u.tun_info;
5540fbf4 3569 memset(info, 0, sizeof(*info));
d3aa45ce 3570 info->mode = IP_TUNNEL_INFO_TX;
c6c33454 3571
db3c6139 3572 info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
22080870
DB
3573 if (flags & BPF_F_DONT_FRAGMENT)
3574 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
792f3dd6
WT
3575 if (flags & BPF_F_ZERO_CSUM_TX)
3576 info->key.tun_flags &= ~TUNNEL_CSUM;
77a5196a
WT
3577 if (flags & BPF_F_SEQ_NUMBER)
3578 info->key.tun_flags |= TUNNEL_SEQ;
22080870 3579
d3aa45ce 3580 info->key.tun_id = cpu_to_be64(from->tunnel_id);
c6c33454
DB
3581 info->key.tos = from->tunnel_tos;
3582 info->key.ttl = from->tunnel_ttl;
3583
3584 if (flags & BPF_F_TUNINFO_IPV6) {
3585 info->mode |= IP_TUNNEL_INFO_IPV6;
3586 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
3587 sizeof(from->remote_ipv6));
4018ab18
DB
3588 info->key.label = cpu_to_be32(from->tunnel_label) &
3589 IPV6_FLOWLABEL_MASK;
c6c33454
DB
3590 } else {
3591 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
3592 }
d3aa45ce
AS
3593
3594 return 0;
3595}
3596
577c50aa 3597static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
d3aa45ce
AS
3598 .func = bpf_skb_set_tunnel_key,
3599 .gpl_only = false,
3600 .ret_type = RET_INTEGER,
3601 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3602 .arg2_type = ARG_PTR_TO_MEM,
3603 .arg3_type = ARG_CONST_SIZE,
d3aa45ce
AS
3604 .arg4_type = ARG_ANYTHING,
3605};
3606
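/* Illustrative sketch (BPF program side, not part of this file): a tc
 * egress program on a collect_md vxlan device choosing the remote
 * endpoint per packet; the address and IDs below are made up:
 *
 *	SEC("vxlan_set_tunnel")
 *	int _vxlan_set_tunnel(struct __sk_buff *skb)
 *	{
 *		struct bpf_tunnel_key key = {};
 *
 *		key.remote_ipv4 = 0xac100164;		(172.16.1.100)
 *		key.tunnel_id = 2;
 *		key.tunnel_ttl = 64;
 *
 *		if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *					   BPF_F_ZERO_CSUM_TX) < 0)
 *			return TC_ACT_SHOT;
 *		return TC_ACT_OK;
 *	}
 */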
f3694e00
DB
3607BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
3608 const u8 *, from, u32, size)
14ca0751 3609{
14ca0751
DB
3610 struct ip_tunnel_info *info = skb_tunnel_info(skb);
3611 const struct metadata_dst *md = this_cpu_ptr(md_dst);
3612
3613 if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
3614 return -EINVAL;
fca5fdf6 3615 if (unlikely(size > IP_TUNNEL_OPTS_MAX))
14ca0751
DB
3616 return -ENOMEM;
3617
3618 ip_tunnel_info_opts_set(info, from, size);
3619
3620 return 0;
3621}
3622
3623static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
3624 .func = bpf_skb_set_tunnel_opt,
3625 .gpl_only = false,
3626 .ret_type = RET_INTEGER,
3627 .arg1_type = ARG_PTR_TO_CTX,
39f19ebb
AS
3628 .arg2_type = ARG_PTR_TO_MEM,
3629 .arg3_type = ARG_CONST_SIZE,
14ca0751
DB
3630};
3631
3632static const struct bpf_func_proto *
3633bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
d3aa45ce
AS
3634{
3635 if (!md_dst) {
d66f2b91
JK
3636 struct metadata_dst __percpu *tmp;
3637
3638 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
3639 METADATA_IP_TUNNEL,
3640 GFP_KERNEL);
3641 if (!tmp)
d3aa45ce 3642 return NULL;
d66f2b91
JK
3643 if (cmpxchg(&md_dst, NULL, tmp))
3644 metadata_dst_free_percpu(tmp);
d3aa45ce 3645 }
14ca0751
DB
3646
3647 switch (which) {
3648 case BPF_FUNC_skb_set_tunnel_key:
3649 return &bpf_skb_set_tunnel_key_proto;
3650 case BPF_FUNC_skb_set_tunnel_opt:
3651 return &bpf_skb_set_tunnel_opt_proto;
3652 default:
3653 return NULL;
3654 }
d3aa45ce
AS
3655}
3656
f3694e00
DB
3657BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
3658 u32, idx)
4a482f34 3659{
4a482f34
MKL
3660 struct bpf_array *array = container_of(map, struct bpf_array, map);
3661 struct cgroup *cgrp;
3662 struct sock *sk;
4a482f34 3663
2d48c5f9 3664 sk = skb_to_full_sk(skb);
4a482f34
MKL
3665 if (!sk || !sk_fullsock(sk))
3666 return -ENOENT;
f3694e00 3667 if (unlikely(idx >= array->map.max_entries))
4a482f34
MKL
3668 return -E2BIG;
3669
f3694e00 3670 cgrp = READ_ONCE(array->ptrs[idx]);
4a482f34
MKL
3671 if (unlikely(!cgrp))
3672 return -EAGAIN;
3673
54fd9c2d 3674 return sk_under_cgroup_hierarchy(sk, cgrp);
4a482f34
MKL
3675}
3676
747ea55e
DB
3677static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
3678 .func = bpf_skb_under_cgroup,
4a482f34
MKL
3679 .gpl_only = false,
3680 .ret_type = RET_INTEGER,
3681 .arg1_type = ARG_PTR_TO_CTX,
3682 .arg2_type = ARG_CONST_MAP_PTR,
3683 .arg3_type = ARG_ANYTHING,
3684};
4a482f34 3685
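/* Illustrative sketch (BPF program side, not part of this file): letting
 * only traffic of one cgroup through a tc classifier. cgrp_map is a
 * made-up BPF_MAP_TYPE_CGROUP_ARRAY map with the target cgroup fd placed
 * in slot 0 by user space; the helper returns 1 on match, 0 on mismatch
 * and a negative error otherwise:
 *
 *	struct bpf_map_def SEC("maps") cgrp_map = {
 *		.type		= BPF_MAP_TYPE_CGROUP_ARRAY,
 *		.key_size	= sizeof(u32),
 *		.value_size	= sizeof(u32),
 *		.max_entries	= 1,
 *	};
 *
 *	SEC("classifier")
 *	int cls_main(struct __sk_buff *skb)
 *	{
 *		return bpf_skb_under_cgroup(skb, &cgrp_map, 0) == 1 ?
 *		       TC_ACT_OK : TC_ACT_SHOT;
 *	}
 */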
4de16969
DB
3686static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
3687 unsigned long off, unsigned long len)
3688{
3689 memcpy(dst_buff, src_buff + off, len);
3690 return 0;
3691}
3692
f3694e00
DB
3693BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
3694 u64, flags, void *, meta, u64, meta_size)
4de16969 3695{
4de16969 3696 u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4de16969
DB
3697
3698 if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
3699 return -EINVAL;
3700 if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
3701 return -EFAULT;
3702
9c471370
MKL
3703 return bpf_event_output(map, flags, meta, meta_size, xdp->data,
3704 xdp_size, bpf_xdp_copy);
4de16969
DB
3705}
3706
3707static const struct bpf_func_proto bpf_xdp_event_output_proto = {
3708 .func = bpf_xdp_event_output,
3709 .gpl_only = true,
3710 .ret_type = RET_INTEGER,
3711 .arg1_type = ARG_PTR_TO_CTX,
3712 .arg2_type = ARG_CONST_MAP_PTR,
3713 .arg3_type = ARG_ANYTHING,
39f19ebb 3714 .arg4_type = ARG_PTR_TO_MEM,
1728a4f2 3715 .arg5_type = ARG_CONST_SIZE_OR_ZERO,
4de16969
DB
3716};
3717
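/* Illustrative sketch (BPF program side, not part of this file): pushing
 * packet samples to user space from XDP. perf_map and struct metadata
 * are made up; the upper 32 bits of the flags say how many packet bytes
 * to append after the metadata (the helper fails with -EFAULT if that
 * exceeds the frame):
 *
 *	struct bpf_map_def SEC("maps") perf_map = {
 *		.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,
 *		.key_size	= sizeof(int),
 *		.value_size	= sizeof(u32),
 *		.max_entries	= 64,
 *	};
 *
 *	SEC("xdp_sample")
 *	int xdp_sample_prog(struct xdp_md *ctx)
 *	{
 *		struct metadata { u16 cookie; u16 pkt_len; } md = {
 *			.cookie = 0xdead, .pkt_len = 64,
 *		};
 *		u64 flags = BPF_F_CURRENT_CPU | ((u64)md.pkt_len << 32);
 *
 *		bpf_perf_event_output(ctx, &perf_map, flags, &md, sizeof(md));
 *		return XDP_PASS;
 *	}
 */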
91b8270f
CF
3718BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
3719{
3720 return skb->sk ? sock_gen_cookie(skb->sk) : 0;
3721}
3722
3723static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
3724 .func = bpf_get_socket_cookie,
3725 .gpl_only = false,
3726 .ret_type = RET_INTEGER,
3727 .arg1_type = ARG_PTR_TO_CTX,
3728};
3729
6acc5c29
CF
3730BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
3731{
3732 struct sock *sk = sk_to_full_sk(skb->sk);
3733 kuid_t kuid;
3734
3735 if (!sk || !sk_fullsock(sk))
3736 return overflowuid;
3737 kuid = sock_net_uid(sock_net(sk), sk);
3738 return from_kuid_munged(sock_net(sk)->user_ns, kuid);
3739}
3740
3741static const struct bpf_func_proto bpf_get_socket_uid_proto = {
3742 .func = bpf_get_socket_uid,
3743 .gpl_only = false,
3744 .ret_type = RET_INTEGER,
3745 .arg1_type = ARG_PTR_TO_CTX,
3746};
3747
8c4b4c7e
LB
3748BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
3749 int, level, int, optname, char *, optval, int, optlen)
3750{
3751 struct sock *sk = bpf_sock->sk;
3752 int ret = 0;
3753 int val;
3754
3755 if (!sk_fullsock(sk))
3756 return -EINVAL;
3757
3758 if (level == SOL_SOCKET) {
3759 if (optlen != sizeof(int))
3760 return -EINVAL;
3761 val = *((int *)optval);
3762
3763 /* Only some socket options are supported */
3764 switch (optname) {
3765 case SO_RCVBUF:
3766 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
3767 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
3768 break;
3769 case SO_SNDBUF:
3770 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
3771 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
3772 break;
3773 case SO_MAX_PACING_RATE:
3774 sk->sk_max_pacing_rate = val;
3775 sk->sk_pacing_rate = min(sk->sk_pacing_rate,
3776 sk->sk_max_pacing_rate);
3777 break;
3778 case SO_PRIORITY:
3779 sk->sk_priority = val;
3780 break;
3781 case SO_RCVLOWAT:
3782 if (val < 0)
3783 val = INT_MAX;
3784 sk->sk_rcvlowat = val ? : 1;
3785 break;
3786 case SO_MARK:
3787 sk->sk_mark = val;
3788 break;
3789 default:
3790 ret = -EINVAL;
3791 }
a5192c52 3792#ifdef CONFIG_INET
6f5c39fa
NS
3793 } else if (level == SOL_IP) {
3794 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
3795 return -EINVAL;
3796
3797 val = *((int *)optval);
3798 /* Only some options are supported */
3799 switch (optname) {
3800 case IP_TOS:
3801 if (val < -1 || val > 0xff) {
3802 ret = -EINVAL;
3803 } else {
3804 struct inet_sock *inet = inet_sk(sk);
3805
3806 if (val == -1)
3807 val = 0;
3808 inet->tos = val;
3809 }
3810 break;
3811 default:
3812 ret = -EINVAL;
3813 }
6f9bd3d7
LB
3814#if IS_ENABLED(CONFIG_IPV6)
3815 } else if (level == SOL_IPV6) {
3816 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
3817 return -EINVAL;
3818
3819 val = *((int *)optval);
3820 /* Only some options are supported */
3821 switch (optname) {
3822 case IPV6_TCLASS:
3823 if (val < -1 || val > 0xff) {
3824 ret = -EINVAL;
3825 } else {
3826 struct ipv6_pinfo *np = inet6_sk(sk);
3827
3828 if (val == -1)
3829 val = 0;
3830 np->tclass = val;
3831 }
3832 break;
3833 default:
3834 ret = -EINVAL;
3835 }
3836#endif
8c4b4c7e
LB
3837 } else if (level == SOL_TCP &&
3838 sk->sk_prot->setsockopt == tcp_setsockopt) {
91b5b21c
LB
3839 if (optname == TCP_CONGESTION) {
3840 char name[TCP_CA_NAME_MAX];
ebfa00c5 3841 bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
91b5b21c
LB
3842
3843 strncpy(name, optval, min_t(long, optlen,
3844 TCP_CA_NAME_MAX-1));
3845 name[TCP_CA_NAME_MAX-1] = 0;
6f9bd3d7
LB
3846 ret = tcp_set_congestion_control(sk, name, false,
3847 reinit);
91b5b21c 3848 } else {
fc747810
LB
3849 struct tcp_sock *tp = tcp_sk(sk);
3850
3851 if (optlen != sizeof(int))
3852 return -EINVAL;
3853
3854 val = *((int *)optval);
3855 /* Only some options are supported */
3856 switch (optname) {
3857 case TCP_BPF_IW:
3858 if (val <= 0 || tp->data_segs_out > 0)
3859 ret = -EINVAL;
3860 else
3861 tp->snd_cwnd = val;
3862 break;
13bf9641
LB
3863 case TCP_BPF_SNDCWND_CLAMP:
3864 if (val <= 0) {
3865 ret = -EINVAL;
3866 } else {
3867 tp->snd_cwnd_clamp = val;
3868 tp->snd_ssthresh = val;
3869 }
6d3f06a0 3870 break;
fc747810
LB
3871 default:
3872 ret = -EINVAL;
3873 }
91b5b21c 3874 }
91b5b21c 3875#endif
8c4b4c7e
LB
3876 } else {
3877 ret = -EINVAL;
3878 }
3879 return ret;
3880}
3881
3882static const struct bpf_func_proto bpf_setsockopt_proto = {
3883 .func = bpf_setsockopt,
cd86d1fd 3884 .gpl_only = false,
8c4b4c7e
LB
3885 .ret_type = RET_INTEGER,
3886 .arg1_type = ARG_PTR_TO_CTX,
3887 .arg2_type = ARG_ANYTHING,
3888 .arg3_type = ARG_ANYTHING,
3889 .arg4_type = ARG_PTR_TO_MEM,
3890 .arg5_type = ARG_CONST_SIZE,
3891};
3892
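/* Illustrative sketch (BPF program side, not part of this file): a
 * sockops program bumping buffer sizes once a connection is established,
 * along the lines of the tcp_bufs sample; the 150000 is made up:
 *
 *	SEC("sockops")
 *	int bpf_bufs(struct bpf_sock_ops *skops)
 *	{
 *		int bufsize = 150000;
 *		int rv = 0;
 *
 *		switch (skops->op) {
 *		case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *		case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
 *			rv = bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF,
 *					    &bufsize, sizeof(bufsize));
 *			rv += bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
 *					     &bufsize, sizeof(bufsize));
 *			break;
 *		}
 *		skops->reply = rv;
 *		return 1;
 *	}
 */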
cd86d1fd
LB
3893BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
3894 int, level, int, optname, char *, optval, int, optlen)
3895{
3896 struct sock *sk = bpf_sock->sk;
cd86d1fd
LB
3897
3898 if (!sk_fullsock(sk))
3899 goto err_clear;
3900
3901#ifdef CONFIG_INET
3902 if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
3903 if (optname == TCP_CONGESTION) {
3904 struct inet_connection_sock *icsk = inet_csk(sk);
3905
3906 if (!icsk->icsk_ca_ops || optlen <= 1)
3907 goto err_clear;
3908 strncpy(optval, icsk->icsk_ca_ops->name, optlen);
3909 optval[optlen - 1] = 0;
3910 } else {
3911 goto err_clear;
3912 }
6f5c39fa
NS
3913 } else if (level == SOL_IP) {
3914 struct inet_sock *inet = inet_sk(sk);
3915
3916 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
3917 goto err_clear;
3918
3919 /* Only some options are supported */
3920 switch (optname) {
3921 case IP_TOS:
3922 *((int *)optval) = (int)inet->tos;
3923 break;
3924 default:
3925 goto err_clear;
3926 }
6f9bd3d7
LB
3927#if IS_ENABLED(CONFIG_IPV6)
3928 } else if (level == SOL_IPV6) {
3929 struct ipv6_pinfo *np = inet6_sk(sk);
3930
3931 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
3932 goto err_clear;
3933
3934 /* Only some options are supported */
3935 switch (optname) {
3936 case IPV6_TCLASS:
3937 *((int *)optval) = (int)np->tclass;
3938 break;
3939 default:
3940 goto err_clear;
3941 }
3942#endif
cd86d1fd
LB
3943 } else {
3944 goto err_clear;
3945 }
aa2bc739 3946 return 0;
cd86d1fd
LB
3947#endif
3948err_clear:
3949 memset(optval, 0, optlen);
3950 return -EINVAL;
3951}
3952
3953static const struct bpf_func_proto bpf_getsockopt_proto = {
3954 .func = bpf_getsockopt,
3955 .gpl_only = false,
3956 .ret_type = RET_INTEGER,
3957 .arg1_type = ARG_PTR_TO_CTX,
3958 .arg2_type = ARG_ANYTHING,
3959 .arg3_type = ARG_ANYTHING,
3960 .arg4_type = ARG_PTR_TO_UNINIT_MEM,
3961 .arg5_type = ARG_CONST_SIZE,
3962};
3963
b13d8807
LB
3964BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
3965 int, argval)
3966{
3967 struct sock *sk = bpf_sock->sk;
3968 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
3969
a7dcdf6e 3970 if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
b13d8807
LB
3971 return -EINVAL;
3972
b13d8807
LB
3973 if (val)
3974 tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
3975
3976 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
b13d8807
LB
3977}
3978
3979static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
3980 .func = bpf_sock_ops_cb_flags_set,
3981 .gpl_only = false,
3982 .ret_type = RET_INTEGER,
3983 .arg1_type = ARG_PTR_TO_CTX,
3984 .arg2_type = ARG_ANYTHING,
3985};
3986
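/* Illustrative sketch (BPF program side, not part of this file): a
 * sockops program subscribing to RTO and retransmit callbacks once the
 * connection is established; the return value echoes any flag bits the
 * kernel did not recognize:
 *
 *	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
 *		bpf_sock_ops_cb_flags_set(skops,
 *					  BPF_SOCK_OPS_RTO_CB_FLAG |
 *					  BPF_SOCK_OPS_RETRANS_CB_FLAG);
 *		break;
 */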
d74bad4e
AI
3987const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
3988EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
3989
3990BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
3991 int, addr_len)
3992{
3993#ifdef CONFIG_INET
3994 struct sock *sk = ctx->sk;
3995 int err;
3996
3997 /* Binding to port can be expensive, so it's prohibited in the helper.
3998 * Only binding to IP is supported.
3999 */
4000 err = -EINVAL;
4001 if (addr->sa_family == AF_INET) {
4002 if (addr_len < sizeof(struct sockaddr_in))
4003 return err;
4004 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
4005 return err;
4006 return __inet_bind(sk, addr, addr_len, true, false);
4007#if IS_ENABLED(CONFIG_IPV6)
4008 } else if (addr->sa_family == AF_INET6) {
4009 if (addr_len < SIN6_LEN_RFC2133)
4010 return err;
4011 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
4012 return err;
4013 /* ipv6_bpf_stub cannot be NULL, since this is only called from
4014 * the bpf_cgroup_inet6_connect hook and ipv6 is already loaded
4015 */
4016 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
4017#endif /* CONFIG_IPV6 */
4018 }
4019#endif /* CONFIG_INET */
4020
4021 return -EAFNOSUPPORT;
4022}
4023
4024static const struct bpf_func_proto bpf_bind_proto = {
4025 .func = bpf_bind,
4026 .gpl_only = false,
4027 .ret_type = RET_INTEGER,
4028 .arg1_type = ARG_PTR_TO_CTX,
4029 .arg2_type = ARG_PTR_TO_MEM,
4030 .arg3_type = ARG_CONST_SIZE,
4031};
4032
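/* Illustrative sketch (BPF program side, not part of this file): a
 * cgroup connect4 program pinning the source address of outgoing
 * connections; sin_port stays 0 since the helper refuses port binds,
 * and bpf_htonl comes from a bpf_endian.h-style header:
 *
 *	SEC("cgroup/connect4")
 *	int connect_v4_prog(struct bpf_sock_addr *ctx)
 *	{
 *		struct sockaddr_in sa = {};
 *
 *		sa.sin_family = AF_INET;
 *		sa.sin_addr.s_addr = bpf_htonl(0x7f000001);	(127.0.0.1)
 *		if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
 *			return 0;	(reject the connect)
 *		return 1;
 *	}
 */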
12bed760
EB
4033#ifdef CONFIG_XFRM
4034BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
4035 struct bpf_xfrm_state *, to, u32, size, u64, flags)
4036{
4037 const struct sec_path *sp = skb_sec_path(skb);
4038 const struct xfrm_state *x;
4039
4040 if (!sp || unlikely(index >= sp->len || flags))
4041 goto err_clear;
4042
4043 x = sp->xvec[index];
4044
4045 if (unlikely(size != sizeof(struct bpf_xfrm_state)))
4046 goto err_clear;
4047
4048 to->reqid = x->props.reqid;
4049 to->spi = x->id.spi;
4050 to->family = x->props.family;
4051 if (to->family == AF_INET6) {
4052 memcpy(to->remote_ipv6, x->props.saddr.a6,
4053 sizeof(to->remote_ipv6));
4054 } else {
4055 to->remote_ipv4 = x->props.saddr.a4;
4056 }
4057
4058 return 0;
4059err_clear:
4060 memset(to, 0, size);
4061 return -EINVAL;
4062}
4063
4064static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
4065 .func = bpf_skb_get_xfrm_state,
4066 .gpl_only = false,
4067 .ret_type = RET_INTEGER,
4068 .arg1_type = ARG_PTR_TO_CTX,
4069 .arg2_type = ARG_ANYTHING,
4070 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
4071 .arg4_type = ARG_CONST_SIZE,
4072 .arg5_type = ARG_ANYTHING,
4073};
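/* Illustrative sketch (BPF program side, not part of this file): a tc
 * ingress program checking which SA carried a decrypted packet; the
 * reqid value 1 is made up:
 *
 *	struct bpf_xfrm_state xs;
 *
 *	if (bpf_skb_get_xfrm_state(skb, 0, &xs, sizeof(xs), 0) == 0 &&
 *	    xs.reqid == 1)
 *		return TC_ACT_OK;
 */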
4074#endif
4075
87f5fc7e
DA
4076#if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
4077static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
4078 const struct neighbour *neigh,
4079 const struct net_device *dev)
4080{
4081 memcpy(params->dmac, neigh->ha, ETH_ALEN);
4082 memcpy(params->smac, dev->dev_addr, ETH_ALEN);
4083 params->h_vlan_TCI = 0;
4084 params->h_vlan_proto = 0;
4085
4086 return dev->ifindex;
4087}
4088#endif
4089
4090#if IS_ENABLED(CONFIG_INET)
4091static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4092 u32 flags)
4093{
4094 struct in_device *in_dev;
4095 struct neighbour *neigh;
4096 struct net_device *dev;
4097 struct fib_result res;
4098 struct fib_nh *nh;
4099 struct flowi4 fl4;
4100 int err;
4101
4102 dev = dev_get_by_index_rcu(net, params->ifindex);
4103 if (unlikely(!dev))
4104 return -ENODEV;
4105
4106 /* verify forwarding is enabled on this interface */
4107 in_dev = __in_dev_get_rcu(dev);
4108 if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
4109 return 0;
4110
4111 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4112 fl4.flowi4_iif = 1;
4113 fl4.flowi4_oif = params->ifindex;
4114 } else {
4115 fl4.flowi4_iif = params->ifindex;
4116 fl4.flowi4_oif = 0;
4117 }
4118 fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
4119 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
4120 fl4.flowi4_flags = 0;
4121
4122 fl4.flowi4_proto = params->l4_protocol;
4123 fl4.daddr = params->ipv4_dst;
4124 fl4.saddr = params->ipv4_src;
4125 fl4.fl4_sport = params->sport;
4126 fl4.fl4_dport = params->dport;
4127
4128 if (flags & BPF_FIB_LOOKUP_DIRECT) {
4129 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4130 struct fib_table *tb;
4131
4132 tb = fib_get_table(net, tbid);
4133 if (unlikely(!tb))
4134 return 0;
4135
4136 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
4137 } else {
4138 fl4.flowi4_mark = 0;
4139 fl4.flowi4_secid = 0;
4140 fl4.flowi4_tun_key.tun_id = 0;
4141 fl4.flowi4_uid = sock_net_uid(net, NULL);
4142
4143 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
4144 }
4145
4146 if (err || res.type != RTN_UNICAST)
4147 return 0;
4148
4149 if (res.fi->fib_nhs > 1)
4150 fib_select_path(net, &res, &fl4, NULL);
4151
4152 nh = &res.fi->fib_nh[res.nh_sel];
4153
4154 /* do not handle lwt encaps right now */
4155 if (nh->nh_lwtstate)
4156 return 0;
4157
4158 dev = nh->nh_dev;
4159 if (unlikely(!dev))
4160 return 0;
4161
4162 if (nh->nh_gw)
4163 params->ipv4_dst = nh->nh_gw;
4164
4165 params->rt_metric = res.fi->fib_priority;
4166
4167 /* xdp and cls_bpf programs are run in RCU-bh, so
4168 * rcu_read_lock_bh is not needed here.
4169 */
4170 neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
4171 if (neigh)
4172 return bpf_fib_set_fwd_params(params, neigh, dev);
4173
4174 return 0;
4175}
4176#endif
4177
4178#if IS_ENABLED(CONFIG_IPV6)
4179static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4180 u32 flags)
4181{
4182 struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
4183 struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
4184 struct neighbour *neigh;
4185 struct net_device *dev;
4186 struct inet6_dev *idev;
4187 struct fib6_info *f6i;
4188 struct flowi6 fl6;
4189 int strict = 0;
4190 int oif;
4191
4192 /* link-local addresses are never forwarded */
4193 if (rt6_need_strict(dst) || rt6_need_strict(src))
4194 return 0;
4195
4196 dev = dev_get_by_index_rcu(net, params->ifindex);
4197 if (unlikely(!dev))
4198 return -ENODEV;
4199
4200 idev = __in6_dev_get_safely(dev);
4201 if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
4202 return 0;
4203
4204 if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4205 fl6.flowi6_iif = 1;
4206 oif = fl6.flowi6_oif = params->ifindex;
4207 } else {
4208 oif = fl6.flowi6_iif = params->ifindex;
4209 fl6.flowi6_oif = 0;
4210 strict = RT6_LOOKUP_F_HAS_SADDR;
4211 }
4212 fl6.flowlabel = params->flowlabel;
4213 fl6.flowi6_scope = 0;
4214 fl6.flowi6_flags = 0;
4215 fl6.mp_hash = 0;
4216
4217 fl6.flowi6_proto = params->l4_protocol;
4218 fl6.daddr = *dst;
4219 fl6.saddr = *src;
4220 fl6.fl6_sport = params->sport;
4221 fl6.fl6_dport = params->dport;
4222
4223 if (flags & BPF_FIB_LOOKUP_DIRECT) {
4224 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4225 struct fib6_table *tb;
4226
4227 tb = ipv6_stub->fib6_get_table(net, tbid);
4228 if (unlikely(!tb))
4229 return 0;
4230
4231 f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
4232 } else {
4233 fl6.flowi6_mark = 0;
4234 fl6.flowi6_secid = 0;
4235 fl6.flowi6_tun_key.tun_id = 0;
4236 fl6.flowi6_uid = sock_net_uid(net, NULL);
4237
4238 f6i = ipv6_stub->fib6_lookup(net, oif, &fl6, strict);
4239 }
4240
4241 if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
4242 return 0;
4243
4244 if (unlikely(f6i->fib6_flags & RTF_REJECT ||
4245 f6i->fib6_type != RTN_UNICAST))
4246 return 0;
4247
4248 if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
4249 f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
4250 fl6.flowi6_oif, NULL,
4251 strict);
4252
4253 if (f6i->fib6_nh.nh_lwtstate)
4254 return 0;
4255
4256 if (f6i->fib6_flags & RTF_GATEWAY)
4257 *dst = f6i->fib6_nh.nh_gw;
4258
4259 dev = f6i->fib6_nh.nh_dev;
4260 params->rt_metric = f6i->fib6_metric;
4261
4262 /* xdp and cls_bpf programs are run in RCU-bh, so rcu_read_lock_bh is
4263 * not needed here. Cannot use __ipv6_neigh_lookup_noref here
4264 * because we need to get nd_tbl via the stub.
4265 */
4266 neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
4267 ndisc_hashfn, dst, dev);
4268 if (neigh)
4269 return bpf_fib_set_fwd_params(params, neigh, dev);
4270
4271 return 0;
4272}
4273#endif
4274
4275BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
4276 struct bpf_fib_lookup *, params, int, plen, u32, flags)
4277{
4278 if (plen < sizeof(*params))
4279 return -EINVAL;
4280
4281 switch (params->family) {
4282#if IS_ENABLED(CONFIG_INET)
4283 case AF_INET:
4284 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
4285 flags);
4286#endif
4287#if IS_ENABLED(CONFIG_IPV6)
4288 case AF_INET6:
4289 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
4290 flags);
4291#endif
4292 }
4293 return 0;
4294}
4295
4296static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
4297 .func = bpf_xdp_fib_lookup,
4298 .gpl_only = true,
4299 .ret_type = RET_INTEGER,
4300 .arg1_type = ARG_PTR_TO_CTX,
4301 .arg2_type = ARG_PTR_TO_MEM,
4302 .arg3_type = ARG_CONST_SIZE,
4303 .arg4_type = ARG_ANYTHING,
4304};
4305
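/* Illustrative sketch (BPF program side, not part of this file): the
 * core of an IPv4 XDP forwarder in the style of the xdp_fwd sample.
 * Ethernet/IP parsing and bounds checks are elided, ip_decrease_ttl()
 * is an assumed local helper that also fixes the checksum, and tx_port
 * is an assumed devmap; in this version a positive return value is the
 * egress ifindex:
 *
 *	struct bpf_fib_lookup fib_params = {};
 *	int rc;
 *
 *	fib_params.family	= AF_INET;
 *	fib_params.tos		= iph->tos;
 *	fib_params.l4_protocol	= iph->protocol;
 *	fib_params.ipv4_src	= iph->saddr;
 *	fib_params.ipv4_dst	= iph->daddr;
 *	fib_params.ifindex	= ctx->ingress_ifindex;
 *
 *	rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), 0);
 *	if (rc > 0) {
 *		ip_decrease_ttl(iph);
 *		memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
 *		memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
 *		return bpf_redirect_map(&tx_port, rc, 0);
 *	}
 *	return XDP_PASS;
 */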
4306BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
4307 struct bpf_fib_lookup *, params, int, plen, u32, flags)
4308{
4309 if (plen < sizeof(*params))
4310 return -EINVAL;
4311
4312 switch (params->family) {
4313#if IS_ENABLED(CONFIG_INET)
4314 case AF_INET:
4315 return bpf_ipv4_fib_lookup(dev_net(skb->dev), params, flags);
4316#endif
4317#if IS_ENABLED(CONFIG_IPV6)
4318 case AF_INET6:
4319 return bpf_ipv6_fib_lookup(dev_net(skb->dev), params, flags);
4320#endif
4321 }
4322 return -ENOTSUPP;
4323}
4324
4325static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
4326 .func = bpf_skb_fib_lookup,
4327 .gpl_only = true,
4328 .ret_type = RET_INTEGER,
4329 .arg1_type = ARG_PTR_TO_CTX,
4330 .arg2_type = ARG_PTR_TO_MEM,
4331 .arg3_type = ARG_CONST_SIZE,
4332 .arg4_type = ARG_ANYTHING,
4333};
4334
d4052c4a 4335static const struct bpf_func_proto *
2492d3b8 4336bpf_base_func_proto(enum bpf_func_id func_id)
89aa0758
AS
4337{
4338 switch (func_id) {
4339 case BPF_FUNC_map_lookup_elem:
4340 return &bpf_map_lookup_elem_proto;
4341 case BPF_FUNC_map_update_elem:
4342 return &bpf_map_update_elem_proto;
4343 case BPF_FUNC_map_delete_elem:
4344 return &bpf_map_delete_elem_proto;
03e69b50
DB
4345 case BPF_FUNC_get_prandom_u32:
4346 return &bpf_get_prandom_u32_proto;
c04167ce 4347 case BPF_FUNC_get_smp_processor_id:
80b48c44 4348 return &bpf_get_raw_smp_processor_id_proto;
2d0e30c3
DB
4349 case BPF_FUNC_get_numa_node_id:
4350 return &bpf_get_numa_node_id_proto;
04fd61ab
AS
4351 case BPF_FUNC_tail_call:
4352 return &bpf_tail_call_proto;
17ca8cbf
DB
4353 case BPF_FUNC_ktime_get_ns:
4354 return &bpf_ktime_get_ns_proto;
0756ea3e 4355 case BPF_FUNC_trace_printk:
1be7f75d
AS
4356 if (capable(CAP_SYS_ADMIN))
4357 return bpf_get_trace_printk_proto();
89aa0758
AS
4358 default:
4359 return NULL;
4360 }
4361}
4362
ae2cf1c4 4363static const struct bpf_func_proto *
5e43f899 4364sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
ae2cf1c4
DA
4365{
4366 switch (func_id) {
4367 /* inet and inet6 sockets are created in a process
4368 * context, so there is always a valid uid/gid
4369 */
4370 case BPF_FUNC_get_current_uid_gid:
4371 return &bpf_get_current_uid_gid_proto;
4372 default:
4373 return bpf_base_func_proto(func_id);
4374 }
4375}
4376
4fbac77d
AI
4377static const struct bpf_func_proto *
4378sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4379{
4380 switch (func_id) {
4381 /* inet and inet6 sockets are created in a process
4382 * context, so there is always a valid uid/gid
4383 */
4384 case BPF_FUNC_get_current_uid_gid:
4385 return &bpf_get_current_uid_gid_proto;
d74bad4e
AI
4386 case BPF_FUNC_bind:
4387 switch (prog->expected_attach_type) {
4388 case BPF_CGROUP_INET4_CONNECT:
4389 case BPF_CGROUP_INET6_CONNECT:
4390 return &bpf_bind_proto;
4391 default:
4392 return NULL;
4393 }
4fbac77d
AI
4394 default:
4395 return bpf_base_func_proto(func_id);
4396 }
4397}
4398
2492d3b8 4399static const struct bpf_func_proto *
5e43f899 4400sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
2492d3b8
DB
4401{
4402 switch (func_id) {
4403 case BPF_FUNC_skb_load_bytes:
4404 return &bpf_skb_load_bytes_proto;
4e1ec56c
DB
4405 case BPF_FUNC_skb_load_bytes_relative:
4406 return &bpf_skb_load_bytes_relative_proto;
91b8270f
CF
4407 case BPF_FUNC_get_socket_cookie:
4408 return &bpf_get_socket_cookie_proto;
6acc5c29
CF
4409 case BPF_FUNC_get_socket_uid:
4410 return &bpf_get_socket_uid_proto;
2492d3b8
DB
4411 default:
4412 return bpf_base_func_proto(func_id);
4413 }
4414}
4415
608cd71a 4416static const struct bpf_func_proto *
5e43f899 4417tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
608cd71a
AS
4418{
4419 switch (func_id) {
4420 case BPF_FUNC_skb_store_bytes:
4421 return &bpf_skb_store_bytes_proto;
05c74e5e
DB
4422 case BPF_FUNC_skb_load_bytes:
4423 return &bpf_skb_load_bytes_proto;
4e1ec56c
DB
4424 case BPF_FUNC_skb_load_bytes_relative:
4425 return &bpf_skb_load_bytes_relative_proto;
36bbef52
DB
4426 case BPF_FUNC_skb_pull_data:
4427 return &bpf_skb_pull_data_proto;
7d672345
DB
4428 case BPF_FUNC_csum_diff:
4429 return &bpf_csum_diff_proto;
36bbef52
DB
4430 case BPF_FUNC_csum_update:
4431 return &bpf_csum_update_proto;
91bc4822
AS
4432 case BPF_FUNC_l3_csum_replace:
4433 return &bpf_l3_csum_replace_proto;
4434 case BPF_FUNC_l4_csum_replace:
4435 return &bpf_l4_csum_replace_proto;
3896d655
AS
4436 case BPF_FUNC_clone_redirect:
4437 return &bpf_clone_redirect_proto;
8d20aabe
DB
4438 case BPF_FUNC_get_cgroup_classid:
4439 return &bpf_get_cgroup_classid_proto;
4e10df9a
AS
4440 case BPF_FUNC_skb_vlan_push:
4441 return &bpf_skb_vlan_push_proto;
4442 case BPF_FUNC_skb_vlan_pop:
4443 return &bpf_skb_vlan_pop_proto;
6578171a
DB
4444 case BPF_FUNC_skb_change_proto:
4445 return &bpf_skb_change_proto_proto;
d2485c42
DB
4446 case BPF_FUNC_skb_change_type:
4447 return &bpf_skb_change_type_proto;
2be7e212
DB
4448 case BPF_FUNC_skb_adjust_room:
4449 return &bpf_skb_adjust_room_proto;
5293efe6
DB
4450 case BPF_FUNC_skb_change_tail:
4451 return &bpf_skb_change_tail_proto;
d3aa45ce
AS
4452 case BPF_FUNC_skb_get_tunnel_key:
4453 return &bpf_skb_get_tunnel_key_proto;
4454 case BPF_FUNC_skb_set_tunnel_key:
14ca0751
DB
4455 return bpf_get_skb_set_tunnel_proto(func_id);
4456 case BPF_FUNC_skb_get_tunnel_opt:
4457 return &bpf_skb_get_tunnel_opt_proto;
4458 case BPF_FUNC_skb_set_tunnel_opt:
4459 return bpf_get_skb_set_tunnel_proto(func_id);
27b29f63
AS
4460 case BPF_FUNC_redirect:
4461 return &bpf_redirect_proto;
c46646d0
DB
4462 case BPF_FUNC_get_route_realm:
4463 return &bpf_get_route_realm_proto;
13c5c240
DB
4464 case BPF_FUNC_get_hash_recalc:
4465 return &bpf_get_hash_recalc_proto;
7a4b28c6
DB
4466 case BPF_FUNC_set_hash_invalid:
4467 return &bpf_set_hash_invalid_proto;
ded092cd
DB
4468 case BPF_FUNC_set_hash:
4469 return &bpf_set_hash_proto;
bd570ff9 4470 case BPF_FUNC_perf_event_output:
555c8a86 4471 return &bpf_skb_event_output_proto;
80b48c44
DB
4472 case BPF_FUNC_get_smp_processor_id:
4473 return &bpf_get_smp_processor_id_proto;
747ea55e
DB
4474 case BPF_FUNC_skb_under_cgroup:
4475 return &bpf_skb_under_cgroup_proto;
91b8270f
CF
4476 case BPF_FUNC_get_socket_cookie:
4477 return &bpf_get_socket_cookie_proto;
6acc5c29
CF
4478 case BPF_FUNC_get_socket_uid:
4479 return &bpf_get_socket_uid_proto;
12bed760
EB
4480#ifdef CONFIG_XFRM
4481 case BPF_FUNC_skb_get_xfrm_state:
4482 return &bpf_skb_get_xfrm_state_proto;
4483#endif
87f5fc7e
DA
4484 case BPF_FUNC_fib_lookup:
4485 return &bpf_skb_fib_lookup_proto;
608cd71a 4486 default:
2492d3b8 4487 return bpf_base_func_proto(func_id);
608cd71a
AS
4488 }
4489}
4490
6a773a15 4491static const struct bpf_func_proto *
5e43f899 4492xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6a773a15 4493{
4de16969
DB
4494 switch (func_id) {
4495 case BPF_FUNC_perf_event_output:
4496 return &bpf_xdp_event_output_proto;
669dc4d7
DB
4497 case BPF_FUNC_get_smp_processor_id:
4498 return &bpf_get_smp_processor_id_proto;
205c3807
DB
4499 case BPF_FUNC_csum_diff:
4500 return &bpf_csum_diff_proto;
17bedab2
MKL
4501 case BPF_FUNC_xdp_adjust_head:
4502 return &bpf_xdp_adjust_head_proto;
de8f3a83
DB
4503 case BPF_FUNC_xdp_adjust_meta:
4504 return &bpf_xdp_adjust_meta_proto;
814abfab
JF
4505 case BPF_FUNC_redirect:
4506 return &bpf_xdp_redirect_proto;
97f91a7c 4507 case BPF_FUNC_redirect_map:
e4a8e817 4508 return &bpf_xdp_redirect_map_proto;
b32cc5b9
NS
4509 case BPF_FUNC_xdp_adjust_tail:
4510 return &bpf_xdp_adjust_tail_proto;
87f5fc7e
DA
4511 case BPF_FUNC_fib_lookup:
4512 return &bpf_xdp_fib_lookup_proto;
4de16969 4513 default:
2492d3b8 4514 return bpf_base_func_proto(func_id);
4de16969 4515 }
6a773a15
BB
4516}
4517
3a0af8fd 4518static const struct bpf_func_proto *
5e43f899 4519lwt_inout_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
3a0af8fd
TG
4520{
4521 switch (func_id) {
4522 case BPF_FUNC_skb_load_bytes:
4523 return &bpf_skb_load_bytes_proto;
4524 case BPF_FUNC_skb_pull_data:
4525 return &bpf_skb_pull_data_proto;
4526 case BPF_FUNC_csum_diff:
4527 return &bpf_csum_diff_proto;
4528 case BPF_FUNC_get_cgroup_classid:
4529 return &bpf_get_cgroup_classid_proto;
4530 case BPF_FUNC_get_route_realm:
4531 return &bpf_get_route_realm_proto;
4532 case BPF_FUNC_get_hash_recalc:
4533 return &bpf_get_hash_recalc_proto;
4534 case BPF_FUNC_perf_event_output:
4535 return &bpf_skb_event_output_proto;
4536 case BPF_FUNC_get_smp_processor_id:
4537 return &bpf_get_smp_processor_id_proto;
4538 case BPF_FUNC_skb_under_cgroup:
4539 return &bpf_skb_under_cgroup_proto;
4540 default:
2492d3b8 4541 return bpf_base_func_proto(func_id);
3a0af8fd
TG
4542 }
4543}
4544
8c4b4c7e 4545static const struct bpf_func_proto *
5e43f899 4546sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
8c4b4c7e
LB
4547{
4548 switch (func_id) {
4549 case BPF_FUNC_setsockopt:
4550 return &bpf_setsockopt_proto;
cd86d1fd
LB
4551 case BPF_FUNC_getsockopt:
4552 return &bpf_getsockopt_proto;
b13d8807
LB
4553 case BPF_FUNC_sock_ops_cb_flags_set:
4554 return &bpf_sock_ops_cb_flags_set_proto;
174a79ff
JF
4555 case BPF_FUNC_sock_map_update:
4556 return &bpf_sock_map_update_proto;
81110384
JF
4557 case BPF_FUNC_sock_hash_update:
4558 return &bpf_sock_hash_update_proto;
8c4b4c7e
LB
4559 default:
4560 return bpf_base_func_proto(func_id);
4561 }
4562}
4563
5e43f899
AI
4564static const struct bpf_func_proto *
4565sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4f738adb
JF
4566{
4567 switch (func_id) {
4568 case BPF_FUNC_msg_redirect_map:
4569 return &bpf_msg_redirect_map_proto;
81110384
JF
4570 case BPF_FUNC_msg_redirect_hash:
4571 return &bpf_msg_redirect_hash_proto;
2a100317
JF
4572 case BPF_FUNC_msg_apply_bytes:
4573 return &bpf_msg_apply_bytes_proto;
91843d54
JF
4574 case BPF_FUNC_msg_cork_bytes:
4575 return &bpf_msg_cork_bytes_proto;
015632bb
JF
4576 case BPF_FUNC_msg_pull_data:
4577 return &bpf_msg_pull_data_proto;
4f738adb
JF
4578 default:
4579 return bpf_base_func_proto(func_id);
4580 }
4581}
4582
5e43f899
AI
4583static const struct bpf_func_proto *
4584sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
b005fd18
JF
4585{
4586 switch (func_id) {
8a31db56
JF
4587 case BPF_FUNC_skb_store_bytes:
4588 return &bpf_skb_store_bytes_proto;
b005fd18
JF
4589 case BPF_FUNC_skb_load_bytes:
4590 return &bpf_skb_load_bytes_proto;
8a31db56
JF
4591 case BPF_FUNC_skb_pull_data:
4592 return &bpf_skb_pull_data_proto;
4593 case BPF_FUNC_skb_change_tail:
4594 return &bpf_skb_change_tail_proto;
4595 case BPF_FUNC_skb_change_head:
4596 return &bpf_skb_change_head_proto;
b005fd18
JF
4597 case BPF_FUNC_get_socket_cookie:
4598 return &bpf_get_socket_cookie_proto;
4599 case BPF_FUNC_get_socket_uid:
4600 return &bpf_get_socket_uid_proto;
174a79ff
JF
4601 case BPF_FUNC_sk_redirect_map:
4602 return &bpf_sk_redirect_map_proto;
81110384
JF
4603 case BPF_FUNC_sk_redirect_hash:
4604 return &bpf_sk_redirect_hash_proto;
b005fd18
JF
4605 default:
4606 return bpf_base_func_proto(func_id);
4607 }
4608}
4609
3a0af8fd 4610static const struct bpf_func_proto *
5e43f899 4611lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
3a0af8fd
TG
4612{
4613 switch (func_id) {
4614 case BPF_FUNC_skb_get_tunnel_key:
4615 return &bpf_skb_get_tunnel_key_proto;
4616 case BPF_FUNC_skb_set_tunnel_key:
4617 return bpf_get_skb_set_tunnel_proto(func_id);
4618 case BPF_FUNC_skb_get_tunnel_opt:
4619 return &bpf_skb_get_tunnel_opt_proto;
4620 case BPF_FUNC_skb_set_tunnel_opt:
4621 return bpf_get_skb_set_tunnel_proto(func_id);
4622 case BPF_FUNC_redirect:
4623 return &bpf_redirect_proto;
4624 case BPF_FUNC_clone_redirect:
4625 return &bpf_clone_redirect_proto;
4626 case BPF_FUNC_skb_change_tail:
4627 return &bpf_skb_change_tail_proto;
4628 case BPF_FUNC_skb_change_head:
4629 return &bpf_skb_change_head_proto;
4630 case BPF_FUNC_skb_store_bytes:
4631 return &bpf_skb_store_bytes_proto;
4632 case BPF_FUNC_csum_update:
4633 return &bpf_csum_update_proto;
4634 case BPF_FUNC_l3_csum_replace:
4635 return &bpf_l3_csum_replace_proto;
4636 case BPF_FUNC_l4_csum_replace:
4637 return &bpf_l4_csum_replace_proto;
4638 case BPF_FUNC_set_hash_invalid:
4639 return &bpf_set_hash_invalid_proto;
4640 default:
5e43f899 4641 return lwt_inout_func_proto(func_id, prog);
3a0af8fd
TG
4642 }
4643}
4644
f96da094 4645static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
5e43f899 4646 const struct bpf_prog *prog,
f96da094 4647 struct bpf_insn_access_aux *info)
23994631 4648{
f96da094 4649 const int size_default = sizeof(__u32);
23994631 4650
9bac3d6d
AS
4651 if (off < 0 || off >= sizeof(struct __sk_buff))
4652 return false;
62c7989b 4653
4936e352 4654 /* The verifier guarantees that size > 0. */
9bac3d6d
AS
4655 if (off % size != 0)
4656 return false;
62c7989b
DB
4657
4658 switch (off) {
f96da094
DB
4659 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
4660 if (off + size > offsetofend(struct __sk_buff, cb[4]))
62c7989b
DB
4661 return false;
4662 break;
8a31db56
JF
4663 case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
4664 case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
4665 case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
4666 case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
f96da094 4667 case bpf_ctx_range(struct __sk_buff, data):
de8f3a83 4668 case bpf_ctx_range(struct __sk_buff, data_meta):
f96da094
DB
4669 case bpf_ctx_range(struct __sk_buff, data_end):
4670 if (size != size_default)
23994631 4671 return false;
31fd8581
YS
4672 break;
4673 default:
f96da094 4674 /* Only narrow read access allowed for now. */
31fd8581 4675 if (type == BPF_WRITE) {
f96da094 4676 if (size != size_default)
31fd8581
YS
4677 return false;
4678 } else {
f96da094
DB
4679 bpf_ctx_record_field_size(info, size_default);
4680 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
23994631 4681 return false;
31fd8581 4682 }
62c7989b 4683 }
9bac3d6d
AS
4684
4685 return true;
4686}
4687
d691f9e8 4688static bool sk_filter_is_valid_access(int off, int size,
19de99f7 4689 enum bpf_access_type type,
5e43f899 4690 const struct bpf_prog *prog,
23994631 4691 struct bpf_insn_access_aux *info)
d691f9e8 4692{
db58ba45 4693 switch (off) {
f96da094
DB
4694 case bpf_ctx_range(struct __sk_buff, tc_classid):
4695 case bpf_ctx_range(struct __sk_buff, data):
de8f3a83 4696 case bpf_ctx_range(struct __sk_buff, data_meta):
f96da094 4697 case bpf_ctx_range(struct __sk_buff, data_end):
8a31db56 4698 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
045efa82 4699 return false;
db58ba45 4700 }
045efa82 4701
d691f9e8
AS
4702 if (type == BPF_WRITE) {
4703 switch (off) {
f96da094 4704 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
d691f9e8
AS
4705 break;
4706 default:
4707 return false;
4708 }
4709 }
4710
5e43f899 4711 return bpf_skb_is_valid_access(off, size, type, prog, info);
d691f9e8
AS
4712}
4713
3a0af8fd
TG
4714static bool lwt_is_valid_access(int off, int size,
4715 enum bpf_access_type type,
5e43f899 4716 const struct bpf_prog *prog,
23994631 4717 struct bpf_insn_access_aux *info)
3a0af8fd
TG
4718{
4719 switch (off) {
f96da094 4720 case bpf_ctx_range(struct __sk_buff, tc_classid):
8a31db56 4721 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
de8f3a83 4722 case bpf_ctx_range(struct __sk_buff, data_meta):
3a0af8fd
TG
4723 return false;
4724 }
4725
4726 if (type == BPF_WRITE) {
4727 switch (off) {
f96da094
DB
4728 case bpf_ctx_range(struct __sk_buff, mark):
4729 case bpf_ctx_range(struct __sk_buff, priority):
4730 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
3a0af8fd
TG
4731 break;
4732 default:
4733 return false;
4734 }
4735 }
4736
f96da094
DB
4737 switch (off) {
4738 case bpf_ctx_range(struct __sk_buff, data):
4739 info->reg_type = PTR_TO_PACKET;
4740 break;
4741 case bpf_ctx_range(struct __sk_buff, data_end):
4742 info->reg_type = PTR_TO_PACKET_END;
4743 break;
4744 }
4745
5e43f899 4746 return bpf_skb_is_valid_access(off, size, type, prog, info);
3a0af8fd
TG
4747}
4748
aac3fc32
AI
4749
4750/* Attach type specific accesses */
4751static bool __sock_filter_check_attach_type(int off,
4752 enum bpf_access_type access_type,
4753 enum bpf_attach_type attach_type)
61023658 4754{
aac3fc32
AI
4755 switch (off) {
4756 case offsetof(struct bpf_sock, bound_dev_if):
4757 case offsetof(struct bpf_sock, mark):
4758 case offsetof(struct bpf_sock, priority):
4759 switch (attach_type) {
4760 case BPF_CGROUP_INET_SOCK_CREATE:
4761 goto full_access;
4762 default:
4763 return false;
4764 }
4765 case bpf_ctx_range(struct bpf_sock, src_ip4):
4766 switch (attach_type) {
4767 case BPF_CGROUP_INET4_POST_BIND:
4768 goto read_only;
4769 default:
4770 return false;
4771 }
4772 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
4773 switch (attach_type) {
4774 case BPF_CGROUP_INET6_POST_BIND:
4775 goto read_only;
4776 default:
4777 return false;
4778 }
4779 case bpf_ctx_range(struct bpf_sock, src_port):
4780 switch (attach_type) {
4781 case BPF_CGROUP_INET4_POST_BIND:
4782 case BPF_CGROUP_INET6_POST_BIND:
4783 goto read_only;
61023658
DA
4784 default:
4785 return false;
4786 }
4787 }
aac3fc32
AI
4788read_only:
4789 return access_type == BPF_READ;
4790full_access:
4791 return true;
4792}
4793
4794static bool __sock_filter_check_size(int off, int size,
4795 struct bpf_insn_access_aux *info)
4796{
4797 const int size_default = sizeof(__u32);
61023658 4798
aac3fc32
AI
4799 switch (off) {
4800 case bpf_ctx_range(struct bpf_sock, src_ip4):
4801 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
4802 bpf_ctx_record_field_size(info, size_default);
4803 return bpf_ctx_narrow_access_ok(off, size, size_default);
4804 }
4805
4806 return size == size_default;
4807}
4808
4809static bool sock_filter_is_valid_access(int off, int size,
4810 enum bpf_access_type type,
4811 const struct bpf_prog *prog,
4812 struct bpf_insn_access_aux *info)
4813{
4814 if (off < 0 || off >= sizeof(struct bpf_sock))
61023658 4815 return false;
61023658
DA
4816 if (off % size != 0)
4817 return false;
aac3fc32
AI
4818 if (!__sock_filter_check_attach_type(off, type,
4819 prog->expected_attach_type))
4820 return false;
4821 if (!__sock_filter_check_size(off, size, info))
61023658 4822 return false;
61023658
DA
4823 return true;
4824}
4825
047b0ecd
DB
4826static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
4827 const struct bpf_prog *prog, int drop_verdict)
36bbef52
DB
4828{
4829 struct bpf_insn *insn = insn_buf;
4830
4831 if (!direct_write)
4832 return 0;
4833
4834 /* if (!skb->cloned)
4835 * goto start;
4836 *
4837 * (Fast path; otherwise we conservatively assume the skb
4838 * might be a clone and do the rest in the helper.)
4839 */
4840 *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
4841 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
4842 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
4843
4844 /* ret = bpf_skb_pull_data(skb, 0); */
4845 *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
4846 *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
4847 *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4848 BPF_FUNC_skb_pull_data);
4849 /* if (!ret)
4850 * goto restore;
4851 * return TC_ACT_SHOT;
4852 */
4853 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
047b0ecd 4854 *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
36bbef52
DB
4855 *insn++ = BPF_EXIT_INSN();
4856
4857 /* restore: */
4858 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
4859 /* start: */
4860 *insn++ = prog->insnsi[0];
4861
4862 return insn - insn_buf;
4863}
4864
e0cea7ce
DB
4865static int bpf_gen_ld_abs(const struct bpf_insn *orig,
4866 struct bpf_insn *insn_buf)
4867{
4868 bool indirect = BPF_MODE(orig->code) == BPF_IND;
4869 struct bpf_insn *insn = insn_buf;
4870
4871 /* We're guaranteed here that CTX is in R6. */
4872 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
4873 if (!indirect) {
4874 *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
4875 } else {
4876 *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
4877 if (orig->imm)
4878 *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
4879 }
4880
4881 switch (BPF_SIZE(orig->code)) {
4882 case BPF_B:
4883 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
4884 break;
4885 case BPF_H:
4886 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
4887 break;
4888 case BPF_W:
4889 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
4890 break;
4891 }
4892
4893 *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
4894 *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
4895 *insn++ = BPF_EXIT_INSN();
4896
4897 return insn - insn_buf;
4898}
4899
047b0ecd
DB
4900static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
4901 const struct bpf_prog *prog)
4902{
4903 return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
4904}
4905
d691f9e8 4906static bool tc_cls_act_is_valid_access(int off, int size,
19de99f7 4907 enum bpf_access_type type,
5e43f899 4908 const struct bpf_prog *prog,
23994631 4909 struct bpf_insn_access_aux *info)
d691f9e8
AS
4910{
4911 if (type == BPF_WRITE) {
4912 switch (off) {
f96da094
DB
4913 case bpf_ctx_range(struct __sk_buff, mark):
4914 case bpf_ctx_range(struct __sk_buff, tc_index):
4915 case bpf_ctx_range(struct __sk_buff, priority):
4916 case bpf_ctx_range(struct __sk_buff, tc_classid):
4917 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
d691f9e8
AS
4918 break;
4919 default:
4920 return false;
4921 }
4922 }
19de99f7 4923
f96da094
DB
4924 switch (off) {
4925 case bpf_ctx_range(struct __sk_buff, data):
4926 info->reg_type = PTR_TO_PACKET;
4927 break;
de8f3a83
DB
4928 case bpf_ctx_range(struct __sk_buff, data_meta):
4929 info->reg_type = PTR_TO_PACKET_META;
4930 break;
f96da094
DB
4931 case bpf_ctx_range(struct __sk_buff, data_end):
4932 info->reg_type = PTR_TO_PACKET_END;
4933 break;
8a31db56
JF
4934 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
4935 return false;
f96da094
DB
4936 }
4937
5e43f899 4938 return bpf_skb_is_valid_access(off, size, type, prog, info);
d691f9e8
AS
4939}
4940
1afaf661 4941static bool __is_valid_xdp_access(int off, int size)
6a773a15
BB
4942{
4943 if (off < 0 || off >= sizeof(struct xdp_md))
4944 return false;
4945 if (off % size != 0)
4946 return false;
6088b582 4947 if (size != sizeof(__u32))
6a773a15
BB
4948 return false;
4949
4950 return true;
4951}
4952
4953static bool xdp_is_valid_access(int off, int size,
4954 enum bpf_access_type type,
5e43f899 4955 const struct bpf_prog *prog,
23994631 4956 struct bpf_insn_access_aux *info)
6a773a15 4957{
0d830032
JK
4958 if (type == BPF_WRITE) {
4959 if (bpf_prog_is_dev_bound(prog->aux)) {
4960 switch (off) {
4961 case offsetof(struct xdp_md, rx_queue_index):
4962 return __is_valid_xdp_access(off, size);
4963 }
4964 }
6a773a15 4965 return false;
0d830032 4966 }
6a773a15
BB
4967
4968 switch (off) {
4969 case offsetof(struct xdp_md, data):
23994631 4970 info->reg_type = PTR_TO_PACKET;
6a773a15 4971 break;
de8f3a83
DB
4972 case offsetof(struct xdp_md, data_meta):
4973 info->reg_type = PTR_TO_PACKET_META;
4974 break;
6a773a15 4975 case offsetof(struct xdp_md, data_end):
23994631 4976 info->reg_type = PTR_TO_PACKET_END;
6a773a15
BB
4977 break;
4978 }
4979
1afaf661 4980 return __is_valid_xdp_access(off, size);
6a773a15
BB
4981}
4982
4983void bpf_warn_invalid_xdp_action(u32 act)
4984{
9beb8bed
DB
4985 const u32 act_max = XDP_REDIRECT;
4986
4987 WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
4988 act > act_max ? "Illegal" : "Driver unsupported",
4989 act);
6a773a15
BB
4990}
4991EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
4992
4fbac77d
AI
4993static bool sock_addr_is_valid_access(int off, int size,
4994 enum bpf_access_type type,
4995 const struct bpf_prog *prog,
4996 struct bpf_insn_access_aux *info)
4997{
4998 const int size_default = sizeof(__u32);
4999
5000 if (off < 0 || off >= sizeof(struct bpf_sock_addr))
5001 return false;
5002 if (off % size != 0)
5003 return false;
5004
5005 /* Disallow access to IPv6 fields from IPv4 context and vice
5006 * versa.
5007 */
5008 switch (off) {
5009 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
5010 switch (prog->expected_attach_type) {
5011 case BPF_CGROUP_INET4_BIND:
d74bad4e 5012 case BPF_CGROUP_INET4_CONNECT:
4fbac77d
AI
5013 break;
5014 default:
5015 return false;
5016 }
5017 break;
5018 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
5019 switch (prog->expected_attach_type) {
5020 case BPF_CGROUP_INET6_BIND:
d74bad4e 5021 case BPF_CGROUP_INET6_CONNECT:
4fbac77d
AI
5022 break;
5023 default:
5024 return false;
5025 }
5026 break;
5027 }
5028
5029 switch (off) {
5030 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
5031 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
5032 /* Only narrow read access allowed for now. */
5033 if (type == BPF_READ) {
5034 bpf_ctx_record_field_size(info, size_default);
5035 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
5036 return false;
5037 } else {
5038 if (size != size_default)
5039 return false;
5040 }
5041 break;
5042 case bpf_ctx_range(struct bpf_sock_addr, user_port):
5043 if (size != size_default)
5044 return false;
5045 break;
5046 default:
5047 if (type == BPF_READ) {
5048 if (size != size_default)
5049 return false;
5050 } else {
5051 return false;
5052 }
5053 }
5054
5055 return true;
5056}
5057
44f0e430
LB
5058static bool sock_ops_is_valid_access(int off, int size,
5059 enum bpf_access_type type,
5e43f899 5060 const struct bpf_prog *prog,
44f0e430 5061 struct bpf_insn_access_aux *info)
40304b2a 5062{
44f0e430
LB
5063 const int size_default = sizeof(__u32);
5064
40304b2a
LB
5065 if (off < 0 || off >= sizeof(struct bpf_sock_ops))
5066 return false;
44f0e430 5067
40304b2a
LB
5068 /* The verifier guarantees that size > 0. */
5069 if (off % size != 0)
5070 return false;
40304b2a 5071
40304b2a
LB
5072 if (type == BPF_WRITE) {
5073 switch (off) {
2585cd62 5074 case offsetof(struct bpf_sock_ops, reply):
6f9bd3d7 5075 case offsetof(struct bpf_sock_ops, sk_txhash):
44f0e430
LB
5076 if (size != size_default)
5077 return false;
40304b2a
LB
5078 break;
5079 default:
5080 return false;
5081 }
44f0e430
LB
5082 } else {
5083 switch (off) {
5084 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
5085 bytes_acked):
5086 if (size != sizeof(__u64))
5087 return false;
5088 break;
5089 default:
5090 if (size != size_default)
5091 return false;
5092 break;
5093 }
40304b2a
LB
5094 }
5095
44f0e430 5096 return true;
40304b2a
LB
5097}
5098
8a31db56
JF
5099static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
5100 const struct bpf_prog *prog)
5101{
047b0ecd 5102 return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
8a31db56
JF
5103}
5104
b005fd18
JF
5105static bool sk_skb_is_valid_access(int off, int size,
5106 enum bpf_access_type type,
5e43f899 5107 const struct bpf_prog *prog,
b005fd18
JF
5108 struct bpf_insn_access_aux *info)
5109{
de8f3a83
DB
5110 switch (off) {
5111 case bpf_ctx_range(struct __sk_buff, tc_classid):
5112 case bpf_ctx_range(struct __sk_buff, data_meta):
5113 return false;
5114 }
5115
8a31db56
JF
5116 if (type == BPF_WRITE) {
5117 switch (off) {
8a31db56
JF
5118 case bpf_ctx_range(struct __sk_buff, tc_index):
5119 case bpf_ctx_range(struct __sk_buff, priority):
5120 break;
5121 default:
5122 return false;
5123 }
5124 }
5125
b005fd18 5126 switch (off) {
f7e9cb1e 5127 case bpf_ctx_range(struct __sk_buff, mark):
8a31db56 5128 return false;
b005fd18
JF
5129 case bpf_ctx_range(struct __sk_buff, data):
5130 info->reg_type = PTR_TO_PACKET;
5131 break;
5132 case bpf_ctx_range(struct __sk_buff, data_end):
5133 info->reg_type = PTR_TO_PACKET_END;
5134 break;
5135 }
5136
5e43f899 5137 return bpf_skb_is_valid_access(off, size, type, prog, info);
b005fd18
JF
5138}
5139
4f738adb
JF
5140static bool sk_msg_is_valid_access(int off, int size,
5141 enum bpf_access_type type,
5e43f899 5142 const struct bpf_prog *prog,
4f738adb
JF
5143 struct bpf_insn_access_aux *info)
5144{
5145 if (type == BPF_WRITE)
5146 return false;
5147
5148 switch (off) {
5149 case offsetof(struct sk_msg_md, data):
5150 info->reg_type = PTR_TO_PACKET;
5151 break;
5152 case offsetof(struct sk_msg_md, data_end):
5153 info->reg_type = PTR_TO_PACKET_END;
5154 break;
5155 }
5156
5157 if (off < 0 || off >= sizeof(struct sk_msg_md))
5158 return false;
5159 if (off % size != 0)
5160 return false;
5161 if (size != sizeof(__u64))
5162 return false;
5163
5164 return true;
5165}
5166
2492d3b8
DB
5167static u32 bpf_convert_ctx_access(enum bpf_access_type type,
5168 const struct bpf_insn *si,
5169 struct bpf_insn *insn_buf,
f96da094 5170 struct bpf_prog *prog, u32 *target_size)
9bac3d6d
AS
5171{
5172 struct bpf_insn *insn = insn_buf;
6b8cc1d1 5173 int off;
9bac3d6d 5174
6b8cc1d1 5175 switch (si->off) {
9bac3d6d 5176 case offsetof(struct __sk_buff, len):
6b8cc1d1 5177 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5178 bpf_target_off(struct sk_buff, len, 4,
5179 target_size));
9bac3d6d
AS
5180 break;
5181
0b8c707d 5182 case offsetof(struct __sk_buff, protocol):
6b8cc1d1 5183 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
5184 bpf_target_off(struct sk_buff, protocol, 2,
5185 target_size));
0b8c707d
DB
5186 break;
5187
27cd5452 5188 case offsetof(struct __sk_buff, vlan_proto):
6b8cc1d1 5189 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
5190 bpf_target_off(struct sk_buff, vlan_proto, 2,
5191 target_size));
27cd5452
MS
5192 break;
5193
bcad5718 5194 case offsetof(struct __sk_buff, priority):
754f1e6a 5195 if (type == BPF_WRITE)
6b8cc1d1 5196 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5197 bpf_target_off(struct sk_buff, priority, 4,
5198 target_size));
754f1e6a 5199 else
6b8cc1d1 5200 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5201 bpf_target_off(struct sk_buff, priority, 4,
5202 target_size));
bcad5718
DB
5203 break;
5204
37e82c2f 5205 case offsetof(struct __sk_buff, ingress_ifindex):
6b8cc1d1 5206 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5207 bpf_target_off(struct sk_buff, skb_iif, 4,
5208 target_size));
37e82c2f
AS
5209 break;
5210
5211 case offsetof(struct __sk_buff, ifindex):
f035a515 5212 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
6b8cc1d1 5213 si->dst_reg, si->src_reg,
37e82c2f 5214 offsetof(struct sk_buff, dev));
6b8cc1d1
DB
5215 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
5216 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
f96da094
DB
5217 bpf_target_off(struct net_device, ifindex, 4,
5218 target_size));
37e82c2f
AS
5219 break;
5220
ba7591d8 5221 case offsetof(struct __sk_buff, hash):
6b8cc1d1 5222 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5223 bpf_target_off(struct sk_buff, hash, 4,
5224 target_size));
ba7591d8
DB
5225 break;
5226
9bac3d6d 5227 case offsetof(struct __sk_buff, mark):
d691f9e8 5228 if (type == BPF_WRITE)
6b8cc1d1 5229 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5230 bpf_target_off(struct sk_buff, mark, 4,
5231 target_size));
d691f9e8 5232 else
6b8cc1d1 5233 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5234 bpf_target_off(struct sk_buff, mark, 4,
5235 target_size));
d691f9e8 5236 break;
9bac3d6d
AS
5237
5238 case offsetof(struct __sk_buff, pkt_type):
f96da094
DB
5239 *target_size = 1;
5240 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
5241 PKT_TYPE_OFFSET());
5242 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
5243#ifdef __BIG_ENDIAN_BITFIELD
5244 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
5245#endif
5246 break;
9bac3d6d
AS
5247
5248 case offsetof(struct __sk_buff, queue_mapping):
f96da094
DB
5249 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
5250 bpf_target_off(struct sk_buff, queue_mapping, 2,
5251 target_size));
5252 break;
c2497395 5253
c2497395 5254 case offsetof(struct __sk_buff, vlan_present):
c2497395 5255 case offsetof(struct __sk_buff, vlan_tci):
f96da094
DB
5256 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
5257
5258 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
5259 bpf_target_off(struct sk_buff, vlan_tci, 2,
5260 target_size));
5261 if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
5262 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
5263 ~VLAN_TAG_PRESENT);
5264 } else {
5265 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
5266 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
5267 }
5268 break;
d691f9e8
AS
5269
5270 case offsetof(struct __sk_buff, cb[0]) ...
f96da094 5271 offsetofend(struct __sk_buff, cb[4]) - 1:
d691f9e8 5272 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
62c7989b
DB
5273 BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
5274 offsetof(struct qdisc_skb_cb, data)) %
5275 sizeof(__u64));
d691f9e8 5276
ff936a04 5277 prog->cb_access = 1;
6b8cc1d1
DB
5278 off = si->off;
5279 off -= offsetof(struct __sk_buff, cb[0]);
5280 off += offsetof(struct sk_buff, cb);
5281 off += offsetof(struct qdisc_skb_cb, data);
d691f9e8 5282 if (type == BPF_WRITE)
62c7989b 5283 *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
6b8cc1d1 5284 si->src_reg, off);
d691f9e8 5285 else
62c7989b 5286 *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
6b8cc1d1 5287 si->src_reg, off);
d691f9e8
AS
5288 break;
5289
045efa82 5290 case offsetof(struct __sk_buff, tc_classid):
6b8cc1d1
DB
5291 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
5292
5293 off = si->off;
5294 off -= offsetof(struct __sk_buff, tc_classid);
5295 off += offsetof(struct sk_buff, cb);
5296 off += offsetof(struct qdisc_skb_cb, tc_classid);
f96da094 5297 *target_size = 2;
09c37a2c 5298 if (type == BPF_WRITE)
6b8cc1d1
DB
5299 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
5300 si->src_reg, off);
09c37a2c 5301 else
6b8cc1d1
DB
5302 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
5303 si->src_reg, off);
045efa82
DB
5304 break;
5305
db58ba45 5306 case offsetof(struct __sk_buff, data):
f035a515 5307 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
6b8cc1d1 5308 si->dst_reg, si->src_reg,
db58ba45
AS
5309 offsetof(struct sk_buff, data));
5310 break;
5311
de8f3a83
DB
5312 case offsetof(struct __sk_buff, data_meta):
5313 off = si->off;
5314 off -= offsetof(struct __sk_buff, data_meta);
5315 off += offsetof(struct sk_buff, cb);
5316 off += offsetof(struct bpf_skb_data_end, data_meta);
5317 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
5318 si->src_reg, off);
5319 break;
5320
db58ba45 5321 case offsetof(struct __sk_buff, data_end):
6b8cc1d1
DB
5322 off = si->off;
5323 off -= offsetof(struct __sk_buff, data_end);
5324 off += offsetof(struct sk_buff, cb);
5325 off += offsetof(struct bpf_skb_data_end, data_end);
5326 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
5327 si->src_reg, off);
db58ba45
AS
5328 break;
5329
d691f9e8
AS
5330 case offsetof(struct __sk_buff, tc_index):
5331#ifdef CONFIG_NET_SCHED
d691f9e8 5332 if (type == BPF_WRITE)
6b8cc1d1 5333 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
5334 bpf_target_off(struct sk_buff, tc_index, 2,
5335 target_size));
d691f9e8 5336 else
6b8cc1d1 5337 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
f96da094
DB
5338 bpf_target_off(struct sk_buff, tc_index, 2,
5339 target_size));
d691f9e8 5340#else
2ed46ce4 5341 *target_size = 2;
d691f9e8 5342 if (type == BPF_WRITE)
6b8cc1d1 5343 *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
d691f9e8 5344 else
6b8cc1d1 5345 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
b1d9fc41
DB
5346#endif
5347 break;
5348
5349 case offsetof(struct __sk_buff, napi_id):
5350#if defined(CONFIG_NET_RX_BUSY_POLL)
b1d9fc41 5351 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
f96da094
DB
5352 bpf_target_off(struct sk_buff, napi_id, 4,
5353 target_size));
b1d9fc41
DB
5354 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
5355 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
5356#else
2ed46ce4 5357 *target_size = 4;
b1d9fc41 5358 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
d691f9e8 5359#endif
6b8cc1d1 5360 break;
	case offsetof(struct __sk_buff, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_family,
						     2, target_size));
		break;
	case offsetof(struct __sk_buff, remote_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_daddr,
						     4, target_size));
		break;
	case offsetof(struct __sk_buff, local_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_rcv_saddr,
						     4, target_size));
		break;
	case offsetof(struct __sk_buff, remote_ip6[0]) ...
	     offsetof(struct __sk_buff, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct __sk_buff, remote_ip6[0]);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;
	case offsetof(struct __sk_buff, local_ip6[0]) ...
	     offsetof(struct __sk_buff, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct __sk_buff, local_ip6[0]);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct __sk_buff, remote_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_dport,
						     2, target_size));
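		/* skc_dport is a __be16; on little-endian hosts the extra
		 * 16-bit left shift below makes the 32-bit value exposed to
		 * the program carry the port in network byte order on both
		 * endiannesses.
		 */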
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct __sk_buff, local_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct sock_common,
						     skc_num, 2, target_size));
		break;
	}

	return insn - insn_buf;
}

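/* Rewrite accesses to struct bpf_sock fields into loads/stores on the
 * underlying struct sock. Scalar fields map directly; type and protocol
 * are packed bitfields and are extracted with an AND/RSH pair on
 * __sk_flags_offset.
 */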
static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
					  const struct bpf_insn *si,
					  struct bpf_insn *insn_buf,
					  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock, bound_dev_if):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_bound_dev_if));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_bound_dev_if));
		break;

	case offsetof(struct bpf_sock, mark):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_mark));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_mark));
		break;

	case offsetof(struct bpf_sock, priority):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);

		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_priority));
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      offsetof(struct sock, sk_priority));
		break;

	case offsetof(struct bpf_sock, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
				      offsetof(struct sock, sk_family));
		break;

	case offsetof(struct bpf_sock, type):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
		break;

	case offsetof(struct bpf_sock, protocol):
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
				      offsetof(struct sock, __sk_flags_offset));
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
		break;

	case offsetof(struct bpf_sock, src_ip4):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_rcv_saddr,
				       FIELD_SIZEOF(struct sock_common,
						    skc_rcv_saddr),
				       target_size));
		break;

	case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		off = si->off;
		off -= offsetof(struct bpf_sock, src_ip6[0]);
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(
				struct sock_common,
				skc_v6_rcv_saddr.s6_addr32[0],
				FIELD_SIZEOF(struct sock_common,
					     skc_v6_rcv_saddr.s6_addr32[0]),
				target_size) + off);
#else
		(void)off;
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock, src_port):
		*insn++ = BPF_LDX_MEM(
			BPF_FIELD_SIZEOF(struct sock_common, skc_num),
			si->dst_reg, si->src_reg,
			bpf_target_off(struct sock_common, skc_num,
				       FIELD_SIZEOF(struct sock_common,
						    skc_num),
				       target_size));
		break;
	}

	return insn - insn_buf;
}

static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct __sk_buff, ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_buff, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      bpf_target_off(struct net_device, ifindex, 4,
						     target_size));
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}

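/* xdp_md is a thin window onto struct xdp_buff: the data pointers map 1:1,
 * while the ifindex/queue fields are reached through xdp_buff->rxq.
 */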
static u32 xdp_convert_ctx_access(enum bpf_access_type type,
				  const struct bpf_insn *si,
				  struct bpf_insn *insn_buf,
				  struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct xdp_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data));
		break;
	case offsetof(struct xdp_md, data_meta):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_meta));
		break;
	case offsetof(struct xdp_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, data_end));
		break;
	case offsetof(struct xdp_md, ingress_ifindex):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, rxq));
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
				      si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_rxq_info, dev));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct net_device, ifindex));
		break;
	case offsetof(struct xdp_md, rx_queue_index):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
				      si->dst_reg, si->src_reg,
				      offsetof(struct xdp_buff, rxq));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct xdp_rxq_info,
					       queue_index));
		break;
	}

	return insn - insn_buf;
}

/* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF, where S is the
 * type of the context Structure, F is the Field in the context structure
 * that contains a pointer to the Nested Structure of type NS that has the
 * field NF.
 *
 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to the caller to
 * make sure that SIZE is not greater than the actual size of S.F.NF.
 *
 * If an offset OFF is provided, the load happens from that offset relative
 * to the offset of NF.
 */
#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF)	\
	do {								\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
				      si->src_reg, offsetof(S, F));	\
		*insn++ = BPF_LDX_MEM(					\
			SIZE, si->dst_reg, si->dst_reg,			\
			bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),	\
				       target_size)			\
				+ OFF);					\
	} while (0)

#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF)			\
	SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF,		\
					     BPF_FIELD_SIZEOF(NS, NF), 0)

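/* As a worked example, SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
 * struct sockaddr, uaddr, sa_family) expands to roughly:
 *
 *	dst_reg = *(u64 *)(src_reg + offsetof(bpf_sock_addr_kern, uaddr))
 *	dst_reg = *(u16 *)(dst_reg + offsetof(sockaddr, sa_family))
 *
 * i.e. one load to chase the uaddr pointer, then one load of the nested
 * field itself, sized via BPF_FIELD_SIZEOF().
 */
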
/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for a store operation.
 *
 * It doesn't support the SIZE argument though since narrow stores are not
 * supported for now.
 *
 * In addition it uses Temporary Field TF (member of struct S) as the 3rd
 * "register" since the two registers available in convert_ctx_access are
 * not enough: we can't overwrite SRC, since it contains the value to store,
 * nor DST, since it contains the pointer to context that may be used by
 * later instructions. But we need a temporary place to save the pointer to
 * the nested structure whose field we want to store to.
 */
#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF)		\
	do {								\
		int tmp_reg = BPF_REG_9;				\
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	\
			--tmp_reg;					\
		if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)	\
			--tmp_reg;					\
		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg,	\
				      offsetof(S, TF));			\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,	\
				      si->dst_reg, offsetof(S, F));	\
		*insn++ = BPF_STX_MEM(					\
			BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg,	\
			bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),	\
				       target_size)			\
				+ OFF);					\
		*insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,	\
				      offsetof(S, TF));			\
	} while (0)

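/* The sequence above spills whichever scratch register was picked into the
 * TF slot of the kernel-side context, uses it to hold the nested-structure
 * pointer for the store, and then reloads its original value, so the
 * borrowed register is transparent to the rest of the program.
 */
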
#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
						      TF)		\
	do {								\
		if (type == BPF_WRITE) {				\
			SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, \
							 TF);		\
		} else {						\
			SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(		\
				S, NS, F, NF, SIZE, OFF);		\
		}							\
	} while (0)

#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF)		\
	SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(			\
		S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)

static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
					const struct bpf_insn *si,
					struct bpf_insn *insn_buf,
					struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock_addr, user_family):
		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
					    struct sockaddr, uaddr, sa_family);
		break;

	case offsetof(struct bpf_sock_addr, user_ip4):
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
			sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
		break;

	case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
		off = si->off;
		off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
			sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
			tmp_reg);
		break;

	case offsetof(struct bpf_sock_addr, user_port):
		/* To get the port we need to know sa_family first and then
		 * treat sockaddr as either sockaddr_in or sockaddr_in6.
		 * Though we can simplify things, since the port field has
		 * the same offset and size in both structures. Here we check
		 * this invariant and use just one of the structures if it
		 * holds.
		 */
		BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
			     offsetof(struct sockaddr_in6, sin6_port));
		BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
			     FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
		SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
						     struct sockaddr_in6, uaddr,
						     sin6_port, tmp_reg);
		break;

	case offsetof(struct bpf_sock_addr, family):
		SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
					    struct sock, sk, sk_family);
		break;

	case offsetof(struct bpf_sock_addr, type):
		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sock, sk,
			__sk_flags_offset, BPF_W, 0);
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
		break;

	case offsetof(struct bpf_sock_addr, protocol):
		SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
			struct bpf_sock_addr_kern, struct sock, sk,
			__sk_flags_offset, BPF_W, 0);
		*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
		*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
					SK_FL_PROTO_SHIFT);
		break;
	}

	return insn - insn_buf;
}

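/* sock_ops programs operate on struct bpf_sock_ops. The op/reply words live
 * directly in bpf_sock_ops_kern; most other fields are reached by first
 * loading bpf_sock_ops_kern->sk and then dereferencing the socket, so each
 * such access costs two loads.
 */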
static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
				       const struct bpf_insn *si,
				       struct bpf_insn *insn_buf,
				       struct bpf_prog *prog,
				       u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct bpf_sock_ops, op) ...
	     offsetof(struct bpf_sock_ops, replylong[3]):
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
		BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
			     FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
		off = si->off;
		off -= offsetof(struct bpf_sock_ops, op);
		off += offsetof(struct bpf_sock_ops_kern, op);
		if (type == BPF_WRITE)
			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		else
			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
					      off);
		break;

	case offsetof(struct bpf_sock_ops, family):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_family));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_daddr));
		break;

	case offsetof(struct bpf_sock_ops, local_ip4):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_rcv_saddr) != 4);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_rcv_saddr));
		break;

	case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, remote_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_daddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_daddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
	     offsetof(struct bpf_sock_ops, local_ip6[3]):
#if IS_ENABLED(CONFIG_IPV6)
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
					  skc_v6_rcv_saddr.s6_addr32[0]) != 4);

		off = si->off;
		off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common,
					       skc_v6_rcv_saddr.s6_addr32[0]) +
				      off);
#else
		*insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
#endif
		break;

	case offsetof(struct bpf_sock_ops, remote_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_dport));
#ifndef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
#endif
		break;

	case offsetof(struct bpf_sock_ops, local_port):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_num));
		break;

	case offsetof(struct bpf_sock_ops, is_fullsock):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern,
					      is_fullsock),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern,
					       is_fullsock));
		break;

	case offsetof(struct bpf_sock_ops, state):
		BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
				      offsetof(struct sock_common, skc_state));
		break;

	case offsetof(struct bpf_sock_ops, rtt_min):
		BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
			     sizeof(struct minmax));
		BUILD_BUG_ON(sizeof(struct minmax) <
			     sizeof(struct minmax_sample));

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
					      struct bpf_sock_ops_kern, sk),
				      si->dst_reg, si->src_reg,
				      offsetof(struct bpf_sock_ops_kern, sk));
		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
				      offsetof(struct tcp_sock, rtt_min) +
				      FIELD_SIZEOF(struct minmax_sample, t));
		break;

/* Helper macro for adding read access to tcp_sock or sock fields. */
#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			\
	do {								\
		BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >		\
			     FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			\
					      struct bpf_sock_ops_kern,	\
					      is_fullsock),		\
				      si->dst_reg, si->src_reg,		\
				      offsetof(struct bpf_sock_ops_kern, \
					       is_fullsock));		\
		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);	\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			\
					      struct bpf_sock_ops_kern, sk), \
				      si->dst_reg, si->src_reg,		\
				      offsetof(struct bpf_sock_ops_kern, sk)); \
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,		\
						       OBJ_FIELD),	\
				      si->dst_reg, si->dst_reg,		\
				      offsetof(OBJ, OBJ_FIELD));	\
	} while (0)

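/* The is_fullsock test above guards against request or timewait minisocks:
 * if bpf_sock_ops_kern->is_fullsock is zero, the two loads that would
 * dereference the socket are jumped over and the destination register is
 * left holding 0 instead of a stale field value.
 */
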
/* Helper macro for adding write access to tcp_sock or sock fields.
 * The macro is called with two registers, dst_reg which contains a pointer
 * to ctx (context) and src_reg which contains the value that should be
 * stored. However, we need an additional register since we cannot overwrite
 * dst_reg because it may be used later in the program.
 * Instead we "borrow" one of the other registers. We first save its value
 * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
 * it at the end of the macro.
 */
#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)			\
	do {								\
		int reg = BPF_REG_9;					\
		BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >		\
			     FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
		if (si->dst_reg == reg || si->src_reg == reg)		\
			reg--;						\
		if (si->dst_reg == reg || si->src_reg == reg)		\
			reg--;						\
		*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg,		\
				      offsetof(struct bpf_sock_ops_kern, \
					       temp));			\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			\
					      struct bpf_sock_ops_kern,	\
					      is_fullsock),		\
				      reg, si->dst_reg,			\
				      offsetof(struct bpf_sock_ops_kern, \
					       is_fullsock));		\
		*insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);		\
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(			\
					      struct bpf_sock_ops_kern, sk), \
				      reg, si->dst_reg,			\
				      offsetof(struct bpf_sock_ops_kern, sk)); \
		*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),	\
				      reg, si->src_reg,			\
				      offsetof(OBJ, OBJ_FIELD));	\
		*insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg,		\
				      offsetof(struct bpf_sock_ops_kern, \
					       temp));			\
	} while (0)

#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE)	\
	do {								\
		if (TYPE == BPF_WRITE)					\
			SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	\
		else							\
			SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);	\
	} while (0)

	case offsetof(struct bpf_sock_ops, snd_cwnd):
		SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, srtt_us):
		SOCK_OPS_GET_FIELD(srtt_us, srtt_us, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
		SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, snd_ssthresh):
		SOCK_OPS_GET_FIELD(snd_ssthresh, snd_ssthresh, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, rcv_nxt):
		SOCK_OPS_GET_FIELD(rcv_nxt, rcv_nxt, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, snd_nxt):
		SOCK_OPS_GET_FIELD(snd_nxt, snd_nxt, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, snd_una):
		SOCK_OPS_GET_FIELD(snd_una, snd_una, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, mss_cache):
		SOCK_OPS_GET_FIELD(mss_cache, mss_cache, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, ecn_flags):
		SOCK_OPS_GET_FIELD(ecn_flags, ecn_flags, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, rate_delivered):
		SOCK_OPS_GET_FIELD(rate_delivered, rate_delivered,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, rate_interval_us):
		SOCK_OPS_GET_FIELD(rate_interval_us, rate_interval_us,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, packets_out):
		SOCK_OPS_GET_FIELD(packets_out, packets_out, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, retrans_out):
		SOCK_OPS_GET_FIELD(retrans_out, retrans_out, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, total_retrans):
		SOCK_OPS_GET_FIELD(total_retrans, total_retrans,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, segs_in):
		SOCK_OPS_GET_FIELD(segs_in, segs_in, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, data_segs_in):
		SOCK_OPS_GET_FIELD(data_segs_in, data_segs_in, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, segs_out):
		SOCK_OPS_GET_FIELD(segs_out, segs_out, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, data_segs_out):
		SOCK_OPS_GET_FIELD(data_segs_out, data_segs_out,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, lost_out):
		SOCK_OPS_GET_FIELD(lost_out, lost_out, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, sacked_out):
		SOCK_OPS_GET_FIELD(sacked_out, sacked_out, struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, sk_txhash):
		SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
					  struct sock, type);
		break;

	case offsetof(struct bpf_sock_ops, bytes_received):
		SOCK_OPS_GET_FIELD(bytes_received, bytes_received,
				   struct tcp_sock);
		break;

	case offsetof(struct bpf_sock_ops, bytes_acked):
		SOCK_OPS_GET_FIELD(bytes_acked, bytes_acked, struct tcp_sock);
		break;
	}
	return insn - insn_buf;
}

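/* SK_SKB programs reuse the generic skb converter, except that data_end is
 * tracked in tcp_skb_cb's bpf area rather than derived from the skb itself.
 */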
static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	int off;

	switch (si->off) {
	case offsetof(struct __sk_buff, data_end):
		off = si->off;
		off -= offsetof(struct __sk_buff, data_end);
		off += offsetof(struct sk_buff, cb);
		off += offsetof(struct tcp_skb_cb, bpf.data_end);
		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
				      si->src_reg, off);
		break;
	default:
		return bpf_convert_ctx_access(type, si, insn_buf, prog,
					      target_size);
	}

	return insn - insn_buf;
}

static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct sk_msg_md, data):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg_buff, data));
		break;
	case offsetof(struct sk_msg_md, data_end):
		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data_end),
				      si->dst_reg, si->src_reg,
				      offsetof(struct sk_msg_buff, data_end));
		break;
	}

	return insn - insn_buf;
}

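/* Per-program-type vtables: get_func_proto decides which helpers a program
 * may call, is_valid_access vets each context offset, and
 * convert_ctx_access (above) rewrites those accesses at verification time.
 */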
const struct bpf_verifier_ops sk_filter_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops sk_filter_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
	.get_func_proto		= tc_cls_act_func_proto,
	.is_valid_access	= tc_cls_act_is_valid_access,
	.convert_ctx_access	= tc_cls_act_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
	.gen_ld_abs		= bpf_gen_ld_abs,
};

const struct bpf_prog_ops tc_cls_act_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops xdp_verifier_ops = {
	.get_func_proto		= xdp_func_proto,
	.is_valid_access	= xdp_is_valid_access,
	.convert_ctx_access	= xdp_convert_ctx_access,
};

const struct bpf_prog_ops xdp_prog_ops = {
	.test_run		= bpf_prog_test_run_xdp,
};

const struct bpf_verifier_ops cg_skb_verifier_ops = {
	.get_func_proto		= sk_filter_func_proto,
	.is_valid_access	= sk_filter_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops cg_skb_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_inout_verifier_ops = {
	.get_func_proto		= lwt_inout_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
};

const struct bpf_prog_ops lwt_inout_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
	.get_func_proto		= lwt_xmit_func_proto,
	.is_valid_access	= lwt_is_valid_access,
	.convert_ctx_access	= bpf_convert_ctx_access,
	.gen_prologue		= tc_cls_act_prologue,
};

const struct bpf_prog_ops lwt_xmit_prog_ops = {
	.test_run		= bpf_prog_test_run_skb,
};

const struct bpf_verifier_ops cg_sock_verifier_ops = {
	.get_func_proto		= sock_filter_func_proto,
	.is_valid_access	= sock_filter_is_valid_access,
	.convert_ctx_access	= sock_filter_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_prog_ops = {
};

const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
	.get_func_proto		= sock_addr_func_proto,
	.is_valid_access	= sock_addr_is_valid_access,
	.convert_ctx_access	= sock_addr_convert_ctx_access,
};

const struct bpf_prog_ops cg_sock_addr_prog_ops = {
};

const struct bpf_verifier_ops sock_ops_verifier_ops = {
	.get_func_proto		= sock_ops_func_proto,
	.is_valid_access	= sock_ops_is_valid_access,
	.convert_ctx_access	= sock_ops_convert_ctx_access,
};

const struct bpf_prog_ops sock_ops_prog_ops = {
};

const struct bpf_verifier_ops sk_skb_verifier_ops = {
	.get_func_proto		= sk_skb_func_proto,
	.is_valid_access	= sk_skb_is_valid_access,
	.convert_ctx_access	= sk_skb_convert_ctx_access,
	.gen_prologue		= sk_skb_prologue,
};

const struct bpf_prog_ops sk_skb_prog_ops = {
};

const struct bpf_verifier_ops sk_msg_verifier_ops = {
	.get_func_proto		= sk_msg_func_proto,
	.is_valid_access	= sk_msg_is_valid_access,
	.convert_ctx_access	= sk_msg_convert_ctx_access,
};

const struct bpf_prog_ops sk_msg_prog_ops = {
};

int sk_detach_filter(struct sock *sk)
{
	int ret = -ENOENT;
	struct sk_filter *filter;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (filter) {
		RCU_INIT_POINTER(sk->sk_filter, NULL);
		sk_filter_uncharge(sk, filter);
		ret = 0;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sk_detach_filter);

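/* sk_get_filter() backs the SO_GET_FILTER getsockopt(). A rough sketch of
 * the two-step pattern this enables from userspace, assuming a classic BPF
 * filter is attached:
 *
 *	socklen_t len = 0;
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &len);
 *	struct sock_filter *buf = calloc(len, sizeof(*buf));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, buf, &len);
 *
 * Note that len counts filter blocks, not bytes.
 */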
int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
		  unsigned int len)
{
	struct sock_fprog_kern *fprog;
	struct sk_filter *filter;
	int ret = 0;

	lock_sock(sk);
	filter = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	if (!filter)
		goto out;

	/* We're copying the filter that has been originally attached,
	 * so no conversion/decode needed anymore. eBPF programs that
	 * have no original program cannot be dumped through this.
	 */
	ret = -EACCES;
	fprog = filter->prog->orig_prog;
	if (!fprog)
		goto out;

	ret = fprog->len;
	if (!len)
		/* User space only enquires number of filter blocks. */
		goto out;

	ret = -EINVAL;
	if (len < fprog->len)
		goto out;

	ret = -EFAULT;
	if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
		goto out;

	/* Instead of bytes, the API requests to return the number
	 * of filter blocks.
	 */
	ret = fprog->len;
out:
	release_sock(sk);
	return ret;
}