net/core/filter.c (linux-2.6-block.git, blame snapshot at commit "xsk: wire up XDP_DRV side of AF_XDP")

/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <linux/bpf_trace.h>

/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to correct size returned by
 * the program. If pkt_len is 0 we toss packet. If skb->len is smaller
 * than pkt_len we keep whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);

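/* Editor's note (illustrative sketch, not part of the original file):
 * with the trim semantics above, a classic filter attached via
 * SO_ATTACH_FILTER that ends in
 *
 *	BPF_STMT(BPF_RET | BPF_K, 64)
 *
 * keeps at most 64 bytes of each packet: pskb_trim() cuts the skb to
 * max(cap, 64). A return value of 0 instead drops the packet with
 * -EPERM. At the time of this snapshot, sk_filter() in linux/filter.h
 * is simply sk_filter_trim_cap(sk, skb, 1).
 */
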
BPF_CALL_1(__skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(__skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(__skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_0(__get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= __get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}

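/* Editor's note (illustrative, not part of the original file): for
 * SKF_AD_VLAN_TAG_PRESENT the helper above emits, in eBPF pseudo-code:
 *
 *	dst = *(u16 *)(src + offsetof(struct sk_buff, vlan_tci));
 *	dst >>= 12;	shift the VLAN_TAG_PRESENT bit (0x1000) down
 *	dst &= 1;	leave only the "tag present" flag
 *
 * so one classic ancillary load becomes three eBPF instructions.
 */
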
static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(__skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(__skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(__get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

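/* Editor's note (illustrative, not part of the original file): a classic
 * ancillary load like
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX)
 *
 * is expanded by convert_bpf_extensions() into four eBPF instructions:
 * load skb->dev into the TMP register, exit (returning 0) if it is NULL,
 * then load dev->ifindex into A.
 */
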
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[6] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
			do {						\
				if (target >= len || target < 0)	\
					goto err;			\
				insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0; \
				/* Adjust pc relative offset for 2nd or 3rd insn. */ \
				insn->off -= insn - tmp_insns;		\
			} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

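/* Editor's note (illustrative sketch, not part of the original file):
 * callers follow the two-pass pattern from the kdoc above, roughly:
 *
 *	int new_len;
 *
 *	bpf_convert_filter(old_prog, old_len, NULL, &new_len);
 *	fp = bpf_prog_realloc(fp, bpf_prog_size(new_len), 0);
 *	fp->len = new_len;
 *	bpf_convert_filter(old_prog, old_len, fp, &new_len);
 *
 * which is what bpf_migrate_filter() further below does, with the error
 * handling elided here.
 */
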
/* Security:
 *
 * As we don't want to clear mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by user never tries
 * to read a cell if not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

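/* Editor's note (illustrative, not part of the original file): the pass
 * above rejects, for instance, a filter reading scratch memory that was
 * never written:
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 0),	loads M[0]: -EINVAL
 *	BPF_STMT(BPF_RET | BPF_K, 0),
 *
 * whereas storing to M[0] with BPF_ST first makes the same load legal.
 */
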
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

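/* Editor's note (illustrative, not part of the original file): among the
 * rules above, the final instruction must be a return, so the minimal
 * valid classic program is the single statement
 *
 *	BPF_STMT(BPF_RET | BPF_K, 0xffffffff)
 *
 * (accept the whole packet), while a program ending in any non-RET code
 * is rejected with -EINVAL before any conversion happens.
 */
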
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

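/* Editor's note (illustrative sketch, not part of the original file):
 * in-kernel users of this API typically do something like:
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(code),
 *		.filter	= code,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &fprog);
 *
 * pairing it with bpf_prog_destroy(prog) on teardown; error handling is
 * elided here.
 */
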
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}

static int __reuseport_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct bpf_prog *old_prog;
	int err;

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		return -ENOMEM;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		err = reuseport_alloc(sk);
		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	old_prog = reuseport_attach_prog(sk, prog);
	if (old_prog)
		bpf_prog_destroy(old_prog);

	return 0;
}

static
struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

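/* Editor's note (illustrative, not part of the original file): the
 * userspace-facing path into sk_attach_filter() is the SO_ATTACH_FILTER
 * socket option, roughly:
 *
 *	struct sock_fprog fprog = { .len = n, .filter = insns };
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 *
 * while SO_ATTACH_BPF passes an eBPF program fd and lands in
 * sk_attach_bpf() below.
 */
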
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __reuseport_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}

struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

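/* Editor's note (illustrative sketch, not part of the original file):
 * from the program side this helper is bpf_skb_store_bytes(), e.g. in a
 * tc classifier rewriting the destination MAC (new_dmac is hypothetical):
 *
 *	__u8 new_dmac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	bpf_skb_store_bytes(skb, 0, new_dmac, sizeof(new_dmac),
 *			    BPF_F_INVALIDATE_HASH);
 *
 * with BPF_F_RECOMPUTE_CSUM requesting the rcsum fixups seen above.
 */
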
BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * again, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)
		return 0;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;
	return 0;
}

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}

static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};

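/* Editor's note (illustrative sketch, not part of the original file):
 * bpf_csum_diff() is commonly paired with the csum_replace helpers when
 * rewriting headers, e.g. for an IPv4 address change (offsets hypothetical):
 *
 *	__wsum diff = bpf_csum_diff(&old_ip, 4, &new_ip, 4, 0);
 *
 *	bpf_l3_csum_replace(skb, ip_csum_off, 0, diff, 0);
 *	bpf_l4_csum_replace(skb, tcp_csum_off, 0, diff, BPF_F_PSEUDO_HDR);
 *
 * exploiting the from == 0 "replace by diff" cases implemented above.
 */
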
36bbef52
DB
1667BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
1668{
1669 /* The interface is to be used in combination with bpf_csum_diff()
1670 * for direct packet writes. csum rotation for alignment as well
1671 * as emulating csum_sub() can be done from the eBPF program.
1672 */
1673 if (skb->ip_summed == CHECKSUM_COMPLETE)
1674 return (skb->csum = csum_add(skb->csum, csum));
1675
1676 return -ENOTSUPP;
1677}
1678
1679static const struct bpf_func_proto bpf_csum_update_proto = {
1680 .func = bpf_csum_update,
1681 .gpl_only = false,
1682 .ret_type = RET_INTEGER,
1683 .arg1_type = ARG_PTR_TO_CTX,
1684 .arg2_type = ARG_ANYTHING,
1685};
1686
a70b506e
DB
1687static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
1688{
a70b506e
DB
1689 return dev_forward_skb(dev, skb);
1690}
1691
4e3264d2
MKL
1692static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
1693 struct sk_buff *skb)
1694{
1695 int ret = ____dev_forward_skb(dev, skb);
1696
1697 if (likely(!ret)) {
1698 skb->dev = dev;
1699 ret = netif_rx(skb);
1700 }
1701
1702 return ret;
1703}
1704
a70b506e
DB
1705static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
1706{
1707 int ret;
1708
1709 if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
1710 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
1711 kfree_skb(skb);
1712 return -ENETDOWN;
1713 }
1714
1715 skb->dev = dev;
1716
1717 __this_cpu_inc(xmit_recursion);
1718 ret = dev_queue_xmit(skb);
1719 __this_cpu_dec(xmit_recursion);
1720
1721 return ret;
1722}
1723
4e3264d2
MKL
1724static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
1725 u32 flags)
1726{
1727 /* skb->mac_len is not set on normal egress */
1728 unsigned int mlen = skb->network_header - skb->mac_header;
1729
1730 __skb_pull(skb, mlen);
1731
1732 /* At ingress, the mac header has already been pulled once.
1733 * At egress, skb_pospull_rcsum has to be done in case that
1734 * the skb is originated from ingress (i.e. a forwarded skb)
1735 * to ensure that rcsum starts at net header.
1736 */
1737 if (!skb_at_tc_ingress(skb))
1738 skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
1739 skb_pop_mac_header(skb);
1740 skb_reset_mac_len(skb);
1741 return flags & BPF_F_INGRESS ?
1742 __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
1743}
1744
static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* Verify that a link layer header is carried */
	if (unlikely(skb->mac_header >= skb->network_header)) {
		kfree_skb(skb);
		return -ERANGE;
	}

	bpf_push_mac_rcsum(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
			  u32 flags)
{
	if (dev_is_mac_header_xmit(dev))
		return __bpf_redirect_common(skb, dev, flags);
	else
		return __bpf_redirect_no_mac(skb, dev, flags);
}

BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
	struct net_device *dev;
	struct sk_buff *clone;
	int ret;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return -EINVAL;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!clone))
		return -ENOMEM;

	/* For direct write, we need to keep the invariant that the skbs
	 * we're dealing with need to be uncloned. Should uncloning fail
	 * here, we need to free the just generated clone to unclone once
	 * again.
	 */
	ret = bpf_try_make_head_writable(skb);
	if (unlikely(ret)) {
		kfree_skb(clone);
		return -ENOMEM;
	}

	return __bpf_redirect(clone, dev, flags);
}

static const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func = bpf_clone_redirect,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
};

struct redirect_info {
	u32 ifindex;
	u32 flags;
	struct bpf_map *map;
	struct bpf_map *map_to_flush;
	unsigned long map_owner;
};

static DEFINE_PER_CPU(struct redirect_info, redirect_info);

BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return TC_ACT_SHOT;

	ri->ifindex = ifindex;
	ri->flags = flags;

	return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
	ri->ifindex = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return __bpf_redirect(skb, dev, ri->flags);
}

static const struct bpf_func_proto bpf_redirect_proto = {
	.func = bpf_redirect,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_ANYTHING,
};

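/* Illustrative sketch, not part of this file: a minimal tc classifier using
 * bpf_redirect(). Unlike bpf_clone_redirect(), no clone is made; the
 * TC_ACT_REDIRECT verdict tells the caller to invoke skb_do_redirect() on
 * the original skb. Assumes libbpf-style bpf_helpers.h; the ifindex value
 * is hypothetical.
 */
#if 0	/* example only */
SEC("classifier")
int redirect_all(struct __sk_buff *skb)
{
	const int target_ifindex = 4;	/* hypothetical device */

	/* BPF_F_INGRESS would inject into the target's RX path instead. */
	return bpf_redirect(target_ifindex, 0);
}
#endif
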
BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	/* If user passes invalid input drop the packet. */
	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	tcb->bpf.key = key;
	tcb->bpf.flags = flags;
	tcb->bpf.map = map;

	return SK_PASS;
}

struct sock *do_sk_redirect_map(struct sk_buff *skb)
{
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
	struct sock *sk = NULL;

	if (tcb->bpf.map) {
		sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key);

		tcb->bpf.key = 0;
		tcb->bpf.map = NULL;
	}

	return sk;
}

static const struct bpf_func_proto bpf_sk_redirect_map_proto = {
	.func = bpf_sk_redirect_map,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_ANYTHING,
};

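/* Illustrative sketch, not part of this file: an sk_skb verdict program that
 * steers each skb to the socket stored at a fixed sockmap slot. Assumes a
 * BPF_MAP_TYPE_SOCKMAP populated from userspace and attached with
 * BPF_SK_SKB_STREAM_VERDICT; map name and slot are hypothetical, and the
 * exact SEC() naming depends on the loader.
 */
#if 0	/* example only */
struct bpf_map_def SEC("maps") sock_map = {
	.type = BPF_MAP_TYPE_SOCKMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 2,
};

SEC("sk_skb")
int steer(struct __sk_buff *skb)
{
	/* Slot 0 holds the peer socket; SK_DROP on bad flags or no sock. */
	return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
}
#endif
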
BPF_CALL_4(bpf_msg_redirect_map, struct sk_msg_buff *, msg,
	   struct bpf_map *, map, u32, key, u64, flags)
{
	/* If user passes invalid input drop the packet. */
	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return SK_DROP;

	msg->key = key;
	msg->flags = flags;
	msg->map = map;

	return SK_PASS;
}

struct sock *do_msg_redirect_map(struct sk_msg_buff *msg)
{
	struct sock *sk = NULL;

	if (msg->map) {
		sk = __sock_map_lookup_elem(msg->map, msg->key);

		msg->key = 0;
		msg->map = NULL;
	}

	return sk;
}

static const struct bpf_func_proto bpf_msg_redirect_map_proto = {
	.func = bpf_msg_redirect_map,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg_buff *, msg, u32, bytes)
{
	msg->apply_bytes = bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
	.func = bpf_msg_apply_bytes,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg_buff *, msg, u32, bytes)
{
	msg->cork_bytes = bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
	.func = bpf_msg_cork_bytes,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_msg_pull_data,
	   struct sk_msg_buff *, msg, u32, start, u32, end, u64, flags)
{
	unsigned int len = 0, offset = 0, copy = 0;
	struct scatterlist *sg = msg->sg_data;
	int first_sg, last_sg, i, shift;
	unsigned char *p, *to, *from;
	int bytes = end - start;
	struct page *page;

	if (unlikely(flags || end <= start))
		return -EINVAL;

	/* First find the starting scatterlist element */
	i = msg->sg_start;
	do {
		len = sg[i].length;
		offset += len;
		if (start < offset + len)
			break;
		i++;
		if (i == MAX_SKB_FRAGS)
			i = 0;
	} while (i != msg->sg_end);

	if (unlikely(start >= offset + len))
		return -EINVAL;

	if (!msg->sg_copy[i] && bytes <= len)
		goto out;

	first_sg = i;

	/* At this point we need to linearize multiple scatterlist
	 * elements or a single shared page. Either way we need to
	 * copy into a linear buffer exclusively owned by BPF. Then
	 * place the buffer in the scatterlist and fixup the original
	 * entries by removing the entries now in the linear buffer
	 * and shifting the remaining entries. For now we do not try
	 * to copy partial entries to avoid complexity of running out
	 * of sg_entry slots. The downside is reading a single byte
	 * will copy the entire sg entry.
	 */
	do {
		copy += sg[i].length;
		i++;
		if (i == MAX_SKB_FRAGS)
			i = 0;
		if (bytes < copy)
			break;
	} while (i != msg->sg_end);
	last_sg = i;

	if (unlikely(copy < end - start))
		return -EINVAL;

	page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC, get_order(copy));
	if (unlikely(!page))
		return -ENOMEM;
	p = page_address(page);
	offset = 0;

	i = first_sg;
	do {
		from = sg_virt(&sg[i]);
		len = sg[i].length;
		to = p + offset;

		memcpy(to, from, len);
		offset += len;
		sg[i].length = 0;
		put_page(sg_page(&sg[i]));

		i++;
		if (i == MAX_SKB_FRAGS)
			i = 0;
	} while (i != last_sg);

	sg[first_sg].length = copy;
	sg_set_page(&sg[first_sg], page, copy, 0);

	/* To repair sg ring we need to shift entries. If we only
	 * had a single entry though we can just replace it and
	 * be done. Otherwise walk the ring and shift the entries.
	 */
	shift = last_sg - first_sg - 1;
	if (!shift)
		goto out;

	i = first_sg + 1;
	do {
		int move_from;

		if (i + shift >= MAX_SKB_FRAGS)
			move_from = i + shift - MAX_SKB_FRAGS;
		else
			move_from = i + shift;

		if (move_from == msg->sg_end)
			break;

		sg[i] = sg[move_from];
		sg[move_from].length = 0;
		sg[move_from].page_link = 0;
		sg[move_from].offset = 0;

		i++;
		if (i == MAX_SKB_FRAGS)
			i = 0;
	} while (1);
	msg->sg_end -= shift;
	if (msg->sg_end < 0)
		msg->sg_end += MAX_SKB_FRAGS;
out:
	msg->data = sg_virt(&sg[i]) + start - offset;
	msg->data_end = msg->data + bytes;

	return 0;
}

static const struct bpf_func_proto bpf_msg_pull_data_proto = {
	.func = bpf_msg_pull_data,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_ANYTHING,
};

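/* Illustrative sketch, not part of this file: an sk_msg program that calls
 * bpf_msg_pull_data() so that bytes [4, 8) of the message are guaranteed to
 * sit in the linear area between msg->data and msg->data_end before being
 * read. Assumes libbpf-style bpf_helpers.h; offsets and the sentinel value
 * are hypothetical.
 */
#if 0	/* example only */
SEC("sk_msg")
int inspect(struct sk_msg_md *msg)
{
	void *data, *data_end;

	/* Linearize bytes [4, 8) of the message. */
	if (bpf_msg_pull_data(msg, 4, 8, 0))
		return SK_DROP;

	/* Pointers must be re-derived and bounds-checked after the pull. */
	data = (void *)(long)msg->data;
	data_end = (void *)(long)msg->data_end;
	if (data + 4 > data_end)
		return SK_DROP;

	return *(__u32 *)data == 0xdeadbeef ? SK_DROP : SK_PASS;
}
#endif
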
BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
	return task_get_classid(skb);
}

static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
	.func = bpf_get_cgroup_classid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{
	return dst_tclassid(skb);
}

static const struct bpf_func_proto bpf_get_route_realm_proto = {
	.func = bpf_get_route_realm,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{
	/* If skb_clear_hash() was called due to mangling, we can
	 * trigger SW recalculation here. Later access to hash
	 * can then use the inline skb->hash via context directly
	 * instead of calling this helper again.
	 */
	return skb_get_hash(skb);
}

static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
	.func = bpf_get_hash_recalc,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
{
	/* After all direct packet writes, this can be used once for
	 * triggering a lazy recalc on next skb_get_hash() invocation.
	 */
	skb_clear_hash(skb);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
	.func = bpf_set_hash_invalid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
{
	/* Set user specified hash as L4(+), so that it gets returned
	 * on skb_get_hash() call unless BPF prog later on triggers a
	 * skb_clear_hash().
	 */
	__skb_set_sw_hash(skb, hash, true);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_proto = {
	.func = bpf_set_hash,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
	   u16, vlan_tci)
{
	int ret;

	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
		     vlan_proto != htons(ETH_P_8021AD)))
		vlan_proto = htons(ETH_P_8021Q);

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_pointers(skb);
	return ret;
}

const struct bpf_func_proto bpf_skb_vlan_push_proto = {
	.func = bpf_skb_vlan_push,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_push_proto);

BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
	int ret;

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_pop(skb);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_pointers(skb);
	return ret;
}

const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
	.func = bpf_skb_vlan_pop,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};
EXPORT_SYMBOL_GPL(bpf_skb_vlan_pop_proto);

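/* Illustrative sketch, not part of this file: tagging traffic from a tc
 * classifier. Any helper that touches packet data invalidates previously
 * loaded data pointers, hence the bpf_compute_data_pointers() calls above;
 * the program must re-derive skb->data/data_end afterwards. Assumes
 * libbpf-style bpf_helpers.h and bpf_endian.h; VLAN ID 100 is hypothetical.
 */
#if 0	/* example only */
SEC("classifier")
int tag_vlan(struct __sk_buff *skb)
{
	if (bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 100))
		return TC_ACT_SHOT;
	/* skb->data/data_end must be reloaded after the push. */
	return TC_ACT_OK;
}
#endif
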
static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{
	/* Caller already did skb_cow() with len as headroom,
	 * so no need to do it here.
	 */
	skb_push(skb, len);
	memmove(skb->data, skb->data + len, off);
	memset(skb->data + off, 0, len);

	/* No skb_postpush_rcsum(skb, skb->data + off, len)
	 * needed here as it does not change the skb->csum
	 * result for checksum complete when summing over
	 * zeroed blocks.
	 */
	return 0;
}

static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
	/* skb_ensure_writable() is not needed here, as we're
	 * already working on an uncloned skb.
	 */
	if (unlikely(!pskb_may_pull(skb, off + len)))
		return -ENOMEM;

	skb_postpull_rcsum(skb, skb->data + off, len);
	memmove(skb->data + len, skb->data, off);
	__skb_pull(skb, len);

	return 0;
}

static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* There's no need for __skb_push()/__skb_pull() pair to
	 * get to the start of the mac header as we're guaranteed
	 * to always start from here under eBPF.
	 */
	ret = bpf_skb_generic_push(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header -= len;
		skb->network_header -= len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}

static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* Same here, __skb_push()/__skb_pull() pair not needed. */
	ret = bpf_skb_generic_pop(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header += len;
		skb->network_header += len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}

static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb_mac_header_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* SKB_GSO_TCPV4 needs to be changed into
		 * SKB_GSO_TCPV6.
		 */
		if (shinfo->gso_type & SKB_GSO_TCPV4) {
			shinfo->gso_type &= ~SKB_GSO_TCPV4;
			shinfo->gso_type |= SKB_GSO_TCPV6;
		}

		/* Due to IPv6 header, MSS needs to be downgraded. */
		skb_decrease_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IPV6);
	skb_clear_hash(skb);

	return 0;
}

static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb_mac_header_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* SKB_GSO_TCPV6 needs to be changed into
		 * SKB_GSO_TCPV4.
		 */
		if (shinfo->gso_type & SKB_GSO_TCPV6) {
			shinfo->gso_type &= ~SKB_GSO_TCPV6;
			shinfo->gso_type |= SKB_GSO_TCPV4;
		}

		/* Due to IPv4 header, MSS can be upgraded. */
		skb_increase_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IP);
	skb_clear_hash(skb);

	return 0;
}

static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{
	__be16 from_proto = skb->protocol;

	if (from_proto == htons(ETH_P_IP) &&
	    to_proto == htons(ETH_P_IPV6))
		return bpf_skb_proto_4_to_6(skb);

	if (from_proto == htons(ETH_P_IPV6) &&
	    to_proto == htons(ETH_P_IP))
		return bpf_skb_proto_6_to_4(skb);

	return -ENOTSUPP;
}

BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
	   u64, flags)
{
	int ret;

	if (unlikely(flags))
		return -EINVAL;

	/* General idea is that this helper does the basic groundwork
	 * needed for changing the protocol, and eBPF program fills the
	 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
	 * and other helpers, rather than passing a raw buffer here.
	 *
	 * The rationale is to keep this minimal and without a need to
	 * deal with raw packet data. F.e. even if we would pass buffers
	 * here, the program still needs to call the bpf_lX_csum_replace()
	 * helpers anyway. Plus, this way we keep also separation of
	 * concerns, since f.e. bpf_skb_store_bytes() should only take
	 * care of stores.
	 *
	 * Currently, additional options and extension header space are
	 * not supported, but flags register is reserved so we can adapt
	 * that. For offloads, we mark packet as dodgy, so that headers
	 * need to be verified first.
	 */
	ret = bpf_skb_proto_xlat(skb, proto);
	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_proto_proto = {
	.func = bpf_skb_change_proto,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
};

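/* Illustrative sketch, not part of this file: the NAT64-style flow the
 * comment above describes. The helper only resizes and moves headers; the
 * program must then write a valid IPv6 header itself via the store/csum
 * helpers. Assumes libbpf-style bpf_helpers.h and bpf_endian.h; the header
 * contents are left out for brevity.
 */
#if 0	/* example only */
SEC("classifier")
int v4_to_v6(struct __sk_buff *skb)
{
	if (skb->protocol != bpf_htons(ETH_P_IP))
		return TC_ACT_OK;

	/* Make room for the larger IPv6 header and flip skb->protocol. */
	if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
		return TC_ACT_SHOT;

	/* Now fill the IPv6 header via bpf_skb_store_bytes() and fix
	 * the L4 checksum via bpf_l4_csum_replace() before returning.
	 */
	return TC_ACT_OK;
}
#endif
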
BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{
	/* We only allow a restricted subset to be changed for now. */
	if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
		     !skb_pkt_type_ok(pkt_type)))
		return -EINVAL;

	skb->pkt_type = pkt_type;
	return 0;
}

static const struct bpf_func_proto bpf_skb_change_type_proto = {
	.func = bpf_skb_change_type,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};

static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return sizeof(struct iphdr);
	case htons(ETH_P_IPV6):
		return sizeof(struct ipv6hdr);
	default:
		return ~0U;
	}
}

static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff)
{
	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* Due to header grow, MSS needs to be downgraded. */
		skb_decrease_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	return 0;
}

static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
{
	u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb);
	int ret;

	/* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */
	if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb)))
		return -ENOTSUPP;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* Due to header shrink, MSS can be upgraded. */
		skb_increase_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	return 0;
}

static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
	return skb->dev->mtu + skb->dev->hard_header_len;
}

static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
{
	bool trans_same = skb->transport_header == skb->network_header;
	u32 len_cur, len_diff_abs = abs(len_diff);
	u32 len_min = bpf_skb_net_base_len(skb);
	u32 len_max = __bpf_skb_max_len(skb);
	__be16 proto = skb->protocol;
	bool shrink = len_diff < 0;
	int ret;

	if (unlikely(len_diff_abs > 0xfffU))
		return -EFAULT;
	if (unlikely(proto != htons(ETH_P_IP) &&
		     proto != htons(ETH_P_IPV6)))
		return -ENOTSUPP;

	len_cur = skb->len - skb_network_offset(skb);
	if (skb_transport_header_was_set(skb) && !trans_same)
		len_cur = skb_network_header_len(skb);
	if ((shrink && (len_diff_abs >= len_cur ||
			len_cur - len_diff_abs < len_min)) ||
	    (!shrink && (skb->len + len_diff_abs > len_max &&
			 !skb_is_gso(skb))))
		return -ENOTSUPP;

	ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) :
		       bpf_skb_net_grow(skb, len_diff_abs);

	bpf_compute_data_pointers(skb);
	return ret;
}

BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
	   u32, mode, u64, flags)
{
	if (unlikely(flags))
		return -EINVAL;
	if (likely(mode == BPF_ADJ_ROOM_NET))
		return bpf_skb_adjust_net(skb, len_diff);

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
	.func = bpf_skb_adjust_room,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_ANYTHING,
};

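/* Illustrative sketch, not part of this file: growing the packet at the
 * network layer, e.g. to make room behind the outer IP header before
 * writing an encapsulation header. MSS/GSO bookkeeping is handled by the
 * helper itself, as seen above. Assumes libbpf-style bpf_helpers.h; the
 * 8-byte size is hypothetical.
 */
#if 0	/* example only */
SEC("classifier")
int make_room(struct __sk_buff *skb)
{
	/* Insert 8 bytes of room after the network header. */
	if (bpf_skb_adjust_room(skb, 8, BPF_ADJ_ROOM_NET, 0))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}
#endif
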
static u32 __bpf_skb_min_len(const struct sk_buff *skb)
{
	u32 min_len = skb_network_offset(skb);

	if (skb_transport_header_was_set(skb))
		min_len = skb_transport_offset(skb);
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		min_len = skb_checksum_start_offset(skb) +
			  skb->csum_offset + sizeof(__sum16);
	return min_len;
}

static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	unsigned int old_len = skb->len;
	int ret;

	ret = __skb_grow_rcsum(skb, new_len);
	if (!ret)
		memset(skb->data + old_len, 0, new_len - old_len);
	return ret;
}

static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
{
	return __skb_trim_rcsum(skb, new_len);
}

BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 min_len = __bpf_skb_min_len(skb);
	int ret;

	if (unlikely(flags || new_len > max_len || new_len < min_len))
		return -EINVAL;
	if (skb->encapsulation)
		return -ENOTSUPP;

	/* The basic idea of this helper is that it's performing the
	 * needed work to either grow or trim an skb, and eBPF program
	 * rewrites the rest via helpers like bpf_skb_store_bytes(),
	 * bpf_lX_csum_replace() and others rather than passing a raw
	 * buffer here. This one is a slow path helper and intended
	 * for replies with control messages.
	 *
	 * Like in bpf_skb_change_proto(), we want to keep this rather
	 * minimal and without protocol specifics so that we are able
	 * to separate concerns as in bpf_skb_store_bytes() should only
	 * be the one responsible for writing buffers.
	 *
	 * It's really expected to be a slow path operation here for
	 * control message replies, so we're implicitly linearizing,
	 * uncloning and dropping offloads from the skb by this.
	 */
	ret = __bpf_try_make_writable(skb, skb->len);
	if (!ret) {
		if (new_len > skb->len)
			ret = bpf_skb_grow_rcsum(skb, new_len);
		else if (new_len < skb->len)
			ret = bpf_skb_trim_rcsum(skb, new_len);
		if (!ret && skb_is_gso(skb))
			skb_gso_reset(skb);
	}

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_tail_proto = {
	.func = bpf_skb_change_tail,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
	   u64, flags)
{
	u32 max_len = __bpf_skb_max_len(skb);
	u32 new_len = skb->len + head_room;
	int ret;

	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
		     new_len < skb->len))
		return -EINVAL;

	ret = skb_cow(skb, head_room);
	if (likely(!ret)) {
		/* Idea for this helper is that we currently only
		 * allow to expand on mac header. This means that
		 * skb->protocol, network header, etc, stay as is.
		 * Compared to bpf_skb_change_tail(), we're more
		 * flexible due to not needing to linearize or
		 * reset GSO. Intention for this helper is to be
		 * used by an L3 skb that needs to push mac header
		 * for redirection into L2 device.
		 */
		__skb_push(skb, head_room);
		memset(skb->data, 0, head_room);
		skb_reset_mac_header(skb);
	}

	bpf_compute_data_pointers(skb);
	return 0;
}

static const struct bpf_func_proto bpf_skb_change_head_proto = {
	.func = bpf_skb_change_head,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
};

static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
	return xdp_data_meta_unsupported(xdp) ? 0 :
	       xdp->data - xdp->data_meta;
}

BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
	void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
	unsigned long metalen = xdp_get_metalen(xdp);
	void *data_start = xdp_frame_end + metalen;
	void *data = xdp->data + offset;

	if (unlikely(data < data_start ||
		     data > xdp->data_end - ETH_HLEN))
		return -EINVAL;

	if (metalen)
		memmove(xdp->data_meta + offset,
			xdp->data_meta, metalen);
	xdp->data_meta += offset;
	xdp->data = data;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
	.func = bpf_xdp_adjust_head,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};

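/* Illustrative sketch, not part of this file: an XDP program that pops an
 * outer header by moving xdp->data forward (a positive offset), then
 * re-derives and re-checks the packet pointers before touching the frame.
 * Assumes libbpf-style bpf_helpers.h and <linux/if_ether.h>; the 8-byte
 * header size is hypothetical.
 */
#if 0	/* example only */
SEC("xdp")
int pop_encap(struct xdp_md *ctx)
{
	void *data, *data_end;

	if (bpf_xdp_adjust_head(ctx, 8))
		return XDP_DROP;

	/* Pointers are invalidated by the adjust; reload and re-verify. */
	data = (void *)(long)ctx->data;
	data_end = (void *)(long)ctx->data_end;
	if (data + sizeof(struct ethhdr) > data_end)
		return XDP_DROP;

	return XDP_PASS;
}
#endif
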
BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
{
	void *data_end = xdp->data_end + offset;

	/* only shrinking is allowed for now. */
	if (unlikely(offset >= 0))
		return -EINVAL;

	if (unlikely(data_end < xdp->data + ETH_HLEN))
		return -EINVAL;

	xdp->data_end = data_end;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
	.func = bpf_xdp_adjust_tail,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
{
	void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
	void *meta = xdp->data_meta + offset;
	unsigned long metalen = xdp->data - meta;

	if (xdp_data_meta_unsupported(xdp))
		return -ENOTSUPP;
	if (unlikely(meta < xdp_frame_end ||
		     meta > xdp->data))
		return -EINVAL;
	if (unlikely((metalen & (sizeof(__u32) - 1)) ||
		     (metalen > 32)))
		return -EACCES;

	xdp->data_meta = meta;

	return 0;
}

static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
	.func = bpf_xdp_adjust_meta,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
};

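/* Illustrative sketch, not part of this file: reserving 4 bytes of metadata
 * in front of the frame so an XDP program can hand a value to a later tc
 * program (readable there via data_meta). The offset must keep the area
 * 4-byte aligned and at most 32 bytes, as enforced above. Assumes
 * libbpf-style bpf_helpers.h; the mark value is hypothetical.
 */
#if 0	/* example only */
SEC("xdp")
int mark_frame(struct xdp_md *ctx)
{
	void *data, *data_meta;

	/* Grow the metadata area by 4 bytes (negative offset). */
	if (bpf_xdp_adjust_meta(ctx, -4))
		return XDP_PASS;	/* driver may not support data_meta */

	data = (void *)(long)ctx->data;
	data_meta = (void *)(long)ctx->data_meta;
	if (data_meta + 4 > data)
		return XDP_PASS;

	*(__u32 *)data_meta = 0x1;	/* hypothetical mark */
	return XDP_PASS;
}
#endif
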
static int __bpf_tx_xdp(struct net_device *dev,
			struct bpf_map *map,
			struct xdp_buff *xdp,
			u32 index)
{
	struct xdp_frame *xdpf;
	int err;

	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	xdpf = convert_to_xdp_frame(xdp);
	if (unlikely(!xdpf))
		return -EOVERFLOW;

	err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
	if (err)
		return err;
	dev->netdev_ops->ndo_xdp_flush(dev);
	return 0;
}

static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
			    struct bpf_map *map,
			    struct xdp_buff *xdp,
			    u32 index)
{
	int err;

	switch (map->map_type) {
	case BPF_MAP_TYPE_DEVMAP: {
		struct net_device *dev = fwd;
		struct xdp_frame *xdpf;

		if (!dev->netdev_ops->ndo_xdp_xmit)
			return -EOPNOTSUPP;

		xdpf = convert_to_xdp_frame(xdp);
		if (unlikely(!xdpf))
			return -EOVERFLOW;

		/* TODO: move to inside map code instead, for bulk support
		 * err = dev_map_enqueue(dev, xdp);
		 */
		err = dev->netdev_ops->ndo_xdp_xmit(dev, xdpf);
		if (err)
			return err;
		__dev_map_insert_ctx(map, index);
		break;
	}
	case BPF_MAP_TYPE_CPUMAP: {
		struct bpf_cpu_map_entry *rcpu = fwd;

		err = cpu_map_enqueue(rcpu, xdp, dev_rx);
		if (err)
			return err;
		__cpu_map_insert_ctx(map, index);
		break;
	}
	case BPF_MAP_TYPE_XSKMAP: {
		struct xdp_sock *xs = fwd;

		err = __xsk_map_redirect(map, xdp, xs);
		return err;
	}
	default:
		break;
	}
	return 0;
}

void xdp_do_flush_map(void)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct bpf_map *map = ri->map_to_flush;

	ri->map_to_flush = NULL;
	if (map) {
		switch (map->map_type) {
		case BPF_MAP_TYPE_DEVMAP:
			__dev_map_flush(map);
			break;
		case BPF_MAP_TYPE_CPUMAP:
			__cpu_map_flush(map);
			break;
		case BPF_MAP_TYPE_XSKMAP:
			__xsk_map_flush(map);
			break;
		default:
			break;
		}
	}
}
EXPORT_SYMBOL_GPL(xdp_do_flush_map);

static void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
{
	switch (map->map_type) {
	case BPF_MAP_TYPE_DEVMAP:
		return __dev_map_lookup_elem(map, index);
	case BPF_MAP_TYPE_CPUMAP:
		return __cpu_map_lookup_elem(map, index);
	case BPF_MAP_TYPE_XSKMAP:
		return __xsk_map_lookup_elem(map, index);
	default:
		return NULL;
	}
}

static inline bool xdp_map_invalid(const struct bpf_prog *xdp_prog,
				   unsigned long aux)
{
	return (unsigned long)xdp_prog->aux != aux;
}

static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
			       struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	unsigned long map_owner = ri->map_owner;
	struct bpf_map *map = ri->map;
	u32 index = ri->ifindex;
	void *fwd = NULL;
	int err;

	ri->ifindex = 0;
	ri->map = NULL;
	ri->map_owner = 0;

	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
		err = -EFAULT;
		map = NULL;
		goto err;
	}

	fwd = __xdp_map_lookup_elem(map, index);
	if (!fwd) {
		err = -EINVAL;
		goto err;
	}
	if (ri->map_to_flush && ri->map_to_flush != map)
		xdp_do_flush_map();

	err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
	if (unlikely(err))
		goto err;

	ri->map_to_flush = map;
	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
	return 0;
err:
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
	return err;
}

int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
		    struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	struct net_device *fwd;
	u32 index = ri->ifindex;
	int err;

	if (ri->map)
		return xdp_do_redirect_map(dev, xdp, xdp_prog);

	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	ri->ifindex = 0;
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
	if (unlikely(err))
		goto err;

	_trace_xdp_redirect(dev, xdp_prog, index);
	return 0;
err:
	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
	return err;
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);

static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
{
	unsigned int len;

	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (skb->len > len)
		return -EMSGSIZE;

	return 0;
}

static int xdp_do_generic_redirect_map(struct net_device *dev,
				       struct sk_buff *skb,
				       struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	unsigned long map_owner = ri->map_owner;
	struct bpf_map *map = ri->map;
	struct net_device *fwd = NULL;
	u32 index = ri->ifindex;
	int err = 0;

	ri->ifindex = 0;
	ri->map = NULL;
	ri->map_owner = 0;

	if (unlikely(xdp_map_invalid(xdp_prog, map_owner))) {
		err = -EFAULT;
		map = NULL;
		goto err;
	}
	fwd = __xdp_map_lookup_elem(map, index);
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
		if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
			goto err;
		skb->dev = fwd;
	} else {
		/* TODO: Handle BPF_MAP_TYPE_CPUMAP */
		err = -EBADRQC;
		goto err;
	}

	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
	return 0;
err:
	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
	return err;
}

int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct bpf_prog *xdp_prog)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
	u32 index = ri->ifindex;
	struct net_device *fwd;
	int err = 0;

	if (ri->map)
		return xdp_do_generic_redirect_map(dev, skb, xdp_prog);

	ri->ifindex = 0;
	fwd = dev_get_by_index_rcu(dev_net(dev), index);
	if (unlikely(!fwd)) {
		err = -EINVAL;
		goto err;
	}

	if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
		goto err;

	skb->dev = fwd;
	_trace_xdp_redirect(dev, xdp_prog, index);
	return 0;
err:
	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
	return err;
}
EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);

BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	ri->map = NULL;
	ri->map_owner = 0;

	return XDP_REDIRECT;
}

static const struct bpf_func_proto bpf_xdp_redirect_proto = {
	.func = bpf_xdp_redirect,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_ANYTHING,
	.arg2_type = ARG_ANYTHING,
};

BPF_CALL_4(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, u64, flags,
	   unsigned long, map_owner)
{
	struct redirect_info *ri = this_cpu_ptr(&redirect_info);

	if (unlikely(flags))
		return XDP_ABORTED;

	ri->ifindex = ifindex;
	ri->flags = flags;
	ri->map = map;
	ri->map_owner = map_owner;

	return XDP_REDIRECT;
}

/* Note, arg4 is hidden from users and populated by the verifier
 * with the right pointer.
 */
static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
	.func = bpf_xdp_redirect_map,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
};

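/* Illustrative sketch, not part of this file: redirecting frames through a
 * devmap from an XDP program. From BPF the helper is invoked as
 * bpf_redirect_map(map, key, flags); the hidden map_owner argument above is
 * filled in by the verifier. Assumes libbpf-style bpf_helpers.h; the map
 * layout and slot are hypothetical.
 */
#if 0	/* example only */
struct bpf_map_def SEC("maps") tx_port = {
	.type = BPF_MAP_TYPE_DEVMAP,
	.key_size = sizeof(int),
	.value_size = sizeof(int),
	.max_entries = 64,
};

SEC("xdp")
int xdp_redirect_prog(struct xdp_md *ctx)
{
	/* Slot 0 was populated from userspace with a target ifindex. */
	return bpf_redirect_map(&tx_port, 0, 0);
}
#endif
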
bool bpf_helper_changes_pkt_data(void *func)
{
	if (func == bpf_skb_vlan_push ||
	    func == bpf_skb_vlan_pop ||
	    func == bpf_skb_store_bytes ||
	    func == bpf_skb_change_proto ||
	    func == bpf_skb_change_head ||
	    func == bpf_skb_change_tail ||
	    func == bpf_skb_adjust_room ||
	    func == bpf_skb_pull_data ||
	    func == bpf_clone_redirect ||
	    func == bpf_l3_csum_replace ||
	    func == bpf_l4_csum_replace ||
	    func == bpf_xdp_adjust_head ||
	    func == bpf_xdp_adjust_meta ||
	    func == bpf_msg_pull_data ||
	    func == bpf_xdp_adjust_tail)
		return true;

	return false;
}

static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
				  unsigned long off, unsigned long len)
{
	void *ptr = skb_header_pointer(skb, off, len, dst_buff);

	if (unlikely(!ptr))
		return len;
	if (ptr != dst_buff)
		memcpy(dst_buff, ptr, len);

	return 0;
}

BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(skb_size > skb->len))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
				bpf_skb_copy);
}

static const struct bpf_func_proto bpf_skb_event_output_proto = {
	.func = bpf_skb_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

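/* Illustrative sketch, not part of this file: sampling packets to userspace
 * through a perf event array. The upper 32 bits of the flags word carry how
 * many packet bytes to append after the meta struct, matching the
 * BPF_F_CTXLEN_MASK handling above. From BPF this helper is invoked as
 * bpf_perf_event_output() on an skb context. Assumes libbpf-style
 * bpf_helpers.h; the meta layout is hypothetical.
 */
#if 0	/* example only */
struct event_meta {
	__u32 pkt_len;
	__u32 ifindex;
};

struct bpf_map_def SEC("maps") events = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),
	.max_entries = 64,	/* >= number of CPUs */
};

SEC("classifier")
int sample(struct __sk_buff *skb)
{
	struct event_meta meta = {
		.pkt_len = skb->len,
		.ifindex = skb->ifindex,
	};
	__u64 sample_len = skb->len < 64 ? skb->len : 64;
	__u64 flags = BPF_F_CURRENT_CPU | (sample_len << 32);

	bpf_perf_event_output(skb, &events, flags, &meta, sizeof(meta));
	return TC_ACT_OK;
}
#endif
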
static unsigned short bpf_tunnel_key_af(u64 flags)
{
	return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
}

BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
	   u32, size, u64, flags)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	void *to_orig = to;
	int err;

	if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
		err = -EINVAL;
		goto err_clear;
	}
	if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
		err = -EPROTO;
		goto err_clear;
	}
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		err = -EINVAL;
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
			goto set_compat;
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			if (ip_tunnel_info_af(info) != AF_INET)
				goto err_clear;
set_compat:
			to = (struct bpf_tunnel_key *)compat;
			break;
		default:
			goto err_clear;
		}
	}

	to->tunnel_id = be64_to_cpu(info->key.tun_id);
	to->tunnel_tos = info->key.tos;
	to->tunnel_ttl = info->key.ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
		       sizeof(to->remote_ipv6));
		to->tunnel_label = be32_to_cpu(info->key.label);
	} else {
		to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
	}

	if (unlikely(size != sizeof(struct bpf_tunnel_key)))
		memcpy(to_orig, to, size);

	return 0;
err_clear:
	memset(to_orig, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
	.func = bpf_skb_get_tunnel_key,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_ANYTHING,
};

BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
{
	const struct ip_tunnel_info *info = skb_tunnel_info(skb);
	int err;

	if (unlikely(!info ||
		     !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
		err = -ENOENT;
		goto err_clear;
	}
	if (unlikely(size < info->options_len)) {
		err = -ENOMEM;
		goto err_clear;
	}

	ip_tunnel_info_opts_get(to, info);
	if (size > info->options_len)
		memset(to + info->options_len, 0, size - info->options_len);

	return info->options_len;
err_clear:
	memset(to, 0, size);
	return err;
}

static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
	.func = bpf_skb_get_tunnel_opt,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static struct metadata_dst __percpu *md_dst;

BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
	   const struct bpf_tunnel_key *, from, u32, size, u64, flags)
{
	struct metadata_dst *md = this_cpu_ptr(md_dst);
	u8 compat[sizeof(struct bpf_tunnel_key)];
	struct ip_tunnel_info *info;

	if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
			       BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
		return -EINVAL;
	if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
		switch (size) {
		case offsetof(struct bpf_tunnel_key, tunnel_label):
		case offsetof(struct bpf_tunnel_key, tunnel_ext):
		case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
			/* Fixup deprecated structure layouts here, so we have
			 * a common path later on.
			 */
			memcpy(compat, from, size);
			memset(compat + size, 0, sizeof(compat) - size);
			from = (const struct bpf_tunnel_key *) compat;
			break;
		default:
			return -EINVAL;
		}
	}
	if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
		     from->tunnel_ext))
		return -EINVAL;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *) md);
	skb_dst_set(skb, (struct dst_entry *) md);

	info = &md->u.tun_info;
	memset(info, 0, sizeof(*info));
	info->mode = IP_TUNNEL_INFO_TX;

	info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
	if (flags & BPF_F_DONT_FRAGMENT)
		info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
	if (flags & BPF_F_ZERO_CSUM_TX)
		info->key.tun_flags &= ~TUNNEL_CSUM;
	if (flags & BPF_F_SEQ_NUMBER)
		info->key.tun_flags |= TUNNEL_SEQ;

	info->key.tun_id = cpu_to_be64(from->tunnel_id);
	info->key.tos = from->tunnel_tos;
	info->key.ttl = from->tunnel_ttl;

	if (flags & BPF_F_TUNINFO_IPV6) {
		info->mode |= IP_TUNNEL_INFO_IPV6;
		memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
		       sizeof(from->remote_ipv6));
		info->key.label = cpu_to_be32(from->tunnel_label) &
				  IPV6_FLOWLABEL_MASK;
	} else {
		info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
	}

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
	.func = bpf_skb_set_tunnel_key,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_ANYTHING,
};

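/* Illustrative sketch, not part of this file: programming the egress tunnel
 * header from tc on a collect_md (external mode) vxlan/geneve device. Note
 * remote_ipv4 and tunnel_id are given in host byte order; the conversion
 * happens in the helper above. Assumes libbpf-style bpf_helpers.h; VNI and
 * remote address are hypothetical.
 */
#if 0	/* example only */
SEC("classifier")
int set_tunnel(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	key.tunnel_id = 42;			/* VNI */
	key.remote_ipv4 = 0xc0a80101;		/* 192.168.1.1 */
	key.tunnel_ttl = 64;

	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key), 0))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}
#endif
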
BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
	   const u8 *, from, u32, size)
{
	struct ip_tunnel_info *info = skb_tunnel_info(skb);
	const struct metadata_dst *md = this_cpu_ptr(md_dst);

	if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
		return -EINVAL;
	if (unlikely(size > IP_TUNNEL_OPTS_MAX))
		return -ENOMEM;

	ip_tunnel_info_opts_set(info, from, size);

	return 0;
}

static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
	.func = bpf_skb_set_tunnel_opt,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
{
	if (!md_dst) {
		struct metadata_dst __percpu *tmp;

		tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
						METADATA_IP_TUNNEL,
						GFP_KERNEL);
		if (!tmp)
			return NULL;
		if (cmpxchg(&md_dst, NULL, tmp))
			metadata_dst_free_percpu(tmp);
	}

	switch (which) {
	case BPF_FUNC_skb_set_tunnel_key:
		return &bpf_skb_set_tunnel_key_proto;
	case BPF_FUNC_skb_set_tunnel_opt:
		return &bpf_skb_set_tunnel_opt_proto;
	default:
		return NULL;
	}
}

BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
	   u32, idx)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct cgroup *cgrp;
	struct sock *sk;

	sk = skb_to_full_sk(skb);
	if (!sk || !sk_fullsock(sk))
		return -ENOENT;
	if (unlikely(idx >= array->map.max_entries))
		return -E2BIG;

	cgrp = READ_ONCE(array->ptrs[idx]);
	if (unlikely(!cgrp))
		return -EAGAIN;

	return sk_under_cgroup_hierarchy(sk, cgrp);
}

static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
	.func = bpf_skb_under_cgroup,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
};

static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
				  unsigned long off, unsigned long len)
{
	memcpy(dst_buff, src_buff + off, len);
	return 0;
}

BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
	   u64, flags, void *, meta, u64, meta_size)
{
	u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;

	if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
		return -EINVAL;
	if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
		return -EFAULT;

	return bpf_event_output(map, flags, meta, meta_size, xdp->data,
				xdp_size, bpf_xdp_copy);
}

static const struct bpf_func_proto bpf_xdp_event_output_proto = {
	.func = bpf_xdp_event_output,
	.gpl_only = true,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_CONST_MAP_PTR,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
{
	return skb->sk ? sock_gen_cookie(skb->sk) : 0;
}

static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
	.func = bpf_get_socket_cookie,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
{
	struct sock *sk = sk_to_full_sk(skb->sk);
	kuid_t kuid;

	if (!sk || !sk_fullsock(sk))
		return overflowuid;
	kuid = sock_net_uid(sock_net(sk), sk);
	return from_kuid_munged(sock_net(sk)->user_ns, kuid);
}

static const struct bpf_func_proto bpf_get_socket_uid_proto = {
	.func = bpf_get_socket_uid,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
};

BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
	   int, level, int, optname, char *, optval, int, optlen)
{
	struct sock *sk = bpf_sock->sk;
	int ret = 0;
	int val;

	if (!sk_fullsock(sk))
		return -EINVAL;

	if (level == SOL_SOCKET) {
		if (optlen != sizeof(int))
			return -EINVAL;
		val = *((int *)optval);

		/* Only some socketops are supported */
		switch (optname) {
		case SO_RCVBUF:
			sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
			sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
			break;
		case SO_SNDBUF:
			sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
			sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
			break;
		case SO_MAX_PACING_RATE:
			sk->sk_max_pacing_rate = val;
			sk->sk_pacing_rate = min(sk->sk_pacing_rate,
						 sk->sk_max_pacing_rate);
			break;
		case SO_PRIORITY:
			sk->sk_priority = val;
			break;
		case SO_RCVLOWAT:
			if (val < 0)
				val = INT_MAX;
			sk->sk_rcvlowat = val ? : 1;
			break;
		case SO_MARK:
			sk->sk_mark = val;
			break;
		default:
			ret = -EINVAL;
		}
#ifdef CONFIG_INET
	} else if (level == SOL_IP) {
		if (optlen != sizeof(int) || sk->sk_family != AF_INET)
			return -EINVAL;

		val = *((int *)optval);
		/* Only some options are supported */
		switch (optname) {
		case IP_TOS:
			if (val < -1 || val > 0xff) {
				ret = -EINVAL;
			} else {
				struct inet_sock *inet = inet_sk(sk);

				if (val == -1)
					val = 0;
				inet->tos = val;
			}
			break;
		default:
			ret = -EINVAL;
		}
#if IS_ENABLED(CONFIG_IPV6)
	} else if (level == SOL_IPV6) {
		if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
			return -EINVAL;

		val = *((int *)optval);
		/* Only some options are supported */
		switch (optname) {
		case IPV6_TCLASS:
			if (val < -1 || val > 0xff) {
				ret = -EINVAL;
			} else {
				struct ipv6_pinfo *np = inet6_sk(sk);

				if (val == -1)
					val = 0;
				np->tclass = val;
			}
			break;
		default:
			ret = -EINVAL;
		}
#endif
	} else if (level == SOL_TCP &&
		   sk->sk_prot->setsockopt == tcp_setsockopt) {
		if (optname == TCP_CONGESTION) {
			char name[TCP_CA_NAME_MAX];
			bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;

			strncpy(name, optval, min_t(long, optlen,
						    TCP_CA_NAME_MAX-1));
			name[TCP_CA_NAME_MAX-1] = 0;
			ret = tcp_set_congestion_control(sk, name, false,
							 reinit);
		} else {
			struct tcp_sock *tp = tcp_sk(sk);

			if (optlen != sizeof(int))
				return -EINVAL;

			val = *((int *)optval);
			/* Only some options are supported */
			switch (optname) {
			case TCP_BPF_IW:
				if (val <= 0 || tp->data_segs_out > 0)
					ret = -EINVAL;
				else
					tp->snd_cwnd = val;
				break;
			case TCP_BPF_SNDCWND_CLAMP:
				if (val <= 0) {
					ret = -EINVAL;
				} else {
					tp->snd_cwnd_clamp = val;
					tp->snd_ssthresh = val;
				}
				break;
			default:
				ret = -EINVAL;
			}
		}
#endif
	} else {
		ret = -EINVAL;
	}
	return ret;
}

static const struct bpf_func_proto bpf_setsockopt_proto = {
	.func = bpf_setsockopt,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_ANYTHING,
	.arg3_type = ARG_ANYTHING,
	.arg4_type = ARG_PTR_TO_MEM,
	.arg5_type = ARG_CONST_SIZE,
};

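/* Illustrative sketch, not part of this file: a sock_ops program that pins
 * the initial congestion window for freshly established connections via
 * bpf_setsockopt(). Attached as BPF_PROG_TYPE_SOCK_OPS at cgroup level.
 * Assumes libbpf-style bpf_helpers.h; the iw value is hypothetical.
 */
#if 0	/* example only */
SEC("sockops")
int set_iw(struct bpf_sock_ops *skops)
{
	int iw = 40;

	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* TCP_BPF_IW is only accepted before any data is sent,
		 * per the data_segs_out check above.
		 */
		bpf_setsockopt(skops, SOL_TCP, TCP_BPF_IW, &iw, sizeof(iw));
		break;
	}
	return 1;
}
#endif
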
cd86d1fd
LB
3625BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
3626 int, level, int, optname, char *, optval, int, optlen)
3627{
3628 struct sock *sk = bpf_sock->sk;
cd86d1fd
LB
3629
3630 if (!sk_fullsock(sk))
3631 goto err_clear;
3632
3633#ifdef CONFIG_INET
3634 if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
3635 if (optname == TCP_CONGESTION) {
3636 struct inet_connection_sock *icsk = inet_csk(sk);
3637
3638 if (!icsk->icsk_ca_ops || optlen <= 1)
3639 goto err_clear;
3640 strncpy(optval, icsk->icsk_ca_ops->name, optlen);
3641 optval[optlen - 1] = 0;
3642 } else {
3643 goto err_clear;
3644 }
3645 } else if (level == SOL_IP) {
3646 struct inet_sock *inet = inet_sk(sk);
3647
3648 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
3649 goto err_clear;
3650
3651 /* Only some options are supported */
3652 switch (optname) {
3653 case IP_TOS:
3654 *((int *)optval) = (int)inet->tos;
3655 break;
3656 default:
3657 goto err_clear;
3658 }
3659#if IS_ENABLED(CONFIG_IPV6)
3660 } else if (level == SOL_IPV6) {
3661 struct ipv6_pinfo *np = inet6_sk(sk);
3662
3663 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
3664 goto err_clear;
3665
3666 /* Only some options are supported */
3667 switch (optname) {
3668 case IPV6_TCLASS:
3669 *((int *)optval) = (int)np->tclass;
3670 break;
3671 default:
3672 goto err_clear;
3673 }
3674#endif
3675 } else {
3676 goto err_clear;
3677 }
3678	return 0;
3679#endif
3680err_clear:
3681 memset(optval, 0, optlen);
3682 return -EINVAL;
3683}
3684
3685static const struct bpf_func_proto bpf_getsockopt_proto = {
3686 .func = bpf_getsockopt,
3687 .gpl_only = false,
3688 .ret_type = RET_INTEGER,
3689 .arg1_type = ARG_PTR_TO_CTX,
3690 .arg2_type = ARG_ANYTHING,
3691 .arg3_type = ARG_ANYTHING,
3692 .arg4_type = ARG_PTR_TO_UNINIT_MEM,
3693 .arg5_type = ARG_CONST_SIZE,
3694};
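/* Usage sketch (assumption, mirrors the write-side example above):
 * reading the congestion control name back from the same sock_ops
 * context. On failure optval is zeroed, so a short or unsupported
 * read never leaks stack data to the program.
 *
 *	char cc[TCP_CA_NAME_MAX];
 *
 *	if (!bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION,
 *			    cc, sizeof(cc)))
 *		;	// cc now holds a NUL-terminated CA name
 */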
3695
3696BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
3697 int, argval)
3698{
3699 struct sock *sk = bpf_sock->sk;
3700 int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
3701
3702	if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
3703 return -EINVAL;
3704
3705 if (val)
3706 tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
3707
3708 return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
3709}
3710
3711static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
3712 .func = bpf_sock_ops_cb_flags_set,
3713 .gpl_only = false,
3714 .ret_type = RET_INTEGER,
3715 .arg1_type = ARG_PTR_TO_CTX,
3716 .arg2_type = ARG_ANYTHING,
3717};
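/* Usage sketch (assumption): enabling RTO and retransmit callbacks
 * once a connection is established. The helper returns whichever
 * requested bits are *not* supported, so a return of 0 means every
 * flag was accepted.
 *
 *	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *		bpf_sock_ops_cb_flags_set(skops,
 *					  BPF_SOCK_OPS_RTO_CB_FLAG |
 *					  BPF_SOCK_OPS_RETRANS_CB_FLAG);
 */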
3718
3719const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
3720EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
3721
3722BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
3723 int, addr_len)
3724{
3725#ifdef CONFIG_INET
3726 struct sock *sk = ctx->sk;
3727 int err;
3728
3729 /* Binding to port can be expensive so it's prohibited in the helper.
3730 * Only binding to IP is supported.
3731 */
3732 err = -EINVAL;
3733 if (addr->sa_family == AF_INET) {
3734 if (addr_len < sizeof(struct sockaddr_in))
3735 return err;
3736 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
3737 return err;
3738 return __inet_bind(sk, addr, addr_len, true, false);
3739#if IS_ENABLED(CONFIG_IPV6)
3740 } else if (addr->sa_family == AF_INET6) {
3741 if (addr_len < SIN6_LEN_RFC2133)
3742 return err;
3743 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
3744 return err;
3745 /* ipv6_bpf_stub cannot be NULL, since it's called from
3746 * bpf_cgroup_inet6_connect hook and ipv6 is already loaded
3747 */
3748 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
3749#endif /* CONFIG_IPV6 */
3750 }
3751#endif /* CONFIG_INET */
3752
3753 return -EAFNOSUPPORT;
3754}
3755
3756static const struct bpf_func_proto bpf_bind_proto = {
3757 .func = bpf_bind,
3758 .gpl_only = false,
3759 .ret_type = RET_INTEGER,
3760 .arg1_type = ARG_PTR_TO_CTX,
3761 .arg2_type = ARG_PTR_TO_MEM,
3762 .arg3_type = ARG_CONST_SIZE,
3763};
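/* Usage sketch (assumption): a BPF_CGROUP_INET4_CONNECT program
 * pinning the source address before the connect proceeds. The
 * address is hypothetical; sin_port is left at 0 because the helper
 * rejects port binds.
 *
 *	SEC("cgroup/connect4")
 *	int bind_src(struct bpf_sock_addr *ctx)
 *	{
 *		struct sockaddr_in sa = {
 *			.sin_family = AF_INET,
 *			.sin_addr.s_addr = bpf_htonl(0x0a000001), // 10.0.0.1
 *		};
 *
 *		if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
 *			return 0;	// reject the connect on failure
 *		return 1;
 *	}
 */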
3764
3765#ifdef CONFIG_XFRM
3766BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
3767 struct bpf_xfrm_state *, to, u32, size, u64, flags)
3768{
3769 const struct sec_path *sp = skb_sec_path(skb);
3770 const struct xfrm_state *x;
3771
3772 if (!sp || unlikely(index >= sp->len || flags))
3773 goto err_clear;
3774
3775 x = sp->xvec[index];
3776
3777 if (unlikely(size != sizeof(struct bpf_xfrm_state)))
3778 goto err_clear;
3779
3780 to->reqid = x->props.reqid;
3781 to->spi = x->id.spi;
3782 to->family = x->props.family;
3783 if (to->family == AF_INET6) {
3784 memcpy(to->remote_ipv6, x->props.saddr.a6,
3785 sizeof(to->remote_ipv6));
3786 } else {
3787 to->remote_ipv4 = x->props.saddr.a4;
3788 }
3789
3790 return 0;
3791err_clear:
3792 memset(to, 0, size);
3793 return -EINVAL;
3794}
3795
3796static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
3797 .func = bpf_skb_get_xfrm_state,
3798 .gpl_only = false,
3799 .ret_type = RET_INTEGER,
3800 .arg1_type = ARG_PTR_TO_CTX,
3801 .arg2_type = ARG_ANYTHING,
3802 .arg3_type = ARG_PTR_TO_UNINIT_MEM,
3803 .arg4_type = ARG_CONST_SIZE,
3804 .arg5_type = ARG_ANYTHING,
3805};
3806#endif
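/* Usage sketch (assumption): a tc/BPF classifier inspecting the
 * first IPsec state on the skb's secpath. flags must be 0 and the
 * index must stay below the number of states on the secpath.
 *
 *	struct bpf_xfrm_state xs;
 *
 *	if (!bpf_skb_get_xfrm_state(skb, 0, &xs, sizeof(xs), 0) &&
 *	    xs.family == AF_INET)
 *		;	// xs.reqid, xs.spi, xs.remote_ipv4 identify the SA
 */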
3807
3808 static const struct bpf_func_proto *
3809 bpf_base_func_proto(enum bpf_func_id func_id)
3810{
3811 switch (func_id) {
3812 case BPF_FUNC_map_lookup_elem:
3813 return &bpf_map_lookup_elem_proto;
3814 case BPF_FUNC_map_update_elem:
3815 return &bpf_map_update_elem_proto;
3816 case BPF_FUNC_map_delete_elem:
3817 return &bpf_map_delete_elem_proto;
3818 case BPF_FUNC_get_prandom_u32:
3819 return &bpf_get_prandom_u32_proto;
3820	case BPF_FUNC_get_smp_processor_id:
3821		return &bpf_get_raw_smp_processor_id_proto;
3822 case BPF_FUNC_get_numa_node_id:
3823 return &bpf_get_numa_node_id_proto;
3824 case BPF_FUNC_tail_call:
3825 return &bpf_tail_call_proto;
3826 case BPF_FUNC_ktime_get_ns:
3827 return &bpf_ktime_get_ns_proto;
3828	case BPF_FUNC_trace_printk:
3829 if (capable(CAP_SYS_ADMIN))
3830 return bpf_get_trace_printk_proto();
3831 default:
3832 return NULL;
3833 }
3834}
3835
3836 static const struct bpf_func_proto *
3837 sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
3838{
3839 switch (func_id) {
3840 /* inet and inet6 sockets are created in a process
3841 * context so there is always a valid uid/gid
3842 */
3843 case BPF_FUNC_get_current_uid_gid:
3844 return &bpf_get_current_uid_gid_proto;
3845 default:
3846 return bpf_base_func_proto(func_id);
3847 }
3848}
3849
3850static const struct bpf_func_proto *
3851sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
3852{
3853 switch (func_id) {
3854 /* inet and inet6 sockets are created in a process
3855 * context so there is always a valid uid/gid
3856 */
3857 case BPF_FUNC_get_current_uid_gid:
3858 return &bpf_get_current_uid_gid_proto;
3859 case BPF_FUNC_bind:
3860 switch (prog->expected_attach_type) {
3861 case BPF_CGROUP_INET4_CONNECT:
3862 case BPF_CGROUP_INET6_CONNECT:
3863 return &bpf_bind_proto;
3864 default:
3865 return NULL;
3866 }
3867 default:
3868 return bpf_base_func_proto(func_id);
3869 }
3870}
3871
3872 static const struct bpf_func_proto *
3873 sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
3874{
3875 switch (func_id) {
3876 case BPF_FUNC_skb_load_bytes:
3877 return &bpf_skb_load_bytes_proto;
3878 case BPF_FUNC_get_socket_cookie:
3879 return &bpf_get_socket_cookie_proto;
3880 case BPF_FUNC_get_socket_uid:
3881 return &bpf_get_socket_uid_proto;
3882 default:
3883 return bpf_base_func_proto(func_id);
3884 }
3885}
3886
3887 static const struct bpf_func_proto *
3888 tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
3889{
3890 switch (func_id) {
3891 case BPF_FUNC_skb_store_bytes:
3892 return &bpf_skb_store_bytes_proto;
3893 case BPF_FUNC_skb_load_bytes:
3894 return &bpf_skb_load_bytes_proto;
3895 case BPF_FUNC_skb_pull_data:
3896 return &bpf_skb_pull_data_proto;
3897 case BPF_FUNC_csum_diff:
3898 return &bpf_csum_diff_proto;
3899 case BPF_FUNC_csum_update:
3900 return &bpf_csum_update_proto;
3901 case BPF_FUNC_l3_csum_replace:
3902 return &bpf_l3_csum_replace_proto;
3903 case BPF_FUNC_l4_csum_replace:
3904 return &bpf_l4_csum_replace_proto;
3905 case BPF_FUNC_clone_redirect:
3906 return &bpf_clone_redirect_proto;
3907 case BPF_FUNC_get_cgroup_classid:
3908 return &bpf_get_cgroup_classid_proto;
3909 case BPF_FUNC_skb_vlan_push:
3910 return &bpf_skb_vlan_push_proto;
3911 case BPF_FUNC_skb_vlan_pop:
3912 return &bpf_skb_vlan_pop_proto;
3913 case BPF_FUNC_skb_change_proto:
3914 return &bpf_skb_change_proto_proto;
3915 case BPF_FUNC_skb_change_type:
3916 return &bpf_skb_change_type_proto;
3917 case BPF_FUNC_skb_adjust_room:
3918 return &bpf_skb_adjust_room_proto;
3919 case BPF_FUNC_skb_change_tail:
3920 return &bpf_skb_change_tail_proto;
3921 case BPF_FUNC_skb_get_tunnel_key:
3922 return &bpf_skb_get_tunnel_key_proto;
3923 case BPF_FUNC_skb_set_tunnel_key:
3924 return bpf_get_skb_set_tunnel_proto(func_id);
3925 case BPF_FUNC_skb_get_tunnel_opt:
3926 return &bpf_skb_get_tunnel_opt_proto;
3927 case BPF_FUNC_skb_set_tunnel_opt:
3928 return bpf_get_skb_set_tunnel_proto(func_id);
3929 case BPF_FUNC_redirect:
3930 return &bpf_redirect_proto;
3931 case BPF_FUNC_get_route_realm:
3932 return &bpf_get_route_realm_proto;
3933 case BPF_FUNC_get_hash_recalc:
3934 return &bpf_get_hash_recalc_proto;
3935 case BPF_FUNC_set_hash_invalid:
3936 return &bpf_set_hash_invalid_proto;
3937 case BPF_FUNC_set_hash:
3938 return &bpf_set_hash_proto;
3939	case BPF_FUNC_perf_event_output:
3940		return &bpf_skb_event_output_proto;
3941 case BPF_FUNC_get_smp_processor_id:
3942 return &bpf_get_smp_processor_id_proto;
3943 case BPF_FUNC_skb_under_cgroup:
3944 return &bpf_skb_under_cgroup_proto;
3945 case BPF_FUNC_get_socket_cookie:
3946 return &bpf_get_socket_cookie_proto;
3947 case BPF_FUNC_get_socket_uid:
3948 return &bpf_get_socket_uid_proto;
3949#ifdef CONFIG_XFRM
3950 case BPF_FUNC_skb_get_xfrm_state:
3951 return &bpf_skb_get_xfrm_state_proto;
3952#endif
3953	default:
3954		return bpf_base_func_proto(func_id);
3955 }
3956}
3957
3958 static const struct bpf_func_proto *
3959 xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
3960 {
3961 switch (func_id) {
3962 case BPF_FUNC_perf_event_output:
3963 return &bpf_xdp_event_output_proto;
3964 case BPF_FUNC_get_smp_processor_id:
3965 return &bpf_get_smp_processor_id_proto;
3966 case BPF_FUNC_csum_diff:
3967 return &bpf_csum_diff_proto;
3968 case BPF_FUNC_xdp_adjust_head:
3969 return &bpf_xdp_adjust_head_proto;
3970 case BPF_FUNC_xdp_adjust_meta:
3971 return &bpf_xdp_adjust_meta_proto;
3972 case BPF_FUNC_redirect:
3973 return &bpf_xdp_redirect_proto;
3974	case BPF_FUNC_redirect_map:
3975		return &bpf_xdp_redirect_map_proto;
3976 case BPF_FUNC_xdp_adjust_tail:
3977 return &bpf_xdp_adjust_tail_proto;
3978	default:
3979		return bpf_base_func_proto(func_id);
3980	}
3981}
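/* Usage sketch (assumption): the XDP helpers above compose as in
 * this minimal program, which reserves four bytes of metadata in
 * front of the frame and then hands it to a hypothetical devmap
 * called tx_map.
 *
 *	SEC("xdp")
 *	int xdp_meta(struct xdp_md *ctx)
 *	{
 *		if (bpf_xdp_adjust_meta(ctx, -4))
 *			return XDP_PASS;	// driver may lack metadata support
 *		return bpf_redirect_map(&tx_map, 0, 0);
 *	}
 */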
3982
3983 static const struct bpf_func_proto *
3984 lwt_inout_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
3985{
3986 switch (func_id) {
3987 case BPF_FUNC_skb_load_bytes:
3988 return &bpf_skb_load_bytes_proto;
3989 case BPF_FUNC_skb_pull_data:
3990 return &bpf_skb_pull_data_proto;
3991 case BPF_FUNC_csum_diff:
3992 return &bpf_csum_diff_proto;
3993 case BPF_FUNC_get_cgroup_classid:
3994 return &bpf_get_cgroup_classid_proto;
3995 case BPF_FUNC_get_route_realm:
3996 return &bpf_get_route_realm_proto;
3997 case BPF_FUNC_get_hash_recalc:
3998 return &bpf_get_hash_recalc_proto;
3999 case BPF_FUNC_perf_event_output:
4000 return &bpf_skb_event_output_proto;
4001 case BPF_FUNC_get_smp_processor_id:
4002 return &bpf_get_smp_processor_id_proto;
4003 case BPF_FUNC_skb_under_cgroup:
4004 return &bpf_skb_under_cgroup_proto;
4005 default:
4006	default:
4006		return bpf_base_func_proto(func_id);
4007 }
4008}
4009
4010 static const struct bpf_func_proto *
4011 sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4012{
4013 switch (func_id) {
4014 case BPF_FUNC_setsockopt:
4015 return &bpf_setsockopt_proto;
4016 case BPF_FUNC_getsockopt:
4017 return &bpf_getsockopt_proto;
4018 case BPF_FUNC_sock_ops_cb_flags_set:
4019 return &bpf_sock_ops_cb_flags_set_proto;
4020 case BPF_FUNC_sock_map_update:
4021 return &bpf_sock_map_update_proto;
4022 default:
4023 return bpf_base_func_proto(func_id);
4024 }
4025}
4026
4027static const struct bpf_func_proto *
4028sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4029{
4030 switch (func_id) {
4031 case BPF_FUNC_msg_redirect_map:
4032 return &bpf_msg_redirect_map_proto;
4033 case BPF_FUNC_msg_apply_bytes:
4034 return &bpf_msg_apply_bytes_proto;
4035 case BPF_FUNC_msg_cork_bytes:
4036 return &bpf_msg_cork_bytes_proto;
4037 case BPF_FUNC_msg_pull_data:
4038 return &bpf_msg_pull_data_proto;
4039 default:
4040 return bpf_base_func_proto(func_id);
4041 }
4042}
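/* Usage sketch (assumption): an SK_MSG verdict program combining the
 * helpers above, applying its verdict to the first 128 bytes only
 * and corking short sends until 512 bytes have been queued.
 *
 *	SEC("sk_msg")
 *	int msg_prog(struct sk_msg_md *msg)
 *	{
 *		bpf_msg_apply_bytes(msg, 128);
 *		bpf_msg_cork_bytes(msg, 512);
 *		return SK_PASS;
 *	}
 */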
4043
4044static const struct bpf_func_proto *
4045sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4046{
4047 switch (func_id) {
4048 case BPF_FUNC_skb_store_bytes:
4049 return &bpf_skb_store_bytes_proto;
4050 case BPF_FUNC_skb_load_bytes:
4051 return &bpf_skb_load_bytes_proto;
4052 case BPF_FUNC_skb_pull_data:
4053 return &bpf_skb_pull_data_proto;
4054 case BPF_FUNC_skb_change_tail:
4055 return &bpf_skb_change_tail_proto;
4056 case BPF_FUNC_skb_change_head:
4057 return &bpf_skb_change_head_proto;
4058 case BPF_FUNC_get_socket_cookie:
4059 return &bpf_get_socket_cookie_proto;
4060 case BPF_FUNC_get_socket_uid:
4061 return &bpf_get_socket_uid_proto;
4062 case BPF_FUNC_sk_redirect_map:
4063 return &bpf_sk_redirect_map_proto;
4064 default:
4065 return bpf_base_func_proto(func_id);
4066 }
4067}
4068
4069 static const struct bpf_func_proto *
4070 lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
4071{
4072 switch (func_id) {
4073 case BPF_FUNC_skb_get_tunnel_key:
4074 return &bpf_skb_get_tunnel_key_proto;
4075 case BPF_FUNC_skb_set_tunnel_key:
4076 return bpf_get_skb_set_tunnel_proto(func_id);
4077 case BPF_FUNC_skb_get_tunnel_opt:
4078 return &bpf_skb_get_tunnel_opt_proto;
4079 case BPF_FUNC_skb_set_tunnel_opt:
4080 return bpf_get_skb_set_tunnel_proto(func_id);
4081 case BPF_FUNC_redirect:
4082 return &bpf_redirect_proto;
4083 case BPF_FUNC_clone_redirect:
4084 return &bpf_clone_redirect_proto;
4085 case BPF_FUNC_skb_change_tail:
4086 return &bpf_skb_change_tail_proto;
4087 case BPF_FUNC_skb_change_head:
4088 return &bpf_skb_change_head_proto;
4089 case BPF_FUNC_skb_store_bytes:
4090 return &bpf_skb_store_bytes_proto;
4091 case BPF_FUNC_csum_update:
4092 return &bpf_csum_update_proto;
4093 case BPF_FUNC_l3_csum_replace:
4094 return &bpf_l3_csum_replace_proto;
4095 case BPF_FUNC_l4_csum_replace:
4096 return &bpf_l4_csum_replace_proto;
4097 case BPF_FUNC_set_hash_invalid:
4098 return &bpf_set_hash_invalid_proto;
4099 default:
4100		return lwt_inout_func_proto(func_id, prog);
4101 }
4102}
4103
4104 static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
4105 const struct bpf_prog *prog,
4106 struct bpf_insn_access_aux *info)
4107 {
4108	const int size_default = sizeof(__u32);
4109
4110 if (off < 0 || off >= sizeof(struct __sk_buff))
4111 return false;
4112
4113	/* The verifier guarantees that size > 0. */
4114 if (off % size != 0)
4115 return false;
4116
4117 switch (off) {
4118 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
4119 if (off + size > offsetofend(struct __sk_buff, cb[4]))
4120 return false;
4121 break;
4122 case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
4123 case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
4124 case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
4125 case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
4126	case bpf_ctx_range(struct __sk_buff, data):
4127	case bpf_ctx_range(struct __sk_buff, data_meta):
4128 case bpf_ctx_range(struct __sk_buff, data_end):
4129 if (size != size_default)
4130			return false;
4131 break;
4132 default:
4133		/* Only narrow read access allowed for now. */
4134		if (type == BPF_WRITE) {
4135			if (size != size_default)
4136 return false;
4137 } else {
4138 bpf_ctx_record_field_size(info, size_default);
4139 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
4140				return false;
4141		}
4142	}
4143
4144 return true;
4145}
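/* Concretely (sketch): for a four-byte field such as __sk_buff::mark
 * the default branch above lets a program read one or two bytes
 * inside the field, subject to bpf_ctx_narrow_access_ok(), while a
 * write must still cover the full four bytes.
 *
 *	__u8 b = ((__u8 *)&skb->mark)[0];	// narrow read: allowed
 *	skb->mark = 1;				// full-width write: allowed
 *	((__u8 *)&skb->mark)[0] = 1;		// narrow write: rejected
 */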
4146
4147 static bool sk_filter_is_valid_access(int off, int size,
4148 enum bpf_access_type type,
4149 const struct bpf_prog *prog,
4150 struct bpf_insn_access_aux *info)
4151 {
4152	switch (off) {
4153 case bpf_ctx_range(struct __sk_buff, tc_classid):
4154 case bpf_ctx_range(struct __sk_buff, data):
4155	case bpf_ctx_range(struct __sk_buff, data_meta):
4156	case bpf_ctx_range(struct __sk_buff, data_end):
4157	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
4158		return false;
4159	}
4160
4161 if (type == BPF_WRITE) {
4162 switch (off) {
4163		case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
4164 break;
4165 default:
4166 return false;
4167 }
4168 }
4169
4170	return bpf_skb_is_valid_access(off, size, type, prog, info);
4171}
4172
4173static bool lwt_is_valid_access(int off, int size,
4174 enum bpf_access_type type,
4175 const struct bpf_prog *prog,
4176 struct bpf_insn_access_aux *info)
4177{
4178 switch (off) {
4179	case bpf_ctx_range(struct __sk_buff, tc_classid):
4180	case bpf_ctx_range_till(struct __sk_buff, family, local_port):
4181	case bpf_ctx_range(struct __sk_buff, data_meta):
4182 return false;
4183 }
4184
4185 if (type == BPF_WRITE) {
4186 switch (off) {
4187 case bpf_ctx_range(struct __sk_buff, mark):
4188 case bpf_ctx_range(struct __sk_buff, priority):
4189 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
4190 break;
4191 default:
4192 return false;
4193 }
4194 }
4195
4196 switch (off) {
4197 case bpf_ctx_range(struct __sk_buff, data):
4198 info->reg_type = PTR_TO_PACKET;
4199 break;
4200 case bpf_ctx_range(struct __sk_buff, data_end):
4201 info->reg_type = PTR_TO_PACKET_END;
4202 break;
4203 }
4204
4205	return bpf_skb_is_valid_access(off, size, type, prog, info);
4206}
4207
4208
4209/* Attach type specific accesses */
4210static bool __sock_filter_check_attach_type(int off,
4211 enum bpf_access_type access_type,
4212 enum bpf_attach_type attach_type)
4213 {
4214 switch (off) {
4215 case offsetof(struct bpf_sock, bound_dev_if):
4216 case offsetof(struct bpf_sock, mark):
4217 case offsetof(struct bpf_sock, priority):
4218 switch (attach_type) {
4219 case BPF_CGROUP_INET_SOCK_CREATE:
4220 goto full_access;
4221 default:
4222 return false;
4223 }
4224 case bpf_ctx_range(struct bpf_sock, src_ip4):
4225 switch (attach_type) {
4226 case BPF_CGROUP_INET4_POST_BIND:
4227 goto read_only;
4228 default:
4229 return false;
4230 }
4231 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
4232 switch (attach_type) {
4233 case BPF_CGROUP_INET6_POST_BIND:
4234 goto read_only;
4235 default:
4236 return false;
4237 }
4238 case bpf_ctx_range(struct bpf_sock, src_port):
4239 switch (attach_type) {
4240 case BPF_CGROUP_INET4_POST_BIND:
4241 case BPF_CGROUP_INET6_POST_BIND:
4242 goto read_only;
4243 default:
4244 return false;
4245 }
4246 }
4247read_only:
4248 return access_type == BPF_READ;
4249full_access:
4250 return true;
4251}
4252
4253static bool __sock_filter_check_size(int off, int size,
4254 struct bpf_insn_access_aux *info)
4255{
4256 const int size_default = sizeof(__u32);
4257
4258 switch (off) {
4259 case bpf_ctx_range(struct bpf_sock, src_ip4):
4260 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
4261 bpf_ctx_record_field_size(info, size_default);
4262 return bpf_ctx_narrow_access_ok(off, size, size_default);
4263 }
4264
4265 return size == size_default;
4266}
4267
4268static bool sock_filter_is_valid_access(int off, int size,
4269 enum bpf_access_type type,
4270 const struct bpf_prog *prog,
4271 struct bpf_insn_access_aux *info)
4272{
4273 if (off < 0 || off >= sizeof(struct bpf_sock))
4274		return false;
4275 if (off % size != 0)
4276 return false;
4277 if (!__sock_filter_check_attach_type(off, type,
4278 prog->expected_attach_type))
4279 return false;
4280 if (!__sock_filter_check_size(off, size, info))
4281		return false;
4282 return true;
4283}
4284
4285static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
4286 const struct bpf_prog *prog, int drop_verdict)
4287{
4288 struct bpf_insn *insn = insn_buf;
4289
4290 if (!direct_write)
4291 return 0;
4292
4293 /* if (!skb->cloned)
4294 * goto start;
4295 *
4296	 * (Fast path; otherwise we conservatively assume the skb
4297	 * may be a clone and do the rest in the helper.)
4298 */
4299 *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
4300 *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
4301 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
4302
4303 /* ret = bpf_skb_pull_data(skb, 0); */
4304 *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
4305 *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
4306 *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
4307 BPF_FUNC_skb_pull_data);
4308 /* if (!ret)
4309 * goto restore;
4310 * return TC_ACT_SHOT;
4311 */
4312 *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
4313	*insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
4314 *insn++ = BPF_EXIT_INSN();
4315
4316 /* restore: */
4317 *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
4318 /* start: */
4319 *insn++ = prog->insnsi[0];
4320
4321 return insn - insn_buf;
4322}
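/* The sequence built above is roughly this C, prefixed to programs
 * that want direct packet writes (sketch):
 *
 *	if (skb->cloned) {
 *		if (bpf_skb_pull_data(skb, 0))
 *			return drop_verdict;	// TC_ACT_SHOT or SK_DROP
 *	}
 *	// original first instruction runs here
 *
 * so a writable program always starts out on an unshared header.
 */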
4323
4324static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
4325 const struct bpf_prog *prog)
4326{
4327 return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
4328}
4329
4330 static bool tc_cls_act_is_valid_access(int off, int size,
4331 enum bpf_access_type type,
4332 const struct bpf_prog *prog,
4333 struct bpf_insn_access_aux *info)
4334{
4335 if (type == BPF_WRITE) {
4336 switch (off) {
4337 case bpf_ctx_range(struct __sk_buff, mark):
4338 case bpf_ctx_range(struct __sk_buff, tc_index):
4339 case bpf_ctx_range(struct __sk_buff, priority):
4340 case bpf_ctx_range(struct __sk_buff, tc_classid):
4341 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
4342 break;
4343 default:
4344 return false;
4345 }
4346 }
4347
4348 switch (off) {
4349 case bpf_ctx_range(struct __sk_buff, data):
4350 info->reg_type = PTR_TO_PACKET;
4351 break;
4352 case bpf_ctx_range(struct __sk_buff, data_meta):
4353 info->reg_type = PTR_TO_PACKET_META;
4354 break;
4355 case bpf_ctx_range(struct __sk_buff, data_end):
4356 info->reg_type = PTR_TO_PACKET_END;
4357 break;
4358 case bpf_ctx_range_till(struct __sk_buff, family, local_port):
4359 return false;
4360 }
4361
4362	return bpf_skb_is_valid_access(off, size, type, prog, info);
4363}
4364
4365 static bool __is_valid_xdp_access(int off, int size)
4366{
4367 if (off < 0 || off >= sizeof(struct xdp_md))
4368 return false;
4369 if (off % size != 0)
4370 return false;
4371	if (size != sizeof(__u32))
4372 return false;
4373
4374 return true;
4375}
4376
4377static bool xdp_is_valid_access(int off, int size,
4378 enum bpf_access_type type,
4379 const struct bpf_prog *prog,
4380 struct bpf_insn_access_aux *info)
4381{
4382 if (type == BPF_WRITE)
4383 return false;
4384
4385 switch (off) {
4386 case offsetof(struct xdp_md, data):
4387		info->reg_type = PTR_TO_PACKET;
4388		break;
4389 case offsetof(struct xdp_md, data_meta):
4390 info->reg_type = PTR_TO_PACKET_META;
4391 break;
4392	case offsetof(struct xdp_md, data_end):
4393		info->reg_type = PTR_TO_PACKET_END;
4394 break;
4395 }
4396
4397	return __is_valid_xdp_access(off, size);
4398}
4399
4400void bpf_warn_invalid_xdp_action(u32 act)
4401{
4402 const u32 act_max = XDP_REDIRECT;
4403
4404 WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
4405 act > act_max ? "Illegal" : "Driver unsupported",
4406 act);
4407}
4408EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
4409
4410static bool sock_addr_is_valid_access(int off, int size,
4411 enum bpf_access_type type,
4412 const struct bpf_prog *prog,
4413 struct bpf_insn_access_aux *info)
4414{
4415 const int size_default = sizeof(__u32);
4416
4417 if (off < 0 || off >= sizeof(struct bpf_sock_addr))
4418 return false;
4419 if (off % size != 0)
4420 return false;
4421
4422	/* Disallow access to IPv6 fields from IPv4 context and vice
4423 * versa.
4424 */
4425 switch (off) {
4426 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
4427 switch (prog->expected_attach_type) {
4428 case BPF_CGROUP_INET4_BIND:
4429		case BPF_CGROUP_INET4_CONNECT:
4430 break;
4431 default:
4432 return false;
4433 }
4434 break;
4435 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
4436 switch (prog->expected_attach_type) {
4437 case BPF_CGROUP_INET6_BIND:
4438		case BPF_CGROUP_INET6_CONNECT:
4439 break;
4440 default:
4441 return false;
4442 }
4443 break;
4444 }
4445
4446 switch (off) {
4447 case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
4448 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
4449 /* Only narrow read access allowed for now. */
4450 if (type == BPF_READ) {
4451 bpf_ctx_record_field_size(info, size_default);
4452 if (!bpf_ctx_narrow_access_ok(off, size, size_default))
4453 return false;
4454 } else {
4455 if (size != size_default)
4456 return false;
4457 }
4458 break;
4459 case bpf_ctx_range(struct bpf_sock_addr, user_port):
4460 if (size != size_default)
4461 return false;
4462 break;
4463 default:
4464 if (type == BPF_READ) {
4465 if (size != size_default)
4466 return false;
4467 } else {
4468 return false;
4469 }
4470 }
4471
4472 return true;
4473}
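/* Concretely (sketch): under the rules above a BPF_CGROUP_INET4_BIND
 * program may rewrite the address being bound. Both values below are
 * hypothetical.
 *
 *	SEC("cgroup/bind4")
 *	int rewrite_bind(struct bpf_sock_addr *ctx)
 *	{
 *		ctx->user_ip4 = bpf_htonl(0x7f000001);	// 127.0.0.1
 *		ctx->user_port = bpf_htons(4040);
 *		return 1;
 *	}
 */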
4474
4475static bool sock_ops_is_valid_access(int off, int size,
4476 enum bpf_access_type type,
4477 const struct bpf_prog *prog,
4478 struct bpf_insn_access_aux *info)
4479 {
4480 const int size_default = sizeof(__u32);
4481
4482 if (off < 0 || off >= sizeof(struct bpf_sock_ops))
4483 return false;
4484
4485 /* The verifier guarantees that size > 0. */
4486 if (off % size != 0)
4487 return false;
4488
4489 if (type == BPF_WRITE) {
4490 switch (off) {
4491		case offsetof(struct bpf_sock_ops, reply):
4492		case offsetof(struct bpf_sock_ops, sk_txhash):
4493 if (size != size_default)
4494 return false;
4495 break;
4496 default:
4497 return false;
4498 }
4499 } else {
4500 switch (off) {
4501 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
4502 bytes_acked):
4503 if (size != sizeof(__u64))
4504 return false;
4505 break;
4506 default:
4507 if (size != size_default)
4508 return false;
4509 break;
4510 }
4511 }
4512
4513	return true;
4514}
4515
4516static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
4517 const struct bpf_prog *prog)
4518{
4519	return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
4520}
4521
4522static bool sk_skb_is_valid_access(int off, int size,
4523 enum bpf_access_type type,
4524 const struct bpf_prog *prog,
4525 struct bpf_insn_access_aux *info)
4526{
4527 switch (off) {
4528 case bpf_ctx_range(struct __sk_buff, tc_classid):
4529 case bpf_ctx_range(struct __sk_buff, data_meta):
4530 return false;
4531 }
4532
4533 if (type == BPF_WRITE) {
4534 switch (off) {
4535 case bpf_ctx_range(struct __sk_buff, tc_index):
4536 case bpf_ctx_range(struct __sk_buff, priority):
4537 break;
4538 default:
4539 return false;
4540 }
4541 }
4542
4543	switch (off) {
4544	case bpf_ctx_range(struct __sk_buff, mark):
4545		return false;
4546 case bpf_ctx_range(struct __sk_buff, data):
4547 info->reg_type = PTR_TO_PACKET;
4548 break;
4549 case bpf_ctx_range(struct __sk_buff, data_end):
4550 info->reg_type = PTR_TO_PACKET_END;
4551 break;
4552 }
4553
5e43f899 4554 return bpf_skb_is_valid_access(off, size, type, prog, info);
b005fd18
JF
4555}
4556
4557static bool sk_msg_is_valid_access(int off, int size,
4558 enum bpf_access_type type,
4559 const struct bpf_prog *prog,
4560 struct bpf_insn_access_aux *info)
4561{
4562 if (type == BPF_WRITE)
4563 return false;
4564
4565 switch (off) {
4566 case offsetof(struct sk_msg_md, data):
4567 info->reg_type = PTR_TO_PACKET;
4568 break;
4569 case offsetof(struct sk_msg_md, data_end):
4570 info->reg_type = PTR_TO_PACKET_END;
4571 break;
4572 }
4573
4574 if (off < 0 || off >= sizeof(struct sk_msg_md))
4575 return false;
4576 if (off % size != 0)
4577 return false;
4578 if (size != sizeof(__u64))
4579 return false;
4580
4581 return true;
4582}
4583
4584static u32 bpf_convert_ctx_access(enum bpf_access_type type,
4585 const struct bpf_insn *si,
4586 struct bpf_insn *insn_buf,
4587 struct bpf_prog *prog, u32 *target_size)
4588{
4589 struct bpf_insn *insn = insn_buf;
4590	int off;
4591
4592	switch (si->off) {
4593	case offsetof(struct __sk_buff, len):
4594		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
4595 bpf_target_off(struct sk_buff, len, 4,
4596 target_size));
4597 break;
4598
4599	case offsetof(struct __sk_buff, protocol):
4600		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
4601 bpf_target_off(struct sk_buff, protocol, 2,
4602 target_size));
4603 break;
4604
4605	case offsetof(struct __sk_buff, vlan_proto):
4606		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
4607 bpf_target_off(struct sk_buff, vlan_proto, 2,
4608 target_size));
4609 break;
4610
4611	case offsetof(struct __sk_buff, priority):
4612		if (type == BPF_WRITE)
4613			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
4614 bpf_target_off(struct sk_buff, priority, 4,
4615 target_size));
4616		else
4617			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
4618 bpf_target_off(struct sk_buff, priority, 4,
4619 target_size));
4620 break;
4621
4622	case offsetof(struct __sk_buff, ingress_ifindex):
4623		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
4624 bpf_target_off(struct sk_buff, skb_iif, 4,
4625 target_size));
4626 break;
4627
4628 case offsetof(struct __sk_buff, ifindex):
4629		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
4630				      si->dst_reg, si->src_reg,
4631				      offsetof(struct sk_buff, dev));
4632 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
4633 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
4634 bpf_target_off(struct net_device, ifindex, 4,
4635 target_size));
4636 break;
4637
4638	case offsetof(struct __sk_buff, hash):
4639		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
4640 bpf_target_off(struct sk_buff, hash, 4,
4641 target_size));
4642 break;
4643
4644	case offsetof(struct __sk_buff, mark):
4645		if (type == BPF_WRITE)
4646			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
4647 bpf_target_off(struct sk_buff, mark, 4,
4648 target_size));
4649		else
4650			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
4651 bpf_target_off(struct sk_buff, mark, 4,
4652 target_size));
4653		break;
4654
4655 case offsetof(struct __sk_buff, pkt_type):
4656 *target_size = 1;
4657 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
4658 PKT_TYPE_OFFSET());
4659 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
4660#ifdef __BIG_ENDIAN_BITFIELD
4661 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
4662#endif
4663 break;
4664
4665 case offsetof(struct __sk_buff, queue_mapping):
4666 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
4667 bpf_target_off(struct sk_buff, queue_mapping, 2,
4668 target_size));
4669 break;
4670
4671	case offsetof(struct __sk_buff, vlan_present):
4672	case offsetof(struct __sk_buff, vlan_tci):
4673 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
4674
4675 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
4676 bpf_target_off(struct sk_buff, vlan_tci, 2,
4677 target_size));
4678 if (si->off == offsetof(struct __sk_buff, vlan_tci)) {
4679 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg,
4680 ~VLAN_TAG_PRESENT);
4681 } else {
4682 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12);
4683 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
4684 }
4685 break;
4686
4687 case offsetof(struct __sk_buff, cb[0]) ...
4688	     offsetofend(struct __sk_buff, cb[4]) - 1:
4689		BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
4690 BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
4691 offsetof(struct qdisc_skb_cb, data)) %
4692 sizeof(__u64));
4693
4694		prog->cb_access = 1;
4695 off = si->off;
4696 off -= offsetof(struct __sk_buff, cb[0]);
4697 off += offsetof(struct sk_buff, cb);
4698 off += offsetof(struct qdisc_skb_cb, data);
4699		if (type == BPF_WRITE)
4700			*insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
4701					      si->src_reg, off);
4702		else
4703			*insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
4704					      si->src_reg, off);
4705 break;
4706
4707	case offsetof(struct __sk_buff, tc_classid):
4708 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
4709
4710 off = si->off;
4711 off -= offsetof(struct __sk_buff, tc_classid);
4712 off += offsetof(struct sk_buff, cb);
4713 off += offsetof(struct qdisc_skb_cb, tc_classid);
4714		*target_size = 2;
4715		if (type == BPF_WRITE)
4716 *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
4717 si->src_reg, off);
4718		else
4719 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
4720 si->src_reg, off);
4721 break;
4722
4723	case offsetof(struct __sk_buff, data):
4724		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
4725				      si->dst_reg, si->src_reg,
4726 offsetof(struct sk_buff, data));
4727 break;
4728
4729 case offsetof(struct __sk_buff, data_meta):
4730 off = si->off;
4731 off -= offsetof(struct __sk_buff, data_meta);
4732 off += offsetof(struct sk_buff, cb);
4733 off += offsetof(struct bpf_skb_data_end, data_meta);
4734 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
4735 si->src_reg, off);
4736 break;
4737
4738	case offsetof(struct __sk_buff, data_end):
4739 off = si->off;
4740 off -= offsetof(struct __sk_buff, data_end);
4741 off += offsetof(struct sk_buff, cb);
4742 off += offsetof(struct bpf_skb_data_end, data_end);
4743 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
4744 si->src_reg, off);
4745 break;
4746
4747 case offsetof(struct __sk_buff, tc_index):
4748#ifdef CONFIG_NET_SCHED
4749		if (type == BPF_WRITE)
4750			*insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
4751 bpf_target_off(struct sk_buff, tc_index, 2,
4752 target_size));
4753		else
4754			*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
4755 bpf_target_off(struct sk_buff, tc_index, 2,
4756 target_size));
4757#else
4758		*target_size = 2;
4759		if (type == BPF_WRITE)
4760			*insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
4761		else
4762			*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
4763#endif
4764 break;
4765
4766 case offsetof(struct __sk_buff, napi_id):
4767#if defined(CONFIG_NET_RX_BUSY_POLL)
4768		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
4769 bpf_target_off(struct sk_buff, napi_id, 4,
4770 target_size));
4771 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
4772 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
4773#else
4774		*target_size = 4;
4775		*insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
4776#endif
4777		break;
4778 case offsetof(struct __sk_buff, family):
4779 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
4780
4781 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4782 si->dst_reg, si->src_reg,
4783 offsetof(struct sk_buff, sk));
4784 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
4785 bpf_target_off(struct sock_common,
4786 skc_family,
4787 2, target_size));
4788 break;
4789 case offsetof(struct __sk_buff, remote_ip4):
4790 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
4791
4792 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4793 si->dst_reg, si->src_reg,
4794 offsetof(struct sk_buff, sk));
4795 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
4796 bpf_target_off(struct sock_common,
4797 skc_daddr,
4798 4, target_size));
4799 break;
4800 case offsetof(struct __sk_buff, local_ip4):
4801 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
4802 skc_rcv_saddr) != 4);
4803
4804 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4805 si->dst_reg, si->src_reg,
4806 offsetof(struct sk_buff, sk));
4807 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
4808 bpf_target_off(struct sock_common,
4809 skc_rcv_saddr,
4810 4, target_size));
4811 break;
4812 case offsetof(struct __sk_buff, remote_ip6[0]) ...
4813 offsetof(struct __sk_buff, remote_ip6[3]):
4814#if IS_ENABLED(CONFIG_IPV6)
4815 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
4816 skc_v6_daddr.s6_addr32[0]) != 4);
4817
4818 off = si->off;
4819 off -= offsetof(struct __sk_buff, remote_ip6[0]);
4820
4821 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4822 si->dst_reg, si->src_reg,
4823 offsetof(struct sk_buff, sk));
4824 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
4825 offsetof(struct sock_common,
4826 skc_v6_daddr.s6_addr32[0]) +
4827 off);
4828#else
4829 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
4830#endif
4831 break;
4832 case offsetof(struct __sk_buff, local_ip6[0]) ...
4833 offsetof(struct __sk_buff, local_ip6[3]):
4834#if IS_ENABLED(CONFIG_IPV6)
4835 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
4836 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
4837
4838 off = si->off;
4839 off -= offsetof(struct __sk_buff, local_ip6[0]);
4840
4841 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4842 si->dst_reg, si->src_reg,
4843 offsetof(struct sk_buff, sk));
4844 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
4845 offsetof(struct sock_common,
4846 skc_v6_rcv_saddr.s6_addr32[0]) +
4847 off);
4848#else
4849 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
4850#endif
4851 break;
4852
4853 case offsetof(struct __sk_buff, remote_port):
4854 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
4855
4856 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4857 si->dst_reg, si->src_reg,
4858 offsetof(struct sk_buff, sk));
4859 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
4860 bpf_target_off(struct sock_common,
4861 skc_dport,
4862 2, target_size));
4863#ifndef __BIG_ENDIAN_BITFIELD
4864 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
4865#endif
4866 break;
4867
4868 case offsetof(struct __sk_buff, local_port):
4869 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
4870
4871 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
4872 si->dst_reg, si->src_reg,
4873 offsetof(struct sk_buff, sk));
4874 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
4875 bpf_target_off(struct sock_common,
4876 skc_num, 2, target_size));
4877 break;
4878 }
4879
4880 return insn - insn_buf;
4881}
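/* Example of the rewrite performed above (sketch): a program load of
 * __sk_buff::len such as
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len))
 *
 * is converted at verification time into a load from the real socket
 * buffer,
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len))
 *
 * so no __sk_buff object ever exists at run time.
 */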
4882
4883 static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
4884 const struct bpf_insn *si,
4885 struct bpf_insn *insn_buf,
4886 struct bpf_prog *prog, u32 *target_size)
4887{
4888 struct bpf_insn *insn = insn_buf;
4889	int off;
4890
4891	switch (si->off) {
4892 case offsetof(struct bpf_sock, bound_dev_if):
4893 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);
4894
4895 if (type == BPF_WRITE)
4896			*insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
4897 offsetof(struct sock, sk_bound_dev_if));
4898 else
4899			*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
4900 offsetof(struct sock, sk_bound_dev_if));
4901 break;
4902
4903 case offsetof(struct bpf_sock, mark):
4904 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);
4905
4906 if (type == BPF_WRITE)
4907 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
4908 offsetof(struct sock, sk_mark));
4909 else
4910 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
4911 offsetof(struct sock, sk_mark));
4912 break;
4913
4914 case offsetof(struct bpf_sock, priority):
4915 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);
4916
4917 if (type == BPF_WRITE)
4918 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
4919 offsetof(struct sock, sk_priority));
4920 else
4921 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
4922 offsetof(struct sock, sk_priority));
4923 break;
4924
4925 case offsetof(struct bpf_sock, family):
4926 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);
4927
4928		*insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
4929 offsetof(struct sock, sk_family));
4930 break;
4931
4932 case offsetof(struct bpf_sock, type):
4933		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
4934				      offsetof(struct sock, __sk_flags_offset));
4935 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
4936 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
4937 break;
4938
4939 case offsetof(struct bpf_sock, protocol):
4940		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
4941				      offsetof(struct sock, __sk_flags_offset));
4942 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
4943 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
4944		break;
4945
4946 case offsetof(struct bpf_sock, src_ip4):
4947 *insn++ = BPF_LDX_MEM(
4948 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
4949 bpf_target_off(struct sock_common, skc_rcv_saddr,
4950 FIELD_SIZEOF(struct sock_common,
4951 skc_rcv_saddr),
4952 target_size));
4953 break;
4954
4955 case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
4956#if IS_ENABLED(CONFIG_IPV6)
4957 off = si->off;
4958 off -= offsetof(struct bpf_sock, src_ip6[0]);
4959 *insn++ = BPF_LDX_MEM(
4960 BPF_SIZE(si->code), si->dst_reg, si->src_reg,
4961 bpf_target_off(
4962 struct sock_common,
4963 skc_v6_rcv_saddr.s6_addr32[0],
4964 FIELD_SIZEOF(struct sock_common,
4965 skc_v6_rcv_saddr.s6_addr32[0]),
4966 target_size) + off);
4967#else
4968 (void)off;
4969 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
4970#endif
4971 break;
4972
4973 case offsetof(struct bpf_sock, src_port):
4974 *insn++ = BPF_LDX_MEM(
4975 BPF_FIELD_SIZEOF(struct sock_common, skc_num),
4976 si->dst_reg, si->src_reg,
4977 bpf_target_off(struct sock_common, skc_num,
4978 FIELD_SIZEOF(struct sock_common,
4979 skc_num),
4980 target_size));
4981 break;
4982 }
4983
4984 return insn - insn_buf;
4985}
4986
4987static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
4988 const struct bpf_insn *si,
4989 struct bpf_insn *insn_buf,
4990 struct bpf_prog *prog, u32 *target_size)
4991{
4992 struct bpf_insn *insn = insn_buf;
4993
4994	switch (si->off) {
4995	case offsetof(struct __sk_buff, ifindex):
4996		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
4997				      si->dst_reg, si->src_reg,
4998				      offsetof(struct sk_buff, dev));
4999		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5000 bpf_target_off(struct net_device, ifindex, 4,
5001 target_size));
5002 break;
5003 default:
5004 return bpf_convert_ctx_access(type, si, insn_buf, prog,
5005 target_size);
5006 }
5007
5008 return insn - insn_buf;
5009}
5010
5011static u32 xdp_convert_ctx_access(enum bpf_access_type type,
5012 const struct bpf_insn *si,
5013 struct bpf_insn *insn_buf,
5014 struct bpf_prog *prog, u32 *target_size)
5015{
5016 struct bpf_insn *insn = insn_buf;
5017
5018	switch (si->off) {
5019	case offsetof(struct xdp_md, data):
5020		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
5021				      si->dst_reg, si->src_reg,
5022 offsetof(struct xdp_buff, data));
5023 break;
5024 case offsetof(struct xdp_md, data_meta):
5025 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
5026 si->dst_reg, si->src_reg,
5027 offsetof(struct xdp_buff, data_meta));
5028 break;
5029	case offsetof(struct xdp_md, data_end):
5030		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
5031				      si->dst_reg, si->src_reg,
5032 offsetof(struct xdp_buff, data_end));
5033 break;
5034 case offsetof(struct xdp_md, ingress_ifindex):
5035 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
5036 si->dst_reg, si->src_reg,
5037 offsetof(struct xdp_buff, rxq));
5038 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
5039 si->dst_reg, si->dst_reg,
5040 offsetof(struct xdp_rxq_info, dev));
5041 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5042				      offsetof(struct net_device, ifindex));
5043 break;
5044 case offsetof(struct xdp_md, rx_queue_index):
5045 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
5046 si->dst_reg, si->src_reg,
5047 offsetof(struct xdp_buff, rxq));
5048 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5049 offsetof(struct xdp_rxq_info,
5050 queue_index));
5051		break;
5052 }
5053
5054 return insn - insn_buf;
5055}
5056
5057/* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF where S is type of
5058 * context Structure, F is Field in context structure that contains a pointer
5059 * to Nested Structure of type NS that has the field NF.
5060 *
5061 * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to caller to make
5062 * sure that SIZE is not greater than actual size of S.F.NF.
5063 *
5064 * If offset OFF is provided, the load happens from that offset relative to
5065 * offset of NF.
5066 */
5067#define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF) \
5068 do { \
5069 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \
5070 si->src_reg, offsetof(S, F)); \
5071 *insn++ = BPF_LDX_MEM( \
5072 SIZE, si->dst_reg, si->dst_reg, \
5073 bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \
5074 target_size) \
5075 + OFF); \
5076 } while (0)
5077
5078#define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF) \
5079 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, \
5080 BPF_FIELD_SIZEOF(NS, NF), 0)
5081
5082/* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
5083 * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for store operation.
5084 *
5085 * It doesn't support SIZE argument though since narrow stores are not
5086 * supported for now.
5087 *
5088 * In addition it uses Temporary Field TF (member of struct S) as the 3rd
5089 * "register" since two registers available in convert_ctx_access are not
5090 * enough: we can't override neither SRC, since it contains value to store, nor
5091 * DST since it contains pointer to context that may be used by later
5092 * instructions. But we need a temporary place to save pointer to nested
5093 * structure whose field we want to store to.
5094 */
5095#define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF) \
5096 do { \
5097 int tmp_reg = BPF_REG_9; \
5098 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
5099 --tmp_reg; \
5100 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \
5101 --tmp_reg; \
5102 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \
5103 offsetof(S, TF)); \
5104 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \
5105 si->dst_reg, offsetof(S, F)); \
5106 *insn++ = BPF_STX_MEM( \
5107 BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg, \
5108 bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \
5109 target_size) \
5110 + OFF); \
5111 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \
5112 offsetof(S, TF)); \
5113 } while (0)
5114
5115#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
5116 TF) \
5117 do { \
5118 if (type == BPF_WRITE) { \
5119 SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, \
5120 TF); \
5121 } else { \
5122 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \
5123 S, NS, F, NF, SIZE, OFF); \
5124 } \
5125 } while (0)
5126
5127#define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \
5128 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \
5129 S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
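/* Example expansion (sketch): for bpf_sock_addr::user_family,
 * SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
 * struct sockaddr, uaddr, sa_family) emits two loads, first the
 * uaddr pointer and then the field itself:
 *
 *	dst = *(u64 *)(src + offsetof(struct bpf_sock_addr_kern, uaddr));
 *	dst = *(u16 *)(dst + offsetof(struct sockaddr, sa_family));
 */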
5130
5131static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
5132 const struct bpf_insn *si,
5133 struct bpf_insn *insn_buf,
5134 struct bpf_prog *prog, u32 *target_size)
5135{
5136 struct bpf_insn *insn = insn_buf;
5137 int off;
5138
5139 switch (si->off) {
5140 case offsetof(struct bpf_sock_addr, user_family):
5141 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
5142 struct sockaddr, uaddr, sa_family);
5143 break;
5144
5145 case offsetof(struct bpf_sock_addr, user_ip4):
5146 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
5147 struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
5148 sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
5149 break;
5150
5151 case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
5152 off = si->off;
5153 off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
5154 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
5155 struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
5156 sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
5157 tmp_reg);
5158 break;
5159
5160 case offsetof(struct bpf_sock_addr, user_port):
5161 /* To get port we need to know sa_family first and then treat
5162 * sockaddr as either sockaddr_in or sockaddr_in6.
5163		 * Though we can simplify this since the port field has the
5164		 * same offset and size in both structures.
5165 * Here we check this invariant and use just one of the
5166 * structures if it's true.
5167 */
5168 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
5169 offsetof(struct sockaddr_in6, sin6_port));
5170 BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
5171 FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
5172 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
5173 struct sockaddr_in6, uaddr,
5174 sin6_port, tmp_reg);
5175 break;
5176
5177 case offsetof(struct bpf_sock_addr, family):
5178 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
5179 struct sock, sk, sk_family);
5180 break;
5181
5182 case offsetof(struct bpf_sock_addr, type):
5183 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
5184 struct bpf_sock_addr_kern, struct sock, sk,
5185 __sk_flags_offset, BPF_W, 0);
5186 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
5187 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
5188 break;
5189
5190 case offsetof(struct bpf_sock_addr, protocol):
5191 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
5192 struct bpf_sock_addr_kern, struct sock, sk,
5193 __sk_flags_offset, BPF_W, 0);
5194 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
5195 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
5196 SK_FL_PROTO_SHIFT);
5197 break;
5198 }
5199
5200 return insn - insn_buf;
5201}
5202
5203static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
5204 const struct bpf_insn *si,
5205 struct bpf_insn *insn_buf,
5206 struct bpf_prog *prog,
5207 u32 *target_size)
5208{
5209 struct bpf_insn *insn = insn_buf;
5210 int off;
5211
5212 switch (si->off) {
5213 case offsetof(struct bpf_sock_ops, op) ...
5214 offsetof(struct bpf_sock_ops, replylong[3]):
5215 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
5216 FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
5217 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
5218 FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
5219 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
5220 FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
5221 off = si->off;
5222 off -= offsetof(struct bpf_sock_ops, op);
5223 off += offsetof(struct bpf_sock_ops_kern, op);
5224 if (type == BPF_WRITE)
5225 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
5226 off);
5227 else
5228 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5229 off);
5230 break;
5231
5232 case offsetof(struct bpf_sock_ops, family):
5233 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
5234
5235 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
5236 struct bpf_sock_ops_kern, sk),
5237 si->dst_reg, si->src_reg,
5238 offsetof(struct bpf_sock_ops_kern, sk));
5239 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
5240 offsetof(struct sock_common, skc_family));
5241 break;
5242
5243 case offsetof(struct bpf_sock_ops, remote_ip4):
5244 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
5245
5246 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
5247 struct bpf_sock_ops_kern, sk),
5248 si->dst_reg, si->src_reg,
5249 offsetof(struct bpf_sock_ops_kern, sk));
5250 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5251 offsetof(struct sock_common, skc_daddr));
5252 break;
5253
5254 case offsetof(struct bpf_sock_ops, local_ip4):
5255 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_rcv_saddr) != 4);
5256
5257 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
5258 struct bpf_sock_ops_kern, sk),
5259 si->dst_reg, si->src_reg,
5260 offsetof(struct bpf_sock_ops_kern, sk));
5261 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5262 offsetof(struct sock_common,
5263 skc_rcv_saddr));
5264 break;
5265
5266 case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
5267 offsetof(struct bpf_sock_ops, remote_ip6[3]):
5268#if IS_ENABLED(CONFIG_IPV6)
5269 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
5270 skc_v6_daddr.s6_addr32[0]) != 4);
5271
5272 off = si->off;
5273 off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
5274 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
5275 struct bpf_sock_ops_kern, sk),
5276 si->dst_reg, si->src_reg,
5277 offsetof(struct bpf_sock_ops_kern, sk));
5278 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5279 offsetof(struct sock_common,
5280 skc_v6_daddr.s6_addr32[0]) +
5281 off);
5282#else
5283 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
5284#endif
5285 break;
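/* remote_ip6 is exposed as four 32-bit words, so the case label spans
 * remote_ip6[0] ... remote_ip6[3] and the distance from remote_ip6[0]
 * (0, 4, 8 or 12 bytes) is added to the offset of skc_v6_daddr to pick
 * the right word of the address. Without CONFIG_IPV6 there is nothing
 * to map the field to, so reads simply return 0.
 */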
5286
5287 case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
5288 offsetof(struct bpf_sock_ops, local_ip6[3]):
5289#if IS_ENABLED(CONFIG_IPV6)
5290 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
5291 skc_v6_rcv_saddr.s6_addr32[0]) != 4);
5292
5293 off = si->off;
5294 off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
5295 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
5296 struct bpf_sock_ops_kern, sk),
5297 si->dst_reg, si->src_reg,
5298 offsetof(struct bpf_sock_ops_kern, sk));
5299 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5300 offsetof(struct sock_common,
5301 skc_v6_rcv_saddr.s6_addr32[0]) +
5302 off);
5303#else
5304 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
5305#endif
5306 break;
5307
5308 case offsetof(struct bpf_sock_ops, remote_port):
5309 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
5310
5311 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
5312 struct bpf_sock_ops_kern, sk),
5313 si->dst_reg, si->src_reg,
5314 offsetof(struct bpf_sock_ops_kern, sk));
5315 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
5316 offsetof(struct sock_common, skc_dport));
5317#ifndef __BIG_ENDIAN_BITFIELD
5318 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
5319#endif
5320 break;
5321
5322 case offsetof(struct bpf_sock_ops, local_port):
5323 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
5324
5325 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
5326 struct bpf_sock_ops_kern, sk),
5327 si->dst_reg, si->src_reg,
5328 offsetof(struct bpf_sock_ops_kern, sk));
5329 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
5330 offsetof(struct sock_common, skc_num));
5331 break;
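/* On the two port cases: skc_dport is kept in network byte order, and
 * the uapi documents remote_port the same way. On little-endian hosts
 * the extra BPF_LSH by 16 moves the loaded halfword into the upper
 * bits, so the 32-bit value the program sees has the layout a
 * big-endian host would produce (bpf_ntohl() then recovers the port on
 * either endianness). skc_num is already host byte order, which is why
 * local_port is loaded as-is with no shift.
 */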
5332
5333 case offsetof(struct bpf_sock_ops, is_fullsock):
5334 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
5335 struct bpf_sock_ops_kern,
5336 is_fullsock),
5337 si->dst_reg, si->src_reg,
5338 offsetof(struct bpf_sock_ops_kern,
5339 is_fullsock));
5340 break;
5341
5342 case offsetof(struct bpf_sock_ops, state):
5343 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);
5344
5345 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
5346 struct bpf_sock_ops_kern, sk),
5347 si->dst_reg, si->src_reg,
5348 offsetof(struct bpf_sock_ops_kern, sk));
5349 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
5350 offsetof(struct sock_common, skc_state));
5351 break;
5352
5353 case offsetof(struct bpf_sock_ops, rtt_min):
5354 BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
5355 sizeof(struct minmax));
5356 BUILD_BUG_ON(sizeof(struct minmax) <
5357 sizeof(struct minmax_sample));
5358
5359 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
5360 struct bpf_sock_ops_kern, sk),
5361 si->dst_reg, si->src_reg,
5362 offsetof(struct bpf_sock_ops_kern, sk));
5363 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
5364 offsetof(struct tcp_sock, rtt_min) +
5365 FIELD_SIZEOF(struct minmax_sample, t));
5366 break;
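/* rtt_min is not a plain integer but a struct minmax, a windowed
 * min/max filter over struct minmax_sample { u32 t; u32 v; } entries.
 * Skipping FIELD_SIZEOF(struct minmax_sample, t) bytes past the start
 * of tcp_sock::rtt_min lands the 32-bit load above on s[0].v, the
 * current minimum-RTT sample.
 */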
5367
5368/* Helper macro for adding read access to tcp_sock or sock fields. */
5369#define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
f19397a5 5370 do { \
5371 BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
5372 FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
5373 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
5374 struct bpf_sock_ops_kern, \
5375 is_fullsock), \
5376 si->dst_reg, si->src_reg, \
5377 offsetof(struct bpf_sock_ops_kern, \
5378 is_fullsock)); \
5379 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \
5380 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
5381 struct bpf_sock_ops_kern, sk),\
5382 si->dst_reg, si->src_reg, \
5383 offsetof(struct bpf_sock_ops_kern, sk));\
5384 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \
5385 OBJ_FIELD), \
5386 si->dst_reg, si->dst_reg, \
5387 offsetof(OBJ, OBJ_FIELD)); \
5388 } while (0)
5389
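/* The BPF_JMP_IMM(BPF_JEQ, dst, 0, 2) in the read macro above is the
 * is_fullsock guard: request and timewait sockets have no tcp_sock
 * fields to read, so the two loads that follow are skipped and dst_reg
 * is left holding the 0 that the is_fullsock load produced. As an
 * illustrative sketch:
 *
 *	dst = ctx->is_fullsock;
 *	if (dst != 0) {
 *		dst = ctx->sk;
 *		dst = ((OBJ *)dst)->OBJ_FIELD;
 *	}
 */
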
5390/* Helper macro for adding write access to tcp_sock or sock fields.
5391 * The macro is called with two registers: dst_reg, which contains a
5392 * pointer to ctx (context), and src_reg, which contains the value
5393 * that should be stored. However, we need an additional register,
5394 * since we cannot overwrite dst_reg; it may be used later in the
5395 * program. Instead we "borrow" one of the other registers. We first
5396 * save its value into a new (temp) field in bpf_sock_ops_kern, use
5397 * it, and then restore it at the end of the macro.
5398 */
5399#define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \
5400 do { \
5401 int reg = BPF_REG_9; \
5402 BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \
5403 FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \
5404 if (si->dst_reg == reg || si->src_reg == reg) \
5405 reg--; \
5406 if (si->dst_reg == reg || si->src_reg == reg) \
5407 reg--; \
5408 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \
5409 offsetof(struct bpf_sock_ops_kern, \
5410 temp)); \
5411 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
5412 struct bpf_sock_ops_kern, \
5413 is_fullsock), \
5414 reg, si->dst_reg, \
5415 offsetof(struct bpf_sock_ops_kern, \
5416 is_fullsock)); \
5417 *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \
5418 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
5419 struct bpf_sock_ops_kern, sk),\
5420 reg, si->dst_reg, \
5421 offsetof(struct bpf_sock_ops_kern, sk));\
5422 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \
5423 reg, si->src_reg, \
5424 offsetof(OBJ, OBJ_FIELD)); \
5425 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \
5426 offsetof(struct bpf_sock_ops_kern, \
5427 temp)); \
5428 } while (0)
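/* The reg-selection dance above guarantees a scratch register distinct
 * from both operands: starting at R9 and decrementing at most twice
 * cannot collide, since each decrement is triggered by a conflict with
 * dst_reg or src_reg and there are only two of those. For example,
 * dst_reg == R9 and src_reg == R8 leaves reg == R7.
 */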
5429
5430#define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \
5431 do { \
5432 if (TYPE == BPF_WRITE) \
5433 SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
5434 else \
5435 SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \
5436 } while (0)
5437
f19397a5 5438 case offsetof(struct bpf_sock_ops, snd_cwnd):
34d367c5 5439 SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock);
5440 break;
5441
5442 case offsetof(struct bpf_sock_ops, srtt_us):
34d367c5 5443 SOCK_OPS_GET_FIELD(srtt_us, srtt_us, struct tcp_sock);
f19397a5 5444 break;
5445
5446 case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
5447 SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
5448 struct tcp_sock);
5449 break;
5450
5451 case offsetof(struct bpf_sock_ops, snd_ssthresh):
5452 SOCK_OPS_GET_FIELD(snd_ssthresh, snd_ssthresh, struct tcp_sock);
5453 break;
5454
5455 case offsetof(struct bpf_sock_ops, rcv_nxt):
5456 SOCK_OPS_GET_FIELD(rcv_nxt, rcv_nxt, struct tcp_sock);
5457 break;
5458
5459 case offsetof(struct bpf_sock_ops, snd_nxt):
5460 SOCK_OPS_GET_FIELD(snd_nxt, snd_nxt, struct tcp_sock);
5461 break;
5462
5463 case offsetof(struct bpf_sock_ops, snd_una):
5464 SOCK_OPS_GET_FIELD(snd_una, snd_una, struct tcp_sock);
5465 break;
5466
5467 case offsetof(struct bpf_sock_ops, mss_cache):
5468 SOCK_OPS_GET_FIELD(mss_cache, mss_cache, struct tcp_sock);
5469 break;
5470
5471 case offsetof(struct bpf_sock_ops, ecn_flags):
5472 SOCK_OPS_GET_FIELD(ecn_flags, ecn_flags, struct tcp_sock);
5473 break;
5474
5475 case offsetof(struct bpf_sock_ops, rate_delivered):
5476 SOCK_OPS_GET_FIELD(rate_delivered, rate_delivered,
5477 struct tcp_sock);
5478 break;
5479
5480 case offsetof(struct bpf_sock_ops, rate_interval_us):
5481 SOCK_OPS_GET_FIELD(rate_interval_us, rate_interval_us,
5482 struct tcp_sock);
5483 break;
5484
5485 case offsetof(struct bpf_sock_ops, packets_out):
5486 SOCK_OPS_GET_FIELD(packets_out, packets_out, struct tcp_sock);
5487 break;
5488
5489 case offsetof(struct bpf_sock_ops, retrans_out):
5490 SOCK_OPS_GET_FIELD(retrans_out, retrans_out, struct tcp_sock);
5491 break;
5492
5493 case offsetof(struct bpf_sock_ops, total_retrans):
5494 SOCK_OPS_GET_FIELD(total_retrans, total_retrans,
5495 struct tcp_sock);
5496 break;
5497
5498 case offsetof(struct bpf_sock_ops, segs_in):
5499 SOCK_OPS_GET_FIELD(segs_in, segs_in, struct tcp_sock);
5500 break;
5501
5502 case offsetof(struct bpf_sock_ops, data_segs_in):
5503 SOCK_OPS_GET_FIELD(data_segs_in, data_segs_in, struct tcp_sock);
5504 break;
5505
5506 case offsetof(struct bpf_sock_ops, segs_out):
5507 SOCK_OPS_GET_FIELD(segs_out, segs_out, struct tcp_sock);
5508 break;
5509
5510 case offsetof(struct bpf_sock_ops, data_segs_out):
5511 SOCK_OPS_GET_FIELD(data_segs_out, data_segs_out,
5512 struct tcp_sock);
5513 break;
5514
5515 case offsetof(struct bpf_sock_ops, lost_out):
5516 SOCK_OPS_GET_FIELD(lost_out, lost_out, struct tcp_sock);
5517 break;
5518
5519 case offsetof(struct bpf_sock_ops, sacked_out):
5520 SOCK_OPS_GET_FIELD(sacked_out, sacked_out, struct tcp_sock);
5521 break;
5522
5523 case offsetof(struct bpf_sock_ops, sk_txhash):
5524 SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
5525 struct sock, type);
5526 break;
5527
5528 case offsetof(struct bpf_sock_ops, bytes_received):
5529 SOCK_OPS_GET_FIELD(bytes_received, bytes_received,
5530 struct tcp_sock);
5531 break;
5532
5533 case offsetof(struct bpf_sock_ops, bytes_acked):
5534 SOCK_OPS_GET_FIELD(bytes_acked, bytes_acked, struct tcp_sock);
5535 break;
6f9bd3d7 5536
5537 }
5538 return insn - insn_buf;
5539}
5540
5541static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
5542 const struct bpf_insn *si,
5543 struct bpf_insn *insn_buf,
5544 struct bpf_prog *prog, u32 *target_size)
5545{
5546 struct bpf_insn *insn = insn_buf;
5547 int off;
5548
5549 switch (si->off) {
5550 case offsetof(struct __sk_buff, data_end):
5551 off = si->off;
5552 off -= offsetof(struct __sk_buff, data_end);
5553 off += offsetof(struct sk_buff, cb);
5554 off += offsetof(struct tcp_skb_cb, bpf.data_end);
5555 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
5556 si->src_reg, off);
5557 break;
5558 default:
5559 return bpf_convert_ctx_access(type, si, insn_buf, prog,
5560 target_size);
5561 }
5562
5563 return insn - insn_buf;
5564}
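/* For SK_SKB programs, data_end is not a native sk_buff member; it is
 * stashed in the TCP control block inside skb->cb. The offset chain
 * above composes to
 *
 *	offsetof(struct sk_buff, cb) +
 *	offsetof(struct tcp_skb_cb, bpf.data_end)
 *
 * and every other field falls through to the generic
 * bpf_convert_ctx_access() shared with the other skb program types.
 */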
5565
5566static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
5567 const struct bpf_insn *si,
5568 struct bpf_insn *insn_buf,
5569 struct bpf_prog *prog, u32 *target_size)
5570{
5571 struct bpf_insn *insn = insn_buf;
5572
5573 switch (si->off) {
5574 case offsetof(struct sk_msg_md, data):
5575 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data),
5576 si->dst_reg, si->src_reg,
5577 offsetof(struct sk_msg_buff, data));
5578 break;
5579 case offsetof(struct sk_msg_md, data_end):
5580 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_buff, data_end),
5581 si->dst_reg, si->src_reg,
5582 offsetof(struct sk_msg_buff, data_end));
5583 break;
5584 }
5585
5586 return insn - insn_buf;
5587}
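/* sk_msg_md is the simplest of the three contexts converted here: data
 * and data_end map one-to-one onto the same-named members of struct
 * sk_msg_buff, so each access is a single pointer-sized load with no
 * rebasing or fullsock guard.
 */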
5588
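/* The tables below pair, per program type, a bpf_verifier_ops (load
 * time: which helpers may be called, which ctx accesses are valid, and
 * how those accesses are rewritten) with a bpf_prog_ops (runtime hooks
 * such as test_run). Types whose bpf_prog_ops is empty simply have no
 * BPF_PROG_TEST_RUN support here.
 */
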
7de16e3a 5589const struct bpf_verifier_ops sk_filter_verifier_ops = {
5590 .get_func_proto = sk_filter_func_proto,
5591 .is_valid_access = sk_filter_is_valid_access,
2492d3b8 5592 .convert_ctx_access = bpf_convert_ctx_access,
5593};
5594
7de16e3a 5595const struct bpf_prog_ops sk_filter_prog_ops = {
61f3c964 5596 .test_run = bpf_prog_test_run_skb,
5597};
5598
5599const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
5600 .get_func_proto = tc_cls_act_func_proto,
5601 .is_valid_access = tc_cls_act_is_valid_access,
374fb54e 5602 .convert_ctx_access = tc_cls_act_convert_ctx_access,
36bbef52 5603 .gen_prologue = tc_cls_act_prologue,
5604};
5605
5606const struct bpf_prog_ops tc_cls_act_prog_ops = {
1cf1cae9 5607 .test_run = bpf_prog_test_run_skb,
5608};
5609
7de16e3a 5610const struct bpf_verifier_ops xdp_verifier_ops = {
5611 .get_func_proto = xdp_func_proto,
5612 .is_valid_access = xdp_is_valid_access,
5613 .convert_ctx_access = xdp_convert_ctx_access,
5614};
5615
5616const struct bpf_prog_ops xdp_prog_ops = {
1cf1cae9 5617 .test_run = bpf_prog_test_run_xdp,
5618};
5619
7de16e3a 5620const struct bpf_verifier_ops cg_skb_verifier_ops = {
966789fb 5621 .get_func_proto = sk_filter_func_proto,
0e33661d 5622 .is_valid_access = sk_filter_is_valid_access,
2492d3b8 5623 .convert_ctx_access = bpf_convert_ctx_access,
5624};
5625
5626const struct bpf_prog_ops cg_skb_prog_ops = {
1cf1cae9 5627 .test_run = bpf_prog_test_run_skb,
5628};
5629
7de16e3a 5630const struct bpf_verifier_ops lwt_inout_verifier_ops = {
5631 .get_func_proto = lwt_inout_func_proto,
5632 .is_valid_access = lwt_is_valid_access,
2492d3b8 5633 .convert_ctx_access = bpf_convert_ctx_access,
5634};
5635
5636const struct bpf_prog_ops lwt_inout_prog_ops = {
1cf1cae9 5637 .test_run = bpf_prog_test_run_skb,
5638};
5639
7de16e3a 5640const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
5641 .get_func_proto = lwt_xmit_func_proto,
5642 .is_valid_access = lwt_is_valid_access,
2492d3b8 5643 .convert_ctx_access = bpf_convert_ctx_access,
3a0af8fd 5644 .gen_prologue = tc_cls_act_prologue,
5645};
5646
5647const struct bpf_prog_ops lwt_xmit_prog_ops = {
1cf1cae9 5648 .test_run = bpf_prog_test_run_skb,
5649};
5650
7de16e3a 5651const struct bpf_verifier_ops cg_sock_verifier_ops = {
ae2cf1c4 5652 .get_func_proto = sock_filter_func_proto,
5653 .is_valid_access = sock_filter_is_valid_access,
5654 .convert_ctx_access = sock_filter_convert_ctx_access,
5655};
5656
5657const struct bpf_prog_ops cg_sock_prog_ops = {
5658};
5659
5660const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
5661 .get_func_proto = sock_addr_func_proto,
5662 .is_valid_access = sock_addr_is_valid_access,
5663 .convert_ctx_access = sock_addr_convert_ctx_access,
5664};
5665
5666const struct bpf_prog_ops cg_sock_addr_prog_ops = {
5667};
5668
7de16e3a 5669const struct bpf_verifier_ops sock_ops_verifier_ops = {
8c4b4c7e 5670 .get_func_proto = sock_ops_func_proto,
5671 .is_valid_access = sock_ops_is_valid_access,
5672 .convert_ctx_access = sock_ops_convert_ctx_access,
5673};
5674
5675const struct bpf_prog_ops sock_ops_prog_ops = {
5676};
5677
5678const struct bpf_verifier_ops sk_skb_verifier_ops = {
5679 .get_func_proto = sk_skb_func_proto,
5680 .is_valid_access = sk_skb_is_valid_access,
8108a775 5681 .convert_ctx_access = sk_skb_convert_ctx_access,
8a31db56 5682 .gen_prologue = sk_skb_prologue,
5683};
5684
5685const struct bpf_prog_ops sk_skb_prog_ops = {
5686};
5687
5688const struct bpf_verifier_ops sk_msg_verifier_ops = {
5689 .get_func_proto = sk_msg_func_proto,
5690 .is_valid_access = sk_msg_is_valid_access,
5691 .convert_ctx_access = sk_msg_convert_ctx_access,
5692};
5693
5694const struct bpf_prog_ops sk_msg_prog_ops = {
5695};
5696
8ced425e 5697int sk_detach_filter(struct sock *sk)
5698{
5699 int ret = -ENOENT;
5700 struct sk_filter *filter;
5701
5702 if (sock_flag(sk, SOCK_FILTER_LOCKED))
5703 return -EPERM;
5704
5705 filter = rcu_dereference_protected(sk->sk_filter,
5706 lockdep_sock_is_held(sk));
55b33325 5707 if (filter) {
a9b3cd7f 5708 RCU_INIT_POINTER(sk->sk_filter, NULL);
46bcf14f 5709 sk_filter_uncharge(sk, filter);
5710 ret = 0;
5711 }
a3ea269b 5712
5713 return ret;
5714}
8ced425e 5715EXPORT_SYMBOL_GPL(sk_detach_filter);
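
/* A user-space usage sketch (illustrative, using the classic socket
 * filter API): detaching reduces to
 *
 *	int dummy = 0;
 *	setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, &dummy, sizeof(dummy));
 *
 * which lands here with the socket lock held, clears sk->sk_filter via
 * RCU_INIT_POINTER() and drops the filter's charge.
 */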
a8fc9277 5716
5717int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
5718 unsigned int len)
a8fc9277 5719{
a3ea269b 5720 struct sock_fprog_kern *fprog;
a8fc9277 5721 struct sk_filter *filter;
a3ea269b 5722 int ret = 0;
5723
5724 lock_sock(sk);
5725 filter = rcu_dereference_protected(sk->sk_filter,
8ced425e 5726 lockdep_sock_is_held(sk));
5727 if (!filter)
5728 goto out;
5729
5730 /* We're copying the filter that was originally attached,
5731 * so no conversion/decode is needed anymore. eBPF programs that
5732 * have no original program cannot be dumped through this.
a3ea269b 5733 */
93d08b69 5734 ret = -EACCES;
7ae457c1 5735 fprog = filter->prog->orig_prog;
5736 if (!fprog)
5737 goto out;
5738
5739 ret = fprog->len;
a8fc9277 5740 if (!len)
a3ea269b 5741 /* User space is only asking for the number of filter blocks. */
a8fc9277 5742 goto out;
a3ea269b 5743
a8fc9277 5744 ret = -EINVAL;
a3ea269b 5745 if (len < fprog->len)
5746 goto out;
5747
5748 ret = -EFAULT;
009937e7 5749 if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
a3ea269b 5750 goto out;
a8fc9277 5751
5752 /* Per the API, return the number of filter blocks instead of
5753 * bytes.
5754 */
5755 ret = fprog->len;
5756out:
5757 release_sock(sk);
5758 return ret;
5759}
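
/* A user-space usage sketch (illustrative): sk_get_filter() backs
 * getsockopt(SO_GET_FILTER) and counts in filter blocks, not bytes,
 * so dumping an attached classic filter is a two-step dance:
 *
 *	socklen_t optlen = 0;
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &optlen);
 *	struct sock_filter *buf = calloc(optlen, sizeof(*buf));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, buf, &optlen);
 *
 * The first call (len == 0) only reports the block count; the second
 * fails with -EINVAL if the buffer holds fewer blocks than the program.
 */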