// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *      Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *      Jay Schulist <jschlst@samba.org>
 *      Alexei Starovoitov <ast@plumgrid.com>
 *      Daniel Borkmann <dborkman@redhat.com>
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>

/**
 *      sk_filter_trim_cap - run a packet through a socket filter
 *      @sk: sock associated with &sk_buff
 *      @skb: buffer to filter
 *      @cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is
 * smaller than pkt_len we keep the whole skb->data. This is the socket
 * level wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
        int err;
        struct sk_filter *filter;

        /*
         * If the skb was allocated from pfmemalloc reserves, only
         * allow SOCK_MEMALLOC sockets to use it as this socket is
         * helping free memory
         */
        if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
                return -ENOMEM;
        }
        err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
        if (err)
                return err;

        err = security_sock_rcv_skb(sk, skb);
        if (err)
                return err;

        rcu_read_lock();
        filter = rcu_dereference(sk->sk_filter);
        if (filter) {
                struct sock *save_sk = skb->sk;
                unsigned int pkt_len;

                skb->sk = sk;
                pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
                skb->sk = save_sk;
                err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
        }
        rcu_read_unlock();

        return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);
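
/* Example (illustrative sketch, not part of the original file): a classic
 * filter attached from userspace ends up being run by sk_filter_trim_cap()
 * via sk_filter(). A single-instruction program that accepts every packet
 * but keeps at most 64 bytes of it could be attached like this:
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 64),	// accept, trim to 64 bytes
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &fprog, sizeof(fprog));
 *
 * The value the program returns becomes pkt_len above; returning 0 would
 * drop the packet with -EPERM.
 */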

BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
        return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
        struct nlattr *nla;

        if (skb_is_nonlinear(skb))
                return 0;

        if (skb->len < sizeof(struct nlattr))
                return 0;

        if (a > skb->len - sizeof(struct nlattr))
                return 0;

        nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
        if (nla)
                return (void *) nla - (void *) skb->data;

        return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
        struct nlattr *nla;

        if (skb_is_nonlinear(skb))
                return 0;

        if (skb->len < sizeof(struct nlattr))
                return 0;

        if (a > skb->len - sizeof(struct nlattr))
                return 0;

        nla = (struct nlattr *) &skb->data[a];
        if (nla->nla_len > skb->len - a)
                return 0;

        nla = nla_find_nested(nla, x);
        if (nla)
                return (void *) nla - (void *) skb->data;

        return 0;
}

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u8 tmp, *ptr;
        const int len = sizeof(tmp);

        if (offset >= 0) {
                if (headlen - offset >= len)
                        return *(u8 *)(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return tmp;
        } else {
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return *(u8 *)ptr;
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
                                         offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u16 tmp, *ptr;
        const int len = sizeof(tmp);

        if (offset >= 0) {
                if (headlen - offset >= len)
                        return get_unaligned_be16(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return be16_to_cpu(tmp);
        } else {
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return get_unaligned_be16(ptr);
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
                                          offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
           data, int, headlen, int, offset)
{
        u32 tmp, *ptr;
        const int len = sizeof(tmp);

        if (likely(offset >= 0)) {
                if (headlen - offset >= len)
                        return get_unaligned_be32(data + offset);
                if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
                        return be32_to_cpu(tmp);
        } else {
                ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
                if (likely(ptr))
                        return get_unaligned_be32(ptr);
        }

        return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
           int, offset)
{
        return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
                                          offset);
}
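
/* Example (illustrative, describing typical usage rather than code from
 * this file): classic BPF may also load from negative offsets relative to
 * skb->data through the magic SKF_LL_OFF and SKF_NET_OFF bases from
 * <linux/filter.h>, e.g.:
 *
 *	BPF_STMT(BPF_LD | BPF_B | BPF_ABS, SKF_LL_OFF + 0),
 *
 * which reads the first byte of the link-layer header. Such negative
 * offsets take the bpf_internal_load_pointer_neg_helper() path in the
 * load helpers above.
 */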

BPF_CALL_0(bpf_get_raw_cpu_id)
{
        return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
        .func           = bpf_get_raw_cpu_id,
        .gpl_only       = false,
        .ret_type       = RET_INTEGER,
};

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
                              struct bpf_insn *insn_buf)
{
        struct bpf_insn *insn = insn_buf;

        switch (skb_field) {
        case SKF_AD_MARK:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

                *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
                                      offsetof(struct sk_buff, mark));
                break;

        case SKF_AD_PKTTYPE:
                *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
                *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
                *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
                break;

        case SKF_AD_QUEUE:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

                *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
                                      offsetof(struct sk_buff, queue_mapping));
                break;

        case SKF_AD_VLAN_TAG:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);

                /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
                *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
                                      offsetof(struct sk_buff, vlan_tci));
                break;
        case SKF_AD_VLAN_TAG_PRESENT:
                *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET());
                if (PKT_VLAN_PRESENT_BIT)
                        *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
                if (PKT_VLAN_PRESENT_BIT < 7)
                        *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
                break;
        }

        return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
                                   struct bpf_insn **insnp)
{
        struct bpf_insn *insn = *insnp;
        u32 cnt;

        switch (fp->k) {
        case SKF_AD_OFF + SKF_AD_PROTOCOL:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

                /* A = *(u16 *) (CTX + offsetof(protocol)) */
                *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
                                      offsetof(struct sk_buff, protocol));
                /* A = ntohs(A) [emitting a nop or swap16] */
                *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
                break;

        case SKF_AD_OFF + SKF_AD_PKTTYPE:
                cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_IFINDEX:
        case SKF_AD_OFF + SKF_AD_HATYPE:
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
                BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

                *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
                                      BPF_REG_TMP, BPF_REG_CTX,
                                      offsetof(struct sk_buff, dev));
                /* if (tmp != 0) goto pc + 1 */
                *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
                *insn++ = BPF_EXIT_INSN();
                if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
                        *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
                                            offsetof(struct net_device, ifindex));
                else
                        *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
                                            offsetof(struct net_device, type));
                break;

        case SKF_AD_OFF + SKF_AD_MARK:
                cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_RXHASH:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

                *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
                                    offsetof(struct sk_buff, hash));
                break;

        case SKF_AD_OFF + SKF_AD_QUEUE:
                cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TAG:
                cnt = convert_skb_access(SKF_AD_VLAN_TAG,
                                         BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
                cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
                                         BPF_REG_A, BPF_REG_CTX, insn);
                insn += cnt - 1;
                break;

        case SKF_AD_OFF + SKF_AD_VLAN_TPID:
                BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

                /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
                *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
                                      offsetof(struct sk_buff, vlan_proto));
                /* A = ntohs(A) [emitting a nop or swap16] */
                *insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
                break;

        case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
        case SKF_AD_OFF + SKF_AD_NLATTR:
        case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
        case SKF_AD_OFF + SKF_AD_CPU:
        case SKF_AD_OFF + SKF_AD_RANDOM:
                /* arg1 = CTX */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
                /* arg2 = A */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
                /* arg3 = X */
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
                /* Emit call(arg1=CTX, arg2=A, arg3=X) */
                switch (fp->k) {
                case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
                        break;
                case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
                        *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
                        break;
                case SKF_AD_OFF + SKF_AD_CPU:
                        *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
                        break;
                case SKF_AD_OFF + SKF_AD_RANDOM:
                        *insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
                        bpf_user_rnd_init_once();
                        break;
                }
                break;

        case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
                /* A ^= X */
                *insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
                break;

        default:
                /* This is just a dummy call to avoid letting the compiler
                 * evict __bpf_call_base() as an optimization. Placed here
                 * where no-one bothers.
                 */
                BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
                return false;
        }

        *insnp = insn;
        return true;
}
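
/* Example (illustrative sketch, not part of the original file): an
 * ancillary load in classic BPF is an ordinary absolute load at a magic
 * offset, e.g. loading the interface index:
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_IFINDEX),
 *
 * convert_bpf_extensions() above rewrites this into direct eBPF loads of
 * skb->dev and dev->ifindex (bailing out if skb->dev is NULL) instead of
 * a packet-data access.
 */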

static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
        const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
        int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
        bool endian = BPF_SIZE(fp->code) == BPF_H ||
                      BPF_SIZE(fp->code) == BPF_W;
        bool indirect = BPF_MODE(fp->code) == BPF_IND;
        const int ip_align = NET_IP_ALIGN;
        struct bpf_insn *insn = *insnp;
        int offset = fp->k;

        if (!indirect &&
            ((unaligned_ok && offset >= 0) ||
             (!unaligned_ok && offset >= 0 &&
              offset + ip_align >= 0 &&
              offset + ip_align % size == 0))) {
                bool ldx_off_ok = offset <= S16_MAX;

                *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
                if (offset)
                        *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
                *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
                                      size, 2 + endian + (!ldx_off_ok * 2));
                if (ldx_off_ok) {
                        *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
                                              BPF_REG_D, offset);
                } else {
                        *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
                        *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
                        *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
                                              BPF_REG_TMP, 0);
                }
                if (endian)
                        *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
                *insn++ = BPF_JMP_A(8);
        }

        *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
        *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
        *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
        if (!indirect) {
                *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
        } else {
                *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
                if (fp->k)
                        *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
        }

        switch (BPF_SIZE(fp->code)) {
        case BPF_B:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
                break;
        case BPF_H:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
                break;
        case BPF_W:
                *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
                break;
        default:
                return false;
        }

        *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
        *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
        *insn   = BPF_EXIT_INSN();

        *insnp = insn;
        return true;
}

/**
 *      bpf_convert_filter - convert filter program
 *      @prog: the user passed filter program
 *      @len: the length of the user passed filter program
 *      @new_prog: allocated 'struct bpf_prog' or NULL
 *      @new_len: pointer to store length of converted program
 *      @seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) Second pass to remap, itself done in two sub-passes: the first
 *    finds the new jump offsets, the second does the remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
                              struct bpf_prog *new_prog, int *new_len,
                              bool *seen_ld_abs)
{
        int new_flen = 0, pass = 0, target, i, stack_off;
        struct bpf_insn *new_insn, *first_insn = NULL;
        struct sock_filter *fp;
        int *addrs = NULL;
        u8 bpf_src;

        BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
        BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

        if (len <= 0 || len > BPF_MAXINSNS)
                return -EINVAL;

        if (new_prog) {
                first_insn = new_prog->insnsi;
                addrs = kcalloc(len, sizeof(*addrs),
                                GFP_KERNEL | __GFP_NOWARN);
                if (!addrs)
                        return -ENOMEM;
        }

do_pass:
        new_insn = first_insn;
        fp = prog;

        /* Classic BPF related prologue emission. */
        if (new_prog) {
                /* Classic BPF expects A and X to be reset first. These need
                 * to be guaranteed to be the first two instructions.
                 */
                *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
                *new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

                /* All programs must keep CTX in the callee-saved BPF_REG_CTX.
                 * In the eBPF case it's done by the compiler; here we need to
                 * do it ourselves. The initial CTX is present in BPF_REG_ARG1.
                 */
                *new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
                if (*seen_ld_abs) {
                        /* For packet access in classic BPF, cache skb->data
                         * in callee-saved BPF R8 and skb->len - skb->data_len
                         * (headlen) in BPF R9. Since classic BPF is read-only
                         * on CTX, we only need to cache it once.
                         */
                        *new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
                                                  BPF_REG_D, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, data));
                        *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, len));
                        *new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
                                                  offsetof(struct sk_buff, data_len));
                        *new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
                }
        } else {
                new_insn += 3;
        }

        for (i = 0; i < len; fp++, i++) {
                struct bpf_insn tmp_insns[32] = { };
                struct bpf_insn *insn = tmp_insns;

                if (addrs)
                        addrs[i] = new_insn - first_insn;

                switch (fp->code) {
                /* All arithmetic insns and skb loads map as-is. */
                case BPF_ALU | BPF_ADD | BPF_X:
                case BPF_ALU | BPF_ADD | BPF_K:
                case BPF_ALU | BPF_SUB | BPF_X:
                case BPF_ALU | BPF_SUB | BPF_K:
                case BPF_ALU | BPF_AND | BPF_X:
                case BPF_ALU | BPF_AND | BPF_K:
                case BPF_ALU | BPF_OR | BPF_X:
                case BPF_ALU | BPF_OR | BPF_K:
                case BPF_ALU | BPF_LSH | BPF_X:
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_X:
                case BPF_ALU | BPF_RSH | BPF_K:
                case BPF_ALU | BPF_XOR | BPF_X:
                case BPF_ALU | BPF_XOR | BPF_K:
                case BPF_ALU | BPF_MUL | BPF_X:
                case BPF_ALU | BPF_MUL | BPF_K:
                case BPF_ALU | BPF_DIV | BPF_X:
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_MOD | BPF_X:
                case BPF_ALU | BPF_MOD | BPF_K:
                case BPF_ALU | BPF_NEG:
                case BPF_LD | BPF_ABS | BPF_W:
                case BPF_LD | BPF_ABS | BPF_H:
                case BPF_LD | BPF_ABS | BPF_B:
                case BPF_LD | BPF_IND | BPF_W:
                case BPF_LD | BPF_IND | BPF_H:
                case BPF_LD | BPF_IND | BPF_B:
                        /* Check for overloaded BPF extension and
                         * directly convert it if found, otherwise
                         * just move on with mapping.
                         */
                        if (BPF_CLASS(fp->code) == BPF_LD &&
                            BPF_MODE(fp->code) == BPF_ABS &&
                            convert_bpf_extensions(fp, &insn))
                                break;
                        if (BPF_CLASS(fp->code) == BPF_LD &&
                            convert_bpf_ld_abs(fp, &insn)) {
                                *seen_ld_abs = true;
                                break;
                        }

                        if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
                            fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
                                *insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
                                /* Error with exception code on div/mod by 0.
                                 * For cBPF programs, this always meant a return of 0.
                                 */
                                *insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
                                *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
                                *insn++ = BPF_EXIT_INSN();
                        }

                        *insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
                        break;

                /* Jump transformation cannot use BPF block macros
                 * everywhere as offset calculation and target updates
                 * require a bit more work than the rest, i.e. jump
                 * opcodes map as-is, but offsets need adjustment.
                 */

#define BPF_EMIT_JMP                                                    \
        do {                                                            \
                const s32 off_min = S16_MIN, off_max = S16_MAX;         \
                s32 off;                                                \
                                                                        \
                if (target >= len || target < 0)                        \
                        goto err;                                       \
                off = addrs ? addrs[target] - addrs[i] - 1 : 0;         \
                /* Adjust pc relative offset for 2nd or 3rd insn. */    \
                off -= insn - tmp_insns;                                \
                /* Reject anything not fitting into insn->off. */       \
                if (off < off_min || off > off_max)                     \
                        goto err;                                       \
                insn->off = off;                                        \
        } while (0)

                case BPF_JMP | BPF_JA:
                        target = i + fp->k + 1;
                        insn->code = fp->code;
                        BPF_EMIT_JMP;
                        break;

                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                        if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
                                /* BPF immediates are signed, zero extend
                                 * immediate into tmp register and use it
                                 * in compare insn.
                                 */
                                *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

                                insn->dst_reg = BPF_REG_A;
                                insn->src_reg = BPF_REG_TMP;
                                bpf_src = BPF_X;
                        } else {
                                insn->dst_reg = BPF_REG_A;
                                insn->imm = fp->k;
                                bpf_src = BPF_SRC(fp->code);
                                insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
                        }

                        /* Common case where 'jump_false' is next insn. */
                        if (fp->jf == 0) {
                                insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
                                target = i + fp->jt + 1;
                                BPF_EMIT_JMP;
                                break;
                        }

                        /* Convert some jumps when 'jump_true' is next insn. */
                        if (fp->jt == 0) {
                                switch (BPF_OP(fp->code)) {
                                case BPF_JEQ:
                                        insn->code = BPF_JMP | BPF_JNE | bpf_src;
                                        break;
                                case BPF_JGT:
                                        insn->code = BPF_JMP | BPF_JLE | bpf_src;
                                        break;
                                case BPF_JGE:
                                        insn->code = BPF_JMP | BPF_JLT | bpf_src;
                                        break;
                                default:
                                        goto jmp_rest;
                                }

                                target = i + fp->jf + 1;
                                BPF_EMIT_JMP;
                                break;
                        }
jmp_rest:
                        /* Other jumps are mapped into two insns: Jxx and JA. */
                        target = i + fp->jt + 1;
                        insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
                        BPF_EMIT_JMP;
                        insn++;

                        insn->code = BPF_JMP | BPF_JA;
                        target = i + fp->jf + 1;
                        BPF_EMIT_JMP;
                        break;

                /* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
                case BPF_LDX | BPF_MSH | BPF_B: {
                        struct sock_filter tmp = {
                                .code   = BPF_LD | BPF_ABS | BPF_B,
                                .k      = fp->k,
                        };

                        *seen_ld_abs = true;

                        /* X = A */
                        *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        /* A = BPF_R0 = *(u8 *) (skb->data + K) */
                        convert_bpf_ld_abs(&tmp, &insn);
                        insn++;
                        /* A &= 0xf */
                        *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
                        /* A <<= 2 */
                        *insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
                        /* tmp = X */
                        *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
                        /* X = A */
                        *insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        /* A = tmp */
                        *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
                        break;
                }
                /* RET_K is remapped into 2 insns. RET_A case doesn't need an
                 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
                 */
                case BPF_RET | BPF_A:
                case BPF_RET | BPF_K:
                        if (BPF_RVAL(fp->code) == BPF_K)
                                *insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
                                                        0, fp->k);
                        *insn = BPF_EXIT_INSN();
                        break;

                /* Store to stack. */
                case BPF_ST:
                case BPF_STX:
                        stack_off = fp->k * 4  + 4;
                        *insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
                                            BPF_ST ? BPF_REG_A : BPF_REG_X,
                                            -stack_off);
                        /* check_load_and_stores() verifies that classic BPF can
                         * load from stack only after write, so tracking
                         * stack_depth for ST|STX insns is enough
                         */
                        if (new_prog && new_prog->aux->stack_depth < stack_off)
                                new_prog->aux->stack_depth = stack_off;
                        break;

                /* Load from stack. */
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                        stack_off = fp->k * 4  + 4;
                        *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD  ?
                                            BPF_REG_A : BPF_REG_X, BPF_REG_FP,
                                            -stack_off);
                        break;

                /* A = K or X = K */
                case BPF_LD | BPF_IMM:
                case BPF_LDX | BPF_IMM:
                        *insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
                                              BPF_REG_A : BPF_REG_X, fp->k);
                        break;

                /* X = A */
                case BPF_MISC | BPF_TAX:
                        *insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
                        break;

                /* A = X */
                case BPF_MISC | BPF_TXA:
                        *insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
                        break;

                /* A = skb->len or X = skb->len */
                case BPF_LD | BPF_W | BPF_LEN:
                case BPF_LDX | BPF_W | BPF_LEN:
                        *insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
                                            BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
                                            offsetof(struct sk_buff, len));
                        break;

                /* Access seccomp_data fields. */
                case BPF_LDX | BPF_ABS | BPF_W:
                        /* A = *(u32 *) (ctx + K) */
                        *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
                        break;

                /* Unknown instruction. */
                default:
                        goto err;
                }

                insn++;
                if (new_prog)
                        memcpy(new_insn, tmp_insns,
                               sizeof(*insn) * (insn - tmp_insns));
                new_insn += insn - tmp_insns;
        }

        if (!new_prog) {
                /* Only calculating new length. */
                *new_len = new_insn - first_insn;
                if (*seen_ld_abs)
                        *new_len += 4; /* Prologue bits. */
                return 0;
        }

        pass++;
        if (new_flen != new_insn - first_insn) {
                new_flen = new_insn - first_insn;
                if (pass > 2)
                        goto err;
                goto do_pass;
        }

        kfree(addrs);
        BUG_ON(*new_len != new_flen);
        return 0;
err:
        kfree(addrs);
        return -EINVAL;
}

/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by a user never tries to
 * read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
        u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
        int pc, ret = 0;

        BUILD_BUG_ON(BPF_MEMWORDS > 16);

        masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
        if (!masks)
                return -ENOMEM;

        memset(masks, 0xff, flen * sizeof(*masks));

        for (pc = 0; pc < flen; pc++) {
                memvalid &= masks[pc];

                switch (filter[pc].code) {
                case BPF_ST:
                case BPF_STX:
                        memvalid |= (1 << filter[pc].k);
                        break;
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                        if (!(memvalid & (1 << filter[pc].k))) {
                                ret = -EINVAL;
                                goto error;
                        }
                        break;
                case BPF_JMP | BPF_JA:
                        /* A jump must set masks on target */
                        masks[pc + 1 + filter[pc].k] &= memvalid;
                        memvalid = ~0;
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        /* A jump must set masks on targets */
                        masks[pc + 1 + filter[pc].jt] &= memvalid;
                        masks[pc + 1 + filter[pc].jf] &= memvalid;
                        memvalid = ~0;
                        break;
                }
        }
error:
        kfree(masks);
        return ret;
}
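
/* Example (illustrative sketch, not part of the original file): a filter
 * that check_load_and_stores() rejects, because M[0] is read before it is
 * ever written:
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 0),	// A = M[0], never initialized
 *	BPF_STMT(BPF_RET | BPF_A, 0),
 *
 * Prefixing a store, e.g. BPF_STMT(BPF_ST, 0) after loading something
 * into A, marks the cell valid and lets the same load pass.
 */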

static bool chk_code_allowed(u16 code_to_probe)
{
        static const bool codes[] = {
                /* 32 bit ALU operations */
                [BPF_ALU | BPF_ADD | BPF_K] = true,
                [BPF_ALU | BPF_ADD | BPF_X] = true,
                [BPF_ALU | BPF_SUB | BPF_K] = true,
                [BPF_ALU | BPF_SUB | BPF_X] = true,
                [BPF_ALU | BPF_MUL | BPF_K] = true,
                [BPF_ALU | BPF_MUL | BPF_X] = true,
                [BPF_ALU | BPF_DIV | BPF_K] = true,
                [BPF_ALU | BPF_DIV | BPF_X] = true,
                [BPF_ALU | BPF_MOD | BPF_K] = true,
                [BPF_ALU | BPF_MOD | BPF_X] = true,
                [BPF_ALU | BPF_AND | BPF_K] = true,
                [BPF_ALU | BPF_AND | BPF_X] = true,
                [BPF_ALU | BPF_OR | BPF_K] = true,
                [BPF_ALU | BPF_OR | BPF_X] = true,
                [BPF_ALU | BPF_XOR | BPF_K] = true,
                [BPF_ALU | BPF_XOR | BPF_X] = true,
                [BPF_ALU | BPF_LSH | BPF_K] = true,
                [BPF_ALU | BPF_LSH | BPF_X] = true,
                [BPF_ALU | BPF_RSH | BPF_K] = true,
                [BPF_ALU | BPF_RSH | BPF_X] = true,
                [BPF_ALU | BPF_NEG] = true,
                /* Load instructions */
                [BPF_LD | BPF_W | BPF_ABS] = true,
                [BPF_LD | BPF_H | BPF_ABS] = true,
                [BPF_LD | BPF_B | BPF_ABS] = true,
                [BPF_LD | BPF_W | BPF_LEN] = true,
                [BPF_LD | BPF_W | BPF_IND] = true,
                [BPF_LD | BPF_H | BPF_IND] = true,
                [BPF_LD | BPF_B | BPF_IND] = true,
                [BPF_LD | BPF_IMM] = true,
                [BPF_LD | BPF_MEM] = true,
                [BPF_LDX | BPF_W | BPF_LEN] = true,
                [BPF_LDX | BPF_B | BPF_MSH] = true,
                [BPF_LDX | BPF_IMM] = true,
                [BPF_LDX | BPF_MEM] = true,
                /* Store instructions */
                [BPF_ST] = true,
                [BPF_STX] = true,
                /* Misc instructions */
                [BPF_MISC | BPF_TAX] = true,
                [BPF_MISC | BPF_TXA] = true,
                /* Return instructions */
                [BPF_RET | BPF_K] = true,
                [BPF_RET | BPF_A] = true,
                /* Jump instructions */
                [BPF_JMP | BPF_JA] = true,
                [BPF_JMP | BPF_JEQ | BPF_K] = true,
                [BPF_JMP | BPF_JEQ | BPF_X] = true,
                [BPF_JMP | BPF_JGE | BPF_K] = true,
                [BPF_JMP | BPF_JGE | BPF_X] = true,
                [BPF_JMP | BPF_JGT | BPF_K] = true,
                [BPF_JMP | BPF_JGT | BPF_X] = true,
                [BPF_JMP | BPF_JSET | BPF_K] = true,
                [BPF_JMP | BPF_JSET | BPF_X] = true,
        };

        if (code_to_probe >= ARRAY_SIZE(codes))
                return false;

        return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
                                unsigned int flen)
{
        if (filter == NULL)
                return false;
        if (flen == 0 || flen > BPF_MAXINSNS)
                return false;

        return true;
}

/**
 *      bpf_check_classic - verify socket filter code
 *      @filter: filter to verify
 *      @flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through, kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
                             unsigned int flen)
{
        bool anc_found;
        int pc;

        /* Check the filter code now */
        for (pc = 0; pc < flen; pc++) {
                const struct sock_filter *ftest = &filter[pc];

                /* May we actually operate on this code? */
                if (!chk_code_allowed(ftest->code))
                        return -EINVAL;

                /* Some instructions need special checks */
                switch (ftest->code) {
                case BPF_ALU | BPF_DIV | BPF_K:
                case BPF_ALU | BPF_MOD | BPF_K:
                        /* Check for division by zero */
                        if (ftest->k == 0)
                                return -EINVAL;
                        break;
                case BPF_ALU | BPF_LSH | BPF_K:
                case BPF_ALU | BPF_RSH | BPF_K:
                        if (ftest->k >= 32)
                                return -EINVAL;
                        break;
                case BPF_LD | BPF_MEM:
                case BPF_LDX | BPF_MEM:
                case BPF_ST:
                case BPF_STX:
                        /* Check for invalid memory addresses */
                        if (ftest->k >= BPF_MEMWORDS)
                                return -EINVAL;
                        break;
                case BPF_JMP | BPF_JA:
                        /* Note, a large ftest->k might cause loops.
                         * Compare this with conditional jumps below,
                         * where offsets are limited. --ANK (981016)
                         */
                        if (ftest->k >= (unsigned int)(flen - pc - 1))
                                return -EINVAL;
                        break;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        /* Both conditionals must be safe */
                        if (pc + ftest->jt + 1 >= flen ||
                            pc + ftest->jf + 1 >= flen)
                                return -EINVAL;
                        break;
                case BPF_LD | BPF_W | BPF_ABS:
                case BPF_LD | BPF_H | BPF_ABS:
                case BPF_LD | BPF_B | BPF_ABS:
                        anc_found = false;
                        if (bpf_anc_helper(ftest) & BPF_ANC)
                                anc_found = true;
                        /* Ancillary operation unknown or unsupported */
                        if (anc_found == false && ftest->k >= SKF_AD_OFF)
                                return -EINVAL;
                }
        }

        /* Last instruction must be a RET code */
        switch (filter[flen - 1].code) {
        case BPF_RET | BPF_K:
        case BPF_RET | BPF_A:
                return check_load_and_stores(filter, flen);
        }

        return -EINVAL;
}
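
/* Example (illustrative sketch, not part of the original file): a filter
 * bpf_check_classic() rejects up front because of a constant division
 * by zero:
 *
 *	BPF_STMT(BPF_ALU | BPF_DIV | BPF_K, 0),
 *	BPF_STMT(BPF_RET | BPF_A, 0),
 *
 * Division by a zero-valued X register cannot be caught statically here,
 * which is why bpf_convert_filter() emits an explicit runtime check for
 * the BPF_X variants.
 */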

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
                                      const struct sock_fprog *fprog)
{
        unsigned int fsize = bpf_classic_proglen(fprog);
        struct sock_fprog_kern *fkprog;

        fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
        if (!fp->orig_prog)
                return -ENOMEM;

        fkprog = fp->orig_prog;
        fkprog->len = fprog->len;

        fkprog->filter = kmemdup(fp->insns, fsize,
                                 GFP_KERNEL | __GFP_NOWARN);
        if (!fkprog->filter) {
                kfree(fp->orig_prog);
                return -ENOMEM;
        }

        return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
        struct sock_fprog_kern *fprog = fp->orig_prog;

        if (fprog) {
                kfree(fprog->filter);
                kfree(fprog);
        }
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
        if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
                bpf_prog_put(prog);
        } else {
                bpf_release_orig_filter(prog);
                bpf_prog_free(prog);
        }
}

static void __sk_filter_release(struct sk_filter *fp)
{
        __bpf_prog_release(fp->prog);
        kfree(fp);
}

/**
 *      sk_filter_release_rcu - Release a socket filter by rcu_head
 *      @rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

        __sk_filter_release(fp);
}

/**
 *      sk_filter_release - release a socket filter
 *      @fp: filter to remove
 *
 *      Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
        if (refcount_dec_and_test(&fp->refcnt))
                call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
        u32 filter_size = bpf_prog_size(fp->prog->len);

        atomic_sub(filter_size, &sk->sk_omem_alloc);
        sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
        u32 filter_size = bpf_prog_size(fp->prog->len);

        /* same check as in sock_kmalloc() */
        if (filter_size <= sysctl_optmem_max &&
            atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
                atomic_add(filter_size, &sk->sk_omem_alloc);
                return true;
        }
        return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
        if (!refcount_inc_not_zero(&fp->refcnt))
                return false;

        if (!__sk_filter_charge(sk, fp)) {
                sk_filter_release(fp);
                return false;
        }
        return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
        struct sock_filter *old_prog;
        struct bpf_prog *old_fp;
        int err, new_len, old_len = fp->len;
        bool seen_ld_abs = false;

        /* We are free to overwrite insns et al. right here as they
         * won't be used internally anymore at this point, after the
         * migration to the internal BPF instruction representation.
         */
        BUILD_BUG_ON(sizeof(struct sock_filter) !=
                     sizeof(struct bpf_insn));

        /* Conversion cannot happen on overlapping memory areas,
         * so we need to keep the user BPF around until the 2nd
         * pass. At this time, the user BPF is stored in fp->insns.
         */
        old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
                           GFP_KERNEL | __GFP_NOWARN);
        if (!old_prog) {
                err = -ENOMEM;
                goto out_err;
        }

        /* 1st pass: calculate the new program length. */
        err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
                                 &seen_ld_abs);
        if (err)
                goto out_err_free;

        /* Expand fp for appending the new filter representation. */
        old_fp = fp;
        fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
        if (!fp) {
                /* The old_fp is still around in case we couldn't
                 * allocate new memory, so uncharge on that one.
                 */
                fp = old_fp;
                err = -ENOMEM;
                goto out_err_free;
        }

        fp->len = new_len;

        /* 2nd pass: remap sock_filter insns into bpf_insn insns. */
        err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
                                 &seen_ld_abs);
        if (err)
                /* The 2nd bpf_convert_filter() can fail only if it fails
                 * to allocate memory; remapping must succeed. Note that
                 * at this time old_fp has already been released by
                 * krealloc().
                 */
                goto out_err_free;

        fp = bpf_prog_select_runtime(fp, &err);
        if (err)
                goto out_err_free;

        kfree(old_prog);
        return fp;

out_err_free:
        kfree(old_prog);
out_err:
        __bpf_prog_release(fp);
        return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
                                           bpf_aux_classic_check_t trans)
{
        int err;

        fp->bpf_func = NULL;
        fp->jited = 0;

        err = bpf_check_classic(fp->insns, fp->len);
        if (err) {
                __bpf_prog_release(fp);
                return ERR_PTR(err);
        }

        /* There might be additional checks and transformations
         * needed on classic filters, f.e. in case of seccomp.
         */
        if (trans) {
                err = trans(fp->insns, fp->len);
                if (err) {
                        __bpf_prog_release(fp);
                        return ERR_PTR(err);
                }
        }

        /* Probe if we can JIT compile the filter and if so, do
         * the compilation of the filter.
         */
        bpf_jit_compile(fp);

        /* JIT compiler couldn't process this filter, so do the
         * internal BPF translation for the optimized interpreter.
         */
        if (!fp->jited)
                fp = bpf_migrate_filter(fp);

        return fp;
}

/**
 *      bpf_prog_create - create an unattached filter
 *      @pfp: the unattached filter that is created
 *      @fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
        unsigned int fsize = bpf_classic_proglen(fprog);
        struct bpf_prog *fp;

        /* Make sure the new filter is there and of a valid length. */
        if (!bpf_check_basics_ok(fprog->filter, fprog->len))
                return -EINVAL;

        fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
        if (!fp)
                return -ENOMEM;

        memcpy(fp->insns, fprog->filter, fsize);

        fp->len = fprog->len;
        /* Since unattached filters are not copied back to user
         * space through sk_get_filter(), we do not need to hold
         * a copy here, and can spare ourselves the work.
         */
1358         fp->orig_prog = NULL;
1359
1360         /* bpf_prepare_filter() already takes care of freeing
1361          * memory in case something goes wrong.
1362          */
1363         fp = bpf_prepare_filter(fp, NULL);
1364         if (IS_ERR(fp))
1365                 return PTR_ERR(fp);
1366
1367         *pfp = fp;
1368         return 0;
1369 }
1370 EXPORT_SYMBOL_GPL(bpf_prog_create);
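
/* Editor's note: a hedged usage sketch, not code that exists elsewhere
 * in the tree. A kernel-side user builds a trivial classic filter that
 * accepts every packet; the single BPF_RET instruction and the 0xffff
 * accept length are assumptions chosen for brevity.
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffff),	(accept whole packet)
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *	int err;
 *
 *	err = bpf_prog_create(&prog, &fprog);
 *	if (err)
 *		return err;
 *	(... run packets through prog, then ...)
 *	bpf_prog_destroy(prog);
 */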
1371
1372 /**
1373  *      bpf_prog_create_from_user - create an unattached filter from user buffer
1374  *      @pfp: the unattached filter that is created
1375  *      @fprog: the filter program
1376  *      @trans: post-classic verifier transformation handler
1377  *      @save_orig: save classic BPF program
1378  *
1379  * This function does the same as bpf_prog_create(), except that it
1380  * builds up its insns buffer from a user-space-provided buffer. It
1381  * also allows passing a bpf_aux_classic_check_t handler.
1382  */
1383 int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
1384                               bpf_aux_classic_check_t trans, bool save_orig)
1385 {
1386         unsigned int fsize = bpf_classic_proglen(fprog);
1387         struct bpf_prog *fp;
1388         int err;
1389
1390         /* Make sure the new filter is present and of a valid length. */
1391         if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1392                 return -EINVAL;
1393
1394         fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1395         if (!fp)
1396                 return -ENOMEM;
1397
1398         if (copy_from_user(fp->insns, fprog->filter, fsize)) {
1399                 __bpf_prog_free(fp);
1400                 return -EFAULT;
1401         }
1402
1403         fp->len = fprog->len;
1404         fp->orig_prog = NULL;
1405
1406         if (save_orig) {
1407                 err = bpf_prog_store_orig_filter(fp, fprog);
1408                 if (err) {
1409                         __bpf_prog_free(fp);
1410                         return -ENOMEM;
1411                 }
1412         }
1413
1414         /* bpf_prepare_filter() already takes care of freeing
1415          * memory in case something goes wrong.
1416          */
1417         fp = bpf_prepare_filter(fp, trans);
1418         if (IS_ERR(fp))
1419                 return PTR_ERR(fp);
1420
1421         *pfp = fp;
1422         return 0;
1423 }
1424 EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);
1425
1426 void bpf_prog_destroy(struct bpf_prog *fp)
1427 {
1428         __bpf_prog_release(fp);
1429 }
1430 EXPORT_SYMBOL_GPL(bpf_prog_destroy);
1431
1432 static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
1433 {
1434         struct sk_filter *fp, *old_fp;
1435
1436         fp = kmalloc(sizeof(*fp), GFP_KERNEL);
1437         if (!fp)
1438                 return -ENOMEM;
1439
1440         fp->prog = prog;
1441
1442         if (!__sk_filter_charge(sk, fp)) {
1443                 kfree(fp);
1444                 return -ENOMEM;
1445         }
1446         refcount_set(&fp->refcnt, 1);
1447
1448         old_fp = rcu_dereference_protected(sk->sk_filter,
1449                                            lockdep_sock_is_held(sk));
1450         rcu_assign_pointer(sk->sk_filter, fp);
1451
1452         if (old_fp)
1453                 sk_filter_uncharge(sk, old_fp);
1454
1455         return 0;
1456 }
1457
1458 static
1459 struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
1460 {
1461         unsigned int fsize = bpf_classic_proglen(fprog);
1462         struct bpf_prog *prog;
1463         int err;
1464
1465         if (sock_flag(sk, SOCK_FILTER_LOCKED))
1466                 return ERR_PTR(-EPERM);
1467
1468         /* Make sure the new filter is present and of a valid length. */
1469         if (!bpf_check_basics_ok(fprog->filter, fprog->len))
1470                 return ERR_PTR(-EINVAL);
1471
1472         prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
1473         if (!prog)
1474                 return ERR_PTR(-ENOMEM);
1475
1476         if (copy_from_user(prog->insns, fprog->filter, fsize)) {
1477                 __bpf_prog_free(prog);
1478                 return ERR_PTR(-EFAULT);
1479         }
1480
1481         prog->len = fprog->len;
1482
1483         err = bpf_prog_store_orig_filter(prog, fprog);
1484         if (err) {
1485                 __bpf_prog_free(prog);
1486                 return ERR_PTR(-ENOMEM);
1487         }
1488
1489         /* bpf_prepare_filter() already takes care of freeing
1490          * memory in case something goes wrong.
1491          */
1492         return bpf_prepare_filter(prog, NULL);
1493 }
1494
1495 /**
1496  *      sk_attach_filter - attach a socket filter
1497  *      @fprog: the filter program
1498  *      @sk: the socket to use
1499  *
1500  * Attach the user's filter code. We first run some sanity checks on
1501  * it to make sure it does not explode on us later. If an error
1502  * occurs or there is insufficient memory for the filter, a negative
1503  * errno code is returned. On success the return is zero.
1504  */
1505 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1506 {
1507         struct bpf_prog *prog = __get_filter(fprog, sk);
1508         int err;
1509
1510         if (IS_ERR(prog))
1511                 return PTR_ERR(prog);
1512
1513         err = __sk_attach_prog(prog, sk);
1514         if (err < 0) {
1515                 __bpf_prog_release(prog);
1516                 return err;
1517         }
1518
1519         return 0;
1520 }
1521 EXPORT_SYMBOL_GPL(sk_attach_filter);
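
/* Editor's note: for reference, the usual user space path into
 * sk_attach_filter() is the SO_ATTACH_FILTER socket option. A hedged
 * sketch (filter contents and error handling are illustrative only):
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	(accept all)
 *	};
 *	struct sock_fprog bpf = {
 *		.len	= sizeof(code) / sizeof(code[0]),
 *		.filter	= code,
 *	};
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &bpf, sizeof(bpf)))
 *		perror("SO_ATTACH_FILTER");
 */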
1522
1523 int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
1524 {
1525         struct bpf_prog *prog = __get_filter(fprog, sk);
1526         int err;
1527
1528         if (IS_ERR(prog))
1529                 return PTR_ERR(prog);
1530
1531         if (bpf_prog_size(prog->len) > sysctl_optmem_max)
1532                 err = -ENOMEM;
1533         else
1534                 err = reuseport_attach_prog(sk, prog);
1535
1536         if (err)
1537                 __bpf_prog_release(prog);
1538
1539         return err;
1540 }
1541
1542 static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
1543 {
1544         if (sock_flag(sk, SOCK_FILTER_LOCKED))
1545                 return ERR_PTR(-EPERM);
1546
1547         return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
1548 }
1549
1550 int sk_attach_bpf(u32 ufd, struct sock *sk)
1551 {
1552         struct bpf_prog *prog = __get_bpf(ufd, sk);
1553         int err;
1554
1555         if (IS_ERR(prog))
1556                 return PTR_ERR(prog);
1557
1558         err = __sk_attach_prog(prog, sk);
1559         if (err < 0) {
1560                 bpf_prog_put(prog);
1561                 return err;
1562         }
1563
1564         return 0;
1565 }
1566
1567 int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
1568 {
1569         struct bpf_prog *prog;
1570         int err;
1571
1572         if (sock_flag(sk, SOCK_FILTER_LOCKED))
1573                 return -EPERM;
1574
1575         prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
1576         if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL)
1577                 prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
1578         if (IS_ERR(prog))
1579                 return PTR_ERR(prog);
1580
1581         if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
1582                 /* Like other non-BPF_PROG_TYPE_SOCKET_FILTER
1583                  * bpf progs (e.g. sockmap), it relies on the
1584                  * limits imposed by bpf_prog_load().
1585                  * Hence, sysctl_optmem_max is not checked.
1586                  */
1587                 if ((sk->sk_type != SOCK_STREAM &&
1588                      sk->sk_type != SOCK_DGRAM) ||
1589                     (sk->sk_protocol != IPPROTO_UDP &&
1590                      sk->sk_protocol != IPPROTO_TCP) ||
1591                     (sk->sk_family != AF_INET &&
1592                      sk->sk_family != AF_INET6)) {
1593                         err = -ENOTSUPP;
1594                         goto err_prog_put;
1595                 }
1596         } else {
1597                 /* BPF_PROG_TYPE_SOCKET_FILTER */
1598                 if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
1599                         err = -ENOMEM;
1600                         goto err_prog_put;
1601                 }
1602         }
1603
1604         err = reuseport_attach_prog(sk, prog);
1605 err_prog_put:
1606         if (err)
1607                 bpf_prog_put(prog);
1608
1609         return err;
1610 }
1611
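/* Editor's note: sk_reuseport_attach_bpf() above is reached via the
 * SO_ATTACH_REUSEPORT_EBPF socket option, which takes the fd of an
 * already loaded program. A hedged user space sketch; prog_fd is
 * assumed to come from a prior bpf(BPF_PROG_LOAD, ...) call:
 *
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
 *		       &prog_fd, sizeof(prog_fd)))
 *		perror("SO_ATTACH_REUSEPORT_EBPF");
 */
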
1612 void sk_reuseport_prog_free(struct bpf_prog *prog)
1613 {
1614         if (!prog)
1615                 return;
1616
1617         if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
1618                 bpf_prog_put(prog);
1619         else
1620                 bpf_prog_destroy(prog);
1621 }
1622
1623 struct bpf_scratchpad {
1624         union {
1625                 __be32 diff[MAX_BPF_STACK / sizeof(__be32)];
1626                 u8     buff[MAX_BPF_STACK];
1627         };
1628 };
1629
1630 static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);
1631
1632 static inline int __bpf_try_make_writable(struct sk_buff *skb,
1633                                           unsigned int write_len)
1634 {
1635         return skb_ensure_writable(skb, write_len);
1636 }
1637
1638 static inline int bpf_try_make_writable(struct sk_buff *skb,
1639                                         unsigned int write_len)
1640 {
1641         int err = __bpf_try_make_writable(skb, write_len);
1642
1643         bpf_compute_data_pointers(skb);
1644         return err;
1645 }
1646
1647 static int bpf_try_make_head_writable(struct sk_buff *skb)
1648 {
1649         return bpf_try_make_writable(skb, skb_headlen(skb));
1650 }
1651
1652 static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
1653 {
1654         if (skb_at_tc_ingress(skb))
1655                 skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1656 }
1657
1658 static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
1659 {
1660         if (skb_at_tc_ingress(skb))
1661                 skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
1662 }
1663
1664 BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
1665            const void *, from, u32, len, u64, flags)
1666 {
1667         void *ptr;
1668
1669         if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
1670                 return -EINVAL;
1671         if (unlikely(offset > 0xffff))
1672                 return -EFAULT;
1673         if (unlikely(bpf_try_make_writable(skb, offset + len)))
1674                 return -EFAULT;
1675
1676         ptr = skb->data + offset;
1677         if (flags & BPF_F_RECOMPUTE_CSUM)
1678                 __skb_postpull_rcsum(skb, ptr, len, offset);
1679
1680         memcpy(ptr, from, len);
1681
1682         if (flags & BPF_F_RECOMPUTE_CSUM)
1683                 __skb_postpush_rcsum(skb, ptr, len, offset);
1684         if (flags & BPF_F_INVALIDATE_HASH)
1685                 skb_clear_hash(skb);
1686
1687         return 0;
1688 }
1689
1690 static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
1691         .func           = bpf_skb_store_bytes,
1692         .gpl_only       = false,
1693         .ret_type       = RET_INTEGER,
1694         .arg1_type      = ARG_PTR_TO_CTX,
1695         .arg2_type      = ARG_ANYTHING,
1696         .arg3_type      = ARG_PTR_TO_MEM,
1697         .arg4_type      = ARG_CONST_SIZE,
1698         .arg5_type      = ARG_ANYTHING,
1699 };
1700
1701 BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
1702            void *, to, u32, len)
1703 {
1704         void *ptr;
1705
1706         if (unlikely(offset > 0xffff))
1707                 goto err_clear;
1708
1709         ptr = skb_header_pointer(skb, offset, len, to);
1710         if (unlikely(!ptr))
1711                 goto err_clear;
1712         if (ptr != to)
1713                 memcpy(to, ptr, len);
1714
1715         return 0;
1716 err_clear:
1717         memset(to, 0, len);
1718         return -EFAULT;
1719 }
1720
1721 static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
1722         .func           = bpf_skb_load_bytes,
1723         .gpl_only       = false,
1724         .ret_type       = RET_INTEGER,
1725         .arg1_type      = ARG_PTR_TO_CTX,
1726         .arg2_type      = ARG_ANYTHING,
1727         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
1728         .arg4_type      = ARG_CONST_SIZE,
1729 };
1730
1731 BPF_CALL_4(bpf_flow_dissector_load_bytes,
1732            const struct bpf_flow_dissector *, ctx, u32, offset,
1733            void *, to, u32, len)
1734 {
1735         void *ptr;
1736
1737         if (unlikely(offset > 0xffff))
1738                 goto err_clear;
1739
1740         if (unlikely(!ctx->skb))
1741                 goto err_clear;
1742
1743         ptr = skb_header_pointer(ctx->skb, offset, len, to);
1744         if (unlikely(!ptr))
1745                 goto err_clear;
1746         if (ptr != to)
1747                 memcpy(to, ptr, len);
1748
1749         return 0;
1750 err_clear:
1751         memset(to, 0, len);
1752         return -EFAULT;
1753 }
1754
1755 static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
1756         .func           = bpf_flow_dissector_load_bytes,
1757         .gpl_only       = false,
1758         .ret_type       = RET_INTEGER,
1759         .arg1_type      = ARG_PTR_TO_CTX,
1760         .arg2_type      = ARG_ANYTHING,
1761         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
1762         .arg4_type      = ARG_CONST_SIZE,
1763 };
1764
1765 BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
1766            u32, offset, void *, to, u32, len, u32, start_header)
1767 {
1768         u8 *end = skb_tail_pointer(skb);
1769         u8 *net = skb_network_header(skb);
1770         u8 *mac = skb_mac_header(skb);
1771         u8 *ptr;
1772
1773         if (unlikely(offset > 0xffff || len > (end - mac)))
1774                 goto err_clear;
1775
1776         switch (start_header) {
1777         case BPF_HDR_START_MAC:
1778                 ptr = mac + offset;
1779                 break;
1780         case BPF_HDR_START_NET:
1781                 ptr = net + offset;
1782                 break;
1783         default:
1784                 goto err_clear;
1785         }
1786
1787         if (likely(ptr >= mac && ptr + len <= end)) {
1788                 memcpy(to, ptr, len);
1789                 return 0;
1790         }
1791
1792 err_clear:
1793         memset(to, 0, len);
1794         return -EFAULT;
1795 }
1796
1797 static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
1798         .func           = bpf_skb_load_bytes_relative,
1799         .gpl_only       = false,
1800         .ret_type       = RET_INTEGER,
1801         .arg1_type      = ARG_PTR_TO_CTX,
1802         .arg2_type      = ARG_ANYTHING,
1803         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
1804         .arg4_type      = ARG_CONST_SIZE,
1805         .arg5_type      = ARG_ANYTHING,
1806 };
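
/* Editor's note: a hedged sketch of how a tc BPF program might use the
 * helper above to read the IPv4 header relative to the network header,
 * which works the same whether or not a mac header is present:
 *
 *	struct iphdr iph;
 *
 *	if (bpf_skb_load_bytes_relative(skb, 0, &iph, sizeof(iph),
 *					BPF_HDR_START_NET))
 *		return TC_ACT_OK;	(header not available)
 */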
1807
1808 BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
1809 {
1810         /* The idea is the following: should the needed direct read/write
1811          * test fail at runtime, we can pull in more data and redo the
1812          * test, since implicitly we invalidate the previous checks here.
1813          *
1814          * Or, since we know how much we need to make readable/writable,
1815          * this can be done once at the beginning of the program for the
1816          * direct access case. By this we overcome the limitation of only
1817          * the current headroom being accessible.
1818          */
1819         return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
1820 }
1821
1822 static const struct bpf_func_proto bpf_skb_pull_data_proto = {
1823         .func           = bpf_skb_pull_data,
1824         .gpl_only       = false,
1825         .ret_type       = RET_INTEGER,
1826         .arg1_type      = ARG_PTR_TO_CTX,
1827         .arg2_type      = ARG_ANYTHING,
1828 };
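
/* Editor's note: a hedged sketch of the direct packet access pattern
 * described in the comment above, as seen from a tc program. ETH_HLEN
 * and the verdicts are assumptions; data/data_end must be reloaded
 * after a successful pull since the previous checks are invalidated:
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *
 *	if (data + ETH_HLEN > data_end) {
 *		if (bpf_skb_pull_data(skb, ETH_HLEN))
 *			return TC_ACT_SHOT;
 *		data = (void *)(long)skb->data;
 *		data_end = (void *)(long)skb->data_end;
 *		if (data + ETH_HLEN > data_end)
 *			return TC_ACT_SHOT;
 *	}
 */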
1829
1830 BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
1831 {
1832         return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
1833 }
1834
1835 static const struct bpf_func_proto bpf_sk_fullsock_proto = {
1836         .func           = bpf_sk_fullsock,
1837         .gpl_only       = false,
1838         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
1839         .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
1840 };
1841
1842 static inline int sk_skb_try_make_writable(struct sk_buff *skb,
1843                                            unsigned int write_len)
1844 {
1845         int err = __bpf_try_make_writable(skb, write_len);
1846
1847         bpf_compute_data_end_sk_skb(skb);
1848         return err;
1849 }
1850
1851 BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
1852 {
1853         /* The idea is the following: should the needed direct read/write
1854          * test fail at runtime, we can pull in more data and redo the
1855          * test, since implicitly we invalidate the previous checks here.
1856          *
1857          * Or, since we know how much we need to make readable/writable,
1858          * this can be done once at the beginning of the program for the
1859          * direct access case. By this we overcome the limitation of only
1860          * the current headroom being accessible.
1861          */
1862         return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
1863 }
1864
1865 static const struct bpf_func_proto sk_skb_pull_data_proto = {
1866         .func           = sk_skb_pull_data,
1867         .gpl_only       = false,
1868         .ret_type       = RET_INTEGER,
1869         .arg1_type      = ARG_PTR_TO_CTX,
1870         .arg2_type      = ARG_ANYTHING,
1871 };
1872
1873 BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
1874            u64, from, u64, to, u64, flags)
1875 {
1876         __sum16 *ptr;
1877
1878         if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
1879                 return -EINVAL;
1880         if (unlikely(offset > 0xffff || offset & 1))
1881                 return -EFAULT;
1882         if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1883                 return -EFAULT;
1884
1885         ptr = (__sum16 *)(skb->data + offset);
1886         switch (flags & BPF_F_HDR_FIELD_MASK) {
1887         case 0:
1888                 if (unlikely(from != 0))
1889                         return -EINVAL;
1890
1891                 csum_replace_by_diff(ptr, to);
1892                 break;
1893         case 2:
1894                 csum_replace2(ptr, from, to);
1895                 break;
1896         case 4:
1897                 csum_replace4(ptr, from, to);
1898                 break;
1899         default:
1900                 return -EINVAL;
1901         }
1902
1903         return 0;
1904 }
1905
1906 static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
1907         .func           = bpf_l3_csum_replace,
1908         .gpl_only       = false,
1909         .ret_type       = RET_INTEGER,
1910         .arg1_type      = ARG_PTR_TO_CTX,
1911         .arg2_type      = ARG_ANYTHING,
1912         .arg3_type      = ARG_ANYTHING,
1913         .arg4_type      = ARG_ANYTHING,
1914         .arg5_type      = ARG_ANYTHING,
1915 };
1916
1917 BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
1918            u64, from, u64, to, u64, flags)
1919 {
1920         bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
1921         bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
1922         bool do_mforce = flags & BPF_F_MARK_ENFORCE;
1923         __sum16 *ptr;
1924
1925         if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
1926                                BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
1927                 return -EINVAL;
1928         if (unlikely(offset > 0xffff || offset & 1))
1929                 return -EFAULT;
1930         if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
1931                 return -EFAULT;
1932
1933         ptr = (__sum16 *)(skb->data + offset);
1934         if (is_mmzero && !do_mforce && !*ptr)
1935                 return 0;
1936
1937         switch (flags & BPF_F_HDR_FIELD_MASK) {
1938         case 0:
1939                 if (unlikely(from != 0))
1940                         return -EINVAL;
1941
1942                 inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
1943                 break;
1944         case 2:
1945                 inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
1946                 break;
1947         case 4:
1948                 inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
1949                 break;
1950         default:
1951                 return -EINVAL;
1952         }
1953
1954         if (is_mmzero && !*ptr)
1955                 *ptr = CSUM_MANGLED_0;
1956         return 0;
1957 }
1958
1959 static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
1960         .func           = bpf_l4_csum_replace,
1961         .gpl_only       = false,
1962         .ret_type       = RET_INTEGER,
1963         .arg1_type      = ARG_PTR_TO_CTX,
1964         .arg2_type      = ARG_ANYTHING,
1965         .arg3_type      = ARG_ANYTHING,
1966         .arg4_type      = ARG_ANYTHING,
1967         .arg5_type      = ARG_ANYTHING,
1968 };
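
/* Editor's note: a hedged sketch of a NAT-style rewrite using the
 * helper above from a BPF program. TCP_CSUM_OFF, IP_CSUM_OFF and
 * IP_SRC_OFF are hypothetical offsets the program would compute; the
 * pseudo header flag is needed since the addresses feed the TCP
 * checksum via the pseudo header:
 *
 *	__be32 old_ip, new_ip;	(loaded/chosen by the program)
 *
 *	bpf_l4_csum_replace(skb, TCP_CSUM_OFF, old_ip, new_ip,
 *			    BPF_F_PSEUDO_HDR | sizeof(new_ip));
 *	bpf_l3_csum_replace(skb, IP_CSUM_OFF, old_ip, new_ip,
 *			    sizeof(new_ip));
 *	bpf_skb_store_bytes(skb, IP_SRC_OFF, &new_ip, sizeof(new_ip), 0);
 */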
1969
1970 BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
1971            __be32 *, to, u32, to_size, __wsum, seed)
1972 {
1973         struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
1974         u32 diff_size = from_size + to_size;
1975         int i, j = 0;
1976
1977         /* This is quite flexible, some examples:
1978          *
1979          * from_size == 0, to_size > 0,  seed := csum --> pushing data
1980          * from_size > 0,  to_size == 0, seed := csum --> pulling data
1981          * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
1982          *
1983          * Even for diffing, from_size and to_size don't need to be equal.
1984          */
1985         if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
1986                      diff_size > sizeof(sp->diff)))
1987                 return -EINVAL;
1988
1989         for (i = 0; i < from_size / sizeof(__be32); i++, j++)
1990                 sp->diff[j] = ~from[i];
1991         for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
1992                 sp->diff[j] = to[i];
1993
1994         return csum_partial(sp->diff, diff_size, seed);
1995 }
1996
1997 static const struct bpf_func_proto bpf_csum_diff_proto = {
1998         .func           = bpf_csum_diff,
1999         .gpl_only       = false,
2000         .pkt_access     = true,
2001         .ret_type       = RET_INTEGER,
2002         .arg1_type      = ARG_PTR_TO_MEM_OR_NULL,
2003         .arg2_type      = ARG_CONST_SIZE_OR_ZERO,
2004         .arg3_type      = ARG_PTR_TO_MEM_OR_NULL,
2005         .arg4_type      = ARG_CONST_SIZE_OR_ZERO,
2006         .arg5_type      = ARG_ANYTHING,
2007 };
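
/* Editor's note: a hedged sketch combining bpf_csum_diff() with the
 * replace-by-diff case of bpf_l4_csum_replace() (from == 0, no field
 * size in flags), e.g. for an IPv6 address rewrite where the 2/4-byte
 * cases do not fit. l4_csum_off is a hypothetical offset:
 *
 *	struct in6_addr old_sip, new_sip;
 *	s64 diff;
 *
 *	diff = bpf_csum_diff((__be32 *)&old_sip, sizeof(old_sip),
 *			     (__be32 *)&new_sip, sizeof(new_sip), 0);
 *	bpf_l4_csum_replace(skb, l4_csum_off, 0, diff, BPF_F_PSEUDO_HDR);
 */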
2008
2009 BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
2010 {
2011         /* The interface is to be used in combination with bpf_csum_diff()
2012          * for direct packet writes. Checksum rotation for alignment as
2013          * well as emulating csum_sub() can be done from the eBPF program.
2014          */
2015         if (skb->ip_summed == CHECKSUM_COMPLETE)
2016                 return (skb->csum = csum_add(skb->csum, csum));
2017
2018         return -ENOTSUPP;
2019 }
2020
2021 static const struct bpf_func_proto bpf_csum_update_proto = {
2022         .func           = bpf_csum_update,
2023         .gpl_only       = false,
2024         .ret_type       = RET_INTEGER,
2025         .arg1_type      = ARG_PTR_TO_CTX,
2026         .arg2_type      = ARG_ANYTHING,
2027 };
2028
2029 static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
2030 {
2031         return dev_forward_skb(dev, skb);
2032 }
2033
2034 static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
2035                                       struct sk_buff *skb)
2036 {
2037         int ret = ____dev_forward_skb(dev, skb);
2038
2039         if (likely(!ret)) {
2040                 skb->dev = dev;
2041                 ret = netif_rx(skb);
2042         }
2043
2044         return ret;
2045 }
2046
2047 static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
2048 {
2049         int ret;
2050
2051         if (dev_xmit_recursion()) {
2052                 net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
2053                 kfree_skb(skb);
2054                 return -ENETDOWN;
2055         }
2056
2057         skb->dev = dev;
2058
2059         dev_xmit_recursion_inc();
2060         ret = dev_queue_xmit(skb);
2061         dev_xmit_recursion_dec();
2062
2063         return ret;
2064 }
2065
2066 static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
2067                                  u32 flags)
2068 {
2069         unsigned int mlen = skb_network_offset(skb);
2070
2071         if (mlen) {
2072                 __skb_pull(skb, mlen);
2073
2074                 /* At ingress, the mac header has already been pulled once.
2075                  * At egress, skb_postpull_rcsum() has to be done in case
2076                  * the skb originated from ingress (i.e. a forwarded skb)
2077                  * to ensure that the rcsum starts at the net header.
2078                  */
2079                 if (!skb_at_tc_ingress(skb))
2080                         skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
2081         }
2082         skb_pop_mac_header(skb);
2083         skb_reset_mac_len(skb);
2084         return flags & BPF_F_INGRESS ?
2085                __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
2086 }
2087
2088 static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
2089                                  u32 flags)
2090 {
2091         /* Verify that a link layer header is carried */
2092         if (unlikely(skb->mac_header >= skb->network_header)) {
2093                 kfree_skb(skb);
2094                 return -ERANGE;
2095         }
2096
2097         bpf_push_mac_rcsum(skb);
2098         return flags & BPF_F_INGRESS ?
2099                __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
2100 }
2101
2102 static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
2103                           u32 flags)
2104 {
2105         if (dev_is_mac_header_xmit(dev))
2106                 return __bpf_redirect_common(skb, dev, flags);
2107         else
2108                 return __bpf_redirect_no_mac(skb, dev, flags);
2109 }
2110
2111 BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
2112 {
2113         struct net_device *dev;
2114         struct sk_buff *clone;
2115         int ret;
2116
2117         if (unlikely(flags & ~(BPF_F_INGRESS)))
2118                 return -EINVAL;
2119
2120         dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
2121         if (unlikely(!dev))
2122                 return -EINVAL;
2123
2124         clone = skb_clone(skb, GFP_ATOMIC);
2125         if (unlikely(!clone))
2126                 return -ENOMEM;
2127
2128         /* For direct write, we need to keep the invariant that the skbs
2129          * we're dealing with are uncloned. Should uncloning fail
2130          * here, we need to free the clone that was just generated
2131          * before bailing out.
2132          */
2133         ret = bpf_try_make_head_writable(skb);
2134         if (unlikely(ret)) {
2135                 kfree_skb(clone);
2136                 return -ENOMEM;
2137         }
2138
2139         return __bpf_redirect(clone, dev, flags);
2140 }
2141
2142 static const struct bpf_func_proto bpf_clone_redirect_proto = {
2143         .func           = bpf_clone_redirect,
2144         .gpl_only       = false,
2145         .ret_type       = RET_INTEGER,
2146         .arg1_type      = ARG_PTR_TO_CTX,
2147         .arg2_type      = ARG_ANYTHING,
2148         .arg3_type      = ARG_ANYTHING,
2149 };
2150
2151 DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
2152 EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);
2153
2154 BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
2155 {
2156         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
2157
2158         if (unlikely(flags & ~(BPF_F_INGRESS)))
2159                 return TC_ACT_SHOT;
2160
2161         ri->flags = flags;
2162         ri->tgt_index = ifindex;
2163
2164         return TC_ACT_REDIRECT;
2165 }
2166
2167 int skb_do_redirect(struct sk_buff *skb)
2168 {
2169         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
2170         struct net_device *dev;
2171
2172         dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->tgt_index);
2173         ri->tgt_index = 0;
2174         if (unlikely(!dev)) {
2175                 kfree_skb(skb);
2176                 return -EINVAL;
2177         }
2178
2179         return __bpf_redirect(skb, dev, ri->flags);
2180 }
2181
2182 static const struct bpf_func_proto bpf_redirect_proto = {
2183         .func           = bpf_redirect,
2184         .gpl_only       = false,
2185         .ret_type       = RET_INTEGER,
2186         .arg1_type      = ARG_ANYTHING,
2187         .arg2_type      = ARG_ANYTHING,
2188 };
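
/* Editor's note: a hedged sketch of a minimal tc program using the
 * helper above; the returned TC_ACT_REDIRECT is then acted upon by
 * skb_do_redirect() from the tc layer. IFINDEX is an assumed value,
 * and passing BPF_F_INGRESS instead of 0 would redirect to the
 * device's ingress path:
 *
 *	SEC("tc")
 *	int redirect_prog(struct __sk_buff *skb)
 *	{
 *		return bpf_redirect(IFINDEX, 0);	(egress of IFINDEX)
 *	}
 */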
2189
2190 BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
2191 {
2192         msg->apply_bytes = bytes;
2193         return 0;
2194 }
2195
2196 static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
2197         .func           = bpf_msg_apply_bytes,
2198         .gpl_only       = false,
2199         .ret_type       = RET_INTEGER,
2200         .arg1_type      = ARG_PTR_TO_CTX,
2201         .arg2_type      = ARG_ANYTHING,
2202 };
2203
2204 BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
2205 {
2206         msg->cork_bytes = bytes;
2207         return 0;
2208 }
2209
2210 static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
2211         .func           = bpf_msg_cork_bytes,
2212         .gpl_only       = false,
2213         .ret_type       = RET_INTEGER,
2214         .arg1_type      = ARG_PTR_TO_CTX,
2215         .arg2_type      = ARG_ANYTHING,
2216 };
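
/* Editor's note: a hedged sketch of a sk_msg verdict program using the
 * two helpers above to defer its decision until an assumed 8-byte
 * application header is fully queued, then scope the verdict to just
 * those bytes:
 *
 *	void *data = msg->data;
 *	void *data_end = msg->data_end;
 *
 *	if (data + 8 > data_end) {
 *		bpf_msg_cork_bytes(msg, 8);	(held, re-run at 8 bytes)
 *		return SK_PASS;
 *	}
 *	bpf_msg_apply_bytes(msg, 8);
 *	return SK_PASS;
 */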
2217
2218 BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
2219            u32, end, u64, flags)
2220 {
2221         u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
2222         u32 first_sge, last_sge, i, shift, bytes_sg_total;
2223         struct scatterlist *sge;
2224         u8 *raw, *to, *from;
2225         struct page *page;
2226
2227         if (unlikely(flags || end <= start))
2228                 return -EINVAL;
2229
2230         /* First find the starting scatterlist element */
2231         i = msg->sg.start;
2232         do {
2233                 len = sk_msg_elem(msg, i)->length;
2234                 if (start < offset + len)
2235                         break;
2236                 offset += len;
2237                 sk_msg_iter_var_next(i);
2238         } while (i != msg->sg.end);
2239
2240         if (unlikely(start >= offset + len))
2241                 return -EINVAL;
2242
2243         first_sge = i;
2244         /* The start may point into the sg element so we need to also
2245          * account for the headroom.
2246          */
2247         bytes_sg_total = start - offset + bytes;
2248         if (!msg->sg.copy[i] && bytes_sg_total <= len)
2249                 goto out;
2250
2251         /* At this point we need to linearize multiple scatterlist
2252          * elements or a single shared page. Either way we need to
2253          * copy into a linear buffer exclusively owned by BPF. Then
2254          * place the buffer in the scatterlist and fixup the original
2255          * entries by removing the entries now in the linear buffer
2256          * and shifting the remaining entries. For now we do not try
2257          * to copy partial entries to avoid complexity of running out
2258          * of sg_entry slots. The downside is reading a single byte
2259          * will copy the entire sg entry.
2260          */
2261         do {
2262                 copy += sk_msg_elem(msg, i)->length;
2263                 sk_msg_iter_var_next(i);
2264                 if (bytes_sg_total <= copy)
2265                         break;
2266         } while (i != msg->sg.end);
2267         last_sge = i;
2268
2269         if (unlikely(bytes_sg_total > copy))
2270                 return -EINVAL;
2271
2272         page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2273                            get_order(copy));
2274         if (unlikely(!page))
2275                 return -ENOMEM;
2276
2277         raw = page_address(page);
2278         i = first_sge;
2279         do {
2280                 sge = sk_msg_elem(msg, i);
2281                 from = sg_virt(sge);
2282                 len = sge->length;
2283                 to = raw + poffset;
2284
2285                 memcpy(to, from, len);
2286                 poffset += len;
2287                 sge->length = 0;
2288                 put_page(sg_page(sge));
2289
2290                 sk_msg_iter_var_next(i);
2291         } while (i != last_sge);
2292
2293         sg_set_page(&msg->sg.data[first_sge], page, copy, 0);
2294
2295         /* To repair the sg ring we need to shift entries. If we only
2296          * had a single entry, though, we can just replace it and
2297          * be done. Otherwise walk the ring and shift the entries.
2298          */
2299         WARN_ON_ONCE(last_sge == first_sge);
2300         shift = last_sge > first_sge ?
2301                 last_sge - first_sge - 1 :
2302                 MAX_SKB_FRAGS - first_sge + last_sge - 1;
2303         if (!shift)
2304                 goto out;
2305
2306         i = first_sge;
2307         sk_msg_iter_var_next(i);
2308         do {
2309                 u32 move_from;
2310
2311                 if (i + shift >= MAX_MSG_FRAGS)
2312                         move_from = i + shift - MAX_MSG_FRAGS;
2313                 else
2314                         move_from = i + shift;
2315                 if (move_from == msg->sg.end)
2316                         break;
2317
2318                 msg->sg.data[i] = msg->sg.data[move_from];
2319                 msg->sg.data[move_from].length = 0;
2320                 msg->sg.data[move_from].page_link = 0;
2321                 msg->sg.data[move_from].offset = 0;
2322                 sk_msg_iter_var_next(i);
2323         } while (1);
2324
2325         msg->sg.end = msg->sg.end - shift > msg->sg.end ?
2326                       msg->sg.end - shift + MAX_MSG_FRAGS :
2327                       msg->sg.end - shift;
2328 out:
2329         msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
2330         msg->data_end = msg->data + bytes;
2331         return 0;
2332 }
2333
2334 static const struct bpf_func_proto bpf_msg_pull_data_proto = {
2335         .func           = bpf_msg_pull_data,
2336         .gpl_only       = false,
2337         .ret_type       = RET_INTEGER,
2338         .arg1_type      = ARG_PTR_TO_CTX,
2339         .arg2_type      = ARG_ANYTHING,
2340         .arg3_type      = ARG_ANYTHING,
2341         .arg4_type      = ARG_ANYTHING,
2342 };
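
/* Editor's note: a hedged sketch of a sk_msg program using the helper
 * above to make the first four bytes directly readable, with the
 * mandatory recheck of the data pointers it recomputes:
 *
 *	__u32 *magic;
 *
 *	if (bpf_msg_pull_data(msg, 0, 4, 0))
 *		return SK_DROP;
 *	magic = msg->data;
 *	if ((void *)(magic + 1) > msg->data_end)
 *		return SK_DROP;
 *	(... parse *magic ...)
 */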
2343
2344 BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
2345            u32, len, u64, flags)
2346 {
2347         struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
2348         u32 new, i = 0, l, space, copy = 0, offset = 0;
2349         u8 *raw, *to, *from;
2350         struct page *page;
2351
2352         if (unlikely(flags))
2353                 return -EINVAL;
2354
2355         /* First find the starting scatterlist element */
2356         i = msg->sg.start;
2357         do {
2358                 l = sk_msg_elem(msg, i)->length;
2359
2360                 if (start < offset + l)
2361                         break;
2362                 offset += l;
2363                 sk_msg_iter_var_next(i);
2364         } while (i != msg->sg.end);
2365
2366         if (start >= offset + l)
2367                 return -EINVAL;
2368
2369         space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2370
2371         /* If no space is available we will fall back to a copy. We need
2372          * at least one scatterlist elem available to push data into
2373          * when start aligns with the beginning of an element, or two
2374          * when it falls inside an element. We handle the start ==
2375          * offset case separately because it's the common case for
2376          * inserting a header.
2377          */
2378         if (!space || (space == 1 && start != offset))
2379                 copy = msg->sg.data[i].length;
2380
2381         page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
2382                            get_order(copy + len));
2383         if (unlikely(!page))
2384                 return -ENOMEM;
2385
2386         if (copy) {
2387                 int front, back;
2388
2389                 raw = page_address(page);
2390
2391                 psge = sk_msg_elem(msg, i);
2392                 front = start - offset;
2393                 back = psge->length - front;
2394                 from = sg_virt(psge);
2395
2396                 if (front)
2397                         memcpy(raw, from, front);
2398
2399                 if (back) {
2400                         from += front;
2401                         to = raw + front + len;
2402
2403                         memcpy(to, from, back);
2404                 }
2405
2406                 put_page(sg_page(psge));
2407         } else if (start - offset) {
2408                 psge = sk_msg_elem(msg, i);
2409                 rsge = sk_msg_elem_cpy(msg, i);
2410
2411                 psge->length = start - offset;
2412                 rsge.length -= psge->length;
2413                 rsge.offset += start;
2414
2415                 sk_msg_iter_var_next(i);
2416                 sg_unmark_end(psge);
2417                 sk_msg_iter_next(msg, end);
2418         }
2419
2420         /* Slot(s) to place newly allocated data */
2421         new = i;
2422
2423         /* Shift one or two slots as needed */
2424         if (!copy) {
2425                 sge = sk_msg_elem_cpy(msg, i);
2426
2427                 sk_msg_iter_var_next(i);
2428                 sg_unmark_end(&sge);
2429                 sk_msg_iter_next(msg, end);
2430
2431                 nsge = sk_msg_elem_cpy(msg, i);
2432                 if (rsge.length) {
2433                         sk_msg_iter_var_next(i);
2434                         nnsge = sk_msg_elem_cpy(msg, i);
2435                 }
2436
2437                 while (i != msg->sg.end) {
2438                         msg->sg.data[i] = sge;
2439                         sge = nsge;
2440                         sk_msg_iter_var_next(i);
2441                         if (rsge.length) {
2442                                 nsge = nnsge;
2443                                 nnsge = sk_msg_elem_cpy(msg, i);
2444                         } else {
2445                                 nsge = sk_msg_elem_cpy(msg, i);
2446                         }
2447                 }
2448         }
2449
2450         /* Place newly allocated data buffer */
2451         sk_mem_charge(msg->sk, len);
2452         msg->sg.size += len;
2453         msg->sg.copy[new] = false;
2454         sg_set_page(&msg->sg.data[new], page, len + copy, 0);
2455         if (rsge.length) {
2456                 get_page(sg_page(&rsge));
2457                 sk_msg_iter_var_next(new);
2458                 msg->sg.data[new] = rsge;
2459         }
2460
2461         sk_msg_compute_data_pointers(msg);
2462         return 0;
2463 }
2464
2465 static const struct bpf_func_proto bpf_msg_push_data_proto = {
2466         .func           = bpf_msg_push_data,
2467         .gpl_only       = false,
2468         .ret_type       = RET_INTEGER,
2469         .arg1_type      = ARG_PTR_TO_CTX,
2470         .arg2_type      = ARG_ANYTHING,
2471         .arg3_type      = ARG_ANYTHING,
2472         .arg4_type      = ARG_ANYTHING,
2473 };
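
/* Editor's note: a hedged sketch of opening four bytes of room at the
 * front of a msg for an assumed custom header; the new bytes are
 * uninitialized and must be written after rechecking the recomputed
 * data pointers:
 *
 *	if (bpf_msg_push_data(msg, 0, 4, 0))
 *		return SK_DROP;
 *	(recheck msg->data/msg->data_end, then store the 4-byte header)
 */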
2474
2475 static void sk_msg_shift_left(struct sk_msg *msg, int i)
2476 {
2477         int prev;
2478
2479         do {
2480                 prev = i;
2481                 sk_msg_iter_var_next(i);
2482                 msg->sg.data[prev] = msg->sg.data[i];
2483         } while (i != msg->sg.end);
2484
2485         sk_msg_iter_prev(msg, end);
2486 }
2487
2488 static void sk_msg_shift_right(struct sk_msg *msg, int i)
2489 {
2490         struct scatterlist tmp, sge;
2491
2492         sk_msg_iter_next(msg, end);
2493         sge = sk_msg_elem_cpy(msg, i);
2494         sk_msg_iter_var_next(i);
2495         tmp = sk_msg_elem_cpy(msg, i);
2496
2497         while (i != msg->sg.end) {
2498                 msg->sg.data[i] = sge;
2499                 sk_msg_iter_var_next(i);
2500                 sge = tmp;
2501                 tmp = sk_msg_elem_cpy(msg, i);
2502         }
2503 }
2504
2505 BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
2506            u32, len, u64, flags)
2507 {
2508         u32 i = 0, l, space, offset = 0;
2509         u64 last = start + len;
2510         int pop;
2511
2512         if (unlikely(flags))
2513                 return -EINVAL;
2514
2515         /* First find the starting scatterlist element */
2516         i = msg->sg.start;
2517         do {
2518                 l = sk_msg_elem(msg, i)->length;
2519
2520                 if (start < offset + l)
2521                         break;
2522                 offset += l;
2523                 sk_msg_iter_var_next(i);
2524         } while (i != msg->sg.end);
2525
2526         /* Bounds checks: start and pop must be inside message */
2527         if (start >= offset + l || last >= msg->sg.size)
2528                 return -EINVAL;
2529
2530         space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);
2531
2532         pop = len;
2533         /* --------------| offset
2534          * -| start      |-------- len -------|
2535          *
2536          *  |----- a ----|-------- pop -------|----- b ----|
2537          *  |______________________________________________| length
2538          *
2539          *
2540          * a:   region at front of scatter element to save
2541          * b:   region at back of scatter element to save when length > a + pop
2542          * pop: region to pop from element, same as input 'pop'; here it will
2543          *      be decremented below per iteration.
2544          *
2545          * Two top-level cases to handle when start != offset: first, b is
2546          * non-zero; second, b is zero, corresponding to a pop that spans
2547          * more than one element.
2548          *
2549          * Then, if b is non-zero AND there is no space, allocate space and
2550          * compact the a and b regions into a page. If there is space, shift
2551          * the ring to the right, freeing the next element in the ring to
2552          * place b, leaving a untouched except to reduce its length.
2553          */
2554         if (start != offset) {
2555                 struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
2556                 int a = start;
2557                 int b = sge->length - pop - a;
2558
2559                 sk_msg_iter_var_next(i);
2560
2561                 if (pop < sge->length - a) {
2562                         if (space) {
2563                                 sge->length = a;
2564                                 sk_msg_shift_right(msg, i);
2565                                 nsge = sk_msg_elem(msg, i);
2566                                 get_page(sg_page(sge));
2567                                 sg_set_page(nsge,
2568                                             sg_page(sge),
2569                                             b, sge->offset + pop + a);
2570                         } else {
2571                                 struct page *page, *orig;
2572                                 u8 *to, *from;
2573
2574                                 page = alloc_pages(__GFP_NOWARN |
2575                                                    __GFP_COMP   | GFP_ATOMIC,
2576                                                    get_order(a + b));
2577                                 if (unlikely(!page))
2578                                         return -ENOMEM;
2579
2580                                 sge->length = a;
2581                                 orig = sg_page(sge);
2582                                 from = sg_virt(sge);
2583                                 to = page_address(page);
2584                                 memcpy(to, from, a);
2585                                 memcpy(to + a, from + a + pop, b);
2586                                 sg_set_page(sge, page, a + b, 0);
2587                                 put_page(orig);
2588                         }
2589                         pop = 0;
2590                 } else if (pop >= sge->length - a) {
2591                         pop -= (sge->length - a);
2592                         sge->length = a;
2593                 }
2594         }
2595
2596         /* From above the current layout _must_ be as follows,
2597          *
2598          * -| offset
2599          * -| start
2600          *
2601          *  |---- pop ---|---------------- b ------------|
2602          *  |____________________________________________| length
2603          *
2604          * Offset and start of the current msg elem are equal because in the
2605          * previous case we handled offset != start and either consumed the
2606          * entire element and advanced to the next element OR pop == 0.
2607          *
2608          * Two cases to handle here: first, pop is less than the length,
2609          * leaving some remainder b above; simply adjust the element's
2610          * layout in this case. Second, pop >= the length of the element,
2611          * so that b = 0; in this case advance to the next element, decrementing pop.
2612          */
2613         while (pop) {
2614                 struct scatterlist *sge = sk_msg_elem(msg, i);
2615
2616                 if (pop < sge->length) {
2617                         sge->length -= pop;
2618                         sge->offset += pop;
2619                         pop = 0;
2620                 } else {
2621                         pop -= sge->length;
2622                         sk_msg_shift_left(msg, i);
2623                 }
2624                 sk_msg_iter_var_next(i);
2625         }
2626
2627         sk_mem_uncharge(msg->sk, len - pop);
2628         msg->sg.size -= (len - pop);
2629         sk_msg_compute_data_pointers(msg);
2630         return 0;
2631 }
2632
2633 static const struct bpf_func_proto bpf_msg_pop_data_proto = {
2634         .func           = bpf_msg_pop_data,
2635         .gpl_only       = false,
2636         .ret_type       = RET_INTEGER,
2637         .arg1_type      = ARG_PTR_TO_CTX,
2638         .arg2_type      = ARG_ANYTHING,
2639         .arg3_type      = ARG_ANYTHING,
2640         .arg4_type      = ARG_ANYTHING,
2641 };
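
/* Editor's note: a hedged sketch of the inverse operation, stripping
 * an assumed 8-byte application header from the front of a msg before
 * the data continues towards the peer:
 *
 *	if (bpf_msg_pop_data(msg, 0, 8, 0))
 *		return SK_DROP;
 */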
2642
2643 BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
2644 {
2645         return task_get_classid(skb);
2646 }
2647
2648 static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
2649         .func           = bpf_get_cgroup_classid,
2650         .gpl_only       = false,
2651         .ret_type       = RET_INTEGER,
2652         .arg1_type      = ARG_PTR_TO_CTX,
2653 };
2654
2655 BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
2656 {
2657         return dst_tclassid(skb);
2658 }
2659
2660 static const struct bpf_func_proto bpf_get_route_realm_proto = {
2661         .func           = bpf_get_route_realm,
2662         .gpl_only       = false,
2663         .ret_type       = RET_INTEGER,
2664         .arg1_type      = ARG_PTR_TO_CTX,
2665 };
2666
2667 BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
2668 {
2669         /* If skb_clear_hash() was called due to mangling, we can
2670          * trigger SW recalculation here. Later access to hash
2671          * can then use the inline skb->hash via context directly
2672          * instead of calling this helper again.
2673          */
2674         return skb_get_hash(skb);
2675 }
2676
2677 static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
2678         .func           = bpf_get_hash_recalc,
2679         .gpl_only       = false,
2680         .ret_type       = RET_INTEGER,
2681         .arg1_type      = ARG_PTR_TO_CTX,
2682 };
2683
2684 BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
2685 {
2686         /* After all direct packet write, this can be used once for
2687          * triggering a lazy recalc on next skb_get_hash() invocation.
2688          */
2689         skb_clear_hash(skb);
2690         return 0;
2691 }
2692
2693 static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
2694         .func           = bpf_set_hash_invalid,
2695         .gpl_only       = false,
2696         .ret_type       = RET_INTEGER,
2697         .arg1_type      = ARG_PTR_TO_CTX,
2698 };
2699
2700 BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
2701 {
2702         /* Set user specified hash as L4(+), so that it gets returned
2703          * on skb_get_hash() call unless BPF prog later on triggers a
2704          * skb_clear_hash().
2705          */
2706         __skb_set_sw_hash(skb, hash, true);
2707         return 0;
2708 }
2709
2710 static const struct bpf_func_proto bpf_set_hash_proto = {
2711         .func           = bpf_set_hash,
2712         .gpl_only       = false,
2713         .ret_type       = RET_INTEGER,
2714         .arg1_type      = ARG_PTR_TO_CTX,
2715         .arg2_type      = ARG_ANYTHING,
2716 };
2717
2718 BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
2719            u16, vlan_tci)
2720 {
2721         int ret;
2722
2723         if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
2724                      vlan_proto != htons(ETH_P_8021AD)))
2725                 vlan_proto = htons(ETH_P_8021Q);
2726
2727         bpf_push_mac_rcsum(skb);
2728         ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
2729         bpf_pull_mac_rcsum(skb);
2730
2731         bpf_compute_data_pointers(skb);
2732         return ret;
2733 }
2734
2735 static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
2736         .func           = bpf_skb_vlan_push,
2737         .gpl_only       = false,
2738         .ret_type       = RET_INTEGER,
2739         .arg1_type      = ARG_PTR_TO_CTX,
2740         .arg2_type      = ARG_ANYTHING,
2741         .arg3_type      = ARG_ANYTHING,
2742 };
2743
2744 BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
2745 {
2746         int ret;
2747
2748         bpf_push_mac_rcsum(skb);
2749         ret = skb_vlan_pop(skb);
2750         bpf_pull_mac_rcsum(skb);
2751
2752         bpf_compute_data_pointers(skb);
2753         return ret;
2754 }
2755
2756 static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
2757         .func           = bpf_skb_vlan_pop,
2758         .gpl_only       = false,
2759         .ret_type       = RET_INTEGER,
2760         .arg1_type      = ARG_PTR_TO_CTX,
2761 };
2762
2763 static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
2764 {
2765         /* Caller already did skb_cow() with len as headroom,
2766          * so no need to do it here.
2767          */
2768         skb_push(skb, len);
2769         memmove(skb->data, skb->data + len, off);
2770         memset(skb->data + off, 0, len);
2771
2772         /* No skb_postpush_rcsum(skb, skb->data + off, len)
2773          * needed here as it does not change the skb->csum
2774          * result for checksum complete when summing over
2775          * zeroed blocks.
2776          */
2777         return 0;
2778 }
2779
2780 static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
2781 {
2782         /* skb_ensure_writable() is not needed here, as we're
2783          * already working on an uncloned skb.
2784          */
2785         if (unlikely(!pskb_may_pull(skb, off + len)))
2786                 return -ENOMEM;
2787
2788         skb_postpull_rcsum(skb, skb->data + off, len);
2789         memmove(skb->data + len, skb->data, off);
2790         __skb_pull(skb, len);
2791
2792         return 0;
2793 }
2794
2795 static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
2796 {
2797         bool trans_same = skb->transport_header == skb->network_header;
2798         int ret;
2799
2800         /* There's no need for __skb_push()/__skb_pull() pair to
2801          * get to the start of the mac header as we're guaranteed
2802          * to always start from here under eBPF.
2803          */
2804         ret = bpf_skb_generic_push(skb, off, len);
2805         if (likely(!ret)) {
2806                 skb->mac_header -= len;
2807                 skb->network_header -= len;
2808                 if (trans_same)
2809                         skb->transport_header = skb->network_header;
2810         }
2811
2812         return ret;
2813 }
2814
2815 static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
2816 {
2817         bool trans_same = skb->transport_header == skb->network_header;
2818         int ret;
2819
2820         /* Same here, __skb_push()/__skb_pull() pair not needed. */
2821         ret = bpf_skb_generic_pop(skb, off, len);
2822         if (likely(!ret)) {
2823                 skb->mac_header += len;
2824                 skb->network_header += len;
2825                 if (trans_same)
2826                         skb->transport_header = skb->network_header;
2827         }
2828
2829         return ret;
2830 }
2831
2832 static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
2833 {
2834         const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
2835         u32 off = skb_mac_header_len(skb);
2836         int ret;
2837
2838         if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
2839                 return -ENOTSUPP;
2840
2841         ret = skb_cow(skb, len_diff);
2842         if (unlikely(ret < 0))
2843                 return ret;
2844
2845         ret = bpf_skb_net_hdr_push(skb, off, len_diff);
2846         if (unlikely(ret < 0))
2847                 return ret;
2848
2849         if (skb_is_gso(skb)) {
2850                 struct skb_shared_info *shinfo = skb_shinfo(skb);
2851
2852                 /* SKB_GSO_TCPV4 needs to be changed into
2853                  * SKB_GSO_TCPV6.
2854                  */
2855                 if (shinfo->gso_type & SKB_GSO_TCPV4) {
2856                         shinfo->gso_type &= ~SKB_GSO_TCPV4;
2857                         shinfo->gso_type |=  SKB_GSO_TCPV6;
2858                 }
2859
2860                 /* Due to the larger IPv6 header, MSS needs to be decreased. */
2861                 skb_decrease_gso_size(shinfo, len_diff);
2862                 /* Header must be checked, and gso_segs recomputed. */
2863                 shinfo->gso_type |= SKB_GSO_DODGY;
2864                 shinfo->gso_segs = 0;
2865         }
2866
2867         skb->protocol = htons(ETH_P_IPV6);
2868         skb_clear_hash(skb);
2869
2870         return 0;
2871 }
2872
2873 static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
2874 {
2875         const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
2876         u32 off = skb_mac_header_len(skb);
2877         int ret;
2878
2879         if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
2880                 return -ENOTSUPP;
2881
2882         ret = skb_unclone(skb, GFP_ATOMIC);
2883         if (unlikely(ret < 0))
2884                 return ret;
2885
2886         ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
2887         if (unlikely(ret < 0))
2888                 return ret;
2889
2890         if (skb_is_gso(skb)) {
2891                 struct skb_shared_info *shinfo = skb_shinfo(skb);
2892
2893                 /* SKB_GSO_TCPV6 needs to be changed into
2894                  * SKB_GSO_TCPV4.
2895                  */
2896                 if (shinfo->gso_type & SKB_GSO_TCPV6) {
2897                         shinfo->gso_type &= ~SKB_GSO_TCPV6;
2898                         shinfo->gso_type |=  SKB_GSO_TCPV4;
2899                 }
2900
2901                 /* Due to the smaller IPv4 header, MSS can be increased. */
2902                 skb_increase_gso_size(shinfo, len_diff);
2903                 /* Header must be checked, and gso_segs recomputed. */
2904                 shinfo->gso_type |= SKB_GSO_DODGY;
2905                 shinfo->gso_segs = 0;
2906         }
2907
2908         skb->protocol = htons(ETH_P_IP);
2909         skb_clear_hash(skb);
2910
2911         return 0;
2912 }
2913
2914 static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
2915 {
2916         __be16 from_proto = skb->protocol;
2917
2918         if (from_proto == htons(ETH_P_IP) &&
2919               to_proto == htons(ETH_P_IPV6))
2920                 return bpf_skb_proto_4_to_6(skb);
2921
2922         if (from_proto == htons(ETH_P_IPV6) &&
2923               to_proto == htons(ETH_P_IP))
2924                 return bpf_skb_proto_6_to_4(skb);
2925
2926         return -ENOTSUPP;
2927 }
2928
2929 BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
2930            u64, flags)
2931 {
2932         int ret;
2933
2934         if (unlikely(flags))
2935                 return -EINVAL;
2936
2937         /* The general idea is that this helper does the basic groundwork
2938          * needed for changing the protocol, and the eBPF program fills in
2939          * the rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
2940          * and other helpers, rather than passing a raw buffer here.
2941          *
2942          * The rationale is to keep this minimal and avoid dealing with
2943          * raw packet data. E.g. even if we passed buffers here, the
2944          * program would still need to call the bpf_lX_csum_replace()
2945          * helpers anyway. This also keeps the separation of concerns
2946          * clean, since e.g. bpf_skb_store_bytes() should only take
2947          * care of stores. See the usage sketch below the proto.
2948          *
2949          * Currently, additional options and extension header space are
2950          * not supported, but the flags argument is reserved so we can
2951          * adapt that later. For offloads, we mark the packet as dodgy,
2952          * so that the headers must be verified first.
2953          */
2954         ret = bpf_skb_proto_xlat(skb, proto);
2955         bpf_compute_data_pointers(skb);
2956         return ret;
2957 }
2958
2959 static const struct bpf_func_proto bpf_skb_change_proto_proto = {
2960         .func           = bpf_skb_change_proto,
2961         .gpl_only       = false,
2962         .ret_type       = RET_INTEGER,
2963         .arg1_type      = ARG_PTR_TO_CTX,
2964         .arg2_type      = ARG_ANYTHING,
2965         .arg3_type      = ARG_ANYTHING,
2966 };
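
/* Usage sketch (not part of this file; names like ip6h, l4_csum_off and
 * csum_diff are assumed to be prepared by the program): a tc/BPF program
 * doing a 4-to-6 translation calls the helper first, then rewrites the
 * headers and checksums itself:
 *
 *         if (bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0))
 *                 return TC_ACT_SHOT;
 *         // Write the new IPv6 header behind the mac header ...
 *         bpf_skb_store_bytes(skb, ETH_HLEN, &ip6h, sizeof(ip6h), 0);
 *         // ... and fold a checksum diff (from bpf_csum_diff() over the
 *         // old/new addresses) into the L4 pseudo-header checksum.
 *         bpf_l4_csum_replace(skb, l4_csum_off, 0, csum_diff,
 *                             BPF_F_PSEUDO_HDR);
 */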
2967
2968 BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
2969 {
2970         /* We only allow a restricted subset to be changed for now. */
2971         if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
2972                      !skb_pkt_type_ok(pkt_type)))
2973                 return -EINVAL;
2974
2975         skb->pkt_type = pkt_type;
2976         return 0;
2977 }
2978
2979 static const struct bpf_func_proto bpf_skb_change_type_proto = {
2980         .func           = bpf_skb_change_type,
2981         .gpl_only       = false,
2982         .ret_type       = RET_INTEGER,
2983         .arg1_type      = ARG_PTR_TO_CTX,
2984         .arg2_type      = ARG_ANYTHING,
2985 };
2986
2987 static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
2988 {
2989         switch (skb->protocol) {
2990         case htons(ETH_P_IP):
2991                 return sizeof(struct iphdr);
2992         case htons(ETH_P_IPV6):
2993                 return sizeof(struct ipv6hdr);
2994         default:
2995                 return ~0U;
2996         }
2997 }
2998
2999 #define BPF_F_ADJ_ROOM_ENCAP_L3_MASK    (BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
3000                                          BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3001
3002 #define BPF_F_ADJ_ROOM_MASK             (BPF_F_ADJ_ROOM_FIXED_GSO | \
3003                                          BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
3004                                          BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
3005                                          BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
3006                                          BPF_F_ADJ_ROOM_ENCAP_L2( \
3007                                           BPF_ADJ_ROOM_ENCAP_L2_MASK))
3008
3009 static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
3010                             u64 flags)
3011 {
3012         u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
3013         bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
3014         u16 mac_len = 0, inner_net = 0, inner_trans = 0;
3015         unsigned int gso_type = SKB_GSO_DODGY;
3016         int ret;
3017
3018         if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
3019                 /* udp gso_size delineates datagrams, only allow if fixed */
3020                 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3021                     !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3022                         return -ENOTSUPP;
3023         }
3024
3025         ret = skb_cow_head(skb, len_diff);
3026         if (unlikely(ret < 0))
3027                 return ret;
3028
3029         if (encap) {
3030                 if (skb->protocol != htons(ETH_P_IP) &&
3031                     skb->protocol != htons(ETH_P_IPV6))
3032                         return -ENOTSUPP;
3033
3034                 if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
3035                     flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3036                         return -EINVAL;
3037
3038                 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
3039                     flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3040                         return -EINVAL;
3041
3042                 if (skb->encapsulation)
3043                         return -EALREADY;
3044
3045                 mac_len = skb->network_header - skb->mac_header;
3046                 inner_net = skb->network_header;
3047                 if (inner_mac_len > len_diff)
3048                         return -EINVAL;
3049                 inner_trans = skb->transport_header;
3050         }
3051
3052         ret = bpf_skb_net_hdr_push(skb, off, len_diff);
3053         if (unlikely(ret < 0))
3054                 return ret;
3055
3056         if (encap) {
3057                 skb->inner_mac_header = inner_net - inner_mac_len;
3058                 skb->inner_network_header = inner_net;
3059                 skb->inner_transport_header = inner_trans;
3060                 skb_set_inner_protocol(skb, skb->protocol);
3061
3062                 skb->encapsulation = 1;
3063                 skb_set_network_header(skb, mac_len);
3064
3065                 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
3066                         gso_type |= SKB_GSO_UDP_TUNNEL;
3067                 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
3068                         gso_type |= SKB_GSO_GRE;
3069                 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3070                         gso_type |= SKB_GSO_IPXIP6;
3071                 else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
3072                         gso_type |= SKB_GSO_IPXIP4;
3073
3074                 if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
3075                     flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
3076                         int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
3077                                         sizeof(struct ipv6hdr) :
3078                                         sizeof(struct iphdr);
3079
3080                         skb_set_transport_header(skb, mac_len + nh_len);
3081                 }
3082
3083                 /* Match skb->protocol to the new outer L3 protocol */
3084                 if (skb->protocol == htons(ETH_P_IP) &&
3085                     flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
3086                         skb->protocol = htons(ETH_P_IPV6);
3087                 else if (skb->protocol == htons(ETH_P_IPV6) &&
3088                          flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
3089                         skb->protocol = htons(ETH_P_IP);
3090         }
3091
3092         if (skb_is_gso(skb)) {
3093                 struct skb_shared_info *shinfo = skb_shinfo(skb);
3094
3095                 /* Due to header grow, MSS needs to be downgraded. */
3096                 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3097                         skb_decrease_gso_size(shinfo, len_diff);
3098
3099                 /* Header must be checked, and gso_segs recomputed. */
3100                 shinfo->gso_type |= gso_type;
3101                 shinfo->gso_segs = 0;
3102         }
3103
3104         return 0;
3105 }
3106
3107 static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
3108                               u64 flags)
3109 {
3110         int ret;
3111
3112         if (flags & ~BPF_F_ADJ_ROOM_FIXED_GSO)
3113                 return -EINVAL;
3114
3115         if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
3116                 /* udp gso_size delineates datagrams, only allow if fixed */
3117                 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
3118                     !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3119                         return -ENOTSUPP;
3120         }
3121
3122         ret = skb_unclone(skb, GFP_ATOMIC);
3123         if (unlikely(ret < 0))
3124                 return ret;
3125
3126         ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
3127         if (unlikely(ret < 0))
3128                 return ret;
3129
3130         if (skb_is_gso(skb)) {
3131                 struct skb_shared_info *shinfo = skb_shinfo(skb);
3132
3133                 /* Due to header shrink, MSS can be upgraded. */
3134                 if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
3135                         skb_increase_gso_size(shinfo, len_diff);
3136
3137                 /* Header must be checked, and gso_segs recomputed. */
3138                 shinfo->gso_type |= SKB_GSO_DODGY;
3139                 shinfo->gso_segs = 0;
3140         }
3141
3142         return 0;
3143 }
3144
3145 static u32 __bpf_skb_max_len(const struct sk_buff *skb)
3146 {
3147         return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
3148                           SKB_MAX_ALLOC;
3149 }
3150
3151 BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff,
3152            u32, mode, u64, flags)
3153 {
3154         u32 len_cur, len_diff_abs = abs(len_diff);
3155         u32 len_min = bpf_skb_net_base_len(skb);
3156         u32 len_max = __bpf_skb_max_len(skb);
3157         __be16 proto = skb->protocol;
3158         bool shrink = len_diff < 0;
3159         u32 off;
3160         int ret;
3161
3162         if (unlikely(flags & ~BPF_F_ADJ_ROOM_MASK))
3163                 return -EINVAL;
3164         if (unlikely(len_diff_abs > 0xfffU))
3165                 return -EFAULT;
3166         if (unlikely(proto != htons(ETH_P_IP) &&
3167                      proto != htons(ETH_P_IPV6)))
3168                 return -ENOTSUPP;
3169
3170         off = skb_mac_header_len(skb);
3171         switch (mode) {
3172         case BPF_ADJ_ROOM_NET:
3173                 off += bpf_skb_net_base_len(skb);
3174                 break;
3175         case BPF_ADJ_ROOM_MAC:
3176                 break;
3177         default:
3178                 return -ENOTSUPP;
3179         }
3180
3181         len_cur = skb->len - skb_network_offset(skb);
3182         if ((shrink && (len_diff_abs >= len_cur ||
3183                         len_cur - len_diff_abs < len_min)) ||
3184             (!shrink && (skb->len + len_diff_abs > len_max &&
3185                          !skb_is_gso(skb))))
3186                 return -ENOTSUPP;
3187
3188         ret = shrink ? bpf_skb_net_shrink(skb, off, len_diff_abs, flags) :
3189                        bpf_skb_net_grow(skb, off, len_diff_abs, flags);
3190
3191         bpf_compute_data_pointers(skb);
3192         return ret;
3193 }
3194
3195 static const struct bpf_func_proto bpf_skb_adjust_room_proto = {
3196         .func           = bpf_skb_adjust_room,
3197         .gpl_only       = false,
3198         .ret_type       = RET_INTEGER,
3199         .arg1_type      = ARG_PTR_TO_CTX,
3200         .arg2_type      = ARG_ANYTHING,
3201         .arg3_type      = ARG_ANYTHING,
3202         .arg4_type      = ARG_ANYTHING,
3203 };
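
/* Usage sketch (not part of this file): a tc/BPF program making room in
 * front of the network header for an outer IPv4 + GRE encapsulation;
 * the helper grows the skb and fixes up the inner header and GSO
 * bookkeeping, the program then writes the outer headers itself:
 *
 *         __u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
 *                       BPF_F_ADJ_ROOM_ENCAP_L4_GRE;
 *         __s32 room = sizeof(struct iphdr) + 4;  // 4-byte base GRE header
 *
 *         if (bpf_skb_adjust_room(skb, room, BPF_ADJ_ROOM_MAC, flags))
 *                 return TC_ACT_SHOT;
 *         // Now write the outer iphdr and GRE header at offset ETH_HLEN
 *         // via bpf_skb_store_bytes().
 */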
3204
3205 static u32 __bpf_skb_min_len(const struct sk_buff *skb)
3206 {
3207         u32 min_len = skb_network_offset(skb);
3208
3209         if (skb_transport_header_was_set(skb))
3210                 min_len = skb_transport_offset(skb);
3211         if (skb->ip_summed == CHECKSUM_PARTIAL)
3212                 min_len = skb_checksum_start_offset(skb) +
3213                           skb->csum_offset + sizeof(__sum16);
3214         return min_len;
3215 }
3216
3217 static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len)
3218 {
3219         unsigned int old_len = skb->len;
3220         int ret;
3221
3222         ret = __skb_grow_rcsum(skb, new_len);
3223         if (!ret)
3224                 memset(skb->data + old_len, 0, new_len - old_len);
3225         return ret;
3226 }
3227
3228 static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
3229 {
3230         return __skb_trim_rcsum(skb, new_len);
3231 }
3232
3233 static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
3234                                         u64 flags)
3235 {
3236         u32 max_len = __bpf_skb_max_len(skb);
3237         u32 min_len = __bpf_skb_min_len(skb);
3238         int ret;
3239
3240         if (unlikely(flags || new_len > max_len || new_len < min_len))
3241                 return -EINVAL;
3242         if (skb->encapsulation)
3243                 return -ENOTSUPP;
3244
3245         /* The basic idea of this helper is that it performs the work
3246          * needed to either grow or trim an skb, and the eBPF program
3247          * rewrites the rest via helpers like bpf_skb_store_bytes(),
3248          * bpf_lX_csum_replace() and others, rather than passing a raw
3249          * buffer here. This is a slow path helper, intended for
3250          * replies with control messages.
3251          *
3252          * Like bpf_skb_change_proto(), we want to keep this rather
3253          * minimal and free of protocol specifics, so that concerns
3254          * stay separated: bpf_skb_store_bytes() alone is responsible
3255          * for writing buffers. See the usage sketch below.
3256          *
3257          * Since this is expected to be a slow path operation for
3258          * control message replies, we implicitly linearize, unclone
3259          * and drop offloads from the skb here.
3260          */
3261         ret = __bpf_try_make_writable(skb, skb->len);
3262         if (!ret) {
3263                 if (new_len > skb->len)
3264                         ret = bpf_skb_grow_rcsum(skb, new_len);
3265                 else if (new_len < skb->len)
3266                         ret = bpf_skb_trim_rcsum(skb, new_len);
3267                 if (!ret && skb_is_gso(skb))
3268                         skb_gso_reset(skb);
3269         }
3270         return ret;
3271 }
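
/* Usage sketch (not part of this file): resizing a packet into a
 * fixed-size control reply, with new_len assumed to satisfy the
 * min/max bounds checked above; grown bytes are zeroed by
 * bpf_skb_grow_rcsum():
 *
 *         if (bpf_skb_change_tail(skb, new_len, 0))
 *                 return TC_ACT_SHOT;
 *         // Data pointers were recomputed by the wrapper; re-validate
 *         // skb->data/data_end before touching packet bytes.
 */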
3272
3273 BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3274            u64, flags)
3275 {
3276         int ret = __bpf_skb_change_tail(skb, new_len, flags);
3277
3278         bpf_compute_data_pointers(skb);
3279         return ret;
3280 }
3281
3282 static const struct bpf_func_proto bpf_skb_change_tail_proto = {
3283         .func           = bpf_skb_change_tail,
3284         .gpl_only       = false,
3285         .ret_type       = RET_INTEGER,
3286         .arg1_type      = ARG_PTR_TO_CTX,
3287         .arg2_type      = ARG_ANYTHING,
3288         .arg3_type      = ARG_ANYTHING,
3289 };
3290
3291 BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
3292            u64, flags)
3293 {
3294         int ret = __bpf_skb_change_tail(skb, new_len, flags);
3295
3296         bpf_compute_data_end_sk_skb(skb);
3297         return ret;
3298 }
3299
3300 static const struct bpf_func_proto sk_skb_change_tail_proto = {
3301         .func           = sk_skb_change_tail,
3302         .gpl_only       = false,
3303         .ret_type       = RET_INTEGER,
3304         .arg1_type      = ARG_PTR_TO_CTX,
3305         .arg2_type      = ARG_ANYTHING,
3306         .arg3_type      = ARG_ANYTHING,
3307 };
3308
3309 static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
3310                                         u64 flags)
3311 {
3312         u32 max_len = __bpf_skb_max_len(skb);
3313         u32 new_len = skb->len + head_room;
3314         int ret;
3315
3316         if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
3317                      new_len < skb->len))
3318                 return -EINVAL;
3319
3320         ret = skb_cow(skb, head_room);
3321         if (likely(!ret)) {
3322                 /* The idea for this helper is that we currently
3323                  * only allow expanding the mac header. This means
3324                  * that skb->protocol, network header, etc. stay as
3325                  * is. Compared to bpf_skb_change_tail(), we're more
3326                  * flexible since we neither need to linearize nor
3327                  * reset GSO. The intended use is an L3 skb that
3328                  * needs a mac header pushed for redirection into
3329                  * an L2 device (see the usage sketch below).
3330                  */
3331                 __skb_push(skb, head_room);
3332                 memset(skb->data, 0, head_room);
3333                 skb_reset_mac_header(skb);
3334         }
3335
3336         return ret;
3337 }
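
/* Usage sketch (not part of this file; eth and ifindex are assumed to
 * be set up by the program): an lwt xmit program pushing room for an
 * Ethernet header before redirecting an L3 skb into an L2 device:
 *
 *         if (bpf_skb_change_head(skb, ETH_HLEN, 0))
 *                 return BPF_DROP;
 *         // The new headroom is zeroed (see the memset() above); write
 *         // the Ethernet header, then redirect.
 *         bpf_skb_store_bytes(skb, 0, &eth, sizeof(eth), 0);
 *         return bpf_redirect(ifindex, 0);
 */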
3338
3339 BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
3340            u64, flags)
3341 {
3342         int ret = __bpf_skb_change_head(skb, head_room, flags);
3343
3344         bpf_compute_data_pointers(skb);
3345         return ret;
3346 }
3347
3348 static const struct bpf_func_proto bpf_skb_change_head_proto = {
3349         .func           = bpf_skb_change_head,
3350         .gpl_only       = false,
3351         .ret_type       = RET_INTEGER,
3352         .arg1_type      = ARG_PTR_TO_CTX,
3353         .arg2_type      = ARG_ANYTHING,
3354         .arg3_type      = ARG_ANYTHING,
3355 };
3356
3357 BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
3358            u64, flags)
3359 {
3360         int ret = __bpf_skb_change_head(skb, head_room, flags);
3361
3362         bpf_compute_data_end_sk_skb(skb);
3363         return ret;
3364 }
3365
3366 static const struct bpf_func_proto sk_skb_change_head_proto = {
3367         .func           = sk_skb_change_head,
3368         .gpl_only       = false,
3369         .ret_type       = RET_INTEGER,
3370         .arg1_type      = ARG_PTR_TO_CTX,
3371         .arg2_type      = ARG_ANYTHING,
3372         .arg3_type      = ARG_ANYTHING,
3373 };

3374 static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
3375 {
3376         return xdp_data_meta_unsupported(xdp) ? 0 :
3377                xdp->data - xdp->data_meta;
3378 }
3379
3380 BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
3381 {
3382         void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3383         unsigned long metalen = xdp_get_metalen(xdp);
3384         void *data_start = xdp_frame_end + metalen;
3385         void *data = xdp->data + offset;
3386
3387         if (unlikely(data < data_start ||
3388                      data > xdp->data_end - ETH_HLEN))
3389                 return -EINVAL;
3390
3391         if (metalen)
3392                 memmove(xdp->data_meta + offset,
3393                         xdp->data_meta, metalen);
3394         xdp->data_meta += offset;
3395         xdp->data = data;
3396
3397         return 0;
3398 }
3399
3400 static const struct bpf_func_proto bpf_xdp_adjust_head_proto = {
3401         .func           = bpf_xdp_adjust_head,
3402         .gpl_only       = false,
3403         .ret_type       = RET_INTEGER,
3404         .arg1_type      = ARG_PTR_TO_CTX,
3405         .arg2_type      = ARG_ANYTHING,
3406 };
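
/* Usage sketch (not part of this file): an XDP program growing
 * headroom by moving xdp->data down, then re-checking bounds since
 * the verifier invalidates packet pointers after the adjust call:
 *
 *         if (bpf_xdp_adjust_head(ctx, -(int)sizeof(struct iphdr)))
 *                 return XDP_DROP;
 *         void *data = (void *)(long)ctx->data;
 *         void *data_end = (void *)(long)ctx->data_end;
 *         if (data + sizeof(struct iphdr) > data_end)
 *                 return XDP_DROP;
 */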
3407
3408 BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
3409 {
3410         void *data_end = xdp->data_end + offset;
3411
3412         /* Only shrinking is allowed for now. */
3413         if (unlikely(offset >= 0))
3414                 return -EINVAL;
3415
3416         if (unlikely(data_end < xdp->data + ETH_HLEN))
3417                 return -EINVAL;
3418
3419         xdp->data_end = data_end;
3420
3421         return 0;
3422 }
3423
3424 static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = {
3425         .func           = bpf_xdp_adjust_tail,
3426         .gpl_only       = false,
3427         .ret_type       = RET_INTEGER,
3428         .arg1_type      = ARG_PTR_TO_CTX,
3429         .arg2_type      = ARG_ANYTHING,
3430 };
3431
3432 BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset)
3433 {
3434         void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
3435         void *meta = xdp->data_meta + offset;
3436         unsigned long metalen = xdp->data - meta;
3437
3438         if (xdp_data_meta_unsupported(xdp))
3439                 return -ENOTSUPP;
3440         if (unlikely(meta < xdp_frame_end ||
3441                      meta > xdp->data))
3442                 return -EINVAL;
3443         if (unlikely((metalen & (sizeof(__u32) - 1)) ||
3444                      (metalen > 32)))
3445                 return -EACCES;
3446
3447         xdp->data_meta = meta;
3448
3449         return 0;
3450 }
3451
3452 static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
3453         .func           = bpf_xdp_adjust_meta,
3454         .gpl_only       = false,
3455         .ret_type       = RET_INTEGER,
3456         .arg1_type      = ARG_PTR_TO_CTX,
3457         .arg2_type      = ARG_ANYTHING,
3458 };
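
/* Usage sketch (not part of this file): reserving four bytes of
 * metadata in front of the frame; the area must stay u32-aligned and
 * at most 32 bytes long, as checked above:
 *
 *         if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(__u32)))
 *                 return XDP_PASS;
 *         __u32 *meta = (void *)(long)ctx->data_meta;
 *         void *data = (void *)(long)ctx->data;
 *         if ((void *)(meta + 1) > data)
 *                 return XDP_PASS;
 *         *meta = 0xcafe;  // readable later, e.g. from tc via data_meta
 */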
3459
3460 static int __bpf_tx_xdp(struct net_device *dev,
3461                         struct bpf_map *map,
3462                         struct xdp_buff *xdp,
3463                         u32 index)
3464 {
3465         struct xdp_frame *xdpf;
3466         int err, sent;
3467
3468         if (!dev->netdev_ops->ndo_xdp_xmit)
3469                 return -EOPNOTSUPP;
3471
3472         err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
3473         if (unlikely(err))
3474                 return err;
3475
3476         xdpf = convert_to_xdp_frame(xdp);
3477         if (unlikely(!xdpf))
3478                 return -EOVERFLOW;
3479
3480         sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, XDP_XMIT_FLUSH);
3481         if (sent <= 0)
3482                 return sent;
3483         return 0;
3484 }
3485
3486 static noinline int
3487 xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp,
3488                      struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri)
3489 {
3490         struct net_device *fwd;
3491         u32 index = ri->tgt_index;
3492         int err;
3493
3494         fwd = dev_get_by_index_rcu(dev_net(dev), index);
3495         ri->tgt_index = 0;
3496         if (unlikely(!fwd)) {
3497                 err = -EINVAL;
3498                 goto err;
3499         }
3500
3501         err = __bpf_tx_xdp(fwd, NULL, xdp, 0);
3502         if (unlikely(err))
3503                 goto err;
3504
3505         _trace_xdp_redirect(dev, xdp_prog, index);
3506         return 0;
3507 err:
3508         _trace_xdp_redirect_err(dev, xdp_prog, index, err);
3509         return err;
3510 }
3511
3512 static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
3513                             struct bpf_map *map,
3514                             struct xdp_buff *xdp,
3515                             u32 index)
3516 {
3517         int err;
3518
3519         switch (map->map_type) {
3520         case BPF_MAP_TYPE_DEVMAP:
3521         case BPF_MAP_TYPE_DEVMAP_HASH: {
3522                 struct bpf_dtab_netdev *dst = fwd;
3523
3524                 err = dev_map_enqueue(dst, xdp, dev_rx);
3525                 if (unlikely(err))
3526                         return err;
3527                 break;
3528         }
3529         case BPF_MAP_TYPE_CPUMAP: {
3530                 struct bpf_cpu_map_entry *rcpu = fwd;
3531
3532                 err = cpu_map_enqueue(rcpu, xdp, dev_rx);
3533                 if (unlikely(err))
3534                         return err;
3535                 break;
3536         }
3537         case BPF_MAP_TYPE_XSKMAP: {
3538                 struct xdp_sock *xs = fwd;
3539
3540                 err = __xsk_map_redirect(map, xdp, xs);
3541                 return err;
3542         }
3543         default:
3544                 break;
3545         }
3546         return 0;
3547 }
3548
3549 void xdp_do_flush_map(void)
3550 {
3551         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3552         struct bpf_map *map = ri->map_to_flush;
3553
3554         ri->map_to_flush = NULL;
3555         if (map) {
3556                 switch (map->map_type) {
3557                 case BPF_MAP_TYPE_DEVMAP:
3558                 case BPF_MAP_TYPE_DEVMAP_HASH:
3559                         __dev_map_flush(map);
3560                         break;
3561                 case BPF_MAP_TYPE_CPUMAP:
3562                         __cpu_map_flush(map);
3563                         break;
3564                 case BPF_MAP_TYPE_XSKMAP:
3565                         __xsk_map_flush(map);
3566                         break;
3567                 default:
3568                         break;
3569                 }
3570         }
3571 }
3572 EXPORT_SYMBOL_GPL(xdp_do_flush_map);
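
/* Usage sketch (not part of this file): drivers batch redirected
 * frames and flush once per NAPI poll cycle, roughly:
 *
 *         while (budget--) {
 *                 act = bpf_prog_run_xdp(xdp_prog, &xdp);
 *                 if (act == XDP_REDIRECT)
 *                         xdp_do_redirect(netdev, &xdp, xdp_prog);
 *         }
 *         xdp_do_flush_map();
 */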
3573
3574 static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
3575 {
3576         switch (map->map_type) {
3577         case BPF_MAP_TYPE_DEVMAP:
3578                 return __dev_map_lookup_elem(map, index);
3579         case BPF_MAP_TYPE_DEVMAP_HASH:
3580                 return __dev_map_hash_lookup_elem(map, index);
3581         case BPF_MAP_TYPE_CPUMAP:
3582                 return __cpu_map_lookup_elem(map, index);
3583         case BPF_MAP_TYPE_XSKMAP:
3584                 return __xsk_map_lookup_elem(map, index);
3585         default:
3586                 return NULL;
3587         }
3588 }
3589
3590 void bpf_clear_redirect_map(struct bpf_map *map)
3591 {
3592         struct bpf_redirect_info *ri;
3593         int cpu;
3594
3595         for_each_possible_cpu(cpu) {
3596                 ri = per_cpu_ptr(&bpf_redirect_info, cpu);
3597                 /* Avoid polluting the remote cacheline with writes
3598                  * unless needed. Once we pass this test, we need the
3599                  * cmpxchg() to make sure the value hasn't been
3600                  * changed in the meantime by a remote CPU.
3601                  */
3602                 if (unlikely(READ_ONCE(ri->map) == map))
3603                         cmpxchg(&ri->map, map, NULL);
3604         }
3605 }
3606
3607 static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp,
3608                                struct bpf_prog *xdp_prog, struct bpf_map *map,
3609                                struct bpf_redirect_info *ri)
3610 {
3611         u32 index = ri->tgt_index;
3612         void *fwd = ri->tgt_value;
3613         int err;
3614
3615         ri->tgt_index = 0;
3616         ri->tgt_value = NULL;
3617         WRITE_ONCE(ri->map, NULL);
3618
3619         if (ri->map_to_flush && unlikely(ri->map_to_flush != map))
3620                 xdp_do_flush_map();
3621
3622         err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index);
3623         if (unlikely(err))
3624                 goto err;
3625
3626         ri->map_to_flush = map;
3627         _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3628         return 0;
3629 err:
3630         _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3631         return err;
3632 }
3633
3634 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
3635                     struct bpf_prog *xdp_prog)
3636 {
3637         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3638         struct bpf_map *map = READ_ONCE(ri->map);
3639
3640         if (likely(map))
3641                 return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri);
3642
3643         return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri);
3644 }
3645 EXPORT_SYMBOL_GPL(xdp_do_redirect);
3646
3647 static int xdp_do_generic_redirect_map(struct net_device *dev,
3648                                        struct sk_buff *skb,
3649                                        struct xdp_buff *xdp,
3650                                        struct bpf_prog *xdp_prog,
3651                                        struct bpf_map *map)
3652 {
3653         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3654         u32 index = ri->tgt_index;
3655         void *fwd = ri->tgt_value;
3656         int err = 0;
3657
3658         ri->tgt_index = 0;
3659         ri->tgt_value = NULL;
3660         WRITE_ONCE(ri->map, NULL);
3661
3662         if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
3663             map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
3664                 struct bpf_dtab_netdev *dst = fwd;
3665
3666                 err = dev_map_generic_redirect(dst, skb, xdp_prog);
3667                 if (unlikely(err))
3668                         goto err;
3669         } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
3670                 struct xdp_sock *xs = fwd;
3671
3672                 err = xsk_generic_rcv(xs, xdp);
3673                 if (err)
3674                         goto err;
3675                 consume_skb(skb);
3676         } else {
3677                 /* TODO: Handle BPF_MAP_TYPE_CPUMAP */
3678                 err = -EBADRQC;
3679                 goto err;
3680         }
3681
3682         _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
3683         return 0;
3684 err:
3685         _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
3686         return err;
3687 }
3688
3689 int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
3690                             struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
3691 {
3692         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3693         struct bpf_map *map = READ_ONCE(ri->map);
3694         u32 index = ri->tgt_index;
3695         struct net_device *fwd;
3696         int err = 0;
3697
3698         if (map)
3699                 return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
3700                                                    map);
3701         ri->tgt_index = 0;
3702         fwd = dev_get_by_index_rcu(dev_net(dev), index);
3703         if (unlikely(!fwd)) {
3704                 err = -EINVAL;
3705                 goto err;
3706         }
3707
3708         err = xdp_ok_fwd_dev(fwd, skb->len);
3709         if (unlikely(err))
3710                 goto err;
3711
3712         skb->dev = fwd;
3713         _trace_xdp_redirect(dev, xdp_prog, index);
3714         generic_xdp_tx(skb, xdp_prog);
3715         return 0;
3716 err:
3717         _trace_xdp_redirect_err(dev, xdp_prog, index, err);
3718         return err;
3719 }
3720 EXPORT_SYMBOL_GPL(xdp_do_generic_redirect);
3721
3722 BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
3723 {
3724         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3725
3726         if (unlikely(flags))
3727                 return XDP_ABORTED;
3728
3729         ri->flags = flags;
3730         ri->tgt_index = ifindex;
3731         ri->tgt_value = NULL;
3732         WRITE_ONCE(ri->map, NULL);
3733
3734         return XDP_REDIRECT;
3735 }
3736
3737 static const struct bpf_func_proto bpf_xdp_redirect_proto = {
3738         .func           = bpf_xdp_redirect,
3739         .gpl_only       = false,
3740         .ret_type       = RET_INTEGER,
3741         .arg1_type      = ARG_ANYTHING,
3742         .arg2_type      = ARG_ANYTHING,
3743 };
3744
3745 BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
3746            u64, flags)
3747 {
3748         struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
3749
3750         /* Lower bits of flags are used as the return code on lookup failure */
3751         if (unlikely(flags > XDP_TX))
3752                 return XDP_ABORTED;
3753
3754         ri->tgt_value = __xdp_map_lookup_elem(map, ifindex);
3755         if (unlikely(!ri->tgt_value)) {
3756                 /* If the lookup fails we want to clear out the state in the
3757                  * redirect_info struct completely, so that if an eBPF program
3758                  * performs multiple lookups, the last one always takes
3759                  * precedence.
3760                  */
3761                 WRITE_ONCE(ri->map, NULL);
3762                 return flags;
3763         }
3764
3765         ri->flags = flags;
3766         ri->tgt_index = ifindex;
3767         WRITE_ONCE(ri->map, map);
3768
3769         return XDP_REDIRECT;
3770 }
3771
3772 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
3773         .func           = bpf_xdp_redirect_map,
3774         .gpl_only       = false,
3775         .ret_type       = RET_INTEGER,
3776         .arg1_type      = ARG_CONST_MAP_PTR,
3777         .arg2_type      = ARG_ANYTHING,
3778         .arg3_type      = ARG_ANYTHING,
3779 };
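
/* Usage sketch (not part of this file; tx_port is an assumed
 * BPF_MAP_TYPE_DEVMAP map): redirect through a devmap and fall back
 * to XDP_PASS on an empty slot, since the lower bits of flags are
 * returned on lookup failure:
 *
 *         return bpf_redirect_map(&tx_port, ifindex, XDP_PASS);
 */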
3780
3781 static unsigned long bpf_skb_copy(void *dst_buff, const void *skb,
3782                                   unsigned long off, unsigned long len)
3783 {
3784         void *ptr = skb_header_pointer(skb, off, len, dst_buff);
3785
3786         if (unlikely(!ptr))
3787                 return len;
3788         if (ptr != dst_buff)
3789                 memcpy(dst_buff, ptr, len);
3790
3791         return 0;
3792 }
3793
3794 BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map,
3795            u64, flags, void *, meta, u64, meta_size)
3796 {
3797         u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
3798
3799         if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
3800                 return -EINVAL;
3801         if (unlikely(skb_size > skb->len))
3802                 return -EFAULT;
3803
3804         return bpf_event_output(map, flags, meta, meta_size, skb, skb_size,
3805                                 bpf_skb_copy);
3806 }
3807
3808 static const struct bpf_func_proto bpf_skb_event_output_proto = {
3809         .func           = bpf_skb_event_output,
3810         .gpl_only       = true,
3811         .ret_type       = RET_INTEGER,
3812         .arg1_type      = ARG_PTR_TO_CTX,
3813         .arg2_type      = ARG_CONST_MAP_PTR,
3814         .arg3_type      = ARG_ANYTHING,
3815         .arg4_type      = ARG_PTR_TO_MEM,
3816         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
3817 };
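
/* Usage sketch (not part of this file; events is an assumed
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map): from program context this is
 * exposed as bpf_perf_event_output(); the upper 32 flag bits request
 * that the first 64 packet bytes be appended to the sample:
 *
 *         __u64 flags = BPF_F_CURRENT_CPU | (64ULL << 32);
 *
 *         bpf_perf_event_output(skb, &events, flags, &meta, sizeof(meta));
 */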
3818
3819 static unsigned short bpf_tunnel_key_af(u64 flags)
3820 {
3821         return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET;
3822 }
3823
3824 BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to,
3825            u32, size, u64, flags)
3826 {
3827         const struct ip_tunnel_info *info = skb_tunnel_info(skb);
3828         u8 compat[sizeof(struct bpf_tunnel_key)];
3829         void *to_orig = to;
3830         int err;
3831
3832         if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) {
3833                 err = -EINVAL;
3834                 goto err_clear;
3835         }
3836         if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) {
3837                 err = -EPROTO;
3838                 goto err_clear;
3839         }
3840         if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3841                 err = -EINVAL;
3842                 switch (size) {
3843                 case offsetof(struct bpf_tunnel_key, tunnel_label):
3844                 case offsetof(struct bpf_tunnel_key, tunnel_ext):
3845                         goto set_compat;
3846                 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3847                         /* Fixup deprecated structure layouts here, so we have
3848                          * a common path later on.
3849                          */
3850                         if (ip_tunnel_info_af(info) != AF_INET)
3851                                 goto err_clear;
3852 set_compat:
3853                         to = (struct bpf_tunnel_key *)compat;
3854                         break;
3855                 default:
3856                         goto err_clear;
3857                 }
3858         }
3859
3860         to->tunnel_id = be64_to_cpu(info->key.tun_id);
3861         to->tunnel_tos = info->key.tos;
3862         to->tunnel_ttl = info->key.ttl;
3863         to->tunnel_ext = 0;
3864
3865         if (flags & BPF_F_TUNINFO_IPV6) {
3866                 memcpy(to->remote_ipv6, &info->key.u.ipv6.src,
3867                        sizeof(to->remote_ipv6));
3868                 to->tunnel_label = be32_to_cpu(info->key.label);
3869         } else {
3870                 to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src);
3871                 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
3872                 to->tunnel_label = 0;
3873         }
3874
3875         if (unlikely(size != sizeof(struct bpf_tunnel_key)))
3876                 memcpy(to_orig, to, size);
3877
3878         return 0;
3879 err_clear:
3880         memset(to_orig, 0, size);
3881         return err;
3882 }
3883
3884 static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = {
3885         .func           = bpf_skb_get_tunnel_key,
3886         .gpl_only       = false,
3887         .ret_type       = RET_INTEGER,
3888         .arg1_type      = ARG_PTR_TO_CTX,
3889         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
3890         .arg3_type      = ARG_CONST_SIZE,
3891         .arg4_type      = ARG_ANYTHING,
3892 };
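
/* Usage sketch (not part of this file): a tc program on a collect_md
 * tunnel device matching on the tunnel id:
 *
 *         struct bpf_tunnel_key key = {};
 *
 *         if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0))
 *                 return TC_ACT_SHOT;
 *         if (key.tunnel_id != 42)
 *                 return TC_ACT_SHOT;
 */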
3893
3894 BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size)
3895 {
3896         const struct ip_tunnel_info *info = skb_tunnel_info(skb);
3897         int err;
3898
3899         if (unlikely(!info ||
3900                      !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) {
3901                 err = -ENOENT;
3902                 goto err_clear;
3903         }
3904         if (unlikely(size < info->options_len)) {
3905                 err = -ENOMEM;
3906                 goto err_clear;
3907         }
3908
3909         ip_tunnel_info_opts_get(to, info);
3910         if (size > info->options_len)
3911                 memset(to + info->options_len, 0, size - info->options_len);
3912
3913         return info->options_len;
3914 err_clear:
3915         memset(to, 0, size);
3916         return err;
3917 }
3918
3919 static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = {
3920         .func           = bpf_skb_get_tunnel_opt,
3921         .gpl_only       = false,
3922         .ret_type       = RET_INTEGER,
3923         .arg1_type      = ARG_PTR_TO_CTX,
3924         .arg2_type      = ARG_PTR_TO_UNINIT_MEM,
3925         .arg3_type      = ARG_CONST_SIZE,
3926 };
3927
3928 static struct metadata_dst __percpu *md_dst;
3929
3930 BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb,
3931            const struct bpf_tunnel_key *, from, u32, size, u64, flags)
3932 {
3933         struct metadata_dst *md = this_cpu_ptr(md_dst);
3934         u8 compat[sizeof(struct bpf_tunnel_key)];
3935         struct ip_tunnel_info *info;
3936
3937         if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX |
3938                                BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER)))
3939                 return -EINVAL;
3940         if (unlikely(size != sizeof(struct bpf_tunnel_key))) {
3941                 switch (size) {
3942                 case offsetof(struct bpf_tunnel_key, tunnel_label):
3943                 case offsetof(struct bpf_tunnel_key, tunnel_ext):
3944                 case offsetof(struct bpf_tunnel_key, remote_ipv6[1]):
3945                         /* Fixup deprecated structure layouts here, so we have
3946                          * a common path later on.
3947                          */
3948                         memcpy(compat, from, size);
3949                         memset(compat + size, 0, sizeof(compat) - size);
3950                         from = (const struct bpf_tunnel_key *) compat;
3951                         break;
3952                 default:
3953                         return -EINVAL;
3954                 }
3955         }
3956         if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) ||
3957                      from->tunnel_ext))
3958                 return -EINVAL;
3959
3960         skb_dst_drop(skb);
3961         dst_hold((struct dst_entry *) md);
3962         skb_dst_set(skb, (struct dst_entry *) md);
3963
3964         info = &md->u.tun_info;
3965         memset(info, 0, sizeof(*info));
3966         info->mode = IP_TUNNEL_INFO_TX;
3967
3968         info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE;
3969         if (flags & BPF_F_DONT_FRAGMENT)
3970                 info->key.tun_flags |= TUNNEL_DONT_FRAGMENT;
3971         if (flags & BPF_F_ZERO_CSUM_TX)
3972                 info->key.tun_flags &= ~TUNNEL_CSUM;
3973         if (flags & BPF_F_SEQ_NUMBER)
3974                 info->key.tun_flags |= TUNNEL_SEQ;
3975
3976         info->key.tun_id = cpu_to_be64(from->tunnel_id);
3977         info->key.tos = from->tunnel_tos;
3978         info->key.ttl = from->tunnel_ttl;
3979
3980         if (flags & BPF_F_TUNINFO_IPV6) {
3981                 info->mode |= IP_TUNNEL_INFO_IPV6;
3982                 memcpy(&info->key.u.ipv6.dst, from->remote_ipv6,
3983                        sizeof(from->remote_ipv6));
3984                 info->key.label = cpu_to_be32(from->tunnel_label) &
3985                                   IPV6_FLOWLABEL_MASK;
3986         } else {
3987                 info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4);
3988         }
3989
3990         return 0;
3991 }
3992
3993 static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = {
3994         .func           = bpf_skb_set_tunnel_key,
3995         .gpl_only       = false,
3996         .ret_type       = RET_INTEGER,
3997         .arg1_type      = ARG_PTR_TO_CTX,
3998         .arg2_type      = ARG_PTR_TO_MEM,
3999         .arg3_type      = ARG_CONST_SIZE,
4000         .arg4_type      = ARG_ANYTHING,
4001 };
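
/* Usage sketch (not part of this file): a tc egress program on a
 * collect_md device setting the outer tunnel parameters before the
 * tunnel driver transmits:
 *
 *         struct bpf_tunnel_key key = {
 *                 .tunnel_id   = 42,
 *                 .remote_ipv4 = 0xac100164,  // 172.16.1.100
 *                 .tunnel_ttl  = 64,
 *         };
 *
 *         if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *                                    BPF_F_ZERO_CSUM_TX))
 *                 return TC_ACT_SHOT;
 */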
4002
4003 BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb,
4004            const u8 *, from, u32, size)
4005 {
4006         struct ip_tunnel_info *info = skb_tunnel_info(skb);
4007         const struct metadata_dst *md = this_cpu_ptr(md_dst);
4008
4009         if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1))))
4010                 return -EINVAL;
4011         if (unlikely(size > IP_TUNNEL_OPTS_MAX))
4012                 return -ENOMEM;
4013
4014         ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT);
4015
4016         return 0;
4017 }
4018
4019 static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = {
4020         .func           = bpf_skb_set_tunnel_opt,
4021         .gpl_only       = false,
4022         .ret_type       = RET_INTEGER,
4023         .arg1_type      = ARG_PTR_TO_CTX,
4024         .arg2_type      = ARG_PTR_TO_MEM,
4025         .arg3_type      = ARG_CONST_SIZE,
4026 };
4027
4028 static const struct bpf_func_proto *
4029 bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
4030 {
4031         if (!md_dst) {
4032                 struct metadata_dst __percpu *tmp;
4033
4034                 tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
4035                                                 METADATA_IP_TUNNEL,
4036                                                 GFP_KERNEL);
4037                 if (!tmp)
4038                         return NULL;
4039                 if (cmpxchg(&md_dst, NULL, tmp))
4040                         metadata_dst_free_percpu(tmp);
4041         }
4042
4043         switch (which) {
4044         case BPF_FUNC_skb_set_tunnel_key:
4045                 return &bpf_skb_set_tunnel_key_proto;
4046         case BPF_FUNC_skb_set_tunnel_opt:
4047                 return &bpf_skb_set_tunnel_opt_proto;
4048         default:
4049                 return NULL;
4050         }
4051 }
4052
4053 BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map,
4054            u32, idx)
4055 {
4056         struct bpf_array *array = container_of(map, struct bpf_array, map);
4057         struct cgroup *cgrp;
4058         struct sock *sk;
4059
4060         sk = skb_to_full_sk(skb);
4061         if (!sk || !sk_fullsock(sk))
4062                 return -ENOENT;
4063         if (unlikely(idx >= array->map.max_entries))
4064                 return -E2BIG;
4065
4066         cgrp = READ_ONCE(array->ptrs[idx]);
4067         if (unlikely(!cgrp))
4068                 return -EAGAIN;
4069
4070         return sk_under_cgroup_hierarchy(sk, cgrp);
4071 }
4072
4073 static const struct bpf_func_proto bpf_skb_under_cgroup_proto = {
4074         .func           = bpf_skb_under_cgroup,
4075         .gpl_only       = false,
4076         .ret_type       = RET_INTEGER,
4077         .arg1_type      = ARG_PTR_TO_CTX,
4078         .arg2_type      = ARG_CONST_MAP_PTR,
4079         .arg3_type      = ARG_ANYTHING,
4080 };
4081
4082 #ifdef CONFIG_SOCK_CGROUP_DATA
4083 BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb)
4084 {
4085         struct sock *sk = skb_to_full_sk(skb);
4086         struct cgroup *cgrp;
4087
4088         if (!sk || !sk_fullsock(sk))
4089                 return 0;
4090
4091         cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4092         return cgrp->kn->id.id;
4093 }
4094
4095 static const struct bpf_func_proto bpf_skb_cgroup_id_proto = {
4096         .func           = bpf_skb_cgroup_id,
4097         .gpl_only       = false,
4098         .ret_type       = RET_INTEGER,
4099         .arg1_type      = ARG_PTR_TO_CTX,
4100 };
4101
4102 BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int,
4103            ancestor_level)
4104 {
4105         struct sock *sk = skb_to_full_sk(skb);
4106         struct cgroup *ancestor;
4107         struct cgroup *cgrp;
4108
4109         if (!sk || !sk_fullsock(sk))
4110                 return 0;
4111
4112         cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
4113         ancestor = cgroup_ancestor(cgrp, ancestor_level);
4114         if (!ancestor)
4115                 return 0;
4116
4117         return ancestor->kn->id.id;
4118 }
4119
4120 static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = {
4121         .func           = bpf_skb_ancestor_cgroup_id,
4122         .gpl_only       = false,
4123         .ret_type       = RET_INTEGER,
4124         .arg1_type      = ARG_PTR_TO_CTX,
4125         .arg2_type      = ARG_ANYTHING,
4126 };
4127 #endif
4128
4129 static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff,
4130                                   unsigned long off, unsigned long len)
4131 {
4132         memcpy(dst_buff, src_buff + off, len);
4133         return 0;
4134 }
4135
4136 BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map,
4137            u64, flags, void *, meta, u64, meta_size)
4138 {
4139         u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32;
4140
4141         if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK)))
4142                 return -EINVAL;
4143         if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data)))
4144                 return -EFAULT;
4145
4146         return bpf_event_output(map, flags, meta, meta_size, xdp->data,
4147                                 xdp_size, bpf_xdp_copy);
4148 }
4149
4150 static const struct bpf_func_proto bpf_xdp_event_output_proto = {
4151         .func           = bpf_xdp_event_output,
4152         .gpl_only       = true,
4153         .ret_type       = RET_INTEGER,
4154         .arg1_type      = ARG_PTR_TO_CTX,
4155         .arg2_type      = ARG_CONST_MAP_PTR,
4156         .arg3_type      = ARG_ANYTHING,
4157         .arg4_type      = ARG_PTR_TO_MEM,
4158         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
4159 };
4160
4161 BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb)
4162 {
4163         return skb->sk ? sock_gen_cookie(skb->sk) : 0;
4164 }
4165
4166 static const struct bpf_func_proto bpf_get_socket_cookie_proto = {
4167         .func           = bpf_get_socket_cookie,
4168         .gpl_only       = false,
4169         .ret_type       = RET_INTEGER,
4170         .arg1_type      = ARG_PTR_TO_CTX,
4171 };
4172
4173 BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx)
4174 {
4175         return sock_gen_cookie(ctx->sk);
4176 }
4177
4178 static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = {
4179         .func           = bpf_get_socket_cookie_sock_addr,
4180         .gpl_only       = false,
4181         .ret_type       = RET_INTEGER,
4182         .arg1_type      = ARG_PTR_TO_CTX,
4183 };
4184
4185 BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx)
4186 {
4187         return sock_gen_cookie(ctx->sk);
4188 }
4189
4190 static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = {
4191         .func           = bpf_get_socket_cookie_sock_ops,
4192         .gpl_only       = false,
4193         .ret_type       = RET_INTEGER,
4194         .arg1_type      = ARG_PTR_TO_CTX,
4195 };
4196
4197 BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb)
4198 {
4199         struct sock *sk = sk_to_full_sk(skb->sk);
4200         kuid_t kuid;
4201
4202         if (!sk || !sk_fullsock(sk))
4203                 return overflowuid;
4204         kuid = sock_net_uid(sock_net(sk), sk);
4205         return from_kuid_munged(sock_net(sk)->user_ns, kuid);
4206 }
4207
4208 static const struct bpf_func_proto bpf_get_socket_uid_proto = {
4209         .func           = bpf_get_socket_uid,
4210         .gpl_only       = false,
4211         .ret_type       = RET_INTEGER,
4212         .arg1_type      = ARG_PTR_TO_CTX,
4213 };
4214
4215 BPF_CALL_5(bpf_sockopt_event_output, struct bpf_sock_ops_kern *, bpf_sock,
4216            struct bpf_map *, map, u64, flags, void *, data, u64, size)
4217 {
4218         if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
4219                 return -EINVAL;
4220
4221         return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
4222 }
4223
4224 static const struct bpf_func_proto bpf_sockopt_event_output_proto =  {
4225         .func           = bpf_sockopt_event_output,
4226         .gpl_only       = true,
4227         .ret_type       = RET_INTEGER,
4228         .arg1_type      = ARG_PTR_TO_CTX,
4229         .arg2_type      = ARG_CONST_MAP_PTR,
4230         .arg3_type      = ARG_ANYTHING,
4231         .arg4_type      = ARG_PTR_TO_MEM,
4232         .arg5_type      = ARG_CONST_SIZE_OR_ZERO,
4233 };
4234
4235 BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4236            int, level, int, optname, char *, optval, int, optlen)
4237 {
4238         struct sock *sk = bpf_sock->sk;
4239         int ret = 0;
4240         int val;
4241
4242         if (!sk_fullsock(sk))
4243                 return -EINVAL;
4244
4245         if (level == SOL_SOCKET) {
4246                 if (optlen != sizeof(int))
4247                         return -EINVAL;
4248                 val = *((int *)optval);
4249
4250                 /* Only some socket options are supported */
4251                 switch (optname) {
4252                 case SO_RCVBUF:
4253                         val = min_t(u32, val, sysctl_rmem_max);
4254                         sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4255                         WRITE_ONCE(sk->sk_rcvbuf,
4256                                    max_t(int, val * 2, SOCK_MIN_RCVBUF));
4257                         break;
4258                 case SO_SNDBUF:
4259                         val = min_t(u32, val, sysctl_wmem_max);
4260                         sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4261                         sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
4262                         break;
4263                 case SO_MAX_PACING_RATE: /* 32bit version */
4264                         if (val != ~0U)
4265                                 cmpxchg(&sk->sk_pacing_status,
4266                                         SK_PACING_NONE,
4267                                         SK_PACING_NEEDED);
4268                         sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val;
4269                         sk->sk_pacing_rate = min(sk->sk_pacing_rate,
4270                                                  sk->sk_max_pacing_rate);
4271                         break;
4272                 case SO_PRIORITY:
4273                         sk->sk_priority = val;
4274                         break;
4275                 case SO_RCVLOWAT:
4276                         if (val < 0)
4277                                 val = INT_MAX;
4278                         WRITE_ONCE(sk->sk_rcvlowat, val ? : 1);
4279                         break;
4280                 case SO_MARK:
4281                         if (sk->sk_mark != val) {
4282                                 sk->sk_mark = val;
4283                                 sk_dst_reset(sk);
4284                         }
4285                         break;
4286                 default:
4287                         ret = -EINVAL;
4288                 }
4289 #ifdef CONFIG_INET
4290         } else if (level == SOL_IP) {
4291                 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4292                         return -EINVAL;
4293
4294                 val = *((int *)optval);
4295                 /* Only some options are supported */
4296                 switch (optname) {
4297                 case IP_TOS:
4298                         if (val < -1 || val > 0xff) {
4299                                 ret = -EINVAL;
4300                         } else {
4301                                 struct inet_sock *inet = inet_sk(sk);
4302
4303                                 if (val == -1)
4304                                         val = 0;
4305                                 inet->tos = val;
4306                         }
4307                         break;
4308                 default:
4309                         ret = -EINVAL;
4310                 }
4311 #if IS_ENABLED(CONFIG_IPV6)
4312         } else if (level == SOL_IPV6) {
4313                 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4314                         return -EINVAL;
4315
4316                 val = *((int *)optval);
4317                 /* Only some options are supported */
4318                 switch (optname) {
4319                 case IPV6_TCLASS:
4320                         if (val < -1 || val > 0xff) {
4321                                 ret = -EINVAL;
4322                         } else {
4323                                 struct ipv6_pinfo *np = inet6_sk(sk);
4324
4325                                 if (val == -1)
4326                                         val = 0;
4327                                 np->tclass = val;
4328                         }
4329                         break;
4330                 default:
4331                         ret = -EINVAL;
4332                 }
4333 #endif
4334         } else if (level == SOL_TCP &&
4335                    sk->sk_prot->setsockopt == tcp_setsockopt) {
4336                 if (optname == TCP_CONGESTION) {
4337                         char name[TCP_CA_NAME_MAX];
4338                         bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
4339
4340                         strncpy(name, optval, min_t(long, optlen,
4341                                                     TCP_CA_NAME_MAX-1));
4342                         name[TCP_CA_NAME_MAX-1] = 0;
4343                         ret = tcp_set_congestion_control(sk, name, false,
4344                                                          reinit, true);
4345                 } else {
4346                         struct tcp_sock *tp = tcp_sk(sk);
4347
4348                         if (optlen != sizeof(int))
4349                                 return -EINVAL;
4350
4351                         val = *((int *)optval);
4352                         /* Only some options are supported */
4353                         switch (optname) {
4354                         case TCP_BPF_IW:
4355                                 if (val <= 0 || tp->data_segs_out > tp->syn_data)
4356                                         ret = -EINVAL;
4357                                 else
4358                                         tp->snd_cwnd = val;
4359                                 break;
4360                         case TCP_BPF_SNDCWND_CLAMP:
4361                                 if (val <= 0) {
4362                                         ret = -EINVAL;
4363                                 } else {
4364                                         tp->snd_cwnd_clamp = val;
4365                                         tp->snd_ssthresh = val;
4366                                 }
4367                                 break;
4368                         case TCP_SAVE_SYN:
4369                                 if (val < 0 || val > 1)
4370                                         ret = -EINVAL;
4371                                 else
4372                                         tp->save_syn = val;
4373                                 break;
4374                         default:
4375                                 ret = -EINVAL;
4376                         }
4377                 }
4378 #endif
4379         } else {
4380                 ret = -EINVAL;
4381         }
4382         return ret;
4383 }
4384
4385 static const struct bpf_func_proto bpf_setsockopt_proto = {
4386         .func           = bpf_setsockopt,
4387         .gpl_only       = false,
4388         .ret_type       = RET_INTEGER,
4389         .arg1_type      = ARG_PTR_TO_CTX,
4390         .arg2_type      = ARG_ANYTHING,
4391         .arg3_type      = ARG_ANYTHING,
4392         .arg4_type      = ARG_PTR_TO_MEM,
4393         .arg5_type      = ARG_CONST_SIZE,
4394 };
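
/* Illustrative BPF-side usage (a sketch, not part of this file): a sockops
 * program can seed the initial congestion window right after the active
 * handshake completes, which is the only window in which TCP_BPF_IW is
 * accepted above (data_segs_out must not exceed syn_data). Program and
 * variable names are invented for the example; it assumes libbpf's
 * bpf_helpers.h for the helper declarations.
 *
 *	SEC("sockops")
 *	int set_initial_cwnd(struct bpf_sock_ops *skops)
 *	{
 *		int iw = 40;
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_setsockopt(skops, SOL_TCP, TCP_BPF_IW,
 *				       &iw, sizeof(iw));
 *		return 1;
 *	}
 */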
4395
4396 BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4397            int, level, int, optname, char *, optval, int, optlen)
4398 {
4399         struct sock *sk = bpf_sock->sk;
4400
4401         if (!sk_fullsock(sk))
4402                 goto err_clear;
4403 #ifdef CONFIG_INET
4404         if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
4405                 struct inet_connection_sock *icsk;
4406                 struct tcp_sock *tp;
4407
4408                 switch (optname) {
4409                 case TCP_CONGESTION:
4410                         icsk = inet_csk(sk);
4411
4412                         if (!icsk->icsk_ca_ops || optlen <= 1)
4413                                 goto err_clear;
4414                         strncpy(optval, icsk->icsk_ca_ops->name, optlen);
4415                         optval[optlen - 1] = 0;
4416                         break;
4417                 case TCP_SAVED_SYN:
4418                         tp = tcp_sk(sk);
4419
4420                         if (optlen <= 0 || !tp->saved_syn ||
4421                             optlen > tp->saved_syn[0])
4422                                 goto err_clear;
4423                         memcpy(optval, tp->saved_syn + 1, optlen);
4424                         break;
4425                 default:
4426                         goto err_clear;
4427                 }
4428         } else if (level == SOL_IP) {
4429                 struct inet_sock *inet = inet_sk(sk);
4430
4431                 if (optlen != sizeof(int) || sk->sk_family != AF_INET)
4432                         goto err_clear;
4433
4434                 /* Only some options are supported */
4435                 switch (optname) {
4436                 case IP_TOS:
4437                         *((int *)optval) = (int)inet->tos;
4438                         break;
4439                 default:
4440                         goto err_clear;
4441                 }
4442 #if IS_ENABLED(CONFIG_IPV6)
4443         } else if (level == SOL_IPV6) {
4444                 struct ipv6_pinfo *np = inet6_sk(sk);
4445
4446                 if (optlen != sizeof(int) || sk->sk_family != AF_INET6)
4447                         goto err_clear;
4448
4449                 /* Only some options are supported */
4450                 switch (optname) {
4451                 case IPV6_TCLASS:
4452                         *((int *)optval) = (int)np->tclass;
4453                         break;
4454                 default:
4455                         goto err_clear;
4456                 }
4457 #endif
4458         } else {
4459                 goto err_clear;
4460         }
4461         return 0;
4462 #endif
4463 err_clear:
4464         memset(optval, 0, optlen);
4465         return -EINVAL;
4466 }
4467
4468 static const struct bpf_func_proto bpf_getsockopt_proto = {
4469         .func           = bpf_getsockopt,
4470         .gpl_only       = false,
4471         .ret_type       = RET_INTEGER,
4472         .arg1_type      = ARG_PTR_TO_CTX,
4473         .arg2_type      = ARG_ANYTHING,
4474         .arg3_type      = ARG_ANYTHING,
4475         .arg4_type      = ARG_PTR_TO_UNINIT_MEM,
4476         .arg5_type      = ARG_CONST_SIZE,
4477 };
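
/* Read-side sketch (illustrative only): the same sockops hook can query
 * which congestion control algorithm the connection ended up with. The
 * buffer size mirrors TCP_CA_NAME_MAX (16), matching the strncpy() bound
 * enforced above; program and buffer names are for the example.
 *
 *	SEC("sockops")
 *	int log_cc(struct bpf_sock_ops *skops)
 *	{
 *		char cc[16];
 *
 *		if (!bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION,
 *				    cc, sizeof(cc)))
 *			bpf_printk("cc: %s", cc);
 *		return 1;
 *	}
 */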
4478
4479 BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock,
4480            int, argval)
4481 {
4482         struct sock *sk = bpf_sock->sk;
4483         int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS;
4484
4485         if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk))
4486                 return -EINVAL;
4487
4488         tcp_sk(sk)->bpf_sock_ops_cb_flags = val;
4489
4490         return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS);
4491 }
4492
4493 static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = {
4494         .func           = bpf_sock_ops_cb_flags_set,
4495         .gpl_only       = false,
4496         .ret_type       = RET_INTEGER,
4497         .arg1_type      = ARG_PTR_TO_CTX,
4498         .arg2_type      = ARG_ANYTHING,
4499 };
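
/* Usage sketch (illustrative): a sockops program opting in to the RTO and
 * retransmission callbacks once the connection is established. Bits
 * outside BPF_SOCK_OPS_ALL_CB_FLAGS are returned rather than applied, so
 * a zero return means every requested flag was recognized.
 *
 *	SEC("sockops")
 *	int enable_cbs(struct bpf_sock_ops *skops)
 *	{
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			bpf_sock_ops_cb_flags_set(skops,
 *						  BPF_SOCK_OPS_RTO_CB_FLAG |
 *						  BPF_SOCK_OPS_RETRANS_CB_FLAG);
 *		return 1;
 *	}
 */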
4500
4501 const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;
4502 EXPORT_SYMBOL_GPL(ipv6_bpf_stub);
4503
4504 BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr,
4505            int, addr_len)
4506 {
4507 #ifdef CONFIG_INET
4508         struct sock *sk = ctx->sk;
4509         int err;
4510
4511         /* Binding to a port can be expensive, so it's prohibited in this
4512          * helper; only binding to an IP address is supported.
4513          */
4514         err = -EINVAL;
4515         if (addr_len < offsetofend(struct sockaddr, sa_family))
4516                 return err;
4517         if (addr->sa_family == AF_INET) {
4518                 if (addr_len < sizeof(struct sockaddr_in))
4519                         return err;
4520                 if (((struct sockaddr_in *)addr)->sin_port != htons(0))
4521                         return err;
4522                 return __inet_bind(sk, addr, addr_len, true, false);
4523 #if IS_ENABLED(CONFIG_IPV6)
4524         } else if (addr->sa_family == AF_INET6) {
4525                 if (addr_len < SIN6_LEN_RFC2133)
4526                         return err;
4527                 if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0))
4528                         return err;
4529                 /* ipv6_bpf_stub cannot be NULL, since it's called from
4530                  * bpf_cgroup_inet6_connect hook and ipv6 is already loaded
4531                  */
4532                 return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false);
4533 #endif /* CONFIG_IPV6 */
4534         }
4535 #endif /* CONFIG_INET */
4536
4537         return -EAFNOSUPPORT;
4538 }
4539
4540 static const struct bpf_func_proto bpf_bind_proto = {
4541         .func           = bpf_bind,
4542         .gpl_only       = false,
4543         .ret_type       = RET_INTEGER,
4544         .arg1_type      = ARG_PTR_TO_CTX,
4545         .arg2_type      = ARG_PTR_TO_MEM,
4546         .arg3_type      = ARG_CONST_SIZE,
4547 };
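
/* Illustrative caller (a sketch, not kernel code): a cgroup/connect4
 * program pinning the source address before connect(). sin_port is left
 * zero as required by the port check above; 0x0a000001 (10.0.0.1) is a
 * placeholder address, and bpf_htonl() comes from libbpf's bpf_endian.h.
 *
 *	SEC("cgroup/connect4")
 *	int bind_src(struct bpf_sock_addr *ctx)
 *	{
 *		struct sockaddr_in sa = {
 *			.sin_family = AF_INET,
 *			.sin_addr.s_addr = bpf_htonl(0x0a000001),
 *		};
 *
 *		bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa));
 *		return 1;
 *	}
 */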
4548
4549 #ifdef CONFIG_XFRM
4550 BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index,
4551            struct bpf_xfrm_state *, to, u32, size, u64, flags)
4552 {
4553         const struct sec_path *sp = skb_sec_path(skb);
4554         const struct xfrm_state *x;
4555
4556         if (!sp || unlikely(index >= sp->len || flags))
4557                 goto err_clear;
4558
4559         x = sp->xvec[index];
4560
4561         if (unlikely(size != sizeof(struct bpf_xfrm_state)))
4562                 goto err_clear;
4563
4564         to->reqid = x->props.reqid;
4565         to->spi = x->id.spi;
4566         to->family = x->props.family;
4567         to->ext = 0;
4568
4569         if (to->family == AF_INET6) {
4570                 memcpy(to->remote_ipv6, x->props.saddr.a6,
4571                        sizeof(to->remote_ipv6));
4572         } else {
4573                 to->remote_ipv4 = x->props.saddr.a4;
4574                 memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3);
4575         }
4576
4577         return 0;
4578 err_clear:
4579         memset(to, 0, size);
4580         return -EINVAL;
4581 }
4582
4583 static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = {
4584         .func           = bpf_skb_get_xfrm_state,
4585         .gpl_only       = false,
4586         .ret_type       = RET_INTEGER,
4587         .arg1_type      = ARG_PTR_TO_CTX,
4588         .arg2_type      = ARG_ANYTHING,
4589         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
4590         .arg4_type      = ARG_CONST_SIZE,
4591         .arg5_type      = ARG_ANYTHING,
4592 };
4593 #endif
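
/* Usage sketch (illustrative, assumes a tc/cls_bpf program with libbpf's
 * bpf_helpers.h and TC_ACT_OK from linux/pkt_cls.h): reading the reqid of
 * the first transform applied to a received packet. The index 0 and the
 * program name are for the example only.
 *
 *	SEC("tc")
 *	int dump_xfrm(struct __sk_buff *skb)
 *	{
 *		struct bpf_xfrm_state xs;
 *
 *		if (!bpf_skb_get_xfrm_state(skb, 0, &xs, sizeof(xs), 0))
 *			bpf_printk("reqid: %u", xs.reqid);
 *		return TC_ACT_OK;
 *	}
 */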
4594
4595 #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6)
4596 static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
4597                                   const struct neighbour *neigh,
4598                                   const struct net_device *dev)
4599 {
4600         memcpy(params->dmac, neigh->ha, ETH_ALEN);
4601         memcpy(params->smac, dev->dev_addr, ETH_ALEN);
4602         params->h_vlan_TCI = 0;
4603         params->h_vlan_proto = 0;
4604         params->ifindex = dev->ifindex;
4605
4606         return 0;
4607 }
4608 #endif
4609
4610 #if IS_ENABLED(CONFIG_INET)
4611 static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4612                                u32 flags, bool check_mtu)
4613 {
4614         struct fib_nh_common *nhc;
4615         struct in_device *in_dev;
4616         struct neighbour *neigh;
4617         struct net_device *dev;
4618         struct fib_result res;
4619         struct flowi4 fl4;
4620         int err;
4621         u32 mtu;
4622
4623         dev = dev_get_by_index_rcu(net, params->ifindex);
4624         if (unlikely(!dev))
4625                 return -ENODEV;
4626
4627         /* verify forwarding is enabled on this interface */
4628         in_dev = __in_dev_get_rcu(dev);
4629         if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
4630                 return BPF_FIB_LKUP_RET_FWD_DISABLED;
4631
4632         if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4633                 fl4.flowi4_iif = 1;
4634                 fl4.flowi4_oif = params->ifindex;
4635         } else {
4636                 fl4.flowi4_iif = params->ifindex;
4637                 fl4.flowi4_oif = 0;
4638         }
4639         fl4.flowi4_tos = params->tos & IPTOS_RT_MASK;
4640         fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
4641         fl4.flowi4_flags = 0;
4642
4643         fl4.flowi4_proto = params->l4_protocol;
4644         fl4.daddr = params->ipv4_dst;
4645         fl4.saddr = params->ipv4_src;
4646         fl4.fl4_sport = params->sport;
4647         fl4.fl4_dport = params->dport;
4648
4649         if (flags & BPF_FIB_LOOKUP_DIRECT) {
4650                 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4651                 struct fib_table *tb;
4652
4653                 tb = fib_get_table(net, tbid);
4654                 if (unlikely(!tb))
4655                         return BPF_FIB_LKUP_RET_NOT_FWDED;
4656
4657                 err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
4658         } else {
4659                 fl4.flowi4_mark = 0;
4660                 fl4.flowi4_secid = 0;
4661                 fl4.flowi4_tun_key.tun_id = 0;
4662                 fl4.flowi4_uid = sock_net_uid(net, NULL);
4663
4664                 err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
4665         }
4666
4667         if (err) {
4668                 /* map fib lookup errors to RTN_ type */
4669                 if (err == -EINVAL)
4670                         return BPF_FIB_LKUP_RET_BLACKHOLE;
4671                 if (err == -EHOSTUNREACH)
4672                         return BPF_FIB_LKUP_RET_UNREACHABLE;
4673                 if (err == -EACCES)
4674                         return BPF_FIB_LKUP_RET_PROHIBIT;
4675
4676                 return BPF_FIB_LKUP_RET_NOT_FWDED;
4677         }
4678
4679         if (res.type != RTN_UNICAST)
4680                 return BPF_FIB_LKUP_RET_NOT_FWDED;
4681
4682         if (fib_info_num_path(res.fi) > 1)
4683                 fib_select_path(net, &res, &fl4, NULL);
4684
4685         if (check_mtu) {
4686                 mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
4687                 if (params->tot_len > mtu)
4688                         return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4689         }
4690
4691         nhc = res.nhc;
4692
4693         /* do not handle lwt encaps right now */
4694         if (nhc->nhc_lwtstate)
4695                 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
4696
4697         dev = nhc->nhc_dev;
4698
4699         params->rt_metric = res.fi->fib_priority;
4700
4701         /* xdp and cls_bpf programs are run in RCU-bh so
4702          * rcu_read_lock_bh is not needed here
4703          */
4704         if (likely(nhc->nhc_gw_family != AF_INET6)) {
4705                 if (nhc->nhc_gw_family)
4706                         params->ipv4_dst = nhc->nhc_gw.ipv4;
4707
4708                 neigh = __ipv4_neigh_lookup_noref(dev,
4709                                                  (__force u32)params->ipv4_dst);
4710         } else {
4711                 struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst;
4712
4713                 params->family = AF_INET6;
4714                 *dst = nhc->nhc_gw.ipv6;
4715                 neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
4716         }
4717
4718         if (!neigh)
4719                 return BPF_FIB_LKUP_RET_NO_NEIGH;
4720
4721         return bpf_fib_set_fwd_params(params, neigh, dev);
4722 }
4723 #endif
4724
4725 #if IS_ENABLED(CONFIG_IPV6)
4726 static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
4727                                u32 flags, bool check_mtu)
4728 {
4729         struct in6_addr *src = (struct in6_addr *) params->ipv6_src;
4730         struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst;
4731         struct fib6_result res = {};
4732         struct neighbour *neigh;
4733         struct net_device *dev;
4734         struct inet6_dev *idev;
4735         struct flowi6 fl6;
4736         int strict = 0;
4737         int oif, err;
4738         u32 mtu;
4739
4740         /* link local addresses are never forwarded */
4741         if (rt6_need_strict(dst) || rt6_need_strict(src))
4742                 return BPF_FIB_LKUP_RET_NOT_FWDED;
4743
4744         dev = dev_get_by_index_rcu(net, params->ifindex);
4745         if (unlikely(!dev))
4746                 return -ENODEV;
4747
4748         idev = __in6_dev_get_safely(dev);
4749         if (unlikely(!idev || !idev->cnf.forwarding))
4750                 return BPF_FIB_LKUP_RET_FWD_DISABLED;
4751
4752         if (flags & BPF_FIB_LOOKUP_OUTPUT) {
4753                 fl6.flowi6_iif = 1;
4754                 oif = fl6.flowi6_oif = params->ifindex;
4755         } else {
4756                 oif = fl6.flowi6_iif = params->ifindex;
4757                 fl6.flowi6_oif = 0;
4758                 strict = RT6_LOOKUP_F_HAS_SADDR;
4759         }
4760         fl6.flowlabel = params->flowinfo;
4761         fl6.flowi6_scope = 0;
4762         fl6.flowi6_flags = 0;
4763         fl6.mp_hash = 0;
4764
4765         fl6.flowi6_proto = params->l4_protocol;
4766         fl6.daddr = *dst;
4767         fl6.saddr = *src;
4768         fl6.fl6_sport = params->sport;
4769         fl6.fl6_dport = params->dport;
4770
4771         if (flags & BPF_FIB_LOOKUP_DIRECT) {
4772                 u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
4773                 struct fib6_table *tb;
4774
4775                 tb = ipv6_stub->fib6_get_table(net, tbid);
4776                 if (unlikely(!tb))
4777                         return BPF_FIB_LKUP_RET_NOT_FWDED;
4778
4779                 err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res,
4780                                                    strict);
4781         } else {
4782                 fl6.flowi6_mark = 0;
4783                 fl6.flowi6_secid = 0;
4784                 fl6.flowi6_tun_key.tun_id = 0;
4785                 fl6.flowi6_uid = sock_net_uid(net, NULL);
4786
4787                 err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict);
4788         }
4789
4790         if (unlikely(err || IS_ERR_OR_NULL(res.f6i) ||
4791                      res.f6i == net->ipv6.fib6_null_entry))
4792                 return BPF_FIB_LKUP_RET_NOT_FWDED;
4793
4794         switch (res.fib6_type) {
4795         /* only unicast is forwarded */
4796         case RTN_UNICAST:
4797                 break;
4798         case RTN_BLACKHOLE:
4799                 return BPF_FIB_LKUP_RET_BLACKHOLE;
4800         case RTN_UNREACHABLE:
4801                 return BPF_FIB_LKUP_RET_UNREACHABLE;
4802         case RTN_PROHIBIT:
4803                 return BPF_FIB_LKUP_RET_PROHIBIT;
4804         default:
4805                 return BPF_FIB_LKUP_RET_NOT_FWDED;
4806         }
4807
4808         ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif,
4809                                     fl6.flowi6_oif != 0, NULL, strict);
4810
4811         if (check_mtu) {
4812                 mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src);
4813                 if (params->tot_len > mtu)
4814                         return BPF_FIB_LKUP_RET_FRAG_NEEDED;
4815         }
4816
4817         if (res.nh->fib_nh_lws)
4818                 return BPF_FIB_LKUP_RET_UNSUPP_LWT;
4819
4820         if (res.nh->fib_nh_gw_family)
4821                 *dst = res.nh->fib_nh_gw6;
4822
4823         dev = res.nh->fib_nh_dev;
4824         params->rt_metric = res.f6i->fib6_metric;
4825
4826         /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is
4827          * not needed here.
4828          */
4829         neigh = __ipv6_neigh_lookup_noref_stub(dev, dst);
4830         if (!neigh)
4831                 return BPF_FIB_LKUP_RET_NO_NEIGH;
4832
4833         return bpf_fib_set_fwd_params(params, neigh, dev);
4834 }
4835 #endif
4836
4837 BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx,
4838            struct bpf_fib_lookup *, params, int, plen, u32, flags)
4839 {
4840         if (plen < sizeof(*params))
4841                 return -EINVAL;
4842
4843         if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4844                 return -EINVAL;
4845
4846         switch (params->family) {
4847 #if IS_ENABLED(CONFIG_INET)
4848         case AF_INET:
4849                 return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params,
4850                                            flags, true);
4851 #endif
4852 #if IS_ENABLED(CONFIG_IPV6)
4853         case AF_INET6:
4854                 return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params,
4855                                            flags, true);
4856 #endif
4857         }
4858         return -EAFNOSUPPORT;
4859 }
4860
4861 static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = {
4862         .func           = bpf_xdp_fib_lookup,
4863         .gpl_only       = true,
4864         .ret_type       = RET_INTEGER,
4865         .arg1_type      = ARG_PTR_TO_CTX,
4866         .arg2_type      = ARG_PTR_TO_MEM,
4867         .arg3_type      = ARG_CONST_SIZE,
4868         .arg4_type      = ARG_ANYTHING,
4869 };
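
/* Illustrative XDP caller (a sketch, close in spirit to
 * samples/bpf/xdp_fwd_kern.c): on BPF_FIB_LKUP_RET_SUCCESS the helper has
 * filled in dmac/smac and the egress ifindex, so the program can rewrite
 * the Ethernet header and redirect. bpf_htons()/bpf_ntohs() are assumed
 * from libbpf's bpf_endian.h; IPv4-only and minimally checked for brevity.
 *
 *	SEC("xdp")
 *	int xdp_fwd(struct xdp_md *ctx)
 *	{
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *		struct ethhdr *eth = data;
 *		struct iphdr *iph = data + sizeof(*eth);
 *		struct bpf_fib_lookup fib = {};
 *
 *		if ((void *)(iph + 1) > data_end ||
 *		    eth->h_proto != bpf_htons(ETH_P_IP))
 *			return XDP_PASS;
 *
 *		fib.family      = AF_INET;
 *		fib.tos         = iph->tos;
 *		fib.l4_protocol = iph->protocol;
 *		fib.tot_len     = bpf_ntohs(iph->tot_len);
 *		fib.ipv4_src    = iph->saddr;
 *		fib.ipv4_dst    = iph->daddr;
 *		fib.ifindex     = ctx->ingress_ifindex;
 *
 *		if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) !=
 *		    BPF_FIB_LKUP_RET_SUCCESS)
 *			return XDP_PASS;
 *
 *		__builtin_memcpy(eth->h_dest, fib.dmac, ETH_ALEN);
 *		__builtin_memcpy(eth->h_source, fib.smac, ETH_ALEN);
 *		return bpf_redirect(fib.ifindex, 0);
 *	}
 */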
4870
4871 BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
4872            struct bpf_fib_lookup *, params, int, plen, u32, flags)
4873 {
4874         struct net *net = dev_net(skb->dev);
4875         int rc = -EAFNOSUPPORT;
4876
4877         if (plen < sizeof(*params))
4878                 return -EINVAL;
4879
4880         if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT))
4881                 return -EINVAL;
4882
4883         switch (params->family) {
4884 #if IS_ENABLED(CONFIG_INET)
4885         case AF_INET:
4886                 rc = bpf_ipv4_fib_lookup(net, params, flags, false);
4887                 break;
4888 #endif
4889 #if IS_ENABLED(CONFIG_IPV6)
4890         case AF_INET6:
4891                 rc = bpf_ipv6_fib_lookup(net, params, flags, false);
4892                 break;
4893 #endif
4894         }
4895
4896         if (!rc) {
4897                 struct net_device *dev;
4898
4899                 dev = dev_get_by_index_rcu(net, params->ifindex);
4900                 if (!is_skb_forwardable(dev, skb))
4901                         rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
4902         }
4903
4904         return rc;
4905 }
4906
4907 static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
4908         .func           = bpf_skb_fib_lookup,
4909         .gpl_only       = true,
4910         .ret_type       = RET_INTEGER,
4911         .arg1_type      = ARG_PTR_TO_CTX,
4912         .arg2_type      = ARG_PTR_TO_MEM,
4913         .arg3_type      = ARG_CONST_SIZE,
4914         .arg4_type      = ARG_ANYTHING,
4915 };
4916
4917 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4918 static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len)
4919 {
4920         int err;
4921         struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr;
4922
4923         if (!seg6_validate_srh(srh, len))
4924                 return -EINVAL;
4925
4926         switch (type) {
4927         case BPF_LWT_ENCAP_SEG6_INLINE:
4928                 if (skb->protocol != htons(ETH_P_IPV6))
4929                         return -EBADMSG;
4930
4931                 err = seg6_do_srh_inline(skb, srh);
4932                 break;
4933         case BPF_LWT_ENCAP_SEG6:
4934                 skb_reset_inner_headers(skb);
4935                 skb->encapsulation = 1;
4936                 err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6);
4937                 break;
4938         default:
4939                 return -EINVAL;
4940         }
4941
4942         bpf_compute_data_pointers(skb);
4943         if (err)
4944                 return err;
4945
4946         ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
4947         skb_set_transport_header(skb, sizeof(struct ipv6hdr));
4948
4949         return seg6_lookup_nexthop(skb, NULL, 0);
4950 }
4951 #endif /* CONFIG_IPV6_SEG6_BPF */
4952
4953 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4954 static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len,
4955                              bool ingress)
4956 {
4957         return bpf_lwt_push_ip_encap(skb, hdr, len, ingress);
4958 }
4959 #endif
4960
4961 BPF_CALL_4(bpf_lwt_in_push_encap, struct sk_buff *, skb, u32, type, void *, hdr,
4962            u32, len)
4963 {
4964         switch (type) {
4965 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
4966         case BPF_LWT_ENCAP_SEG6:
4967         case BPF_LWT_ENCAP_SEG6_INLINE:
4968                 return bpf_push_seg6_encap(skb, type, hdr, len);
4969 #endif
4970 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4971         case BPF_LWT_ENCAP_IP:
4972                 return bpf_push_ip_encap(skb, hdr, len, true /* ingress */);
4973 #endif
4974         default:
4975                 return -EINVAL;
4976         }
4977 }
4978
4979 BPF_CALL_4(bpf_lwt_xmit_push_encap, struct sk_buff *, skb, u32, type,
4980            void *, hdr, u32, len)
4981 {
4982         switch (type) {
4983 #if IS_ENABLED(CONFIG_LWTUNNEL_BPF)
4984         case BPF_LWT_ENCAP_IP:
4985                 return bpf_push_ip_encap(skb, hdr, len, false /* egress */);
4986 #endif
4987         default:
4988                 return -EINVAL;
4989         }
4990 }
4991
4992 static const struct bpf_func_proto bpf_lwt_in_push_encap_proto = {
4993         .func           = bpf_lwt_in_push_encap,
4994         .gpl_only       = false,
4995         .ret_type       = RET_INTEGER,
4996         .arg1_type      = ARG_PTR_TO_CTX,
4997         .arg2_type      = ARG_ANYTHING,
4998         .arg3_type      = ARG_PTR_TO_MEM,
4999         .arg4_type      = ARG_CONST_SIZE
5000 };
5001
5002 static const struct bpf_func_proto bpf_lwt_xmit_push_encap_proto = {
5003         .func           = bpf_lwt_xmit_push_encap,
5004         .gpl_only       = false,
5005         .ret_type       = RET_INTEGER,
5006         .arg1_type      = ARG_PTR_TO_CTX,
5007         .arg2_type      = ARG_ANYTHING,
5008         .arg3_type      = ARG_PTR_TO_MEM,
5009         .arg4_type      = ARG_CONST_SIZE
5010 };
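
/* Illustrative lwt_in usage (a sketch): the user-facing helper is
 * bpf_lwt_push_encap(), which lands in bpf_lwt_in_push_encap() or
 * bpf_lwt_xmit_push_encap() above depending on the program type. Here an
 * SRH prepared in srh_buf (its construction is omitted) is inserted
 * inline into a received IPv6 packet; BPF_OK/BPF_DROP are the lwt
 * verdict codes.
 *
 *	err = bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
 *				 (void *)srh_buf, srh_len);
 *	if (err)
 *		return BPF_DROP;
 *	return BPF_OK;
 */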
5011
5012 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5013 BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
5014            const void *, from, u32, len)
5015 {
5016         struct seg6_bpf_srh_state *srh_state =
5017                 this_cpu_ptr(&seg6_bpf_srh_states);
5018         struct ipv6_sr_hdr *srh = srh_state->srh;
5019         void *srh_tlvs, *srh_end, *ptr;
5020         int srhoff = 0;
5021
5022         if (srh == NULL)
5023                 return -EINVAL;
5024
5025         srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4));
5026         srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen);
5027
5028         ptr = skb->data + offset;
5029         if (ptr >= srh_tlvs && ptr + len <= srh_end)
5030                 srh_state->valid = false;
5031         else if (ptr < (void *)&srh->flags ||
5032                  ptr + len > (void *)&srh->segments)
5033                 return -EFAULT;
5034
5035         if (unlikely(bpf_try_make_writable(skb, offset + len)))
5036                 return -EFAULT;
5037         if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5038                 return -EINVAL;
5039         srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5040
5041         memcpy(skb->data + offset, from, len);
5042         return 0;
5043 }
5044
5045 static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
5046         .func           = bpf_lwt_seg6_store_bytes,
5047         .gpl_only       = false,
5048         .ret_type       = RET_INTEGER,
5049         .arg1_type      = ARG_PTR_TO_CTX,
5050         .arg2_type      = ARG_ANYTHING,
5051         .arg3_type      = ARG_PTR_TO_MEM,
5052         .arg4_type      = ARG_CONST_SIZE
5053 };
5054
5055 static void bpf_update_srh_state(struct sk_buff *skb)
5056 {
5057         struct seg6_bpf_srh_state *srh_state =
5058                 this_cpu_ptr(&seg6_bpf_srh_states);
5059         int srhoff = 0;
5060
5061         if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) {
5062                 srh_state->srh = NULL;
5063         } else {
5064                 srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5065                 srh_state->hdrlen = srh_state->srh->hdrlen << 3;
5066                 srh_state->valid = true;
5067         }
5068 }
5069
5070 BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
5071            u32, action, void *, param, u32, param_len)
5072 {
5073         struct seg6_bpf_srh_state *srh_state =
5074                 this_cpu_ptr(&seg6_bpf_srh_states);
5075         int hdroff = 0;
5076         int err;
5077
5078         switch (action) {
5079         case SEG6_LOCAL_ACTION_END_X:
5080                 if (!seg6_bpf_has_valid_srh(skb))
5081                         return -EBADMSG;
5082                 if (param_len != sizeof(struct in6_addr))
5083                         return -EINVAL;
5084                 return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0);
5085         case SEG6_LOCAL_ACTION_END_T:
5086                 if (!seg6_bpf_has_valid_srh(skb))
5087                         return -EBADMSG;
5088                 if (param_len != sizeof(int))
5089                         return -EINVAL;
5090                 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
5091         case SEG6_LOCAL_ACTION_END_DT6:
5092                 if (!seg6_bpf_has_valid_srh(skb))
5093                         return -EBADMSG;
5094                 if (param_len != sizeof(int))
5095                         return -EINVAL;
5096
5097                 if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0)
5098                         return -EBADMSG;
5099                 if (!pskb_pull(skb, hdroff))
5100                         return -EBADMSG;
5101
5102                 skb_postpull_rcsum(skb, skb_network_header(skb), hdroff);
5103                 skb_reset_network_header(skb);
5104                 skb_reset_transport_header(skb);
5105                 skb->encapsulation = 0;
5106
5107                 bpf_compute_data_pointers(skb);
5108                 bpf_update_srh_state(skb);
5109                 return seg6_lookup_nexthop(skb, NULL, *(int *)param);
5110         case SEG6_LOCAL_ACTION_END_B6:
5111                 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5112                         return -EBADMSG;
5113                 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
5114                                           param, param_len);
5115                 if (!err)
5116                         bpf_update_srh_state(skb);
5117
5118                 return err;
5119         case SEG6_LOCAL_ACTION_END_B6_ENCAP:
5120                 if (srh_state->srh && !seg6_bpf_has_valid_srh(skb))
5121                         return -EBADMSG;
5122                 err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6,
5123                                           param, param_len);
5124                 if (!err)
5125                         bpf_update_srh_state(skb);
5126
5127                 return err;
5128         default:
5129                 return -EINVAL;
5130         }
5131 }
5132
5133 static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
5134         .func           = bpf_lwt_seg6_action,
5135         .gpl_only       = false,
5136         .ret_type       = RET_INTEGER,
5137         .arg1_type      = ARG_PTR_TO_CTX,
5138         .arg2_type      = ARG_ANYTHING,
5139         .arg3_type      = ARG_PTR_TO_MEM,
5140         .arg4_type      = ARG_CONST_SIZE
5141 };
5142
5143 BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
5144            s32, len)
5145 {
5146         struct seg6_bpf_srh_state *srh_state =
5147                 this_cpu_ptr(&seg6_bpf_srh_states);
5148         struct ipv6_sr_hdr *srh = srh_state->srh;
5149         void *srh_end, *srh_tlvs, *ptr;
5150         struct ipv6hdr *hdr;
5151         int srhoff = 0;
5152         int ret;
5153
5154         if (unlikely(srh == NULL))
5155                 return -EINVAL;
5156
5157         srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) +
5158                         ((srh->first_segment + 1) << 4));
5159         srh_end = (void *)((unsigned char *)srh + sizeof(*srh) +
5160                         srh_state->hdrlen);
5161         ptr = skb->data + offset;
5162
5163         if (unlikely(ptr < srh_tlvs || ptr > srh_end))
5164                 return -EFAULT;
5165         if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end))
5166                 return -EFAULT;
5167
5168         if (len > 0) {
5169                 ret = skb_cow_head(skb, len);
5170                 if (unlikely(ret < 0))
5171                         return ret;
5172
5173                 ret = bpf_skb_net_hdr_push(skb, offset, len);
5174         } else {
5175                 ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len);
5176         }
5177
5178         bpf_compute_data_pointers(skb);
5179         if (unlikely(ret < 0))
5180                 return ret;
5181
5182         hdr = (struct ipv6hdr *)skb->data;
5183         hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
5184
5185         if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0)
5186                 return -EINVAL;
5187         srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff);
5188         srh_state->hdrlen += len;
5189         srh_state->valid = false;
5190         return 0;
5191 }
5192
5193 static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
5194         .func           = bpf_lwt_seg6_adjust_srh,
5195         .gpl_only       = false,
5196         .ret_type       = RET_INTEGER,
5197         .arg1_type      = ARG_PTR_TO_CTX,
5198         .arg2_type      = ARG_ANYTHING,
5199         .arg3_type      = ARG_ANYTHING,
5200 };
5201 #endif /* CONFIG_IPV6_SEG6_BPF */
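
/* Illustrative lwt_seg6local program (a sketch): performing an End.T
 * action via the helper above. Table 254 (main) is a placeholder, and
 * the verdict codes are the enum bpf_ret_code values used by the lwt
 * hooks.
 *
 *	SEC("lwt_seg6local")
 *	int do_end_t(struct __sk_buff *skb)
 *	{
 *		int table = 254;
 *
 *		if (bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_T,
 *					&table, sizeof(table)) < 0)
 *			return BPF_DROP;
 *		return BPF_REDIRECT;
 *	}
 */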
5202
5203 #ifdef CONFIG_INET
5204 static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple,
5205                               int dif, int sdif, u8 family, u8 proto)
5206 {
5207         bool refcounted = false;
5208         struct sock *sk = NULL;
5209
5210         if (family == AF_INET) {
5211                 __be32 src4 = tuple->ipv4.saddr;
5212                 __be32 dst4 = tuple->ipv4.daddr;
5213
5214                 if (proto == IPPROTO_TCP)
5215                         sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0,
5216                                            src4, tuple->ipv4.sport,
5217                                            dst4, tuple->ipv4.dport,
5218                                            dif, sdif, &refcounted);
5219                 else
5220                         sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport,
5221                                                dst4, tuple->ipv4.dport,
5222                                                dif, sdif, &udp_table, NULL);
5223 #if IS_ENABLED(CONFIG_IPV6)
5224         } else {
5225                 struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr;
5226                 struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr;
5227
5228                 if (proto == IPPROTO_TCP)
5229                         sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0,
5230                                             src6, tuple->ipv6.sport,
5231                                             dst6, ntohs(tuple->ipv6.dport),
5232                                             dif, sdif, &refcounted);
5233                 else if (likely(ipv6_bpf_stub))
5234                         sk = ipv6_bpf_stub->udp6_lib_lookup(net,
5235                                                             src6, tuple->ipv6.sport,
5236                                                             dst6, tuple->ipv6.dport,
5237                                                             dif, sdif,
5238                                                             &udp_table, NULL);
5239 #endif
5240         }
5241
5242         if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) {
5243                 WARN_ONCE(1, "Found non-RCU, unreferenced socket!");
5244                 sk = NULL;
5245         }
5246         return sk;
5247 }
5248
5249 /* bpf_skc_lookup performs the core lookup for different types of sockets,
5250  * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE.
5251  * The socket is returned as a 'struct sock *'; the BPF_CALL_*() wrappers
5252  * cast it to an 'unsigned long' to satisfy their declarations.
5253  */
5254 static struct sock *
5255 __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5256                  struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5257                  u64 flags)
5258 {
5259         struct sock *sk = NULL;
5260         u8 family = AF_UNSPEC;
5261         struct net *net;
5262         int sdif;
5263
5264         if (len == sizeof(tuple->ipv4))
5265                 family = AF_INET;
5266         else if (len == sizeof(tuple->ipv6))
5267                 family = AF_INET6;
5268         else
5269                 return NULL;
5270
5271         if (unlikely(family == AF_UNSPEC || flags ||
5272                      !((s32)netns_id < 0 || netns_id <= S32_MAX)))
5273                 goto out;
5274
5275         if (family == AF_INET)
5276                 sdif = inet_sdif(skb);
5277         else
5278                 sdif = inet6_sdif(skb);
5279
5280         if ((s32)netns_id < 0) {
5281                 net = caller_net;
5282                 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
5283         } else {
5284                 net = get_net_ns_by_id(caller_net, netns_id);
5285                 if (unlikely(!net))
5286                         goto out;
5287                 sk = sk_lookup(net, tuple, ifindex, sdif, family, proto);
5288                 put_net(net);
5289         }
5290
5291 out:
5292         return sk;
5293 }
5294
5295 static struct sock *
5296 __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5297                 struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id,
5298                 u64 flags)
5299 {
5300         struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net,
5301                                            ifindex, proto, netns_id, flags);
5302
5303         if (sk) {
5304                 sk = sk_to_full_sk(sk);
5305                 if (!sk_fullsock(sk)) {
5306                         if (!sock_flag(sk, SOCK_RCU_FREE))
5307                                 sock_gen_put(sk);
5308                         return NULL;
5309                 }
5310         }
5311
5312         return sk;
5313 }
5314
5315 static struct sock *
5316 bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5317                u8 proto, u64 netns_id, u64 flags)
5318 {
5319         struct net *caller_net;
5320         int ifindex;
5321
5322         if (skb->dev) {
5323                 caller_net = dev_net(skb->dev);
5324                 ifindex = skb->dev->ifindex;
5325         } else {
5326                 caller_net = sock_net(skb->sk);
5327                 ifindex = 0;
5328         }
5329
5330         return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto,
5331                                 netns_id, flags);
5332 }
5333
5334 static struct sock *
5335 bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len,
5336               u8 proto, u64 netns_id, u64 flags)
5337 {
5338         struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id,
5339                                          flags);
5340
5341         if (sk) {
5342                 sk = sk_to_full_sk(sk);
5343                 if (!sk_fullsock(sk)) {
5344                         if (!sock_flag(sk, SOCK_RCU_FREE))
5345                                 sock_gen_put(sk);
5346                         return NULL;
5347                 }
5348         }
5349
5350         return sk;
5351 }
5352
5353 BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb,
5354            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5355 {
5356         return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP,
5357                                              netns_id, flags);
5358 }
5359
5360 static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = {
5361         .func           = bpf_skc_lookup_tcp,
5362         .gpl_only       = false,
5363         .pkt_access     = true,
5364         .ret_type       = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5365         .arg1_type      = ARG_PTR_TO_CTX,
5366         .arg2_type      = ARG_PTR_TO_MEM,
5367         .arg3_type      = ARG_CONST_SIZE,
5368         .arg4_type      = ARG_ANYTHING,
5369         .arg5_type      = ARG_ANYTHING,
5370 };
5371
5372 BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb,
5373            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5374 {
5375         return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP,
5376                                             netns_id, flags);
5377 }
5378
5379 static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = {
5380         .func           = bpf_sk_lookup_tcp,
5381         .gpl_only       = false,
5382         .pkt_access     = true,
5383         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5384         .arg1_type      = ARG_PTR_TO_CTX,
5385         .arg2_type      = ARG_PTR_TO_MEM,
5386         .arg3_type      = ARG_CONST_SIZE,
5387         .arg4_type      = ARG_ANYTHING,
5388         .arg5_type      = ARG_ANYTHING,
5389 };
5390
5391 BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb,
5392            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5393 {
5394         return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP,
5395                                             netns_id, flags);
5396 }
5397
5398 static const struct bpf_func_proto bpf_sk_lookup_udp_proto = {
5399         .func           = bpf_sk_lookup_udp,
5400         .gpl_only       = false,
5401         .pkt_access     = true,
5402         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5403         .arg1_type      = ARG_PTR_TO_CTX,
5404         .arg2_type      = ARG_PTR_TO_MEM,
5405         .arg3_type      = ARG_CONST_SIZE,
5406         .arg4_type      = ARG_ANYTHING,
5407         .arg5_type      = ARG_ANYTHING,
5408 };
5409
5410 BPF_CALL_1(bpf_sk_release, struct sock *, sk)
5411 {
5412         if (!sock_flag(sk, SOCK_RCU_FREE))
5413                 sock_gen_put(sk);
5414         return 0;
5415 }
5416
5417 static const struct bpf_func_proto bpf_sk_release_proto = {
5418         .func           = bpf_sk_release,
5419         .gpl_only       = false,
5420         .ret_type       = RET_INTEGER,
5421         .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
5422 };
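
/* Usage sketch (illustrative): a socket returned by the lookup helpers
 * above holds a reference unless it is SOCK_RCU_FREE, and the verifier
 * insists it is passed to bpf_sk_release() on every program path. The
 * tuple is left zeroed here; a real program fills it from the parsed
 * packet headers. TC_ACT_OK is assumed from linux/pkt_cls.h.
 *
 *	SEC("tc")
 *	int track(struct __sk_buff *skb)
 *	{
 *		struct bpf_sock_tuple tuple = {};
 *		struct bpf_sock *sk;
 *
 *		sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4),
 *				       BPF_F_CURRENT_NETNS, 0);
 *		if (sk)
 *			bpf_sk_release(sk);
 *		return TC_ACT_OK;
 *	}
 */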
5423
5424 BPF_CALL_5(bpf_xdp_sk_lookup_udp, struct xdp_buff *, ctx,
5425            struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5426 {
5427         struct net *caller_net = dev_net(ctx->rxq->dev);
5428         int ifindex = ctx->rxq->dev->ifindex;
5429
5430         return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
5431                                               ifindex, IPPROTO_UDP, netns_id,
5432                                               flags);
5433 }
5434
5435 static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = {
5436         .func           = bpf_xdp_sk_lookup_udp,
5437         .gpl_only       = false,
5438         .pkt_access     = true,
5439         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5440         .arg1_type      = ARG_PTR_TO_CTX,
5441         .arg2_type      = ARG_PTR_TO_MEM,
5442         .arg3_type      = ARG_CONST_SIZE,
5443         .arg4_type      = ARG_ANYTHING,
5444         .arg5_type      = ARG_ANYTHING,
5445 };
5446
5447 BPF_CALL_5(bpf_xdp_skc_lookup_tcp, struct xdp_buff *, ctx,
5448            struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5449 {
5450         struct net *caller_net = dev_net(ctx->rxq->dev);
5451         int ifindex = ctx->rxq->dev->ifindex;
5452
5453         return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net,
5454                                                ifindex, IPPROTO_TCP, netns_id,
5455                                                flags);
5456 }
5457
5458 static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = {
5459         .func           = bpf_xdp_skc_lookup_tcp,
5460         .gpl_only       = false,
5461         .pkt_access     = true,
5462         .ret_type       = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5463         .arg1_type      = ARG_PTR_TO_CTX,
5464         .arg2_type      = ARG_PTR_TO_MEM,
5465         .arg3_type      = ARG_CONST_SIZE,
5466         .arg4_type      = ARG_ANYTHING,
5467         .arg5_type      = ARG_ANYTHING,
5468 };
5469
5470 BPF_CALL_5(bpf_xdp_sk_lookup_tcp, struct xdp_buff *, ctx,
5471            struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags)
5472 {
5473         struct net *caller_net = dev_net(ctx->rxq->dev);
5474         int ifindex = ctx->rxq->dev->ifindex;
5475
5476         return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net,
5477                                               ifindex, IPPROTO_TCP, netns_id,
5478                                               flags);
5479 }
5480
5481 static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = {
5482         .func           = bpf_xdp_sk_lookup_tcp,
5483         .gpl_only       = false,
5484         .pkt_access     = true,
5485         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5486         .arg1_type      = ARG_PTR_TO_CTX,
5487         .arg2_type      = ARG_PTR_TO_MEM,
5488         .arg3_type      = ARG_CONST_SIZE,
5489         .arg4_type      = ARG_ANYTHING,
5490         .arg5_type      = ARG_ANYTHING,
5491 };
5492
5493 BPF_CALL_5(bpf_sock_addr_skc_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
5494            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5495 {
5496         return (unsigned long)__bpf_skc_lookup(NULL, tuple, len,
5497                                                sock_net(ctx->sk), 0,
5498                                                IPPROTO_TCP, netns_id, flags);
5499 }
5500
5501 static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = {
5502         .func           = bpf_sock_addr_skc_lookup_tcp,
5503         .gpl_only       = false,
5504         .ret_type       = RET_PTR_TO_SOCK_COMMON_OR_NULL,
5505         .arg1_type      = ARG_PTR_TO_CTX,
5506         .arg2_type      = ARG_PTR_TO_MEM,
5507         .arg3_type      = ARG_CONST_SIZE,
5508         .arg4_type      = ARG_ANYTHING,
5509         .arg5_type      = ARG_ANYTHING,
5510 };
5511
5512 BPF_CALL_5(bpf_sock_addr_sk_lookup_tcp, struct bpf_sock_addr_kern *, ctx,
5513            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5514 {
5515         return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
5516                                               sock_net(ctx->sk), 0, IPPROTO_TCP,
5517                                               netns_id, flags);
5518 }
5519
5520 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_tcp_proto = {
5521         .func           = bpf_sock_addr_sk_lookup_tcp,
5522         .gpl_only       = false,
5523         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5524         .arg1_type      = ARG_PTR_TO_CTX,
5525         .arg2_type      = ARG_PTR_TO_MEM,
5526         .arg3_type      = ARG_CONST_SIZE,
5527         .arg4_type      = ARG_ANYTHING,
5528         .arg5_type      = ARG_ANYTHING,
5529 };
5530
5531 BPF_CALL_5(bpf_sock_addr_sk_lookup_udp, struct bpf_sock_addr_kern *, ctx,
5532            struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags)
5533 {
5534         return (unsigned long)__bpf_sk_lookup(NULL, tuple, len,
5535                                               sock_net(ctx->sk), 0, IPPROTO_UDP,
5536                                               netns_id, flags);
5537 }
5538
5539 static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = {
5540         .func           = bpf_sock_addr_sk_lookup_udp,
5541         .gpl_only       = false,
5542         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5543         .arg1_type      = ARG_PTR_TO_CTX,
5544         .arg2_type      = ARG_PTR_TO_MEM,
5545         .arg3_type      = ARG_CONST_SIZE,
5546         .arg4_type      = ARG_ANYTHING,
5547         .arg5_type      = ARG_ANYTHING,
5548 };
5549
5550 bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
5551                                   struct bpf_insn_access_aux *info)
5552 {
5553         if (off < 0 || off >= offsetofend(struct bpf_tcp_sock,
5554                                           icsk_retransmits))
5555                 return false;
5556
5557         if (off % size != 0)
5558                 return false;
5559
5560         switch (off) {
5561         case offsetof(struct bpf_tcp_sock, bytes_received):
5562         case offsetof(struct bpf_tcp_sock, bytes_acked):
5563                 return size == sizeof(__u64);
5564         default:
5565                 return size == sizeof(__u32);
5566         }
5567 }
5568
5569 u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
5570                                     const struct bpf_insn *si,
5571                                     struct bpf_insn *insn_buf,
5572                                     struct bpf_prog *prog, u32 *target_size)
5573 {
5574         struct bpf_insn *insn = insn_buf;
5575
5576 #define BPF_TCP_SOCK_GET_COMMON(FIELD)                                  \
5577         do {                                                            \
5578                 BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) >     \
5579                              FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
5580                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\
5581                                       si->dst_reg, si->src_reg,         \
5582                                       offsetof(struct tcp_sock, FIELD)); \
5583         } while (0)
5584
5585 #define BPF_INET_SOCK_GET_COMMON(FIELD)                                 \
5586         do {                                                            \
5587                 BUILD_BUG_ON(FIELD_SIZEOF(struct inet_connection_sock,  \
5588                                           FIELD) >                      \
5589                              FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \
5590                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                 \
5591                                         struct inet_connection_sock,    \
5592                                         FIELD),                         \
5593                                       si->dst_reg, si->src_reg,         \
5594                                       offsetof(                         \
5595                                         struct inet_connection_sock,    \
5596                                         FIELD));                        \
5597         } while (0)
5598
5599         if (insn > insn_buf)
5600                 return insn - insn_buf;
5601
5602         switch (si->off) {
5603         case offsetof(struct bpf_tcp_sock, rtt_min):
5604                 BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
5605                              sizeof(struct minmax));
5606                 BUILD_BUG_ON(sizeof(struct minmax) <
5607                              sizeof(struct minmax_sample));
5608
5609                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
5610                                       offsetof(struct tcp_sock, rtt_min) +
5611                                       offsetof(struct minmax_sample, v));
5612                 break;
5613         case offsetof(struct bpf_tcp_sock, snd_cwnd):
5614                 BPF_TCP_SOCK_GET_COMMON(snd_cwnd);
5615                 break;
5616         case offsetof(struct bpf_tcp_sock, srtt_us):
5617                 BPF_TCP_SOCK_GET_COMMON(srtt_us);
5618                 break;
5619         case offsetof(struct bpf_tcp_sock, snd_ssthresh):
5620                 BPF_TCP_SOCK_GET_COMMON(snd_ssthresh);
5621                 break;
5622         case offsetof(struct bpf_tcp_sock, rcv_nxt):
5623                 BPF_TCP_SOCK_GET_COMMON(rcv_nxt);
5624                 break;
5625         case offsetof(struct bpf_tcp_sock, snd_nxt):
5626                 BPF_TCP_SOCK_GET_COMMON(snd_nxt);
5627                 break;
5628         case offsetof(struct bpf_tcp_sock, snd_una):
5629                 BPF_TCP_SOCK_GET_COMMON(snd_una);
5630                 break;
5631         case offsetof(struct bpf_tcp_sock, mss_cache):
5632                 BPF_TCP_SOCK_GET_COMMON(mss_cache);
5633                 break;
5634         case offsetof(struct bpf_tcp_sock, ecn_flags):
5635                 BPF_TCP_SOCK_GET_COMMON(ecn_flags);
5636                 break;
5637         case offsetof(struct bpf_tcp_sock, rate_delivered):
5638                 BPF_TCP_SOCK_GET_COMMON(rate_delivered);
5639                 break;
5640         case offsetof(struct bpf_tcp_sock, rate_interval_us):
5641                 BPF_TCP_SOCK_GET_COMMON(rate_interval_us);
5642                 break;
5643         case offsetof(struct bpf_tcp_sock, packets_out):
5644                 BPF_TCP_SOCK_GET_COMMON(packets_out);
5645                 break;
5646         case offsetof(struct bpf_tcp_sock, retrans_out):
5647                 BPF_TCP_SOCK_GET_COMMON(retrans_out);
5648                 break;
5649         case offsetof(struct bpf_tcp_sock, total_retrans):
5650                 BPF_TCP_SOCK_GET_COMMON(total_retrans);
5651                 break;
5652         case offsetof(struct bpf_tcp_sock, segs_in):
5653                 BPF_TCP_SOCK_GET_COMMON(segs_in);
5654                 break;
5655         case offsetof(struct bpf_tcp_sock, data_segs_in):
5656                 BPF_TCP_SOCK_GET_COMMON(data_segs_in);
5657                 break;
5658         case offsetof(struct bpf_tcp_sock, segs_out):
5659                 BPF_TCP_SOCK_GET_COMMON(segs_out);
5660                 break;
5661         case offsetof(struct bpf_tcp_sock, data_segs_out):
5662                 BPF_TCP_SOCK_GET_COMMON(data_segs_out);
5663                 break;
5664         case offsetof(struct bpf_tcp_sock, lost_out):
5665                 BPF_TCP_SOCK_GET_COMMON(lost_out);
5666                 break;
5667         case offsetof(struct bpf_tcp_sock, sacked_out):
5668                 BPF_TCP_SOCK_GET_COMMON(sacked_out);
5669                 break;
5670         case offsetof(struct bpf_tcp_sock, bytes_received):
5671                 BPF_TCP_SOCK_GET_COMMON(bytes_received);
5672                 break;
5673         case offsetof(struct bpf_tcp_sock, bytes_acked):
5674                 BPF_TCP_SOCK_GET_COMMON(bytes_acked);
5675                 break;
5676         case offsetof(struct bpf_tcp_sock, dsack_dups):
5677                 BPF_TCP_SOCK_GET_COMMON(dsack_dups);
5678                 break;
5679         case offsetof(struct bpf_tcp_sock, delivered):
5680                 BPF_TCP_SOCK_GET_COMMON(delivered);
5681                 break;
5682         case offsetof(struct bpf_tcp_sock, delivered_ce):
5683                 BPF_TCP_SOCK_GET_COMMON(delivered_ce);
5684                 break;
5685         case offsetof(struct bpf_tcp_sock, icsk_retransmits):
5686                 BPF_INET_SOCK_GET_COMMON(icsk_retransmits);
5687                 break;
5688         }
5689
5690         return insn - insn_buf;
5691 }
5692
5693 BPF_CALL_1(bpf_tcp_sock, struct sock *, sk)
5694 {
5695         if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP)
5696                 return (unsigned long)sk;
5697
5698         return (unsigned long)NULL;
5699 }
5700
5701 const struct bpf_func_proto bpf_tcp_sock_proto = {
5702         .func           = bpf_tcp_sock,
5703         .gpl_only       = false,
5704         .ret_type       = RET_PTR_TO_TCP_SOCK_OR_NULL,
5705         .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
5706 };
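
/* Illustrative caller (a sketch, mirroring the bpf selftests): from a
 * cgroup_skb program the chain is skb->sk -> bpf_tcp_sock() -> read-only
 * TCP state. No release is needed, since RET_PTR_TO_TCP_SOCK_OR_NULL
 * carries no reference; only a NULL check is required.
 *
 *	SEC("cgroup_skb/egress")
 *	int snoop_cwnd(struct __sk_buff *skb)
 *	{
 *		struct bpf_sock *sk = skb->sk;
 *		struct bpf_tcp_sock *tp;
 *
 *		if (!sk)
 *			return 1;
 *		tp = bpf_tcp_sock(sk);
 *		if (tp)
 *			bpf_printk("cwnd: %u", tp->snd_cwnd);
 *		return 1;
 *	}
 */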
5707
5708 BPF_CALL_1(bpf_get_listener_sock, struct sock *, sk)
5709 {
5710         sk = sk_to_full_sk(sk);
5711
5712         if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE))
5713                 return (unsigned long)sk;
5714
5715         return (unsigned long)NULL;
5716 }
5717
5718 static const struct bpf_func_proto bpf_get_listener_sock_proto = {
5719         .func           = bpf_get_listener_sock,
5720         .gpl_only       = false,
5721         .ret_type       = RET_PTR_TO_SOCKET_OR_NULL,
5722         .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
5723 };
5724
5725 BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb)
5726 {
5727         unsigned int iphdr_len;
5728
5729         if (skb->protocol == cpu_to_be16(ETH_P_IP))
5730                 iphdr_len = sizeof(struct iphdr);
5731         else if (skb->protocol == cpu_to_be16(ETH_P_IPV6))
5732                 iphdr_len = sizeof(struct ipv6hdr);
5733         else
5734                 return 0;
5735
5736         if (skb_headlen(skb) < iphdr_len)
5737                 return 0;
5738
5739         if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len))
5740                 return 0;
5741
5742         return INET_ECN_set_ce(skb);
5743 }
5744
5745 bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
5746                                   struct bpf_insn_access_aux *info)
5747 {
5748         if (off < 0 || off >= offsetofend(struct bpf_xdp_sock, queue_id))
5749                 return false;
5750
5751         if (off % size != 0)
5752                 return false;
5753
5754         switch (off) {
5755         default:
5756                 return size == sizeof(__u32);
5757         }
5758 }
5759
5760 u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
5761                                     const struct bpf_insn *si,
5762                                     struct bpf_insn *insn_buf,
5763                                     struct bpf_prog *prog, u32 *target_size)
5764 {
5765         struct bpf_insn *insn = insn_buf;
5766
5767 #define BPF_XDP_SOCK_GET(FIELD)                                         \
5768         do {                                                            \
5769                 BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_sock, FIELD) >     \
5770                              FIELD_SIZEOF(struct bpf_xdp_sock, FIELD)); \
5771                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\
5772                                       si->dst_reg, si->src_reg,         \
5773                                       offsetof(struct xdp_sock, FIELD)); \
5774         } while (0)
5775
5776         switch (si->off) {
5777         case offsetof(struct bpf_xdp_sock, queue_id):
5778                 BPF_XDP_SOCK_GET(queue_id);
5779                 break;
5780         }
5781
5782         return insn - insn_buf;
5783 }
5784
5785 static const struct bpf_func_proto bpf_skb_ecn_set_ce_proto = {
5786         .func           = bpf_skb_ecn_set_ce,
5787         .gpl_only       = false,
5788         .ret_type       = RET_INTEGER,
5789         .arg1_type      = ARG_PTR_TO_CTX,
5790 };
5791
5792 BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
5793            struct tcphdr *, th, u32, th_len)
5794 {
5795 #ifdef CONFIG_SYN_COOKIES
5796         u32 cookie;
5797         int ret;
5798
5799         if (unlikely(th_len < sizeof(*th)))
5800                 return -EINVAL;
5801
5802         /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. */
5803         if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
5804                 return -EINVAL;
5805
5806         if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
5807                 return -EINVAL;
5808
5809         if (!th->ack || th->rst || th->syn)
5810                 return -ENOENT;
5811
5812         if (tcp_synq_no_recent_overflow(sk))
5813                 return -ENOENT;
5814
5815         cookie = ntohl(th->ack_seq) - 1;
5816
5817         switch (sk->sk_family) {
5818         case AF_INET:
5819                 if (unlikely(iph_len < sizeof(struct iphdr)))
5820                         return -EINVAL;
5821
5822                 ret = __cookie_v4_check((struct iphdr *)iph, th, cookie);
5823                 break;
5824
5825 #if IS_BUILTIN(CONFIG_IPV6)
5826         case AF_INET6:
5827                 if (unlikely(iph_len < sizeof(struct ipv6hdr)))
5828                         return -EINVAL;
5829
5830                 ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie);
5831                 break;
5832 #endif /* CONFIG_IPV6 */
5833
5834         default:
5835                 return -EPROTONOSUPPORT;
5836         }
5837
5838         if (ret > 0)
5839                 return 0;
5840
5841         return -ENOENT;
5842 #else
5843         return -ENOTSUPP;
5844 #endif
5845 }
5846
5847 static const struct bpf_func_proto bpf_tcp_check_syncookie_proto = {
5848         .func           = bpf_tcp_check_syncookie,
5849         .gpl_only       = true,
5850         .pkt_access     = true,
5851         .ret_type       = RET_INTEGER,
5852         .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
5853         .arg2_type      = ARG_PTR_TO_MEM,
5854         .arg3_type      = ARG_CONST_SIZE,
5855         .arg4_type      = ARG_PTR_TO_MEM,
5856         .arg5_type      = ARG_CONST_SIZE,
5857 };
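
/* Usage sketch (illustrative, e.g. from XDP): after parsing the headers
 * and finding the listener via bpf_skc_lookup_tcp(), an incoming ACK can
 * be validated against a previously generated SYN cookie:
 *
 *	sk = bpf_skc_lookup_tcp(ctx, &tuple, tuple_len, BPF_F_CURRENT_NETNS, 0);
 *	if (sk) {
 *		if (bpf_tcp_check_syncookie(sk, iph, iph_len,
 *					    tcph, tcph_len) == 0)
 *			...	// cookie valid: ACK completes the handshake
 *		bpf_sk_release(sk);
 *	}
 */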
5858
5859 BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len,
5860            struct tcphdr *, th, u32, th_len)
5861 {
5862 #ifdef CONFIG_SYN_COOKIES
5863         u32 cookie;
5864         u16 mss;
5865
5866         if (unlikely(th_len < sizeof(*th) || th_len != th->doff * 4))
5867                 return -EINVAL;
5868
5869         if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN)
5870                 return -EINVAL;
5871
5872         if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies)
5873                 return -ENOENT;
5874
5875         if (!th->syn || th->ack || th->fin || th->rst)
5876                 return -EINVAL;
5877
5878         if (unlikely(iph_len < sizeof(struct iphdr)))
5879                 return -EINVAL;
5880
5881         /* Both struct iphdr and struct ipv6hdr have the version field at the
5882          * same offset so we can cast to the shorter header (struct iphdr).
5883          */
5884         switch (((struct iphdr *)iph)->version) {
5885         case 4:
5886                 if (sk->sk_family == AF_INET6 && sk->sk_ipv6only)
5887                         return -EINVAL;
5888
5889                 mss = tcp_v4_get_syncookie(sk, iph, th, &cookie);
5890                 break;
5891
5892 #if IS_BUILTIN(CONFIG_IPV6)
5893         case 6:
5894                 if (unlikely(iph_len < sizeof(struct ipv6hdr)))
5895                         return -EINVAL;
5896
5897                 if (sk->sk_family != AF_INET6)
5898                         return -EINVAL;
5899
5900                 mss = tcp_v6_get_syncookie(sk, iph, th, &cookie);
5901                 break;
5902 #endif /* CONFIG_IPV6 */
5903
5904         default:
5905                 return -EPROTONOSUPPORT;
5906         }
5907         if (mss == 0)
5908                 return -ENOENT;
5909
5910         return cookie | ((u64)mss << 32);
5911 #else
5912         return -EOPNOTSUPP;
5913 #endif /* CONFIG_SYN_COOKIES */
5914 }
5915
5916 static const struct bpf_func_proto bpf_tcp_gen_syncookie_proto = {
5917         .func           = bpf_tcp_gen_syncookie,
5918         .gpl_only       = true, /* __cookie_v*_init_sequence() is GPL */
5919         .pkt_access     = true,
5920         .ret_type       = RET_INTEGER,
5921         .arg1_type      = ARG_PTR_TO_SOCK_COMMON,
5922         .arg2_type      = ARG_PTR_TO_MEM,
5923         .arg3_type      = ARG_CONST_SIZE,
5924         .arg4_type      = ARG_PTR_TO_MEM,
5925         .arg5_type      = ARG_CONST_SIZE,
5926 };
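
/* The (s64) return value packs both outputs: the low 32 bits carry the
 * cookie, the high 32 bits the MSS to announce; negative values are
 * errors. Caller-side decode (illustrative):
 *
 *	s64 ret = bpf_tcp_gen_syncookie(sk, iph, iph_len, tcph, tcph_len);
 *
 *	if (ret >= 0) {
 *		__u32 cookie = (__u32)ret;
 *		__u16 mss = ret >> 32;
 *		...	// emit SYNACK using cookie as seq, advertising mss
 *	}
 */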
5927
5928 #endif /* CONFIG_INET */
5929
5930 bool bpf_helper_changes_pkt_data(void *func)
5931 {
5932         if (func == bpf_skb_vlan_push ||
5933             func == bpf_skb_vlan_pop ||
5934             func == bpf_skb_store_bytes ||
5935             func == bpf_skb_change_proto ||
5936             func == bpf_skb_change_head ||
5937             func == sk_skb_change_head ||
5938             func == bpf_skb_change_tail ||
5939             func == sk_skb_change_tail ||
5940             func == bpf_skb_adjust_room ||
5941             func == bpf_skb_pull_data ||
5942             func == sk_skb_pull_data ||
5943             func == bpf_clone_redirect ||
5944             func == bpf_l3_csum_replace ||
5945             func == bpf_l4_csum_replace ||
5946             func == bpf_xdp_adjust_head ||
5947             func == bpf_xdp_adjust_meta ||
5948             func == bpf_msg_pull_data ||
5949             func == bpf_msg_push_data ||
5950             func == bpf_msg_pop_data ||
5951             func == bpf_xdp_adjust_tail ||
5952 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
5953             func == bpf_lwt_seg6_store_bytes ||
5954             func == bpf_lwt_seg6_adjust_srh ||
5955             func == bpf_lwt_seg6_action ||
5956 #endif
5957             func == bpf_lwt_in_push_encap ||
5958             func == bpf_lwt_xmit_push_encap)
5959                 return true;
5960
5961         return false;
5962 }
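
/* The verifier uses this to invalidate all packet pointers after a call
 * to any helper listed above, since these may reallocate or move packet
 * data. Program-side consequence (illustrative):
 *
 *	void *data = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	...
 *	bpf_skb_pull_data(skb, 0);
 *	// data/data_end are now stale: reload both from the ctx and
 *	// redo the bounds check before touching the packet again.
 */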
5963
5964 static const struct bpf_func_proto *
5965 bpf_base_func_proto(enum bpf_func_id func_id)
5966 {
5967         switch (func_id) {
5968         case BPF_FUNC_map_lookup_elem:
5969                 return &bpf_map_lookup_elem_proto;
5970         case BPF_FUNC_map_update_elem:
5971                 return &bpf_map_update_elem_proto;
5972         case BPF_FUNC_map_delete_elem:
5973                 return &bpf_map_delete_elem_proto;
5974         case BPF_FUNC_map_push_elem:
5975                 return &bpf_map_push_elem_proto;
5976         case BPF_FUNC_map_pop_elem:
5977                 return &bpf_map_pop_elem_proto;
5978         case BPF_FUNC_map_peek_elem:
5979                 return &bpf_map_peek_elem_proto;
5980         case BPF_FUNC_get_prandom_u32:
5981                 return &bpf_get_prandom_u32_proto;
5982         case BPF_FUNC_get_smp_processor_id:
5983                 return &bpf_get_raw_smp_processor_id_proto;
5984         case BPF_FUNC_get_numa_node_id:
5985                 return &bpf_get_numa_node_id_proto;
5986         case BPF_FUNC_tail_call:
5987                 return &bpf_tail_call_proto;
5988         case BPF_FUNC_ktime_get_ns:
5989                 return &bpf_ktime_get_ns_proto;
5990         default:
5991                 break;
5992         }
5993
5994         if (!capable(CAP_SYS_ADMIN))
5995                 return NULL;
5996
5997         switch (func_id) {
5998         case BPF_FUNC_spin_lock:
5999                 return &bpf_spin_lock_proto;
6000         case BPF_FUNC_spin_unlock:
6001                 return &bpf_spin_unlock_proto;
6002         case BPF_FUNC_trace_printk:
6003                 return bpf_get_trace_printk_proto();
6004         default:
6005                 return NULL;
6006         }
6007 }
6008
6009 static const struct bpf_func_proto *
6010 sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6011 {
6012         switch (func_id) {
6013         /* inet and inet6 sockets are created in a process
6014          * context so there is always a valid uid/gid
6015          */
6016         case BPF_FUNC_get_current_uid_gid:
6017                 return &bpf_get_current_uid_gid_proto;
6018         case BPF_FUNC_get_local_storage:
6019                 return &bpf_get_local_storage_proto;
6020         default:
6021                 return bpf_base_func_proto(func_id);
6022         }
6023 }
6024
6025 static const struct bpf_func_proto *
6026 sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6027 {
6028         switch (func_id) {
6029         /* inet and inet6 sockets are created in a process
6030          * context so there is always a valid uid/gid
6031          */
6032         case BPF_FUNC_get_current_uid_gid:
6033                 return &bpf_get_current_uid_gid_proto;
6034         case BPF_FUNC_bind:
6035                 switch (prog->expected_attach_type) {
6036                 case BPF_CGROUP_INET4_CONNECT:
6037                 case BPF_CGROUP_INET6_CONNECT:
6038                         return &bpf_bind_proto;
6039                 default:
6040                         return NULL;
6041                 }
6042         case BPF_FUNC_get_socket_cookie:
6043                 return &bpf_get_socket_cookie_sock_addr_proto;
6044         case BPF_FUNC_get_local_storage:
6045                 return &bpf_get_local_storage_proto;
6046 #ifdef CONFIG_INET
6047         case BPF_FUNC_sk_lookup_tcp:
6048                 return &bpf_sock_addr_sk_lookup_tcp_proto;
6049         case BPF_FUNC_sk_lookup_udp:
6050                 return &bpf_sock_addr_sk_lookup_udp_proto;
6051         case BPF_FUNC_sk_release:
6052                 return &bpf_sk_release_proto;
6053         case BPF_FUNC_skc_lookup_tcp:
6054                 return &bpf_sock_addr_skc_lookup_tcp_proto;
6055 #endif /* CONFIG_INET */
6056         case BPF_FUNC_sk_storage_get:
6057                 return &bpf_sk_storage_get_proto;
6058         case BPF_FUNC_sk_storage_delete:
6059                 return &bpf_sk_storage_delete_proto;
6060         default:
6061                 return bpf_base_func_proto(func_id);
6062         }
6063 }
6064
6065 static const struct bpf_func_proto *
6066 sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6067 {
6068         switch (func_id) {
6069         case BPF_FUNC_skb_load_bytes:
6070                 return &bpf_skb_load_bytes_proto;
6071         case BPF_FUNC_skb_load_bytes_relative:
6072                 return &bpf_skb_load_bytes_relative_proto;
6073         case BPF_FUNC_get_socket_cookie:
6074                 return &bpf_get_socket_cookie_proto;
6075         case BPF_FUNC_get_socket_uid:
6076                 return &bpf_get_socket_uid_proto;
6077         case BPF_FUNC_perf_event_output:
6078                 return &bpf_skb_event_output_proto;
6079         default:
6080                 return bpf_base_func_proto(func_id);
6081         }
6082 }
6083
6084 const struct bpf_func_proto bpf_sk_storage_get_proto __weak;
6085 const struct bpf_func_proto bpf_sk_storage_delete_proto __weak;
6086
6087 static const struct bpf_func_proto *
6088 cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6089 {
6090         switch (func_id) {
6091         case BPF_FUNC_get_local_storage:
6092                 return &bpf_get_local_storage_proto;
6093         case BPF_FUNC_sk_fullsock:
6094                 return &bpf_sk_fullsock_proto;
6095         case BPF_FUNC_sk_storage_get:
6096                 return &bpf_sk_storage_get_proto;
6097         case BPF_FUNC_sk_storage_delete:
6098                 return &bpf_sk_storage_delete_proto;
6099         case BPF_FUNC_perf_event_output:
6100                 return &bpf_skb_event_output_proto;
6101 #ifdef CONFIG_SOCK_CGROUP_DATA
6102         case BPF_FUNC_skb_cgroup_id:
6103                 return &bpf_skb_cgroup_id_proto;
6104 #endif
6105 #ifdef CONFIG_INET
6106         case BPF_FUNC_tcp_sock:
6107                 return &bpf_tcp_sock_proto;
6108         case BPF_FUNC_get_listener_sock:
6109                 return &bpf_get_listener_sock_proto;
6110         case BPF_FUNC_skb_ecn_set_ce:
6111                 return &bpf_skb_ecn_set_ce_proto;
6112 #endif
6113         default:
6114                 return sk_filter_func_proto(func_id, prog);
6115         }
6116 }
6117
6118 static const struct bpf_func_proto *
6119 tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6120 {
6121         switch (func_id) {
6122         case BPF_FUNC_skb_store_bytes:
6123                 return &bpf_skb_store_bytes_proto;
6124         case BPF_FUNC_skb_load_bytes:
6125                 return &bpf_skb_load_bytes_proto;
6126         case BPF_FUNC_skb_load_bytes_relative:
6127                 return &bpf_skb_load_bytes_relative_proto;
6128         case BPF_FUNC_skb_pull_data:
6129                 return &bpf_skb_pull_data_proto;
6130         case BPF_FUNC_csum_diff:
6131                 return &bpf_csum_diff_proto;
6132         case BPF_FUNC_csum_update:
6133                 return &bpf_csum_update_proto;
6134         case BPF_FUNC_l3_csum_replace:
6135                 return &bpf_l3_csum_replace_proto;
6136         case BPF_FUNC_l4_csum_replace:
6137                 return &bpf_l4_csum_replace_proto;
6138         case BPF_FUNC_clone_redirect:
6139                 return &bpf_clone_redirect_proto;
6140         case BPF_FUNC_get_cgroup_classid:
6141                 return &bpf_get_cgroup_classid_proto;
6142         case BPF_FUNC_skb_vlan_push:
6143                 return &bpf_skb_vlan_push_proto;
6144         case BPF_FUNC_skb_vlan_pop:
6145                 return &bpf_skb_vlan_pop_proto;
6146         case BPF_FUNC_skb_change_proto:
6147                 return &bpf_skb_change_proto_proto;
6148         case BPF_FUNC_skb_change_type:
6149                 return &bpf_skb_change_type_proto;
6150         case BPF_FUNC_skb_adjust_room:
6151                 return &bpf_skb_adjust_room_proto;
6152         case BPF_FUNC_skb_change_tail:
6153                 return &bpf_skb_change_tail_proto;
6154         case BPF_FUNC_skb_get_tunnel_key:
6155                 return &bpf_skb_get_tunnel_key_proto;
6156         case BPF_FUNC_skb_set_tunnel_key:
6157                 return bpf_get_skb_set_tunnel_proto(func_id);
6158         case BPF_FUNC_skb_get_tunnel_opt:
6159                 return &bpf_skb_get_tunnel_opt_proto;
6160         case BPF_FUNC_skb_set_tunnel_opt:
6161                 return bpf_get_skb_set_tunnel_proto(func_id);
6162         case BPF_FUNC_redirect:
6163                 return &bpf_redirect_proto;
6164         case BPF_FUNC_get_route_realm:
6165                 return &bpf_get_route_realm_proto;
6166         case BPF_FUNC_get_hash_recalc:
6167                 return &bpf_get_hash_recalc_proto;
6168         case BPF_FUNC_set_hash_invalid:
6169                 return &bpf_set_hash_invalid_proto;
6170         case BPF_FUNC_set_hash:
6171                 return &bpf_set_hash_proto;
6172         case BPF_FUNC_perf_event_output:
6173                 return &bpf_skb_event_output_proto;
6174         case BPF_FUNC_get_smp_processor_id:
6175                 return &bpf_get_smp_processor_id_proto;
6176         case BPF_FUNC_skb_under_cgroup:
6177                 return &bpf_skb_under_cgroup_proto;
6178         case BPF_FUNC_get_socket_cookie:
6179                 return &bpf_get_socket_cookie_proto;
6180         case BPF_FUNC_get_socket_uid:
6181                 return &bpf_get_socket_uid_proto;
6182         case BPF_FUNC_fib_lookup:
6183                 return &bpf_skb_fib_lookup_proto;
6184         case BPF_FUNC_sk_fullsock:
6185                 return &bpf_sk_fullsock_proto;
6186         case BPF_FUNC_sk_storage_get:
6187                 return &bpf_sk_storage_get_proto;
6188         case BPF_FUNC_sk_storage_delete:
6189                 return &bpf_sk_storage_delete_proto;
6190 #ifdef CONFIG_XFRM
6191         case BPF_FUNC_skb_get_xfrm_state:
6192                 return &bpf_skb_get_xfrm_state_proto;
6193 #endif
6194 #ifdef CONFIG_SOCK_CGROUP_DATA
6195         case BPF_FUNC_skb_cgroup_id:
6196                 return &bpf_skb_cgroup_id_proto;
6197         case BPF_FUNC_skb_ancestor_cgroup_id:
6198                 return &bpf_skb_ancestor_cgroup_id_proto;
6199 #endif
6200 #ifdef CONFIG_INET
6201         case BPF_FUNC_sk_lookup_tcp:
6202                 return &bpf_sk_lookup_tcp_proto;
6203         case BPF_FUNC_sk_lookup_udp:
6204                 return &bpf_sk_lookup_udp_proto;
6205         case BPF_FUNC_sk_release:
6206                 return &bpf_sk_release_proto;
6207         case BPF_FUNC_tcp_sock:
6208                 return &bpf_tcp_sock_proto;
6209         case BPF_FUNC_get_listener_sock:
6210                 return &bpf_get_listener_sock_proto;
6211         case BPF_FUNC_skc_lookup_tcp:
6212                 return &bpf_skc_lookup_tcp_proto;
6213         case BPF_FUNC_tcp_check_syncookie:
6214                 return &bpf_tcp_check_syncookie_proto;
6215         case BPF_FUNC_skb_ecn_set_ce:
6216                 return &bpf_skb_ecn_set_ce_proto;
6217         case BPF_FUNC_tcp_gen_syncookie:
6218                 return &bpf_tcp_gen_syncookie_proto;
6219 #endif
6220         default:
6221                 return bpf_base_func_proto(func_id);
6222         }
6223 }
6224
6225 static const struct bpf_func_proto *
6226 xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6227 {
6228         switch (func_id) {
6229         case BPF_FUNC_perf_event_output:
6230                 return &bpf_xdp_event_output_proto;
6231         case BPF_FUNC_get_smp_processor_id:
6232                 return &bpf_get_smp_processor_id_proto;
6233         case BPF_FUNC_csum_diff:
6234                 return &bpf_csum_diff_proto;
6235         case BPF_FUNC_xdp_adjust_head:
6236                 return &bpf_xdp_adjust_head_proto;
6237         case BPF_FUNC_xdp_adjust_meta:
6238                 return &bpf_xdp_adjust_meta_proto;
6239         case BPF_FUNC_redirect:
6240                 return &bpf_xdp_redirect_proto;
6241         case BPF_FUNC_redirect_map:
6242                 return &bpf_xdp_redirect_map_proto;
6243         case BPF_FUNC_xdp_adjust_tail:
6244                 return &bpf_xdp_adjust_tail_proto;
6245         case BPF_FUNC_fib_lookup:
6246                 return &bpf_xdp_fib_lookup_proto;
6247 #ifdef CONFIG_INET
6248         case BPF_FUNC_sk_lookup_udp:
6249                 return &bpf_xdp_sk_lookup_udp_proto;
6250         case BPF_FUNC_sk_lookup_tcp:
6251                 return &bpf_xdp_sk_lookup_tcp_proto;
6252         case BPF_FUNC_sk_release:
6253                 return &bpf_sk_release_proto;
6254         case BPF_FUNC_skc_lookup_tcp:
6255                 return &bpf_xdp_skc_lookup_tcp_proto;
6256         case BPF_FUNC_tcp_check_syncookie:
6257                 return &bpf_tcp_check_syncookie_proto;
6258         case BPF_FUNC_tcp_gen_syncookie:
6259                 return &bpf_tcp_gen_syncookie_proto;
6260 #endif
6261         default:
6262                 return bpf_base_func_proto(func_id);
6263         }
6264 }
6265
6266 const struct bpf_func_proto bpf_sock_map_update_proto __weak;
6267 const struct bpf_func_proto bpf_sock_hash_update_proto __weak;
6268
6269 static const struct bpf_func_proto *
6270 sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6271 {
6272         switch (func_id) {
6273         case BPF_FUNC_setsockopt:
6274                 return &bpf_setsockopt_proto;
6275         case BPF_FUNC_getsockopt:
6276                 return &bpf_getsockopt_proto;
6277         case BPF_FUNC_sock_ops_cb_flags_set:
6278                 return &bpf_sock_ops_cb_flags_set_proto;
6279         case BPF_FUNC_sock_map_update:
6280                 return &bpf_sock_map_update_proto;
6281         case BPF_FUNC_sock_hash_update:
6282                 return &bpf_sock_hash_update_proto;
6283         case BPF_FUNC_get_socket_cookie:
6284                 return &bpf_get_socket_cookie_sock_ops_proto;
6285         case BPF_FUNC_get_local_storage:
6286                 return &bpf_get_local_storage_proto;
6287         case BPF_FUNC_perf_event_output:
6288                 return &bpf_sockopt_event_output_proto;
6289         case BPF_FUNC_sk_storage_get:
6290                 return &bpf_sk_storage_get_proto;
6291         case BPF_FUNC_sk_storage_delete:
6292                 return &bpf_sk_storage_delete_proto;
6293 #ifdef CONFIG_INET
6294         case BPF_FUNC_tcp_sock:
6295                 return &bpf_tcp_sock_proto;
6296 #endif /* CONFIG_INET */
6297         default:
6298                 return bpf_base_func_proto(func_id);
6299         }
6300 }
6301
6302 const struct bpf_func_proto bpf_msg_redirect_map_proto __weak;
6303 const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak;
6304
6305 static const struct bpf_func_proto *
6306 sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6307 {
6308         switch (func_id) {
6309         case BPF_FUNC_msg_redirect_map:
6310                 return &bpf_msg_redirect_map_proto;
6311         case BPF_FUNC_msg_redirect_hash:
6312                 return &bpf_msg_redirect_hash_proto;
6313         case BPF_FUNC_msg_apply_bytes:
6314                 return &bpf_msg_apply_bytes_proto;
6315         case BPF_FUNC_msg_cork_bytes:
6316                 return &bpf_msg_cork_bytes_proto;
6317         case BPF_FUNC_msg_pull_data:
6318                 return &bpf_msg_pull_data_proto;
6319         case BPF_FUNC_msg_push_data:
6320                 return &bpf_msg_push_data_proto;
6321         case BPF_FUNC_msg_pop_data:
6322                 return &bpf_msg_pop_data_proto;
6323         default:
6324                 return bpf_base_func_proto(func_id);
6325         }
6326 }
6327
6328 const struct bpf_func_proto bpf_sk_redirect_map_proto __weak;
6329 const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak;
6330
6331 static const struct bpf_func_proto *
6332 sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6333 {
6334         switch (func_id) {
6335         case BPF_FUNC_skb_store_bytes:
6336                 return &bpf_skb_store_bytes_proto;
6337         case BPF_FUNC_skb_load_bytes:
6338                 return &bpf_skb_load_bytes_proto;
6339         case BPF_FUNC_skb_pull_data:
6340                 return &sk_skb_pull_data_proto;
6341         case BPF_FUNC_skb_change_tail:
6342                 return &sk_skb_change_tail_proto;
6343         case BPF_FUNC_skb_change_head:
6344                 return &sk_skb_change_head_proto;
6345         case BPF_FUNC_get_socket_cookie:
6346                 return &bpf_get_socket_cookie_proto;
6347         case BPF_FUNC_get_socket_uid:
6348                 return &bpf_get_socket_uid_proto;
6349         case BPF_FUNC_sk_redirect_map:
6350                 return &bpf_sk_redirect_map_proto;
6351         case BPF_FUNC_sk_redirect_hash:
6352                 return &bpf_sk_redirect_hash_proto;
6353         case BPF_FUNC_perf_event_output:
6354                 return &bpf_skb_event_output_proto;
6355 #ifdef CONFIG_INET
6356         case BPF_FUNC_sk_lookup_tcp:
6357                 return &bpf_sk_lookup_tcp_proto;
6358         case BPF_FUNC_sk_lookup_udp:
6359                 return &bpf_sk_lookup_udp_proto;
6360         case BPF_FUNC_sk_release:
6361                 return &bpf_sk_release_proto;
6362         case BPF_FUNC_skc_lookup_tcp:
6363                 return &bpf_skc_lookup_tcp_proto;
6364 #endif
6365         default:
6366                 return bpf_base_func_proto(func_id);
6367         }
6368 }
6369
6370 static const struct bpf_func_proto *
6371 flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6372 {
6373         switch (func_id) {
6374         case BPF_FUNC_skb_load_bytes:
6375                 return &bpf_flow_dissector_load_bytes_proto;
6376         default:
6377                 return bpf_base_func_proto(func_id);
6378         }
6379 }
6380
6381 static const struct bpf_func_proto *
6382 lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6383 {
6384         switch (func_id) {
6385         case BPF_FUNC_skb_load_bytes:
6386                 return &bpf_skb_load_bytes_proto;
6387         case BPF_FUNC_skb_pull_data:
6388                 return &bpf_skb_pull_data_proto;
6389         case BPF_FUNC_csum_diff:
6390                 return &bpf_csum_diff_proto;
6391         case BPF_FUNC_get_cgroup_classid:
6392                 return &bpf_get_cgroup_classid_proto;
6393         case BPF_FUNC_get_route_realm:
6394                 return &bpf_get_route_realm_proto;
6395         case BPF_FUNC_get_hash_recalc:
6396                 return &bpf_get_hash_recalc_proto;
6397         case BPF_FUNC_perf_event_output:
6398                 return &bpf_skb_event_output_proto;
6399         case BPF_FUNC_get_smp_processor_id:
6400                 return &bpf_get_smp_processor_id_proto;
6401         case BPF_FUNC_skb_under_cgroup:
6402                 return &bpf_skb_under_cgroup_proto;
6403         default:
6404                 return bpf_base_func_proto(func_id);
6405         }
6406 }
6407
6408 static const struct bpf_func_proto *
6409 lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6410 {
6411         switch (func_id) {
6412         case BPF_FUNC_lwt_push_encap:
6413                 return &bpf_lwt_in_push_encap_proto;
6414         default:
6415                 return lwt_out_func_proto(func_id, prog);
6416         }
6417 }
6418
6419 static const struct bpf_func_proto *
6420 lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6421 {
6422         switch (func_id) {
6423         case BPF_FUNC_skb_get_tunnel_key:
6424                 return &bpf_skb_get_tunnel_key_proto;
6425         case BPF_FUNC_skb_set_tunnel_key:
6426                 return bpf_get_skb_set_tunnel_proto(func_id);
6427         case BPF_FUNC_skb_get_tunnel_opt:
6428                 return &bpf_skb_get_tunnel_opt_proto;
6429         case BPF_FUNC_skb_set_tunnel_opt:
6430                 return bpf_get_skb_set_tunnel_proto(func_id);
6431         case BPF_FUNC_redirect:
6432                 return &bpf_redirect_proto;
6433         case BPF_FUNC_clone_redirect:
6434                 return &bpf_clone_redirect_proto;
6435         case BPF_FUNC_skb_change_tail:
6436                 return &bpf_skb_change_tail_proto;
6437         case BPF_FUNC_skb_change_head:
6438                 return &bpf_skb_change_head_proto;
6439         case BPF_FUNC_skb_store_bytes:
6440                 return &bpf_skb_store_bytes_proto;
6441         case BPF_FUNC_csum_update:
6442                 return &bpf_csum_update_proto;
6443         case BPF_FUNC_l3_csum_replace:
6444                 return &bpf_l3_csum_replace_proto;
6445         case BPF_FUNC_l4_csum_replace:
6446                 return &bpf_l4_csum_replace_proto;
6447         case BPF_FUNC_set_hash_invalid:
6448                 return &bpf_set_hash_invalid_proto;
6449         case BPF_FUNC_lwt_push_encap:
6450                 return &bpf_lwt_xmit_push_encap_proto;
6451         default:
6452                 return lwt_out_func_proto(func_id, prog);
6453         }
6454 }
6455
6456 static const struct bpf_func_proto *
6457 lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
6458 {
6459         switch (func_id) {
6460 #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
6461         case BPF_FUNC_lwt_seg6_store_bytes:
6462                 return &bpf_lwt_seg6_store_bytes_proto;
6463         case BPF_FUNC_lwt_seg6_action:
6464                 return &bpf_lwt_seg6_action_proto;
6465         case BPF_FUNC_lwt_seg6_adjust_srh:
6466                 return &bpf_lwt_seg6_adjust_srh_proto;
6467 #endif
6468         default:
6469                 return lwt_out_func_proto(func_id, prog);
6470         }
6471 }
6472
6473 static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type,
6474                                     const struct bpf_prog *prog,
6475                                     struct bpf_insn_access_aux *info)
6476 {
6477         const int size_default = sizeof(__u32);
6478
6479         if (off < 0 || off >= sizeof(struct __sk_buff))
6480                 return false;
6481
6482         /* The verifier guarantees that size > 0. */
6483         if (off % size != 0)
6484                 return false;
6485
6486         switch (off) {
6487         case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6488                 if (off + size > offsetofend(struct __sk_buff, cb[4]))
6489                         return false;
6490                 break;
6491         case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]):
6492         case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]):
6493         case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4):
6494         case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4):
6495         case bpf_ctx_range(struct __sk_buff, data):
6496         case bpf_ctx_range(struct __sk_buff, data_meta):
6497         case bpf_ctx_range(struct __sk_buff, data_end):
6498                 if (size != size_default)
6499                         return false;
6500                 break;
6501         case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
6502                 return false;
6503         case bpf_ctx_range(struct __sk_buff, tstamp):
6504                 if (size != sizeof(__u64))
6505                         return false;
6506                 break;
6507         case offsetof(struct __sk_buff, sk):
6508                 if (type == BPF_WRITE || size != sizeof(__u64))
6509                         return false;
6510                 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL;
6511                 break;
6512         default:
6513                 /* Only narrow read access allowed for now. */
6514                 if (type == BPF_WRITE) {
6515                         if (size != size_default)
6516                                 return false;
6517                 } else {
6518                         bpf_ctx_record_field_size(info, size_default);
6519                         if (!bpf_ctx_narrow_access_ok(off, size, size_default))
6520                                 return false;
6521                 }
6522         }
6523
6524         return true;
6525 }
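
/* Narrow loads: for the default case above, a program may read a 1- or
 * 2-byte slice of a 4-byte field, e.g. (illustrative)
 *
 *	__u8 lsb = *(__u8 *)&skb->mark;
 *
 * bpf_ctx_record_field_size() plus bpf_ctx_narrow_access_ok() let the
 * context converter emit the full-width load and then the shift/mask
 * needed to extract just the requested slice.
 */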
6526
6527 static bool sk_filter_is_valid_access(int off, int size,
6528                                       enum bpf_access_type type,
6529                                       const struct bpf_prog *prog,
6530                                       struct bpf_insn_access_aux *info)
6531 {
6532         switch (off) {
6533         case bpf_ctx_range(struct __sk_buff, tc_classid):
6534         case bpf_ctx_range(struct __sk_buff, data):
6535         case bpf_ctx_range(struct __sk_buff, data_meta):
6536         case bpf_ctx_range(struct __sk_buff, data_end):
6537         case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6538         case bpf_ctx_range(struct __sk_buff, tstamp):
6539         case bpf_ctx_range(struct __sk_buff, wire_len):
6540                 return false;
6541         }
6542
6543         if (type == BPF_WRITE) {
6544                 switch (off) {
6545                 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6546                         break;
6547                 default:
6548                         return false;
6549                 }
6550         }
6551
6552         return bpf_skb_is_valid_access(off, size, type, prog, info);
6553 }
6554
6555 static bool cg_skb_is_valid_access(int off, int size,
6556                                    enum bpf_access_type type,
6557                                    const struct bpf_prog *prog,
6558                                    struct bpf_insn_access_aux *info)
6559 {
6560         switch (off) {
6561         case bpf_ctx_range(struct __sk_buff, tc_classid):
6562         case bpf_ctx_range(struct __sk_buff, data_meta):
6563         case bpf_ctx_range(struct __sk_buff, wire_len):
6564                 return false;
6565         case bpf_ctx_range(struct __sk_buff, data):
6566         case bpf_ctx_range(struct __sk_buff, data_end):
6567                 if (!capable(CAP_SYS_ADMIN))
6568                         return false;
6569                 break;
6570         }
6571
6572         if (type == BPF_WRITE) {
6573                 switch (off) {
6574                 case bpf_ctx_range(struct __sk_buff, mark):
6575                 case bpf_ctx_range(struct __sk_buff, priority):
6576                 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6577                         break;
6578                 case bpf_ctx_range(struct __sk_buff, tstamp):
6579                         if (!capable(CAP_SYS_ADMIN))
6580                                 return false;
6581                         break;
6582                 default:
6583                         return false;
6584                 }
6585         }
6586
6587         switch (off) {
6588         case bpf_ctx_range(struct __sk_buff, data):
6589                 info->reg_type = PTR_TO_PACKET;
6590                 break;
6591         case bpf_ctx_range(struct __sk_buff, data_end):
6592                 info->reg_type = PTR_TO_PACKET_END;
6593                 break;
6594         }
6595
6596         return bpf_skb_is_valid_access(off, size, type, prog, info);
6597 }
6598
6599 static bool lwt_is_valid_access(int off, int size,
6600                                 enum bpf_access_type type,
6601                                 const struct bpf_prog *prog,
6602                                 struct bpf_insn_access_aux *info)
6603 {
6604         switch (off) {
6605         case bpf_ctx_range(struct __sk_buff, tc_classid):
6606         case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6607         case bpf_ctx_range(struct __sk_buff, data_meta):
6608         case bpf_ctx_range(struct __sk_buff, tstamp):
6609         case bpf_ctx_range(struct __sk_buff, wire_len):
6610                 return false;
6611         }
6612
6613         if (type == BPF_WRITE) {
6614                 switch (off) {
6615                 case bpf_ctx_range(struct __sk_buff, mark):
6616                 case bpf_ctx_range(struct __sk_buff, priority):
6617                 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6618                         break;
6619                 default:
6620                         return false;
6621                 }
6622         }
6623
6624         switch (off) {
6625         case bpf_ctx_range(struct __sk_buff, data):
6626                 info->reg_type = PTR_TO_PACKET;
6627                 break;
6628         case bpf_ctx_range(struct __sk_buff, data_end):
6629                 info->reg_type = PTR_TO_PACKET_END;
6630                 break;
6631         }
6632
6633         return bpf_skb_is_valid_access(off, size, type, prog, info);
6634 }
6635
6636 /* Attach type specific accesses */
6637 static bool __sock_filter_check_attach_type(int off,
6638                                             enum bpf_access_type access_type,
6639                                             enum bpf_attach_type attach_type)
6640 {
6641         switch (off) {
6642         case offsetof(struct bpf_sock, bound_dev_if):
6643         case offsetof(struct bpf_sock, mark):
6644         case offsetof(struct bpf_sock, priority):
6645                 switch (attach_type) {
6646                 case BPF_CGROUP_INET_SOCK_CREATE:
6647                         goto full_access;
6648                 default:
6649                         return false;
6650                 }
6651         case bpf_ctx_range(struct bpf_sock, src_ip4):
6652                 switch (attach_type) {
6653                 case BPF_CGROUP_INET4_POST_BIND:
6654                         goto read_only;
6655                 default:
6656                         return false;
6657                 }
6658         case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
6659                 switch (attach_type) {
6660                 case BPF_CGROUP_INET6_POST_BIND:
6661                         goto read_only;
6662                 default:
6663                         return false;
6664                 }
6665         case bpf_ctx_range(struct bpf_sock, src_port):
6666                 switch (attach_type) {
6667                 case BPF_CGROUP_INET4_POST_BIND:
6668                 case BPF_CGROUP_INET6_POST_BIND:
6669                         goto read_only;
6670                 default:
6671                         return false;
6672                 }
6673         }
6674 read_only:
6675         return access_type == BPF_READ;
6676 full_access:
6677         return true;
6678 }
6679
6680 bool bpf_sock_common_is_valid_access(int off, int size,
6681                                      enum bpf_access_type type,
6682                                      struct bpf_insn_access_aux *info)
6683 {
6684         switch (off) {
6685         case bpf_ctx_range_till(struct bpf_sock, type, priority):
6686                 return false;
6687         default:
6688                 return bpf_sock_is_valid_access(off, size, type, info);
6689         }
6690 }
6691
6692 bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type,
6693                               struct bpf_insn_access_aux *info)
6694 {
6695         const int size_default = sizeof(__u32);
6696
6697         if (off < 0 || off >= sizeof(struct bpf_sock))
6698                 return false;
6699         if (off % size != 0)
6700                 return false;
6701
6702         switch (off) {
6703         case offsetof(struct bpf_sock, state):
6704         case offsetof(struct bpf_sock, family):
6705         case offsetof(struct bpf_sock, type):
6706         case offsetof(struct bpf_sock, protocol):
6707         case offsetof(struct bpf_sock, dst_port):
6708         case offsetof(struct bpf_sock, src_port):
6709         case bpf_ctx_range(struct bpf_sock, src_ip4):
6710         case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
6711         case bpf_ctx_range(struct bpf_sock, dst_ip4):
6712         case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
6713                 bpf_ctx_record_field_size(info, size_default);
6714                 return bpf_ctx_narrow_access_ok(off, size, size_default);
6715         }
6716
6717         return size == size_default;
6718 }
6719
6720 static bool sock_filter_is_valid_access(int off, int size,
6721                                         enum bpf_access_type type,
6722                                         const struct bpf_prog *prog,
6723                                         struct bpf_insn_access_aux *info)
6724 {
6725         if (!bpf_sock_is_valid_access(off, size, type, info))
6726                 return false;
6727         return __sock_filter_check_attach_type(off, type,
6728                                                prog->expected_attach_type);
6729 }
6730
6731 static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write,
6732                              const struct bpf_prog *prog)
6733 {
6734         /* Neither direct read nor direct write requires any preliminary
6735          * action.
6736          */
6737         return 0;
6738 }
6739
6740 static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write,
6741                                 const struct bpf_prog *prog, int drop_verdict)
6742 {
6743         struct bpf_insn *insn = insn_buf;
6744
6745         if (!direct_write)
6746                 return 0;
6747
6748         /* if (!skb->cloned)
6749          *       goto start;
6750          *
6751          * (Fast path; otherwise we conservatively assume the skb may
6752          *  be a clone and let the helper do the rest.)
6753          */
6754         *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET());
6755         *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK);
6756         *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7);
6757
6758         /* ret = bpf_skb_pull_data(skb, 0); */
6759         *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
6760         *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2);
6761         *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6762                                BPF_FUNC_skb_pull_data);
6763         /* if (!ret)
6764          *      goto restore;
6765          * return TC_ACT_SHOT;
6766          */
6767         *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2);
6768         *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict);
6769         *insn++ = BPF_EXIT_INSN();
6770
6771         /* restore: */
6772         *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
6773         /* start: */
6774         *insn++ = prog->insnsi[0];
6775
6776         return insn - insn_buf;
6777 }
6778
6779 static int bpf_gen_ld_abs(const struct bpf_insn *orig,
6780                           struct bpf_insn *insn_buf)
6781 {
6782         bool indirect = BPF_MODE(orig->code) == BPF_IND;
6783         struct bpf_insn *insn = insn_buf;
6784
6785         /* We're guaranteed here that CTX is in R6. */
6786         *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX);
6787         if (!indirect) {
6788                 *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm);
6789         } else {
6790                 *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg);
6791                 if (orig->imm)
6792                         *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm);
6793         }
6794
6795         switch (BPF_SIZE(orig->code)) {
6796         case BPF_B:
6797                 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache);
6798                 break;
6799         case BPF_H:
6800                 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache);
6801                 break;
6802         case BPF_W:
6803                 *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache);
6804                 break;
6805         }
6806
6807         *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2);
6808         *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0);
6809         *insn++ = BPF_EXIT_INSN();
6810
6811         return insn - insn_buf;
6812 }
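
/* Illustration: a legacy 'r0 = *(u8 *)skb[imm]' (BPF_LD | BPF_ABS | BPF_B)
 * is expanded by the above into roughly
 *
 *	r1 = r6				// ctx (skb), guaranteed in R6
 *	r2 = imm			// plus src_reg for BPF_IND
 *	call bpf_skb_load_helper_8_no_cache
 *	if r0 s>= 0 goto +2		// success, loaded value is in r0
 *	r0 = 0
 *	exit				// load failed: terminate with 0
 */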
6813
6814 static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write,
6815                                const struct bpf_prog *prog)
6816 {
6817         return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT);
6818 }
6819
6820 static bool tc_cls_act_is_valid_access(int off, int size,
6821                                        enum bpf_access_type type,
6822                                        const struct bpf_prog *prog,
6823                                        struct bpf_insn_access_aux *info)
6824 {
6825         if (type == BPF_WRITE) {
6826                 switch (off) {
6827                 case bpf_ctx_range(struct __sk_buff, mark):
6828                 case bpf_ctx_range(struct __sk_buff, tc_index):
6829                 case bpf_ctx_range(struct __sk_buff, priority):
6830                 case bpf_ctx_range(struct __sk_buff, tc_classid):
6831                 case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]):
6832                 case bpf_ctx_range(struct __sk_buff, tstamp):
6833                 case bpf_ctx_range(struct __sk_buff, queue_mapping):
6834                         break;
6835                 default:
6836                         return false;
6837                 }
6838         }
6839
6840         switch (off) {
6841         case bpf_ctx_range(struct __sk_buff, data):
6842                 info->reg_type = PTR_TO_PACKET;
6843                 break;
6844         case bpf_ctx_range(struct __sk_buff, data_meta):
6845                 info->reg_type = PTR_TO_PACKET_META;
6846                 break;
6847         case bpf_ctx_range(struct __sk_buff, data_end):
6848                 info->reg_type = PTR_TO_PACKET_END;
6849                 break;
6850         case bpf_ctx_range_till(struct __sk_buff, family, local_port):
6851                 return false;
6852         }
6853
6854         return bpf_skb_is_valid_access(off, size, type, prog, info);
6855 }
6856
6857 static bool __is_valid_xdp_access(int off, int size)
6858 {
6859         if (off < 0 || off >= sizeof(struct xdp_md))
6860                 return false;
6861         if (off % size != 0)
6862                 return false;
6863         if (size != sizeof(__u32))
6864                 return false;
6865
6866         return true;
6867 }
6868
6869 static bool xdp_is_valid_access(int off, int size,
6870                                 enum bpf_access_type type,
6871                                 const struct bpf_prog *prog,
6872                                 struct bpf_insn_access_aux *info)
6873 {
6874         if (type == BPF_WRITE) {
6875                 if (bpf_prog_is_dev_bound(prog->aux)) {
6876                         switch (off) {
6877                         case offsetof(struct xdp_md, rx_queue_index):
6878                                 return __is_valid_xdp_access(off, size);
6879                         }
6880                 }
6881                 return false;
6882         }
6883
6884         switch (off) {
6885         case offsetof(struct xdp_md, data):
6886                 info->reg_type = PTR_TO_PACKET;
6887                 break;
6888         case offsetof(struct xdp_md, data_meta):
6889                 info->reg_type = PTR_TO_PACKET_META;
6890                 break;
6891         case offsetof(struct xdp_md, data_end):
6892                 info->reg_type = PTR_TO_PACKET_END;
6893                 break;
6894         }
6895
6896         return __is_valid_xdp_access(off, size);
6897 }
6898
6899 void bpf_warn_invalid_xdp_action(u32 act)
6900 {
6901         const u32 act_max = XDP_REDIRECT;
6902
6903         WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n",
6904                   act > act_max ? "Illegal" : "Driver unsupported",
6905                   act);
6906 }
6907 EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
6908
6909 static bool sock_addr_is_valid_access(int off, int size,
6910                                       enum bpf_access_type type,
6911                                       const struct bpf_prog *prog,
6912                                       struct bpf_insn_access_aux *info)
6913 {
6914         const int size_default = sizeof(__u32);
6915
6916         if (off < 0 || off >= sizeof(struct bpf_sock_addr))
6917                 return false;
6918         if (off % size != 0)
6919                 return false;
6920
6921         /* Disallow access to IPv6 fields from IPv4 context and vice
6922          * versa.
6923          */
6924         switch (off) {
6925         case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
6926                 switch (prog->expected_attach_type) {
6927                 case BPF_CGROUP_INET4_BIND:
6928                 case BPF_CGROUP_INET4_CONNECT:
6929                 case BPF_CGROUP_UDP4_SENDMSG:
6930                 case BPF_CGROUP_UDP4_RECVMSG:
6931                         break;
6932                 default:
6933                         return false;
6934                 }
6935                 break;
6936         case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
6937                 switch (prog->expected_attach_type) {
6938                 case BPF_CGROUP_INET6_BIND:
6939                 case BPF_CGROUP_INET6_CONNECT:
6940                 case BPF_CGROUP_UDP6_SENDMSG:
6941                 case BPF_CGROUP_UDP6_RECVMSG:
6942                         break;
6943                 default:
6944                         return false;
6945                 }
6946                 break;
6947         case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
6948                 switch (prog->expected_attach_type) {
6949                 case BPF_CGROUP_UDP4_SENDMSG:
6950                         break;
6951                 default:
6952                         return false;
6953                 }
6954                 break;
6955         case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
6956                                 msg_src_ip6[3]):
6957                 switch (prog->expected_attach_type) {
6958                 case BPF_CGROUP_UDP6_SENDMSG:
6959                         break;
6960                 default:
6961                         return false;
6962                 }
6963                 break;
6964         }
6965
6966         switch (off) {
6967         case bpf_ctx_range(struct bpf_sock_addr, user_ip4):
6968         case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
6969         case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4):
6970         case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
6971                                 msg_src_ip6[3]):
6972                 if (type == BPF_READ) {
6973                         bpf_ctx_record_field_size(info, size_default);
6974
6975                         if (bpf_ctx_wide_access_ok(off, size,
6976                                                    struct bpf_sock_addr,
6977                                                    user_ip6))
6978                                 return true;
6979
6980                         if (bpf_ctx_wide_access_ok(off, size,
6981                                                    struct bpf_sock_addr,
6982                                                    msg_src_ip6))
6983                                 return true;
6984
6985                         if (!bpf_ctx_narrow_access_ok(off, size, size_default))
6986                                 return false;
6987                 } else {
6988                         if (bpf_ctx_wide_access_ok(off, size,
6989                                                    struct bpf_sock_addr,
6990                                                    user_ip6))
6991                                 return true;
6992
6993                         if (bpf_ctx_wide_access_ok(off, size,
6994                                                    struct bpf_sock_addr,
6995                                                    msg_src_ip6))
6996                                 return true;
6997
6998                         if (size != size_default)
6999                                 return false;
7000                 }
7001                 break;
7002         case bpf_ctx_range(struct bpf_sock_addr, user_port):
7003                 if (size != size_default)
7004                         return false;
7005                 break;
7006         case offsetof(struct bpf_sock_addr, sk):
7007                 if (type != BPF_READ)
7008                         return false;
7009                 if (size != sizeof(__u64))
7010                         return false;
7011                 info->reg_type = PTR_TO_SOCKET;
7012                 break;
7013         default:
7014                 if (type == BPF_READ) {
7015                         if (size != size_default)
7016                                 return false;
7017                 } else {
7018                         return false;
7019                 }
7020         }
7021
7022         return true;
7023 }
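
/* bpf_ctx_wide_access_ok() additionally permits aligned 8-byte accesses
 * to the IPv6 address arrays, so a program can copy an address with two
 * stores instead of four (illustrative):
 *
 *	__u64 *d = (__u64 *)ctx->user_ip6;
 *
 *	d[0] = addr_lo;		// bytes 0..7 of the IPv6 address
 *	d[1] = addr_hi;		// bytes 8..15
 */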
7024
7025 static bool sock_ops_is_valid_access(int off, int size,
7026                                      enum bpf_access_type type,
7027                                      const struct bpf_prog *prog,
7028                                      struct bpf_insn_access_aux *info)
7029 {
7030         const int size_default = sizeof(__u32);
7031
7032         if (off < 0 || off >= sizeof(struct bpf_sock_ops))
7033                 return false;
7034
7035         /* The verifier guarantees that size > 0. */
7036         if (off % size != 0)
7037                 return false;
7038
7039         if (type == BPF_WRITE) {
7040                 switch (off) {
7041                 case offsetof(struct bpf_sock_ops, reply):
7042                 case offsetof(struct bpf_sock_ops, sk_txhash):
7043                         if (size != size_default)
7044                                 return false;
7045                         break;
7046                 default:
7047                         return false;
7048                 }
7049         } else {
7050                 switch (off) {
7051                 case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received,
7052                                         bytes_acked):
7053                         if (size != sizeof(__u64))
7054                                 return false;
7055                         break;
7056                 case offsetof(struct bpf_sock_ops, sk):
7057                         if (size != sizeof(__u64))
7058                                 return false;
7059                         info->reg_type = PTR_TO_SOCKET_OR_NULL;
7060                         break;
7061                 default:
7062                         if (size != size_default)
7063                                 return false;
7064                         break;
7065                 }
7066         }
7067
7068         return true;
7069 }
7070
7071 static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write,
7072                            const struct bpf_prog *prog)
7073 {
7074         return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP);
7075 }
7076
7077 static bool sk_skb_is_valid_access(int off, int size,
7078                                    enum bpf_access_type type,
7079                                    const struct bpf_prog *prog,
7080                                    struct bpf_insn_access_aux *info)
7081 {
7082         switch (off) {
7083         case bpf_ctx_range(struct __sk_buff, tc_classid):
7084         case bpf_ctx_range(struct __sk_buff, data_meta):
7085         case bpf_ctx_range(struct __sk_buff, tstamp):
7086         case bpf_ctx_range(struct __sk_buff, wire_len):
7087                 return false;
7088         }
7089
7090         if (type == BPF_WRITE) {
7091                 switch (off) {
7092                 case bpf_ctx_range(struct __sk_buff, tc_index):
7093                 case bpf_ctx_range(struct __sk_buff, priority):
7094                         break;
7095                 default:
7096                         return false;
7097                 }
7098         }
7099
7100         switch (off) {
7101         case bpf_ctx_range(struct __sk_buff, mark):
7102                 return false;
7103         case bpf_ctx_range(struct __sk_buff, data):
7104                 info->reg_type = PTR_TO_PACKET;
7105                 break;
7106         case bpf_ctx_range(struct __sk_buff, data_end):
7107                 info->reg_type = PTR_TO_PACKET_END;
7108                 break;
7109         }
7110
7111         return bpf_skb_is_valid_access(off, size, type, prog, info);
7112 }
7113
7114 static bool sk_msg_is_valid_access(int off, int size,
7115                                    enum bpf_access_type type,
7116                                    const struct bpf_prog *prog,
7117                                    struct bpf_insn_access_aux *info)
7118 {
7119         if (type == BPF_WRITE)
7120                 return false;
7121
7122         if (off % size != 0)
7123                 return false;
7124
7125         switch (off) {
7126         case offsetof(struct sk_msg_md, data):
7127                 info->reg_type = PTR_TO_PACKET;
7128                 if (size != sizeof(__u64))
7129                         return false;
7130                 break;
7131         case offsetof(struct sk_msg_md, data_end):
7132                 info->reg_type = PTR_TO_PACKET_END;
7133                 if (size != sizeof(__u64))
7134                         return false;
7135                 break;
7136         case bpf_ctx_range(struct sk_msg_md, family):
7137         case bpf_ctx_range(struct sk_msg_md, remote_ip4):
7138         case bpf_ctx_range(struct sk_msg_md, local_ip4):
7139         case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]):
7140         case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]):
7141         case bpf_ctx_range(struct sk_msg_md, remote_port):
7142         case bpf_ctx_range(struct sk_msg_md, local_port):
7143         case bpf_ctx_range(struct sk_msg_md, size):
7144                 if (size != sizeof(__u32))
7145                         return false;
7146                 break;
7147         default:
7148                 return false;
7149         }
7150         return true;
7151 }
7152
7153 static bool flow_dissector_is_valid_access(int off, int size,
7154                                            enum bpf_access_type type,
7155                                            const struct bpf_prog *prog,
7156                                            struct bpf_insn_access_aux *info)
7157 {
7158         const int size_default = sizeof(__u32);
7159
7160         if (off < 0 || off >= sizeof(struct __sk_buff))
7161                 return false;
7162
7163         if (type == BPF_WRITE)
7164                 return false;
7165
7166         switch (off) {
7167         case bpf_ctx_range(struct __sk_buff, data):
7168                 if (size != size_default)
7169                         return false;
7170                 info->reg_type = PTR_TO_PACKET;
7171                 return true;
7172         case bpf_ctx_range(struct __sk_buff, data_end):
7173                 if (size != size_default)
7174                         return false;
7175                 info->reg_type = PTR_TO_PACKET_END;
7176                 return true;
7177         case bpf_ctx_range_ptr(struct __sk_buff, flow_keys):
7178                 if (size != sizeof(__u64))
7179                         return false;
7180                 info->reg_type = PTR_TO_FLOW_KEYS;
7181                 return true;
7182         default:
7183                 return false;
7184         }
7185 }
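
/* Illustration (hedged sketch, assuming the uapi struct bpf_flow_keys
 * layout and an IPv4 packet for brevity): a flow dissector program reads
 * data and data_end at the 4-byte default size and flow_keys as an 8-byte
 * pointer, then fills the keys in:
 *
 *	#include <linux/bpf.h>
 *	#include <linux/ip.h>
 *	#include <bpf/bpf_helpers.h>
 *
 *	SEC("flow_dissector")
 *	int dissect(struct __sk_buff *skb)
 *	{
 *		void *data = (void *)(long)skb->data;
 *		void *data_end = (void *)(long)skb->data_end;
 *		struct bpf_flow_keys *keys = skb->flow_keys;
 *		struct iphdr *ip = data + keys->nhoff;
 *
 *		if ((void *)(ip + 1) > data_end)
 *			return BPF_DROP;
 *		keys->ip_proto = ip->protocol;
 *		keys->ipv4_src = ip->saddr;
 *		keys->ipv4_dst = ip->daddr;
 *		keys->thoff = keys->nhoff + sizeof(*ip);
 *		return BPF_OK;
 *	}
 */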
7186
7187 static u32 flow_dissector_convert_ctx_access(enum bpf_access_type type,
7188                                              const struct bpf_insn *si,
7189                                              struct bpf_insn *insn_buf,
7190                                              struct bpf_prog *prog,
7191                                              u32 *target_size)
7193 {
7194         struct bpf_insn *insn = insn_buf;
7195
7196         switch (si->off) {
7197         case offsetof(struct __sk_buff, data):
7198                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data),
7199                                       si->dst_reg, si->src_reg,
7200                                       offsetof(struct bpf_flow_dissector, data));
7201                 break;
7202
7203         case offsetof(struct __sk_buff, data_end):
7204                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end),
7205                                       si->dst_reg, si->src_reg,
7206                                       offsetof(struct bpf_flow_dissector, data_end));
7207                 break;
7208
7209         case offsetof(struct __sk_buff, flow_keys):
7210                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys),
7211                                       si->dst_reg, si->src_reg,
7212                                       offsetof(struct bpf_flow_dissector, flow_keys));
7213                 break;
7214         }
7215
7216         return insn - insn_buf;
7217 }
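
/* What the rewrite above means in practice (illustration): the program is
 * written against struct __sk_buff, but each context access is remapped at
 * load time onto the kernel-side struct bpf_flow_dissector, widening the
 * pointer loads as needed, e.g.:
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, data));
 * becomes
 *	r0 = *(u64 *)(r1 + offsetof(struct bpf_flow_dissector, data));
 */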
7218
7219 static u32 bpf_convert_ctx_access(enum bpf_access_type type,
7220                                   const struct bpf_insn *si,
7221                                   struct bpf_insn *insn_buf,
7222                                   struct bpf_prog *prog, u32 *target_size)
7223 {
7224         struct bpf_insn *insn = insn_buf;
7225         int off;
7226
7227         switch (si->off) {
7228         case offsetof(struct __sk_buff, len):
7229                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7230                                       bpf_target_off(struct sk_buff, len, 4,
7231                                                      target_size));
7232                 break;
7233
7234         case offsetof(struct __sk_buff, protocol):
7235                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7236                                       bpf_target_off(struct sk_buff, protocol, 2,
7237                                                      target_size));
7238                 break;
7239
7240         case offsetof(struct __sk_buff, vlan_proto):
7241                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7242                                       bpf_target_off(struct sk_buff, vlan_proto, 2,
7243                                                      target_size));
7244                 break;
7245
7246         case offsetof(struct __sk_buff, priority):
7247                 if (type == BPF_WRITE)
7248                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7249                                               bpf_target_off(struct sk_buff, priority, 4,
7250                                                              target_size));
7251                 else
7252                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7253                                               bpf_target_off(struct sk_buff, priority, 4,
7254                                                              target_size));
7255                 break;
7256
7257         case offsetof(struct __sk_buff, ingress_ifindex):
7258                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7259                                       bpf_target_off(struct sk_buff, skb_iif, 4,
7260                                                      target_size));
7261                 break;
7262
7263         case offsetof(struct __sk_buff, ifindex):
7264                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
7265                                       si->dst_reg, si->src_reg,
7266                                       offsetof(struct sk_buff, dev));
7267                 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
7268                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7269                                       bpf_target_off(struct net_device, ifindex, 4,
7270                                                      target_size));
7271                 break;
7272
7273         case offsetof(struct __sk_buff, hash):
7274                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7275                                       bpf_target_off(struct sk_buff, hash, 4,
7276                                                      target_size));
7277                 break;
7278
7279         case offsetof(struct __sk_buff, mark):
7280                 if (type == BPF_WRITE)
7281                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7282                                               bpf_target_off(struct sk_buff, mark, 4,
7283                                                              target_size));
7284                 else
7285                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7286                                               bpf_target_off(struct sk_buff, mark, 4,
7287                                                              target_size));
7288                 break;
7289
7290         case offsetof(struct __sk_buff, pkt_type):
7291                 *target_size = 1;
7292                 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
7293                                       PKT_TYPE_OFFSET());
7294                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX);
7295 #ifdef __BIG_ENDIAN_BITFIELD
7296                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5);
7297 #endif
7298                 break;
7299
7300         case offsetof(struct __sk_buff, queue_mapping):
7301                 if (type == BPF_WRITE) {
7302                         *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1);
7303                         *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
7304                                               bpf_target_off(struct sk_buff,
7305                                                              queue_mapping,
7306                                                              2, target_size));
7307                 } else {
7308                         *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7309                                               bpf_target_off(struct sk_buff,
7310                                                              queue_mapping,
7311                                                              2, target_size));
7312                 }
7313                 break;
7314
7315         case offsetof(struct __sk_buff, vlan_present):
7316                 *target_size = 1;
7317                 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
7318                                       PKT_VLAN_PRESENT_OFFSET());
7319                 if (PKT_VLAN_PRESENT_BIT)
7320                         *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
7321                 if (PKT_VLAN_PRESENT_BIT < 7)
7322                         *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
7323                 break;
7324
7325         case offsetof(struct __sk_buff, vlan_tci):
7326                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7327                                       bpf_target_off(struct sk_buff, vlan_tci, 2,
7328                                                      target_size));
7329                 break;
7330
7331         case offsetof(struct __sk_buff, cb[0]) ...
7332              offsetofend(struct __sk_buff, cb[4]) - 1:
7333                 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
7334                 BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
7335                               offsetof(struct qdisc_skb_cb, data)) %
7336                              sizeof(__u64));
7337
7338                 prog->cb_access = 1;
7339                 off  = si->off;
7340                 off -= offsetof(struct __sk_buff, cb[0]);
7341                 off += offsetof(struct sk_buff, cb);
7342                 off += offsetof(struct qdisc_skb_cb, data);
7343                 if (type == BPF_WRITE)
7344                         *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
7345                                               si->src_reg, off);
7346                 else
7347                         *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
7348                                               si->src_reg, off);
7349                 break;
7350
7351         case offsetof(struct __sk_buff, tc_classid):
7352                 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
7353
7354                 off  = si->off;
7355                 off -= offsetof(struct __sk_buff, tc_classid);
7356                 off += offsetof(struct sk_buff, cb);
7357                 off += offsetof(struct qdisc_skb_cb, tc_classid);
7358                 *target_size = 2;
7359                 if (type == BPF_WRITE)
7360                         *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
7361                                               si->src_reg, off);
7362                 else
7363                         *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
7364                                               si->src_reg, off);
7365                 break;
7366
7367         case offsetof(struct __sk_buff, data):
7368                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
7369                                       si->dst_reg, si->src_reg,
7370                                       offsetof(struct sk_buff, data));
7371                 break;
7372
7373         case offsetof(struct __sk_buff, data_meta):
7374                 off  = si->off;
7375                 off -= offsetof(struct __sk_buff, data_meta);
7376                 off += offsetof(struct sk_buff, cb);
7377                 off += offsetof(struct bpf_skb_data_end, data_meta);
7378                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
7379                                       si->src_reg, off);
7380                 break;
7381
7382         case offsetof(struct __sk_buff, data_end):
7383                 off  = si->off;
7384                 off -= offsetof(struct __sk_buff, data_end);
7385                 off += offsetof(struct sk_buff, cb);
7386                 off += offsetof(struct bpf_skb_data_end, data_end);
7387                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
7388                                       si->src_reg, off);
7389                 break;
7390
7391         case offsetof(struct __sk_buff, tc_index):
7392 #ifdef CONFIG_NET_SCHED
7393                 if (type == BPF_WRITE)
7394                         *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
7395                                               bpf_target_off(struct sk_buff, tc_index, 2,
7396                                                              target_size));
7397                 else
7398                         *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
7399                                               bpf_target_off(struct sk_buff, tc_index, 2,
7400                                                              target_size));
7401 #else
7402                 *target_size = 2;
7403                 if (type == BPF_WRITE)
7404                         *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
7405                 else
7406                         *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
7407 #endif
7408                 break;
7409
7410         case offsetof(struct __sk_buff, napi_id):
7411 #if defined(CONFIG_NET_RX_BUSY_POLL)
7412                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7413                                       bpf_target_off(struct sk_buff, napi_id, 4,
7414                                                      target_size));
7415                 *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1);
7416                 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
7417 #else
7418                 *target_size = 4;
7419                 *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
7420 #endif
7421                 break;
7422         case offsetof(struct __sk_buff, family):
7423                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
7424
7425                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7426                                       si->dst_reg, si->src_reg,
7427                                       offsetof(struct sk_buff, sk));
7428                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7429                                       bpf_target_off(struct sock_common,
7430                                                      skc_family,
7431                                                      2, target_size));
7432                 break;
7433         case offsetof(struct __sk_buff, remote_ip4):
7434                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
7435
7436                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7437                                       si->dst_reg, si->src_reg,
7438                                       offsetof(struct sk_buff, sk));
7439                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7440                                       bpf_target_off(struct sock_common,
7441                                                      skc_daddr,
7442                                                      4, target_size));
7443                 break;
7444         case offsetof(struct __sk_buff, local_ip4):
7445                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7446                                           skc_rcv_saddr) != 4);
7447
7448                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7449                                       si->dst_reg, si->src_reg,
7450                                       offsetof(struct sk_buff, sk));
7451                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7452                                       bpf_target_off(struct sock_common,
7453                                                      skc_rcv_saddr,
7454                                                      4, target_size));
7455                 break;
7456         case offsetof(struct __sk_buff, remote_ip6[0]) ...
7457              offsetof(struct __sk_buff, remote_ip6[3]):
7458 #if IS_ENABLED(CONFIG_IPV6)
7459                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7460                                           skc_v6_daddr.s6_addr32[0]) != 4);
7461
7462                 off = si->off;
7463                 off -= offsetof(struct __sk_buff, remote_ip6[0]);
7464
7465                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7466                                       si->dst_reg, si->src_reg,
7467                                       offsetof(struct sk_buff, sk));
7468                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7469                                       offsetof(struct sock_common,
7470                                                skc_v6_daddr.s6_addr32[0]) +
7471                                       off);
7472 #else
7473                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7474 #endif
7475                 break;
7476         case offsetof(struct __sk_buff, local_ip6[0]) ...
7477              offsetof(struct __sk_buff, local_ip6[3]):
7478 #if IS_ENABLED(CONFIG_IPV6)
7479                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
7480                                           skc_v6_rcv_saddr.s6_addr32[0]) != 4);
7481
7482                 off = si->off;
7483                 off -= offsetof(struct __sk_buff, local_ip6[0]);
7484
7485                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7486                                       si->dst_reg, si->src_reg,
7487                                       offsetof(struct sk_buff, sk));
7488                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7489                                       offsetof(struct sock_common,
7490                                                skc_v6_rcv_saddr.s6_addr32[0]) +
7491                                       off);
7492 #else
7493                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7494 #endif
7495                 break;
7496
7497         case offsetof(struct __sk_buff, remote_port):
7498                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
7499
7500                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7501                                       si->dst_reg, si->src_reg,
7502                                       offsetof(struct sk_buff, sk));
7503                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7504                                       bpf_target_off(struct sock_common,
7505                                                      skc_dport,
7506                                                      2, target_size));
7507 #ifndef __BIG_ENDIAN_BITFIELD
7508                 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
7509 #endif
7510                 break;
7511
7512         case offsetof(struct __sk_buff, local_port):
7513                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
7514
7515                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7516                                       si->dst_reg, si->src_reg,
7517                                       offsetof(struct sk_buff, sk));
7518                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
7519                                       bpf_target_off(struct sock_common,
7520                                                      skc_num, 2, target_size));
7521                 break;
7522
7523         case offsetof(struct __sk_buff, tstamp):
7524                 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8);
7525
7526                 if (type == BPF_WRITE)
7527                         *insn++ = BPF_STX_MEM(BPF_DW,
7528                                               si->dst_reg, si->src_reg,
7529                                               bpf_target_off(struct sk_buff,
7530                                                              tstamp, 8,
7531                                                              target_size));
7532                 else
7533                         *insn++ = BPF_LDX_MEM(BPF_DW,
7534                                               si->dst_reg, si->src_reg,
7535                                               bpf_target_off(struct sk_buff,
7536                                                              tstamp, 8,
7537                                                              target_size));
7538                 break;
7539
7540         case offsetof(struct __sk_buff, gso_segs):
7541                 /* si->dst_reg = skb_shinfo(SKB); */
7542 #ifdef NET_SKBUFF_DATA_USES_OFFSET
7543                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7544                                       BPF_REG_AX, si->src_reg,
7545                                       offsetof(struct sk_buff, end));
7546                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head),
7547                                       si->dst_reg, si->src_reg,
7548                                       offsetof(struct sk_buff, head));
7549                 *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX);
7550 #else
7551                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end),
7552                                       si->dst_reg, si->src_reg,
7553                                       offsetof(struct sk_buff, end));
7554 #endif
7555                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs),
7556                                       si->dst_reg, si->dst_reg,
7557                                       bpf_target_off(struct skb_shared_info,
7558                                                      gso_segs, 2,
7559                                                      target_size));
7560                 break;
7561         case offsetof(struct __sk_buff, wire_len):
7562                 BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4);
7563
7564                 off = si->off;
7565                 off -= offsetof(struct __sk_buff, wire_len);
7566                 off += offsetof(struct sk_buff, cb);
7567                 off += offsetof(struct qdisc_skb_cb, pkt_len);
7568                 *target_size = 4;
7569                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off);
7570                 break;
7571
7572         case offsetof(struct __sk_buff, sk):
7573                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk),
7574                                       si->dst_reg, si->src_reg,
7575                                       offsetof(struct sk_buff, sk));
7576                 break;
7577         }
7578
7579         return insn - insn_buf;
7580 }
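
/* Two illustrative rewrites performed above (pseudo-assembly; offsets are
 * resolved when the program is loaded). A plain field such as len is a 1:1
 * remap from the uapi mirror onto struct sk_buff:
 *
 *	r0 = *(u32 *)(r1 + offsetof(struct __sk_buff, len));
 * becomes
 *	r0 = *(u32 *)(r1 + offsetof(struct sk_buff, len));
 *
 * whereas ifindex needs a NULL check on skb->dev, so a single access
 * expands to three instructions and yields 0 when there is no device:
 *
 *	dst = *(u64 *)(src + offsetof(struct sk_buff, dev));
 *	if dst == 0 goto +1;
 *	dst = *(u32 *)(dst + offsetof(struct net_device, ifindex));
 */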
7581
7582 u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
7583                                 const struct bpf_insn *si,
7584                                 struct bpf_insn *insn_buf,
7585                                 struct bpf_prog *prog, u32 *target_size)
7586 {
7587         struct bpf_insn *insn = insn_buf;
7588         int off;
7589
7590         switch (si->off) {
7591         case offsetof(struct bpf_sock, bound_dev_if):
7592                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);
7593
7594                 if (type == BPF_WRITE)
7595                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7596                                         offsetof(struct sock, sk_bound_dev_if));
7597                 else
7598                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7599                                       offsetof(struct sock, sk_bound_dev_if));
7600                 break;
7601
7602         case offsetof(struct bpf_sock, mark):
7603                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4);
7604
7605                 if (type == BPF_WRITE)
7606                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7607                                         offsetof(struct sock, sk_mark));
7608                 else
7609                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7610                                       offsetof(struct sock, sk_mark));
7611                 break;
7612
7613         case offsetof(struct bpf_sock, priority):
7614                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4);
7615
7616                 if (type == BPF_WRITE)
7617                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
7618                                         offsetof(struct sock, sk_priority));
7619                 else
7620                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7621                                       offsetof(struct sock, sk_priority));
7622                 break;
7623
7624         case offsetof(struct bpf_sock, family):
7625                 *insn++ = BPF_LDX_MEM(
7626                         BPF_FIELD_SIZEOF(struct sock_common, skc_family),
7627                         si->dst_reg, si->src_reg,
7628                         bpf_target_off(struct sock_common,
7629                                        skc_family,
7630                                        FIELD_SIZEOF(struct sock_common,
7631                                                     skc_family),
7632                                        target_size));
7633                 break;
7634
7635         case offsetof(struct bpf_sock, type):
7636                 BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2);
7637                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7638                                       offsetof(struct sock, __sk_flags_offset));
7639                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
7640                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
7641                 *target_size = 2;
7642                 break;
7643
7644         case offsetof(struct bpf_sock, protocol):
7645                 BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
7646                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
7647                                       offsetof(struct sock, __sk_flags_offset));
7648                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
7649                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
7650                 *target_size = 1;
7651                 break;
7652
7653         case offsetof(struct bpf_sock, src_ip4):
7654                 *insn++ = BPF_LDX_MEM(
7655                         BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7656                         bpf_target_off(struct sock_common, skc_rcv_saddr,
7657                                        FIELD_SIZEOF(struct sock_common,
7658                                                     skc_rcv_saddr),
7659                                        target_size));
7660                 break;
7661
7662         case offsetof(struct bpf_sock, dst_ip4):
7663                 *insn++ = BPF_LDX_MEM(
7664                         BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7665                         bpf_target_off(struct sock_common, skc_daddr,
7666                                        FIELD_SIZEOF(struct sock_common,
7667                                                     skc_daddr),
7668                                        target_size));
7669                 break;
7670
7671         case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]):
7672 #if IS_ENABLED(CONFIG_IPV6)
7673                 off = si->off;
7674                 off -= offsetof(struct bpf_sock, src_ip6[0]);
7675                 *insn++ = BPF_LDX_MEM(
7676                         BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7677                         bpf_target_off(
7678                                 struct sock_common,
7679                                 skc_v6_rcv_saddr.s6_addr32[0],
7680                                 FIELD_SIZEOF(struct sock_common,
7681                                              skc_v6_rcv_saddr.s6_addr32[0]),
7682                                 target_size) + off);
7683 #else
7684                 (void)off;
7685                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7686 #endif
7687                 break;
7688
7689         case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]):
7690 #if IS_ENABLED(CONFIG_IPV6)
7691                 off = si->off;
7692                 off -= offsetof(struct bpf_sock, dst_ip6[0]);
7693                 *insn++ = BPF_LDX_MEM(
7694                         BPF_SIZE(si->code), si->dst_reg, si->src_reg,
7695                         bpf_target_off(struct sock_common,
7696                                        skc_v6_daddr.s6_addr32[0],
7697                                        FIELD_SIZEOF(struct sock_common,
7698                                                     skc_v6_daddr.s6_addr32[0]),
7699                                        target_size) + off);
7700 #else
7701                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
7702                 *target_size = 4;
7703 #endif
7704                 break;
7705
7706         case offsetof(struct bpf_sock, src_port):
7707                 *insn++ = BPF_LDX_MEM(
7708                         BPF_FIELD_SIZEOF(struct sock_common, skc_num),
7709                         si->dst_reg, si->src_reg,
7710                         bpf_target_off(struct sock_common, skc_num,
7711                                        FIELD_SIZEOF(struct sock_common,
7712                                                     skc_num),
7713                                        target_size));
7714                 break;
7715
7716         case offsetof(struct bpf_sock, dst_port):
7717                 *insn++ = BPF_LDX_MEM(
7718                         BPF_FIELD_SIZEOF(struct sock_common, skc_dport),
7719                         si->dst_reg, si->src_reg,
7720                         bpf_target_off(struct sock_common, skc_dport,
7721                                        FIELD_SIZEOF(struct sock_common,
7722                                                     skc_dport),
7723                                        target_size));
7724                 break;
7725
7726         case offsetof(struct bpf_sock, state):
7727                 *insn++ = BPF_LDX_MEM(
7728                         BPF_FIELD_SIZEOF(struct sock_common, skc_state),
7729                         si->dst_reg, si->src_reg,
7730                         bpf_target_off(struct sock_common, skc_state,
7731                                        FIELD_SIZEOF(struct sock_common,
7732                                                     skc_state),
7733                                        target_size));
7734                 break;
7735         }
7736
7737         return insn - insn_buf;
7738 }
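
/* Hedged sketch of the consumer side: a cgroup sock program reading and
 * writing struct bpf_sock fields handled above. Note the byte orders the
 * rewrites imply: src_port comes from skc_num (host order) while dst_port
 * comes from skc_dport (network order):
 *
 *	SEC("cgroup/sock")
 *	int sock_create(struct bpf_sock *sk)
 *	{
 *		if (sk->family == 2 && sk->type == 2)	// AF_INET, SOCK_DGRAM
 *			sk->mark = 0x2a;		// writable field
 *		return 1;				// allow the socket
 *	}
 */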
7739
7740 static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
7741                                          const struct bpf_insn *si,
7742                                          struct bpf_insn *insn_buf,
7743                                          struct bpf_prog *prog, u32 *target_size)
7744 {
7745         struct bpf_insn *insn = insn_buf;
7746
7747         switch (si->off) {
7748         case offsetof(struct __sk_buff, ifindex):
7749                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
7750                                       si->dst_reg, si->src_reg,
7751                                       offsetof(struct sk_buff, dev));
7752                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7753                                       bpf_target_off(struct net_device, ifindex, 4,
7754                                                      target_size));
7755                 break;
7756         default:
7757                 return bpf_convert_ctx_access(type, si, insn_buf, prog,
7758                                               target_size);
7759         }
7760
7761         return insn - insn_buf;
7762 }
7763
7764 static u32 xdp_convert_ctx_access(enum bpf_access_type type,
7765                                   const struct bpf_insn *si,
7766                                   struct bpf_insn *insn_buf,
7767                                   struct bpf_prog *prog, u32 *target_size)
7768 {
7769         struct bpf_insn *insn = insn_buf;
7770
7771         switch (si->off) {
7772         case offsetof(struct xdp_md, data):
7773                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
7774                                       si->dst_reg, si->src_reg,
7775                                       offsetof(struct xdp_buff, data));
7776                 break;
7777         case offsetof(struct xdp_md, data_meta):
7778                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta),
7779                                       si->dst_reg, si->src_reg,
7780                                       offsetof(struct xdp_buff, data_meta));
7781                 break;
7782         case offsetof(struct xdp_md, data_end):
7783                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
7784                                       si->dst_reg, si->src_reg,
7785                                       offsetof(struct xdp_buff, data_end));
7786                 break;
7787         case offsetof(struct xdp_md, ingress_ifindex):
7788                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
7789                                       si->dst_reg, si->src_reg,
7790                                       offsetof(struct xdp_buff, rxq));
7791                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev),
7792                                       si->dst_reg, si->dst_reg,
7793                                       offsetof(struct xdp_rxq_info, dev));
7794                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7795                                       offsetof(struct net_device, ifindex));
7796                 break;
7797         case offsetof(struct xdp_md, rx_queue_index):
7798                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq),
7799                                       si->dst_reg, si->src_reg,
7800                                       offsetof(struct xdp_buff, rxq));
7801                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
7802                                       offsetof(struct xdp_rxq_info,
7803                                                queue_index));
7804                 break;
7805         }
7806
7807         return insn - insn_buf;
7808 }
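
/* Illustration: the two-level fields above are what make single C
 * expressions in an XDP program work; ctx->rx_queue_index expands to loads
 * through xdp->rxq, and ctx->ingress_ifindex additionally walks rxq->dev:
 *
 *	SEC("xdp")
 *	int xdp_prog(struct xdp_md *ctx)
 *	{
 *		void *data = (void *)(long)ctx->data;
 *		void *data_end = (void *)(long)ctx->data_end;
 *
 *		if (data + 14 > data_end)	// room for an Ethernet header
 *			return XDP_DROP;
 *		if (ctx->rx_queue_index != 0)	// xdp->rxq->queue_index
 *			return XDP_PASS;
 *		return XDP_PASS;
 *	}
 */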
7809
7810 /* SOCK_ADDR_LOAD_NESTED_FIELD() loads Nested Field S.F.NF, where S is the type
7811  * of the context Structure and F is the Field in the context structure that
7812  * contains a pointer to the Nested Structure of type NS that has the field NF.
7813  *
7814  * SIZE encodes the load size (BPF_B, BPF_H, etc.). It is up to the caller to
7815  * make sure that SIZE is not greater than the actual size of S.F.NF.
7816  *
7817  * If an offset OFF is provided, the load happens from that offset relative to
7818  * the offset of NF.
7819  */
7820 #define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF)          \
7821         do {                                                                   \
7822                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg,     \
7823                                       si->src_reg, offsetof(S, F));            \
7824                 *insn++ = BPF_LDX_MEM(                                         \
7825                         SIZE, si->dst_reg, si->dst_reg,                        \
7826                         bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),           \
7827                                        target_size)                            \
7828                                 + OFF);                                        \
7829         } while (0)
7830
7831 #define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF)                              \
7832         SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF,                     \
7833                                              BPF_FIELD_SIZEOF(NS, NF), 0)
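
/* For example (illustration only), SOCK_ADDR_LOAD_NESTED_FIELD(struct
 * bpf_sock_addr_kern, struct sockaddr, uaddr, sa_family) emits two loads,
 * in pseudo-assembly:
 *
 *	// dst = ctx->uaddr			(pointer field F of S)
 *	dst = *(u64 *)(src + offsetof(struct bpf_sock_addr_kern, uaddr));
 *	// dst = uaddr->sa_family		(field NF of nested NS)
 *	dst = *(u16 *)(dst + offsetof(struct sockaddr, sa_family));
 */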
7834
7835 /* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to
7836  * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for a store operation.
7837  *
7838  * In addition it uses Temporary Field TF (a member of struct S) as a 3rd
7839  * "register", since the two registers available in convert_ctx_access are not
7840  * enough: we can overwrite neither SRC, since it contains the value to store,
7841  * nor DST, since it contains the pointer to the context that may be used by
7842  * later instructions. But we need a temporary place to save the pointer to
7843  * the nested structure whose field we want to store to.
7844  */
7845 #define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE, OFF, TF)          \
7846         do {                                                                   \
7847                 int tmp_reg = BPF_REG_9;                                       \
7848                 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)          \
7849                         --tmp_reg;                                             \
7850                 if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg)          \
7851                         --tmp_reg;                                             \
7852                 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg,            \
7853                                       offsetof(S, TF));                        \
7854                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg,         \
7855                                       si->dst_reg, offsetof(S, F));            \
7856                 *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg,              \
7857                         bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF),           \
7858                                        target_size)                            \
7859                                 + OFF);                                        \
7860                 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg,            \
7861                                       offsetof(S, TF));                        \
7862         } while (0)
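
/* Illustration of the emitted store sequence: tmp_reg is whichever of R9,
 * R8 or R7 collides with neither SRC nor DST, and TF is a scratch slot in
 * the kernel-side context (e.g. the tmp_reg member of bpf_sock_addr_kern):
 *
 *	*(u64 *)(dst + offsetof(S, TF)) = tmp_reg;	// spill borrowed reg
 *	tmp_reg = *(u64 *)(dst + offsetof(S, F));	// tmp = ctx->F
 *	*(SIZE *)(tmp_reg + off(NF) + OFF) = src;	// the actual store
 *	tmp_reg = *(u64 *)(dst + offsetof(S, TF));	// restore borrowed reg
 */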
7863
7864 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \
7865                                                       TF)                      \
7866         do {                                                                   \
7867                 if (type == BPF_WRITE) {                                       \
7868                         SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, SIZE,   \
7869                                                          OFF, TF);             \
7870                 } else {                                                       \
7871                         SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(                  \
7872                                 S, NS, F, NF, SIZE, OFF);                      \
7873                 }                                                              \
7874         } while (0)
7875
7876 #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF)                 \
7877         SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(                         \
7878                 S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF)
7879
7880 static u32 sock_addr_convert_ctx_access(enum bpf_access_type type,
7881                                         const struct bpf_insn *si,
7882                                         struct bpf_insn *insn_buf,
7883                                         struct bpf_prog *prog, u32 *target_size)
7884 {
7885         struct bpf_insn *insn = insn_buf;
7886         int off;
7887
7888         switch (si->off) {
7889         case offsetof(struct bpf_sock_addr, user_family):
7890                 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
7891                                             struct sockaddr, uaddr, sa_family);
7892                 break;
7893
7894         case offsetof(struct bpf_sock_addr, user_ip4):
7895                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7896                         struct bpf_sock_addr_kern, struct sockaddr_in, uaddr,
7897                         sin_addr, BPF_SIZE(si->code), 0, tmp_reg);
7898                 break;
7899
7900         case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]):
7901                 off = si->off;
7902                 off -= offsetof(struct bpf_sock_addr, user_ip6[0]);
7903                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7904                         struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr,
7905                         sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off,
7906                         tmp_reg);
7907                 break;
7908
7909         case offsetof(struct bpf_sock_addr, user_port):
7910                 /* To get the port we need to know sa_family first and
7911                  * then treat sockaddr as either sockaddr_in or
7912                  * sockaddr_in6. We can simplify, though, since the port
7913                  * field has the same offset and size in both structures.
7914                  * Here we check this invariant and, since it holds, use
7915                  * just one of the structures.
7916                  */
7917                 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) !=
7918                              offsetof(struct sockaddr_in6, sin6_port));
7919                 BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) !=
7920                              FIELD_SIZEOF(struct sockaddr_in6, sin6_port));
7921                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern,
7922                                                      struct sockaddr_in6, uaddr,
7923                                                      sin6_port, tmp_reg);
7924                 break;
7925
7926         case offsetof(struct bpf_sock_addr, family):
7927                 SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern,
7928                                             struct sock, sk, sk_family);
7929                 break;
7930
7931         case offsetof(struct bpf_sock_addr, type):
7932                 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
7933                         struct bpf_sock_addr_kern, struct sock, sk,
7934                         __sk_flags_offset, BPF_W, 0);
7935                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
7936                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
7937                 break;
7938
7939         case offsetof(struct bpf_sock_addr, protocol):
7940                 SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(
7941                         struct bpf_sock_addr_kern, struct sock, sk,
7942                         __sk_flags_offset, BPF_W, 0);
7943                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
7944                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
7945                                         SK_FL_PROTO_SHIFT);
7946                 break;
7947
7948         case offsetof(struct bpf_sock_addr, msg_src_ip4):
7949                 /* Treat t_ctx as struct in_addr for msg_src_ip4. */
7950                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7951                         struct bpf_sock_addr_kern, struct in_addr, t_ctx,
7952                         s_addr, BPF_SIZE(si->code), 0, tmp_reg);
7953                 break;
7954
7955         case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0],
7956                                 msg_src_ip6[3]):
7957                 off = si->off;
7958                 off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]);
7959                 /* Treat t_ctx as struct in6_addr for msg_src_ip6. */
7960                 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(
7961                         struct bpf_sock_addr_kern, struct in6_addr, t_ctx,
7962                         s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg);
7963                 break;
7964         case offsetof(struct bpf_sock_addr, sk):
7965                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sock_addr_kern, sk),
7966                                       si->dst_reg, si->src_reg,
7967                                       offsetof(struct bpf_sock_addr_kern, sk));
7968                 break;
7969         }
7970
7971         return insn - insn_buf;
7972 }
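
/* Hedged sketch of a program using these rewrites: a cgroup/connect4 hook
 * that reads and rewrites user_ip4 and user_port, both kept in network
 * byte order since they alias sockaddr_in fields (bpf_htonl/bpf_htons are
 * libbpf's bpf_endian.h helpers):
 *
 *	SEC("cgroup/connect4")
 *	int connect4(struct bpf_sock_addr *ctx)
 *	{
 *		if (ctx->user_ip4 == bpf_htonl(0x01020304)) {	// 1.2.3.4
 *			ctx->user_ip4 = bpf_htonl(0x7f000001);	// 127.0.0.1
 *			ctx->user_port = bpf_htons(8080);
 *		}
 *		return 1;	// let the connect() proceed
 *	}
 */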
7973
7974 static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
7975                                        const struct bpf_insn *si,
7976                                        struct bpf_insn *insn_buf,
7977                                        struct bpf_prog *prog,
7978                                        u32 *target_size)
7979 {
7980         struct bpf_insn *insn = insn_buf;
7981         int off;
7982
7983 /* Helper macro for adding read access to tcp_sock or sock fields. */
7984 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                         \
7985         do {                                                                  \
7986                 BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >                   \
7987                              FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
7988                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
7989                                                 struct bpf_sock_ops_kern,     \
7990                                                 is_fullsock),                 \
7991                                       si->dst_reg, si->src_reg,               \
7992                                       offsetof(struct bpf_sock_ops_kern,      \
7993                                                is_fullsock));                 \
7994                 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2);            \
7995                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
7996                                                 struct bpf_sock_ops_kern, sk),\
7997                                       si->dst_reg, si->src_reg,               \
7998                                       offsetof(struct bpf_sock_ops_kern, sk));\
7999                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ,                   \
8000                                                        OBJ_FIELD),            \
8001                                       si->dst_reg, si->dst_reg,               \
8002                                       offsetof(OBJ, OBJ_FIELD));              \
8003         } while (0)
8004
8005 #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
8006                 SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)
8007
8008 /* Helper macro for adding write access to tcp_sock or sock fields.
8009  * The macro is called with two registers: dst_reg, which contains a pointer
8010  * to ctx (the context), and src_reg, which contains the value that should be
8011  * stored. However, we need an additional register since we cannot overwrite
8012  * dst_reg because it may be used later in the program.
8013  * Instead we "borrow" one of the other registers. We first save its value
8014  * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore
8015  * it at the end of the macro.
8016  */
8017 #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ)                         \
8018         do {                                                                  \
8019                 int reg = BPF_REG_9;                                          \
8020                 BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) >                   \
8021                              FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD));   \
8022                 if (si->dst_reg == reg || si->src_reg == reg)                 \
8023                         reg--;                                                \
8024                 if (si->dst_reg == reg || si->src_reg == reg)                 \
8025                         reg--;                                                \
8026                 *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg,               \
8027                                       offsetof(struct bpf_sock_ops_kern,      \
8028                                                temp));                        \
8029                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
8030                                                 struct bpf_sock_ops_kern,     \
8031                                                 is_fullsock),                 \
8032                                       reg, si->dst_reg,                       \
8033                                       offsetof(struct bpf_sock_ops_kern,      \
8034                                                is_fullsock));                 \
8035                 *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2);                    \
8036                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(                       \
8037                                                 struct bpf_sock_ops_kern, sk),\
8038                                       reg, si->dst_reg,                       \
8039                                       offsetof(struct bpf_sock_ops_kern, sk));\
8040                 *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD),       \
8041                                       reg, si->src_reg,                       \
8042                                       offsetof(OBJ, OBJ_FIELD));              \
8043                 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg,               \
8044                                       offsetof(struct bpf_sock_ops_kern,      \
8045                                                temp));                        \
8046         } while (0)
8047
8048 #define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE)            \
8049         do {                                                                  \
8050                 if (TYPE == BPF_WRITE)                                        \
8051                         SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);        \
8052                 else                                                          \
8053                         SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ);        \
8054         } while (0)
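
/* Taken together (illustration): a read emitted by SOCK_OPS_GET_FIELD() is
 * guarded by is_fullsock, so for request or timewait sockets the result
 * quietly stays 0 instead of dereferencing a mini socket:
 *
 *	dst = ctx->is_fullsock;
 *	if dst == 0 goto +2;
 *	dst = ctx->sk;
 *	dst = *(size *)(dst + offsetof(OBJ, OBJ_FIELD));
 *
 * From BPF program code this is invisible; a sockops sketch simply does:
 *
 *	SEC("sockops")
 *	int sockops_prog(struct bpf_sock_ops *ops)
 *	{
 *		if (ops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
 *			ops->sk_txhash = ops->srtt_us;	// arbitrary demo write
 *		return 1;
 *	}
 */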
8055
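        /* The helper macros above may emit instructions before the switch
         * below; if one did, we would be done here. As the code stands,
         * nothing emits ahead of the switch, so this guard never triggers.
         */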
8056         if (insn > insn_buf)
8057                 return insn - insn_buf;
8058
8059         switch (si->off) {
8060         case offsetof(struct bpf_sock_ops, op) ...
8061              offsetof(struct bpf_sock_ops, replylong[3]):
8062                 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) !=
8063                              FIELD_SIZEOF(struct bpf_sock_ops_kern, op));
8064                 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) !=
8065                              FIELD_SIZEOF(struct bpf_sock_ops_kern, reply));
8066                 BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) !=
8067                              FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong));
8068                 off = si->off;
8069                 off -= offsetof(struct bpf_sock_ops, op);
8070                 off += offsetof(struct bpf_sock_ops_kern, op);
8071                 if (type == BPF_WRITE)
8072                         *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
8073                                               off);
8074                 else
8075                         *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
8076                                               off);
8077                 break;
8078
8079         case offsetof(struct bpf_sock_ops, family):
8080                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
8081
8082                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8083                                               struct bpf_sock_ops_kern, sk),
8084                                       si->dst_reg, si->src_reg,
8085                                       offsetof(struct bpf_sock_ops_kern, sk));
8086                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8087                                       offsetof(struct sock_common, skc_family));
8088                 break;
8089
8090         case offsetof(struct bpf_sock_ops, remote_ip4):
8091                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
8092
8093                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8094                                                 struct bpf_sock_ops_kern, sk),
8095                                       si->dst_reg, si->src_reg,
8096                                       offsetof(struct bpf_sock_ops_kern, sk));
8097                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8098                                       offsetof(struct sock_common, skc_daddr));
8099                 break;
8100
8101         case offsetof(struct bpf_sock_ops, local_ip4):
8102                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8103                                           skc_rcv_saddr) != 4);
8104
8105                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8106                                               struct bpf_sock_ops_kern, sk),
8107                                       si->dst_reg, si->src_reg,
8108                                       offsetof(struct bpf_sock_ops_kern, sk));
8109                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8110                                       offsetof(struct sock_common,
8111                                                skc_rcv_saddr));
8112                 break;
8113
8114         case offsetof(struct bpf_sock_ops, remote_ip6[0]) ...
8115              offsetof(struct bpf_sock_ops, remote_ip6[3]):
8116 #if IS_ENABLED(CONFIG_IPV6)
8117                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8118                                           skc_v6_daddr.s6_addr32[0]) != 4);
8119
8120                 off = si->off;
8121                 off -= offsetof(struct bpf_sock_ops, remote_ip6[0]);
8122                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8123                                                 struct bpf_sock_ops_kern, sk),
8124                                       si->dst_reg, si->src_reg,
8125                                       offsetof(struct bpf_sock_ops_kern, sk));
8126                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8127                                       offsetof(struct sock_common,
8128                                                skc_v6_daddr.s6_addr32[0]) +
8129                                       off);
8130 #else
8131                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8132 #endif
8133                 break;
8134
8135         case offsetof(struct bpf_sock_ops, local_ip6[0]) ...
8136              offsetof(struct bpf_sock_ops, local_ip6[3]):
8137 #if IS_ENABLED(CONFIG_IPV6)
8138                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8139                                           skc_v6_rcv_saddr.s6_addr32[0]) != 4);
8140
8141                 off = si->off;
8142                 off -= offsetof(struct bpf_sock_ops, local_ip6[0]);
8143                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8144                                                 struct bpf_sock_ops_kern, sk),
8145                                       si->dst_reg, si->src_reg,
8146                                       offsetof(struct bpf_sock_ops_kern, sk));
8147                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8148                                       offsetof(struct sock_common,
8149                                                skc_v6_rcv_saddr.s6_addr32[0]) +
8150                                       off);
8151 #else
8152                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8153 #endif
8154                 break;
8155
8156         case offsetof(struct bpf_sock_ops, remote_port):
8157                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
8158
8159                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8160                                                 struct bpf_sock_ops_kern, sk),
8161                                       si->dst_reg, si->src_reg,
8162                                       offsetof(struct bpf_sock_ops_kern, sk));
8163                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8164                                       offsetof(struct sock_common, skc_dport));
8165 #ifndef __BIG_ENDIAN_BITFIELD
8166                 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
8167 #endif
8168                 break;
8169
8170         case offsetof(struct bpf_sock_ops, local_port):
8171                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
8172
8173                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8174                                                 struct bpf_sock_ops_kern, sk),
8175                                       si->dst_reg, si->src_reg,
8176                                       offsetof(struct bpf_sock_ops_kern, sk));
8177                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8178                                       offsetof(struct sock_common, skc_num));
8179                 break;
8180
8181         case offsetof(struct bpf_sock_ops, is_fullsock):
8182                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8183                                                 struct bpf_sock_ops_kern,
8184                                                 is_fullsock),
8185                                       si->dst_reg, si->src_reg,
8186                                       offsetof(struct bpf_sock_ops_kern,
8187                                                is_fullsock));
8188                 break;
8189
8190         case offsetof(struct bpf_sock_ops, state):
8191                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1);
8192
8193                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8194                                                 struct bpf_sock_ops_kern, sk),
8195                                       si->dst_reg, si->src_reg,
8196                                       offsetof(struct bpf_sock_ops_kern, sk));
8197                 *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg,
8198                                       offsetof(struct sock_common, skc_state));
8199                 break;
8200
8201         case offsetof(struct bpf_sock_ops, rtt_min):
8202                 BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) !=
8203                              sizeof(struct minmax));
8204                 BUILD_BUG_ON(sizeof(struct minmax) <
8205                              sizeof(struct minmax_sample));
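		/* tcp_sock.rtt_min is a struct minmax (see
		 * linux/win_minmax.h): an array of { u32 t; u32 v; }
		 * samples.  Adding the size of the t member to the base
		 * offset lands the load on s[0].v, the current windowed
		 * minimum RTT in usec (a sketch of the layout; the
		 * BUILD_BUG_ONs above guard the assumption).
		 */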
8206
8207                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8208                                                 struct bpf_sock_ops_kern, sk),
8209                                       si->dst_reg, si->src_reg,
8210                                       offsetof(struct bpf_sock_ops_kern, sk));
8211                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8212                                       offsetof(struct tcp_sock, rtt_min) +
8213                                       FIELD_SIZEOF(struct minmax_sample, t));
8214                 break;
8215
8216         case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags):
8217                 SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags,
8218                                    struct tcp_sock);
8219                 break;
8220
8221         case offsetof(struct bpf_sock_ops, sk_txhash):
8222                 SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash,
8223                                           struct sock, type);
8224                 break;
8225         case offsetof(struct bpf_sock_ops, snd_cwnd):
8226                 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_cwnd);
8227                 break;
8228         case offsetof(struct bpf_sock_ops, srtt_us):
8229                 SOCK_OPS_GET_TCP_SOCK_FIELD(srtt_us);
8230                 break;
8231         case offsetof(struct bpf_sock_ops, snd_ssthresh):
8232                 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_ssthresh);
8233                 break;
8234         case offsetof(struct bpf_sock_ops, rcv_nxt):
8235                 SOCK_OPS_GET_TCP_SOCK_FIELD(rcv_nxt);
8236                 break;
8237         case offsetof(struct bpf_sock_ops, snd_nxt):
8238                 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_nxt);
8239                 break;
8240         case offsetof(struct bpf_sock_ops, snd_una):
8241                 SOCK_OPS_GET_TCP_SOCK_FIELD(snd_una);
8242                 break;
8243         case offsetof(struct bpf_sock_ops, mss_cache):
8244                 SOCK_OPS_GET_TCP_SOCK_FIELD(mss_cache);
8245                 break;
8246         case offsetof(struct bpf_sock_ops, ecn_flags):
8247                 SOCK_OPS_GET_TCP_SOCK_FIELD(ecn_flags);
8248                 break;
8249         case offsetof(struct bpf_sock_ops, rate_delivered):
8250                 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_delivered);
8251                 break;
8252         case offsetof(struct bpf_sock_ops, rate_interval_us):
8253                 SOCK_OPS_GET_TCP_SOCK_FIELD(rate_interval_us);
8254                 break;
8255         case offsetof(struct bpf_sock_ops, packets_out):
8256                 SOCK_OPS_GET_TCP_SOCK_FIELD(packets_out);
8257                 break;
8258         case offsetof(struct bpf_sock_ops, retrans_out):
8259                 SOCK_OPS_GET_TCP_SOCK_FIELD(retrans_out);
8260                 break;
8261         case offsetof(struct bpf_sock_ops, total_retrans):
8262                 SOCK_OPS_GET_TCP_SOCK_FIELD(total_retrans);
8263                 break;
8264         case offsetof(struct bpf_sock_ops, segs_in):
8265                 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_in);
8266                 break;
8267         case offsetof(struct bpf_sock_ops, data_segs_in):
8268                 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_in);
8269                 break;
8270         case offsetof(struct bpf_sock_ops, segs_out):
8271                 SOCK_OPS_GET_TCP_SOCK_FIELD(segs_out);
8272                 break;
8273         case offsetof(struct bpf_sock_ops, data_segs_out):
8274                 SOCK_OPS_GET_TCP_SOCK_FIELD(data_segs_out);
8275                 break;
8276         case offsetof(struct bpf_sock_ops, lost_out):
8277                 SOCK_OPS_GET_TCP_SOCK_FIELD(lost_out);
8278                 break;
8279         case offsetof(struct bpf_sock_ops, sacked_out):
8280                 SOCK_OPS_GET_TCP_SOCK_FIELD(sacked_out);
8281                 break;
8282         case offsetof(struct bpf_sock_ops, bytes_received):
8283                 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_received);
8284                 break;
8285         case offsetof(struct bpf_sock_ops, bytes_acked):
8286                 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
8287                 break;
8288         case offsetof(struct bpf_sock_ops, sk):
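		/* Expose skops->sk only for full sockets: is_fullsock is
		 * loaded first and, when it reads zero, the conditional
		 * jump below skips the sk load, leaving 0 (a NULL pointer)
		 * in the destination register instead of a request or
		 * timewait socket.
		 */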
8289                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8290                                                 struct bpf_sock_ops_kern,
8291                                                 is_fullsock),
8292                                       si->dst_reg, si->src_reg,
8293                                       offsetof(struct bpf_sock_ops_kern,
8294                                                is_fullsock));
8295                 *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
8296                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8297                                                 struct bpf_sock_ops_kern, sk),
8298                                       si->dst_reg, si->src_reg,
8299                                       offsetof(struct bpf_sock_ops_kern, sk));
8300                 break;
8301         }
8302         return insn - insn_buf;
8303 }
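/* To illustrate the net effect of the conversion above (a sketch, not
 * the verbatim emitted instructions): a program-side load such as
 *
 *	__u32 cwnd = skops->snd_cwnd;
 *
 * is rewritten into the equivalent of
 *
 *	sk = ((struct bpf_sock_ops_kern *)ctx)->sk;
 *	cwnd = ((struct tcp_sock *)sk)->snd_cwnd;
 *
 * so the UAPI struct is never materialized; every field access is
 * remapped onto the kernel-side objects at verification time.
 */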
8304
8305 static u32 sk_skb_convert_ctx_access(enum bpf_access_type type,
8306                                      const struct bpf_insn *si,
8307                                      struct bpf_insn *insn_buf,
8308                                      struct bpf_prog *prog, u32 *target_size)
8309 {
8310         struct bpf_insn *insn = insn_buf;
8311         int off;
8312
8313         switch (si->off) {
8314         case offsetof(struct __sk_buff, data_end):
8315                 off  = si->off;
8316                 off -= offsetof(struct __sk_buff, data_end);
8317                 off += offsetof(struct sk_buff, cb);
8318                 off += offsetof(struct tcp_skb_cb, bpf.data_end);
8319                 *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
8320                                       si->src_reg, off);
8321                 break;
8322         default:
8323                 return bpf_convert_ctx_access(type, si, insn_buf, prog,
8324                                               target_size);
8325         }
8326
8327         return insn - insn_buf;
8328 }
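/* For SK_SKB programs data_end is not kept in struct sk_buff itself;
 * the strparser/verdict path stashes it in the skb control block, so
 * the load above resolves __sk_buff.data_end to
 * tcp_skb_cb.bpf.data_end inside skb->cb[].  All other fields fall
 * back to the generic __sk_buff conversion.
 */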
8329
8330 static u32 sk_msg_convert_ctx_access(enum bpf_access_type type,
8331                                      const struct bpf_insn *si,
8332                                      struct bpf_insn *insn_buf,
8333                                      struct bpf_prog *prog, u32 *target_size)
8334 {
8335         struct bpf_insn *insn = insn_buf;
8336 #if IS_ENABLED(CONFIG_IPV6)
8337         int off;
8338 #endif
8339
8340         /* ctx conversion relies on the sg element being first in struct sk_msg */
8341         BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0);
8342
8343         switch (si->off) {
8344         case offsetof(struct sk_msg_md, data):
8345                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data),
8346                                       si->dst_reg, si->src_reg,
8347                                       offsetof(struct sk_msg, data));
8348                 break;
8349         case offsetof(struct sk_msg_md, data_end):
8350                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end),
8351                                       si->dst_reg, si->src_reg,
8352                                       offsetof(struct sk_msg, data_end));
8353                 break;
8354         case offsetof(struct sk_msg_md, family):
8355                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2);
8356
8357                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8358                                               struct sk_msg, sk),
8359                                       si->dst_reg, si->src_reg,
8360                                       offsetof(struct sk_msg, sk));
8361                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8362                                       offsetof(struct sock_common, skc_family));
8363                 break;
8364
8365         case offsetof(struct sk_msg_md, remote_ip4):
8366                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4);
8367
8368                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8369                                                 struct sk_msg, sk),
8370                                       si->dst_reg, si->src_reg,
8371                                       offsetof(struct sk_msg, sk));
8372                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8373                                       offsetof(struct sock_common, skc_daddr));
8374                 break;
8375
8376         case offsetof(struct sk_msg_md, local_ip4):
8377                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8378                                           skc_rcv_saddr) != 4);
8379
8380                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8381                                               struct sk_msg, sk),
8382                                       si->dst_reg, si->src_reg,
8383                                       offsetof(struct sk_msg, sk));
8384                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8385                                       offsetof(struct sock_common,
8386                                                skc_rcv_saddr));
8387                 break;
8388
8389         case offsetof(struct sk_msg_md, remote_ip6[0]) ...
8390              offsetof(struct sk_msg_md, remote_ip6[3]):
8391 #if IS_ENABLED(CONFIG_IPV6)
8392                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8393                                           skc_v6_daddr.s6_addr32[0]) != 4);
8394
8395                 off = si->off;
8396                 off -= offsetof(struct sk_msg_md, remote_ip6[0]);
8397                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8398                                                 struct sk_msg, sk),
8399                                       si->dst_reg, si->src_reg,
8400                                       offsetof(struct sk_msg, sk));
8401                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8402                                       offsetof(struct sock_common,
8403                                                skc_v6_daddr.s6_addr32[0]) +
8404                                       off);
8405 #else
8406                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8407 #endif
8408                 break;
8409
8410         case offsetof(struct sk_msg_md, local_ip6[0]) ...
8411              offsetof(struct sk_msg_md, local_ip6[3]):
8412 #if IS_ENABLED(CONFIG_IPV6)
8413                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common,
8414                                           skc_v6_rcv_saddr.s6_addr32[0]) != 4);
8415
8416                 off = si->off;
8417                 off -= offsetof(struct sk_msg_md, local_ip6[0]);
8418                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8419                                                 struct sk_msg, sk),
8420                                       si->dst_reg, si->src_reg,
8421                                       offsetof(struct sk_msg, sk));
8422                 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
8423                                       offsetof(struct sock_common,
8424                                                skc_v6_rcv_saddr.s6_addr32[0]) +
8425                                       off);
8426 #else
8427                 *insn++ = BPF_MOV32_IMM(si->dst_reg, 0);
8428 #endif
8429                 break;
8430
8431         case offsetof(struct sk_msg_md, remote_port):
8432                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2);
8433
8434                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8435                                                 struct sk_msg, sk),
8436                                       si->dst_reg, si->src_reg,
8437                                       offsetof(struct sk_msg, sk));
8438                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8439                                       offsetof(struct sock_common, skc_dport));
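		/* Same byte-order fixup as for bpf_sock_ops remote_port
		 * above: make the 32-bit field read as the port in network
		 * byte order on little-endian hosts.
		 */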
8440 #ifndef __BIG_ENDIAN_BITFIELD
8441                 *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16);
8442 #endif
8443                 break;
8444
8445         case offsetof(struct sk_msg_md, local_port):
8446                 BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2);
8447
8448                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
8449                                                 struct sk_msg, sk),
8450                                       si->dst_reg, si->src_reg,
8451                                       offsetof(struct sk_msg, sk));
8452                 *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg,
8453                                       offsetof(struct sock_common, skc_num));
8454                 break;
8455
8456         case offsetof(struct sk_msg_md, size):
8457                 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size),
8458                                       si->dst_reg, si->src_reg,
8459                                       offsetof(struct sk_msg_sg, size));
8460                 break;
8461         }
8462
8463         return insn - insn_buf;
8464 }
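/* A typical consumer of the data/data_end conversion above (a sketch;
 * SK_MSG programs must bounds-check before touching payload):
 *
 *	SEC("sk_msg")
 *	int prog(struct sk_msg_md *msg)
 *	{
 *		void *data = (void *)(long)msg->data;
 *		void *data_end = (void *)(long)msg->data_end;
 *
 *		if (data + 4 > data_end)
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 */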
8465
8466 const struct bpf_verifier_ops sk_filter_verifier_ops = {
8467         .get_func_proto         = sk_filter_func_proto,
8468         .is_valid_access        = sk_filter_is_valid_access,
8469         .convert_ctx_access     = bpf_convert_ctx_access,
8470         .gen_ld_abs             = bpf_gen_ld_abs,
8471 };
8472
8473 const struct bpf_prog_ops sk_filter_prog_ops = {
8474         .test_run               = bpf_prog_test_run_skb,
8475 };
8476
8477 const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
8478         .get_func_proto         = tc_cls_act_func_proto,
8479         .is_valid_access        = tc_cls_act_is_valid_access,
8480         .convert_ctx_access     = tc_cls_act_convert_ctx_access,
8481         .gen_prologue           = tc_cls_act_prologue,
8482         .gen_ld_abs             = bpf_gen_ld_abs,
8483 };
8484
8485 const struct bpf_prog_ops tc_cls_act_prog_ops = {
8486         .test_run               = bpf_prog_test_run_skb,
8487 };
8488
8489 const struct bpf_verifier_ops xdp_verifier_ops = {
8490         .get_func_proto         = xdp_func_proto,
8491         .is_valid_access        = xdp_is_valid_access,
8492         .convert_ctx_access     = xdp_convert_ctx_access,
8493         .gen_prologue           = bpf_noop_prologue,
8494 };
8495
8496 const struct bpf_prog_ops xdp_prog_ops = {
8497         .test_run               = bpf_prog_test_run_xdp,
8498 };
8499
8500 const struct bpf_verifier_ops cg_skb_verifier_ops = {
8501         .get_func_proto         = cg_skb_func_proto,
8502         .is_valid_access        = cg_skb_is_valid_access,
8503         .convert_ctx_access     = bpf_convert_ctx_access,
8504 };
8505
8506 const struct bpf_prog_ops cg_skb_prog_ops = {
8507         .test_run               = bpf_prog_test_run_skb,
8508 };
8509
8510 const struct bpf_verifier_ops lwt_in_verifier_ops = {
8511         .get_func_proto         = lwt_in_func_proto,
8512         .is_valid_access        = lwt_is_valid_access,
8513         .convert_ctx_access     = bpf_convert_ctx_access,
8514 };
8515
8516 const struct bpf_prog_ops lwt_in_prog_ops = {
8517         .test_run               = bpf_prog_test_run_skb,
8518 };
8519
8520 const struct bpf_verifier_ops lwt_out_verifier_ops = {
8521         .get_func_proto         = lwt_out_func_proto,
8522         .is_valid_access        = lwt_is_valid_access,
8523         .convert_ctx_access     = bpf_convert_ctx_access,
8524 };
8525
8526 const struct bpf_prog_ops lwt_out_prog_ops = {
8527         .test_run               = bpf_prog_test_run_skb,
8528 };
8529
8530 const struct bpf_verifier_ops lwt_xmit_verifier_ops = {
8531         .get_func_proto         = lwt_xmit_func_proto,
8532         .is_valid_access        = lwt_is_valid_access,
8533         .convert_ctx_access     = bpf_convert_ctx_access,
8534         .gen_prologue           = tc_cls_act_prologue,
8535 };
8536
8537 const struct bpf_prog_ops lwt_xmit_prog_ops = {
8538         .test_run               = bpf_prog_test_run_skb,
8539 };
8540
8541 const struct bpf_verifier_ops lwt_seg6local_verifier_ops = {
8542         .get_func_proto         = lwt_seg6local_func_proto,
8543         .is_valid_access        = lwt_is_valid_access,
8544         .convert_ctx_access     = bpf_convert_ctx_access,
8545 };
8546
8547 const struct bpf_prog_ops lwt_seg6local_prog_ops = {
8548         .test_run               = bpf_prog_test_run_skb,
8549 };
8550
8551 const struct bpf_verifier_ops cg_sock_verifier_ops = {
8552         .get_func_proto         = sock_filter_func_proto,
8553         .is_valid_access        = sock_filter_is_valid_access,
8554         .convert_ctx_access     = bpf_sock_convert_ctx_access,
8555 };
8556
8557 const struct bpf_prog_ops cg_sock_prog_ops = {
8558 };
8559
8560 const struct bpf_verifier_ops cg_sock_addr_verifier_ops = {
8561         .get_func_proto         = sock_addr_func_proto,
8562         .is_valid_access        = sock_addr_is_valid_access,
8563         .convert_ctx_access     = sock_addr_convert_ctx_access,
8564 };
8565
8566 const struct bpf_prog_ops cg_sock_addr_prog_ops = {
8567 };
8568
8569 const struct bpf_verifier_ops sock_ops_verifier_ops = {
8570         .get_func_proto         = sock_ops_func_proto,
8571         .is_valid_access        = sock_ops_is_valid_access,
8572         .convert_ctx_access     = sock_ops_convert_ctx_access,
8573 };
8574
8575 const struct bpf_prog_ops sock_ops_prog_ops = {
8576 };
8577
8578 const struct bpf_verifier_ops sk_skb_verifier_ops = {
8579         .get_func_proto         = sk_skb_func_proto,
8580         .is_valid_access        = sk_skb_is_valid_access,
8581         .convert_ctx_access     = sk_skb_convert_ctx_access,
8582         .gen_prologue           = sk_skb_prologue,
8583 };
8584
8585 const struct bpf_prog_ops sk_skb_prog_ops = {
8586 };
8587
8588 const struct bpf_verifier_ops sk_msg_verifier_ops = {
8589         .get_func_proto         = sk_msg_func_proto,
8590         .is_valid_access        = sk_msg_is_valid_access,
8591         .convert_ctx_access     = sk_msg_convert_ctx_access,
8592         .gen_prologue           = bpf_noop_prologue,
8593 };
8594
8595 const struct bpf_prog_ops sk_msg_prog_ops = {
8596 };
8597
8598 const struct bpf_verifier_ops flow_dissector_verifier_ops = {
8599         .get_func_proto         = flow_dissector_func_proto,
8600         .is_valid_access        = flow_dissector_is_valid_access,
8601         .convert_ctx_access     = flow_dissector_convert_ctx_access,
8602 };
8603
8604 const struct bpf_prog_ops flow_dissector_prog_ops = {
8605         .test_run               = bpf_prog_test_run_flow_dissector,
8606 };
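/* Each verifier_ops/prog_ops pair above is bound to its program type
 * in include/linux/bpf_types.h, roughly (abridged, kernels of this
 * vintage):
 *
 *	BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter)
 *	BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act)
 *	BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
 *
 * where the second argument is the prefix shared by the _verifier_ops
 * and _prog_ops symbols.
 */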
8607
8608 int sk_detach_filter(struct sock *sk)
8609 {
8610         int ret = -ENOENT;
8611         struct sk_filter *filter;
8612
8613         if (sock_flag(sk, SOCK_FILTER_LOCKED))
8614                 return -EPERM;
8615
8616         filter = rcu_dereference_protected(sk->sk_filter,
8617                                            lockdep_sock_is_held(sk));
8618         if (filter) {
8619                 RCU_INIT_POINTER(sk->sk_filter, NULL);
8620                 sk_filter_uncharge(sk, filter);
8621                 ret = 0;
8622         }
8623
8624         return ret;
8625 }
8626 EXPORT_SYMBOL_GPL(sk_detach_filter);
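/* User space reaches sk_detach_filter() through setsockopt(), e.g.
 * (a sketch; the option value is ignored but must be present):
 *
 *	int dummy = 0;
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER,
 *		       &dummy, sizeof(dummy)) < 0)
 *		perror("SO_DETACH_FILTER");
 *
 * -EPERM surfaces as the errno when the filter was pinned with
 * SO_LOCK_FILTER.
 */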
8627
8628 int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
8629                   unsigned int len)
8630 {
8631         struct sock_fprog_kern *fprog;
8632         struct sk_filter *filter;
8633         int ret = 0;
8634
8635         lock_sock(sk);
8636         filter = rcu_dereference_protected(sk->sk_filter,
8637                                            lockdep_sock_is_held(sk));
8638         if (!filter)
8639                 goto out;
8640
8641         /* We're copying the filter that was originally attached,
8642          * so no conversion/decoding is needed anymore. eBPF programs
8643          * with no original program cannot be dumped through this.
8644          */
8645         ret = -EACCES;
8646         fprog = filter->prog->orig_prog;
8647         if (!fprog)
8648                 goto out;
8649
8650         ret = fprog->len;
8651         if (!len)
8652                 /* User space only queries the number of filter blocks. */
8653                 goto out;
8654
8655         ret = -EINVAL;
8656         if (len < fprog->len)
8657                 goto out;
8658
8659         ret = -EFAULT;
8660         if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
8661                 goto out;
8662
8663         /* The API returns the number of filter blocks rather than
8664          * the number of bytes.
8665          */
8666         ret = fprog->len;
8667 out:
8668         release_sock(sk);
8669         return ret;
8670 }
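/* Retrieval is typically two getsockopt() calls (a sketch): the first
 * with *optlen == 0 learns the block count that sk_get_filter()
 * returns, the second is sized to match.  Note the length is counted
 * in filter blocks, not bytes:
 *
 *	socklen_t cnt = 0;
 *	struct sock_filter *insns;
 *
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &cnt);
 *	insns = calloc(cnt, sizeof(*insns));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &cnt);
 */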
8671
8672 #ifdef CONFIG_INET
8673 struct sk_reuseport_kern {
8674         struct sk_buff *skb;
8675         struct sock *sk;
8676         struct sock *selected_sk;
8677         void *data_end;
8678         u32 hash;
8679         u32 reuseport_id;
8680         bool bind_inany;
8681 };
8682
8683 static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern,
8684                                     struct sock_reuseport *reuse,
8685                                     struct sock *sk, struct sk_buff *skb,
8686                                     u32 hash)
8687 {
8688         reuse_kern->skb = skb;
8689         reuse_kern->sk = sk;
8690         reuse_kern->selected_sk = NULL;
8691         reuse_kern->data_end = skb->data + skb_headlen(skb);
8692         reuse_kern->hash = hash;
8693         reuse_kern->reuseport_id = reuse->reuseport_id;
8694         reuse_kern->bind_inany = reuse->bind_inany;
8695 }
8696
8697 struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
8698                                   struct bpf_prog *prog, struct sk_buff *skb,
8699                                   u32 hash)
8700 {
8701         struct sk_reuseport_kern reuse_kern;
8702         enum sk_action action;
8703
8704         bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash);
8705         action = BPF_PROG_RUN(prog, &reuse_kern);
8706
8707         if (action == SK_PASS)
8708                 return reuse_kern.selected_sk;
8709         else
8710                 return ERR_PTR(-ECONNREFUSED);
8711 }
8712
8713 BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
8714            struct bpf_map *, map, void *, key, u32, flags)
8715 {
8716         struct sock_reuseport *reuse;
8717         struct sock *selected_sk;
8718
8719         selected_sk = map->ops->map_lookup_elem(map, key);
8720         if (!selected_sk)
8721                 return -ENOENT;
8722
8723         reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
8724         if (!reuse)
8725                 /* selected_sk is unhashed (e.g. by close()) after the
8726                  * above map_lookup_elem().  Treat selected_sk as if it
8727                  * has already been removed from the map.
8728                  */
8729                 return -ENOENT;
8730
8731         if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
8732                 struct sock *sk;
8733
8734                 if (unlikely(!reuse_kern->reuseport_id))
8735                         /* There is a small race between adding the
8736                          * sk to the map and setting the
8737                          * reuse_kern->reuseport_id.
8738                          * Treat it as if the sk has not been added
8739                          * to the bpf map yet.
8740                          */
8741                         return -ENOENT;
8742
8743                 sk = reuse_kern->sk;
8744                 if (sk->sk_protocol != selected_sk->sk_protocol)
8745                         return -EPROTOTYPE;
8746                 else if (sk->sk_family != selected_sk->sk_family)
8747                         return -EAFNOSUPPORT;
8748
8749                 /* Catch all. Likely bound to a different sockaddr. */
8750                 return -EBADFD;
8751         }
8752
8753         reuse_kern->selected_sk = selected_sk;
8754
8755         return 0;
8756 }
8757
8758 static const struct bpf_func_proto sk_select_reuseport_proto = {
8759         .func           = sk_select_reuseport,
8760         .gpl_only       = false,
8761         .ret_type       = RET_INTEGER,
8762         .arg1_type      = ARG_PTR_TO_CTX,
8763         .arg2_type      = ARG_CONST_MAP_PTR,
8764         .arg3_type      = ARG_PTR_TO_MAP_KEY,
8765         .arg4_type      = ARG_ANYTHING,
8766 };
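/* From the program side the helper above is bpf_sk_select_reuseport().
 * A minimal selector might look like this (a sketch; the map name and
 * sizing are illustrative, the map being a REUSEPORT_SOCKARRAY):
 *
 *	SEC("sk_reuseport")
 *	int select_sock(struct sk_reuseport_md *md)
 *	{
 *		__u32 key = md->hash % 4;
 *
 *		if (bpf_sk_select_reuseport(md, &socks, &key, 0))
 *			return SK_DROP;
 *		return SK_PASS;
 *	}
 */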
8767
8768 BPF_CALL_4(sk_reuseport_load_bytes,
8769            const struct sk_reuseport_kern *, reuse_kern, u32, offset,
8770            void *, to, u32, len)
8771 {
8772         return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len);
8773 }
8774
8775 static const struct bpf_func_proto sk_reuseport_load_bytes_proto = {
8776         .func           = sk_reuseport_load_bytes,
8777         .gpl_only       = false,
8778         .ret_type       = RET_INTEGER,
8779         .arg1_type      = ARG_PTR_TO_CTX,
8780         .arg2_type      = ARG_ANYTHING,
8781         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
8782         .arg4_type      = ARG_CONST_SIZE,
8783 };
8784
8785 BPF_CALL_5(sk_reuseport_load_bytes_relative,
8786            const struct sk_reuseport_kern *, reuse_kern, u32, offset,
8787            void *, to, u32, len, u32, start_header)
8788 {
8789         return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to,
8790                                                len, start_header);
8791 }
8792
8793 static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = {
8794         .func           = sk_reuseport_load_bytes_relative,
8795         .gpl_only       = false,
8796         .ret_type       = RET_INTEGER,
8797         .arg1_type      = ARG_PTR_TO_CTX,
8798         .arg2_type      = ARG_ANYTHING,
8799         .arg3_type      = ARG_PTR_TO_UNINIT_MEM,
8800         .arg4_type      = ARG_CONST_SIZE,
8801         .arg5_type      = ARG_ANYTHING,
8802 };
8803
8804 static const struct bpf_func_proto *
8805 sk_reuseport_func_proto(enum bpf_func_id func_id,
8806                         const struct bpf_prog *prog)
8807 {
8808         switch (func_id) {
8809         case BPF_FUNC_sk_select_reuseport:
8810                 return &sk_select_reuseport_proto;
8811         case BPF_FUNC_skb_load_bytes:
8812                 return &sk_reuseport_load_bytes_proto;
8813         case BPF_FUNC_skb_load_bytes_relative:
8814                 return &sk_reuseport_load_bytes_relative_proto;
8815         default:
8816                 return bpf_base_func_proto(func_id);
8817         }
8818 }
8819
8820 static bool
8821 sk_reuseport_is_valid_access(int off, int size,
8822                              enum bpf_access_type type,
8823                              const struct bpf_prog *prog,
8824                              struct bpf_insn_access_aux *info)
8825 {
8826         const u32 size_default = sizeof(__u32);
8827
8828         if (off < 0 || off >= sizeof(struct sk_reuseport_md) ||
8829             off % size || type != BPF_READ)
8830                 return false;
8831
8832         switch (off) {
8833         case offsetof(struct sk_reuseport_md, data):
8834                 info->reg_type = PTR_TO_PACKET;
8835                 return size == sizeof(__u64);
8836
8837         case offsetof(struct sk_reuseport_md, data_end):
8838                 info->reg_type = PTR_TO_PACKET_END;
8839                 return size == sizeof(__u64);
8840
8841         case offsetof(struct sk_reuseport_md, hash):
8842                 return size == size_default;
8843
8844         /* Fields that allow narrowing */
8845         case bpf_ctx_range(struct sk_reuseport_md, eth_protocol):
8846                 if (size < FIELD_SIZEOF(struct sk_buff, protocol))
8847                         return false;
8848                 /* fall through */
8849         case bpf_ctx_range(struct sk_reuseport_md, ip_protocol):
8850         case bpf_ctx_range(struct sk_reuseport_md, bind_inany):
8851         case bpf_ctx_range(struct sk_reuseport_md, len):
8852                 bpf_ctx_record_field_size(info, size_default);
8853                 return bpf_ctx_narrow_access_ok(off, size, size_default);
8854
8855         default:
8856                 return false;
8857         }
8858 }
8859
8860 #define SK_REUSEPORT_LOAD_FIELD(F) ({                                   \
8861         *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \
8862                               si->dst_reg, si->src_reg,                 \
8863                               bpf_target_off(struct sk_reuseport_kern, F, \
8864                                              FIELD_SIZEOF(struct sk_reuseport_kern, F), \
8865                                              target_size));             \
8866         })
8867
8868 #define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD)                          \
8869         SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern,           \
8870                                     struct sk_buff,                     \
8871                                     skb,                                \
8872                                     SKB_FIELD)
8873
8874 #define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \
8875         SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern,  \
8876                                              struct sock,               \
8877                                              sk,                        \
8878                                              SK_FIELD, BPF_SIZE, EXTRA_OFF)
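/* SK_REUSEPORT_LOAD_SKB_FIELD(len), for example, expands to the
 * equivalent of (a sketch):
 *
 *	skb = ((struct sk_reuseport_kern *)ctx)->skb;
 *	val = skb->len;
 *
 * i.e. one load of the nested struct pointer followed by one load of
 * the field itself, as with the other nested-field macros in this
 * file.
 */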
8879
8880 static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type,
8881                                            const struct bpf_insn *si,
8882                                            struct bpf_insn *insn_buf,
8883                                            struct bpf_prog *prog,
8884                                            u32 *target_size)
8885 {
8886         struct bpf_insn *insn = insn_buf;
8887
8888         switch (si->off) {
8889         case offsetof(struct sk_reuseport_md, data):
8890                 SK_REUSEPORT_LOAD_SKB_FIELD(data);
8891                 break;
8892
8893         case offsetof(struct sk_reuseport_md, len):
8894                 SK_REUSEPORT_LOAD_SKB_FIELD(len);
8895                 break;
8896
8897         case offsetof(struct sk_reuseport_md, eth_protocol):
8898                 SK_REUSEPORT_LOAD_SKB_FIELD(protocol);
8899                 break;
8900
8901         case offsetof(struct sk_reuseport_md, ip_protocol):
8902                 BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE);
8903                 SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset,
8904                                                     BPF_W, 0);
8905                 *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
8906                 *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg,
8907                                         SK_FL_PROTO_SHIFT);
8908                 /* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian
8909                  * aware.  No further narrowing or masking is needed.
8910                  */
8911                 *target_size = 1;
8912                 break;
8913
8914         case offsetof(struct sk_reuseport_md, data_end):
8915                 SK_REUSEPORT_LOAD_FIELD(data_end);
8916                 break;
8917
8918         case offsetof(struct sk_reuseport_md, hash):
8919                 SK_REUSEPORT_LOAD_FIELD(hash);
8920                 break;
8921
8922         case offsetof(struct sk_reuseport_md, bind_inany):
8923                 SK_REUSEPORT_LOAD_FIELD(bind_inany);
8924                 break;
8925         }
8926
8927         return insn - insn_buf;
8928 }
8929
8930 const struct bpf_verifier_ops sk_reuseport_verifier_ops = {
8931         .get_func_proto         = sk_reuseport_func_proto,
8932         .is_valid_access        = sk_reuseport_is_valid_access,
8933         .convert_ctx_access     = sk_reuseport_convert_ctx_access,
8934 };
8935
8936 const struct bpf_prog_ops sk_reuseport_prog_ops = {
8937 };
8938 #endif /* CONFIG_INET */