/*
 * Linux Socket Filter - Kernel level socket filtering
 *
 * Based on the design of the Berkeley Packet Filter. The new
 * internal format has been designed by PLUMgrid:
 *
 *	Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 *
 * Authors:
 *
 *	Jay Schulist <jschlst@samba.org>
 *	Alexei Starovoitov <ast@plumgrid.com>
 *	Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Andi Kleen - Fix a few bad bugs and races.
 * Kris Katterjohn - Added many additional checks in bpf_check_classic()
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
/**
 *	sk_filter_trim_cap - run a packet through a socket filter
 *	@sk: sock associated with &sk_buff
 *	@skb: buffer to filter
 *	@cap: limit on how short the eBPF program may trim the packet
 *
 * Run the eBPF program and then cut skb->data to the correct size returned
 * by the program. If pkt_len is 0 we toss the packet. If skb->len is smaller
 * than pkt_len we keep the whole skb->data. This is the socket level
 * wrapper to BPF_PROG_RUN. It returns 0 if the packet should
 * be accepted or -EPERM if the packet should be tossed.
 *
 */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap)
{
	int err;
	struct sk_filter *filter;

	/*
	 * If the skb was allocated from pfmemalloc reserves, only
	 * allow SOCK_MEMALLOC sockets to use it as this socket is
	 * helping free memory
	 */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP);
		return -ENOMEM;
	}
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
	if (err)
		return err;

	err = security_sock_rcv_skb(sk, skb);
	if (err)
		return err;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter) {
		struct sock *save_sk = skb->sk;
		unsigned int pkt_len;

		skb->sk = sk;
		pkt_len = bpf_prog_run_save_cb(filter->prog, skb);
		skb->sk = save_sk;
		err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM;
	}
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(sk_filter_trim_cap);

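/* For illustration, the common sk_filter() entry point is just a thin
 * wrapper around the function above with a trim cap of one byte, i.e.
 * an accepting program that returns a short length may trim the packet
 * but never below a single byte (as defined in include/linux/filter.h):
 *
 *	static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
 *	{
 *		return sk_filter_trim_cap(sk, skb, 1);
 *	}
 */
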
BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb)
{
	return skb_get_poff(skb);
}

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x)
{
	struct nlattr *nla;

	if (skb_is_nonlinear(skb))
		return 0;

	if (skb->len < sizeof(struct nlattr))
		return 0;

	if (a > skb->len - sizeof(struct nlattr))
		return 0;

	nla = (struct nlattr *) &skb->data[a];
	if (nla->nla_len > skb->len - a)
		return 0;

	nla = nla_find_nested(nla, x);
	if (nla)
		return (void *) nla - (void *) skb->data;

	return 0;
}

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u8 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return *(u8 *)(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return tmp;
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return *(u8 *)ptr;
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len,
					 offset);
}

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u16 tmp, *ptr;
	const int len = sizeof(tmp);

	if (offset >= 0) {
		if (headlen - offset >= len)
			return get_unaligned_be16(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be16_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be16(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *,
	   data, int, headlen, int, offset)
{
	u32 tmp, *ptr;
	const int len = sizeof(tmp);

	if (likely(offset >= 0)) {
		if (headlen - offset >= len)
			return get_unaligned_be32(data + offset);
		if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp)))
			return be32_to_cpu(tmp);
	} else {
		ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len);
		if (likely(ptr))
			return get_unaligned_be32(ptr);
	}

	return -EFAULT;
}

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
	   int, offset)
{
	return ____bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len,
					  offset);
}

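/* The load helpers above back the LD_ABS/LD_IND conversion further below.
 * As an illustrative sketch, the classic insn
 *
 *	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12)	// A = EtherType half-word
 *
 * ends up on the slow path as roughly
 *
 *	ret = ____bpf_skb_load_helper_16(skb, skb->data,
 *					 skb->len - skb->data_len, 12);
 *
 * with ret converted from network to host byte order, and a negative
 * return (-EFAULT, offset out of bounds) translated by the emitted
 * epilogue into "return 0", i.e. the classic BPF drop verdict.
 */
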
BPF_CALL_0(bpf_get_raw_cpu_id)
{
	return raw_smp_processor_id();
}

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
	.func		= bpf_get_raw_cpu_id,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
};

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
			      struct bpf_insn *insn_buf)
{
	struct bpf_insn *insn = insn_buf;

	switch (skb_field) {
	case SKF_AD_MARK:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);

		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
				      offsetof(struct sk_buff, mark));
		break;

	case SKF_AD_PKTTYPE:
		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET());
		*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
		*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
		break;

	case SKF_AD_QUEUE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);

		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, queue_mapping));
		break;

	case SKF_AD_VLAN_TAG:
	case SKF_AD_VLAN_TAG_PRESENT:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
		BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);

		/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
		*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
				      offsetof(struct sk_buff, vlan_tci));
		if (skb_field == SKF_AD_VLAN_TAG) {
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg,
						~VLAN_TAG_PRESENT);
		} else {
			/* dst_reg >>= 12 */
			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 12);
			/* dst_reg &= 1 */
			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
		}
		break;
	}

	return insn - insn_buf;
}

static bool convert_bpf_extensions(struct sock_filter *fp,
				   struct bpf_insn **insnp)
{
	struct bpf_insn *insn = *insnp;
	u32 cnt;

	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);

		/* A = *(u16 *) (CTX + offsetof(protocol)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, protocol));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PKTTYPE:
		cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
		BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);

		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
				      BPF_REG_TMP, BPF_REG_CTX,
				      offsetof(struct sk_buff, dev));
		/* if (tmp != 0) goto pc + 1 */
		*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1);
		*insn++ = BPF_EXIT_INSN();
		if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX)
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, ifindex));
		else
			*insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP,
					    offsetof(struct net_device, type));
		break;

	case SKF_AD_OFF + SKF_AD_MARK:
		cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_RXHASH:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);

		*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
				    offsetof(struct sk_buff, hash));
		break;

	case SKF_AD_OFF + SKF_AD_QUEUE:
		cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
		cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
					 BPF_REG_A, BPF_REG_CTX, insn);
		insn += cnt - 1;
		break;

	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
		BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);

		/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
		*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX,
				      offsetof(struct sk_buff, vlan_proto));
		/* A = ntohs(A) [emitting a nop or swap16] */
		*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16);
		break;

	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
		/* arg1 = CTX */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
		/* arg2 = A */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
		/* arg3 = X */
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
		/* Emit call(arg1=CTX, arg2=A, arg3=X) */
		switch (fp->k) {
		case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
			*insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr);
			break;
		case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
			*insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest);
			break;
		case SKF_AD_OFF + SKF_AD_CPU:
			*insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id);
			break;
		case SKF_AD_OFF + SKF_AD_RANDOM:
			*insn = BPF_EMIT_CALL(bpf_user_rnd_u32);
			bpf_user_rnd_init_once();
			break;
		}
		break;

	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* A ^= X */
		*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X);
		break;

	default:
		/* This is just a dummy call to avoid letting the compiler
		 * evict __bpf_call_base() as an optimization. Placed here
		 * where no-one bothers.
		 */
		BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0);
		return false;
	}

	*insnp = insn;
	return true;
}

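/* Example: a classic ancillary load such as
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU)
 *
 * is intercepted above and becomes three register moves plus a helper
 * call in eBPF, roughly:
 *
 *	BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX),
 *	BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A),
 *	BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X),
 *	BPF_EMIT_CALL(bpf_get_raw_cpu_id),
 *
 * leaving the CPU number in R0, which the conversion maps to classic A.
 */
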
static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
{
	const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS);
	int size = bpf_size_to_bytes(BPF_SIZE(fp->code));
	bool endian = BPF_SIZE(fp->code) == BPF_H ||
		      BPF_SIZE(fp->code) == BPF_W;
	bool indirect = BPF_MODE(fp->code) == BPF_IND;
	const int ip_align = NET_IP_ALIGN;
	struct bpf_insn *insn = *insnp;
	int offset = fp->k;

	if (!indirect &&
	    ((unaligned_ok && offset >= 0) ||
	     (!unaligned_ok && offset >= 0 &&
	      offset + ip_align >= 0 &&
	      offset + ip_align % size == 0))) {
		bool ldx_off_ok = offset <= S16_MAX;

		*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
		*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
		*insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
				      size, 2 + endian + (!ldx_off_ok * 2));
		if (ldx_off_ok) {
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_D, offset);
		} else {
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
			*insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
					      BPF_REG_TMP, 0);
		}
		if (endian)
			*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
		*insn++ = BPF_JMP_A(8);
	}

	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D);
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H);
	if (!indirect) {
		*insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset);
	} else {
		*insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X);
		if (fp->k)
			*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset);
	}

	switch (BPF_SIZE(fp->code)) {
	case BPF_B:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8);
		break;
	case BPF_H:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16);
		break;
	case BPF_W:
		*insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32);
		break;
	default:
		return false;
	}

	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2);
	*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
	*insn = BPF_EXIT_INSN();

	*insnp = insn;
	return true;
}

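/* Illustrative fast path: with CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, a
 * classic "ld [12]" (BPF_LD | BPF_H | BPF_ABS) is emitted above without
 * any helper call: a signed bounds check of the cached headlen in
 * BPF_REG_H against the access size, a direct BPF_LDX_MEM from the
 * cached skb->data in BPF_REG_D, and a BPF_FROM_BE swap. Only offsets
 * that turn out to lie beyond the linear head at runtime (or negative
 * offsets at conversion time) go through bpf_skb_load_helper_16().
 */
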
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *	@seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *   bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    convert_bpf_ld_abs(fp, &insn)) {
				*seen_ld_abs = true;
				break;
			}

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
		s32 off;						\
									\
		if (target >= len || target < 0)			\
			goto err;					\
		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		off -= insn - tmp_insns;				\
		/* Reject anything not fitting into insn->off. */	\
		if (off < off_min || off > off_max)			\
			goto err;					\
		insn->off = off;					\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}

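/* Example: the (hypothetical) two-insn classic program
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),		// A = skb->len
 *	BPF_STMT(BPF_RET | BPF_A, 0),			// return A
 *
 * converts, with no ld_abs/ind seen, into the eBPF sequence
 *
 *	BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A),	// prologue: A = 0
 *	BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X),	// prologue: X = 0
 *	BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1),	// prologue: save CTX
 *	BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX,
 *		    offsetof(struct sk_buff, len)),
 *	BPF_EXIT_INSN(),				// RET_A needs no mov
 */
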
/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by the user never tries
 * to read a cell that was not previously written, and we check all branches
 * to be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

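/* Example: the check above rejects a (hypothetical) filter like
 *
 *	BPF_STMT(BPF_LD | BPF_MEM, 3),		// read mem[3] ...
 *	BPF_STMT(BPF_RET | BPF_A, 0),
 *
 * because mem[3] is read without a preceding BPF_ST/BPF_STX to that
 * cell, which would otherwise let the filter observe uninitialized
 * per-packet scratch memory.
 */
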
static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}

/**
 *	bpf_check_classic - verify socket filter code
 *	@filter: filter to verify
 *	@flen: length of filter
 *
 * Check the user's filter code. If we let some ugly
 * filter code slip through kaboom! The filter must contain
 * no references or jumps that are out of range, no illegal
 * instructions, and must end with a RET instruction.
 *
 * All jumps are forward as they are not signed.
 *
 * Returns 0 if the rule set is legal or -EINVAL if not.
 */
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

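/* Example: the shortest program that passes the check above is a lone
 * return, e.g. an "accept up to 64 bytes" filter:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 64),
 *	};
 *
 * whereas a program whose last insn is not BPF_RET, or whose jumps reach
 * past its end, is rejected with -EINVAL before any conversion happens.
 */
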
static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 *	sk_filter_release_rcu - Release a socket filter by rcu_head
 *	@rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;
	bool seen_ld_abs = false;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}

static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

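/* Illustrative in-kernel usage: callers build an unattached filter from
 * a kernel-resident classic program, roughly:
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	// accept all
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *	int err = bpf_prog_create(&prog, &fprog);
 *
 * with bpf_prog_destroy(prog) releasing it again once done.
 */
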
/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

8ced425e | 1435 | static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk) |
49b31e57 DB |
1436 | { |
1437 | struct sk_filter *fp, *old_fp; | |
1438 | ||
1439 | fp = kmalloc(sizeof(*fp), GFP_KERNEL); | |
1440 | if (!fp) | |
1441 | return -ENOMEM; | |
1442 | ||
1443 | fp->prog = prog; | |
49b31e57 | 1444 | |
4c355cdf | 1445 | if (!__sk_filter_charge(sk, fp)) { |
49b31e57 DB |
1446 | kfree(fp); |
1447 | return -ENOMEM; | |
1448 | } | |
4c355cdf | 1449 | refcount_set(&fp->refcnt, 1); |
49b31e57 | 1450 | |
8ced425e HFS |
1451 | old_fp = rcu_dereference_protected(sk->sk_filter, |
1452 | lockdep_sock_is_held(sk)); | |
49b31e57 | 1453 | rcu_assign_pointer(sk->sk_filter, fp); |
8ced425e | 1454 | |
49b31e57 DB |
1455 | if (old_fp) |
1456 | sk_filter_uncharge(sk, old_fp); | |
1457 | ||
1458 | return 0; | |
1459 | } | |
1460 | ||
538950a1 CG |
1461 | static |
1462 | struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk) | |
1da177e4 | 1463 | { |
009937e7 | 1464 | unsigned int fsize = bpf_classic_proglen(fprog); |
7ae457c1 | 1465 | struct bpf_prog *prog; |
1da177e4 LT |
1466 | int err; |
1467 | ||
d59577b6 | 1468 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
538950a1 | 1469 | return ERR_PTR(-EPERM); |
d59577b6 | 1470 | |
1da177e4 | 1471 | /* Make sure new filter is there and in the right amounts. */ |
f7bd9e36 | 1472 | if (!bpf_check_basics_ok(fprog->filter, fprog->len)) |
538950a1 | 1473 | return ERR_PTR(-EINVAL); |
1da177e4 | 1474 | |
f7bd9e36 | 1475 | prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0); |
7ae457c1 | 1476 | if (!prog) |
538950a1 | 1477 | return ERR_PTR(-ENOMEM); |
a3ea269b | 1478 | |
7ae457c1 | 1479 | if (copy_from_user(prog->insns, fprog->filter, fsize)) { |
c0d1379a | 1480 | __bpf_prog_free(prog); |
538950a1 | 1481 | return ERR_PTR(-EFAULT); |
1da177e4 LT |
1482 | } |
1483 | ||
7ae457c1 | 1484 | prog->len = fprog->len; |
1da177e4 | 1485 | |
7ae457c1 | 1486 | err = bpf_prog_store_orig_filter(prog, fprog); |
a3ea269b | 1487 | if (err) { |
c0d1379a | 1488 | __bpf_prog_free(prog); |
538950a1 | 1489 | return ERR_PTR(err);
a3ea269b DB |
1490 | } |
1491 | ||
7ae457c1 | 1492 | /* bpf_prepare_filter() already takes care of freeing |
bd4cf0ed AS |
1493 | * memory in case something goes wrong. |
1494 | */ | |
538950a1 CG |
1495 | return bpf_prepare_filter(prog, NULL); |
1496 | } | |
1497 | ||
1498 | /** | |
1499 | * sk_attach_filter - attach a socket filter | |
1500 | * @fprog: the filter program | |
1501 | * @sk: the socket to use | |
1502 | * | |
1503 | * Attach the user's filter code. We first run some sanity checks on | |
1504 | * it to make sure it does not explode on us later. If an error | |
1505 | * occurs or there is insufficient memory for the filter, a negative
1506 | * errno code is returned. On success the return is zero. | |
1507 | */ | |
8ced425e | 1508 | int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) |
538950a1 CG |
1509 | { |
1510 | struct bpf_prog *prog = __get_filter(fprog, sk); | |
1511 | int err; | |
1512 | ||
7ae457c1 AS |
1513 | if (IS_ERR(prog)) |
1514 | return PTR_ERR(prog); | |
1515 | ||
8ced425e | 1516 | err = __sk_attach_prog(prog, sk); |
49b31e57 | 1517 | if (err < 0) { |
7ae457c1 | 1518 | __bpf_prog_release(prog); |
49b31e57 | 1519 | return err; |
278571ba AS |
1520 | } |
1521 | ||
d3904b73 | 1522 | return 0; |
1da177e4 | 1523 | } |
8ced425e | 1524 | EXPORT_SYMBOL_GPL(sk_attach_filter); |
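/* Example (sketch): the usual userspace path into sk_attach_filter() is
 * setsockopt(SO_ATTACH_FILTER). The snippet below assumes a raw IPv4
 * socket, so classic BPF loads are relative to the IP header; it
 * accepts UDP and drops everything else.
 */
#include <linux/filter.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int attach_udp_only(int fd)
{
        struct sock_filter code[] = {
                /* A = ip->protocol */
                BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 9),
                /* if (A == IPPROTO_UDP) goto accept; else goto drop */
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, IPPROTO_UDP, 0, 1),
                BPF_STMT(BPF_RET | BPF_K, 0xffff),      /* accept */
                BPF_STMT(BPF_RET | BPF_K, 0),           /* drop */
        };
        struct sock_fprog fprog = {
                .len = sizeof(code) / sizeof(code[0]),
                .filter = code,
        };

        return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
                          &fprog, sizeof(fprog));
}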
1da177e4 | 1525 | |
538950a1 | 1526 | int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk) |
89aa0758 | 1527 | { |
538950a1 | 1528 | struct bpf_prog *prog = __get_filter(fprog, sk); |
49b31e57 | 1529 | int err; |
89aa0758 | 1530 | |
538950a1 CG |
1531 | if (IS_ERR(prog)) |
1532 | return PTR_ERR(prog); | |
1533 | ||
8217ca65 MKL |
1534 | if (bpf_prog_size(prog->len) > sysctl_optmem_max) |
1535 | err = -ENOMEM; | |
1536 | else | |
1537 | err = reuseport_attach_prog(sk, prog); | |
1538 | ||
1539 | if (err) | |
538950a1 | 1540 | __bpf_prog_release(prog); |
538950a1 | 1541 | |
8217ca65 | 1542 | return err; |
538950a1 CG |
1543 | } |
1544 | ||
1545 | static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk) | |
1546 | { | |
89aa0758 | 1547 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
538950a1 | 1548 | return ERR_PTR(-EPERM); |
89aa0758 | 1549 | |
113214be | 1550 | return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); |
538950a1 CG |
1551 | } |
1552 | ||
1553 | int sk_attach_bpf(u32 ufd, struct sock *sk) | |
1554 | { | |
1555 | struct bpf_prog *prog = __get_bpf(ufd, sk); | |
1556 | int err; | |
1557 | ||
1558 | if (IS_ERR(prog)) | |
1559 | return PTR_ERR(prog); | |
1560 | ||
8ced425e | 1561 | err = __sk_attach_prog(prog, sk); |
49b31e57 | 1562 | if (err < 0) { |
89aa0758 | 1563 | bpf_prog_put(prog); |
49b31e57 | 1564 | return err; |
89aa0758 AS |
1565 | } |
1566 | ||
89aa0758 AS |
1567 | return 0; |
1568 | } | |
1569 | ||
538950a1 CG |
1570 | int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk) |
1571 | { | |
8217ca65 | 1572 | struct bpf_prog *prog; |
538950a1 CG |
1573 | int err; |
1574 | ||
8217ca65 MKL |
1575 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
1576 | return -EPERM; | |
1577 | ||
1578 | prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER); | |
1579 | if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL) | |
1580 | prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT); | |
538950a1 CG |
1581 | if (IS_ERR(prog)) |
1582 | return PTR_ERR(prog); | |
1583 | ||
8217ca65 MKL |
1584 | if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) { |
1585 | /* Like other non BPF_PROG_TYPE_SOCKET_FILTER | |
1586 | * bpf prog (e.g. sockmap). It depends on the | |
1587 | * limitation imposed by bpf_prog_load(). | |
1588 | * Hence, sysctl_optmem_max is not checked. | |
1589 | */ | |
1590 | if ((sk->sk_type != SOCK_STREAM && | |
1591 | sk->sk_type != SOCK_DGRAM) || | |
1592 | (sk->sk_protocol != IPPROTO_UDP && | |
1593 | sk->sk_protocol != IPPROTO_TCP) || | |
1594 | (sk->sk_family != AF_INET && | |
1595 | sk->sk_family != AF_INET6)) { | |
1596 | err = -ENOTSUPP; | |
1597 | goto err_prog_put; | |
1598 | } | |
1599 | } else { | |
1600 | /* BPF_PROG_TYPE_SOCKET_FILTER */ | |
1601 | if (bpf_prog_size(prog->len) > sysctl_optmem_max) { | |
1602 | err = -ENOMEM; | |
1603 | goto err_prog_put; | |
1604 | } | |
538950a1 CG |
1605 | } |
1606 | ||
8217ca65 MKL |
1607 | err = reuseport_attach_prog(sk, prog); |
1608 | err_prog_put: | |
1609 | if (err) | |
1610 | bpf_prog_put(prog); | |
1611 | ||
1612 | return err; | |
1613 | } | |
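/* Example (sketch): the userspace path into sk_reuseport_attach_bpf()
 * is SO_ATTACH_REUSEPORT_EBPF with an already-loaded bpf(2) prog fd;
 * the socket itself must be part of a SO_REUSEPORT group.
 */
static int attach_reuseport_prog(int sock_fd, int bpf_prog_fd)
{
        return setsockopt(sock_fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_EBPF,
                          &bpf_prog_fd, sizeof(bpf_prog_fd));
}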
1614 | ||
1615 | void sk_reuseport_prog_free(struct bpf_prog *prog) | |
1616 | { | |
1617 | if (!prog) | |
1618 | return; | |
1619 | ||
1620 | if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) | |
1621 | bpf_prog_put(prog); | |
1622 | else | |
1623 | bpf_prog_destroy(prog); | |
538950a1 CG |
1624 | } |
1625 | ||
21cafc1d DB |
1626 | struct bpf_scratchpad { |
1627 | union { | |
1628 | __be32 diff[MAX_BPF_STACK / sizeof(__be32)]; | |
1629 | u8 buff[MAX_BPF_STACK]; | |
1630 | }; | |
1631 | }; | |
1632 | ||
1633 | static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp); | |
91bc4822 | 1634 | |
5293efe6 DB |
1635 | static inline int __bpf_try_make_writable(struct sk_buff *skb, |
1636 | unsigned int write_len) | |
1637 | { | |
1638 | return skb_ensure_writable(skb, write_len); | |
1639 | } | |
1640 | ||
db58ba45 AS |
1641 | static inline int bpf_try_make_writable(struct sk_buff *skb, |
1642 | unsigned int write_len) | |
1643 | { | |
5293efe6 | 1644 | int err = __bpf_try_make_writable(skb, write_len); |
db58ba45 | 1645 | |
6aaae2b6 | 1646 | bpf_compute_data_pointers(skb); |
db58ba45 AS |
1647 | return err; |
1648 | } | |
1649 | ||
36bbef52 DB |
1650 | static int bpf_try_make_head_writable(struct sk_buff *skb) |
1651 | { | |
1652 | return bpf_try_make_writable(skb, skb_headlen(skb)); | |
1653 | } | |
1654 | ||
a2bfe6bf DB |
1655 | static inline void bpf_push_mac_rcsum(struct sk_buff *skb) |
1656 | { | |
1657 | if (skb_at_tc_ingress(skb)) | |
1658 | skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len); | |
1659 | } | |
1660 | ||
8065694e DB |
1661 | static inline void bpf_pull_mac_rcsum(struct sk_buff *skb) |
1662 | { | |
1663 | if (skb_at_tc_ingress(skb)) | |
1664 | skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len); | |
1665 | } | |
1666 | ||
f3694e00 DB |
1667 | BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset, |
1668 | const void *, from, u32, len, u64, flags) | |
608cd71a | 1669 | { |
608cd71a AS |
1670 | void *ptr; |
1671 | ||
8afd54c8 | 1672 | if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH))) |
781c53bc | 1673 | return -EINVAL; |
0ed661d5 | 1674 | if (unlikely(offset > 0xffff)) |
608cd71a | 1675 | return -EFAULT; |
db58ba45 | 1676 | if (unlikely(bpf_try_make_writable(skb, offset + len))) |
608cd71a AS |
1677 | return -EFAULT; |
1678 | ||
0ed661d5 | 1679 | ptr = skb->data + offset; |
781c53bc | 1680 | if (flags & BPF_F_RECOMPUTE_CSUM) |
479ffccc | 1681 | __skb_postpull_rcsum(skb, ptr, len, offset); |
608cd71a AS |
1682 | |
1683 | memcpy(ptr, from, len); | |
1684 | ||
781c53bc | 1685 | if (flags & BPF_F_RECOMPUTE_CSUM) |
479ffccc | 1686 | __skb_postpush_rcsum(skb, ptr, len, offset); |
8afd54c8 DB |
1687 | if (flags & BPF_F_INVALIDATE_HASH) |
1688 | skb_clear_hash(skb); | |
f8ffad69 | 1689 | |
608cd71a AS |
1690 | return 0; |
1691 | } | |
1692 | ||
577c50aa | 1693 | static const struct bpf_func_proto bpf_skb_store_bytes_proto = { |
608cd71a AS |
1694 | .func = bpf_skb_store_bytes, |
1695 | .gpl_only = false, | |
1696 | .ret_type = RET_INTEGER, | |
1697 | .arg1_type = ARG_PTR_TO_CTX, | |
1698 | .arg2_type = ARG_ANYTHING, | |
39f19ebb AS |
1699 | .arg3_type = ARG_PTR_TO_MEM, |
1700 | .arg4_type = ARG_CONST_SIZE, | |
91bc4822 AS |
1701 | .arg5_type = ARG_ANYTHING, |
1702 | }; | |
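/* Example (sketch): a tc/BPF program using the helper above to rewrite
 * the destination MAC. Restricted C in libbpf style; SEC() and the
 * helper declarations are assumed to come from bpf/bpf_helpers.h, not
 * from this file.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int set_dst_mac(struct __sk_buff *skb)
{
        const __u8 mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

        /* Offset 0 is the start of the MAC header at tc; no csum flags
         * are needed since the Ethernet header is not checksummed.
         */
        bpf_skb_store_bytes(skb, 0, mac, sizeof(mac), 0);
        return TC_ACT_OK;
}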
1703 | ||
f3694e00 DB |
1704 | BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset, |
1705 | void *, to, u32, len) | |
05c74e5e | 1706 | { |
05c74e5e DB |
1707 | void *ptr; |
1708 | ||
0ed661d5 | 1709 | if (unlikely(offset > 0xffff)) |
074f528e | 1710 | goto err_clear; |
05c74e5e DB |
1711 | |
1712 | ptr = skb_header_pointer(skb, offset, len, to); | |
1713 | if (unlikely(!ptr)) | |
074f528e | 1714 | goto err_clear; |
05c74e5e DB |
1715 | if (ptr != to) |
1716 | memcpy(to, ptr, len); | |
1717 | ||
1718 | return 0; | |
074f528e DB |
1719 | err_clear: |
1720 | memset(to, 0, len); | |
1721 | return -EFAULT; | |
05c74e5e DB |
1722 | } |
1723 | ||
577c50aa | 1724 | static const struct bpf_func_proto bpf_skb_load_bytes_proto = { |
05c74e5e DB |
1725 | .func = bpf_skb_load_bytes, |
1726 | .gpl_only = false, | |
1727 | .ret_type = RET_INTEGER, | |
1728 | .arg1_type = ARG_PTR_TO_CTX, | |
1729 | .arg2_type = ARG_ANYTHING, | |
39f19ebb AS |
1730 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, |
1731 | .arg4_type = ARG_CONST_SIZE, | |
05c74e5e DB |
1732 | }; |
1733 | ||
4e1ec56c DB |
1734 | BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb, |
1735 | u32, offset, void *, to, u32, len, u32, start_header) | |
1736 | { | |
3eee1f75 DB |
1737 | u8 *end = skb_tail_pointer(skb); |
1738 | u8 *net = skb_network_header(skb); | |
1739 | u8 *mac = skb_mac_header(skb); | |
4e1ec56c DB |
1740 | u8 *ptr; |
1741 | ||
3eee1f75 | 1742 | if (unlikely(offset > 0xffff || len > (end - mac))) |
4e1ec56c DB |
1743 | goto err_clear; |
1744 | ||
1745 | switch (start_header) { | |
1746 | case BPF_HDR_START_MAC: | |
3eee1f75 | 1747 | ptr = mac + offset; |
4e1ec56c DB |
1748 | break; |
1749 | case BPF_HDR_START_NET: | |
3eee1f75 | 1750 | ptr = net + offset; |
4e1ec56c DB |
1751 | break; |
1752 | default: | |
1753 | goto err_clear; | |
1754 | } | |
1755 | ||
3eee1f75 | 1756 | if (likely(ptr >= mac && ptr + len <= end)) { |
4e1ec56c DB |
1757 | memcpy(to, ptr, len); |
1758 | return 0; | |
1759 | } | |
1760 | ||
1761 | err_clear: | |
1762 | memset(to, 0, len); | |
1763 | return -EFAULT; | |
1764 | } | |
1765 | ||
1766 | static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = { | |
1767 | .func = bpf_skb_load_bytes_relative, | |
1768 | .gpl_only = false, | |
1769 | .ret_type = RET_INTEGER, | |
1770 | .arg1_type = ARG_PTR_TO_CTX, | |
1771 | .arg2_type = ARG_ANYTHING, | |
1772 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, | |
1773 | .arg4_type = ARG_CONST_SIZE, | |
1774 | .arg5_type = ARG_ANYTHING, | |
1775 | }; | |
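/* Example (sketch): reading the IPv4 TTL relative to the network header
 * via BPF_HDR_START_NET, which also works where the mac header is not
 * set (e.g. tc egress of an L3 device). Libbpf-style restricted C;
 * declarations assumed from bpf/bpf_helpers.h.
 */
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/ip.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int read_ttl(struct __sk_buff *skb)
{
        __u8 ttl;

        if (bpf_skb_load_bytes_relative(skb, offsetof(struct iphdr, ttl),
                                        &ttl, sizeof(ttl),
                                        BPF_HDR_START_NET) < 0)
                return TC_ACT_OK;
        /* ... act on ttl ... */
        return TC_ACT_OK;
}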
1776 | ||
36bbef52 DB |
1777 | BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len) |
1778 | { | |
1779 | /* The idea is the following: should the needed direct read/write
1780 | * test fail at runtime, we can pull in more data and redo the
1781 | * test, since implicitly we invalidate the previous checks here.
1782 | *
1783 | * Or, since we know how much we need to make readable/writable,
1784 | * this can be done once at the beginning of the program for the
1785 | * direct access case. By this we overcome the limitation of only
1786 | * the current headroom being accessible.
1787 | */ | |
1788 | return bpf_try_make_writable(skb, len ? : skb_headlen(skb)); | |
1789 | } | |
1790 | ||
1791 | static const struct bpf_func_proto bpf_skb_pull_data_proto = { | |
1792 | .func = bpf_skb_pull_data, | |
1793 | .gpl_only = false, | |
1794 | .ret_type = RET_INTEGER, | |
1795 | .arg1_type = ARG_PTR_TO_CTX, | |
1796 | .arg2_type = ARG_ANYTHING, | |
1797 | }; | |
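/* Example (sketch): the pattern described in the comment above -- pull
 * once at the start, then re-derive and re-check the data pointers
 * before direct access, since the pull invalidated them.
 */
#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("classifier")
int parse_first_64(struct __sk_buff *skb)
{
        void *data, *data_end;

        if (bpf_skb_pull_data(skb, 64) < 0)
                return TC_ACT_OK;
        data = (void *)(long)skb->data;
        data_end = (void *)(long)skb->data_end;
        if (data + 64 > data_end)
                return TC_ACT_OK;
        /* ... direct read/write of the first 64 bytes ... */
        return TC_ACT_OK;
}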
1798 | ||
0ea488ff JF |
1799 | static inline int sk_skb_try_make_writable(struct sk_buff *skb, |
1800 | unsigned int write_len) | |
1801 | { | |
1802 | int err = __bpf_try_make_writable(skb, write_len); | |
1803 | ||
1804 | bpf_compute_data_end_sk_skb(skb); | |
1805 | return err; | |
1806 | } | |
1807 | ||
1808 | BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len) | |
1809 | { | |
1810 | /* The idea is the following: should the needed direct read/write
1811 | * test fail at runtime, we can pull in more data and redo the
1812 | * test, since implicitly we invalidate the previous checks here.
1813 | *
1814 | * Or, since we know how much we need to make readable/writable,
1815 | * this can be done once at the beginning of the program for the
1816 | * direct access case. By this we overcome the limitation of only
1817 | * the current headroom being accessible.
1818 | */ | |
1819 | return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb)); | |
1820 | } | |
1821 | ||
1822 | static const struct bpf_func_proto sk_skb_pull_data_proto = { | |
1823 | .func = sk_skb_pull_data, | |
1824 | .gpl_only = false, | |
1825 | .ret_type = RET_INTEGER, | |
1826 | .arg1_type = ARG_PTR_TO_CTX, | |
1827 | .arg2_type = ARG_ANYTHING, | |
1828 | }; | |
1829 | ||
f3694e00 DB |
1830 | BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset, |
1831 | u64, from, u64, to, u64, flags) | |
91bc4822 | 1832 | { |
0ed661d5 | 1833 | __sum16 *ptr; |
91bc4822 | 1834 | |
781c53bc DB |
1835 | if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK))) |
1836 | return -EINVAL; | |
0ed661d5 | 1837 | if (unlikely(offset > 0xffff || offset & 1)) |
91bc4822 | 1838 | return -EFAULT; |
0ed661d5 | 1839 | if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) |
91bc4822 AS |
1840 | return -EFAULT; |
1841 | ||
0ed661d5 | 1842 | ptr = (__sum16 *)(skb->data + offset); |
781c53bc | 1843 | switch (flags & BPF_F_HDR_FIELD_MASK) { |
8050c0f0 DB |
1844 | case 0: |
1845 | if (unlikely(from != 0)) | |
1846 | return -EINVAL; | |
1847 | ||
1848 | csum_replace_by_diff(ptr, to); | |
1849 | break; | |
91bc4822 AS |
1850 | case 2: |
1851 | csum_replace2(ptr, from, to); | |
1852 | break; | |
1853 | case 4: | |
1854 | csum_replace4(ptr, from, to); | |
1855 | break; | |
1856 | default: | |
1857 | return -EINVAL; | |
1858 | } | |
1859 | ||
91bc4822 AS |
1860 | return 0; |
1861 | } | |
1862 | ||
577c50aa | 1863 | static const struct bpf_func_proto bpf_l3_csum_replace_proto = { |
91bc4822 AS |
1864 | .func = bpf_l3_csum_replace, |
1865 | .gpl_only = false, | |
1866 | .ret_type = RET_INTEGER, | |
1867 | .arg1_type = ARG_PTR_TO_CTX, | |
1868 | .arg2_type = ARG_ANYTHING, | |
1869 | .arg3_type = ARG_ANYTHING, | |
1870 | .arg4_type = ARG_ANYTHING, | |
1871 | .arg5_type = ARG_ANYTHING, | |
1872 | }; | |
1873 | ||
f3694e00 DB |
1874 | BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset, |
1875 | u64, from, u64, to, u64, flags) | |
91bc4822 | 1876 | { |
781c53bc | 1877 | bool is_pseudo = flags & BPF_F_PSEUDO_HDR; |
2f72959a | 1878 | bool is_mmzero = flags & BPF_F_MARK_MANGLED_0; |
d1b662ad | 1879 | bool do_mforce = flags & BPF_F_MARK_ENFORCE; |
0ed661d5 | 1880 | __sum16 *ptr; |
91bc4822 | 1881 | |
d1b662ad DB |
1882 | if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE | |
1883 | BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK))) | |
781c53bc | 1884 | return -EINVAL; |
0ed661d5 | 1885 | if (unlikely(offset > 0xffff || offset & 1)) |
91bc4822 | 1886 | return -EFAULT; |
0ed661d5 | 1887 | if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr)))) |
91bc4822 AS |
1888 | return -EFAULT; |
1889 | ||
0ed661d5 | 1890 | ptr = (__sum16 *)(skb->data + offset); |
d1b662ad | 1891 | if (is_mmzero && !do_mforce && !*ptr) |
2f72959a | 1892 | return 0; |
91bc4822 | 1893 | |
781c53bc | 1894 | switch (flags & BPF_F_HDR_FIELD_MASK) { |
7d672345 DB |
1895 | case 0: |
1896 | if (unlikely(from != 0)) | |
1897 | return -EINVAL; | |
1898 | ||
1899 | inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo); | |
1900 | break; | |
91bc4822 AS |
1901 | case 2: |
1902 | inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo); | |
1903 | break; | |
1904 | case 4: | |
1905 | inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo); | |
1906 | break; | |
1907 | default: | |
1908 | return -EINVAL; | |
1909 | } | |
1910 | ||
2f72959a DB |
1911 | if (is_mmzero && !*ptr) |
1912 | *ptr = CSUM_MANGLED_0; | |
91bc4822 AS |
1913 | return 0; |
1914 | } | |
1915 | ||
577c50aa | 1916 | static const struct bpf_func_proto bpf_l4_csum_replace_proto = { |
91bc4822 AS |
1917 | .func = bpf_l4_csum_replace, |
1918 | .gpl_only = false, | |
1919 | .ret_type = RET_INTEGER, | |
1920 | .arg1_type = ARG_PTR_TO_CTX, | |
1921 | .arg2_type = ARG_ANYTHING, | |
1922 | .arg3_type = ARG_ANYTHING, | |
1923 | .arg4_type = ARG_ANYTHING, | |
1924 | .arg5_type = ARG_ANYTHING, | |
608cd71a AS |
1925 | }; |
1926 | ||
f3694e00 DB |
1927 | BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size, |
1928 | __be32 *, to, u32, to_size, __wsum, seed) | |
7d672345 | 1929 | { |
21cafc1d | 1930 | struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); |
f3694e00 | 1931 | u32 diff_size = from_size + to_size; |
7d672345 DB |
1932 | int i, j = 0; |
1933 | ||
1934 | /* This is quite flexible, some examples: | |
1935 | * | |
1936 | * from_size == 0, to_size > 0, seed := csum --> pushing data | |
1937 | * from_size > 0, to_size == 0, seed := csum --> pulling data | |
1938 | * from_size > 0, to_size > 0, seed := 0 --> diffing data | |
1939 | * | |
1940 | * Even for diffing, from_size and to_size don't need to be equal. | |
1941 | */ | |
1942 | if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) || | |
1943 | diff_size > sizeof(sp->diff))) | |
1944 | return -EINVAL; | |
1945 | ||
1946 | for (i = 0; i < from_size / sizeof(__be32); i++, j++) | |
1947 | sp->diff[j] = ~from[i]; | |
1948 | for (i = 0; i < to_size / sizeof(__be32); i++, j++) | |
1949 | sp->diff[j] = to[i]; | |
1950 | ||
1951 | return csum_partial(sp->diff, diff_size, seed); | |
1952 | } | |
1953 | ||
577c50aa | 1954 | static const struct bpf_func_proto bpf_csum_diff_proto = { |
7d672345 DB |
1955 | .func = bpf_csum_diff, |
1956 | .gpl_only = false, | |
36bbef52 | 1957 | .pkt_access = true, |
7d672345 | 1958 | .ret_type = RET_INTEGER, |
db1ac496 | 1959 | .arg1_type = ARG_PTR_TO_MEM_OR_NULL, |
39f19ebb | 1960 | .arg2_type = ARG_CONST_SIZE_OR_ZERO, |
db1ac496 | 1961 | .arg3_type = ARG_PTR_TO_MEM_OR_NULL, |
39f19ebb | 1962 | .arg4_type = ARG_CONST_SIZE_OR_ZERO, |
7d672345 DB |
1963 | .arg5_type = ARG_ANYTHING, |
1964 | }; | |
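/* Example (sketch): the diffing mode paired with bpf_l4_csum_replace()
 * when rewriting an IPv4 source address in a TCP packet. The field
 * width 0 in the flags selects the csum-diff case handled above, and
 * BPF_F_PSEUDO_HDR is set since the address is part of the pseudo
 * header. Updating the IP header itself (store plus
 * bpf_l3_csum_replace()) is elided here.
 */
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <bpf/bpf_helpers.h>

static int rewrite_saddr(struct __sk_buff *skb, __be32 old_ip,
                         __be32 new_ip)
{
        __s64 diff = bpf_csum_diff(&old_ip, 4, &new_ip, 4, 0);

        if (diff < 0)
                return diff;
        return bpf_l4_csum_replace(skb, ETH_HLEN + sizeof(struct iphdr) +
                                   offsetof(struct tcphdr, check),
                                   0, diff, BPF_F_PSEUDO_HDR);
}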
1965 | ||
36bbef52 DB |
1966 | BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum) |
1967 | { | |
1968 | /* The interface is to be used in combination with bpf_csum_diff() | |
1969 | * for direct packet writes. csum rotation for alignment as well | |
1970 | * as emulating csum_sub() can be done from the eBPF program. | |
1971 | */ | |
1972 | if (skb->ip_summed == CHECKSUM_COMPLETE) | |
1973 | return (skb->csum = csum_add(skb->csum, csum)); | |
1974 | ||
1975 | return -ENOTSUPP; | |
1976 | } | |
1977 | ||
1978 | static const struct bpf_func_proto bpf_csum_update_proto = { | |
1979 | .func = bpf_csum_update, | |
1980 | .gpl_only = false, | |
1981 | .ret_type = RET_INTEGER, | |
1982 | .arg1_type = ARG_PTR_TO_CTX, | |
1983 | .arg2_type = ARG_ANYTHING, | |
1984 | }; | |
1985 | ||
a70b506e DB |
1986 | static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) |
1987 | { | |
a70b506e DB |
1988 | return dev_forward_skb(dev, skb); |
1989 | } | |
1990 | ||
4e3264d2 MKL |
1991 | static inline int __bpf_rx_skb_no_mac(struct net_device *dev, |
1992 | struct sk_buff *skb) | |
1993 | { | |
1994 | int ret = ____dev_forward_skb(dev, skb); | |
1995 | ||
1996 | if (likely(!ret)) { | |
1997 | skb->dev = dev; | |
1998 | ret = netif_rx(skb); | |
1999 | } | |
2000 | ||
2001 | return ret; | |
2002 | } | |
2003 | ||
a70b506e DB |
2004 | static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) |
2005 | { | |
2006 | int ret; | |
2007 | ||
2008 | if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) { | |
2009 | net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); | |
2010 | kfree_skb(skb); | |
2011 | return -ENETDOWN; | |
2012 | } | |
2013 | ||
2014 | skb->dev = dev; | |
2015 | ||
2016 | __this_cpu_inc(xmit_recursion); | |
2017 | ret = dev_queue_xmit(skb); | |
2018 | __this_cpu_dec(xmit_recursion); | |
2019 | ||
2020 | return ret; | |
2021 | } | |
2022 | ||
4e3264d2 MKL |
2023 | static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, |
2024 | u32 flags) | |
2025 | { | |
2026 | /* skb->mac_len is not set on normal egress */ | |
2027 | unsigned int mlen = skb->network_header - skb->mac_header; | |
2028 | ||
2029 | __skb_pull(skb, mlen); | |
2030 | ||
2031 | /* At ingress, the mac header has already been pulled once. | |
2032 | * At egress, skb_postpull_rcsum has to be done in case
2033 | * the skb originated from ingress (i.e. a forwarded skb)
2034 | * to ensure that rcsum starts at net header. | |
2035 | */ | |
2036 | if (!skb_at_tc_ingress(skb)) | |
2037 | skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); | |
2038 | skb_pop_mac_header(skb); | |
2039 | skb_reset_mac_len(skb); | |
2040 | return flags & BPF_F_INGRESS ? | |
2041 | __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); | |
2042 | } | |
2043 | ||
2044 | static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, | |
2045 | u32 flags) | |
2046 | { | |
3a0af8fd TG |
2047 | /* Verify that a link layer header is carried */ |
2048 | if (unlikely(skb->mac_header >= skb->network_header)) { | |
2049 | kfree_skb(skb); | |
2050 | return -ERANGE; | |
2051 | } | |
2052 | ||
4e3264d2 MKL |
2053 | bpf_push_mac_rcsum(skb); |
2054 | return flags & BPF_F_INGRESS ? | |
2055 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); | |
2056 | } | |
2057 | ||
2058 | static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, | |
2059 | u32 flags) | |
2060 | { | |
c491680f | 2061 | if (dev_is_mac_header_xmit(dev)) |
4e3264d2 | 2062 | return __bpf_redirect_common(skb, dev, flags); |
c491680f DB |
2063 | else |
2064 | return __bpf_redirect_no_mac(skb, dev, flags); | |
4e3264d2 MKL |
2065 | } |
2066 | ||
f3694e00 | 2067 | BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) |
3896d655 | 2068 | { |
3896d655 | 2069 | struct net_device *dev; |
36bbef52 DB |
2070 | struct sk_buff *clone; |
2071 | int ret; | |
3896d655 | 2072 | |
781c53bc DB |
2073 | if (unlikely(flags & ~(BPF_F_INGRESS))) |
2074 | return -EINVAL; | |
2075 | ||
3896d655 AS |
2076 | dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex); |
2077 | if (unlikely(!dev)) | |
2078 | return -EINVAL; | |
2079 | ||
36bbef52 DB |
2080 | clone = skb_clone(skb, GFP_ATOMIC); |
2081 | if (unlikely(!clone)) | |
3896d655 AS |
2082 | return -ENOMEM; |
2083 | ||
36bbef52 DB |
2084 | /* For direct write, we need to keep the invariant that the skbs
2085 | * we're dealing with are uncloned. Should uncloning fail here,
2086 | * we need to free the just generated clone so that the skb is
2087 | * uncloned once again.
2088 | */ | |
2089 | ret = bpf_try_make_head_writable(skb); | |
2090 | if (unlikely(ret)) { | |
2091 | kfree_skb(clone); | |
2092 | return -ENOMEM; | |
2093 | } | |
2094 | ||
4e3264d2 | 2095 | return __bpf_redirect(clone, dev, flags); |
3896d655 AS |
2096 | } |
2097 | ||
577c50aa | 2098 | static const struct bpf_func_proto bpf_clone_redirect_proto = { |
3896d655 AS |
2099 | .func = bpf_clone_redirect, |
2100 | .gpl_only = false, | |
2101 | .ret_type = RET_INTEGER, | |
2102 | .arg1_type = ARG_PTR_TO_CTX, | |
2103 | .arg2_type = ARG_ANYTHING, | |
2104 | .arg3_type = ARG_ANYTHING, | |
2105 | }; | |
2106 | ||
0b19cc0a TM |
2107 | DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); |
2108 | EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); | |
781c53bc | 2109 | |
f3694e00 | 2110 | BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) |
27b29f63 | 2111 | { |
0b19cc0a | 2112 | struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); |
27b29f63 | 2113 | |
781c53bc DB |
2114 | if (unlikely(flags & ~(BPF_F_INGRESS))) |
2115 | return TC_ACT_SHOT; | |
2116 | ||
27b29f63 AS |
2117 | ri->ifindex = ifindex; |
2118 | ri->flags = flags; | |
781c53bc | 2119 | |
27b29f63 AS |
2120 | return TC_ACT_REDIRECT; |
2121 | } | |
2122 | ||
2123 | int skb_do_redirect(struct sk_buff *skb) | |
2124 | { | |
0b19cc0a | 2125 | struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); |
27b29f63 AS |
2126 | struct net_device *dev; |
2127 | ||
2128 | dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex); | |
2129 | ri->ifindex = 0; | |
2130 | if (unlikely(!dev)) { | |
2131 | kfree_skb(skb); | |
2132 | return -EINVAL; | |
2133 | } | |
2134 | ||
4e3264d2 | 2135 | return __bpf_redirect(skb, dev, ri->flags); |
27b29f63 AS |
2136 | } |
2137 | ||
577c50aa | 2138 | static const struct bpf_func_proto bpf_redirect_proto = { |
27b29f63 AS |
2139 | .func = bpf_redirect, |
2140 | .gpl_only = false, | |
2141 | .ret_type = RET_INTEGER, | |
2142 | .arg1_type = ARG_ANYTHING, | |
2143 | .arg2_type = ARG_ANYTHING, | |
2144 | }; | |
2145 | ||
604326b4 | 2146 | BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes) |
2a100317 JF |
2147 | { |
2148 | msg->apply_bytes = bytes; | |
2149 | return 0; | |
2150 | } | |
2151 | ||
2152 | static const struct bpf_func_proto bpf_msg_apply_bytes_proto = { | |
2153 | .func = bpf_msg_apply_bytes, | |
2154 | .gpl_only = false, | |
2155 | .ret_type = RET_INTEGER, | |
2156 | .arg1_type = ARG_PTR_TO_CTX, | |
2157 | .arg2_type = ARG_ANYTHING, | |
2158 | }; | |
2159 | ||
604326b4 | 2160 | BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes) |
91843d54 JF |
2161 | { |
2162 | msg->cork_bytes = bytes; | |
2163 | return 0; | |
2164 | } | |
2165 | ||
2166 | static const struct bpf_func_proto bpf_msg_cork_bytes_proto = { | |
2167 | .func = bpf_msg_cork_bytes, | |
2168 | .gpl_only = false, | |
2169 | .ret_type = RET_INTEGER, | |
2170 | .arg1_type = ARG_PTR_TO_CTX, | |
2171 | .arg2_type = ARG_ANYTHING, | |
2172 | }; | |
2173 | ||
604326b4 DB |
2174 | BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start, |
2175 | u32, end, u64, flags) | |
015632bb | 2176 | { |
604326b4 DB |
2177 | u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start; |
2178 | u32 first_sge, last_sge, i, shift, bytes_sg_total; | |
2179 | struct scatterlist *sge; | |
2180 | u8 *raw, *to, *from; | |
015632bb JF |
2181 | struct page *page; |
2182 | ||
2183 | if (unlikely(flags || end <= start)) | |
2184 | return -EINVAL; | |
2185 | ||
2186 | /* First find the starting scatterlist element */ | |
604326b4 | 2187 | i = msg->sg.start; |
015632bb | 2188 | do { |
604326b4 | 2189 | len = sk_msg_elem(msg, i)->length; |
015632bb JF |
2190 | if (start < offset + len) |
2191 | break; | |
5b24109b | 2192 | offset += len; |
604326b4 DB |
2193 | sk_msg_iter_var_next(i); |
2194 | } while (i != msg->sg.end); | |
015632bb JF |
2195 | |
2196 | if (unlikely(start >= offset + len)) | |
2197 | return -EINVAL; | |
2198 | ||
604326b4 | 2199 | first_sge = i; |
5b24109b DB |
2200 | /* The start may point into the sg element so we need to also |
2201 | * account for the headroom. | |
2202 | */ | |
2203 | bytes_sg_total = start - offset + bytes; | |
604326b4 | 2204 | if (!msg->sg.copy[i] && bytes_sg_total <= len) |
015632bb | 2205 | goto out; |
015632bb JF |
2206 | |
2207 | /* At this point we need to linearize multiple scatterlist | |
2208 | * elements or a single shared page. Either way we need to | |
2209 | * copy into a linear buffer exclusively owned by BPF. Then | |
2210 | * place the buffer in the scatterlist and fixup the original | |
2211 | * entries by removing the entries now in the linear buffer | |
2212 | * and shifting the remaining entries. For now we do not try | |
2213 | * to copy partial entries to avoid complexity of running out | |
2214 | * of sg_entry slots. The downside is reading a single byte | |
2215 | * will copy the entire sg entry. | |
2216 | */ | |
2217 | do { | |
604326b4 DB |
2218 | copy += sk_msg_elem(msg, i)->length; |
2219 | sk_msg_iter_var_next(i); | |
5b24109b | 2220 | if (bytes_sg_total <= copy) |
015632bb | 2221 | break; |
604326b4 DB |
2222 | } while (i != msg->sg.end); |
2223 | last_sge = i; | |
015632bb | 2224 | |
5b24109b | 2225 | if (unlikely(bytes_sg_total > copy)) |
015632bb JF |
2226 | return -EINVAL; |
2227 | ||
4c3d795c TD |
2228 | page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP, |
2229 | get_order(copy)); | |
015632bb JF |
2230 | if (unlikely(!page)) |
2231 | return -ENOMEM; | |
015632bb | 2232 | |
604326b4 DB |
2233 | raw = page_address(page); |
2234 | i = first_sge; | |
015632bb | 2235 | do { |
604326b4 DB |
2236 | sge = sk_msg_elem(msg, i); |
2237 | from = sg_virt(sge); | |
2238 | len = sge->length; | |
2239 | to = raw + poffset; | |
015632bb JF |
2240 | |
2241 | memcpy(to, from, len); | |
9db39f4d | 2242 | poffset += len; |
604326b4 DB |
2243 | sge->length = 0; |
2244 | put_page(sg_page(sge)); | |
015632bb | 2245 | |
604326b4 DB |
2246 | sk_msg_iter_var_next(i); |
2247 | } while (i != last_sge); | |
015632bb | 2248 | |
604326b4 | 2249 | sg_set_page(&msg->sg.data[first_sge], page, copy, 0); |
015632bb JF |
2250 | |
2251 | /* To repair sg ring we need to shift entries. If we only | |
2252 | * had a single entry though we can just replace it and | |
2253 | * be done. Otherwise walk the ring and shift the entries. | |
2254 | */ | |
604326b4 DB |
2255 | WARN_ON_ONCE(last_sge == first_sge); |
2256 | shift = last_sge > first_sge ? | |
2257 | last_sge - first_sge - 1 : | |
2258 | MAX_SKB_FRAGS - first_sge + last_sge - 1; | |
015632bb JF |
2259 | if (!shift) |
2260 | goto out; | |
2261 | ||
604326b4 DB |
2262 | i = first_sge; |
2263 | sk_msg_iter_var_next(i); | |
015632bb | 2264 | do { |
604326b4 | 2265 | u32 move_from; |
015632bb | 2266 | |
604326b4 DB |
2267 | if (i + shift >= MAX_MSG_FRAGS) |
2268 | move_from = i + shift - MAX_MSG_FRAGS; | |
015632bb JF |
2269 | else |
2270 | move_from = i + shift; | |
604326b4 | 2271 | if (move_from == msg->sg.end) |
015632bb JF |
2272 | break; |
2273 | ||
604326b4 DB |
2274 | msg->sg.data[i] = msg->sg.data[move_from]; |
2275 | msg->sg.data[move_from].length = 0; | |
2276 | msg->sg.data[move_from].page_link = 0; | |
2277 | msg->sg.data[move_from].offset = 0; | |
2278 | sk_msg_iter_var_next(i); | |
015632bb | 2279 | } while (1); |
604326b4 DB |
2280 | |
2281 | msg->sg.end = msg->sg.end - shift > msg->sg.end ? | |
2282 | msg->sg.end - shift + MAX_MSG_FRAGS : | |
2283 | msg->sg.end - shift; | |
015632bb | 2284 | out: |
604326b4 | 2285 | msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset; |
015632bb | 2286 | msg->data_end = msg->data + bytes; |
015632bb JF |
2287 | return 0; |
2288 | } | |
2289 | ||
2290 | static const struct bpf_func_proto bpf_msg_pull_data_proto = { | |
2291 | .func = bpf_msg_pull_data, | |
2292 | .gpl_only = false, | |
2293 | .ret_type = RET_INTEGER, | |
2294 | .arg1_type = ARG_PTR_TO_CTX, | |
2295 | .arg2_type = ARG_ANYTHING, | |
2296 | .arg3_type = ARG_ANYTHING, | |
2297 | .arg4_type = ARG_ANYTHING, | |
2298 | }; | |
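/* Example (sketch): an sk_msg program that uses the helper above to
 * make an 8-byte application header directly readable, whatever the
 * scatterlist layout was.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("sk_msg")
int msg_verdict(struct sk_msg_md *msg)
{
        void *data, *data_end;

        if (bpf_msg_pull_data(msg, 0, 8, 0) < 0)
                return SK_PASS;
        data = (void *)(long)msg->data;
        data_end = (void *)(long)msg->data_end;
        if (data + 8 > data_end)
                return SK_PASS;
        /* ... parse the 8-byte header ... */
        return SK_PASS;
}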
2299 | ||
f3694e00 | 2300 | BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) |
8d20aabe | 2301 | { |
f3694e00 | 2302 | return task_get_classid(skb); |
8d20aabe DB |
2303 | } |
2304 | ||
2305 | static const struct bpf_func_proto bpf_get_cgroup_classid_proto = { | |
2306 | .func = bpf_get_cgroup_classid, | |
2307 | .gpl_only = false, | |
2308 | .ret_type = RET_INTEGER, | |
2309 | .arg1_type = ARG_PTR_TO_CTX, | |
2310 | }; | |
2311 | ||
f3694e00 | 2312 | BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb) |
c46646d0 | 2313 | { |
f3694e00 | 2314 | return dst_tclassid(skb); |
c46646d0 DB |
2315 | } |
2316 | ||
2317 | static const struct bpf_func_proto bpf_get_route_realm_proto = { | |
2318 | .func = bpf_get_route_realm, | |
2319 | .gpl_only = false, | |
2320 | .ret_type = RET_INTEGER, | |
2321 | .arg1_type = ARG_PTR_TO_CTX, | |
2322 | }; | |
2323 | ||
f3694e00 | 2324 | BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb) |
13c5c240 DB |
2325 | { |
2326 | /* If skb_clear_hash() was called due to mangling, we can | |
2327 | * trigger SW recalculation here. Later access to hash | |
2328 | * can then use the inline skb->hash via context directly | |
2329 | * instead of calling this helper again. | |
2330 | */ | |
f3694e00 | 2331 | return skb_get_hash(skb); |
13c5c240 DB |
2332 | } |
2333 | ||
2334 | static const struct bpf_func_proto bpf_get_hash_recalc_proto = { | |
2335 | .func = bpf_get_hash_recalc, | |
2336 | .gpl_only = false, | |
2337 | .ret_type = RET_INTEGER, | |
2338 | .arg1_type = ARG_PTR_TO_CTX, | |
2339 | }; | |
2340 | ||
7a4b28c6 DB |
2341 | BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb) |
2342 | { | |
2343 | /* After all direct packet write, this can be used once for | |
2344 | * triggering a lazy recalc on next skb_get_hash() invocation. | |
2345 | */ | |
2346 | skb_clear_hash(skb); | |
2347 | return 0; | |
2348 | } | |
2349 | ||
2350 | static const struct bpf_func_proto bpf_set_hash_invalid_proto = { | |
2351 | .func = bpf_set_hash_invalid, | |
2352 | .gpl_only = false, | |
2353 | .ret_type = RET_INTEGER, | |
2354 | .arg1_type = ARG_PTR_TO_CTX, | |
2355 | }; | |
2356 | ||
ded092cd DB |
2357 | BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash) |
2358 | { | |
2359 | /* Set user specified hash as L4(+), so that it gets returned | |
2360 | * on skb_get_hash() call unless BPF prog later on triggers a | |
2361 | * skb_clear_hash(). | |
2362 | */ | |
2363 | __skb_set_sw_hash(skb, hash, true); | |
2364 | return 0; | |
2365 | } | |
2366 | ||
2367 | static const struct bpf_func_proto bpf_set_hash_proto = { | |
2368 | .func = bpf_set_hash, | |
2369 | .gpl_only = false, | |
2370 | .ret_type = RET_INTEGER, | |
2371 | .arg1_type = ARG_PTR_TO_CTX, | |
2372 | .arg2_type = ARG_ANYTHING, | |
2373 | }; | |
2374 | ||
f3694e00 DB |
2375 | BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto, |
2376 | u16, vlan_tci) | |
4e10df9a | 2377 | { |
db58ba45 | 2378 | int ret; |
4e10df9a AS |
2379 | |
2380 | if (unlikely(vlan_proto != htons(ETH_P_8021Q) && | |
2381 | vlan_proto != htons(ETH_P_8021AD))) | |
2382 | vlan_proto = htons(ETH_P_8021Q); | |
2383 | ||
8065694e | 2384 | bpf_push_mac_rcsum(skb); |
db58ba45 | 2385 | ret = skb_vlan_push(skb, vlan_proto, vlan_tci); |
8065694e DB |
2386 | bpf_pull_mac_rcsum(skb); |
2387 | ||
6aaae2b6 | 2388 | bpf_compute_data_pointers(skb); |
db58ba45 | 2389 | return ret; |
4e10df9a AS |
2390 | } |
2391 | ||
93731ef0 | 2392 | static const struct bpf_func_proto bpf_skb_vlan_push_proto = { |
4e10df9a AS |
2393 | .func = bpf_skb_vlan_push, |
2394 | .gpl_only = false, | |
2395 | .ret_type = RET_INTEGER, | |
2396 | .arg1_type = ARG_PTR_TO_CTX, | |
2397 | .arg2_type = ARG_ANYTHING, | |
2398 | .arg3_type = ARG_ANYTHING, | |
2399 | }; | |
2400 | ||
f3694e00 | 2401 | BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb) |
4e10df9a | 2402 | { |
db58ba45 | 2403 | int ret; |
4e10df9a | 2404 | |
8065694e | 2405 | bpf_push_mac_rcsum(skb); |
db58ba45 | 2406 | ret = skb_vlan_pop(skb); |
8065694e DB |
2407 | bpf_pull_mac_rcsum(skb); |
2408 | ||
6aaae2b6 | 2409 | bpf_compute_data_pointers(skb); |
db58ba45 | 2410 | return ret; |
4e10df9a AS |
2411 | } |
2412 | ||
93731ef0 | 2413 | static const struct bpf_func_proto bpf_skb_vlan_pop_proto = { |
4e10df9a AS |
2414 | .func = bpf_skb_vlan_pop, |
2415 | .gpl_only = false, | |
2416 | .ret_type = RET_INTEGER, | |
2417 | .arg1_type = ARG_PTR_TO_CTX, | |
2418 | }; | |
2419 | ||
6578171a DB |
2420 | static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len) |
2421 | { | |
2422 | /* Caller already did skb_cow() with len as headroom, | |
2423 | * so no need to do it here. | |
2424 | */ | |
2425 | skb_push(skb, len); | |
2426 | memmove(skb->data, skb->data + len, off); | |
2427 | memset(skb->data + off, 0, len); | |
2428 | ||
2429 | /* No skb_postpush_rcsum(skb, skb->data + off, len) | |
2430 | * needed here as it does not change the skb->csum | |
2431 | * result for checksum complete when summing over | |
2432 | * zeroed blocks. | |
2433 | */ | |
2434 | return 0; | |
2435 | } | |
2436 | ||
2437 | static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len) | |
2438 | { | |
2439 | /* skb_ensure_writable() is not needed here, as we're | |
2440 | * already working on an uncloned skb. | |
2441 | */ | |
2442 | if (unlikely(!pskb_may_pull(skb, off + len))) | |
2443 | return -ENOMEM; | |
2444 | ||
2445 | skb_postpull_rcsum(skb, skb->data + off, len); | |
2446 | memmove(skb->data + len, skb->data, off); | |
2447 | __skb_pull(skb, len); | |
2448 | ||
2449 | return 0; | |
2450 | } | |
2451 | ||
2452 | static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len) | |
2453 | { | |
2454 | bool trans_same = skb->transport_header == skb->network_header; | |
2455 | int ret; | |
2456 | ||
2457 | /* There's no need for __skb_push()/__skb_pull() pair to | |
2458 | * get to the start of the mac header as we're guaranteed | |
2459 | * to always start from here under eBPF. | |
2460 | */ | |
2461 | ret = bpf_skb_generic_push(skb, off, len); | |
2462 | if (likely(!ret)) { | |
2463 | skb->mac_header -= len; | |
2464 | skb->network_header -= len; | |
2465 | if (trans_same) | |
2466 | skb->transport_header = skb->network_header; | |
2467 | } | |
2468 | ||
2469 | return ret; | |
2470 | } | |
2471 | ||
2472 | static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len) | |
2473 | { | |
2474 | bool trans_same = skb->transport_header == skb->network_header; | |
2475 | int ret; | |
2476 | ||
2477 | /* Same here, __skb_push()/__skb_pull() pair not needed. */ | |
2478 | ret = bpf_skb_generic_pop(skb, off, len); | |
2479 | if (likely(!ret)) { | |
2480 | skb->mac_header += len; | |
2481 | skb->network_header += len; | |
2482 | if (trans_same) | |
2483 | skb->transport_header = skb->network_header; | |
2484 | } | |
2485 | ||
2486 | return ret; | |
2487 | } | |
2488 | ||
2489 | static int bpf_skb_proto_4_to_6(struct sk_buff *skb) | |
2490 | { | |
2491 | const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); | |
0daf4349 | 2492 | u32 off = skb_mac_header_len(skb); |
6578171a DB |
2493 | int ret; |
2494 | ||
d02f51cb DA |
2495 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ |
2496 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | |
2497 | return -ENOTSUPP; | |
2498 | ||
6578171a DB |
2499 | ret = skb_cow(skb, len_diff); |
2500 | if (unlikely(ret < 0)) | |
2501 | return ret; | |
2502 | ||
2503 | ret = bpf_skb_net_hdr_push(skb, off, len_diff); | |
2504 | if (unlikely(ret < 0)) | |
2505 | return ret; | |
2506 | ||
2507 | if (skb_is_gso(skb)) { | |
d02f51cb DA |
2508 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
2509 | ||
880388aa DM |
2510 | /* SKB_GSO_TCPV4 needs to be changed into |
2511 | * SKB_GSO_TCPV6. | |
6578171a | 2512 | */ |
d02f51cb DA |
2513 | if (shinfo->gso_type & SKB_GSO_TCPV4) { |
2514 | shinfo->gso_type &= ~SKB_GSO_TCPV4; | |
2515 | shinfo->gso_type |= SKB_GSO_TCPV6; | |
6578171a DB |
2516 | } |
2517 | ||
2518 | /* Due to IPv6 header, MSS needs to be downgraded. */ | |
d02f51cb | 2519 | skb_decrease_gso_size(shinfo, len_diff); |
6578171a | 2520 | /* Header must be checked, and gso_segs recomputed. */ |
d02f51cb DA |
2521 | shinfo->gso_type |= SKB_GSO_DODGY; |
2522 | shinfo->gso_segs = 0; | |
6578171a DB |
2523 | } |
2524 | ||
2525 | skb->protocol = htons(ETH_P_IPV6); | |
2526 | skb_clear_hash(skb); | |
2527 | ||
2528 | return 0; | |
2529 | } | |
2530 | ||
2531 | static int bpf_skb_proto_6_to_4(struct sk_buff *skb) | |
2532 | { | |
2533 | const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr); | |
0daf4349 | 2534 | u32 off = skb_mac_header_len(skb); |
6578171a DB |
2535 | int ret; |
2536 | ||
d02f51cb DA |
2537 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ |
2538 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | |
2539 | return -ENOTSUPP; | |
2540 | ||
6578171a DB |
2541 | ret = skb_unclone(skb, GFP_ATOMIC); |
2542 | if (unlikely(ret < 0)) | |
2543 | return ret; | |
2544 | ||
2545 | ret = bpf_skb_net_hdr_pop(skb, off, len_diff); | |
2546 | if (unlikely(ret < 0)) | |
2547 | return ret; | |
2548 | ||
2549 | if (skb_is_gso(skb)) { | |
d02f51cb DA |
2550 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
2551 | ||
880388aa DM |
2552 | /* SKB_GSO_TCPV6 needs to be changed into |
2553 | * SKB_GSO_TCPV4. | |
6578171a | 2554 | */ |
d02f51cb DA |
2555 | if (shinfo->gso_type & SKB_GSO_TCPV6) { |
2556 | shinfo->gso_type &= ~SKB_GSO_TCPV6; | |
2557 | shinfo->gso_type |= SKB_GSO_TCPV4; | |
6578171a DB |
2558 | } |
2559 | ||
2560 | /* Due to IPv4 header, MSS can be upgraded. */ | |
d02f51cb | 2561 | skb_increase_gso_size(shinfo, len_diff); |
6578171a | 2562 | /* Header must be checked, and gso_segs recomputed. */ |
d02f51cb DA |
2563 | shinfo->gso_type |= SKB_GSO_DODGY; |
2564 | shinfo->gso_segs = 0; | |
6578171a DB |
2565 | } |
2566 | ||
2567 | skb->protocol = htons(ETH_P_IP); | |
2568 | skb_clear_hash(skb); | |
2569 | ||
2570 | return 0; | |
2571 | } | |
2572 | ||
2573 | static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto) | |
2574 | { | |
2575 | __be16 from_proto = skb->protocol; | |
2576 | ||
2577 | if (from_proto == htons(ETH_P_IP) && | |
2578 | to_proto == htons(ETH_P_IPV6)) | |
2579 | return bpf_skb_proto_4_to_6(skb); | |
2580 | ||
2581 | if (from_proto == htons(ETH_P_IPV6) && | |
2582 | to_proto == htons(ETH_P_IP)) | |
2583 | return bpf_skb_proto_6_to_4(skb); | |
2584 | ||
2585 | return -ENOTSUPP; | |
2586 | } | |
2587 | ||
f3694e00 DB |
2588 | BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto, |
2589 | u64, flags) | |
6578171a | 2590 | { |
6578171a DB |
2591 | int ret; |
2592 | ||
2593 | if (unlikely(flags)) | |
2594 | return -EINVAL; | |
2595 | ||
2596 | /* General idea is that this helper does the basic groundwork | |
2597 | * needed for changing the protocol, and eBPF program fills the | |
2598 | * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace() | |
2599 | * and other helpers, rather than passing a raw buffer here. | |
2600 | * | |
2601 | * The rationale is to keep this minimal and without a need to | |
2602 | * deal with raw packet data. E.g. even if we were to pass buffers
2603 | * here, the program still needs to call the bpf_lX_csum_replace() | |
2604 | * helpers anyway. Plus, this way we keep also separation of | |
2605 | * concerns, since f.e. bpf_skb_store_bytes() should only take | |
2606 | * care of stores. | |
2607 | * | |
2608 | * Currently, additional options and extension header space are | |
2609 | * not supported, but flags register is reserved so we can adapt | |
2610 | * that. For offloads, we mark packet as dodgy, so that headers | |
2611 | * need to be verified first. | |
2612 | */ | |
2613 | ret = bpf_skb_proto_xlat(skb, proto); | |
6aaae2b6 | 2614 | bpf_compute_data_pointers(skb); |
6578171a DB |
2615 | return ret; |
2616 | } | |
2617 | ||
2618 | static const struct bpf_func_proto bpf_skb_change_proto_proto = { | |
2619 | .func = bpf_skb_change_proto, | |
2620 | .gpl_only = false, | |
2621 | .ret_type = RET_INTEGER, | |
2622 | .arg1_type = ARG_PTR_TO_CTX, | |
2623 | .arg2_type = ARG_ANYTHING, | |
2624 | .arg3_type = ARG_ANYTHING, | |
2625 | }; | |
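/* Example (sketch): the division of labour described above -- the
 * helper resizes and moves the headers for a 4-to-6 translation, and
 * the program must then write the actual IPv6 header itself (elided)
 * with bpf_skb_store_bytes() and friends. bpf_htons() comes from
 * bpf/bpf_endian.h.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

static int xlat_4_to_6(struct __sk_buff *skb)
{
        int ret = bpf_skb_change_proto(skb, bpf_htons(ETH_P_IPV6), 0);

        if (ret < 0)
                return ret;
        /* ... fill in the new IPv6 header here ... */
        return 0;
}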
2626 | ||
f3694e00 | 2627 | BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type) |
d2485c42 | 2628 | { |
d2485c42 | 2629 | /* We only allow a restricted subset to be changed for now. */ |
45c7fffa DB |
2630 | if (unlikely(!skb_pkt_type_ok(skb->pkt_type) || |
2631 | !skb_pkt_type_ok(pkt_type))) | |
d2485c42 DB |
2632 | return -EINVAL; |
2633 | ||
2634 | skb->pkt_type = pkt_type; | |
2635 | return 0; | |
2636 | } | |
2637 | ||
2638 | static const struct bpf_func_proto bpf_skb_change_type_proto = { | |
2639 | .func = bpf_skb_change_type, | |
2640 | .gpl_only = false, | |
2641 | .ret_type = RET_INTEGER, | |
2642 | .arg1_type = ARG_PTR_TO_CTX, | |
2643 | .arg2_type = ARG_ANYTHING, | |
2644 | }; | |
2645 | ||
2be7e212 DB |
2646 | static u32 bpf_skb_net_base_len(const struct sk_buff *skb) |
2647 | { | |
2648 | switch (skb->protocol) { | |
2649 | case htons(ETH_P_IP): | |
2650 | return sizeof(struct iphdr); | |
2651 | case htons(ETH_P_IPV6): | |
2652 | return sizeof(struct ipv6hdr); | |
2653 | default: | |
2654 | return ~0U; | |
2655 | } | |
2656 | } | |
2657 | ||
2658 | static int bpf_skb_net_grow(struct sk_buff *skb, u32 len_diff) | |
2659 | { | |
2660 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); | |
2661 | int ret; | |
2662 | ||
d02f51cb DA |
2663 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ |
2664 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | |
2665 | return -ENOTSUPP; | |
2666 | ||
2be7e212 DB |
2667 | ret = skb_cow(skb, len_diff); |
2668 | if (unlikely(ret < 0)) | |
2669 | return ret; | |
2670 | ||
2671 | ret = bpf_skb_net_hdr_push(skb, off, len_diff); | |
2672 | if (unlikely(ret < 0)) | |
2673 | return ret; | |
2674 | ||
2675 | if (skb_is_gso(skb)) { | |
d02f51cb DA |
2676 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
2677 | ||
2be7e212 | 2678 | /* Due to header grow, MSS needs to be downgraded. */ |
d02f51cb | 2679 | skb_decrease_gso_size(shinfo, len_diff); |
2be7e212 | 2680 | /* Header must be checked, and gso_segs recomputed. */ |
d02f51cb DA |
2681 | shinfo->gso_type |= SKB_GSO_DODGY; |
2682 | shinfo->gso_segs = 0; | |
2be7e212 DB |
2683 | } |
2684 | ||
2685 | return 0; | |
2686 | } | |
2687 | ||
2688 | static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff) | |
2689 | { | |
2690 | u32 off = skb_mac_header_len(skb) + bpf_skb_net_base_len(skb); | |
2691 | int ret; | |
2692 | ||
d02f51cb DA |
2693 | /* SCTP uses GSO_BY_FRAGS, thus cannot adjust it. */ |
2694 | if (skb_is_gso(skb) && unlikely(skb_is_gso_sctp(skb))) | |
2695 | return -ENOTSUPP; | |
2696 | ||
2be7e212 DB |
2697 | ret = skb_unclone(skb, GFP_ATOMIC); |
2698 | if (unlikely(ret < 0)) | |
2699 | return ret; | |
2700 | ||
2701 | ret = bpf_skb_net_hdr_pop(skb, off, len_diff); | |
2702 | if (unlikely(ret < 0)) | |
2703 | return ret; | |
2704 | ||
2705 | if (skb_is_gso(skb)) { | |
d02f51cb DA |
2706 | struct skb_shared_info *shinfo = skb_shinfo(skb); |
2707 | ||
2be7e212 | 2708 | /* Due to header shrink, MSS can be upgraded. */ |
d02f51cb | 2709 | skb_increase_gso_size(shinfo, len_diff); |
2be7e212 | 2710 | /* Header must be checked, and gso_segs recomputed. */ |
d02f51cb DA |
2711 | shinfo->gso_type |= SKB_GSO_DODGY; |
2712 | shinfo->gso_segs = 0; | |
2be7e212 DB |
2713 | } |
2714 | ||
2715 | return 0; | |
2716 | } | |
2717 | ||
2718 | static u32 __bpf_skb_max_len(const struct sk_buff *skb) | |
2719 | { | |
0c6bc6e5 JF |
2720 | return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len : |
2721 | SKB_MAX_ALLOC; | |
2be7e212 DB |
2722 | } |
2723 | ||
2724 | static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff) | |
2725 | { | |
2726 | bool trans_same = skb->transport_header == skb->network_header; | |
2727 | u32 len_cur, len_diff_abs = abs(len_diff); | |
2728 | u32 len_min = bpf_skb_net_base_len(skb); | |
2729 | u32 len_max = __bpf_skb_max_len(skb); | |
2730 | __be16 proto = skb->protocol; | |
2731 | bool shrink = len_diff < 0; | |
2732 | int ret; | |
2733 | ||
2734 | if (unlikely(len_diff_abs > 0xfffU)) | |
2735 | return -EFAULT; | |
2736 | if (unlikely(proto != htons(ETH_P_IP) && | |
2737 | proto != htons(ETH_P_IPV6))) | |
2738 | return -ENOTSUPP; | |
2739 | ||
2740 | len_cur = skb->len - skb_network_offset(skb); | |
2741 | if (skb_transport_header_was_set(skb) && !trans_same) | |
2742 | len_cur = skb_network_header_len(skb); | |
2743 | if ((shrink && (len_diff_abs >= len_cur || | |
2744 | len_cur - len_diff_abs < len_min)) || | |
2745 | (!shrink && (skb->len + len_diff_abs > len_max && | |
2746 | !skb_is_gso(skb)))) | |
2747 | return -ENOTSUPP; | |
2748 | ||
2749 | ret = shrink ? bpf_skb_net_shrink(skb, len_diff_abs) : | |
2750 | bpf_skb_net_grow(skb, len_diff_abs); | |
2751 | ||
6aaae2b6 | 2752 | bpf_compute_data_pointers(skb); |
e4a6a342 | 2753 | return ret; |
2be7e212 DB |
2754 | } |
2755 | ||
2756 | BPF_CALL_4(bpf_skb_adjust_room, struct sk_buff *, skb, s32, len_diff, | |
2757 | u32, mode, u64, flags) | |
2758 | { | |
2759 | if (unlikely(flags)) | |
2760 | return -EINVAL; | |
2761 | if (likely(mode == BPF_ADJ_ROOM_NET)) | |
2762 | return bpf_skb_adjust_net(skb, len_diff); | |
2763 | ||
2764 | return -ENOTSUPP; | |
2765 | } | |
2766 | ||
2767 | static const struct bpf_func_proto bpf_skb_adjust_room_proto = { | |
2768 | .func = bpf_skb_adjust_room, | |
2769 | .gpl_only = false, | |
2770 | .ret_type = RET_INTEGER, | |
2771 | .arg1_type = ARG_PTR_TO_CTX, | |
2772 | .arg2_type = ARG_ANYTHING, | |
2773 | .arg3_type = ARG_ANYTHING, | |
2774 | .arg4_type = ARG_ANYTHING, | |
2775 | }; | |
2776 | ||
5293efe6 DB |
2777 | static u32 __bpf_skb_min_len(const struct sk_buff *skb) |
2778 | { | |
2779 | u32 min_len = skb_network_offset(skb); | |
2780 | ||
2781 | if (skb_transport_header_was_set(skb)) | |
2782 | min_len = skb_transport_offset(skb); | |
2783 | if (skb->ip_summed == CHECKSUM_PARTIAL) | |
2784 | min_len = skb_checksum_start_offset(skb) + | |
2785 | skb->csum_offset + sizeof(__sum16); | |
2786 | return min_len; | |
2787 | } | |
2788 | ||
5293efe6 DB |
2789 | static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) |
2790 | { | |
2791 | unsigned int old_len = skb->len; | |
2792 | int ret; | |
2793 | ||
2794 | ret = __skb_grow_rcsum(skb, new_len); | |
2795 | if (!ret) | |
2796 | memset(skb->data + old_len, 0, new_len - old_len); | |
2797 | return ret; | |
2798 | } | |
2799 | ||
2800 | static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) | |
2801 | { | |
2802 | return __skb_trim_rcsum(skb, new_len); | |
2803 | } | |
2804 | ||
0ea488ff JF |
2805 | static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len, |
2806 | u64 flags) | |
5293efe6 | 2807 | { |
5293efe6 DB |
2808 | u32 max_len = __bpf_skb_max_len(skb); |
2809 | u32 min_len = __bpf_skb_min_len(skb); | |
5293efe6 DB |
2810 | int ret; |
2811 | ||
2812 | if (unlikely(flags || new_len > max_len || new_len < min_len)) | |
2813 | return -EINVAL; | |
2814 | if (skb->encapsulation) | |
2815 | return -ENOTSUPP; | |
2816 | ||
2817 | /* The basic idea of this helper is that it's performing the | |
2818 | * needed work to either grow or trim an skb, and eBPF program | |
2819 | * rewrites the rest via helpers like bpf_skb_store_bytes(), | |
2820 | * bpf_lX_csum_replace() and others rather than passing a raw | |
2821 | * buffer here. This one is a slow path helper and intended | |
2822 | * for replies with control messages. | |
2823 | * | |
2824 | * Like in bpf_skb_change_proto(), we want to keep this rather | |
2825 | * minimal and without protocol specifics so that we are able | |
2826 | * to separate concerns as in bpf_skb_store_bytes() should only | |
2827 | * be the one responsible for writing buffers. | |
2828 | * | |
2829 | * It's really expected to be a slow path operation here for | |
2830 | * control message replies, so we're implicitly linearizing, | |
2831 | * uncloning and dropping offloads from the skb by this.
2832 | */ | |
2833 | ret = __bpf_try_make_writable(skb, skb->len); | |
2834 | if (!ret) { | |
2835 | if (new_len > skb->len) | |
2836 | ret = bpf_skb_grow_rcsum(skb, new_len); | |
2837 | else if (new_len < skb->len) | |
2838 | ret = bpf_skb_trim_rcsum(skb, new_len); | |
2839 | if (!ret && skb_is_gso(skb)) | |
2840 | skb_gso_reset(skb); | |
2841 | } | |
0ea488ff JF |
2842 | return ret; |
2843 | } | |
2844 | ||
2845 | BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, | |
2846 | u64, flags) | |
2847 | { | |
2848 | int ret = __bpf_skb_change_tail(skb, new_len, flags); | |
5293efe6 | 2849 | |
6aaae2b6 | 2850 | bpf_compute_data_pointers(skb); |
5293efe6 DB |
2851 | return ret; |
2852 | } | |
2853 | ||
2854 | static const struct bpf_func_proto bpf_skb_change_tail_proto = { | |
2855 | .func = bpf_skb_change_tail, | |
2856 | .gpl_only = false, | |
2857 | .ret_type = RET_INTEGER, | |
2858 | .arg1_type = ARG_PTR_TO_CTX, | |
2859 | .arg2_type = ARG_ANYTHING, | |
2860 | .arg3_type = ARG_ANYTHING, | |
2861 | }; | |
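/* Example (sketch): the slow-path use case named above -- trimming a
 * packet down to headers only before turning it into a small control
 * reply. Sizes are illustrative.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <bpf/bpf_helpers.h>

static int trim_to_headers(struct __sk_buff *skb)
{
        __u32 new_len = ETH_HLEN + sizeof(struct iphdr) +
                        sizeof(struct icmphdr);

        if (skb->len <= new_len)
                return 0;
        return bpf_skb_change_tail(skb, new_len, 0);
}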
2862 | ||
0ea488ff | 2863 | BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len, |
3a0af8fd | 2864 | u64, flags) |
0ea488ff JF |
2865 | { |
2866 | int ret = __bpf_skb_change_tail(skb, new_len, flags); | |
2867 | ||
2868 | bpf_compute_data_end_sk_skb(skb); | |
2869 | return ret; | |
2870 | } | |
2871 | ||
2872 | static const struct bpf_func_proto sk_skb_change_tail_proto = { | |
2873 | .func = sk_skb_change_tail, | |
2874 | .gpl_only = false, | |
2875 | .ret_type = RET_INTEGER, | |
2876 | .arg1_type = ARG_PTR_TO_CTX, | |
2877 | .arg2_type = ARG_ANYTHING, | |
2878 | .arg3_type = ARG_ANYTHING, | |
2879 | }; | |
2880 | ||
2881 | static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room, | |
2882 | u64 flags) | |
3a0af8fd TG |
2883 | { |
2884 | u32 max_len = __bpf_skb_max_len(skb); | |
2885 | u32 new_len = skb->len + head_room; | |
2886 | int ret; | |
2887 | ||
2888 | if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || | |
2889 | new_len < skb->len)) | |
2890 | return -EINVAL; | |
2891 | ||
2892 | ret = skb_cow(skb, head_room); | |
2893 | if (likely(!ret)) { | |
2894 | /* The idea for this helper is that we currently only
2895 | * allow expanding the mac header. This means that
2896 | * skb->protocol, network header, etc. stay as is.
2897 | * Compared to bpf_skb_change_tail(), we're more
2898 | * flexible due to not needing to linearize or
2899 | * reset GSO. The intention is for this helper to be
2900 | * used by an L3 skb that needs to push a mac header
2901 | * for redirection into an L2 device.
2902 | */ | |
2903 | __skb_push(skb, head_room); | |
2904 | memset(skb->data, 0, head_room); | |
2905 | skb_reset_mac_header(skb); | |
2906 | } | |
2907 | ||
0ea488ff JF |
2908 | return ret; |
2909 | } | |
2910 | ||
2911 | BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room, | |
2912 | u64, flags) | |
2913 | { | |
2914 | int ret = __bpf_skb_change_head(skb, head_room, flags); | |
2915 | ||
6aaae2b6 | 2916 | bpf_compute_data_pointers(skb); |
0ea488ff | 2917 | return ret; |
3a0af8fd TG |
2918 | } |
2919 | ||
2920 | static const struct bpf_func_proto bpf_skb_change_head_proto = { | |
2921 | .func = bpf_skb_change_head, | |
2922 | .gpl_only = false, | |
2923 | .ret_type = RET_INTEGER, | |
2924 | .arg1_type = ARG_PTR_TO_CTX, | |
2925 | .arg2_type = ARG_ANYTHING, | |
2926 | .arg3_type = ARG_ANYTHING, | |
2927 | }; | |
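/* Example (sketch): the intended L3-to-L2 use, as it might appear in a
 * program type where this helper is exposed (e.g. lwt_xmit) -- make
 * room for an Ethernet header on a mac-less skb, fill it in (elided),
 * then redirect into an L2 device.
 */
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>

static int push_eth_and_redirect(struct __sk_buff *skb, __u32 ifindex)
{
        int ret = bpf_skb_change_head(skb, ETH_HLEN, 0);

        if (ret < 0)
                return ret;
        /* ... write dst/src MAC and h_proto at offset 0 ... */
        return bpf_redirect(ifindex, 0);
}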
2928 | ||
0ea488ff JF |
2929 | BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room, |
2930 | u64, flags) | |
2931 | { | |
2932 | int ret = __bpf_skb_change_head(skb, head_room, flags); | |
2933 | ||
2934 | bpf_compute_data_end_sk_skb(skb); | |
2935 | return ret; | |
2936 | } | |
2937 | ||
2938 | static const struct bpf_func_proto sk_skb_change_head_proto = { | |
2939 | .func = sk_skb_change_head, | |
2940 | .gpl_only = false, | |
2941 | .ret_type = RET_INTEGER, | |
2942 | .arg1_type = ARG_PTR_TO_CTX, | |
2943 | .arg2_type = ARG_ANYTHING, | |
2944 | .arg3_type = ARG_ANYTHING, | |
2945 | }; | |
de8f3a83 DB |
2946 | static unsigned long xdp_get_metalen(const struct xdp_buff *xdp) |
2947 | { | |
2948 | return xdp_data_meta_unsupported(xdp) ? 0 : | |
2949 | xdp->data - xdp->data_meta; | |
2950 | } | |
2951 | ||
17bedab2 MKL |
2952 | BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset) |
2953 | { | |
6dfb970d | 2954 | void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); |
de8f3a83 | 2955 | unsigned long metalen = xdp_get_metalen(xdp); |
97e19cce | 2956 | void *data_start = xdp_frame_end + metalen; |
17bedab2 MKL |
2957 | void *data = xdp->data + offset; |
2958 | ||
de8f3a83 | 2959 | if (unlikely(data < data_start || |
17bedab2 MKL |
2960 | data > xdp->data_end - ETH_HLEN)) |
2961 | return -EINVAL; | |
2962 | ||
de8f3a83 DB |
2963 | if (metalen) |
2964 | memmove(xdp->data_meta + offset, | |
2965 | xdp->data_meta, metalen); | |
2966 | xdp->data_meta += offset; | |
17bedab2 MKL |
2967 | xdp->data = data; |
2968 | ||
2969 | return 0; | |
2970 | } | |
2971 | ||
2972 | static const struct bpf_func_proto bpf_xdp_adjust_head_proto = { | |
2973 | .func = bpf_xdp_adjust_head, | |
2974 | .gpl_only = false, | |
2975 | .ret_type = RET_INTEGER, | |
2976 | .arg1_type = ARG_PTR_TO_CTX, | |
2977 | .arg2_type = ARG_ANYTHING, | |
2978 | }; | |
2979 | ||
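To make the bounds checks above concrete, a sketch of an XDP program growing the frame for a hypothetical 8-byte shim header; the shim contents are left unwritten for brevity, and the names are illustrative:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include "bpf_helpers.h"

#define SHIM_LEN 8	/* hypothetical encap shim */

SEC("xdp")
int xdp_push_shim(struct xdp_md *ctx)
{
	void *data, *data_end;
	struct ethhdr *old_eth, *new_eth;

	/* a negative offset grows the packet towards data_hard_start;
	 * the helper rejects moves into the xdp_frame scratch area */
	if (bpf_xdp_adjust_head(ctx, -SHIM_LEN))
		return XDP_ABORTED;

	/* pointers must be reloaded and re-validated after any adjust */
	data = (void *)(long)ctx->data;
	data_end = (void *)(long)ctx->data_end;

	new_eth = data;
	old_eth = data + SHIM_LEN;
	if ((void *)(old_eth + 1) > data_end)
		return XDP_ABORTED;

	/* move the mac header to the new front of the frame; the
	 * SHIM_LEN bytes behind it are left for the omitted shim */
	__builtin_memcpy(new_eth, old_eth, sizeof(*new_eth));
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";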
b32cc5b9 NS |
2980 | BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) |
2981 | { | |
2982 | void *data_end = xdp->data_end + offset; | |
2983 | ||
2984 | /* only shrinking is allowed for now. */ | |
2985 | if (unlikely(offset >= 0)) | |
2986 | return -EINVAL; | |
2987 | ||
2988 | if (unlikely(data_end < xdp->data + ETH_HLEN)) | |
2989 | return -EINVAL; | |
2990 | ||
2991 | xdp->data_end = data_end; | |
2992 | ||
2993 | return 0; | |
2994 | } | |
2995 | ||
2996 | static const struct bpf_func_proto bpf_xdp_adjust_tail_proto = { | |
2997 | .func = bpf_xdp_adjust_tail, | |
2998 | .gpl_only = false, | |
2999 | .ret_type = RET_INTEGER, | |
3000 | .arg1_type = ARG_PTR_TO_CTX, | |
3001 | .arg2_type = ARG_ANYTHING, | |
3002 | }; | |
3003 | ||
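Since only shrinking is accepted here, a typical use is trimming a frame to a snapshot length before passing it on. A sketch, with an arbitrary SNAP_LEN:

#include <linux/bpf.h>
#include "bpf_helpers.h"

#define SNAP_LEN 256	/* arbitrary sampling snapshot */

SEC("xdp")
int xdp_trim_tail(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;
	int len = data_end - data;

	/* only negative offsets (shrinking) pass the check above */
	if (len > SNAP_LEN && bpf_xdp_adjust_tail(ctx, SNAP_LEN - len))
		return XDP_ABORTED;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";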
de8f3a83 DB |
3004 | BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset) |
3005 | { | |
97e19cce | 3006 | void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); |
de8f3a83 DB |
3007 | void *meta = xdp->data_meta + offset; |
3008 | unsigned long metalen = xdp->data - meta; | |
3009 | ||
3010 | if (xdp_data_meta_unsupported(xdp)) | |
3011 | return -ENOTSUPP; | |
97e19cce | 3012 | if (unlikely(meta < xdp_frame_end || |
de8f3a83 DB |
3013 | meta > xdp->data)) |
3014 | return -EINVAL; | |
3015 | if (unlikely((metalen & (sizeof(__u32) - 1)) || | |
3016 | (metalen > 32))) | |
3017 | return -EACCES; | |
3018 | ||
3019 | xdp->data_meta = meta; | |
3020 | ||
3021 | return 0; | |
3022 | } | |
3023 | ||
3024 | static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = { | |
3025 | .func = bpf_xdp_adjust_meta, | |
3026 | .gpl_only = false, | |
3027 | .ret_type = RET_INTEGER, | |
3028 | .arg1_type = ARG_PTR_TO_CTX, | |
3029 | .arg2_type = ARG_ANYTHING, | |
3030 | }; | |
3031 | ||
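A sketch of the intended use of the metadata area: an XDP program reserves a few bytes in front of xdp->data and stashes a value there for a later consumer (for example a tc classifier reading __sk_buff->data_meta). The value and names are made up:

#include <linux/bpf.h>
#include "bpf_helpers.h"

SEC("xdp")
int xdp_stash_mark(struct xdp_md *ctx)
{
	__u32 *mark;

	/* reserve 4 bytes in front of xdp->data; the helper insists
	 * the metadata stays u32-aligned and at most 32 bytes long */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*mark)))
		return XDP_PASS;	/* e.g. -ENOTSUPP on this driver */

	mark = (void *)(long)ctx->data_meta;
	if ((void *)(mark + 1) > (void *)(long)ctx->data)
		return XDP_PASS;

	*mark = 0x2a;	/* arbitrary value for a later tc classifier */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";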
11393cc9 JF |
3032 | static int __bpf_tx_xdp(struct net_device *dev, |
3033 | struct bpf_map *map, | |
3034 | struct xdp_buff *xdp, | |
3035 | u32 index) | |
814abfab | 3036 | { |
44fa2dbd | 3037 | struct xdp_frame *xdpf; |
d8d7218a | 3038 | int err, sent; |
11393cc9 JF |
3039 | |
3040 | if (!dev->netdev_ops->ndo_xdp_xmit) { | |
11393cc9 | 3041 | return -EOPNOTSUPP; |
814abfab | 3042 | } |
11393cc9 | 3043 | |
d8d7218a TM |
3044 | err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data); |
3045 | if (unlikely(err)) | |
3046 | return err; | |
3047 | ||
44fa2dbd JDB |
3048 | xdpf = convert_to_xdp_frame(xdp); |
3049 | if (unlikely(!xdpf)) | |
3050 | return -EOVERFLOW; | |
3051 | ||
1e67575a | 3052 | sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, XDP_XMIT_FLUSH); |
735fc405 JDB |
3053 | if (sent <= 0) |
3054 | return sent; | |
9c270af3 JDB |
3055 | return 0; |
3056 | } | |
3057 | ||
47b123ed JDB |
3058 | static noinline int |
3059 | xdp_do_redirect_slow(struct net_device *dev, struct xdp_buff *xdp, | |
3060 | struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri) | |
3061 | { | |
3062 | struct net_device *fwd; | |
3063 | u32 index = ri->ifindex; | |
3064 | int err; | |
3065 | ||
3066 | fwd = dev_get_by_index_rcu(dev_net(dev), index); | |
3067 | ri->ifindex = 0; | |
3068 | if (unlikely(!fwd)) { | |
3069 | err = -EINVAL; | |
3070 | goto err; | |
3071 | } | |
3072 | ||
3073 | err = __bpf_tx_xdp(fwd, NULL, xdp, 0); | |
3074 | if (unlikely(err)) | |
3075 | goto err; | |
3076 | ||
3077 | _trace_xdp_redirect(dev, xdp_prog, index); | |
3078 | return 0; | |
3079 | err: | |
3080 | _trace_xdp_redirect_err(dev, xdp_prog, index, err); | |
3081 | return err; | |
3082 | } | |
3083 | ||
9c270af3 JDB |
3084 | static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, |
3085 | struct bpf_map *map, | |
3086 | struct xdp_buff *xdp, | |
3087 | u32 index) | |
3088 | { | |
3089 | int err; | |
3090 | ||
1b1a251c BT |
3091 | switch (map->map_type) { |
3092 | case BPF_MAP_TYPE_DEVMAP: { | |
67f29e07 | 3093 | struct bpf_dtab_netdev *dst = fwd; |
9c270af3 | 3094 | |
38edddb8 | 3095 | err = dev_map_enqueue(dst, xdp, dev_rx); |
e1302542 | 3096 | if (unlikely(err)) |
9c270af3 | 3097 | return err; |
11393cc9 | 3098 | __dev_map_insert_ctx(map, index); |
1b1a251c BT |
3099 | break; |
3100 | } | |
3101 | case BPF_MAP_TYPE_CPUMAP: { | |
9c270af3 JDB |
3102 | struct bpf_cpu_map_entry *rcpu = fwd; |
3103 | ||
3104 | err = cpu_map_enqueue(rcpu, xdp, dev_rx); | |
e1302542 | 3105 | if (unlikely(err)) |
9c270af3 JDB |
3106 | return err; |
3107 | __cpu_map_insert_ctx(map, index); | |
1b1a251c BT |
3108 | break; |
3109 | } | |
3110 | case BPF_MAP_TYPE_XSKMAP: { | |
3111 | struct xdp_sock *xs = fwd; | |
3112 | ||
3113 | err = __xsk_map_redirect(map, xdp, xs); | |
3114 | return err; | |
3115 | } | |
3116 | default: | |
3117 | break; | |
9c270af3 | 3118 | } |
e4a8e817 | 3119 | return 0; |
814abfab JF |
3120 | } |
3121 | ||
11393cc9 JF |
3122 | void xdp_do_flush_map(void) |
3123 | { | |
0b19cc0a | 3124 | struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); |
11393cc9 JF |
3125 | struct bpf_map *map = ri->map_to_flush; |
3126 | ||
11393cc9 | 3127 | ri->map_to_flush = NULL; |
9c270af3 JDB |
3128 | if (map) { |
3129 | switch (map->map_type) { | |
3130 | case BPF_MAP_TYPE_DEVMAP: | |
3131 | __dev_map_flush(map); | |
3132 | break; | |
3133 | case BPF_MAP_TYPE_CPUMAP: | |
3134 | __cpu_map_flush(map); | |
3135 | break; | |
1b1a251c BT |
3136 | case BPF_MAP_TYPE_XSKMAP: |
3137 | __xsk_map_flush(map); | |
3138 | break; | |
9c270af3 JDB |
3139 | default: |
3140 | break; | |
3141 | } | |
3142 | } | |
11393cc9 JF |
3143 | } |
3144 | EXPORT_SYMBOL_GPL(xdp_do_flush_map); | |
3145 | ||
2a68d85f | 3146 | static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index) |
9c270af3 JDB |
3147 | { |
3148 | switch (map->map_type) { | |
3149 | case BPF_MAP_TYPE_DEVMAP: | |
3150 | return __dev_map_lookup_elem(map, index); | |
3151 | case BPF_MAP_TYPE_CPUMAP: | |
3152 | return __cpu_map_lookup_elem(map, index); | |
1b1a251c BT |
3153 | case BPF_MAP_TYPE_XSKMAP: |
3154 | return __xsk_map_lookup_elem(map, index); | |
9c270af3 JDB |
3155 | default: |
3156 | return NULL; | |
3157 | } | |
3158 | } | |
3159 | ||
f6069b9a | 3160 | void bpf_clear_redirect_map(struct bpf_map *map) |
7c300131 | 3161 | { |
f6069b9a DB |
3162 | struct bpf_redirect_info *ri; |
3163 | int cpu; | |
3164 | ||
3165 | for_each_possible_cpu(cpu) { | |
3166 | ri = per_cpu_ptr(&bpf_redirect_info, cpu); | |
3167 | /* Avoid polluting a remote cacheline with writes if | |
3168 | * they are not needed. Once we pass this test, we | |
3169 | * still need the cmpxchg() to make sure ri->map | |
3170 | * hasn't been changed in the meantime by a remote CPU. | |
3171 | */ | |
3172 | if (unlikely(READ_ONCE(ri->map) == map)) | |
3173 | cmpxchg(&ri->map, map, NULL); | |
3174 | } | |
7c300131 DB |
3175 | } |
3176 | ||
e4a8e817 | 3177 | static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, |
47b123ed JDB |
3178 | struct bpf_prog *xdp_prog, struct bpf_map *map, |
3179 | struct bpf_redirect_info *ri) | |
97f91a7c | 3180 | { |
11393cc9 | 3181 | u32 index = ri->ifindex; |
9c270af3 | 3182 | void *fwd = NULL; |
4c03bdd7 | 3183 | int err; |
97f91a7c JF |
3184 | |
3185 | ri->ifindex = 0; | |
f6069b9a | 3186 | WRITE_ONCE(ri->map, NULL); |
97f91a7c | 3187 | |
9c270af3 | 3188 | fwd = __xdp_map_lookup_elem(map, index); |
2a68d85f | 3189 | if (unlikely(!fwd)) { |
4c03bdd7 | 3190 | err = -EINVAL; |
f5836ca5 | 3191 | goto err; |
4c03bdd7 | 3192 | } |
e1302542 | 3193 | if (ri->map_to_flush && unlikely(ri->map_to_flush != map)) |
11393cc9 JF |
3194 | xdp_do_flush_map(); |
3195 | ||
9c270af3 | 3196 | err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index); |
f5836ca5 JDB |
3197 | if (unlikely(err)) |
3198 | goto err; | |
3199 | ||
3200 | ri->map_to_flush = map; | |
59a30896 | 3201 | _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index); |
f5836ca5 JDB |
3202 | return 0; |
3203 | err: | |
59a30896 | 3204 | _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err); |
97f91a7c JF |
3205 | return err; |
3206 | } | |
3207 | ||
5acaee0a JF |
3208 | int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, |
3209 | struct bpf_prog *xdp_prog) | |
814abfab | 3210 | { |
0b19cc0a | 3211 | struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); |
f6069b9a | 3212 | struct bpf_map *map = READ_ONCE(ri->map); |
814abfab | 3213 | |
2a68d85f | 3214 | if (likely(map)) |
47b123ed | 3215 | return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri); |
97f91a7c | 3216 | |
47b123ed | 3217 | return xdp_do_redirect_slow(dev, xdp, xdp_prog, ri); |
814abfab JF |
3218 | } |
3219 | EXPORT_SYMBOL_GPL(xdp_do_redirect); | |
3220 | ||
c060bc61 XS |
3221 | static int xdp_do_generic_redirect_map(struct net_device *dev, |
3222 | struct sk_buff *skb, | |
02671e23 | 3223 | struct xdp_buff *xdp, |
f6069b9a DB |
3224 | struct bpf_prog *xdp_prog, |
3225 | struct bpf_map *map) | |
6103aa96 | 3226 | { |
0b19cc0a | 3227 | struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); |
eb48d682 | 3228 | u32 index = ri->ifindex; |
02671e23 | 3229 | void *fwd = NULL; |
2facaad6 | 3230 | int err = 0; |
6103aa96 | 3231 | |
6103aa96 | 3232 | ri->ifindex = 0; |
f6069b9a | 3233 | WRITE_ONCE(ri->map, NULL); |
96c5508e | 3234 | |
9c270af3 | 3235 | fwd = __xdp_map_lookup_elem(map, index); |
2facaad6 JDB |
3236 | if (unlikely(!fwd)) { |
3237 | err = -EINVAL; | |
f5836ca5 | 3238 | goto err; |
6103aa96 JF |
3239 | } |
3240 | ||
9c270af3 | 3241 | if (map->map_type == BPF_MAP_TYPE_DEVMAP) { |
6d5fc195 TM |
3242 | struct bpf_dtab_netdev *dst = fwd; |
3243 | ||
3244 | err = dev_map_generic_redirect(dst, skb, xdp_prog); | |
3245 | if (unlikely(err)) | |
9c270af3 | 3246 | goto err; |
02671e23 BT |
3247 | } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { |
3248 | struct xdp_sock *xs = fwd; | |
3249 | ||
3250 | err = xsk_generic_rcv(xs, xdp); | |
3251 | if (err) | |
3252 | goto err; | |
3253 | consume_skb(skb); | |
9c270af3 JDB |
3254 | } else { |
3255 | /* TODO: Handle BPF_MAP_TYPE_CPUMAP */ | |
3256 | err = -EBADRQC; | |
f5836ca5 | 3257 | goto err; |
2facaad6 | 3258 | } |
6103aa96 | 3259 | |
9c270af3 JDB |
3260 | _trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index); |
3261 | return 0; | |
3262 | err: | |
3263 | _trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err); | |
3264 | return err; | |
3265 | } | |
3266 | ||
3267 | int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, | |
02671e23 | 3268 | struct xdp_buff *xdp, struct bpf_prog *xdp_prog) |
9c270af3 | 3269 | { |
0b19cc0a | 3270 | struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); |
f6069b9a | 3271 | struct bpf_map *map = READ_ONCE(ri->map); |
9c270af3 JDB |
3272 | u32 index = ri->ifindex; |
3273 | struct net_device *fwd; | |
3274 | int err = 0; | |
3275 | ||
f6069b9a DB |
3276 | if (map) |
3277 | return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, | |
3278 | map); | |
9c270af3 JDB |
3279 | ri->ifindex = 0; |
3280 | fwd = dev_get_by_index_rcu(dev_net(dev), index); | |
3281 | if (unlikely(!fwd)) { | |
3282 | err = -EINVAL; | |
f5836ca5 | 3283 | goto err; |
2facaad6 JDB |
3284 | } |
3285 | ||
d8d7218a TM |
3286 | err = xdp_ok_fwd_dev(fwd, skb->len); |
3287 | if (unlikely(err)) | |
9c270af3 JDB |
3288 | goto err; |
3289 | ||
2facaad6 | 3290 | skb->dev = fwd; |
9c270af3 | 3291 | _trace_xdp_redirect(dev, xdp_prog, index); |
02671e23 | 3292 | generic_xdp_tx(skb, xdp_prog); |
f5836ca5 JDB |
3293 | return 0; |
3294 | err: | |
9c270af3 | 3295 | _trace_xdp_redirect_err(dev, xdp_prog, index, err); |
2facaad6 | 3296 | return err; |
6103aa96 JF |
3297 | } |
3298 | EXPORT_SYMBOL_GPL(xdp_do_generic_redirect); | |
3299 | ||
814abfab JF |
3300 | BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) |
3301 | { | |
0b19cc0a | 3302 | struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); |
814abfab JF |
3303 | |
3304 | if (unlikely(flags)) | |
3305 | return XDP_ABORTED; | |
3306 | ||
3307 | ri->ifindex = ifindex; | |
3308 | ri->flags = flags; | |
f6069b9a | 3309 | WRITE_ONCE(ri->map, NULL); |
e4a8e817 | 3310 | |
814abfab JF |
3311 | return XDP_REDIRECT; |
3312 | } | |
3313 | ||
3314 | static const struct bpf_func_proto bpf_xdp_redirect_proto = { | |
3315 | .func = bpf_xdp_redirect, | |
3316 | .gpl_only = false, | |
3317 | .ret_type = RET_INTEGER, | |
3318 | .arg1_type = ARG_ANYTHING, | |
3319 | .arg2_type = ARG_ANYTHING, | |
3320 | }; | |
3321 | ||
f6069b9a DB |
3322 | BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex, |
3323 | u64, flags) | |
e4a8e817 | 3324 | { |
0b19cc0a | 3325 | struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); |
e4a8e817 DB |
3326 | |
3327 | if (unlikely(flags)) | |
3328 | return XDP_ABORTED; | |
3329 | ||
3330 | ri->ifindex = ifindex; | |
3331 | ri->flags = flags; | |
f6069b9a | 3332 | WRITE_ONCE(ri->map, map); |
e4a8e817 DB |
3333 | |
3334 | return XDP_REDIRECT; | |
3335 | } | |
3336 | ||
3337 | static const struct bpf_func_proto bpf_xdp_redirect_map_proto = { | |
3338 | .func = bpf_xdp_redirect_map, | |
3339 | .gpl_only = false, | |
3340 | .ret_type = RET_INTEGER, | |
3341 | .arg1_type = ARG_CONST_MAP_PTR, | |
3342 | .arg2_type = ARG_ANYTHING, | |
3343 | .arg3_type = ARG_ANYTHING, | |
3344 | }; | |
3345 | ||
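Tying the redirect plumbing together, a hedged sketch of the program side: a devmap whose slot 0 user space is assumed to fill with an egress ifindex, and an XDP program that hands every frame to it:

#include <linux/bpf.h>
#include "bpf_helpers.h"

/* hypothetical devmap; user space stores an egress ifindex in slot 0 */
struct bpf_map_def SEC("maps") tx_port = {
	.type		= BPF_MAP_TYPE_DEVMAP,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(__u32),
	.max_entries	= 1,
};

SEC("xdp")
int xdp_redirect_port(struct xdp_md *ctx)
{
	/* flags must be 0; on a hit this returns XDP_REDIRECT, and
	 * xdp_do_redirect() above later resolves and enqueues the
	 * frame, batched until xdp_do_flush_map() runs */
	return bpf_redirect_map(&tx_port, 0, 0);
}

char _license[] SEC("license") = "GPL";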
555c8a86 | 3346 | static unsigned long bpf_skb_copy(void *dst_buff, const void *skb, |
aa7145c1 | 3347 | unsigned long off, unsigned long len) |
555c8a86 | 3348 | { |
aa7145c1 | 3349 | void *ptr = skb_header_pointer(skb, off, len, dst_buff); |
555c8a86 DB |
3350 | |
3351 | if (unlikely(!ptr)) | |
3352 | return len; | |
3353 | if (ptr != dst_buff) | |
3354 | memcpy(dst_buff, ptr, len); | |
3355 | ||
3356 | return 0; | |
3357 | } | |
3358 | ||
f3694e00 DB |
3359 | BPF_CALL_5(bpf_skb_event_output, struct sk_buff *, skb, struct bpf_map *, map, |
3360 | u64, flags, void *, meta, u64, meta_size) | |
555c8a86 | 3361 | { |
555c8a86 | 3362 | u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32; |
555c8a86 DB |
3363 | |
3364 | if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) | |
3365 | return -EINVAL; | |
3366 | if (unlikely(skb_size > skb->len)) | |
3367 | return -EFAULT; | |
3368 | ||
3369 | return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, | |
3370 | bpf_skb_copy); | |
3371 | } | |
3372 | ||
3373 | static const struct bpf_func_proto bpf_skb_event_output_proto = { | |
3374 | .func = bpf_skb_event_output, | |
3375 | .gpl_only = true, | |
3376 | .ret_type = RET_INTEGER, | |
3377 | .arg1_type = ARG_PTR_TO_CTX, | |
3378 | .arg2_type = ARG_CONST_MAP_PTR, | |
3379 | .arg3_type = ARG_ANYTHING, | |
39f19ebb | 3380 | .arg4_type = ARG_PTR_TO_MEM, |
1728a4f2 | 3381 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
555c8a86 DB |
3382 | }; |
3383 | ||
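A sketch of the usual consumer of this helper: a tc classifier emitting a small meta struct plus the first bytes of the packet to a perf event array. Map size and snapshot length are arbitrary:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") events = {
	.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size	= sizeof(int),
	.value_size	= sizeof(__u32),
	.max_entries	= 64,	/* >= number of possible CPUs */
};

struct event {
	__u32 ifindex;
	__u32 len;
};

SEC("classifier")
int sample_skb(struct __sk_buff *skb)
{
	struct event ev = {
		.ifindex = skb->ifindex,
		.len	 = skb->len,
	};
	__u64 snap = skb->len < 64 ? skb->len : 64;
	/* the upper 32 bits of flags (BPF_F_CTXLEN_MASK) ask the
	 * helper to append that many packet bytes after the struct */
	__u64 flags = BPF_F_CURRENT_CPU | (snap << 32);

	bpf_perf_event_output(skb, &events, flags, &ev, sizeof(ev));
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";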
c6c33454 DB |
3384 | static unsigned short bpf_tunnel_key_af(u64 flags) |
3385 | { | |
3386 | return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET; | |
3387 | } | |
3388 | ||
f3694e00 DB |
3389 | BPF_CALL_4(bpf_skb_get_tunnel_key, struct sk_buff *, skb, struct bpf_tunnel_key *, to, |
3390 | u32, size, u64, flags) | |
d3aa45ce | 3391 | { |
c6c33454 DB |
3392 | const struct ip_tunnel_info *info = skb_tunnel_info(skb); |
3393 | u8 compat[sizeof(struct bpf_tunnel_key)]; | |
074f528e DB |
3394 | void *to_orig = to; |
3395 | int err; | |
d3aa45ce | 3396 | |
074f528e DB |
3397 | if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) { |
3398 | err = -EINVAL; | |
3399 | goto err_clear; | |
3400 | } | |
3401 | if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) { | |
3402 | err = -EPROTO; | |
3403 | goto err_clear; | |
3404 | } | |
c6c33454 | 3405 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { |
074f528e | 3406 | err = -EINVAL; |
c6c33454 | 3407 | switch (size) { |
4018ab18 | 3408 | case offsetof(struct bpf_tunnel_key, tunnel_label): |
c0e760c9 | 3409 | case offsetof(struct bpf_tunnel_key, tunnel_ext): |
4018ab18 | 3410 | goto set_compat; |
c6c33454 DB |
3411 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): |
3412 | /* Fix up deprecated structure layouts here, so we have | |
3413 | * a common path later on. | |
3414 | */ | |
3415 | if (ip_tunnel_info_af(info) != AF_INET) | |
074f528e | 3416 | goto err_clear; |
4018ab18 | 3417 | set_compat: |
c6c33454 DB |
3418 | to = (struct bpf_tunnel_key *)compat; |
3419 | break; | |
3420 | default: | |
074f528e | 3421 | goto err_clear; |
c6c33454 DB |
3422 | } |
3423 | } | |
d3aa45ce AS |
3424 | |
3425 | to->tunnel_id = be64_to_cpu(info->key.tun_id); | |
c6c33454 DB |
3426 | to->tunnel_tos = info->key.tos; |
3427 | to->tunnel_ttl = info->key.ttl; | |
1fbc2e0c | 3428 | to->tunnel_ext = 0; |
c6c33454 | 3429 | |
4018ab18 | 3430 | if (flags & BPF_F_TUNINFO_IPV6) { |
c6c33454 DB |
3431 | memcpy(to->remote_ipv6, &info->key.u.ipv6.src, |
3432 | sizeof(to->remote_ipv6)); | |
4018ab18 DB |
3433 | to->tunnel_label = be32_to_cpu(info->key.label); |
3434 | } else { | |
c6c33454 | 3435 | to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); |
1fbc2e0c DB |
3436 | memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); |
3437 | to->tunnel_label = 0; | |
4018ab18 | 3438 | } |
c6c33454 DB |
3439 | |
3440 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) | |
074f528e | 3441 | memcpy(to_orig, to, size); |
d3aa45ce AS |
3442 | |
3443 | return 0; | |
074f528e DB |
3444 | err_clear: |
3445 | memset(to_orig, 0, size); | |
3446 | return err; | |
d3aa45ce AS |
3447 | } |
3448 | ||
577c50aa | 3449 | static const struct bpf_func_proto bpf_skb_get_tunnel_key_proto = { |
d3aa45ce AS |
3450 | .func = bpf_skb_get_tunnel_key, |
3451 | .gpl_only = false, | |
3452 | .ret_type = RET_INTEGER, | |
3453 | .arg1_type = ARG_PTR_TO_CTX, | |
39f19ebb AS |
3454 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, |
3455 | .arg3_type = ARG_CONST_SIZE, | |
d3aa45ce AS |
3456 | .arg4_type = ARG_ANYTHING, |
3457 | }; | |
3458 | ||
f3694e00 | 3459 | BPF_CALL_3(bpf_skb_get_tunnel_opt, struct sk_buff *, skb, u8 *, to, u32, size) |
14ca0751 | 3460 | { |
14ca0751 | 3461 | const struct ip_tunnel_info *info = skb_tunnel_info(skb); |
074f528e | 3462 | int err; |
14ca0751 DB |
3463 | |
3464 | if (unlikely(!info || | |
074f528e DB |
3465 | !(info->key.tun_flags & TUNNEL_OPTIONS_PRESENT))) { |
3466 | err = -ENOENT; | |
3467 | goto err_clear; | |
3468 | } | |
3469 | if (unlikely(size < info->options_len)) { | |
3470 | err = -ENOMEM; | |
3471 | goto err_clear; | |
3472 | } | |
14ca0751 DB |
3473 | |
3474 | ip_tunnel_info_opts_get(to, info); | |
074f528e DB |
3475 | if (size > info->options_len) |
3476 | memset(to + info->options_len, 0, size - info->options_len); | |
14ca0751 DB |
3477 | |
3478 | return info->options_len; | |
074f528e DB |
3479 | err_clear: |
3480 | memset(to, 0, size); | |
3481 | return err; | |
14ca0751 DB |
3482 | } |
3483 | ||
3484 | static const struct bpf_func_proto bpf_skb_get_tunnel_opt_proto = { | |
3485 | .func = bpf_skb_get_tunnel_opt, | |
3486 | .gpl_only = false, | |
3487 | .ret_type = RET_INTEGER, | |
3488 | .arg1_type = ARG_PTR_TO_CTX, | |
39f19ebb AS |
3489 | .arg2_type = ARG_PTR_TO_UNINIT_MEM, |
3490 | .arg3_type = ARG_CONST_SIZE, | |
14ca0751 DB |
3491 | }; |
3492 | ||
d3aa45ce AS |
3493 | static struct metadata_dst __percpu *md_dst; |
3494 | ||
f3694e00 DB |
3495 | BPF_CALL_4(bpf_skb_set_tunnel_key, struct sk_buff *, skb, |
3496 | const struct bpf_tunnel_key *, from, u32, size, u64, flags) | |
d3aa45ce | 3497 | { |
d3aa45ce | 3498 | struct metadata_dst *md = this_cpu_ptr(md_dst); |
c6c33454 | 3499 | u8 compat[sizeof(struct bpf_tunnel_key)]; |
d3aa45ce AS |
3500 | struct ip_tunnel_info *info; |
3501 | ||
22080870 | 3502 | if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX | |
77a5196a | 3503 | BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER))) |
d3aa45ce | 3504 | return -EINVAL; |
c6c33454 DB |
3505 | if (unlikely(size != sizeof(struct bpf_tunnel_key))) { |
3506 | switch (size) { | |
4018ab18 | 3507 | case offsetof(struct bpf_tunnel_key, tunnel_label): |
c0e760c9 | 3508 | case offsetof(struct bpf_tunnel_key, tunnel_ext): |
c6c33454 DB |
3509 | case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): |
3510 | /* Fix up deprecated structure layouts here, so we have | |
3511 | * a common path later on. | |
3512 | */ | |
3513 | memcpy(compat, from, size); | |
3514 | memset(compat + size, 0, sizeof(compat) - size); | |
f3694e00 | 3515 | from = (const struct bpf_tunnel_key *) compat; |
c6c33454 DB |
3516 | break; |
3517 | default: | |
3518 | return -EINVAL; | |
3519 | } | |
3520 | } | |
c0e760c9 DB |
3521 | if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || |
3522 | from->tunnel_ext)) | |
4018ab18 | 3523 | return -EINVAL; |
d3aa45ce AS |
3524 | |
3525 | skb_dst_drop(skb); | |
3526 | dst_hold((struct dst_entry *) md); | |
3527 | skb_dst_set(skb, (struct dst_entry *) md); | |
3528 | ||
3529 | info = &md->u.tun_info; | |
5540fbf4 | 3530 | memset(info, 0, sizeof(*info)); |
d3aa45ce | 3531 | info->mode = IP_TUNNEL_INFO_TX; |
c6c33454 | 3532 | |
db3c6139 | 3533 | info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; |
22080870 DB |
3534 | if (flags & BPF_F_DONT_FRAGMENT) |
3535 | info->key.tun_flags |= TUNNEL_DONT_FRAGMENT; | |
792f3dd6 WT |
3536 | if (flags & BPF_F_ZERO_CSUM_TX) |
3537 | info->key.tun_flags &= ~TUNNEL_CSUM; | |
77a5196a WT |
3538 | if (flags & BPF_F_SEQ_NUMBER) |
3539 | info->key.tun_flags |= TUNNEL_SEQ; | |
22080870 | 3540 | |
d3aa45ce | 3541 | info->key.tun_id = cpu_to_be64(from->tunnel_id); |
c6c33454 DB |
3542 | info->key.tos = from->tunnel_tos; |
3543 | info->key.ttl = from->tunnel_ttl; | |
3544 | ||
3545 | if (flags & BPF_F_TUNINFO_IPV6) { | |
3546 | info->mode |= IP_TUNNEL_INFO_IPV6; | |
3547 | memcpy(&info->key.u.ipv6.dst, from->remote_ipv6, | |
3548 | sizeof(from->remote_ipv6)); | |
4018ab18 DB |
3549 | info->key.label = cpu_to_be32(from->tunnel_label) & |
3550 | IPV6_FLOWLABEL_MASK; | |
c6c33454 DB |
3551 | } else { |
3552 | info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); | |
3553 | } | |
d3aa45ce AS |
3554 | |
3555 | return 0; | |
3556 | } | |
3557 | ||
577c50aa | 3558 | static const struct bpf_func_proto bpf_skb_set_tunnel_key_proto = { |
d3aa45ce AS |
3559 | .func = bpf_skb_set_tunnel_key, |
3560 | .gpl_only = false, | |
3561 | .ret_type = RET_INTEGER, | |
3562 | .arg1_type = ARG_PTR_TO_CTX, | |
39f19ebb AS |
3563 | .arg2_type = ARG_PTR_TO_MEM, |
3564 | .arg3_type = ARG_CONST_SIZE, | |
d3aa45ce AS |
3565 | .arg4_type = ARG_ANYTHING, |
3566 | }; | |
3567 | ||
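A sketch of the get/set pair in use on a collect_md tunnel device (e.g. a vxlan device created with "external"); the VNI, peer address, and section names are made up:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

/* egress: program the metadata dst before the tunnel driver runs */
SEC("classifier")
int set_tun(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key = {};

	key.tunnel_id	= 42;
	key.remote_ipv4	= 0xac1001c8;	/* 172.16.1.200, host order */
	key.tunnel_ttl	= 64;

	if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
				   BPF_F_ZERO_CSUM_TX))
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

/* ingress: read back what the tunnel driver collected */
SEC("classifier")
int get_tun(struct __sk_buff *skb)
{
	struct bpf_tunnel_key key;

	if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0))
		return TC_ACT_OK;
	/* key.tunnel_id and key.remote_ipv4 now hold the outer info,
	 * already converted to host byte order by the helper */
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";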
f3694e00 DB |
3568 | BPF_CALL_3(bpf_skb_set_tunnel_opt, struct sk_buff *, skb, |
3569 | const u8 *, from, u32, size) | |
14ca0751 | 3570 | { |
14ca0751 DB |
3571 | struct ip_tunnel_info *info = skb_tunnel_info(skb); |
3572 | const struct metadata_dst *md = this_cpu_ptr(md_dst); | |
3573 | ||
3574 | if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1)))) | |
3575 | return -EINVAL; | |
fca5fdf6 | 3576 | if (unlikely(size > IP_TUNNEL_OPTS_MAX)) |
14ca0751 DB |
3577 | return -ENOMEM; |
3578 | ||
256c87c1 | 3579 | ip_tunnel_info_opts_set(info, from, size, TUNNEL_OPTIONS_PRESENT); |
14ca0751 DB |
3580 | |
3581 | return 0; | |
3582 | } | |
3583 | ||
3584 | static const struct bpf_func_proto bpf_skb_set_tunnel_opt_proto = { | |
3585 | .func = bpf_skb_set_tunnel_opt, | |
3586 | .gpl_only = false, | |
3587 | .ret_type = RET_INTEGER, | |
3588 | .arg1_type = ARG_PTR_TO_CTX, | |
39f19ebb AS |
3589 | .arg2_type = ARG_PTR_TO_MEM, |
3590 | .arg3_type = ARG_CONST_SIZE, | |
14ca0751 DB |
3591 | }; |
3592 | ||
3593 | static const struct bpf_func_proto * | |
3594 | bpf_get_skb_set_tunnel_proto(enum bpf_func_id which) | |
d3aa45ce AS |
3595 | { |
3596 | if (!md_dst) { | |
d66f2b91 JK |
3597 | struct metadata_dst __percpu *tmp; |
3598 | ||
3599 | tmp = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX, | |
3600 | METADATA_IP_TUNNEL, | |
3601 | GFP_KERNEL); | |
3602 | if (!tmp) | |
d3aa45ce | 3603 | return NULL; |
d66f2b91 JK |
3604 | if (cmpxchg(&md_dst, NULL, tmp)) |
3605 | metadata_dst_free_percpu(tmp); | |
d3aa45ce | 3606 | } |
14ca0751 DB |
3607 | |
3608 | switch (which) { | |
3609 | case BPF_FUNC_skb_set_tunnel_key: | |
3610 | return &bpf_skb_set_tunnel_key_proto; | |
3611 | case BPF_FUNC_skb_set_tunnel_opt: | |
3612 | return &bpf_skb_set_tunnel_opt_proto; | |
3613 | default: | |
3614 | return NULL; | |
3615 | } | |
d3aa45ce AS |
3616 | } |
3617 | ||
f3694e00 DB |
3618 | BPF_CALL_3(bpf_skb_under_cgroup, struct sk_buff *, skb, struct bpf_map *, map, |
3619 | u32, idx) | |
4a482f34 | 3620 | { |
4a482f34 MKL |
3621 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
3622 | struct cgroup *cgrp; | |
3623 | struct sock *sk; | |
4a482f34 | 3624 | |
2d48c5f9 | 3625 | sk = skb_to_full_sk(skb); |
4a482f34 MKL |
3626 | if (!sk || !sk_fullsock(sk)) |
3627 | return -ENOENT; | |
f3694e00 | 3628 | if (unlikely(idx >= array->map.max_entries)) |
4a482f34 MKL |
3629 | return -E2BIG; |
3630 | ||
f3694e00 | 3631 | cgrp = READ_ONCE(array->ptrs[idx]); |
4a482f34 MKL |
3632 | if (unlikely(!cgrp)) |
3633 | return -EAGAIN; | |
3634 | ||
54fd9c2d | 3635 | return sk_under_cgroup_hierarchy(sk, cgrp); |
4a482f34 MKL |
3636 | } |
3637 | ||
747ea55e DB |
3638 | static const struct bpf_func_proto bpf_skb_under_cgroup_proto = { |
3639 | .func = bpf_skb_under_cgroup, | |
4a482f34 MKL |
3640 | .gpl_only = false, |
3641 | .ret_type = RET_INTEGER, | |
3642 | .arg1_type = ARG_PTR_TO_CTX, | |
3643 | .arg2_type = ARG_CONST_MAP_PTR, | |
3644 | .arg3_type = ARG_ANYTHING, | |
3645 | }; | |
4a482f34 | 3646 | |
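A sketch of this helper in use; user space is assumed to have stored a cgroup fd in slot 0 of the array:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") cgrp_map = {
	.type		= BPF_MAP_TYPE_CGROUP_ARRAY,
	.key_size	= sizeof(__u32),
	.value_size	= sizeof(__u32),
	.max_entries	= 1,
};

SEC("classifier")
int allow_cgroup_only(struct __sk_buff *skb)
{
	/* 1 means the skb's full socket sits under the cgroup
	 * hierarchy stored in slot 0 */
	if (bpf_skb_under_cgroup(skb, &cgrp_map, 0) != 1)
		return TC_ACT_SHOT;
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";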
cb20b08e DB |
3647 | #ifdef CONFIG_SOCK_CGROUP_DATA |
3648 | BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb) | |
3649 | { | |
3650 | struct sock *sk = skb_to_full_sk(skb); | |
3651 | struct cgroup *cgrp; | |
3652 | ||
3653 | if (!sk || !sk_fullsock(sk)) | |
3654 | return 0; | |
3655 | ||
3656 | cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); | |
3657 | return cgrp->kn->id.id; | |
3658 | } | |
3659 | ||
3660 | static const struct bpf_func_proto bpf_skb_cgroup_id_proto = { | |
3661 | .func = bpf_skb_cgroup_id, | |
3662 | .gpl_only = false, | |
3663 | .ret_type = RET_INTEGER, | |
3664 | .arg1_type = ARG_PTR_TO_CTX, | |
3665 | }; | |
77236281 AI |
3666 | |
3667 | BPF_CALL_2(bpf_skb_ancestor_cgroup_id, const struct sk_buff *, skb, int, | |
3668 | ancestor_level) | |
3669 | { | |
3670 | struct sock *sk = skb_to_full_sk(skb); | |
3671 | struct cgroup *ancestor; | |
3672 | struct cgroup *cgrp; | |
3673 | ||
3674 | if (!sk || !sk_fullsock(sk)) | |
3675 | return 0; | |
3676 | ||
3677 | cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); | |
3678 | ancestor = cgroup_ancestor(cgrp, ancestor_level); | |
3679 | if (!ancestor) | |
3680 | return 0; | |
3681 | ||
3682 | return ancestor->kn->id.id; | |
3683 | } | |
3684 | ||
3685 | static const struct bpf_func_proto bpf_skb_ancestor_cgroup_id_proto = { | |
3686 | .func = bpf_skb_ancestor_cgroup_id, | |
3687 | .gpl_only = false, | |
3688 | .ret_type = RET_INTEGER, | |
3689 | .arg1_type = ARG_PTR_TO_CTX, | |
3690 | .arg2_type = ARG_ANYTHING, | |
3691 | }; | |
cb20b08e DB |
3692 | #endif |
3693 | ||
4de16969 DB |
3694 | static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, |
3695 | unsigned long off, unsigned long len) | |
3696 | { | |
3697 | memcpy(dst_buff, src_buff + off, len); | |
3698 | return 0; | |
3699 | } | |
3700 | ||
f3694e00 DB |
3701 | BPF_CALL_5(bpf_xdp_event_output, struct xdp_buff *, xdp, struct bpf_map *, map, |
3702 | u64, flags, void *, meta, u64, meta_size) | |
4de16969 | 3703 | { |
4de16969 | 3704 | u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32; |
4de16969 DB |
3705 | |
3706 | if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) | |
3707 | return -EINVAL; | |
3708 | if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data))) | |
3709 | return -EFAULT; | |
3710 | ||
9c471370 MKL |
3711 | return bpf_event_output(map, flags, meta, meta_size, xdp->data, |
3712 | xdp_size, bpf_xdp_copy); | |
4de16969 DB |
3713 | } |
3714 | ||
3715 | static const struct bpf_func_proto bpf_xdp_event_output_proto = { | |
3716 | .func = bpf_xdp_event_output, | |
3717 | .gpl_only = true, | |
3718 | .ret_type = RET_INTEGER, | |
3719 | .arg1_type = ARG_PTR_TO_CTX, | |
3720 | .arg2_type = ARG_CONST_MAP_PTR, | |
3721 | .arg3_type = ARG_ANYTHING, | |
39f19ebb | 3722 | .arg4_type = ARG_PTR_TO_MEM, |
1728a4f2 | 3723 | .arg5_type = ARG_CONST_SIZE_OR_ZERO, |
4de16969 DB |
3724 | }; |
3725 | ||
91b8270f CF |
3726 | BPF_CALL_1(bpf_get_socket_cookie, struct sk_buff *, skb) |
3727 | { | |
3728 | return skb->sk ? sock_gen_cookie(skb->sk) : 0; | |
3729 | } | |
3730 | ||
3731 | static const struct bpf_func_proto bpf_get_socket_cookie_proto = { | |
3732 | .func = bpf_get_socket_cookie, | |
3733 | .gpl_only = false, | |
3734 | .ret_type = RET_INTEGER, | |
3735 | .arg1_type = ARG_PTR_TO_CTX, | |
3736 | }; | |
3737 | ||
d692f113 AI |
3738 | BPF_CALL_1(bpf_get_socket_cookie_sock_addr, struct bpf_sock_addr_kern *, ctx) |
3739 | { | |
3740 | return sock_gen_cookie(ctx->sk); | |
3741 | } | |
3742 | ||
3743 | static const struct bpf_func_proto bpf_get_socket_cookie_sock_addr_proto = { | |
3744 | .func = bpf_get_socket_cookie_sock_addr, | |
3745 | .gpl_only = false, | |
3746 | .ret_type = RET_INTEGER, | |
3747 | .arg1_type = ARG_PTR_TO_CTX, | |
3748 | }; | |
3749 | ||
3750 | BPF_CALL_1(bpf_get_socket_cookie_sock_ops, struct bpf_sock_ops_kern *, ctx) | |
3751 | { | |
3752 | return sock_gen_cookie(ctx->sk); | |
3753 | } | |
3754 | ||
3755 | static const struct bpf_func_proto bpf_get_socket_cookie_sock_ops_proto = { | |
3756 | .func = bpf_get_socket_cookie_sock_ops, | |
3757 | .gpl_only = false, | |
3758 | .ret_type = RET_INTEGER, | |
3759 | .arg1_type = ARG_PTR_TO_CTX, | |
3760 | }; | |
3761 | ||
6acc5c29 CF |
3762 | BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb) |
3763 | { | |
3764 | struct sock *sk = sk_to_full_sk(skb->sk); | |
3765 | kuid_t kuid; | |
3766 | ||
3767 | if (!sk || !sk_fullsock(sk)) | |
3768 | return overflowuid; | |
3769 | kuid = sock_net_uid(sock_net(sk), sk); | |
3770 | return from_kuid_munged(sock_net(sk)->user_ns, kuid); | |
3771 | } | |
3772 | ||
3773 | static const struct bpf_func_proto bpf_get_socket_uid_proto = { | |
3774 | .func = bpf_get_socket_uid, | |
3775 | .gpl_only = false, | |
3776 | .ret_type = RET_INTEGER, | |
3777 | .arg1_type = ARG_PTR_TO_CTX, | |
3778 | }; | |
3779 | ||
8c4b4c7e LB |
3780 | BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, |
3781 | int, level, int, optname, char *, optval, int, optlen) | |
3782 | { | |
3783 | struct sock *sk = bpf_sock->sk; | |
3784 | int ret = 0; | |
3785 | int val; | |
3786 | ||
3787 | if (!sk_fullsock(sk)) | |
3788 | return -EINVAL; | |
3789 | ||
3790 | if (level == SOL_SOCKET) { | |
3791 | if (optlen != sizeof(int)) | |
3792 | return -EINVAL; | |
3793 | val = *((int *)optval); | |
3794 | ||
3795 | /* Only some socket options are supported */ | |
3796 | switch (optname) { | |
3797 | case SO_RCVBUF: | |
3798 | sk->sk_userlocks |= SOCK_RCVBUF_LOCK; | |
3799 | sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); | |
3800 | break; | |
3801 | case SO_SNDBUF: | |
3802 | sk->sk_userlocks |= SOCK_SNDBUF_LOCK; | |
3803 | sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); | |
3804 | break; | |
3805 | case SO_MAX_PACING_RATE: | |
3806 | sk->sk_max_pacing_rate = val; | |
3807 | sk->sk_pacing_rate = min(sk->sk_pacing_rate, | |
3808 | sk->sk_max_pacing_rate); | |
3809 | break; | |
3810 | case SO_PRIORITY: | |
3811 | sk->sk_priority = val; | |
3812 | break; | |
3813 | case SO_RCVLOWAT: | |
3814 | if (val < 0) | |
3815 | val = INT_MAX; | |
3816 | sk->sk_rcvlowat = val ? : 1; | |
3817 | break; | |
3818 | case SO_MARK: | |
3819 | sk->sk_mark = val; | |
3820 | break; | |
3821 | default: | |
3822 | ret = -EINVAL; | |
3823 | } | |
a5192c52 | 3824 | #ifdef CONFIG_INET |
6f5c39fa NS |
3825 | } else if (level == SOL_IP) { |
3826 | if (optlen != sizeof(int) || sk->sk_family != AF_INET) | |
3827 | return -EINVAL; | |
3828 | ||
3829 | val = *((int *)optval); | |
3830 | /* Only some options are supported */ | |
3831 | switch (optname) { | |
3832 | case IP_TOS: | |
3833 | if (val < -1 || val > 0xff) { | |
3834 | ret = -EINVAL; | |
3835 | } else { | |
3836 | struct inet_sock *inet = inet_sk(sk); | |
3837 | ||
3838 | if (val == -1) | |
3839 | val = 0; | |
3840 | inet->tos = val; | |
3841 | } | |
3842 | break; | |
3843 | default: | |
3844 | ret = -EINVAL; | |
3845 | } | |
6f9bd3d7 LB |
3846 | #if IS_ENABLED(CONFIG_IPV6) |
3847 | } else if (level == SOL_IPV6) { | |
3848 | if (optlen != sizeof(int) || sk->sk_family != AF_INET6) | |
3849 | return -EINVAL; | |
3850 | ||
3851 | val = *((int *)optval); | |
3852 | /* Only some options are supported */ | |
3853 | switch (optname) { | |
3854 | case IPV6_TCLASS: | |
3855 | if (val < -1 || val > 0xff) { | |
3856 | ret = -EINVAL; | |
3857 | } else { | |
3858 | struct ipv6_pinfo *np = inet6_sk(sk); | |
3859 | ||
3860 | if (val == -1) | |
3861 | val = 0; | |
3862 | np->tclass = val; | |
3863 | } | |
3864 | break; | |
3865 | default: | |
3866 | ret = -EINVAL; | |
3867 | } | |
3868 | #endif | |
8c4b4c7e LB |
3869 | } else if (level == SOL_TCP && |
3870 | sk->sk_prot->setsockopt == tcp_setsockopt) { | |
91b5b21c LB |
3871 | if (optname == TCP_CONGESTION) { |
3872 | char name[TCP_CA_NAME_MAX]; | |
ebfa00c5 | 3873 | bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN; |
91b5b21c LB |
3874 | |
3875 | strncpy(name, optval, min_t(long, optlen, | |
3876 | TCP_CA_NAME_MAX-1)); | |
3877 | name[TCP_CA_NAME_MAX-1] = 0; | |
6f9bd3d7 LB |
3878 | ret = tcp_set_congestion_control(sk, name, false, |
3879 | reinit); | |
91b5b21c | 3880 | } else { |
fc747810 LB |
3881 | struct tcp_sock *tp = tcp_sk(sk); |
3882 | ||
3883 | if (optlen != sizeof(int)) | |
3884 | return -EINVAL; | |
3885 | ||
3886 | val = *((int *)optval); | |
3887 | /* Only some options are supported */ | |
3888 | switch (optname) { | |
3889 | case TCP_BPF_IW: | |
3890 | if (val <= 0 || tp->data_segs_out > 0) | |
3891 | ret = -EINVAL; | |
3892 | else | |
3893 | tp->snd_cwnd = val; | |
3894 | break; | |
13bf9641 LB |
3895 | case TCP_BPF_SNDCWND_CLAMP: |
3896 | if (val <= 0) { | |
3897 | ret = -EINVAL; | |
3898 | } else { | |
3899 | tp->snd_cwnd_clamp = val; | |
3900 | tp->snd_ssthresh = val; | |
3901 | } | |
6d3f06a0 | 3902 | break; |
1e215300 NS |
3903 | case TCP_SAVE_SYN: |
3904 | if (val < 0 || val > 1) | |
3905 | ret = -EINVAL; | |
3906 | else | |
3907 | tp->save_syn = val; | |
3908 | break; | |
fc747810 LB |
3909 | default: |
3910 | ret = -EINVAL; | |
3911 | } | |
91b5b21c | 3912 | } |
91b5b21c | 3913 | #endif |
8c4b4c7e LB |
3914 | } else { |
3915 | ret = -EINVAL; | |
3916 | } | |
3917 | return ret; | |
3918 | } | |
3919 | ||
3920 | static const struct bpf_func_proto bpf_setsockopt_proto = { | |
3921 | .func = bpf_setsockopt, | |
cd86d1fd | 3922 | .gpl_only = false, |
8c4b4c7e LB |
3923 | .ret_type = RET_INTEGER, |
3924 | .arg1_type = ARG_PTR_TO_CTX, | |
3925 | .arg2_type = ARG_ANYTHING, | |
3926 | .arg3_type = ARG_ANYTHING, | |
3927 | .arg4_type = ARG_PTR_TO_MEM, | |
3928 | .arg5_type = ARG_CONST_SIZE, | |
3929 | }; | |
3930 | ||
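A sketch of a sockops program exercising the options white-listed above; the congestion control name and initial window are arbitrary, and the header mix follows the in-tree samples/bpf build:

#include <uapi/linux/bpf.h>
#include <uapi/linux/tcp.h>
#include <linux/socket.h>
#include "bpf_helpers.h"

SEC("sockops")
int tune_sock(struct bpf_sock_ops *skops)
{
	char cc[] = "cubic";	/* any built-in CA name would do */
	int iw = 10;

	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* both calls land in bpf_setsockopt() above; errors
		 * are ignored in this sketch */
		bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
			       cc, sizeof(cc));
		/* TCP_BPF_IW only works before any data is sent */
		bpf_setsockopt(skops, SOL_TCP, TCP_BPF_IW,
			       &iw, sizeof(iw));
		break;
	}
	return 1;
}

char _license[] SEC("license") = "GPL";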
cd86d1fd LB |
3931 | BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock, |
3932 | int, level, int, optname, char *, optval, int, optlen) | |
3933 | { | |
3934 | struct sock *sk = bpf_sock->sk; | |
cd86d1fd LB |
3935 | |
3936 | if (!sk_fullsock(sk)) | |
3937 | goto err_clear; | |
cd86d1fd LB |
3938 | #ifdef CONFIG_INET |
3939 | if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) { | |
1edb6e03 AR |
3940 | struct inet_connection_sock *icsk; |
3941 | struct tcp_sock *tp; | |
3942 | ||
1e215300 NS |
3943 | switch (optname) { |
3944 | case TCP_CONGESTION: | |
3945 | icsk = inet_csk(sk); | |
cd86d1fd LB |
3946 | |
3947 | if (!icsk->icsk_ca_ops || optlen <= 1) | |
3948 | goto err_clear; | |
3949 | strncpy(optval, icsk->icsk_ca_ops->name, optlen); | |
3950 | optval[optlen - 1] = 0; | |
1e215300 NS |
3951 | break; |
3952 | case TCP_SAVED_SYN: | |
3953 | tp = tcp_sk(sk); | |
3954 | ||
3955 | if (optlen <= 0 || !tp->saved_syn || | |
3956 | optlen > tp->saved_syn[0]) | |
3957 | goto err_clear; | |
3958 | memcpy(optval, tp->saved_syn + 1, optlen); | |
3959 | break; | |
3960 | default: | |
cd86d1fd LB |
3961 | goto err_clear; |
3962 | } | |
6f5c39fa NS |
3963 | } else if (level == SOL_IP) { |
3964 | struct inet_sock *inet = inet_sk(sk); | |
3965 | ||
3966 | if (optlen != sizeof(int) || sk->sk_family != AF_INET) | |
3967 | goto err_clear; | |
3968 | ||
3969 | /* Only some options are supported */ | |
3970 | switch (optname) { | |
3971 | case IP_TOS: | |
3972 | *((int *)optval) = (int)inet->tos; | |
3973 | break; | |
3974 | default: | |
3975 | goto err_clear; | |
3976 | } | |
6f9bd3d7 LB |
3977 | #if IS_ENABLED(CONFIG_IPV6) |
3978 | } else if (level == SOL_IPV6) { | |
3979 | struct ipv6_pinfo *np = inet6_sk(sk); | |
3980 | ||
3981 | if (optlen != sizeof(int) || sk->sk_family != AF_INET6) | |
3982 | goto err_clear; | |
3983 | ||
3984 | /* Only some options are supported */ | |
3985 | switch (optname) { | |
3986 | case IPV6_TCLASS: | |
3987 | *((int *)optval) = (int)np->tclass; | |
3988 | break; | |
3989 | default: | |
3990 | goto err_clear; | |
3991 | } | |
3992 | #endif | |
cd86d1fd LB |
3993 | } else { |
3994 | goto err_clear; | |
3995 | } | |
aa2bc739 | 3996 | return 0; |
cd86d1fd LB |
3997 | #endif |
3998 | err_clear: | |
3999 | memset(optval, 0, optlen); | |
4000 | return -EINVAL; | |
4001 | } | |
4002 | ||
4003 | static const struct bpf_func_proto bpf_getsockopt_proto = { | |
4004 | .func = bpf_getsockopt, | |
4005 | .gpl_only = false, | |
4006 | .ret_type = RET_INTEGER, | |
4007 | .arg1_type = ARG_PTR_TO_CTX, | |
4008 | .arg2_type = ARG_ANYTHING, | |
4009 | .arg3_type = ARG_ANYTHING, | |
4010 | .arg4_type = ARG_PTR_TO_UNINIT_MEM, | |
4011 | .arg5_type = ARG_CONST_SIZE, | |
4012 | }; | |
4013 | ||
b13d8807 LB |
4014 | BPF_CALL_2(bpf_sock_ops_cb_flags_set, struct bpf_sock_ops_kern *, bpf_sock, |
4015 | int, argval) | |
4016 | { | |
4017 | struct sock *sk = bpf_sock->sk; | |
4018 | int val = argval & BPF_SOCK_OPS_ALL_CB_FLAGS; | |
4019 | ||
a7dcdf6e | 4020 | if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk)) |
b13d8807 LB |
4021 | return -EINVAL; |
4022 | ||
b13d8807 LB |
4023 | if (val) |
4024 | tcp_sk(sk)->bpf_sock_ops_cb_flags = val; | |
4025 | ||
4026 | return argval & (~BPF_SOCK_OPS_ALL_CB_FLAGS); | |
b13d8807 LB |
4027 | } |
4028 | ||
4029 | static const struct bpf_func_proto bpf_sock_ops_cb_flags_set_proto = { | |
4030 | .func = bpf_sock_ops_cb_flags_set, | |
4031 | .gpl_only = false, | |
4032 | .ret_type = RET_INTEGER, | |
4033 | .arg1_type = ARG_PTR_TO_CTX, | |
4034 | .arg2_type = ARG_ANYTHING, | |
4035 | }; | |
4036 | ||
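A sketch showing how a sockops program opts in to the optional callbacks gated by these flags:

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

SEC("sockops")
int enable_tcp_events(struct bpf_sock_ops *skops)
{
	switch (skops->op) {
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		/* opt in to RTO and retransmit callbacks; any bits the
		 * kernel doesn't know come back in the return value */
		bpf_sock_ops_cb_flags_set(skops,
					  BPF_SOCK_OPS_RTO_CB_FLAG |
					  BPF_SOCK_OPS_RETRANS_CB_FLAG);
		break;
	case BPF_SOCK_OPS_RTO_CB:
	case BPF_SOCK_OPS_RETRANS_CB:
		/* delivered only because of the flags set above */
		break;
	}
	return 1;
}

char _license[] SEC("license") = "GPL";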
d74bad4e AI |
4037 | const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; |
4038 | EXPORT_SYMBOL_GPL(ipv6_bpf_stub); | |
4039 | ||
4040 | BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr, | |
4041 | int, addr_len) | |
4042 | { | |
4043 | #ifdef CONFIG_INET | |
4044 | struct sock *sk = ctx->sk; | |
4045 | int err; | |
4046 | ||
4047 | /* Binding to a port can be expensive, so it's prohibited in the helper. | |
4048 | * Only binding to an IP address is supported. | |
4049 | */ | |
4050 | err = -EINVAL; | |
4051 | if (addr->sa_family == AF_INET) { | |
4052 | if (addr_len < sizeof(struct sockaddr_in)) | |
4053 | return err; | |
4054 | if (((struct sockaddr_in *)addr)->sin_port != htons(0)) | |
4055 | return err; | |
4056 | return __inet_bind(sk, addr, addr_len, true, false); | |
4057 | #if IS_ENABLED(CONFIG_IPV6) | |
4058 | } else if (addr->sa_family == AF_INET6) { | |
4059 | if (addr_len < SIN6_LEN_RFC2133) | |
4060 | return err; | |
4061 | if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) | |
4062 | return err; | |
4063 | /* ipv6_bpf_stub cannot be NULL here, since this is called from | |
4064 | * the bpf_cgroup_inet6_connect hook and ipv6 is already loaded | |
4065 | */ | |
4066 | return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false); | |
4067 | #endif /* CONFIG_IPV6 */ | |
4068 | } | |
4069 | #endif /* CONFIG_INET */ | |
4070 | ||
4071 | return -EAFNOSUPPORT; | |
4072 | } | |
4073 | ||
4074 | static const struct bpf_func_proto bpf_bind_proto = { | |
4075 | .func = bpf_bind, | |
4076 | .gpl_only = false, | |
4077 | .ret_type = RET_INTEGER, | |
4078 | .arg1_type = ARG_PTR_TO_CTX, | |
4079 | .arg2_type = ARG_PTR_TO_MEM, | |
4080 | .arg3_type = ARG_CONST_SIZE, | |
4081 | }; | |
4082 | ||
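A sketch of the intended caller: a cgroup/connect4 program pinning the source address (10.0.0.1 is a placeholder) while leaving the port at 0, as the helper requires; the include set mirrors the in-tree selftests:

#include <linux/bpf.h>
#include <linux/in.h>
#include <sys/socket.h>
#include "bpf_helpers.h"
#include "bpf_endian.h"

SEC("cgroup/connect4")
int bind_src_addr(struct bpf_sock_addr *ctx)
{
	struct sockaddr_in sa = {};

	sa.sin_family = AF_INET;
	sa.sin_addr.s_addr = bpf_htonl(0x0a000001);	/* 10.0.0.1 */
	/* sin_port stays 0: binding to a port is rejected by bpf_bind() */

	if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
		return 0;	/* veto the connect() */
	return 1;
}

char _license[] SEC("license") = "GPL";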
12bed760 EB |
4083 | #ifdef CONFIG_XFRM |
4084 | BPF_CALL_5(bpf_skb_get_xfrm_state, struct sk_buff *, skb, u32, index, | |
4085 | struct bpf_xfrm_state *, to, u32, size, u64, flags) | |
4086 | { | |
4087 | const struct sec_path *sp = skb_sec_path(skb); | |
4088 | const struct xfrm_state *x; | |
4089 | ||
4090 | if (!sp || unlikely(index >= sp->len || flags)) | |
4091 | goto err_clear; | |
4092 | ||
4093 | x = sp->xvec[index]; | |
4094 | ||
4095 | if (unlikely(size != sizeof(struct bpf_xfrm_state))) | |
4096 | goto err_clear; | |
4097 | ||
4098 | to->reqid = x->props.reqid; | |
4099 | to->spi = x->id.spi; | |
4100 | to->family = x->props.family; | |
1fbc2e0c DB |
4101 | to->ext = 0; |
4102 | ||
12bed760 EB |
4103 | if (to->family == AF_INET6) { |
4104 | memcpy(to->remote_ipv6, x->props.saddr.a6, | |
4105 | sizeof(to->remote_ipv6)); | |
4106 | } else { | |
4107 | to->remote_ipv4 = x->props.saddr.a4; | |
1fbc2e0c | 4108 | memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); |
12bed760 EB |
4109 | } |
4110 | ||
4111 | return 0; | |
4112 | err_clear: | |
4113 | memset(to, 0, size); | |
4114 | return -EINVAL; | |
4115 | } | |
4116 | ||
4117 | static const struct bpf_func_proto bpf_skb_get_xfrm_state_proto = { | |
4118 | .func = bpf_skb_get_xfrm_state, | |
4119 | .gpl_only = false, | |
4120 | .ret_type = RET_INTEGER, | |
4121 | .arg1_type = ARG_PTR_TO_CTX, | |
4122 | .arg2_type = ARG_ANYTHING, | |
4123 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, | |
4124 | .arg4_type = ARG_CONST_SIZE, | |
4125 | .arg5_type = ARG_ANYTHING, | |
4126 | }; | |
4127 | #endif | |
4128 | ||
87f5fc7e DA |
4129 | #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6) |
4130 | static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params, | |
4131 | const struct neighbour *neigh, | |
4132 | const struct net_device *dev) | |
4133 | { | |
4134 | memcpy(params->dmac, neigh->ha, ETH_ALEN); | |
4135 | memcpy(params->smac, dev->dev_addr, ETH_ALEN); | |
4136 | params->h_vlan_TCI = 0; | |
4137 | params->h_vlan_proto = 0; | |
4c79579b | 4138 | params->ifindex = dev->ifindex; |
87f5fc7e | 4139 | |
4c79579b | 4140 | return 0; |
87f5fc7e DA |
4141 | } |
4142 | #endif | |
4143 | ||
4144 | #if IS_ENABLED(CONFIG_INET) | |
4145 | static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |
4f74fede | 4146 | u32 flags, bool check_mtu) |
87f5fc7e DA |
4147 | { |
4148 | struct in_device *in_dev; | |
4149 | struct neighbour *neigh; | |
4150 | struct net_device *dev; | |
4151 | struct fib_result res; | |
4152 | struct fib_nh *nh; | |
4153 | struct flowi4 fl4; | |
4154 | int err; | |
4f74fede | 4155 | u32 mtu; |
87f5fc7e DA |
4156 | |
4157 | dev = dev_get_by_index_rcu(net, params->ifindex); | |
4158 | if (unlikely(!dev)) | |
4159 | return -ENODEV; | |
4160 | ||
4161 | /* verify forwarding is enabled on this interface */ | |
4162 | in_dev = __in_dev_get_rcu(dev); | |
4163 | if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) | |
4c79579b | 4164 | return BPF_FIB_LKUP_RET_FWD_DISABLED; |
87f5fc7e DA |
4165 | |
4166 | if (flags & BPF_FIB_LOOKUP_OUTPUT) { | |
4167 | fl4.flowi4_iif = 1; | |
4168 | fl4.flowi4_oif = params->ifindex; | |
4169 | } else { | |
4170 | fl4.flowi4_iif = params->ifindex; | |
4171 | fl4.flowi4_oif = 0; | |
4172 | } | |
4173 | fl4.flowi4_tos = params->tos & IPTOS_RT_MASK; | |
4174 | fl4.flowi4_scope = RT_SCOPE_UNIVERSE; | |
4175 | fl4.flowi4_flags = 0; | |
4176 | ||
4177 | fl4.flowi4_proto = params->l4_protocol; | |
4178 | fl4.daddr = params->ipv4_dst; | |
4179 | fl4.saddr = params->ipv4_src; | |
4180 | fl4.fl4_sport = params->sport; | |
4181 | fl4.fl4_dport = params->dport; | |
4182 | ||
4183 | if (flags & BPF_FIB_LOOKUP_DIRECT) { | |
4184 | u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; | |
4185 | struct fib_table *tb; | |
4186 | ||
4187 | tb = fib_get_table(net, tbid); | |
4188 | if (unlikely(!tb)) | |
4c79579b | 4189 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
87f5fc7e DA |
4190 | |
4191 | err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); | |
4192 | } else { | |
4193 | fl4.flowi4_mark = 0; | |
4194 | fl4.flowi4_secid = 0; | |
4195 | fl4.flowi4_tun_key.tun_id = 0; | |
4196 | fl4.flowi4_uid = sock_net_uid(net, NULL); | |
4197 | ||
4198 | err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); | |
4199 | } | |
4200 | ||
4c79579b DA |
4201 | if (err) { |
4202 | /* map fib lookup errors to BPF_FIB_LKUP_RET_* values (RTN_* analogues) */ | |
4203 | if (err == -EINVAL) | |
4204 | return BPF_FIB_LKUP_RET_BLACKHOLE; | |
4205 | if (err == -EHOSTUNREACH) | |
4206 | return BPF_FIB_LKUP_RET_UNREACHABLE; | |
4207 | if (err == -EACCES) | |
4208 | return BPF_FIB_LKUP_RET_PROHIBIT; | |
4209 | ||
4210 | return BPF_FIB_LKUP_RET_NOT_FWDED; | |
4211 | } | |
4212 | ||
4213 | if (res.type != RTN_UNICAST) | |
4214 | return BPF_FIB_LKUP_RET_NOT_FWDED; | |
87f5fc7e DA |
4215 | |
4216 | if (res.fi->fib_nhs > 1) | |
4217 | fib_select_path(net, &res, &fl4, NULL); | |
4218 | ||
4f74fede DA |
4219 | if (check_mtu) { |
4220 | mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst); | |
4221 | if (params->tot_len > mtu) | |
4c79579b | 4222 | return BPF_FIB_LKUP_RET_FRAG_NEEDED; |
4f74fede DA |
4223 | } |
4224 | ||
87f5fc7e DA |
4225 | nh = &res.fi->fib_nh[res.nh_sel]; |
4226 | ||
4227 | /* do not handle lwt encaps right now */ | |
4228 | if (nh->nh_lwtstate) | |
4c79579b | 4229 | return BPF_FIB_LKUP_RET_UNSUPP_LWT; |
87f5fc7e DA |
4230 | |
4231 | dev = nh->nh_dev; | |
87f5fc7e DA |
4232 | if (nh->nh_gw) |
4233 | params->ipv4_dst = nh->nh_gw; | |
4234 | ||
4235 | params->rt_metric = res.fi->fib_priority; | |
4236 | ||
4237 | /* xdp and cls_bpf programs are run in RCU-bh so | |
4238 | * rcu_read_lock_bh is not needed here | |
4239 | */ | |
4240 | neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst); | |
4c79579b DA |
4241 | if (!neigh) |
4242 | return BPF_FIB_LKUP_RET_NO_NEIGH; | |
87f5fc7e | 4243 | |
4c79579b | 4244 | return bpf_fib_set_fwd_params(params, neigh, dev); |
87f5fc7e DA |
4245 | } |
4246 | #endif | |
4247 | ||
4248 | #if IS_ENABLED(CONFIG_IPV6) | |
4249 | static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, | |
4f74fede | 4250 | u32 flags, bool check_mtu) |
87f5fc7e DA |
4251 | { |
4252 | struct in6_addr *src = (struct in6_addr *) params->ipv6_src; | |
4253 | struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst; | |
4254 | struct neighbour *neigh; | |
4255 | struct net_device *dev; | |
4256 | struct inet6_dev *idev; | |
4257 | struct fib6_info *f6i; | |
4258 | struct flowi6 fl6; | |
4259 | int strict = 0; | |
4260 | int oif; | |
4f74fede | 4261 | u32 mtu; |
87f5fc7e DA |
4262 | |
4263 | /* link local addresses are never forwarded */ | |
4264 | if (rt6_need_strict(dst) || rt6_need_strict(src)) | |
4c79579b | 4265 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
87f5fc7e DA |
4266 | |
4267 | dev = dev_get_by_index_rcu(net, params->ifindex); | |
4268 | if (unlikely(!dev)) | |
4269 | return -ENODEV; | |
4270 | ||
4271 | idev = __in6_dev_get_safely(dev); | |
4272 | if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) | |
4c79579b | 4273 | return BPF_FIB_LKUP_RET_FWD_DISABLED; |
87f5fc7e DA |
4274 | |
4275 | if (flags & BPF_FIB_LOOKUP_OUTPUT) { | |
4276 | fl6.flowi6_iif = 1; | |
4277 | oif = fl6.flowi6_oif = params->ifindex; | |
4278 | } else { | |
4279 | oif = fl6.flowi6_iif = params->ifindex; | |
4280 | fl6.flowi6_oif = 0; | |
4281 | strict = RT6_LOOKUP_F_HAS_SADDR; | |
4282 | } | |
bd3a08aa | 4283 | fl6.flowlabel = params->flowinfo; |
87f5fc7e DA |
4284 | fl6.flowi6_scope = 0; |
4285 | fl6.flowi6_flags = 0; | |
4286 | fl6.mp_hash = 0; | |
4287 | ||
4288 | fl6.flowi6_proto = params->l4_protocol; | |
4289 | fl6.daddr = *dst; | |
4290 | fl6.saddr = *src; | |
4291 | fl6.fl6_sport = params->sport; | |
4292 | fl6.fl6_dport = params->dport; | |
4293 | ||
4294 | if (flags & BPF_FIB_LOOKUP_DIRECT) { | |
4295 | u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; | |
4296 | struct fib6_table *tb; | |
4297 | ||
4298 | tb = ipv6_stub->fib6_get_table(net, tbid); | |
4299 | if (unlikely(!tb)) | |
4c79579b | 4300 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
87f5fc7e DA |
4301 | |
4302 | f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict); | |
4303 | } else { | |
4304 | fl6.flowi6_mark = 0; | |
4305 | fl6.flowi6_secid = 0; | |
4306 | fl6.flowi6_tun_key.tun_id = 0; | |
4307 | fl6.flowi6_uid = sock_net_uid(net, NULL); | |
4308 | ||
4309 | f6i = ipv6_stub->fib6_lookup(net, oif, &fl6, strict); | |
4310 | } | |
4311 | ||
4312 | if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry)) | |
4c79579b DA |
4313 | return BPF_FIB_LKUP_RET_NOT_FWDED; |
4314 | ||
4315 | if (unlikely(f6i->fib6_flags & RTF_REJECT)) { | |
4316 | switch (f6i->fib6_type) { | |
4317 | case RTN_BLACKHOLE: | |
4318 | return BPF_FIB_LKUP_RET_BLACKHOLE; | |
4319 | case RTN_UNREACHABLE: | |
4320 | return BPF_FIB_LKUP_RET_UNREACHABLE; | |
4321 | case RTN_PROHIBIT: | |
4322 | return BPF_FIB_LKUP_RET_PROHIBIT; | |
4323 | default: | |
4324 | return BPF_FIB_LKUP_RET_NOT_FWDED; | |
4325 | } | |
4326 | } | |
87f5fc7e | 4327 | |
4c79579b DA |
4328 | if (f6i->fib6_type != RTN_UNICAST) |
4329 | return BPF_FIB_LKUP_RET_NOT_FWDED; | |
87f5fc7e DA |
4330 | |
4331 | if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0) | |
4332 | f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6, | |
4333 | fl6.flowi6_oif, NULL, | |
4334 | strict); | |
4335 | ||
4f74fede DA |
4336 | if (check_mtu) { |
4337 | mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src); | |
4338 | if (params->tot_len > mtu) | |
4c79579b | 4339 | return BPF_FIB_LKUP_RET_FRAG_NEEDED; |
4f74fede DA |
4340 | } |
4341 | ||
87f5fc7e | 4342 | if (f6i->fib6_nh.nh_lwtstate) |
4c79579b | 4343 | return BPF_FIB_LKUP_RET_UNSUPP_LWT; |
87f5fc7e DA |
4344 | |
4345 | if (f6i->fib6_flags & RTF_GATEWAY) | |
4346 | *dst = f6i->fib6_nh.nh_gw; | |
4347 | ||
4348 | dev = f6i->fib6_nh.nh_dev; | |
4349 | params->rt_metric = f6i->fib6_metric; | |
4350 | ||
4351 | /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is | |
4352 | * not needed here. Cannot use __ipv6_neigh_lookup_noref here | |
4353 | * because we need to get nd_tbl via the stub. | |
4354 | */ | |
4355 | neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128, | |
4356 | ndisc_hashfn, dst, dev); | |
4c79579b DA |
4357 | if (!neigh) |
4358 | return BPF_FIB_LKUP_RET_NO_NEIGH; | |
87f5fc7e | 4359 | |
4c79579b | 4360 | return bpf_fib_set_fwd_params(params, neigh, dev); |
87f5fc7e DA |
4361 | } |
4362 | #endif | |
4363 | ||
4364 | BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx, | |
4365 | struct bpf_fib_lookup *, params, int, plen, u32, flags) | |
4366 | { | |
4367 | if (plen < sizeof(*params)) | |
4368 | return -EINVAL; | |
4369 | ||
9ce64f19 DA |
4370 | if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) |
4371 | return -EINVAL; | |
4372 | ||
87f5fc7e DA |
4373 | switch (params->family) { |
4374 | #if IS_ENABLED(CONFIG_INET) | |
4375 | case AF_INET: | |
4376 | return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params, | |
4f74fede | 4377 | flags, true); |
87f5fc7e DA |
4378 | #endif |
4379 | #if IS_ENABLED(CONFIG_IPV6) | |
4380 | case AF_INET6: | |
4381 | return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params, | |
4f74fede | 4382 | flags, true); |
87f5fc7e DA |
4383 | #endif |
4384 | } | |
bcece5dc | 4385 | return -EAFNOSUPPORT; |
87f5fc7e DA |
4386 | } |
4387 | ||
4388 | static const struct bpf_func_proto bpf_xdp_fib_lookup_proto = { | |
4389 | .func = bpf_xdp_fib_lookup, | |
4390 | .gpl_only = true, | |
4391 | .ret_type = RET_INTEGER, | |
4392 | .arg1_type = ARG_PTR_TO_CTX, | |
4393 | .arg2_type = ARG_PTR_TO_MEM, | |
4394 | .arg3_type = ARG_CONST_SIZE, | |
4395 | .arg4_type = ARG_ANYTHING, | |
4396 | }; | |
4397 | ||
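A hedged sketch of the canonical consumer, modeled on the in-tree xdp_fwd sample: an XDP program that forwards IPv4 frames using the lookup result. TTL decrement and checksum update are omitted for brevity; a real forwarder needs both:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <sys/socket.h>	/* AF_INET */
#include "bpf_helpers.h"
#include "bpf_endian.h"

SEC("xdp")
int xdp_fwd_v4(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct bpf_fib_lookup fib = {};
	struct ethhdr *eth = data;
	struct iphdr *iph = data + sizeof(*eth);
	int rc;

	if ((void *)(iph + 1) > data_end)
		return XDP_PASS;
	if (eth->h_proto != bpf_htons(ETH_P_IP))
		return XDP_PASS;

	fib.family	= AF_INET;
	fib.tos		= iph->tos;
	fib.l4_protocol	= iph->protocol;
	fib.tot_len	= bpf_ntohs(iph->tot_len);
	fib.ipv4_src	= iph->saddr;
	fib.ipv4_dst	= iph->daddr;
	fib.ifindex	= ctx->ingress_ifindex;

	rc = bpf_fib_lookup(ctx, &fib, sizeof(fib), 0);
	if (rc != BPF_FIB_LKUP_RET_SUCCESS)
		return XDP_PASS;	/* let the stack see it */

	/* dmac/smac were filled in by bpf_fib_set_fwd_params() above */
	__builtin_memcpy(eth->h_dest, fib.dmac, ETH_ALEN);
	__builtin_memcpy(eth->h_source, fib.smac, ETH_ALEN);
	return bpf_redirect(fib.ifindex, 0);
}

char _license[] SEC("license") = "GPL";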
4398 | BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, | |
4399 | struct bpf_fib_lookup *, params, int, plen, u32, flags) | |
4400 | { | |
4f74fede | 4401 | struct net *net = dev_net(skb->dev); |
4c79579b | 4402 | int rc = -EAFNOSUPPORT; |
4f74fede | 4403 | |
87f5fc7e DA |
4404 | if (plen < sizeof(*params)) |
4405 | return -EINVAL; | |
4406 | ||
9ce64f19 DA |
4407 | if (flags & ~(BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_OUTPUT)) |
4408 | return -EINVAL; | |
4409 | ||
87f5fc7e DA |
4410 | switch (params->family) { |
4411 | #if IS_ENABLED(CONFIG_INET) | |
4412 | case AF_INET: | |
4c79579b | 4413 | rc = bpf_ipv4_fib_lookup(net, params, flags, false); |
4f74fede | 4414 | break; |
87f5fc7e DA |
4415 | #endif |
4416 | #if IS_ENABLED(CONFIG_IPV6) | |
4417 | case AF_INET6: | |
4c79579b | 4418 | rc = bpf_ipv6_fib_lookup(net, params, flags, false); |
4f74fede | 4419 | break; |
87f5fc7e DA |
4420 | #endif |
4421 | } | |
4f74fede | 4422 | |
4c79579b | 4423 | if (!rc) { |
4f74fede DA |
4424 | struct net_device *dev; |
4425 | ||
4c79579b | 4426 | dev = dev_get_by_index_rcu(net, params->ifindex); |
4f74fede | 4427 | if (!is_skb_forwardable(dev, skb)) |
4c79579b | 4428 | rc = BPF_FIB_LKUP_RET_FRAG_NEEDED; |
4f74fede DA |
4429 | } |
4430 | ||
4c79579b | 4431 | return rc; |
87f5fc7e DA |
4432 | } |
4433 | ||
4434 | static const struct bpf_func_proto bpf_skb_fib_lookup_proto = { | |
4435 | .func = bpf_skb_fib_lookup, | |
4436 | .gpl_only = true, | |
4437 | .ret_type = RET_INTEGER, | |
4438 | .arg1_type = ARG_PTR_TO_CTX, | |
4439 | .arg2_type = ARG_PTR_TO_MEM, | |
4440 | .arg3_type = ARG_CONST_SIZE, | |
4441 | .arg4_type = ARG_ANYTHING, | |
4442 | }; | |
4443 | ||
fe94cc29 MX |
4444 | #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) |
4445 | static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) | |
4446 | { | |
4447 | int err; | |
4448 | struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr; | |
4449 | ||
4450 | if (!seg6_validate_srh(srh, len)) | |
4451 | return -EINVAL; | |
4452 | ||
4453 | switch (type) { | |
4454 | case BPF_LWT_ENCAP_SEG6_INLINE: | |
4455 | if (skb->protocol != htons(ETH_P_IPV6)) | |
4456 | return -EBADMSG; | |
4457 | ||
4458 | err = seg6_do_srh_inline(skb, srh); | |
4459 | break; | |
4460 | case BPF_LWT_ENCAP_SEG6: | |
4461 | skb_reset_inner_headers(skb); | |
4462 | skb->encapsulation = 1; | |
4463 | err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6); | |
4464 | break; | |
4465 | default: | |
4466 | return -EINVAL; | |
4467 | } | |
4468 | ||
4469 | bpf_compute_data_pointers(skb); | |
4470 | if (err) | |
4471 | return err; | |
4472 | ||
4473 | ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); | |
4474 | skb_set_transport_header(skb, sizeof(struct ipv6hdr)); | |
4475 | ||
4476 | return seg6_lookup_nexthop(skb, NULL, 0); | |
4477 | } | |
4478 | #endif /* CONFIG_IPV6_SEG6_BPF */ | |
4479 | ||
4480 | BPF_CALL_4(bpf_lwt_push_encap, struct sk_buff *, skb, u32, type, void *, hdr, | |
4481 | u32, len) | |
4482 | { | |
4483 | switch (type) { | |
4484 | #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) | |
4485 | case BPF_LWT_ENCAP_SEG6: | |
4486 | case BPF_LWT_ENCAP_SEG6_INLINE: | |
4487 | return bpf_push_seg6_encap(skb, type, hdr, len); | |
4488 | #endif | |
4489 | default: | |
4490 | return -EINVAL; | |
4491 | } | |
4492 | } | |
4493 | ||
4494 | static const struct bpf_func_proto bpf_lwt_push_encap_proto = { | |
4495 | .func = bpf_lwt_push_encap, | |
4496 | .gpl_only = false, | |
4497 | .ret_type = RET_INTEGER, | |
4498 | .arg1_type = ARG_PTR_TO_CTX, | |
4499 | .arg2_type = ARG_ANYTHING, | |
4500 | .arg3_type = ARG_PTR_TO_MEM, | |
4501 | .arg4_type = ARG_CONST_SIZE | |
4502 | }; | |
4503 | ||
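A sketch of the helper in use from an lwt_in program attached to an IPv6 route, inlining a one-segment SRH. The SID fc00::1 is a placeholder, and the hand-rolled struct mirrors the uapi ipv6_sr_hdr layout:

#include <linux/bpf.h>
#include <linux/in6.h>
#include "bpf_helpers.h"

/* minimal SRH carrying a single segment */
struct srh_one_seg {
	__u8		nexthdr;
	__u8		hdrlen;		/* (24 / 8) - 1 == 2 */
	__u8		type;		/* 4: segment routing header */
	__u8		segments_left;
	__u8		first_segment;
	__u8		flags;
	__u16		tag;
	struct in6_addr	segments[1];
};

SEC("lwt_in")
int encap_seg6(struct __sk_buff *skb)
{
	struct srh_one_seg srh = {};

	srh.type = 4;
	srh.hdrlen = 2;	/* must satisfy seg6_validate_srh() above */
	/* placeholder SID fc00::1 */
	srh.segments[0].s6_addr[0] = 0xfc;
	srh.segments[0].s6_addr[15] = 0x01;

	/* inline mode requires an IPv6 packet, as checked above */
	if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE,
			       &srh, sizeof(srh)))
		return BPF_DROP;
	return BPF_OK;
}

char _license[] SEC("license") = "GPL";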
61d76980 | 4504 | #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) |
fe94cc29 MX |
4505 | BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, |
4506 | const void *, from, u32, len) | |
4507 | { | |
fe94cc29 MX |
4508 | struct seg6_bpf_srh_state *srh_state = |
4509 | this_cpu_ptr(&seg6_bpf_srh_states); | |
486cdf21 | 4510 | struct ipv6_sr_hdr *srh = srh_state->srh; |
fe94cc29 | 4511 | void *srh_tlvs, *srh_end, *ptr; |
fe94cc29 MX |
4512 | int srhoff = 0; |
4513 | ||
486cdf21 | 4514 | if (srh == NULL) |
fe94cc29 MX |
4515 | return -EINVAL; |
4516 | ||
fe94cc29 MX |
4517 | srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4)); |
4518 | srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen); | |
4519 | ||
4520 | ptr = skb->data + offset; | |
4521 | if (ptr >= srh_tlvs && ptr + len <= srh_end) | |
486cdf21 | 4522 | srh_state->valid = false; |
fe94cc29 MX |
4523 | else if (ptr < (void *)&srh->flags || |
4524 | ptr + len > (void *)&srh->segments) | |
4525 | return -EFAULT; | |
4526 | ||
4527 | if (unlikely(bpf_try_make_writable(skb, offset + len))) | |
4528 | return -EFAULT; | |
486cdf21 MX |
4529 | if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) |
4530 | return -EINVAL; | |
4531 | srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); | |
fe94cc29 MX |
4532 | |
4533 | memcpy(skb->data + offset, from, len); | |
4534 | return 0; | |
fe94cc29 MX |
4535 | } |
4536 | ||
4537 | static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = { | |
4538 | .func = bpf_lwt_seg6_store_bytes, | |
4539 | .gpl_only = false, | |
4540 | .ret_type = RET_INTEGER, | |
4541 | .arg1_type = ARG_PTR_TO_CTX, | |
4542 | .arg2_type = ARG_ANYTHING, | |
4543 | .arg3_type = ARG_PTR_TO_MEM, | |
4544 | .arg4_type = ARG_CONST_SIZE | |
4545 | }; | |
4546 | ||
486cdf21 | 4547 | static void bpf_update_srh_state(struct sk_buff *skb) |
fe94cc29 | 4548 | { |
fe94cc29 MX |
4549 | struct seg6_bpf_srh_state *srh_state = |
4550 | this_cpu_ptr(&seg6_bpf_srh_states); | |
fe94cc29 | 4551 | int srhoff = 0; |
fe94cc29 | 4552 | |
486cdf21 MX |
4553 | if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) { |
4554 | srh_state->srh = NULL; | |
4555 | } else { | |
4556 | srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); | |
4557 | srh_state->hdrlen = srh_state->srh->hdrlen << 3; | |
4558 | srh_state->valid = true; | |
fe94cc29 | 4559 | } |
486cdf21 MX |
4560 | } |
4561 | ||
4562 | BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, | |
4563 | u32, action, void *, param, u32, param_len) | |
4564 | { | |
4565 | struct seg6_bpf_srh_state *srh_state = | |
4566 | this_cpu_ptr(&seg6_bpf_srh_states); | |
4567 | int hdroff = 0; | |
4568 | int err; | |
fe94cc29 MX |
4569 | |
4570 | switch (action) { | |
4571 | case SEG6_LOCAL_ACTION_END_X: | |
486cdf21 MX |
4572 | if (!seg6_bpf_has_valid_srh(skb)) |
4573 | return -EBADMSG; | |
fe94cc29 MX |
4574 | if (param_len != sizeof(struct in6_addr)) |
4575 | return -EINVAL; | |
4576 | return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0); | |
4577 | case SEG6_LOCAL_ACTION_END_T: | |
486cdf21 MX |
4578 | if (!seg6_bpf_has_valid_srh(skb)) |
4579 | return -EBADMSG; | |
fe94cc29 MX |
4580 | if (param_len != sizeof(int)) |
4581 | return -EINVAL; | |
4582 | return seg6_lookup_nexthop(skb, NULL, *(int *)param); | |
486cdf21 MX |
4583 | case SEG6_LOCAL_ACTION_END_DT6: |
4584 | if (!seg6_bpf_has_valid_srh(skb)) | |
4585 | return -EBADMSG; | |
fe94cc29 MX |
4586 | if (param_len != sizeof(int)) |
4587 | return -EINVAL; | |
486cdf21 MX |
4588 | |
4589 | if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0) | |
4590 | return -EBADMSG; | |
4591 | if (!pskb_pull(skb, hdroff)) | |
4592 | return -EBADMSG; | |
4593 | ||
4594 | skb_postpull_rcsum(skb, skb_network_header(skb), hdroff); | |
4595 | skb_reset_network_header(skb); | |
4596 | skb_reset_transport_header(skb); | |
4597 | skb->encapsulation = 0; | |
4598 | ||
4599 | bpf_compute_data_pointers(skb); | |
4600 | bpf_update_srh_state(skb); | |
fe94cc29 MX |
4601 | return seg6_lookup_nexthop(skb, NULL, *(int *)param); |
4602 | case SEG6_LOCAL_ACTION_END_B6: | |
486cdf21 MX |
4603 | if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) |
4604 | return -EBADMSG; | |
fe94cc29 MX |
4605 | err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6_INLINE, |
4606 | param, param_len); | |
4607 | if (!err) | |
486cdf21 MX |
4608 | bpf_update_srh_state(skb); |
4609 | ||
fe94cc29 MX |
4610 | return err; |
4611 | case SEG6_LOCAL_ACTION_END_B6_ENCAP: | |
486cdf21 MX |
4612 | if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) |
4613 | return -EBADMSG; | |
fe94cc29 MX |
4614 | err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6, |
4615 | param, param_len); | |
4616 | if (!err) | |
486cdf21 MX |
4617 | bpf_update_srh_state(skb); |
4618 | ||
fe94cc29 MX |
4619 | return err; |
4620 | default: | |
4621 | return -EINVAL; | |
4622 | } | |
fe94cc29 MX |
4623 | } |
4624 | ||
4625 | static const struct bpf_func_proto bpf_lwt_seg6_action_proto = { | |
4626 | .func = bpf_lwt_seg6_action, | |
4627 | .gpl_only = false, | |
4628 | .ret_type = RET_INTEGER, | |
4629 | .arg1_type = ARG_PTR_TO_CTX, | |
4630 | .arg2_type = ARG_ANYTHING, | |
4631 | .arg3_type = ARG_PTR_TO_MEM, | |
4632 | .arg4_type = ARG_CONST_SIZE | |
4633 | }; | |
4634 | ||
4635 | BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, | |
4636 | s32, len) | |
4637 | { | |
fe94cc29 MX |
4638 | struct seg6_bpf_srh_state *srh_state = |
4639 | this_cpu_ptr(&seg6_bpf_srh_states); | |
486cdf21 | 4640 | struct ipv6_sr_hdr *srh = srh_state->srh; |
fe94cc29 | 4641 | void *srh_end, *srh_tlvs, *ptr; |
fe94cc29 MX |
4642 | struct ipv6hdr *hdr; |
4643 | int srhoff = 0; | |
4644 | int ret; | |
4645 | ||
486cdf21 | 4646 | if (unlikely(srh == NULL)) |
fe94cc29 | 4647 | return -EINVAL; |
fe94cc29 MX |
4648 | |
4649 | srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) + | |
4650 | ((srh->first_segment + 1) << 4)); | |
4651 | srh_end = (void *)((unsigned char *)srh + sizeof(*srh) + | |
4652 | srh_state->hdrlen); | |
4653 | ptr = skb->data + offset; | |
4654 | ||
4655 | if (unlikely(ptr < srh_tlvs || ptr > srh_end)) | |
4656 | return -EFAULT; | |
4657 | if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end)) | |
4658 | return -EFAULT; | |
4659 | ||
4660 | if (len > 0) { | |
4661 | ret = skb_cow_head(skb, len); | |
4662 | if (unlikely(ret < 0)) | |
4663 | return ret; | |
4664 | ||
4665 | ret = bpf_skb_net_hdr_push(skb, offset, len); | |
4666 | } else { | |
4667 | ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len); | |
4668 | } | |
4669 | ||
4670 | bpf_compute_data_pointers(skb); | |
4671 | if (unlikely(ret < 0)) | |
4672 | return ret; | |
4673 | ||
4674 | hdr = (struct ipv6hdr *)skb->data; | |
4675 | hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); | |
4676 | ||
486cdf21 MX |
4677 | if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) |
4678 | return -EINVAL; | |
4679 | srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); | |
fe94cc29 | 4680 | srh_state->hdrlen += len; |
486cdf21 | 4681 | srh_state->valid = false; |
fe94cc29 | 4682 | return 0; |
fe94cc29 MX |
4683 | } |
4684 | ||
4685 | static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = { | |
4686 | .func = bpf_lwt_seg6_adjust_srh, | |
4687 | .gpl_only = false, | |
4688 | .ret_type = RET_INTEGER, | |
4689 | .arg1_type = ARG_PTR_TO_CTX, | |
4690 | .arg2_type = ARG_ANYTHING, | |
4691 | .arg3_type = ARG_ANYTHING, | |
4692 | }; | |
61d76980 | 4693 | #endif /* CONFIG_IPV6_SEG6_BPF */ |
fe94cc29 | 4694 | |
df3f94a0 AB |
4695 | #ifdef CONFIG_INET |
4696 | static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple, | |
4697 | struct sk_buff *skb, u8 family, u8 proto) | |
6acc9b43 | 4698 | { |
6acc9b43 JS |
4699 | bool refcounted = false; |
4700 | struct sock *sk = NULL; | |
67e89ac3 JS |
4701 | int dif = 0; |
4702 | ||
4703 | if (skb->dev) | |
4704 | dif = skb->dev->ifindex; | |
6acc9b43 JS |
4705 | |
4706 | if (family == AF_INET) { | |
4707 | __be32 src4 = tuple->ipv4.saddr; | |
4708 | __be32 dst4 = tuple->ipv4.daddr; | |
4709 | int sdif = inet_sdif(skb); | |
4710 | ||
4711 | if (proto == IPPROTO_TCP) | |
4712 | sk = __inet_lookup(net, &tcp_hashinfo, skb, 0, | |
4713 | src4, tuple->ipv4.sport, | |
4714 | dst4, tuple->ipv4.dport, | |
4715 | dif, sdif, &refcounted); | |
4716 | else | |
4717 | sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport, | |
4718 | dst4, tuple->ipv4.dport, | |
4719 | dif, sdif, &udp_table, skb); | |
d71019b5 | 4720 | #if IS_REACHABLE(CONFIG_IPV6) |
6acc9b43 JS |
4721 | } else { |
4722 | struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr; | |
4723 | struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr; | |
4724 | int sdif = inet6_sdif(skb); | |
4725 | ||
4726 | if (proto == IPPROTO_TCP) | |
4727 | sk = __inet6_lookup(net, &tcp_hashinfo, skb, 0, | |
4728 | src6, tuple->ipv6.sport, | |
4729 | dst6, tuple->ipv6.dport, | |
4730 | dif, sdif, &refcounted); | |
4731 | else | |
4732 | sk = __udp6_lib_lookup(net, src6, tuple->ipv6.sport, | |
4733 | dst6, tuple->ipv6.dport, | |
4734 | dif, sdif, &udp_table, skb); | |
4735 | #endif | |
4736 | } | |
4737 | ||
4738 | if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) { | |
4739 | WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); | |
4740 | sk = NULL; | |
4741 | } | |
4742 | return sk; | |
4743 | } | |
4744 | ||
4745 | /* bpf_sk_lookup performs the core lookup for different types of sockets, | |
4746 | * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE. | |
4747 | * Returns the socket as an 'unsigned long' to simplify the casting in the | |
4748 | * callers to satisfy BPF_CALL declarations. | |
4749 | */ | |
4750 | static unsigned long | |
4751 | bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, | |
4752 | u8 proto, u64 netns_id, u64 flags) | |
4753 | { | |
4754 | struct net *caller_net; | |
4755 | struct sock *sk = NULL; | |
4756 | u8 family = AF_UNSPEC; | |
4757 | struct net *net; | |
4758 | ||
4759 | family = len == sizeof(tuple->ipv4) ? AF_INET : AF_INET6; | |
4760 | if (unlikely(family == AF_UNSPEC || netns_id > U32_MAX || flags)) | |
4761 | goto out; | |
4762 | ||
4763 | if (skb->dev) | |
4764 | caller_net = dev_net(skb->dev); | |
4765 | else | |
4766 | caller_net = sock_net(skb->sk); | |
4767 | if (netns_id) { | |
4768 | net = get_net_ns_by_id(caller_net, netns_id); | |
4769 | if (unlikely(!net)) | |
4770 | goto out; | |
4771 | sk = sk_lookup(net, tuple, skb, family, proto); | |
4772 | put_net(net); | |
4773 | } else { | |
4774 | net = caller_net; | |
4775 | sk = sk_lookup(net, tuple, skb, family, proto); | |
4776 | } | |
4777 | ||
4778 | if (sk) | |
4779 | sk = sk_to_full_sk(sk); | |
4780 | out: | |
4781 | return (unsigned long) sk; | |
4782 | } | |
4783 | ||
4784 | BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb, | |
4785 | struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) | |
4786 | { | |
4787 | return bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags); | |
4788 | } | |
4789 | ||
4790 | static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = { | |
4791 | .func = bpf_sk_lookup_tcp, | |
4792 | .gpl_only = false, | |
4793 | .pkt_access = true, | |
4794 | .ret_type = RET_PTR_TO_SOCKET_OR_NULL, | |
4795 | .arg1_type = ARG_PTR_TO_CTX, | |
4796 | .arg2_type = ARG_PTR_TO_MEM, | |
4797 | .arg3_type = ARG_CONST_SIZE, | |
4798 | .arg4_type = ARG_ANYTHING, | |
4799 | .arg5_type = ARG_ANYTHING, | |
4800 | }; | |
4801 | ||
4802 | BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb, | |
4803 | struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) | |
4804 | { | |
4805 | return bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, netns_id, flags); | |
4806 | } | |
4807 | ||
4808 | static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { | |
4809 | .func = bpf_sk_lookup_udp, | |
4810 | .gpl_only = false, | |
4811 | .pkt_access = true, | |
4812 | .ret_type = RET_PTR_TO_SOCKET_OR_NULL, | |
4813 | .arg1_type = ARG_PTR_TO_CTX, | |
4814 | .arg2_type = ARG_PTR_TO_MEM, | |
4815 | .arg3_type = ARG_CONST_SIZE, | |
4816 | .arg4_type = ARG_ANYTHING, | |
4817 | .arg5_type = ARG_ANYTHING, | |
4818 | }; | |
4819 | ||
4820 | BPF_CALL_1(bpf_sk_release, struct sock *, sk) | |
4821 | { | |
4822 | if (!sock_flag(sk, SOCK_RCU_FREE)) | |
4823 | sock_gen_put(sk); | |
4824 | return 0; | |
4825 | } | |
4826 | ||
4827 | static const struct bpf_func_proto bpf_sk_release_proto = { | |
4828 | .func = bpf_sk_release, | |
4829 | .gpl_only = false, | |
4830 | .ret_type = RET_INTEGER, | |
4831 | .arg1_type = ARG_PTR_TO_SOCKET, | |
4832 | }; | |
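/* Usage note (illustrative, not part of the original source): sockets
 * returned by the two lookup helpers above carry a reference unless
 * they are SOCK_RCU_FREE, so the verifier requires every successful
 * lookup to be balanced by bpf_sk_release(). A minimal sketch,
 * assuming a tuple filled in from the parsed packet:
 *
 *	struct bpf_sock_tuple tuple = { ... };
 *	struct bpf_sock *sk;
 *
 *	sk = bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple.ipv4), 0, 0);
 *	if (sk) {
 *		// inspect sk here
 *		bpf_sk_release(sk);
 *	}
 *
 * A netns_id of 0 searches the caller's own netns; a non-zero id is
 * resolved via get_net_ns_by_id(), as in bpf_sk_lookup() above.
 */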
df3f94a0 | 4833 | #endif /* CONFIG_INET */ |
6acc9b43 | 4834 | |
fe94cc29 MX |
4835 | bool bpf_helper_changes_pkt_data(void *func) |
4836 | { | |
4837 | if (func == bpf_skb_vlan_push || | |
4838 | func == bpf_skb_vlan_pop || | |
4839 | func == bpf_skb_store_bytes || | |
4840 | func == bpf_skb_change_proto || | |
4841 | func == bpf_skb_change_head || | |
0ea488ff | 4842 | func == sk_skb_change_head || |
fe94cc29 | 4843 | func == bpf_skb_change_tail || |
0ea488ff | 4844 | func == sk_skb_change_tail || |
fe94cc29 MX |
4845 | func == bpf_skb_adjust_room || |
4846 | func == bpf_skb_pull_data || | |
0ea488ff | 4847 | func == sk_skb_pull_data || |
fe94cc29 MX |
4848 | func == bpf_clone_redirect || |
4849 | func == bpf_l3_csum_replace || | |
4850 | func == bpf_l4_csum_replace || | |
4851 | func == bpf_xdp_adjust_head || | |
4852 | func == bpf_xdp_adjust_meta || | |
4853 | func == bpf_msg_pull_data || | |
4854 | func == bpf_xdp_adjust_tail || | |
61d76980 | 4855 | #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) |
fe94cc29 MX |
4856 | func == bpf_lwt_seg6_store_bytes || |
4857 | func == bpf_lwt_seg6_adjust_srh || | |
61d76980 MX |
4858 | func == bpf_lwt_seg6_action || |
4859 | #endif | |
4860 | func == bpf_lwt_push_encap) | |
fe94cc29 MX |
4861 | return true; |
4862 | ||
4863 | return false; | |
4864 | } | |
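/* Note (illustrative, not part of the original source): for every
 * helper listed above, the verifier invalidates packet pointers that
 * were derived before the call, so programs must reload them. A
 * minimal sketch of the required pattern:
 *
 *	void *data     = (void *)(long)skb->data;
 *	void *data_end = (void *)(long)skb->data_end;
 *	...
 *	bpf_skb_pull_data(skb, 0);               // changes pkt data
 *	data     = (void *)(long)skb->data;      // stale, reload
 *	data_end = (void *)(long)skb->data_end;  // and re-check bounds
 */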
4865 | ||
d4052c4a | 4866 | static const struct bpf_func_proto * |
2492d3b8 | 4867 | bpf_base_func_proto(enum bpf_func_id func_id) |
89aa0758 AS |
4868 | { |
4869 | switch (func_id) { | |
4870 | case BPF_FUNC_map_lookup_elem: | |
4871 | return &bpf_map_lookup_elem_proto; | |
4872 | case BPF_FUNC_map_update_elem: | |
4873 | return &bpf_map_update_elem_proto; | |
4874 | case BPF_FUNC_map_delete_elem: | |
4875 | return &bpf_map_delete_elem_proto; | |
03e69b50 DB |
4876 | case BPF_FUNC_get_prandom_u32: |
4877 | return &bpf_get_prandom_u32_proto; | |
c04167ce | 4878 | case BPF_FUNC_get_smp_processor_id: |
80b48c44 | 4879 | return &bpf_get_raw_smp_processor_id_proto; |
2d0e30c3 DB |
4880 | case BPF_FUNC_get_numa_node_id: |
4881 | return &bpf_get_numa_node_id_proto; | |
04fd61ab AS |
4882 | case BPF_FUNC_tail_call: |
4883 | return &bpf_tail_call_proto; | |
17ca8cbf DB |
4884 | case BPF_FUNC_ktime_get_ns: |
4885 | return &bpf_ktime_get_ns_proto; | |
0756ea3e | 4886 | case BPF_FUNC_trace_printk: |
1be7f75d AS |
4887 | if (capable(CAP_SYS_ADMIN)) |
4888 | return bpf_get_trace_printk_proto(); | |
2cc0608e | 4889 | /* else: fall through */ |
89aa0758 AS |
4890 | default: |
4891 | return NULL; | |
4892 | } | |
4893 | } | |
4894 | ||
ae2cf1c4 | 4895 | static const struct bpf_func_proto * |
5e43f899 | 4896 | sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
ae2cf1c4 DA |
4897 | { |
4898 | switch (func_id) { | |
4899 | /* inet and inet6 sockets are created in a process | |
4900 | * context so there is always a valid uid/gid | |
4901 | */ | |
4902 | case BPF_FUNC_get_current_uid_gid: | |
4903 | return &bpf_get_current_uid_gid_proto; | |
cd339431 RG |
4904 | case BPF_FUNC_get_local_storage: |
4905 | return &bpf_get_local_storage_proto; | |
ae2cf1c4 DA |
4906 | default: |
4907 | return bpf_base_func_proto(func_id); | |
4908 | } | |
4909 | } | |
4910 | ||
4fbac77d AI |
4911 | static const struct bpf_func_proto * |
4912 | sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
4913 | { | |
4914 | switch (func_id) { | |
4915 | /* inet and inet6 sockets are created in a process | |
4916 | * context so there is always a valid uid/gid | |
4917 | */ | |
4918 | case BPF_FUNC_get_current_uid_gid: | |
4919 | return &bpf_get_current_uid_gid_proto; | |
d74bad4e AI |
4920 | case BPF_FUNC_bind: |
4921 | switch (prog->expected_attach_type) { | |
4922 | case BPF_CGROUP_INET4_CONNECT: | |
4923 | case BPF_CGROUP_INET6_CONNECT: | |
4924 | return &bpf_bind_proto; | |
4925 | default: | |
4926 | return NULL; | |
4927 | } | |
d692f113 AI |
4928 | case BPF_FUNC_get_socket_cookie: |
4929 | return &bpf_get_socket_cookie_sock_addr_proto; | |
cd339431 RG |
4930 | case BPF_FUNC_get_local_storage: |
4931 | return &bpf_get_local_storage_proto; | |
4fbac77d AI |
4932 | default: |
4933 | return bpf_base_func_proto(func_id); | |
4934 | } | |
4935 | } | |
4936 | ||
2492d3b8 | 4937 | static const struct bpf_func_proto * |
5e43f899 | 4938 | sk_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
2492d3b8 DB |
4939 | { |
4940 | switch (func_id) { | |
4941 | case BPF_FUNC_skb_load_bytes: | |
4942 | return &bpf_skb_load_bytes_proto; | |
4e1ec56c DB |
4943 | case BPF_FUNC_skb_load_bytes_relative: |
4944 | return &bpf_skb_load_bytes_relative_proto; | |
91b8270f CF |
4945 | case BPF_FUNC_get_socket_cookie: |
4946 | return &bpf_get_socket_cookie_proto; | |
6acc5c29 CF |
4947 | case BPF_FUNC_get_socket_uid: |
4948 | return &bpf_get_socket_uid_proto; | |
2492d3b8 DB |
4949 | default: |
4950 | return bpf_base_func_proto(func_id); | |
4951 | } | |
4952 | } | |
4953 | ||
cd339431 RG |
4954 | static const struct bpf_func_proto * |
4955 | cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
4956 | { | |
4957 | switch (func_id) { | |
4958 | case BPF_FUNC_get_local_storage: | |
4959 | return &bpf_get_local_storage_proto; | |
4960 | default: | |
4961 | return sk_filter_func_proto(func_id, prog); | |
4962 | } | |
4963 | } | |
4964 | ||
608cd71a | 4965 | static const struct bpf_func_proto * |
5e43f899 | 4966 | tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
608cd71a AS |
4967 | { |
4968 | switch (func_id) { | |
4969 | case BPF_FUNC_skb_store_bytes: | |
4970 | return &bpf_skb_store_bytes_proto; | |
05c74e5e DB |
4971 | case BPF_FUNC_skb_load_bytes: |
4972 | return &bpf_skb_load_bytes_proto; | |
4e1ec56c DB |
4973 | case BPF_FUNC_skb_load_bytes_relative: |
4974 | return &bpf_skb_load_bytes_relative_proto; | |
36bbef52 DB |
4975 | case BPF_FUNC_skb_pull_data: |
4976 | return &bpf_skb_pull_data_proto; | |
7d672345 DB |
4977 | case BPF_FUNC_csum_diff: |
4978 | return &bpf_csum_diff_proto; | |
36bbef52 DB |
4979 | case BPF_FUNC_csum_update: |
4980 | return &bpf_csum_update_proto; | |
91bc4822 AS |
4981 | case BPF_FUNC_l3_csum_replace: |
4982 | return &bpf_l3_csum_replace_proto; | |
4983 | case BPF_FUNC_l4_csum_replace: | |
4984 | return &bpf_l4_csum_replace_proto; | |
3896d655 AS |
4985 | case BPF_FUNC_clone_redirect: |
4986 | return &bpf_clone_redirect_proto; | |
8d20aabe DB |
4987 | case BPF_FUNC_get_cgroup_classid: |
4988 | return &bpf_get_cgroup_classid_proto; | |
4e10df9a AS |
4989 | case BPF_FUNC_skb_vlan_push: |
4990 | return &bpf_skb_vlan_push_proto; | |
4991 | case BPF_FUNC_skb_vlan_pop: | |
4992 | return &bpf_skb_vlan_pop_proto; | |
6578171a DB |
4993 | case BPF_FUNC_skb_change_proto: |
4994 | return &bpf_skb_change_proto_proto; | |
d2485c42 DB |
4995 | case BPF_FUNC_skb_change_type: |
4996 | return &bpf_skb_change_type_proto; | |
2be7e212 DB |
4997 | case BPF_FUNC_skb_adjust_room: |
4998 | return &bpf_skb_adjust_room_proto; | |
5293efe6 DB |
4999 | case BPF_FUNC_skb_change_tail: |
5000 | return &bpf_skb_change_tail_proto; | |
d3aa45ce AS |
5001 | case BPF_FUNC_skb_get_tunnel_key: |
5002 | return &bpf_skb_get_tunnel_key_proto; | |
5003 | case BPF_FUNC_skb_set_tunnel_key: | |
14ca0751 DB |
5004 | return bpf_get_skb_set_tunnel_proto(func_id); |
5005 | case BPF_FUNC_skb_get_tunnel_opt: | |
5006 | return &bpf_skb_get_tunnel_opt_proto; | |
5007 | case BPF_FUNC_skb_set_tunnel_opt: | |
5008 | return bpf_get_skb_set_tunnel_proto(func_id); | |
27b29f63 AS |
5009 | case BPF_FUNC_redirect: |
5010 | return &bpf_redirect_proto; | |
c46646d0 DB |
5011 | case BPF_FUNC_get_route_realm: |
5012 | return &bpf_get_route_realm_proto; | |
13c5c240 DB |
5013 | case BPF_FUNC_get_hash_recalc: |
5014 | return &bpf_get_hash_recalc_proto; | |
7a4b28c6 DB |
5015 | case BPF_FUNC_set_hash_invalid: |
5016 | return &bpf_set_hash_invalid_proto; | |
ded092cd DB |
5017 | case BPF_FUNC_set_hash: |
5018 | return &bpf_set_hash_proto; | |
bd570ff9 | 5019 | case BPF_FUNC_perf_event_output: |
555c8a86 | 5020 | return &bpf_skb_event_output_proto; |
80b48c44 DB |
5021 | case BPF_FUNC_get_smp_processor_id: |
5022 | return &bpf_get_smp_processor_id_proto; | |
747ea55e DB |
5023 | case BPF_FUNC_skb_under_cgroup: |
5024 | return &bpf_skb_under_cgroup_proto; | |
91b8270f CF |
5025 | case BPF_FUNC_get_socket_cookie: |
5026 | return &bpf_get_socket_cookie_proto; | |
6acc5c29 CF |
5027 | case BPF_FUNC_get_socket_uid: |
5028 | return &bpf_get_socket_uid_proto; | |
cb20b08e DB |
5029 | case BPF_FUNC_fib_lookup: |
5030 | return &bpf_skb_fib_lookup_proto; | |
12bed760 EB |
5031 | #ifdef CONFIG_XFRM |
5032 | case BPF_FUNC_skb_get_xfrm_state: | |
5033 | return &bpf_skb_get_xfrm_state_proto; | |
5034 | #endif | |
cb20b08e DB |
5035 | #ifdef CONFIG_SOCK_CGROUP_DATA |
5036 | case BPF_FUNC_skb_cgroup_id: | |
5037 | return &bpf_skb_cgroup_id_proto; | |
77236281 AI |
5038 | case BPF_FUNC_skb_ancestor_cgroup_id: |
5039 | return &bpf_skb_ancestor_cgroup_id_proto; | |
cb20b08e | 5040 | #endif |
df3f94a0 | 5041 | #ifdef CONFIG_INET |
6acc9b43 JS |
5042 | case BPF_FUNC_sk_lookup_tcp: |
5043 | return &bpf_sk_lookup_tcp_proto; | |
5044 | case BPF_FUNC_sk_lookup_udp: | |
5045 | return &bpf_sk_lookup_udp_proto; | |
5046 | case BPF_FUNC_sk_release: | |
5047 | return &bpf_sk_release_proto; | |
df3f94a0 | 5048 | #endif |
608cd71a | 5049 | default: |
2492d3b8 | 5050 | return bpf_base_func_proto(func_id); |
608cd71a AS |
5051 | } |
5052 | } | |
5053 | ||
6a773a15 | 5054 | static const struct bpf_func_proto * |
5e43f899 | 5055 | xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
6a773a15 | 5056 | { |
4de16969 DB |
5057 | switch (func_id) { |
5058 | case BPF_FUNC_perf_event_output: | |
5059 | return &bpf_xdp_event_output_proto; | |
669dc4d7 DB |
5060 | case BPF_FUNC_get_smp_processor_id: |
5061 | return &bpf_get_smp_processor_id_proto; | |
205c3807 DB |
5062 | case BPF_FUNC_csum_diff: |
5063 | return &bpf_csum_diff_proto; | |
17bedab2 MKL |
5064 | case BPF_FUNC_xdp_adjust_head: |
5065 | return &bpf_xdp_adjust_head_proto; | |
de8f3a83 DB |
5066 | case BPF_FUNC_xdp_adjust_meta: |
5067 | return &bpf_xdp_adjust_meta_proto; | |
814abfab JF |
5068 | case BPF_FUNC_redirect: |
5069 | return &bpf_xdp_redirect_proto; | |
97f91a7c | 5070 | case BPF_FUNC_redirect_map: |
e4a8e817 | 5071 | return &bpf_xdp_redirect_map_proto; |
b32cc5b9 NS |
5072 | case BPF_FUNC_xdp_adjust_tail: |
5073 | return &bpf_xdp_adjust_tail_proto; | |
87f5fc7e DA |
5074 | case BPF_FUNC_fib_lookup: |
5075 | return &bpf_xdp_fib_lookup_proto; | |
4de16969 | 5076 | default: |
2492d3b8 | 5077 | return bpf_base_func_proto(func_id); |
4de16969 | 5078 | } |
6a773a15 BB |
5079 | } |
5080 | ||
604326b4 DB |
5081 | const struct bpf_func_proto bpf_sock_map_update_proto __weak; |
5082 | const struct bpf_func_proto bpf_sock_hash_update_proto __weak; | |
5083 | ||
8c4b4c7e | 5084 | static const struct bpf_func_proto * |
5e43f899 | 5085 | sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
8c4b4c7e LB |
5086 | { |
5087 | switch (func_id) { | |
5088 | case BPF_FUNC_setsockopt: | |
5089 | return &bpf_setsockopt_proto; | |
cd86d1fd LB |
5090 | case BPF_FUNC_getsockopt: |
5091 | return &bpf_getsockopt_proto; | |
b13d8807 LB |
5092 | case BPF_FUNC_sock_ops_cb_flags_set: |
5093 | return &bpf_sock_ops_cb_flags_set_proto; | |
174a79ff JF |
5094 | case BPF_FUNC_sock_map_update: |
5095 | return &bpf_sock_map_update_proto; | |
81110384 JF |
5096 | case BPF_FUNC_sock_hash_update: |
5097 | return &bpf_sock_hash_update_proto; | |
d692f113 AI |
5098 | case BPF_FUNC_get_socket_cookie: |
5099 | return &bpf_get_socket_cookie_sock_ops_proto; | |
cd339431 RG |
5100 | case BPF_FUNC_get_local_storage: |
5101 | return &bpf_get_local_storage_proto; | |
8c4b4c7e LB |
5102 | default: |
5103 | return bpf_base_func_proto(func_id); | |
5104 | } | |
5105 | } | |
5106 | ||
604326b4 DB |
5107 | const struct bpf_func_proto bpf_msg_redirect_map_proto __weak; |
5108 | const struct bpf_func_proto bpf_msg_redirect_hash_proto __weak; | |
5109 | ||
5e43f899 AI |
5110 | static const struct bpf_func_proto * |
5111 | sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
4f738adb JF |
5112 | { |
5113 | switch (func_id) { | |
5114 | case BPF_FUNC_msg_redirect_map: | |
5115 | return &bpf_msg_redirect_map_proto; | |
81110384 JF |
5116 | case BPF_FUNC_msg_redirect_hash: |
5117 | return &bpf_msg_redirect_hash_proto; | |
2a100317 JF |
5118 | case BPF_FUNC_msg_apply_bytes: |
5119 | return &bpf_msg_apply_bytes_proto; | |
91843d54 JF |
5120 | case BPF_FUNC_msg_cork_bytes: |
5121 | return &bpf_msg_cork_bytes_proto; | |
015632bb JF |
5122 | case BPF_FUNC_msg_pull_data: |
5123 | return &bpf_msg_pull_data_proto; | |
cd339431 RG |
5124 | case BPF_FUNC_get_local_storage: |
5125 | return &bpf_get_local_storage_proto; | |
4f738adb JF |
5126 | default: |
5127 | return bpf_base_func_proto(func_id); | |
5128 | } | |
5129 | } | |
5130 | ||
604326b4 DB |
5131 | const struct bpf_func_proto bpf_sk_redirect_map_proto __weak; |
5132 | const struct bpf_func_proto bpf_sk_redirect_hash_proto __weak; | |
5133 | ||
5e43f899 AI |
5134 | static const struct bpf_func_proto * |
5135 | sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
b005fd18 JF |
5136 | { |
5137 | switch (func_id) { | |
8a31db56 JF |
5138 | case BPF_FUNC_skb_store_bytes: |
5139 | return &bpf_skb_store_bytes_proto; | |
b005fd18 JF |
5140 | case BPF_FUNC_skb_load_bytes: |
5141 | return &bpf_skb_load_bytes_proto; | |
8a31db56 | 5142 | case BPF_FUNC_skb_pull_data: |
0ea488ff | 5143 | return &sk_skb_pull_data_proto; |
8a31db56 | 5144 | case BPF_FUNC_skb_change_tail: |
0ea488ff | 5145 | return &sk_skb_change_tail_proto; |
8a31db56 | 5146 | case BPF_FUNC_skb_change_head: |
0ea488ff | 5147 | return &sk_skb_change_head_proto; |
b005fd18 JF |
5148 | case BPF_FUNC_get_socket_cookie: |
5149 | return &bpf_get_socket_cookie_proto; | |
5150 | case BPF_FUNC_get_socket_uid: | |
5151 | return &bpf_get_socket_uid_proto; | |
174a79ff JF |
5152 | case BPF_FUNC_sk_redirect_map: |
5153 | return &bpf_sk_redirect_map_proto; | |
81110384 JF |
5154 | case BPF_FUNC_sk_redirect_hash: |
5155 | return &bpf_sk_redirect_hash_proto; | |
cd339431 RG |
5156 | case BPF_FUNC_get_local_storage: |
5157 | return &bpf_get_local_storage_proto; | |
df3f94a0 | 5158 | #ifdef CONFIG_INET |
6acc9b43 JS |
5159 | case BPF_FUNC_sk_lookup_tcp: |
5160 | return &bpf_sk_lookup_tcp_proto; | |
5161 | case BPF_FUNC_sk_lookup_udp: | |
5162 | return &bpf_sk_lookup_udp_proto; | |
5163 | case BPF_FUNC_sk_release: | |
5164 | return &bpf_sk_release_proto; | |
df3f94a0 | 5165 | #endif |
b005fd18 JF |
5166 | default: |
5167 | return bpf_base_func_proto(func_id); | |
5168 | } | |
5169 | } | |
5170 | ||
d58e468b PP |
5171 | static const struct bpf_func_proto * |
5172 | flow_dissector_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
5173 | { | |
5174 | switch (func_id) { | |
5175 | case BPF_FUNC_skb_load_bytes: | |
5176 | return &bpf_skb_load_bytes_proto; | |
5177 | default: | |
5178 | return bpf_base_func_proto(func_id); | |
5179 | } | |
5180 | } | |
5181 | ||
cd3092c7 MX |
5182 | static const struct bpf_func_proto * |
5183 | lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
5184 | { | |
5185 | switch (func_id) { | |
5186 | case BPF_FUNC_skb_load_bytes: | |
5187 | return &bpf_skb_load_bytes_proto; | |
5188 | case BPF_FUNC_skb_pull_data: | |
5189 | return &bpf_skb_pull_data_proto; | |
5190 | case BPF_FUNC_csum_diff: | |
5191 | return &bpf_csum_diff_proto; | |
5192 | case BPF_FUNC_get_cgroup_classid: | |
5193 | return &bpf_get_cgroup_classid_proto; | |
5194 | case BPF_FUNC_get_route_realm: | |
5195 | return &bpf_get_route_realm_proto; | |
5196 | case BPF_FUNC_get_hash_recalc: | |
5197 | return &bpf_get_hash_recalc_proto; | |
5198 | case BPF_FUNC_perf_event_output: | |
5199 | return &bpf_skb_event_output_proto; | |
5200 | case BPF_FUNC_get_smp_processor_id: | |
5201 | return &bpf_get_smp_processor_id_proto; | |
5202 | case BPF_FUNC_skb_under_cgroup: | |
5203 | return &bpf_skb_under_cgroup_proto; | |
5204 | default: | |
5205 | return bpf_base_func_proto(func_id); | |
5206 | } | |
5207 | } | |
5208 | ||
5209 | static const struct bpf_func_proto * | |
5210 | lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
5211 | { | |
5212 | switch (func_id) { | |
5213 | case BPF_FUNC_lwt_push_encap: | |
5214 | return &bpf_lwt_push_encap_proto; | |
5215 | default: | |
5216 | return lwt_out_func_proto(func_id, prog); | |
5217 | } | |
5218 | } | |
5219 | ||
3a0af8fd | 5220 | static const struct bpf_func_proto * |
5e43f899 | 5221 | lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
3a0af8fd TG |
5222 | { |
5223 | switch (func_id) { | |
5224 | case BPF_FUNC_skb_get_tunnel_key: | |
5225 | return &bpf_skb_get_tunnel_key_proto; | |
5226 | case BPF_FUNC_skb_set_tunnel_key: | |
5227 | return bpf_get_skb_set_tunnel_proto(func_id); | |
5228 | case BPF_FUNC_skb_get_tunnel_opt: | |
5229 | return &bpf_skb_get_tunnel_opt_proto; | |
5230 | case BPF_FUNC_skb_set_tunnel_opt: | |
5231 | return bpf_get_skb_set_tunnel_proto(func_id); | |
5232 | case BPF_FUNC_redirect: | |
5233 | return &bpf_redirect_proto; | |
5234 | case BPF_FUNC_clone_redirect: | |
5235 | return &bpf_clone_redirect_proto; | |
5236 | case BPF_FUNC_skb_change_tail: | |
5237 | return &bpf_skb_change_tail_proto; | |
5238 | case BPF_FUNC_skb_change_head: | |
5239 | return &bpf_skb_change_head_proto; | |
5240 | case BPF_FUNC_skb_store_bytes: | |
5241 | return &bpf_skb_store_bytes_proto; | |
5242 | case BPF_FUNC_csum_update: | |
5243 | return &bpf_csum_update_proto; | |
5244 | case BPF_FUNC_l3_csum_replace: | |
5245 | return &bpf_l3_csum_replace_proto; | |
5246 | case BPF_FUNC_l4_csum_replace: | |
5247 | return &bpf_l4_csum_replace_proto; | |
5248 | case BPF_FUNC_set_hash_invalid: | |
5249 | return &bpf_set_hash_invalid_proto; | |
5250 | default: | |
cd3092c7 | 5251 | return lwt_out_func_proto(func_id, prog); |
3a0af8fd TG |
5252 | } |
5253 | } | |
5254 | ||
004d4b27 MX |
5255 | static const struct bpf_func_proto * |
5256 | lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) | |
5257 | { | |
5258 | switch (func_id) { | |
61d76980 | 5259 | #if IS_ENABLED(CONFIG_IPV6_SEG6_BPF) |
004d4b27 MX |
5260 | case BPF_FUNC_lwt_seg6_store_bytes: |
5261 | return &bpf_lwt_seg6_store_bytes_proto; | |
5262 | case BPF_FUNC_lwt_seg6_action: | |
5263 | return &bpf_lwt_seg6_action_proto; | |
5264 | case BPF_FUNC_lwt_seg6_adjust_srh: | |
5265 | return &bpf_lwt_seg6_adjust_srh_proto; | |
61d76980 | 5266 | #endif |
004d4b27 MX |
5267 | default: |
5268 | return lwt_out_func_proto(func_id, prog); | |
3a0af8fd TG |
5269 | } |
5270 | } | |
5271 | ||
f96da094 | 5272 | static bool bpf_skb_is_valid_access(int off, int size, enum bpf_access_type type, |
5e43f899 | 5273 | const struct bpf_prog *prog, |
f96da094 | 5274 | struct bpf_insn_access_aux *info) |
23994631 | 5275 | { |
f96da094 | 5276 | const int size_default = sizeof(__u32); |
23994631 | 5277 | |
9bac3d6d AS |
5278 | if (off < 0 || off >= sizeof(struct __sk_buff)) |
5279 | return false; | |
62c7989b | 5280 | |
4936e352 | 5281 | /* The verifier guarantees that size > 0. */ |
9bac3d6d AS |
5282 | if (off % size != 0) |
5283 | return false; | |
62c7989b DB |
5284 | |
5285 | switch (off) { | |
f96da094 DB |
5286 | case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): |
5287 | if (off + size > offsetofend(struct __sk_buff, cb[4])) | |
62c7989b DB |
5288 | return false; |
5289 | break; | |
8a31db56 JF |
5290 | case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]): |
5291 | case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]): | |
5292 | case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4): | |
5293 | case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4): | |
f96da094 | 5294 | case bpf_ctx_range(struct __sk_buff, data): |
de8f3a83 | 5295 | case bpf_ctx_range(struct __sk_buff, data_meta): |
f96da094 DB |
5296 | case bpf_ctx_range(struct __sk_buff, data_end): |
5297 | if (size != size_default) | |
23994631 | 5298 | return false; |
31fd8581 | 5299 | break; |
d58e468b PP |
5300 | case bpf_ctx_range(struct __sk_buff, flow_keys): |
5301 | if (size != sizeof(struct bpf_flow_keys *)) | |
5302 | return false; | |
5303 | break; | |
31fd8581 | 5304 | default: |
f96da094 | 5305 | /* Only narrow read access allowed for now. */ |
31fd8581 | 5306 | if (type == BPF_WRITE) { |
f96da094 | 5307 | if (size != size_default) |
31fd8581 YS |
5308 | return false; |
5309 | } else { | |
f96da094 DB |
5310 | bpf_ctx_record_field_size(info, size_default); |
5311 | if (!bpf_ctx_narrow_access_ok(off, size, size_default)) | |
23994631 | 5312 | return false; |
31fd8581 | 5313 | } |
62c7989b | 5314 | } |
9bac3d6d AS |
5315 | |
5316 | return true; | |
5317 | } | |
5318 | ||
d691f9e8 | 5319 | static bool sk_filter_is_valid_access(int off, int size, |
19de99f7 | 5320 | enum bpf_access_type type, |
5e43f899 | 5321 | const struct bpf_prog *prog, |
23994631 | 5322 | struct bpf_insn_access_aux *info) |
d691f9e8 | 5323 | { |
db58ba45 | 5324 | switch (off) { |
f96da094 DB |
5325 | case bpf_ctx_range(struct __sk_buff, tc_classid): |
5326 | case bpf_ctx_range(struct __sk_buff, data): | |
de8f3a83 | 5327 | case bpf_ctx_range(struct __sk_buff, data_meta): |
f96da094 | 5328 | case bpf_ctx_range(struct __sk_buff, data_end): |
d58e468b | 5329 | case bpf_ctx_range(struct __sk_buff, flow_keys): |
8a31db56 | 5330 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): |
045efa82 | 5331 | return false; |
db58ba45 | 5332 | } |
045efa82 | 5333 | |
d691f9e8 AS |
5334 | if (type == BPF_WRITE) { |
5335 | switch (off) { | |
f96da094 | 5336 | case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): |
d691f9e8 AS |
5337 | break; |
5338 | default: | |
5339 | return false; | |
5340 | } | |
5341 | } | |
5342 | ||
5e43f899 | 5343 | return bpf_skb_is_valid_access(off, size, type, prog, info); |
d691f9e8 AS |
5344 | } |
5345 | ||
3a0af8fd TG |
5346 | static bool lwt_is_valid_access(int off, int size, |
5347 | enum bpf_access_type type, | |
5e43f899 | 5348 | const struct bpf_prog *prog, |
23994631 | 5349 | struct bpf_insn_access_aux *info) |
3a0af8fd TG |
5350 | { |
5351 | switch (off) { | |
f96da094 | 5352 | case bpf_ctx_range(struct __sk_buff, tc_classid): |
8a31db56 | 5353 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): |
de8f3a83 | 5354 | case bpf_ctx_range(struct __sk_buff, data_meta): |
d58e468b | 5355 | case bpf_ctx_range(struct __sk_buff, flow_keys): |
3a0af8fd TG |
5356 | return false; |
5357 | } | |
5358 | ||
5359 | if (type == BPF_WRITE) { | |
5360 | switch (off) { | |
f96da094 DB |
5361 | case bpf_ctx_range(struct __sk_buff, mark): |
5362 | case bpf_ctx_range(struct __sk_buff, priority): | |
5363 | case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): | |
3a0af8fd TG |
5364 | break; |
5365 | default: | |
5366 | return false; | |
5367 | } | |
5368 | } | |
5369 | ||
f96da094 DB |
5370 | switch (off) { |
5371 | case bpf_ctx_range(struct __sk_buff, data): | |
5372 | info->reg_type = PTR_TO_PACKET; | |
5373 | break; | |
5374 | case bpf_ctx_range(struct __sk_buff, data_end): | |
5375 | info->reg_type = PTR_TO_PACKET_END; | |
5376 | break; | |
5377 | } | |
5378 | ||
5e43f899 | 5379 | return bpf_skb_is_valid_access(off, size, type, prog, info); |
3a0af8fd TG |
5380 | } |
5381 | ||
aac3fc32 AI |
5382 | /* Attach type specific accesses */ |
5383 | static bool __sock_filter_check_attach_type(int off, | |
5384 | enum bpf_access_type access_type, | |
5385 | enum bpf_attach_type attach_type) | |
61023658 | 5386 | { |
aac3fc32 AI |
5387 | switch (off) { |
5388 | case offsetof(struct bpf_sock, bound_dev_if): | |
5389 | case offsetof(struct bpf_sock, mark): | |
5390 | case offsetof(struct bpf_sock, priority): | |
5391 | switch (attach_type) { | |
5392 | case BPF_CGROUP_INET_SOCK_CREATE: | |
5393 | goto full_access; | |
5394 | default: | |
5395 | return false; | |
5396 | } | |
5397 | case bpf_ctx_range(struct bpf_sock, src_ip4): | |
5398 | switch (attach_type) { | |
5399 | case BPF_CGROUP_INET4_POST_BIND: | |
5400 | goto read_only; | |
5401 | default: | |
5402 | return false; | |
5403 | } | |
5404 | case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): | |
5405 | switch (attach_type) { | |
5406 | case BPF_CGROUP_INET6_POST_BIND: | |
5407 | goto read_only; | |
5408 | default: | |
5409 | return false; | |
5410 | } | |
5411 | case bpf_ctx_range(struct bpf_sock, src_port): | |
5412 | switch (attach_type) { | |
5413 | case BPF_CGROUP_INET4_POST_BIND: | |
5414 | case BPF_CGROUP_INET6_POST_BIND: | |
5415 | goto read_only; | |
61023658 DA |
5416 | default: |
5417 | return false; | |
5418 | } | |
5419 | } | |
aac3fc32 AI |
5420 | read_only: |
5421 | return access_type == BPF_READ; | |
5422 | full_access: | |
5423 | return true; | |
5424 | } | |
5425 | ||
5426 | static bool __sock_filter_check_size(int off, int size, | |
5427 | struct bpf_insn_access_aux *info) | |
5428 | { | |
5429 | const int size_default = sizeof(__u32); | |
61023658 | 5430 | |
aac3fc32 AI |
5431 | switch (off) { |
5432 | case bpf_ctx_range(struct bpf_sock, src_ip4): | |
5433 | case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): | |
5434 | bpf_ctx_record_field_size(info, size_default); | |
5435 | return bpf_ctx_narrow_access_ok(off, size, size_default); | |
5436 | } | |
5437 | ||
5438 | return size == size_default; | |
5439 | } | |
5440 | ||
c64b7983 JS |
5441 | bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, |
5442 | struct bpf_insn_access_aux *info) | |
aac3fc32 AI |
5443 | { |
5444 | if (off < 0 || off >= sizeof(struct bpf_sock)) | |
61023658 | 5445 | return false; |
61023658 DA |
5446 | if (off % size != 0) |
5447 | return false; | |
aac3fc32 | 5448 | if (!__sock_filter_check_size(off, size, info)) |
61023658 | 5449 | return false; |
61023658 DA |
5450 | return true; |
5451 | } | |
5452 | ||
c64b7983 JS |
5453 | static bool sock_filter_is_valid_access(int off, int size, |
5454 | enum bpf_access_type type, | |
5455 | const struct bpf_prog *prog, | |
5456 | struct bpf_insn_access_aux *info) | |
5457 | { | |
5458 | if (!bpf_sock_is_valid_access(off, size, type, info)) | |
5459 | return false; | |
5460 | return __sock_filter_check_attach_type(off, type, | |
5461 | prog->expected_attach_type); | |
5462 | } | |
5463 | ||
047b0ecd DB |
5464 | static int bpf_unclone_prologue(struct bpf_insn *insn_buf, bool direct_write, |
5465 | const struct bpf_prog *prog, int drop_verdict) | |
36bbef52 DB |
5466 | { |
5467 | struct bpf_insn *insn = insn_buf; | |
5468 | ||
5469 | if (!direct_write) | |
5470 | return 0; | |
5471 | ||
5472 | /* if (!skb->cloned) | |
5473 | * goto start; | |
5474 | * | |
5475 | * (Fast path: otherwise we conservatively assume we might be
5476 | * a clone and do the rest in the helper.)
5477 | */ | |
5478 | *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET()); | |
5479 | *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); | |
5480 | *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); | |
5481 | ||
5482 | /* ret = bpf_skb_pull_data(skb, 0); */ | |
5483 | *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); | |
5484 | *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2); | |
5485 | *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | |
5486 | BPF_FUNC_skb_pull_data); | |
5487 | /* if (!ret) | |
5488 | * goto restore; | |
5489 | * return TC_ACT_SHOT; | |
5490 | */ | |
5491 | *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2); | |
047b0ecd | 5492 | *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict); |
36bbef52 DB |
5493 | *insn++ = BPF_EXIT_INSN(); |
5494 | ||
5495 | /* restore: */ | |
5496 | *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); | |
5497 | /* start: */ | |
5498 | *insn++ = prog->insnsi[0]; | |
5499 | ||
5500 | return insn - insn_buf; | |
5501 | } | |
5502 | ||
e0cea7ce DB |
5503 | static int bpf_gen_ld_abs(const struct bpf_insn *orig, |
5504 | struct bpf_insn *insn_buf) | |
5505 | { | |
5506 | bool indirect = BPF_MODE(orig->code) == BPF_IND; | |
5507 | struct bpf_insn *insn = insn_buf; | |
5508 | ||
5509 | /* We're guaranteed here that CTX is in R6. */ | |
5510 | *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); | |
5511 | if (!indirect) { | |
5512 | *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm); | |
5513 | } else { | |
5514 | *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg); | |
5515 | if (orig->imm) | |
5516 | *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm); | |
5517 | } | |
5518 | ||
5519 | switch (BPF_SIZE(orig->code)) { | |
5520 | case BPF_B: | |
5521 | *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8_no_cache); | |
5522 | break; | |
5523 | case BPF_H: | |
5524 | *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16_no_cache); | |
5525 | break; | |
5526 | case BPF_W: | |
5527 | *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32_no_cache); | |
5528 | break; | |
5529 | } | |
5530 | ||
5531 | *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2); | |
5532 | *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0); | |
5533 | *insn++ = BPF_EXIT_INSN(); | |
5534 | ||
5535 | return insn - insn_buf; | |
5536 | } | |
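/* Note (illustrative, not part of the original source): the sequence
 * emitted above rewrites a classic LD_ABS/LD_IND into a helper call.
 * Roughly, "ldh [12]" (load a half-word at absolute offset 12)
 * becomes:
 *
 *	ret = bpf_skb_load_helper_16_no_cache(skb, 12);
 *	if (ret < 0)
 *		return 0;	// classic BPF semantics: drop on bad load
 *	R0 = ret;		// low 16 bits hold the loaded value
 */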
5537 | ||
047b0ecd DB |
5538 | static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write, |
5539 | const struct bpf_prog *prog) | |
5540 | { | |
5541 | return bpf_unclone_prologue(insn_buf, direct_write, prog, TC_ACT_SHOT); | |
5542 | } | |
5543 | ||
d691f9e8 | 5544 | static bool tc_cls_act_is_valid_access(int off, int size, |
19de99f7 | 5545 | enum bpf_access_type type, |
5e43f899 | 5546 | const struct bpf_prog *prog, |
23994631 | 5547 | struct bpf_insn_access_aux *info) |
d691f9e8 AS |
5548 | { |
5549 | if (type == BPF_WRITE) { | |
5550 | switch (off) { | |
f96da094 DB |
5551 | case bpf_ctx_range(struct __sk_buff, mark): |
5552 | case bpf_ctx_range(struct __sk_buff, tc_index): | |
5553 | case bpf_ctx_range(struct __sk_buff, priority): | |
5554 | case bpf_ctx_range(struct __sk_buff, tc_classid): | |
5555 | case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): | |
d691f9e8 AS |
5556 | break; |
5557 | default: | |
5558 | return false; | |
5559 | } | |
5560 | } | |
19de99f7 | 5561 | |
f96da094 DB |
5562 | switch (off) { |
5563 | case bpf_ctx_range(struct __sk_buff, data): | |
5564 | info->reg_type = PTR_TO_PACKET; | |
5565 | break; | |
de8f3a83 DB |
5566 | case bpf_ctx_range(struct __sk_buff, data_meta): |
5567 | info->reg_type = PTR_TO_PACKET_META; | |
5568 | break; | |
f96da094 DB |
5569 | case bpf_ctx_range(struct __sk_buff, data_end): |
5570 | info->reg_type = PTR_TO_PACKET_END; | |
5571 | break; | |
d58e468b | 5572 | case bpf_ctx_range(struct __sk_buff, flow_keys): |
8a31db56 JF |
5573 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): |
5574 | return false; | |
f96da094 DB |
5575 | } |
5576 | ||
5e43f899 | 5577 | return bpf_skb_is_valid_access(off, size, type, prog, info); |
d691f9e8 AS |
5578 | } |
5579 | ||
1afaf661 | 5580 | static bool __is_valid_xdp_access(int off, int size) |
6a773a15 BB |
5581 | { |
5582 | if (off < 0 || off >= sizeof(struct xdp_md)) | |
5583 | return false; | |
5584 | if (off % size != 0) | |
5585 | return false; | |
6088b582 | 5586 | if (size != sizeof(__u32)) |
6a773a15 BB |
5587 | return false; |
5588 | ||
5589 | return true; | |
5590 | } | |
5591 | ||
5592 | static bool xdp_is_valid_access(int off, int size, | |
5593 | enum bpf_access_type type, | |
5e43f899 | 5594 | const struct bpf_prog *prog, |
23994631 | 5595 | struct bpf_insn_access_aux *info) |
6a773a15 | 5596 | { |
0d830032 JK |
5597 | if (type == BPF_WRITE) { |
5598 | if (bpf_prog_is_dev_bound(prog->aux)) { | |
5599 | switch (off) { | |
5600 | case offsetof(struct xdp_md, rx_queue_index): | |
5601 | return __is_valid_xdp_access(off, size); | |
5602 | } | |
5603 | } | |
6a773a15 | 5604 | return false; |
0d830032 | 5605 | } |
6a773a15 BB |
5606 | |
5607 | switch (off) { | |
5608 | case offsetof(struct xdp_md, data): | |
23994631 | 5609 | info->reg_type = PTR_TO_PACKET; |
6a773a15 | 5610 | break; |
de8f3a83 DB |
5611 | case offsetof(struct xdp_md, data_meta): |
5612 | info->reg_type = PTR_TO_PACKET_META; | |
5613 | break; | |
6a773a15 | 5614 | case offsetof(struct xdp_md, data_end): |
23994631 | 5615 | info->reg_type = PTR_TO_PACKET_END; |
6a773a15 BB |
5616 | break; |
5617 | } | |
5618 | ||
1afaf661 | 5619 | return __is_valid_xdp_access(off, size); |
6a773a15 BB |
5620 | } |
5621 | ||
5622 | void bpf_warn_invalid_xdp_action(u32 act) | |
5623 | { | |
9beb8bed DB |
5624 | const u32 act_max = XDP_REDIRECT; |
5625 | ||
5626 | WARN_ONCE(1, "%s XDP return value %u, expect packet loss!\n", | |
5627 | act > act_max ? "Illegal" : "Driver unsupported", | |
5628 | act); | |
6a773a15 BB |
5629 | } |
5630 | EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action); | |
5631 | ||
4fbac77d AI |
5632 | static bool sock_addr_is_valid_access(int off, int size, |
5633 | enum bpf_access_type type, | |
5634 | const struct bpf_prog *prog, | |
5635 | struct bpf_insn_access_aux *info) | |
5636 | { | |
5637 | const int size_default = sizeof(__u32); | |
5638 | ||
5639 | if (off < 0 || off >= sizeof(struct bpf_sock_addr)) | |
5640 | return false; | |
5641 | if (off % size != 0) | |
5642 | return false; | |
5643 | ||
5644 | /* Disallow access to IPv6 fields from IPv4 context and vice
5645 | * versa.
5646 | */ | |
5647 | switch (off) { | |
5648 | case bpf_ctx_range(struct bpf_sock_addr, user_ip4): | |
5649 | switch (prog->expected_attach_type) { | |
5650 | case BPF_CGROUP_INET4_BIND: | |
d74bad4e | 5651 | case BPF_CGROUP_INET4_CONNECT: |
1cedee13 | 5652 | case BPF_CGROUP_UDP4_SENDMSG: |
4fbac77d AI |
5653 | break; |
5654 | default: | |
5655 | return false; | |
5656 | } | |
5657 | break; | |
5658 | case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): | |
5659 | switch (prog->expected_attach_type) { | |
5660 | case BPF_CGROUP_INET6_BIND: | |
d74bad4e | 5661 | case BPF_CGROUP_INET6_CONNECT: |
1cedee13 AI |
5662 | case BPF_CGROUP_UDP6_SENDMSG: |
5663 | break; | |
5664 | default: | |
5665 | return false; | |
5666 | } | |
5667 | break; | |
5668 | case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): | |
5669 | switch (prog->expected_attach_type) { | |
5670 | case BPF_CGROUP_UDP4_SENDMSG: | |
5671 | break; | |
5672 | default: | |
5673 | return false; | |
5674 | } | |
5675 | break; | |
5676 | case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], | |
5677 | msg_src_ip6[3]): | |
5678 | switch (prog->expected_attach_type) { | |
5679 | case BPF_CGROUP_UDP6_SENDMSG: | |
4fbac77d AI |
5680 | break; |
5681 | default: | |
5682 | return false; | |
5683 | } | |
5684 | break; | |
5685 | } | |
5686 | ||
5687 | switch (off) { | |
5688 | case bpf_ctx_range(struct bpf_sock_addr, user_ip4): | |
5689 | case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): | |
1cedee13 AI |
5690 | case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): |
5691 | case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], | |
5692 | msg_src_ip6[3]): | |
4fbac77d AI |
5693 | /* Only narrow read access allowed for now. */ |
5694 | if (type == BPF_READ) { | |
5695 | bpf_ctx_record_field_size(info, size_default); | |
5696 | if (!bpf_ctx_narrow_access_ok(off, size, size_default)) | |
5697 | return false; | |
5698 | } else { | |
5699 | if (size != size_default) | |
5700 | return false; | |
5701 | } | |
5702 | break; | |
5703 | case bpf_ctx_range(struct bpf_sock_addr, user_port): | |
5704 | if (size != size_default) | |
5705 | return false; | |
5706 | break; | |
5707 | default: | |
5708 | if (type == BPF_READ) { | |
5709 | if (size != size_default) | |
5710 | return false; | |
5711 | } else { | |
5712 | return false; | |
5713 | } | |
5714 | } | |
5715 | ||
5716 | return true; | |
5717 | } | |
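/* Note (illustrative, not part of the original source): the narrow
 * read rule above lets a program load single bytes of the address
 * fields, while writes must be full 4-byte stores, e.g.:
 *
 *	__u8 b = ((__u8 *)&ctx->user_ip6[0])[0];   // ok, narrow read
 *	ctx->user_ip4 = bpf_htonl(0x7f000001);     // ok, full write
 *	// *(__u8 *)&ctx->user_ip4 = 0x7f;         // rejected
 */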
5718 | ||
44f0e430 LB |
5719 | static bool sock_ops_is_valid_access(int off, int size, |
5720 | enum bpf_access_type type, | |
5e43f899 | 5721 | const struct bpf_prog *prog, |
44f0e430 | 5722 | struct bpf_insn_access_aux *info) |
40304b2a | 5723 | { |
44f0e430 LB |
5724 | const int size_default = sizeof(__u32); |
5725 | ||
40304b2a LB |
5726 | if (off < 0 || off >= sizeof(struct bpf_sock_ops)) |
5727 | return false; | |
44f0e430 | 5728 | |
40304b2a LB |
5729 | /* The verifier guarantees that size > 0. */ |
5730 | if (off % size != 0) | |
5731 | return false; | |
40304b2a | 5732 | |
40304b2a LB |
5733 | if (type == BPF_WRITE) { |
5734 | switch (off) { | |
2585cd62 | 5735 | case offsetof(struct bpf_sock_ops, reply): |
6f9bd3d7 | 5736 | case offsetof(struct bpf_sock_ops, sk_txhash): |
44f0e430 LB |
5737 | if (size != size_default) |
5738 | return false; | |
40304b2a LB |
5739 | break; |
5740 | default: | |
5741 | return false; | |
5742 | } | |
44f0e430 LB |
5743 | } else { |
5744 | switch (off) { | |
5745 | case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received, | |
5746 | bytes_acked): | |
5747 | if (size != sizeof(__u64)) | |
5748 | return false; | |
5749 | break; | |
5750 | default: | |
5751 | if (size != size_default) | |
5752 | return false; | |
5753 | break; | |
5754 | } | |
40304b2a LB |
5755 | } |
5756 | ||
44f0e430 | 5757 | return true; |
40304b2a LB |
5758 | } |
5759 | ||
8a31db56 JF |
5760 | static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write, |
5761 | const struct bpf_prog *prog) | |
5762 | { | |
047b0ecd | 5763 | return bpf_unclone_prologue(insn_buf, direct_write, prog, SK_DROP); |
8a31db56 JF |
5764 | } |
5765 | ||
b005fd18 JF |
5766 | static bool sk_skb_is_valid_access(int off, int size, |
5767 | enum bpf_access_type type, | |
5e43f899 | 5768 | const struct bpf_prog *prog, |
b005fd18 JF |
5769 | struct bpf_insn_access_aux *info) |
5770 | { | |
de8f3a83 DB |
5771 | switch (off) { |
5772 | case bpf_ctx_range(struct __sk_buff, tc_classid): | |
5773 | case bpf_ctx_range(struct __sk_buff, data_meta): | |
d58e468b | 5774 | case bpf_ctx_range(struct __sk_buff, flow_keys): |
de8f3a83 DB |
5775 | return false; |
5776 | } | |
5777 | ||
8a31db56 JF |
5778 | if (type == BPF_WRITE) { |
5779 | switch (off) { | |
8a31db56 JF |
5780 | case bpf_ctx_range(struct __sk_buff, tc_index): |
5781 | case bpf_ctx_range(struct __sk_buff, priority): | |
5782 | break; | |
5783 | default: | |
5784 | return false; | |
5785 | } | |
5786 | } | |
5787 | ||
b005fd18 | 5788 | switch (off) { |
f7e9cb1e | 5789 | case bpf_ctx_range(struct __sk_buff, mark): |
8a31db56 | 5790 | return false; |
b005fd18 JF |
5791 | case bpf_ctx_range(struct __sk_buff, data): |
5792 | info->reg_type = PTR_TO_PACKET; | |
5793 | break; | |
5794 | case bpf_ctx_range(struct __sk_buff, data_end): | |
5795 | info->reg_type = PTR_TO_PACKET_END; | |
5796 | break; | |
5797 | } | |
5798 | ||
5e43f899 | 5799 | return bpf_skb_is_valid_access(off, size, type, prog, info); |
b005fd18 JF |
5800 | } |
5801 | ||
4f738adb JF |
5802 | static bool sk_msg_is_valid_access(int off, int size, |
5803 | enum bpf_access_type type, | |
5e43f899 | 5804 | const struct bpf_prog *prog, |
4f738adb JF |
5805 | struct bpf_insn_access_aux *info) |
5806 | { | |
5807 | if (type == BPF_WRITE) | |
5808 | return false; | |
5809 | ||
5810 | switch (off) { | |
5811 | case offsetof(struct sk_msg_md, data): | |
5812 | info->reg_type = PTR_TO_PACKET; | |
303def35 JF |
5813 | if (size != sizeof(__u64)) |
5814 | return false; | |
4f738adb JF |
5815 | break; |
5816 | case offsetof(struct sk_msg_md, data_end): | |
5817 | info->reg_type = PTR_TO_PACKET_END; | |
303def35 JF |
5818 | if (size != sizeof(__u64)) |
5819 | return false; | |
4f738adb | 5820 | break; |
303def35 JF |
5821 | default: |
5822 | if (size != sizeof(__u32)) | |
5823 | return false; | |
4f738adb JF |
5824 | } |
5825 | ||
5826 | if (off < 0 || off >= sizeof(struct sk_msg_md)) | |
5827 | return false; | |
5828 | if (off % size != 0) | |
5829 | return false; | |
4f738adb JF |
5830 | |
5831 | return true; | |
5832 | } | |
5833 | ||
d58e468b PP |
5834 | static bool flow_dissector_is_valid_access(int off, int size, |
5835 | enum bpf_access_type type, | |
5836 | const struct bpf_prog *prog, | |
5837 | struct bpf_insn_access_aux *info) | |
5838 | { | |
5839 | if (type == BPF_WRITE) { | |
5840 | switch (off) { | |
5841 | case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): | |
5842 | break; | |
5843 | default: | |
5844 | return false; | |
5845 | } | |
5846 | } | |
5847 | ||
5848 | switch (off) { | |
5849 | case bpf_ctx_range(struct __sk_buff, data): | |
5850 | info->reg_type = PTR_TO_PACKET; | |
5851 | break; | |
5852 | case bpf_ctx_range(struct __sk_buff, data_end): | |
5853 | info->reg_type = PTR_TO_PACKET_END; | |
5854 | break; | |
5855 | case bpf_ctx_range(struct __sk_buff, flow_keys): | |
5856 | info->reg_type = PTR_TO_FLOW_KEYS; | |
5857 | break; | |
5858 | case bpf_ctx_range(struct __sk_buff, tc_classid): | |
5859 | case bpf_ctx_range(struct __sk_buff, data_meta): | |
5860 | case bpf_ctx_range_till(struct __sk_buff, family, local_port): | |
5861 | return false; | |
5862 | } | |
5863 | ||
5864 | return bpf_skb_is_valid_access(off, size, type, prog, info); | |
5865 | } | |
5866 | ||
2492d3b8 DB |
5867 | static u32 bpf_convert_ctx_access(enum bpf_access_type type, |
5868 | const struct bpf_insn *si, | |
5869 | struct bpf_insn *insn_buf, | |
f96da094 | 5870 | struct bpf_prog *prog, u32 *target_size) |
9bac3d6d AS |
5871 | { |
5872 | struct bpf_insn *insn = insn_buf; | |
6b8cc1d1 | 5873 | int off; |
9bac3d6d | 5874 | |
6b8cc1d1 | 5875 | switch (si->off) { |
9bac3d6d | 5876 | case offsetof(struct __sk_buff, len): |
6b8cc1d1 | 5877 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, |
f96da094 DB |
5878 | bpf_target_off(struct sk_buff, len, 4, |
5879 | target_size)); | |
9bac3d6d AS |
5880 | break; |
5881 | ||
0b8c707d | 5882 | case offsetof(struct __sk_buff, protocol): |
6b8cc1d1 | 5883 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, |
f96da094 DB |
5884 | bpf_target_off(struct sk_buff, protocol, 2, |
5885 | target_size)); | |
0b8c707d DB |
5886 | break; |
5887 | ||
27cd5452 | 5888 | case offsetof(struct __sk_buff, vlan_proto): |
6b8cc1d1 | 5889 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, |
f96da094 DB |
5890 | bpf_target_off(struct sk_buff, vlan_proto, 2, |
5891 | target_size)); | |
27cd5452 MS |
5892 | break; |
5893 | ||
bcad5718 | 5894 | case offsetof(struct __sk_buff, priority): |
754f1e6a | 5895 | if (type == BPF_WRITE) |
6b8cc1d1 | 5896 | *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, |
f96da094 DB |
5897 | bpf_target_off(struct sk_buff, priority, 4, |
5898 | target_size)); | |
754f1e6a | 5899 | else |
6b8cc1d1 | 5900 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, |
f96da094 DB |
5901 | bpf_target_off(struct sk_buff, priority, 4, |
5902 | target_size)); | |
bcad5718 DB |
5903 | break; |
5904 | ||
37e82c2f | 5905 | case offsetof(struct __sk_buff, ingress_ifindex): |
6b8cc1d1 | 5906 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, |
f96da094 DB |
5907 | bpf_target_off(struct sk_buff, skb_iif, 4, |
5908 | target_size)); | |
37e82c2f AS |
5909 | break; |
5910 | ||
5911 | case offsetof(struct __sk_buff, ifindex): | |
f035a515 | 5912 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), |
6b8cc1d1 | 5913 | si->dst_reg, si->src_reg, |
37e82c2f | 5914 | offsetof(struct sk_buff, dev)); |
6b8cc1d1 DB |
5915 | *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); |
5916 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
f96da094 DB |
5917 | bpf_target_off(struct net_device, ifindex, 4, |
5918 | target_size)); | |
37e82c2f AS |
5919 | break; |
5920 | ||
ba7591d8 | 5921 | case offsetof(struct __sk_buff, hash): |
6b8cc1d1 | 5922 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, |
f96da094 DB |
5923 | bpf_target_off(struct sk_buff, hash, 4, |
5924 | target_size)); | |
ba7591d8 DB |
5925 | break; |
5926 | ||
9bac3d6d | 5927 | case offsetof(struct __sk_buff, mark): |
d691f9e8 | 5928 | if (type == BPF_WRITE) |
6b8cc1d1 | 5929 | *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, |
f96da094 DB |
5930 | bpf_target_off(struct sk_buff, mark, 4, |
5931 | target_size)); | |
d691f9e8 | 5932 | else |
6b8cc1d1 | 5933 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, |
f96da094 DB |
5934 | bpf_target_off(struct sk_buff, mark, 4, |
5935 | target_size)); | |
d691f9e8 | 5936 | break; |
9bac3d6d AS |
5937 | |
5938 | case offsetof(struct __sk_buff, pkt_type): | |
f96da094 DB |
5939 | *target_size = 1; |
5940 | *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, | |
5941 | PKT_TYPE_OFFSET()); | |
5942 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX); | |
5943 | #ifdef __BIG_ENDIAN_BITFIELD | |
5944 | *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5); | |
5945 | #endif | |
5946 | break; | |
9bac3d6d AS |
5947 | |
5948 | case offsetof(struct __sk_buff, queue_mapping): | |
f96da094 DB |
5949 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, |
5950 | bpf_target_off(struct sk_buff, queue_mapping, 2, | |
5951 | target_size)); | |
5952 | break; | |
c2497395 | 5953 | |
c2497395 | 5954 | case offsetof(struct __sk_buff, vlan_present): |
c2497395 | 5955 | case offsetof(struct __sk_buff, vlan_tci): |
f96da094 DB |
5956 | BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); |
5957 | ||
5958 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, | |
5959 | bpf_target_off(struct sk_buff, vlan_tci, 2, | |
5960 | target_size)); | |
5961 | if (si->off == offsetof(struct __sk_buff, vlan_tci)) { | |
5962 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, | |
5963 | ~VLAN_TAG_PRESENT); | |
5964 | } else { | |
5965 | *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 12); | |
5966 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1); | |
5967 | } | |
5968 | break; | |
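/* Illustrative note (not in the original source): vlan_tci and
 * vlan_present share one underlying field. skb->vlan_tci encodes tag
 * presence in bit 12 (VLAN_TAG_PRESENT == 0x1000, checked by the
 * BUILD_BUG_ON above), so a vlan_tci read masks that bit out, while a
 * vlan_present read shifts it down to bit 0.
 */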
d691f9e8 AS |
5969 | |
5970 | case offsetof(struct __sk_buff, cb[0]) ... | |
f96da094 | 5971 | offsetofend(struct __sk_buff, cb[4]) - 1: |
d691f9e8 | 5972 | BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); |
62c7989b DB |
5973 | BUILD_BUG_ON((offsetof(struct sk_buff, cb) + |
5974 | offsetof(struct qdisc_skb_cb, data)) % | |
5975 | sizeof(__u64)); | |
d691f9e8 | 5976 | |
ff936a04 | 5977 | prog->cb_access = 1; |
6b8cc1d1 DB |
5978 | off = si->off; |
5979 | off -= offsetof(struct __sk_buff, cb[0]); | |
5980 | off += offsetof(struct sk_buff, cb); | |
5981 | off += offsetof(struct qdisc_skb_cb, data); | |
d691f9e8 | 5982 | if (type == BPF_WRITE) |
62c7989b | 5983 | *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, |
6b8cc1d1 | 5984 | si->src_reg, off); |
d691f9e8 | 5985 | else |
62c7989b | 5986 | *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, |
6b8cc1d1 | 5987 | si->src_reg, off); |
d691f9e8 AS |
5988 | break; |
5989 | ||
045efa82 | 5990 | case offsetof(struct __sk_buff, tc_classid): |
6b8cc1d1 DB |
5991 | BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2); |
5992 | ||
5993 | off = si->off; | |
5994 | off -= offsetof(struct __sk_buff, tc_classid); | |
5995 | off += offsetof(struct sk_buff, cb); | |
5996 | off += offsetof(struct qdisc_skb_cb, tc_classid); | |
f96da094 | 5997 | *target_size = 2; |
09c37a2c | 5998 | if (type == BPF_WRITE) |
6b8cc1d1 DB |
5999 | *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, |
6000 | si->src_reg, off); | |
09c37a2c | 6001 | else |
6b8cc1d1 DB |
6002 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, |
6003 | si->src_reg, off); | |
045efa82 DB |
6004 | break; |
6005 | ||
db58ba45 | 6006 | case offsetof(struct __sk_buff, data): |
f035a515 | 6007 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), |
6b8cc1d1 | 6008 | si->dst_reg, si->src_reg, |
db58ba45 AS |
6009 | offsetof(struct sk_buff, data)); |
6010 | break; | |
6011 | ||
de8f3a83 DB |
6012 | case offsetof(struct __sk_buff, data_meta): |
6013 | off = si->off; | |
6014 | off -= offsetof(struct __sk_buff, data_meta); | |
6015 | off += offsetof(struct sk_buff, cb); | |
6016 | off += offsetof(struct bpf_skb_data_end, data_meta); | |
6017 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, | |
6018 | si->src_reg, off); | |
6019 | break; | |
6020 | ||
db58ba45 | 6021 | case offsetof(struct __sk_buff, data_end): |
6b8cc1d1 DB |
6022 | off = si->off; |
6023 | off -= offsetof(struct __sk_buff, data_end); | |
6024 | off += offsetof(struct sk_buff, cb); | |
6025 | off += offsetof(struct bpf_skb_data_end, data_end); | |
6026 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, | |
6027 | si->src_reg, off); | |
db58ba45 AS |
6028 | break; |
6029 | ||
d691f9e8 AS |
6030 | case offsetof(struct __sk_buff, tc_index): |
6031 | #ifdef CONFIG_NET_SCHED | |
d691f9e8 | 6032 | if (type == BPF_WRITE) |
6b8cc1d1 | 6033 | *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, |
f96da094 DB |
6034 | bpf_target_off(struct sk_buff, tc_index, 2, |
6035 | target_size)); | |
d691f9e8 | 6036 | else |
6b8cc1d1 | 6037 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, |
f96da094 DB |
6038 | bpf_target_off(struct sk_buff, tc_index, 2, |
6039 | target_size)); | |
d691f9e8 | 6040 | #else |
2ed46ce4 | 6041 | *target_size = 2; |
d691f9e8 | 6042 | if (type == BPF_WRITE) |
6b8cc1d1 | 6043 | *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg); |
d691f9e8 | 6044 | else |
6b8cc1d1 | 6045 | *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); |
b1d9fc41 DB |
6046 | #endif |
6047 | break; | |
6048 | ||
6049 | case offsetof(struct __sk_buff, napi_id): | |
6050 | #if defined(CONFIG_NET_RX_BUSY_POLL) | |
b1d9fc41 | 6051 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, |
f96da094 DB |
6052 | bpf_target_off(struct sk_buff, napi_id, 4, |
6053 | target_size)); | |
b1d9fc41 DB |
6054 | *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1); |
6055 | *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); | |
6056 | #else | |
2ed46ce4 | 6057 | *target_size = 4; |
b1d9fc41 | 6058 | *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); |
d691f9e8 | 6059 | #endif |
6b8cc1d1 | 6060 | break; |
8a31db56 JF |
6061 | case offsetof(struct __sk_buff, family): |
6062 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); | |
6063 | ||
6064 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), | |
6065 | si->dst_reg, si->src_reg, | |
6066 | offsetof(struct sk_buff, sk)); | |
6067 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, | |
6068 | bpf_target_off(struct sock_common, | |
6069 | skc_family, | |
6070 | 2, target_size)); | |
6071 | break; | |
6072 | case offsetof(struct __sk_buff, remote_ip4): | |
6073 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); | |
6074 | ||
6075 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), | |
6076 | si->dst_reg, si->src_reg, | |
6077 | offsetof(struct sk_buff, sk)); | |
6078 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
6079 | bpf_target_off(struct sock_common, | |
6080 | skc_daddr, | |
6081 | 4, target_size)); | |
6082 | break; | |
6083 | case offsetof(struct __sk_buff, local_ip4): | |
6084 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, | |
6085 | skc_rcv_saddr) != 4); | |
6086 | ||
6087 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), | |
6088 | si->dst_reg, si->src_reg, | |
6089 | offsetof(struct sk_buff, sk)); | |
6090 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
6091 | bpf_target_off(struct sock_common, | |
6092 | skc_rcv_saddr, | |
6093 | 4, target_size)); | |
6094 | break; | |
6095 | case offsetof(struct __sk_buff, remote_ip6[0]) ... | |
6096 | offsetof(struct __sk_buff, remote_ip6[3]): | |
6097 | #if IS_ENABLED(CONFIG_IPV6) | |
6098 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, | |
6099 | skc_v6_daddr.s6_addr32[0]) != 4); | |
6100 | ||
6101 | off = si->off; | |
6102 | off -= offsetof(struct __sk_buff, remote_ip6[0]); | |
6103 | ||
6104 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), | |
6105 | si->dst_reg, si->src_reg, | |
6106 | offsetof(struct sk_buff, sk)); | |
6107 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
6108 | offsetof(struct sock_common, | |
6109 | skc_v6_daddr.s6_addr32[0]) + | |
6110 | off); | |
6111 | #else | |
6112 | *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); | |
6113 | #endif | |
6114 | break; | |
6115 | case offsetof(struct __sk_buff, local_ip6[0]) ... | |
6116 | offsetof(struct __sk_buff, local_ip6[3]): | |
6117 | #if IS_ENABLED(CONFIG_IPV6) | |
6118 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, | |
6119 | skc_v6_rcv_saddr.s6_addr32[0]) != 4); | |
6120 | ||
6121 | off = si->off; | |
6122 | off -= offsetof(struct __sk_buff, local_ip6[0]); | |
6123 | ||
6124 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), | |
6125 | si->dst_reg, si->src_reg, | |
6126 | offsetof(struct sk_buff, sk)); | |
6127 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
6128 | offsetof(struct sock_common, | |
6129 | skc_v6_rcv_saddr.s6_addr32[0]) + | |
6130 | off); | |
6131 | #else | |
6132 | *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); | |
6133 | #endif | |
6134 | break; | |
6135 | ||
6136 | case offsetof(struct __sk_buff, remote_port): | |
6137 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); | |
6138 | ||
6139 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), | |
6140 | si->dst_reg, si->src_reg, | |
6141 | offsetof(struct sk_buff, sk)); | |
6142 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, | |
6143 | bpf_target_off(struct sock_common, | |
6144 | skc_dport, | |
6145 | 2, target_size)); | |
6146 | #ifndef __BIG_ENDIAN_BITFIELD | |
6147 | *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); | |
6148 | #endif | |
6149 | break; | |
6150 | ||
6151 | case offsetof(struct __sk_buff, local_port): | |
6152 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); | |
6153 | ||
6154 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), | |
6155 | si->dst_reg, si->src_reg, | |
6156 | offsetof(struct sk_buff, sk)); | |
6157 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, | |
6158 | bpf_target_off(struct sock_common, | |
6159 | skc_num, 2, target_size)); | |
6160 | break; | |
d58e468b PP |
6161 | |
6162 | case offsetof(struct __sk_buff, flow_keys): | |
6163 | off = si->off; | |
6164 | off -= offsetof(struct __sk_buff, flow_keys); | |
6165 | off += offsetof(struct sk_buff, cb); | |
6166 | off += offsetof(struct qdisc_skb_cb, flow_keys); | |
6167 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, | |
6168 | si->src_reg, off); | |
6169 | break; | |
9bac3d6d AS |
6170 | } |
6171 | ||
6172 | return insn - insn_buf; | |
89aa0758 AS |
6173 | } |
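/* Worked example (a sketch, not part of the original file): a program
 * instruction that reads the 32-bit mark field through the context,
 *
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
 *		    offsetof(struct __sk_buff, mark));
 *
 * is rewritten by the "mark" case above into a load at the real kernel
 * offset,
 *
 *	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
 *		    offsetof(struct sk_buff, mark));
 *
 * while bpf_target_off() records the field width in *target_size so the
 * verifier can patch up narrower (1- or 2-byte) context reads.
 */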
6174 | ||
c64b7983 JS |
6175 | u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, |
6176 | const struct bpf_insn *si, | |
6177 | struct bpf_insn *insn_buf, | |
6178 | struct bpf_prog *prog, u32 *target_size) | |
61023658 DA |
6179 | { |
6180 | struct bpf_insn *insn = insn_buf; | |
aac3fc32 | 6181 | int off; |
61023658 | 6182 | |
6b8cc1d1 | 6183 | switch (si->off) { |
61023658 DA |
6184 | case offsetof(struct bpf_sock, bound_dev_if): |
6185 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4); | |
6186 | ||
6187 | if (type == BPF_WRITE) | |
6b8cc1d1 | 6188 | *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, |
61023658 DA |
6189 | offsetof(struct sock, sk_bound_dev_if)); |
6190 | else | |
6b8cc1d1 | 6191 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, |
61023658 DA |
6192 | offsetof(struct sock, sk_bound_dev_if)); |
6193 | break; | |
aa4c1037 | 6194 | |
482dca93 DA |
6195 | case offsetof(struct bpf_sock, mark): |
6196 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4); | |
6197 | ||
6198 | if (type == BPF_WRITE) | |
6199 | *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, | |
6200 | offsetof(struct sock, sk_mark)); | |
6201 | else | |
6202 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, | |
6203 | offsetof(struct sock, sk_mark)); | |
6204 | break; | |
6205 | ||
6206 | case offsetof(struct bpf_sock, priority): | |
6207 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4); | |
6208 | ||
6209 | if (type == BPF_WRITE) | |
6210 | *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, | |
6211 | offsetof(struct sock, sk_priority)); | |
6212 | else | |
6213 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, | |
6214 | offsetof(struct sock, sk_priority)); | |
6215 | break; | |
6216 | ||
aa4c1037 DA |
6217 | case offsetof(struct bpf_sock, family): |
6218 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2); | |
6219 | ||
6b8cc1d1 | 6220 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, |
aa4c1037 DA |
6221 | offsetof(struct sock, sk_family)); |
6222 | break; | |
6223 | ||
6224 | case offsetof(struct bpf_sock, type): | |
6b8cc1d1 | 6225 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, |
aa4c1037 | 6226 | offsetof(struct sock, __sk_flags_offset)); |
6b8cc1d1 DB |
6227 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK); |
6228 | *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT); | |
aa4c1037 DA |
6229 | break; |
6230 | ||
6231 | case offsetof(struct bpf_sock, protocol): | |
6b8cc1d1 | 6232 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, |
aa4c1037 | 6233 | offsetof(struct sock, __sk_flags_offset)); |
6b8cc1d1 DB |
6234 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); |
6235 | *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT); | |
aa4c1037 | 6236 | break; |
aac3fc32 AI |
6237 | |
6238 | case offsetof(struct bpf_sock, src_ip4): | |
6239 | *insn++ = BPF_LDX_MEM( | |
6240 | BPF_SIZE(si->code), si->dst_reg, si->src_reg, | |
6241 | bpf_target_off(struct sock_common, skc_rcv_saddr, | |
6242 | FIELD_SIZEOF(struct sock_common, | |
6243 | skc_rcv_saddr), | |
6244 | target_size)); | |
6245 | break; | |
6246 | ||
6247 | case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): | |
6248 | #if IS_ENABLED(CONFIG_IPV6) | |
6249 | off = si->off; | |
6250 | off -= offsetof(struct bpf_sock, src_ip6[0]); | |
6251 | *insn++ = BPF_LDX_MEM( | |
6252 | BPF_SIZE(si->code), si->dst_reg, si->src_reg, | |
6253 | bpf_target_off( | |
6254 | struct sock_common, | |
6255 | skc_v6_rcv_saddr.s6_addr32[0], | |
6256 | FIELD_SIZEOF(struct sock_common, | |
6257 | skc_v6_rcv_saddr.s6_addr32[0]), | |
6258 | target_size) + off); | |
6259 | #else | |
6260 | (void)off; | |
6261 | *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); | |
6262 | #endif | |
6263 | break; | |
6264 | ||
6265 | case offsetof(struct bpf_sock, src_port): | |
6266 | *insn++ = BPF_LDX_MEM( | |
6267 | BPF_FIELD_SIZEOF(struct sock_common, skc_num), | |
6268 | si->dst_reg, si->src_reg, | |
6269 | bpf_target_off(struct sock_common, skc_num, | |
6270 | FIELD_SIZEOF(struct sock_common, | |
6271 | skc_num), | |
6272 | target_size)); | |
6273 | break; | |
61023658 DA |
6274 | } |
6275 | ||
6276 | return insn - insn_buf; | |
6277 | } | |
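/* Illustrative note (not in the original source): sk_type and
 * sk_protocol are C bitfields in struct sock, so they cannot be loaded
 * at a plain field offset. The "type" and "protocol" cases above load
 * the whole word at __sk_flags_offset and isolate the bits, roughly:
 *
 *	dst_reg = *(u32 *)(sk + __sk_flags_offset);
 *	dst_reg &= SK_FL_TYPE_MASK;
 *	dst_reg >>= SK_FL_TYPE_SHIFT;
 */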
6278 | ||
6b8cc1d1 DB |
6279 | static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, |
6280 | const struct bpf_insn *si, | |
374fb54e | 6281 | struct bpf_insn *insn_buf, |
f96da094 | 6282 | struct bpf_prog *prog, u32 *target_size) |
374fb54e DB |
6283 | { |
6284 | struct bpf_insn *insn = insn_buf; | |
6285 | ||
6b8cc1d1 | 6286 | switch (si->off) { |
374fb54e | 6287 | case offsetof(struct __sk_buff, ifindex): |
374fb54e | 6288 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), |
6b8cc1d1 | 6289 | si->dst_reg, si->src_reg, |
374fb54e | 6290 | offsetof(struct sk_buff, dev)); |
6b8cc1d1 | 6291 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, |
f96da094 DB |
6292 | bpf_target_off(struct net_device, ifindex, 4, |
6293 | target_size)); | |
374fb54e DB |
6294 | break; |
6295 | default: | |
f96da094 DB |
6296 | return bpf_convert_ctx_access(type, si, insn_buf, prog, |
6297 | target_size); | |
374fb54e DB |
6298 | } |
6299 | ||
6300 | return insn - insn_buf; | |
6301 | } | |
6302 | ||
6b8cc1d1 DB |
6303 | static u32 xdp_convert_ctx_access(enum bpf_access_type type, |
6304 | const struct bpf_insn *si, | |
6a773a15 | 6305 | struct bpf_insn *insn_buf, |
f96da094 | 6306 | struct bpf_prog *prog, u32 *target_size) |
6a773a15 BB |
6307 | { |
6308 | struct bpf_insn *insn = insn_buf; | |
6309 | ||
6b8cc1d1 | 6310 | switch (si->off) { |
6a773a15 | 6311 | case offsetof(struct xdp_md, data): |
f035a515 | 6312 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data), |
6b8cc1d1 | 6313 | si->dst_reg, si->src_reg, |
6a773a15 BB |
6314 | offsetof(struct xdp_buff, data)); |
6315 | break; | |
de8f3a83 DB |
6316 | case offsetof(struct xdp_md, data_meta): |
6317 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta), | |
6318 | si->dst_reg, si->src_reg, | |
6319 | offsetof(struct xdp_buff, data_meta)); | |
6320 | break; | |
6a773a15 | 6321 | case offsetof(struct xdp_md, data_end): |
f035a515 | 6322 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end), |
6b8cc1d1 | 6323 | si->dst_reg, si->src_reg, |
6a773a15 BB |
6324 | offsetof(struct xdp_buff, data_end)); |
6325 | break; | |
02dd3291 JDB |
6326 | case offsetof(struct xdp_md, ingress_ifindex): |
6327 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq), | |
6328 | si->dst_reg, si->src_reg, | |
6329 | offsetof(struct xdp_buff, rxq)); | |
6330 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev), | |
6331 | si->dst_reg, si->dst_reg, | |
6332 | offsetof(struct xdp_rxq_info, dev)); | |
6333 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
daaf24c6 | 6334 | offsetof(struct net_device, ifindex)); |
02dd3291 JDB |
6335 | break; |
6336 | case offsetof(struct xdp_md, rx_queue_index): | |
6337 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq), | |
6338 | si->dst_reg, si->src_reg, | |
6339 | offsetof(struct xdp_buff, rxq)); | |
6340 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
daaf24c6 JDB |
6341 | offsetof(struct xdp_rxq_info, |
6342 | queue_index)); | |
02dd3291 | 6343 | break; |
6a773a15 BB |
6344 | } |
6345 | ||
6346 | return insn - insn_buf; | |
6347 | } | |
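/* Illustrative note (not in the original source): the ingress_ifindex
 * case above is a two-level pointer chase, equivalent to the C
 * expression
 *
 *	ifindex = xdp->rxq->dev->ifindex;
 *
 * emitted as three dependent loads through dst_reg.
 */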
6348 | ||
4fbac77d AI |
6349 | /* SOCK_ADDR_LOAD_NESTED_FIELD() loads the Nested Field S.F.NF, where S is | |
6350 | * the type of the context Structure, F is the Field in the context structure | |
6351 | * that contains a pointer to the Nested Structure of type NS that has the field NF. | |
6352 | * | |
6353 | * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to the caller to make | |
6354 | * sure that SIZE is not greater than the actual size of S.F.NF. | |
6355 | * | |
6356 | * If offset OFF is provided, the load happens from that offset relative to | |
6357 | * the offset of NF. | |
6358 | */ | |
6359 | #define SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF) \ | |
6360 | do { \ | |
6361 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \ | |
6362 | si->src_reg, offsetof(S, F)); \ | |
6363 | *insn++ = BPF_LDX_MEM( \ | |
6364 | SIZE, si->dst_reg, si->dst_reg, \ | |
6365 | bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \ | |
6366 | target_size) \ | |
6367 | + OFF); \ | |
6368 | } while (0) | |
6369 | ||
6370 | #define SOCK_ADDR_LOAD_NESTED_FIELD(S, NS, F, NF) \ | |
6371 | SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, \ | |
6372 | BPF_FIELD_SIZEOF(NS, NF), 0) | |
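/* Sketch of the expansion (illustration only, not in the original file):
 * with S = struct bpf_sock_addr_kern, F = uaddr, NS = struct sockaddr
 * and NF = sa_family, SOCK_ADDR_LOAD_NESTED_FIELD() emits two loads:
 *
 *	dst_reg = ctx->uaddr;				// S.F, pointer-sized
 *	dst_reg = ((struct sockaddr *)dst_reg)->sa_family;	// NS.NF
 *
 * which is how the user_family case below reads the address family.
 */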
6373 | ||
6374 | /* SOCK_ADDR_STORE_NESTED_FIELD_OFF() has semantics similar to | |
6375 | * SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF() but for a store operation. | |
6376 | * | |
6377 | * It doesn't support the SIZE argument, though, since narrow stores | |
6378 | * are not supported for now. | |
6379 | * | |
6380 | * In addition it uses the Temporary Field TF (a member of struct S) as a | |
6381 | * 3rd "register", since the two registers available in convert_ctx_access | |
6382 | * are not enough: we can overwrite neither SRC, since it contains the | |
6383 | * value to store, nor DST, since it contains the pointer to the context | |
6384 | * that may be used by later instructions. But we need a temporary place | |
6385 | * to save the pointer to the nested structure whose field we want to store to. | |
6386 | */ | |
6387 | #define SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, TF) \ | |
6388 | do { \ | |
6389 | int tmp_reg = BPF_REG_9; \ | |
6390 | if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ | |
6391 | --tmp_reg; \ | |
6392 | if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ | |
6393 | --tmp_reg; \ | |
6394 | *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \ | |
6395 | offsetof(S, TF)); \ | |
6396 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \ | |
6397 | si->dst_reg, offsetof(S, F)); \ | |
6398 | *insn++ = BPF_STX_MEM( \ | |
6399 | BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg, \ | |
6400 | bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \ | |
6401 | target_size) \ | |
6402 | + OFF); \ | |
6403 | *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \ | |
6404 | offsetof(S, TF)); \ | |
6405 | } while (0) | |
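/* The emitted sequence, sketched in pseudo-C (illustration only):
 *
 *	ctx->TF = tmp_reg;			// spill the borrowed register
 *	tmp_reg = ctx->F;			// pointer to nested struct NS
 *	*(tmp_reg + offsetof(NS, NF) + OFF) = src_reg;	// the actual store
 *	tmp_reg = ctx->TF;			// restore the borrowed register
 *
 * ctx stays in dst_reg throughout, so neither src_reg nor dst_reg is
 * clobbered.
 */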
6406 | ||
6407 | #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF(S, NS, F, NF, SIZE, OFF, \ | |
6408 | TF) \ | |
6409 | do { \ | |
6410 | if (type == BPF_WRITE) { \ | |
6411 | SOCK_ADDR_STORE_NESTED_FIELD_OFF(S, NS, F, NF, OFF, \ | |
6412 | TF); \ | |
6413 | } else { \ | |
6414 | SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( \ | |
6415 | S, NS, F, NF, SIZE, OFF); \ | |
6416 | } \ | |
6417 | } while (0) | |
6418 | ||
6419 | #define SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(S, NS, F, NF, TF) \ | |
6420 | SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( \ | |
6421 | S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF) | |
6422 | ||
6423 | static u32 sock_addr_convert_ctx_access(enum bpf_access_type type, | |
6424 | const struct bpf_insn *si, | |
6425 | struct bpf_insn *insn_buf, | |
6426 | struct bpf_prog *prog, u32 *target_size) | |
6427 | { | |
6428 | struct bpf_insn *insn = insn_buf; | |
6429 | int off; | |
6430 | ||
6431 | switch (si->off) { | |
6432 | case offsetof(struct bpf_sock_addr, user_family): | |
6433 | SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, | |
6434 | struct sockaddr, uaddr, sa_family); | |
6435 | break; | |
6436 | ||
6437 | case offsetof(struct bpf_sock_addr, user_ip4): | |
6438 | SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( | |
6439 | struct bpf_sock_addr_kern, struct sockaddr_in, uaddr, | |
6440 | sin_addr, BPF_SIZE(si->code), 0, tmp_reg); | |
6441 | break; | |
6442 | ||
6443 | case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): | |
6444 | off = si->off; | |
6445 | off -= offsetof(struct bpf_sock_addr, user_ip6[0]); | |
6446 | SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( | |
6447 | struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr, | |
6448 | sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off, | |
6449 | tmp_reg); | |
6450 | break; | |
6451 | ||
6452 | case offsetof(struct bpf_sock_addr, user_port): | |
6453 | /* To get the port we need to know sa_family first and then treat | |
6454 | * sockaddr as either sockaddr_in or sockaddr_in6. We can | |
6455 | * simplify, though, since the port field has the same offset and | |
6456 | * size in both structures. | |
6457 | * Here we check this invariant and then use just one of the | |
6458 | * structures. | |
6459 | */ | |
6460 | BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) != | |
6461 | offsetof(struct sockaddr_in6, sin6_port)); | |
6462 | BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) != | |
6463 | FIELD_SIZEOF(struct sockaddr_in6, sin6_port)); | |
6464 | SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern, | |
6465 | struct sockaddr_in6, uaddr, | |
6466 | sin6_port, tmp_reg); | |
6467 | break; | |
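/* Concretely (illustrative note): both sockaddr_in and sockaddr_in6
 * start with a 2-byte address family, so sin_port and sin6_port both
 * sit at byte offset 2; the BUILD_BUG_ON()s above verify this, which is
 * why sockaddr_in6 alone suffices for either family.
 */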
6468 | ||
6469 | case offsetof(struct bpf_sock_addr, family): | |
6470 | SOCK_ADDR_LOAD_NESTED_FIELD(struct bpf_sock_addr_kern, | |
6471 | struct sock, sk, sk_family); | |
6472 | break; | |
6473 | ||
6474 | case offsetof(struct bpf_sock_addr, type): | |
6475 | SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( | |
6476 | struct bpf_sock_addr_kern, struct sock, sk, | |
6477 | __sk_flags_offset, BPF_W, 0); | |
6478 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK); | |
6479 | *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT); | |
6480 | break; | |
6481 | ||
6482 | case offsetof(struct bpf_sock_addr, protocol): | |
6483 | SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF( | |
6484 | struct bpf_sock_addr_kern, struct sock, sk, | |
6485 | __sk_flags_offset, BPF_W, 0); | |
6486 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); | |
6487 | *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, | |
6488 | SK_FL_PROTO_SHIFT); | |
6489 | break; | |
1cedee13 AI |
6490 | |
6491 | case offsetof(struct bpf_sock_addr, msg_src_ip4): | |
6492 | /* Treat t_ctx as struct in_addr for msg_src_ip4. */ | |
6493 | SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( | |
6494 | struct bpf_sock_addr_kern, struct in_addr, t_ctx, | |
6495 | s_addr, BPF_SIZE(si->code), 0, tmp_reg); | |
6496 | break; | |
6497 | ||
6498 | case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], | |
6499 | msg_src_ip6[3]): | |
6500 | off = si->off; | |
6501 | off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]); | |
6502 | /* Treat t_ctx as struct in6_addr for msg_src_ip6. */ | |
6503 | SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD_SIZE_OFF( | |
6504 | struct bpf_sock_addr_kern, struct in6_addr, t_ctx, | |
6505 | s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg); | |
6506 | break; | |
4fbac77d AI |
6507 | } |
6508 | ||
6509 | return insn - insn_buf; | |
6510 | } | |
6511 | ||
40304b2a LB |
6512 | static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, |
6513 | const struct bpf_insn *si, | |
6514 | struct bpf_insn *insn_buf, | |
f96da094 DB |
6515 | struct bpf_prog *prog, |
6516 | u32 *target_size) | |
40304b2a LB |
6517 | { |
6518 | struct bpf_insn *insn = insn_buf; | |
6519 | int off; | |
6520 | ||
6521 | switch (si->off) { | |
6522 | case offsetof(struct bpf_sock_ops, op) ... | |
6523 | offsetof(struct bpf_sock_ops, replylong[3]): | |
6524 | BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) != | |
6525 | FIELD_SIZEOF(struct bpf_sock_ops_kern, op)); | |
6526 | BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) != | |
6527 | FIELD_SIZEOF(struct bpf_sock_ops_kern, reply)); | |
6528 | BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) != | |
6529 | FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong)); | |
6530 | off = si->off; | |
6531 | off -= offsetof(struct bpf_sock_ops, op); | |
6532 | off += offsetof(struct bpf_sock_ops_kern, op); | |
6533 | if (type == BPF_WRITE) | |
6534 | *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, | |
6535 | off); | |
6536 | else | |
6537 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, | |
6538 | off); | |
6539 | break; | |
6540 | ||
6541 | case offsetof(struct bpf_sock_ops, family): | |
6542 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); | |
6543 | ||
6544 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
6545 | struct bpf_sock_ops_kern, sk), | |
6546 | si->dst_reg, si->src_reg, | |
6547 | offsetof(struct bpf_sock_ops_kern, sk)); | |
6548 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, | |
6549 | offsetof(struct sock_common, skc_family)); | |
6550 | break; | |
6551 | ||
6552 | case offsetof(struct bpf_sock_ops, remote_ip4): | |
6553 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); | |
6554 | ||
6555 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
6556 | struct bpf_sock_ops_kern, sk), | |
6557 | si->dst_reg, si->src_reg, | |
6558 | offsetof(struct bpf_sock_ops_kern, sk)); | |
6559 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
6560 | offsetof(struct sock_common, skc_daddr)); | |
6561 | break; | |
6562 | ||
6563 | case offsetof(struct bpf_sock_ops, local_ip4): | |
303def35 JF |
6564 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, |
6565 | skc_rcv_saddr) != 4); | |
40304b2a LB |
6566 | |
6567 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
6568 | struct bpf_sock_ops_kern, sk), | |
6569 | si->dst_reg, si->src_reg, | |
6570 | offsetof(struct bpf_sock_ops_kern, sk)); | |
6571 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
6572 | offsetof(struct sock_common, | |
6573 | skc_rcv_saddr)); | |
6574 | break; | |
6575 | ||
6576 | case offsetof(struct bpf_sock_ops, remote_ip6[0]) ... | |
6577 | offsetof(struct bpf_sock_ops, remote_ip6[3]): | |
6578 | #if IS_ENABLED(CONFIG_IPV6) | |
6579 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, | |
6580 | skc_v6_daddr.s6_addr32[0]) != 4); | |
6581 | ||
6582 | off = si->off; | |
6583 | off -= offsetof(struct bpf_sock_ops, remote_ip6[0]); | |
6584 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
6585 | struct bpf_sock_ops_kern, sk), | |
6586 | si->dst_reg, si->src_reg, | |
6587 | offsetof(struct bpf_sock_ops_kern, sk)); | |
6588 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
6589 | offsetof(struct sock_common, | |
6590 | skc_v6_daddr.s6_addr32[0]) + | |
6591 | off); | |
6592 | #else | |
6593 | *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); | |
6594 | #endif | |
6595 | break; | |
6596 | ||
6597 | case offsetof(struct bpf_sock_ops, local_ip6[0]) ... | |
6598 | offsetof(struct bpf_sock_ops, local_ip6[3]): | |
6599 | #if IS_ENABLED(CONFIG_IPV6) | |
6600 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, | |
6601 | skc_v6_rcv_saddr.s6_addr32[0]) != 4); | |
6602 | ||
6603 | off = si->off; | |
6604 | off -= offsetof(struct bpf_sock_ops, local_ip6[0]); | |
6605 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
6606 | struct bpf_sock_ops_kern, sk), | |
6607 | si->dst_reg, si->src_reg, | |
6608 | offsetof(struct bpf_sock_ops_kern, sk)); | |
6609 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
6610 | offsetof(struct sock_common, | |
6611 | skc_v6_rcv_saddr.s6_addr32[0]) + | |
6612 | off); | |
6613 | #else | |
6614 | *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); | |
6615 | #endif | |
6616 | break; | |
6617 | ||
6618 | case offsetof(struct bpf_sock_ops, remote_port): | |
6619 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); | |
6620 | ||
6621 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
6622 | struct bpf_sock_ops_kern, sk), | |
6623 | si->dst_reg, si->src_reg, | |
6624 | offsetof(struct bpf_sock_ops_kern, sk)); | |
6625 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, | |
6626 | offsetof(struct sock_common, skc_dport)); | |
6627 | #ifndef __BIG_ENDIAN_BITFIELD | |
6628 | *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); | |
6629 | #endif | |
6630 | break; | |
6631 | ||
6632 | case offsetof(struct bpf_sock_ops, local_port): | |
6633 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); | |
6634 | ||
6635 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
6636 | struct bpf_sock_ops_kern, sk), | |
6637 | si->dst_reg, si->src_reg, | |
6638 | offsetof(struct bpf_sock_ops_kern, sk)); | |
6639 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, | |
6640 | offsetof(struct sock_common, skc_num)); | |
6641 | break; | |
f19397a5 LB |
6642 | |
6643 | case offsetof(struct bpf_sock_ops, is_fullsock): | |
6644 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
6645 | struct bpf_sock_ops_kern, | |
6646 | is_fullsock), | |
6647 | si->dst_reg, si->src_reg, | |
6648 | offsetof(struct bpf_sock_ops_kern, | |
6649 | is_fullsock)); | |
6650 | break; | |
6651 | ||
44f0e430 LB |
6652 | case offsetof(struct bpf_sock_ops, state): |
6653 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1); | |
6654 | ||
6655 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
6656 | struct bpf_sock_ops_kern, sk), | |
6657 | si->dst_reg, si->src_reg, | |
6658 | offsetof(struct bpf_sock_ops_kern, sk)); | |
6659 | *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg, | |
6660 | offsetof(struct sock_common, skc_state)); | |
6661 | break; | |
6662 | ||
6663 | case offsetof(struct bpf_sock_ops, rtt_min): | |
6664 | BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) != | |
6665 | sizeof(struct minmax)); | |
6666 | BUILD_BUG_ON(sizeof(struct minmax) < | |
6667 | sizeof(struct minmax_sample)); | |
6668 | ||
6669 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
6670 | struct bpf_sock_ops_kern, sk), | |
6671 | si->dst_reg, si->src_reg, | |
6672 | offsetof(struct bpf_sock_ops_kern, sk)); | |
6673 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, | |
6674 | offsetof(struct tcp_sock, rtt_min) + | |
6675 | FIELD_SIZEOF(struct minmax_sample, t)); | |
6676 | break; | |
6677 | ||
34d367c5 LB |
6678 | /* Helper macro for adding read access to tcp_sock or sock fields. */ |
6679 | #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ | |
f19397a5 | 6680 | do { \ |
34d367c5 LB |
6681 | BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \ |
6682 | FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \ | |
f19397a5 LB |
6683 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ |
6684 | struct bpf_sock_ops_kern, \ | |
6685 | is_fullsock), \ | |
6686 | si->dst_reg, si->src_reg, \ | |
6687 | offsetof(struct bpf_sock_ops_kern, \ | |
6688 | is_fullsock)); \ | |
6689 | *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \ | |
6690 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ | |
6691 | struct bpf_sock_ops_kern, sk),\ | |
6692 | si->dst_reg, si->src_reg, \ | |
6693 | offsetof(struct bpf_sock_ops_kern, sk));\ | |
34d367c5 LB |
6694 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \ |
6695 | OBJ_FIELD), \ | |
6696 | si->dst_reg, si->dst_reg, \ | |
6697 | offsetof(OBJ, OBJ_FIELD)); \ | |
f19397a5 LB |
6698 | } while (0) |
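/* Worked example (a sketch, not part of the original file):
 * SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock) expands to
 * roughly
 *
 *	dst_reg = ctx->is_fullsock;
 *	if (dst_reg == 0)
 *		goto out;			// skip the two loads below
 *	dst_reg = ctx->sk;
 *	dst_reg = ((struct tcp_sock *)dst_reg)->snd_cwnd;
 * out:
 *
 * so reads of tcp_sock fields yield 0 on non-full sockets instead of
 * dereferencing a request or timewait sock.
 */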
6699 | ||
b73042b8 LB |
6700 | /* Helper macro for adding write access to tcp_sock or sock fields. |
6701 | * The macro is called with two registers: dst_reg, which contains a pointer | |
6702 | * to the ctx (context), and src_reg, which contains the value that should be | |
6703 | * stored. However, we need an additional register since we cannot overwrite | |
6704 | * dst_reg because it may be used later in the program. | |
6705 | * Instead we "borrow" one of the other registers. We first save its value | |
6706 | * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore | |
6707 | * it at the end of the macro. | |
6708 | */ | |
6709 | #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ | |
6710 | do { \ | |
6711 | int reg = BPF_REG_9; \ | |
6712 | BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \ | |
6713 | FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \ | |
6714 | if (si->dst_reg == reg || si->src_reg == reg) \ | |
6715 | reg--; \ | |
6716 | if (si->dst_reg == reg || si->src_reg == reg) \ | |
6717 | reg--; \ | |
6718 | *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \ | |
6719 | offsetof(struct bpf_sock_ops_kern, \ | |
6720 | temp)); \ | |
6721 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ | |
6722 | struct bpf_sock_ops_kern, \ | |
6723 | is_fullsock), \ | |
6724 | reg, si->dst_reg, \ | |
6725 | offsetof(struct bpf_sock_ops_kern, \ | |
6726 | is_fullsock)); \ | |
6727 | *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \ | |
6728 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ | |
6729 | struct bpf_sock_ops_kern, sk),\ | |
6730 | reg, si->dst_reg, \ | |
6731 | offsetof(struct bpf_sock_ops_kern, sk));\ | |
6732 | *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \ | |
6733 | reg, si->src_reg, \ | |
6734 | offsetof(OBJ, OBJ_FIELD)); \ | |
6735 | *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \ | |
6736 | offsetof(struct bpf_sock_ops_kern, \ | |
6737 | temp)); \ | |
6738 | } while (0) | |
6739 | ||
6740 | #define SOCK_OPS_GET_OR_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \ | |
6741 | do { \ | |
6742 | if (TYPE == BPF_WRITE) \ | |
6743 | SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ | |
6744 | else \ | |
6745 | SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ | |
6746 | } while (0) | |
6747 | ||
f19397a5 | 6748 | case offsetof(struct bpf_sock_ops, snd_cwnd): |
34d367c5 | 6749 | SOCK_OPS_GET_FIELD(snd_cwnd, snd_cwnd, struct tcp_sock); |
f19397a5 LB |
6750 | break; |
6751 | ||
6752 | case offsetof(struct bpf_sock_ops, srtt_us): | |
34d367c5 | 6753 | SOCK_OPS_GET_FIELD(srtt_us, srtt_us, struct tcp_sock); |
f19397a5 | 6754 | break; |
b13d8807 LB |
6755 | |
6756 | case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags): | |
6757 | SOCK_OPS_GET_FIELD(bpf_sock_ops_cb_flags, bpf_sock_ops_cb_flags, | |
6758 | struct tcp_sock); | |
6759 | break; | |
44f0e430 LB |
6760 | |
6761 | case offsetof(struct bpf_sock_ops, snd_ssthresh): | |
6762 | SOCK_OPS_GET_FIELD(snd_ssthresh, snd_ssthresh, struct tcp_sock); | |
6763 | break; | |
6764 | ||
6765 | case offsetof(struct bpf_sock_ops, rcv_nxt): | |
6766 | SOCK_OPS_GET_FIELD(rcv_nxt, rcv_nxt, struct tcp_sock); | |
6767 | break; | |
6768 | ||
6769 | case offsetof(struct bpf_sock_ops, snd_nxt): | |
6770 | SOCK_OPS_GET_FIELD(snd_nxt, snd_nxt, struct tcp_sock); | |
6771 | break; | |
6772 | ||
6773 | case offsetof(struct bpf_sock_ops, snd_una): | |
6774 | SOCK_OPS_GET_FIELD(snd_una, snd_una, struct tcp_sock); | |
6775 | break; | |
6776 | ||
6777 | case offsetof(struct bpf_sock_ops, mss_cache): | |
6778 | SOCK_OPS_GET_FIELD(mss_cache, mss_cache, struct tcp_sock); | |
6779 | break; | |
6780 | ||
6781 | case offsetof(struct bpf_sock_ops, ecn_flags): | |
6782 | SOCK_OPS_GET_FIELD(ecn_flags, ecn_flags, struct tcp_sock); | |
6783 | break; | |
6784 | ||
6785 | case offsetof(struct bpf_sock_ops, rate_delivered): | |
6786 | SOCK_OPS_GET_FIELD(rate_delivered, rate_delivered, | |
6787 | struct tcp_sock); | |
6788 | break; | |
6789 | ||
6790 | case offsetof(struct bpf_sock_ops, rate_interval_us): | |
6791 | SOCK_OPS_GET_FIELD(rate_interval_us, rate_interval_us, | |
6792 | struct tcp_sock); | |
6793 | break; | |
6794 | ||
6795 | case offsetof(struct bpf_sock_ops, packets_out): | |
6796 | SOCK_OPS_GET_FIELD(packets_out, packets_out, struct tcp_sock); | |
6797 | break; | |
6798 | ||
6799 | case offsetof(struct bpf_sock_ops, retrans_out): | |
6800 | SOCK_OPS_GET_FIELD(retrans_out, retrans_out, struct tcp_sock); | |
6801 | break; | |
6802 | ||
6803 | case offsetof(struct bpf_sock_ops, total_retrans): | |
6804 | SOCK_OPS_GET_FIELD(total_retrans, total_retrans, | |
6805 | struct tcp_sock); | |
6806 | break; | |
6807 | ||
6808 | case offsetof(struct bpf_sock_ops, segs_in): | |
6809 | SOCK_OPS_GET_FIELD(segs_in, segs_in, struct tcp_sock); | |
6810 | break; | |
6811 | ||
6812 | case offsetof(struct bpf_sock_ops, data_segs_in): | |
6813 | SOCK_OPS_GET_FIELD(data_segs_in, data_segs_in, struct tcp_sock); | |
6814 | break; | |
6815 | ||
6816 | case offsetof(struct bpf_sock_ops, segs_out): | |
6817 | SOCK_OPS_GET_FIELD(segs_out, segs_out, struct tcp_sock); | |
6818 | break; | |
6819 | ||
6820 | case offsetof(struct bpf_sock_ops, data_segs_out): | |
6821 | SOCK_OPS_GET_FIELD(data_segs_out, data_segs_out, | |
6822 | struct tcp_sock); | |
6823 | break; | |
6824 | ||
6825 | case offsetof(struct bpf_sock_ops, lost_out): | |
6826 | SOCK_OPS_GET_FIELD(lost_out, lost_out, struct tcp_sock); | |
6827 | break; | |
6828 | ||
6829 | case offsetof(struct bpf_sock_ops, sacked_out): | |
6830 | SOCK_OPS_GET_FIELD(sacked_out, sacked_out, struct tcp_sock); | |
6831 | break; | |
6832 | ||
6833 | case offsetof(struct bpf_sock_ops, sk_txhash): | |
6f9bd3d7 LB |
6834 | SOCK_OPS_GET_OR_SET_FIELD(sk_txhash, sk_txhash, |
6835 | struct sock, type); | |
44f0e430 LB |
6836 | break; |
6837 | ||
6838 | case offsetof(struct bpf_sock_ops, bytes_received): | |
6839 | SOCK_OPS_GET_FIELD(bytes_received, bytes_received, | |
6840 | struct tcp_sock); | |
6841 | break; | |
6842 | ||
6843 | case offsetof(struct bpf_sock_ops, bytes_acked): | |
6844 | SOCK_OPS_GET_FIELD(bytes_acked, bytes_acked, struct tcp_sock); | |
6845 | break; | |
6f9bd3d7 | 6846 | |
40304b2a LB |
6847 | } |
6848 | return insn - insn_buf; | |
6849 | } | |
6850 | ||
8108a775 JF |
6851 | static u32 sk_skb_convert_ctx_access(enum bpf_access_type type, |
6852 | const struct bpf_insn *si, | |
6853 | struct bpf_insn *insn_buf, | |
6854 | struct bpf_prog *prog, u32 *target_size) | |
6855 | { | |
6856 | struct bpf_insn *insn = insn_buf; | |
6857 | int off; | |
6858 | ||
6859 | switch (si->off) { | |
6860 | case offsetof(struct __sk_buff, data_end): | |
6861 | off = si->off; | |
6862 | off -= offsetof(struct __sk_buff, data_end); | |
6863 | off += offsetof(struct sk_buff, cb); | |
6864 | off += offsetof(struct tcp_skb_cb, bpf.data_end); | |
6865 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, | |
6866 | si->src_reg, off); | |
6867 | break; | |
6868 | default: | |
6869 | return bpf_convert_ctx_access(type, si, insn_buf, prog, | |
6870 | target_size); | |
6871 | } | |
6872 | ||
6873 | return insn - insn_buf; | |
6874 | } | |
6875 | ||
4f738adb JF |
6876 | static u32 sk_msg_convert_ctx_access(enum bpf_access_type type, |
6877 | const struct bpf_insn *si, | |
6878 | struct bpf_insn *insn_buf, | |
6879 | struct bpf_prog *prog, u32 *target_size) | |
6880 | { | |
6881 | struct bpf_insn *insn = insn_buf; | |
720e7f38 | 6882 | #if IS_ENABLED(CONFIG_IPV6) |
303def35 | 6883 | int off; |
720e7f38 | 6884 | #endif |
4f738adb JF |
6885 | |
6886 | switch (si->off) { | |
6887 | case offsetof(struct sk_msg_md, data): | |
604326b4 | 6888 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data), |
4f738adb | 6889 | si->dst_reg, si->src_reg, |
604326b4 | 6890 | offsetof(struct sk_msg, data)); |
4f738adb JF |
6891 | break; |
6892 | case offsetof(struct sk_msg_md, data_end): | |
604326b4 | 6893 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end), |
4f738adb | 6894 | si->dst_reg, si->src_reg, |
604326b4 | 6895 | offsetof(struct sk_msg, data_end)); |
4f738adb | 6896 | break; |
303def35 JF |
6897 | case offsetof(struct sk_msg_md, family): |
6898 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); | |
6899 | ||
6900 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
604326b4 | 6901 | struct sk_msg, sk), |
303def35 | 6902 | si->dst_reg, si->src_reg, |
604326b4 | 6903 | offsetof(struct sk_msg, sk)); |
303def35 JF |
6904 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, |
6905 | offsetof(struct sock_common, skc_family)); | |
6906 | break; | |
6907 | ||
6908 | case offsetof(struct sk_msg_md, remote_ip4): | |
6909 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); | |
6910 | ||
6911 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
604326b4 | 6912 | struct sk_msg, sk), |
303def35 | 6913 | si->dst_reg, si->src_reg, |
604326b4 | 6914 | offsetof(struct sk_msg, sk)); |
303def35 JF |
6915 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, |
6916 | offsetof(struct sock_common, skc_daddr)); | |
6917 | break; | |
6918 | ||
6919 | case offsetof(struct sk_msg_md, local_ip4): | |
6920 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, | |
6921 | skc_rcv_saddr) != 4); | |
6922 | ||
6923 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
604326b4 | 6924 | struct sk_msg, sk), |
303def35 | 6925 | si->dst_reg, si->src_reg, |
604326b4 | 6926 | offsetof(struct sk_msg, sk)); |
303def35 JF |
6927 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, |
6928 | offsetof(struct sock_common, | |
6929 | skc_rcv_saddr)); | |
6930 | break; | |
6931 | ||
6932 | case offsetof(struct sk_msg_md, remote_ip6[0]) ... | |
6933 | offsetof(struct sk_msg_md, remote_ip6[3]): | |
6934 | #if IS_ENABLED(CONFIG_IPV6) | |
6935 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, | |
6936 | skc_v6_daddr.s6_addr32[0]) != 4); | |
6937 | ||
6938 | off = si->off; | |
6939 | off -= offsetof(struct sk_msg_md, remote_ip6[0]); | |
6940 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
604326b4 | 6941 | struct sk_msg, sk), |
303def35 | 6942 | si->dst_reg, si->src_reg, |
604326b4 | 6943 | offsetof(struct sk_msg, sk)); |
303def35 JF |
6944 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, |
6945 | offsetof(struct sock_common, | |
6946 | skc_v6_daddr.s6_addr32[0]) + | |
6947 | off); | |
6948 | #else | |
6949 | *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); | |
6950 | #endif | |
6951 | break; | |
6952 | ||
6953 | case offsetof(struct sk_msg_md, local_ip6[0]) ... | |
6954 | offsetof(struct sk_msg_md, local_ip6[3]): | |
6955 | #if IS_ENABLED(CONFIG_IPV6) | |
6956 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, | |
6957 | skc_v6_rcv_saddr.s6_addr32[0]) != 4); | |
6958 | ||
6959 | off = si->off; | |
6960 | off -= offsetof(struct sk_msg_md, local_ip6[0]); | |
6961 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
604326b4 | 6962 | struct sk_msg, sk), |
303def35 | 6963 | si->dst_reg, si->src_reg, |
604326b4 | 6964 | offsetof(struct sk_msg, sk)); |
303def35 JF |
6965 | *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, |
6966 | offsetof(struct sock_common, | |
6967 | skc_v6_rcv_saddr.s6_addr32[0]) + | |
6968 | off); | |
6969 | #else | |
6970 | *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); | |
6971 | #endif | |
6972 | break; | |
6973 | ||
6974 | case offsetof(struct sk_msg_md, remote_port): | |
6975 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); | |
6976 | ||
6977 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
604326b4 | 6978 | struct sk_msg, sk), |
303def35 | 6979 | si->dst_reg, si->src_reg, |
604326b4 | 6980 | offsetof(struct sk_msg, sk)); |
303def35 JF |
6981 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, |
6982 | offsetof(struct sock_common, skc_dport)); | |
6983 | #ifndef __BIG_ENDIAN_BITFIELD | |
6984 | *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); | |
6985 | #endif | |
6986 | break; | |
6987 | ||
6988 | case offsetof(struct sk_msg_md, local_port): | |
6989 | BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); | |
6990 | ||
6991 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( | |
604326b4 | 6992 | struct sk_msg, sk), |
303def35 | 6993 | si->dst_reg, si->src_reg, |
604326b4 | 6994 | offsetof(struct sk_msg, sk)); |
303def35 JF |
6995 | *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, |
6996 | offsetof(struct sock_common, skc_num)); | |
6997 | break; | |
4f738adb JF |
6998 | } |
6999 | ||
7000 | return insn - insn_buf; | |
7001 | } | |
7002 | ||
7de16e3a | 7003 | const struct bpf_verifier_ops sk_filter_verifier_ops = { |
4936e352 DB |
7004 | .get_func_proto = sk_filter_func_proto, |
7005 | .is_valid_access = sk_filter_is_valid_access, | |
2492d3b8 | 7006 | .convert_ctx_access = bpf_convert_ctx_access, |
e0cea7ce | 7007 | .gen_ld_abs = bpf_gen_ld_abs, |
89aa0758 AS |
7008 | }; |
7009 | ||
7de16e3a | 7010 | const struct bpf_prog_ops sk_filter_prog_ops = { |
61f3c964 | 7011 | .test_run = bpf_prog_test_run_skb, |
7de16e3a JK |
7012 | }; |
7013 | ||
7014 | const struct bpf_verifier_ops tc_cls_act_verifier_ops = { | |
4936e352 DB |
7015 | .get_func_proto = tc_cls_act_func_proto, |
7016 | .is_valid_access = tc_cls_act_is_valid_access, | |
374fb54e | 7017 | .convert_ctx_access = tc_cls_act_convert_ctx_access, |
36bbef52 | 7018 | .gen_prologue = tc_cls_act_prologue, |
e0cea7ce | 7019 | .gen_ld_abs = bpf_gen_ld_abs, |
7de16e3a JK |
7020 | }; |
7021 | ||
7022 | const struct bpf_prog_ops tc_cls_act_prog_ops = { | |
1cf1cae9 | 7023 | .test_run = bpf_prog_test_run_skb, |
608cd71a AS |
7024 | }; |
7025 | ||
7de16e3a | 7026 | const struct bpf_verifier_ops xdp_verifier_ops = { |
6a773a15 BB |
7027 | .get_func_proto = xdp_func_proto, |
7028 | .is_valid_access = xdp_is_valid_access, | |
7029 | .convert_ctx_access = xdp_convert_ctx_access, | |
7de16e3a JK |
7030 | }; |
7031 | ||
7032 | const struct bpf_prog_ops xdp_prog_ops = { | |
1cf1cae9 | 7033 | .test_run = bpf_prog_test_run_xdp, |
6a773a15 BB |
7034 | }; |
7035 | ||
7de16e3a | 7036 | const struct bpf_verifier_ops cg_skb_verifier_ops = { |
cd339431 | 7037 | .get_func_proto = cg_skb_func_proto, |
0e33661d | 7038 | .is_valid_access = sk_filter_is_valid_access, |
2492d3b8 | 7039 | .convert_ctx_access = bpf_convert_ctx_access, |
7de16e3a JK |
7040 | }; |
7041 | ||
7042 | const struct bpf_prog_ops cg_skb_prog_ops = { | |
1cf1cae9 | 7043 | .test_run = bpf_prog_test_run_skb, |
0e33661d DM |
7044 | }; |
7045 | ||
cd3092c7 MX |
7046 | const struct bpf_verifier_ops lwt_in_verifier_ops = { |
7047 | .get_func_proto = lwt_in_func_proto, | |
3a0af8fd | 7048 | .is_valid_access = lwt_is_valid_access, |
2492d3b8 | 7049 | .convert_ctx_access = bpf_convert_ctx_access, |
7de16e3a JK |
7050 | }; |
7051 | ||
cd3092c7 MX |
7052 | const struct bpf_prog_ops lwt_in_prog_ops = { |
7053 | .test_run = bpf_prog_test_run_skb, | |
7054 | }; | |
7055 | ||
7056 | const struct bpf_verifier_ops lwt_out_verifier_ops = { | |
7057 | .get_func_proto = lwt_out_func_proto, | |
3a0af8fd | 7058 | .is_valid_access = lwt_is_valid_access, |
2492d3b8 | 7059 | .convert_ctx_access = bpf_convert_ctx_access, |
7de16e3a JK |
7060 | }; |
7061 | ||
cd3092c7 | 7062 | const struct bpf_prog_ops lwt_out_prog_ops = { |
1cf1cae9 | 7063 | .test_run = bpf_prog_test_run_skb, |
3a0af8fd TG |
7064 | }; |
7065 | ||
7de16e3a | 7066 | const struct bpf_verifier_ops lwt_xmit_verifier_ops = { |
3a0af8fd TG |
7067 | .get_func_proto = lwt_xmit_func_proto, |
7068 | .is_valid_access = lwt_is_valid_access, | |
2492d3b8 | 7069 | .convert_ctx_access = bpf_convert_ctx_access, |
3a0af8fd | 7070 | .gen_prologue = tc_cls_act_prologue, |
7de16e3a JK |
7071 | }; |
7072 | ||
7073 | const struct bpf_prog_ops lwt_xmit_prog_ops = { | |
1cf1cae9 | 7074 | .test_run = bpf_prog_test_run_skb, |
3a0af8fd TG |
7075 | }; |
7076 | ||
004d4b27 MX |
7077 | const struct bpf_verifier_ops lwt_seg6local_verifier_ops = { |
7078 | .get_func_proto = lwt_seg6local_func_proto, | |
7079 | .is_valid_access = lwt_is_valid_access, | |
7080 | .convert_ctx_access = bpf_convert_ctx_access, | |
7081 | }; | |
7082 | ||
7083 | const struct bpf_prog_ops lwt_seg6local_prog_ops = { | |
7084 | .test_run = bpf_prog_test_run_skb, | |
7085 | }; | |
7086 | ||
7de16e3a | 7087 | const struct bpf_verifier_ops cg_sock_verifier_ops = { |
ae2cf1c4 | 7088 | .get_func_proto = sock_filter_func_proto, |
61023658 | 7089 | .is_valid_access = sock_filter_is_valid_access, |
c64b7983 | 7090 | .convert_ctx_access = bpf_sock_convert_ctx_access, |
61023658 DA |
7091 | }; |
7092 | ||
7de16e3a JK |
7093 | const struct bpf_prog_ops cg_sock_prog_ops = { |
7094 | }; | |
7095 | ||
4fbac77d AI |
7096 | const struct bpf_verifier_ops cg_sock_addr_verifier_ops = { |
7097 | .get_func_proto = sock_addr_func_proto, | |
7098 | .is_valid_access = sock_addr_is_valid_access, | |
7099 | .convert_ctx_access = sock_addr_convert_ctx_access, | |
7100 | }; | |
7101 | ||
7102 | const struct bpf_prog_ops cg_sock_addr_prog_ops = { | |
7103 | }; | |
7104 | ||
7de16e3a | 7105 | const struct bpf_verifier_ops sock_ops_verifier_ops = { |
8c4b4c7e | 7106 | .get_func_proto = sock_ops_func_proto, |
40304b2a LB |
7107 | .is_valid_access = sock_ops_is_valid_access, |
7108 | .convert_ctx_access = sock_ops_convert_ctx_access, | |
7109 | }; | |
7110 | ||
7de16e3a JK |
7111 | const struct bpf_prog_ops sock_ops_prog_ops = { |
7112 | }; | |
7113 | ||
7114 | const struct bpf_verifier_ops sk_skb_verifier_ops = { | |
b005fd18 JF |
7115 | .get_func_proto = sk_skb_func_proto, |
7116 | .is_valid_access = sk_skb_is_valid_access, | |
8108a775 | 7117 | .convert_ctx_access = sk_skb_convert_ctx_access, |
8a31db56 | 7118 | .gen_prologue = sk_skb_prologue, |
b005fd18 JF |
7119 | }; |
7120 | ||
7de16e3a JK |
7121 | const struct bpf_prog_ops sk_skb_prog_ops = { |
7122 | }; | |
7123 | ||
4f738adb JF |
7124 | const struct bpf_verifier_ops sk_msg_verifier_ops = { |
7125 | .get_func_proto = sk_msg_func_proto, | |
7126 | .is_valid_access = sk_msg_is_valid_access, | |
7127 | .convert_ctx_access = sk_msg_convert_ctx_access, | |
7128 | }; | |
7129 | ||
7130 | const struct bpf_prog_ops sk_msg_prog_ops = { | |
7131 | }; | |
7132 | ||
d58e468b PP |
7133 | const struct bpf_verifier_ops flow_dissector_verifier_ops = { |
7134 | .get_func_proto = flow_dissector_func_proto, | |
7135 | .is_valid_access = flow_dissector_is_valid_access, | |
7136 | .convert_ctx_access = bpf_convert_ctx_access, | |
7137 | }; | |
7138 | ||
7139 | const struct bpf_prog_ops flow_dissector_prog_ops = { | |
7140 | }; | |
7141 | ||
8ced425e | 7142 | int sk_detach_filter(struct sock *sk) |
55b33325 PE |
7143 | { |
7144 | int ret = -ENOENT; | |
7145 | struct sk_filter *filter; | |
7146 | ||
d59577b6 VB |
7147 | if (sock_flag(sk, SOCK_FILTER_LOCKED)) |
7148 | return -EPERM; | |
7149 | ||
8ced425e HFS |
7150 | filter = rcu_dereference_protected(sk->sk_filter, |
7151 | lockdep_sock_is_held(sk)); | |
55b33325 | 7152 | if (filter) { |
a9b3cd7f | 7153 | RCU_INIT_POINTER(sk->sk_filter, NULL); |
46bcf14f | 7154 | sk_filter_uncharge(sk, filter); |
55b33325 PE |
7155 | ret = 0; |
7156 | } | |
a3ea269b | 7157 | |
55b33325 PE |
7158 | return ret; |
7159 | } | |
8ced425e | 7160 | EXPORT_SYMBOL_GPL(sk_detach_filter); |
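/* Userspace-side sketch (illustration only, not part of this file): the
 * function above backs the SO_DETACH_FILTER socket option, which takes
 * no option value:
 *
 *	if (setsockopt(fd, SOL_SOCKET, SO_DETACH_FILTER, NULL, 0) < 0)
 *		perror("SO_DETACH_FILTER");	// e.g. ENOENT if none attached
 */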
a8fc9277 | 7161 | |
a3ea269b DB |
7162 | int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, |
7163 | unsigned int len) | |
a8fc9277 | 7164 | { |
a3ea269b | 7165 | struct sock_fprog_kern *fprog; |
a8fc9277 | 7166 | struct sk_filter *filter; |
a3ea269b | 7167 | int ret = 0; |
a8fc9277 PE |
7168 | |
7169 | lock_sock(sk); | |
7170 | filter = rcu_dereference_protected(sk->sk_filter, | |
8ced425e | 7171 | lockdep_sock_is_held(sk)); |
a8fc9277 PE |
7172 | if (!filter) |
7173 | goto out; | |
a3ea269b DB |
7174 | |
7175 | /* We're copying the filter that was originally attached, | |
93d08b69 DB |
7176 | * so no conversion/decoding is needed anymore. eBPF programs that | |
7177 | * have no original program cannot be dumped through this. | |
a3ea269b | 7178 | */ |
93d08b69 | 7179 | ret = -EACCES; |
7ae457c1 | 7180 | fprog = filter->prog->orig_prog; |
93d08b69 DB |
7181 | if (!fprog) |
7182 | goto out; | |
a3ea269b DB |
7183 | |
7184 | ret = fprog->len; | |
a8fc9277 | 7185 | if (!len) |
a3ea269b | 7186 | /* User space only enquires about the number of filter blocks. */ | |
a8fc9277 | 7187 | goto out; |
a3ea269b | 7188 | |
a8fc9277 | 7189 | ret = -EINVAL; |
a3ea269b | 7190 | if (len < fprog->len) |
a8fc9277 PE |
7191 | goto out; |
7192 | ||
7193 | ret = -EFAULT; | |
009937e7 | 7194 | if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog))) |
a3ea269b | 7195 | goto out; |
a8fc9277 | 7196 | |
a3ea269b DB |
7197 | /* Instead of bytes, the API asks us to return the number | |
7198 | * of filter blocks. | |
7199 | */ | |
7200 | ret = fprog->len; | |
a8fc9277 PE |
7201 | out: |
7202 | release_sock(sk); | |
7203 | return ret; | |
7204 | } | |
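/* Userspace-side sketch of the dump protocol implemented above
 * (illustration only, not part of this file). Note that optlen counts
 * filter blocks, not bytes:
 *
 *	struct sock_filter *insns;
 *	socklen_t cnt = 0;
 *
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, NULL, &cnt);	// query count
 *	insns = calloc(cnt, sizeof(*insns));
 *	getsockopt(fd, SOL_SOCKET, SO_GET_FILTER, insns, &cnt);	// copy blocks
 *
 * The second call fails with EINVAL if the supplied count is smaller
 * than the attached program's length.
 */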
2dbb9b9e MKL |
7205 | |
7206 | #ifdef CONFIG_INET | |
7207 | struct sk_reuseport_kern { | |
7208 | struct sk_buff *skb; | |
7209 | struct sock *sk; | |
7210 | struct sock *selected_sk; | |
7211 | void *data_end; | |
7212 | u32 hash; | |
7213 | u32 reuseport_id; | |
7214 | bool bind_inany; | |
7215 | }; | |
7216 | ||
7217 | static void bpf_init_reuseport_kern(struct sk_reuseport_kern *reuse_kern, | |
7218 | struct sock_reuseport *reuse, | |
7219 | struct sock *sk, struct sk_buff *skb, | |
7220 | u32 hash) | |
7221 | { | |
7222 | reuse_kern->skb = skb; | |
7223 | reuse_kern->sk = sk; | |
7224 | reuse_kern->selected_sk = NULL; | |
7225 | reuse_kern->data_end = skb->data + skb_headlen(skb); | |
7226 | reuse_kern->hash = hash; | |
7227 | reuse_kern->reuseport_id = reuse->reuseport_id; | |
7228 | reuse_kern->bind_inany = reuse->bind_inany; | |
7229 | } | |
7230 | ||
7231 | struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk, | |
7232 | struct bpf_prog *prog, struct sk_buff *skb, | |
7233 | u32 hash) | |
7234 | { | |
7235 | struct sk_reuseport_kern reuse_kern; | |
7236 | enum sk_action action; | |
7237 | ||
7238 | bpf_init_reuseport_kern(&reuse_kern, reuse, sk, skb, hash); | |
7239 | action = BPF_PROG_RUN(prog, &reuse_kern); | |
7240 | ||
7241 | if (action == SK_PASS) | |
7242 | return reuse_kern.selected_sk; | |
7243 | else | |
7244 | return ERR_PTR(-ECONNREFUSED); | |
7245 | } | |
7246 | ||
7247 | BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, | |
7248 | struct bpf_map *, map, void *, key, u32, flags) | |
7249 | { | |
7250 | struct sock_reuseport *reuse; | |
7251 | struct sock *selected_sk; | |
7252 | ||
7253 | selected_sk = map->ops->map_lookup_elem(map, key); | |
7254 | if (!selected_sk) | |
7255 | return -ENOENT; | |
7256 | ||
7257 | reuse = rcu_dereference(selected_sk->sk_reuseport_cb); | |
7258 | if (!reuse) | |
7259 | /* selected_sk is unhashed (e.g. by close()) after the | |
7260 | * above map_lookup_elem(). Treat selected_sk as having | |
7261 | * already been removed from the map. | |
7262 | */ | |
7263 | return -ENOENT; | |
7264 | ||
7265 | if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { | |
7266 | struct sock *sk; | |
7267 | ||
7268 | if (unlikely(!reuse_kern->reuseport_id)) | |
7269 | /* There is a small race between adding the | |
7270 | * sk to the map and setting the | |
7271 | * reuse_kern->reuseport_id. | |
7272 | * Treat it as if the sk has not been added | |
7273 | * to the bpf map yet. | |
7274 | */ | |
7275 | return -ENOENT; | |
7276 | ||
7277 | sk = reuse_kern->sk; | |
7278 | if (sk->sk_protocol != selected_sk->sk_protocol) | |
7279 | return -EPROTOTYPE; | |
7280 | else if (sk->sk_family != selected_sk->sk_family) | |
7281 | return -EAFNOSUPPORT; | |
7282 | ||
7283 | /* Catch all. Likely bound to a different sockaddr. */ | |
7284 | return -EBADFD; | |
7285 | } | |
7286 | ||
7287 | reuse_kern->selected_sk = selected_sk; | |
7288 | ||
7289 | return 0; | |
7290 | } | |
7291 | ||
7292 | static const struct bpf_func_proto sk_select_reuseport_proto = { | |
7293 | .func = sk_select_reuseport, | |
7294 | .gpl_only = false, | |
7295 | .ret_type = RET_INTEGER, | |
7296 | .arg1_type = ARG_PTR_TO_CTX, | |
7297 | .arg2_type = ARG_CONST_MAP_PTR, | |
7298 | .arg3_type = ARG_PTR_TO_MAP_KEY, | |
7299 | .arg4_type = ARG_ANYTHING, | |
7300 | }; | |
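/* Minimal SK_REUSEPORT program sketch using the helper above
 * (illustration only; "socks" is a hypothetical
 * BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map and NR_SOCKS its size):
 *
 *	SEC("sk_reuseport")
 *	int select_by_hash(struct sk_reuseport_md *reuse_md)
 *	{
 *		__u32 key = reuse_md->hash % NR_SOCKS;
 *
 *		if (bpf_sk_select_reuseport(reuse_md, &socks, &key, 0))
 *			return SK_DROP;
 *		return SK_PASS;	// kernel then uses selected_sk
 *	}
 */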
7301 | ||
7302 | BPF_CALL_4(sk_reuseport_load_bytes, | |
7303 | const struct sk_reuseport_kern *, reuse_kern, u32, offset, | |
7304 | void *, to, u32, len) | |
7305 | { | |
7306 | return ____bpf_skb_load_bytes(reuse_kern->skb, offset, to, len); | |
7307 | } | |
7308 | ||
7309 | static const struct bpf_func_proto sk_reuseport_load_bytes_proto = { | |
7310 | .func = sk_reuseport_load_bytes, | |
7311 | .gpl_only = false, | |
7312 | .ret_type = RET_INTEGER, | |
7313 | .arg1_type = ARG_PTR_TO_CTX, | |
7314 | .arg2_type = ARG_ANYTHING, | |
7315 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, | |
7316 | .arg4_type = ARG_CONST_SIZE, | |
7317 | }; | |
7318 | ||
7319 | BPF_CALL_5(sk_reuseport_load_bytes_relative, | |
7320 | const struct sk_reuseport_kern *, reuse_kern, u32, offset, | |
7321 | void *, to, u32, len, u32, start_header) | |
7322 | { | |
7323 | return ____bpf_skb_load_bytes_relative(reuse_kern->skb, offset, to, | |
7324 | len, start_header); | |
7325 | } | |
7326 | ||
7327 | static const struct bpf_func_proto sk_reuseport_load_bytes_relative_proto = { | |
7328 | .func = sk_reuseport_load_bytes_relative, | |
7329 | .gpl_only = false, | |
7330 | .ret_type = RET_INTEGER, | |
7331 | .arg1_type = ARG_PTR_TO_CTX, | |
7332 | .arg2_type = ARG_ANYTHING, | |
7333 | .arg3_type = ARG_PTR_TO_UNINIT_MEM, | |
7334 | .arg4_type = ARG_CONST_SIZE, | |
7335 | .arg5_type = ARG_ANYTHING, | |
7336 | }; | |
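/* Editorial sketch (not part of filter.c): on the BPF side these two
 * wrappers surface as the usual bpf_skb_load_bytes() and
 * bpf_skb_load_bytes_relative() calls on a struct sk_reuseport_md
 * context. A common pattern is to read the network header and steer the
 * selection by it; variable names here are illustrative.
 *
 *	struct iphdr iph;
 *	__u32 key;
 *
 *	if (bpf_skb_load_bytes_relative(md, 0, &iph, sizeof(iph),
 *					BPF_HDR_START_NET))
 *		return SK_DROP;
 *	key = iph.saddr % 16;	// e.g. pin a source address to one socket
 */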
7337 | ||
7338 | static const struct bpf_func_proto * | |
7339 | sk_reuseport_func_proto(enum bpf_func_id func_id, | |
7340 | const struct bpf_prog *prog) | |
7341 | { | |
7342 | switch (func_id) { | |
7343 | case BPF_FUNC_sk_select_reuseport: | |
7344 | return &sk_select_reuseport_proto; | |
7345 | case BPF_FUNC_skb_load_bytes: | |
7346 | return &sk_reuseport_load_bytes_proto; | |
7347 | case BPF_FUNC_skb_load_bytes_relative: | |
7348 | return &sk_reuseport_load_bytes_relative_proto; | |
7349 | default: | |
7350 | return bpf_base_func_proto(func_id); | |
7351 | } | |
7352 | } | |
7353 | ||
7354 | static bool | |
7355 | sk_reuseport_is_valid_access(int off, int size, | |
7356 | enum bpf_access_type type, | |
7357 | const struct bpf_prog *prog, | |
7358 | struct bpf_insn_access_aux *info) | |
7359 | { | |
7360 | const u32 size_default = sizeof(__u32); | |
7361 | ||
7362 | if (off < 0 || off >= sizeof(struct sk_reuseport_md) || | |
7363 | off % size || type != BPF_READ) | |
7364 | return false; | |
7365 | ||
7366 | switch (off) { | |
7367 | case offsetof(struct sk_reuseport_md, data): | |
7368 | info->reg_type = PTR_TO_PACKET; | |
7369 | return size == sizeof(__u64); | |
7370 | ||
7371 | case offsetof(struct sk_reuseport_md, data_end): | |
7372 | info->reg_type = PTR_TO_PACKET_END; | |
7373 | return size == sizeof(__u64); | |
7374 | ||
7375 | case offsetof(struct sk_reuseport_md, hash): | |
7376 | return size == size_default; | |
7377 | ||
7378 | /* Fields that allow narrowing */ | |
7379 | case offsetof(struct sk_reuseport_md, eth_protocol): | |
7380 | if (size < FIELD_SIZEOF(struct sk_buff, protocol)) | |
7381 | return false; | |
4597b62f | 7382 | /* fall through */ |
2dbb9b9e MKL |
7383 | case offsetof(struct sk_reuseport_md, ip_protocol): |
7384 | case offsetof(struct sk_reuseport_md, bind_inany): | |
7385 | case offsetof(struct sk_reuseport_md, len): | |
7386 | bpf_ctx_record_field_size(info, size_default); | |
7387 | return bpf_ctx_narrow_access_ok(off, size, size_default); | |
7388 | ||
7389 | default: | |
7390 | return false; | |
7391 | } | |
7392 | } | |
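/* Editorial sketch (not part of filter.c): what the checks above allow a
 * program to do with its context (field reads shown in BPF C; the
 * compiler emits the corresponding load sizes):
 *
 *	void *data     = md->data;	// 8-byte load, PTR_TO_PACKET
 *	void *data_end = md->data_end;	// 8-byte load, PTR_TO_PACKET_END
 *	__u32 hash     = md->hash;	// only a full 4-byte load verifies
 *	__u8 proto     = *(__u8 *)&md->ip_protocol;	// narrowed load (byte 0) is allowed
 *
 * A 2-byte load of hash, any out-of-bounds or misaligned offset, and any
 * write to the context are all rejected.
 */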
7393 | ||
7394 | #define SK_REUSEPORT_LOAD_FIELD(F) ({ \ | |
7395 | *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \ | |
7396 | si->dst_reg, si->src_reg, \ | |
7397 | bpf_target_off(struct sk_reuseport_kern, F, \ | |
7398 | FIELD_SIZEOF(struct sk_reuseport_kern, F), \ | |
7399 | target_size)); \ | |
7400 | }) | |
7401 | ||
7402 | #define SK_REUSEPORT_LOAD_SKB_FIELD(SKB_FIELD) \ | |
7403 | SOCK_ADDR_LOAD_NESTED_FIELD(struct sk_reuseport_kern, \ | |
7404 | struct sk_buff, \ | |
7405 | skb, \ | |
7406 | SKB_FIELD) | |
7407 | ||
7408 | #define SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(SK_FIELD, BPF_SIZE, EXTRA_OFF) \ | |
7409 | SOCK_ADDR_LOAD_NESTED_FIELD_SIZE_OFF(struct sk_reuseport_kern, \ | |
7410 | struct sock, \ | |
7411 | sk, \ | |
7412 | SK_FIELD, BPF_SIZE, EXTRA_OFF) | |
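/* Editorial sketch (not part of filter.c): for a 4-byte member such as
 * hash, SK_REUSEPORT_LOAD_FIELD(hash) boils down to one context load,
 * roughly:
 *
 *	*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
 *			      offsetof(struct sk_reuseport_kern, hash));
 *
 * bpf_target_off() also stores the member size in *target_size so the
 * verifier can patch any narrowed access afterwards.
 */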
7413 | ||
7414 | static u32 sk_reuseport_convert_ctx_access(enum bpf_access_type type, | |
7415 | const struct bpf_insn *si, | |
7416 | struct bpf_insn *insn_buf, | |
7417 | struct bpf_prog *prog, | |
7418 | u32 *target_size) | |
7419 | { | |
7420 | struct bpf_insn *insn = insn_buf; | |
7421 | ||
7422 | switch (si->off) { | |
7423 | case offsetof(struct sk_reuseport_md, data): | |
7424 | SK_REUSEPORT_LOAD_SKB_FIELD(data); | |
7425 | break; | |
7426 | ||
7427 | case offsetof(struct sk_reuseport_md, len): | |
7428 | SK_REUSEPORT_LOAD_SKB_FIELD(len); | |
7429 | break; | |
7430 | ||
7431 | case offsetof(struct sk_reuseport_md, eth_protocol): | |
7432 | SK_REUSEPORT_LOAD_SKB_FIELD(protocol); | |
7433 | break; | |
7434 | ||
7435 | case offsetof(struct sk_reuseport_md, ip_protocol): | |
3f6e138d | 7436 | BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE); |
2dbb9b9e MKL |
7437 | SK_REUSEPORT_LOAD_SK_FIELD_SIZE_OFF(__sk_flags_offset, |
7438 | BPF_W, 0); | |
7439 | *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); | |
7440 | *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, | |
7441 | SK_FL_PROTO_SHIFT); | |
7442 | /* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian | |
7443 | * aware. No further narrowing or masking is needed. | |
7444 | */ | |
7445 | *target_size = 1; | |
7446 | break; | |
7447 | ||
7448 | case offsetof(struct sk_reuseport_md, data_end): | |
7449 | SK_REUSEPORT_LOAD_FIELD(data_end); | |
7450 | break; | |
7451 | ||
7452 | case offsetof(struct sk_reuseport_md, hash): | |
7453 | SK_REUSEPORT_LOAD_FIELD(hash); | |
7454 | break; | |
7455 | ||
7456 | case offsetof(struct sk_reuseport_md, bind_inany): | |
7457 | SK_REUSEPORT_LOAD_FIELD(bind_inany); | |
7458 | break; | |
7459 | } | |
7460 | ||
7461 | return insn - insn_buf; | |
7462 | } | |
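/* Editorial sketch (not part of filter.c): the ip_protocol case cannot
 * load sk->sk_protocol directly because it is a bitfield, so the emitted
 * instructions are equivalent to:
 *
 *	dst  = *(u32 *)((void *)reuse_kern->sk + __sk_flags_offset);
 *	dst &= SK_FL_PROTO_MASK;	// isolate the protocol bits
 *	dst >>= SK_FL_PROTO_SHIFT;	// e.g. dst == IPPROTO_UDP
 *
 * The BUILD_BUG_ON() above guards the assumption that the mask covers
 * exactly one byte, which is why *target_size is reported as 1.
 */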
7463 | ||
7464 | const struct bpf_verifier_ops sk_reuseport_verifier_ops = { | |
7465 | .get_func_proto = sk_reuseport_func_proto, | |
7466 | .is_valid_access = sk_reuseport_is_valid_access, | |
7467 | .convert_ctx_access = sk_reuseport_convert_ctx_access, | |
7468 | }; | |
7469 | ||
7470 | const struct bpf_prog_ops sk_reuseport_prog_ops = { | |
7471 | }; | |
7472 | #endif /* CONFIG_INET */ |