/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <linux/atomic.h>
#include <linux/bpf.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/sockptr.h>
#include <crypto/sha1.h>
#include <linux/u64_stats_sync.h>

#include <net/sch_generic.h>

#include <asm/byteorder.h>
#include <uapi/linux/filter.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
struct sock_reuseport;
struct ctl_table;
struct ctl_table_header;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_2	/* scratch reg */
#define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
#define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */

/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX		MAX_BPF_REG
#define MAX_BPF_EXT_REG		(MAX_BPF_REG + 1)
#define MAX_BPF_JIT_REG		MAX_BPF_EXT_REG

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL	0xf0

/* unused opcode to mark special load instruction. Same as BPF_ABS */
#define BPF_PROBE_MEM	0x20

/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS	0xe0

/* unused opcode to mark speculation barrier for mitigating
 * Speculative Store Bypass
 */
#define BPF_NOSPEC	0xc0

/* As per nm, we expose JITed images as text (code) section for
 * kallsyms. That way, tools like perf can find it to match
 * addresses.
 */
#define BPF_SYM_ELF_TYPE	't'

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = IMM })

/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST)					\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = DST,					\
		.off = 0,					\
		.imm = 1 })

static inline bool insn_is_zext(const struct bpf_insn *insn)
{
	return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
}

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off = 0,					\
		.imm = IMM })

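/* Example (editor's illustration): BPF_LD_ABS(BPF_H, 12) loads the
 * halfword at offset 12 of the linear packet data into R0; for an
 * Ethernet frame that is the EtherType field, the classic BPF idiom
 * for protocol matching.
 */
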
/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = OFF,					\
		.imm = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = OFF,					\
		.imm = 0 })

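/* Example (editor's illustration): BPF_LDX_MEM(BPF_W, BPF_REG_0,
 * BPF_REG_1, 0) performs "r0 = *(u32 *)(r1 + 0)", and
 * BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0) stores it back.
 */
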
/*
 * Atomic operations:
 *
 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16, src_reg);
 *   BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg + off16, src_reg);
 *   BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg + off16, src_reg);
 *   BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg + off16, src_reg);
 *   BPF_XCHG                 src_reg = atomic_xchg(dst_reg + off16, src_reg)
 *   BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg)
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = OFF,					\
		.imm = OP })

/* Legacy alias */
#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF)

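/* Example (editor's illustration): BPF_ATOMIC_OP(BPF_DW, BPF_ADD,
 * BPF_REG_1, BPF_REG_2, 0) atomically adds r2 to the u64 at (r1 + 0);
 * using BPF_ADD | BPF_FETCH as the OP would additionally return the
 * pre-existing value in r2, per the table above.
 */
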
/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = OFF,					\
		.imm = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = OFF,					\
		.imm = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = OFF,					\
		.imm = IMM })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code = BPF_JMP32 | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = OFF,					\
		.imm = 0 })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)			\
	((struct bpf_insn) {					\
		.code = BPF_JMP32 | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off = OFF,					\
		.imm = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF)						\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_JA,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off = OFF,					\
		.imm = 0 })

/* Relative call */

#define BPF_CALL_REL(TGT)					\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = BPF_PSEUDO_CALL,			\
		.off = 0,					\
		.imm = TGT })

/* Convert function address to BPF immediate */

#define BPF_CALL_IMM(x)	((void *)(x) - (void *)__bpf_call_base)

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = BPF_CALL_IMM(FUNC) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off = OFF,					\
		.imm = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = 0 })

/* Speculation barrier */

#define BPF_ST_NOSPEC()						\
	((struct bpf_insn) {					\
		.code = BPF_ST | BPF_NOSPEC,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off = 0,					\
		.imm = 0 })

/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K)					\
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF)				\
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))

#define bytes_to_bpf_size(bytes)				\
({								\
	int bpf_size = -EINVAL;					\
								\
	if (bytes == sizeof(u8))				\
		bpf_size = BPF_B;				\
	else if (bytes == sizeof(u16))				\
		bpf_size = BPF_H;				\
	else if (bytes == sizeof(u32))				\
		bpf_size = BPF_W;				\
	else if (bytes == sizeof(u64))				\
		bpf_size = BPF_DW;				\
								\
	bpf_size;						\
})

#define bpf_size_to_bytes(bpf_size)				\
({								\
	int bytes = -EINVAL;					\
								\
	if (bpf_size == BPF_B)					\
		bytes = sizeof(u8);				\
	else if (bpf_size == BPF_H)				\
		bytes = sizeof(u16);				\
	else if (bpf_size == BPF_W)				\
		bytes = sizeof(u32);				\
	else if (bpf_size == BPF_DW)				\
		bytes = sizeof(u64);				\
								\
	bytes;							\
})

#define BPF_SIZEOF(type)					\
({								\
	const int __size = bytes_to_bpf_size(sizeof(type));	\
	BUILD_BUG_ON(__size < 0);				\
	__size;							\
})

#define BPF_FIELD_SIZEOF(type, field)				\
({								\
	const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
	BUILD_BUG_ON(__size < 0);				\
	__size;							\
})

#define BPF_LDST_BYTES(insn)					\
({								\
	const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
	WARN_ON(__size < 0);					\
	__size;							\
})

#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a)						  \
	(__force t)							  \
	(__force							  \
	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
				      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n)							  \
	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2,  \
		  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...)					\
	static __always_inline						\
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
	typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__));	\
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__))	\
	{								\
		return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
	}								\
	static __always_inline						\
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)

#define bpf_ctx_range(TYPE, MEMBER)					\
	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2)			\
	offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
#if BITS_PER_LONG == 64
# define bpf_ctx_range_ptr(TYPE, MEMBER)				\
	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#else
# define bpf_ctx_range_ptr(TYPE, MEMBER)				\
	offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
#endif /* BITS_PER_LONG == 64 */

#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE)			\
({									\
	BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE));		\
	*(PTR_SIZE) = (SIZE);						\
	offsetof(TYPE, MEMBER);						\
})

/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;	/* struct sock_filter * */
};

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

/* Some arches need doubleword alignment for their instructions and/or data */
#define BPF_IMAGE_ALIGNMENT 8

struct bpf_binary_header {
	u32 size;
	u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};

struct bpf_prog_stats {
	u64_stats_t cnt;
	u64_stats_t nsecs;
	u64_stats_t misses;
	struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));

struct sk_filter {
	refcount_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);

extern struct mutex nf_conn_btf_access_lock;
extern int (*nfct_btf_struct_access)(struct bpf_verifier_log *log,
				     const struct bpf_reg_state *reg,
				     int off, int size, enum bpf_access_type atype,
				     u32 *next_btf_id, enum bpf_type_flag *flag);

typedef unsigned int (*bpf_dispatcher_fn)(const void *ctx,
					  const struct bpf_insn *insnsi,
					  unsigned int (*bpf_func)(const void *,
								   const struct bpf_insn *));

static __always_inline u32 __bpf_prog_run(const struct bpf_prog *prog,
					  const void *ctx,
					  bpf_dispatcher_fn dfunc)
{
	u32 ret;

	cant_migrate();
	if (static_branch_unlikely(&bpf_stats_enabled_key)) {
		struct bpf_prog_stats *stats;
		u64 start = sched_clock();
		unsigned long flags;

		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
		stats = this_cpu_ptr(prog->stats);
		flags = u64_stats_update_begin_irqsave(&stats->syncp);
		u64_stats_inc(&stats->cnt);
		u64_stats_add(&stats->nsecs, sched_clock() - start);
		u64_stats_update_end_irqrestore(&stats->syncp, flags);
	} else {
		ret = dfunc(ctx, prog->insnsi, prog->bpf_func);
	}
	return ret;
}

static __always_inline u32 bpf_prog_run(const struct bpf_prog *prog, const void *ctx)
{
	return __bpf_prog_run(prog, ctx, bpf_dispatcher_nop_func);
}

/*
 * Use in preemptible and therefore migratable context to make sure that
 * the execution of the BPF program runs on one CPU.
 *
 * This uses migrate_disable/enable() explicitly to document that the
 * invocation of a BPF program does not require reentrancy protection
 * against a BPF program which is invoked from a preempting task.
 */
static inline u32 bpf_prog_run_pin_on_cpu(const struct bpf_prog *prog,
					  const void *ctx)
{
	u32 ret;

	migrate_disable();
	ret = bpf_prog_run(prog, ctx);
	migrate_enable();
	return ret;
}

#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_meta;
	void *data_end;
};

struct bpf_nh_params {
	u32 nh_family;
	union {
		u32 ipv4_nh;
		struct in6_addr ipv6_nh;
	};
};

struct bpf_redirect_info {
	u64 tgt_index;
	void *tgt_value;
	struct bpf_map *map;
	u32 flags;
	u32 kern_flags;
	u32 map_id;
	enum bpf_map_type map_type;
	struct bpf_nh_params nh;
};

DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);

/* flags for bpf_redirect_info kern_flags */
#define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */

/* Compute the linear packet data range [data, data_end) which
 * will be accessed by various program types (cls_bpf, act_bpf,
 * lwt, ...). Subsystems allowing direct data access must (!)
 * ensure that cb[] area can be written to when BPF program is
 * invoked (otherwise cb[] save/restore is necessary).
 */
static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
	cb->data_meta = skb->data - skb_metadata_len(skb);
	cb->data_end  = skb->data + skb_headlen(skb);
}

/* Similar to bpf_compute_data_pointers(), except that it saves the
 * original data in cb->data and cb->meta_data for later restore.
 */
static inline void bpf_compute_and_save_data_end(
	struct sk_buff *skb, void **saved_data_end)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	*saved_data_end = cb->data_end;
	cb->data_end = skb->data + skb_headlen(skb);
}

/* Restore data saved by bpf_compute_and_save_data_end(). */
static inline void bpf_restore_data_end(
	struct sk_buff *skb, void *saved_data_end)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	cb->data_end = saved_data_end;
}

static inline u8 *bpf_skb_cb(const struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer meta
	 * data between tail calls. Since this also needs to work with
	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to be
	 * saved/restored so that protocol specific skb->cb[] data won't
	 * be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
		     sizeof_field(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}

/* Must be invoked with migration disabled */
static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
					 const void *ctx)
{
	const struct sk_buff *skb = ctx;
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = bpf_prog_run(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u32 res;

	migrate_disable();
	res = __bpf_prog_run_save_cb(prog, skb);
	migrate_enable();
	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u32 res;

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	res = bpf_prog_run_pin_on_cpu(prog, skb);
	return res;
}

DECLARE_BPF_DISPATCHER(xdp)

DECLARE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key);

u32 xdp_master_redirect(struct xdp_buff *xdp);

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
					    struct xdp_buff *xdp)
{
	/* Driver XDP hooks are invoked within a single NAPI poll cycle and thus
	 * under local_bh_disable(), which provides the needed RCU protection
	 * for accessing map entries.
	 */
	u32 act = __bpf_prog_run(prog, xdp, BPF_DISPATCHER_FUNC(xdp));

	if (static_branch_unlikely(&bpf_master_redirect_enabled_key)) {
		if (act == XDP_TX && netif_is_bond_slave(xdp->rxq->dev))
			act = xdp_master_redirect(xdp);
	}

	return act;
}

void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
	return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
	return round_up(bpf_prog_insn_size(prog) +
			sizeof(__be64) + 1, SHA1_BLOCK_SIZE);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* When classic BPF programs have been loaded and the arch
	 * does not have a classic BPF JIT (anymore), they have been
	 * converted via bpf_migrate_filter() to eBPF and thus always
	 * have an unspec program type.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline u32 bpf_ctx_off_adjust_machine(u32 size)
{
	const u32 size_machine = sizeof(unsigned long);

	if (size > size_machine && size % size_machine == 0)
		size = size_machine;

	return size;
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
	return size <= size_default && (size & (size - 1)) == 0;
}

static inline u8
bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
{
	u8 access_off = off & (size_default - 1);

#ifdef __LITTLE_ENDIAN
	return access_off;
#else
	return size_default - (access_off + size);
#endif
}

#define bpf_ctx_wide_access_ok(off, size, type, field)			\
	(size == sizeof(__u64) &&					\
	off >= offsetof(type, field) &&					\
	off + sizeof(__u64) <= offsetofend(type, field) &&		\
	off % sizeof(__u64) == 0)

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
	if (!fp->jited) {
		set_vm_flush_reset_perms(fp);
		set_memory_ro((unsigned long)fp, fp->pages);
	}
#endif
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
	set_vm_flush_reset_perms(hdr);
	set_memory_ro((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
	set_memory_x((unsigned long)hdr, hdr->size >> PAGE_SHIFT);
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

bool bpf_opcode_in_insntable(u8 code);

void bpf_prog_free_linfo(struct bpf_prog *prog);
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
void bpf_prog_jit_attempt_done(struct bpf_prog *prog);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, sockptr_t optval, unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
	 (void *)__bpf_call_base)

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_jit_needs_zext(void);
bool bpf_jit_supports_subprog_tailcalls(void);
bool bpf_jit_supports_kfunc_call(void);
bool bpf_helper_changes_pkt_data(void *func);

static inline bool bpf_dump_raw_ok(const struct cred *cred)
{
	/* Reconstruction of call-sites is dependent on kallsyms,
	 * thus make dump the same restriction.
	 */
	return kallsyms_show_value(cred);
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);

void bpf_clear_redirect_map(struct bpf_map *map);

static inline bool xdp_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_set_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_clear_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
}

static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
				 unsigned int pktlen)
{
	unsigned int len;

	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (pktlen > len)
		return -EMSGSIZE;

	return 0;
}

/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
 * same cpu context. Further, for best results, no more than a single map
 * for the do_redirect/do_flush pair should be used. This limitation is
 * because we only track one map and force a flush when the map changes.
 * This does not appear to be a real limitation for existing software.
 */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct xdp_buff *xdp, struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
		    struct xdp_buff *xdp,
		    struct bpf_prog *prog);
int xdp_do_redirect_frame(struct net_device *dev,
			  struct xdp_buff *xdp,
			  struct xdp_frame *xdpf,
			  struct bpf_prog *prog);
void xdp_do_flush(void);

/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
 * it is no longer only flushing maps. Keep this define for compatibility
 * until all drivers are updated - do not use xdp_do_flush_map() in new code!
 */
#define xdp_do_flush_map xdp_do_flush

void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);

#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
				  struct bpf_prog *prog, struct sk_buff *skb,
				  struct sock *migrating_sk,
				  u32 hash);
#else
static inline struct sock *
bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
		     struct bpf_prog *prog, struct sk_buff *skb,
		     struct sock *migrating_sk,
		     u32 hash)
{
	return NULL;
}
#endif

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;
extern long bpf_jit_limit_max;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

void bpf_jit_fill_hole_with_zero(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
u64 bpf_jit_alloc_exec_limit(void);
void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);
struct bpf_binary_header *
bpf_jit_binary_pack_hdr(const struct bpf_prog *fp);

void *bpf_prog_pack_alloc(u32 size, bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_prog_pack_free(struct bpf_binary_header *hdr);

static inline bool bpf_prog_kallsyms_verify_off(const struct bpf_prog *fp)
{
	return list_empty(&fp->aux->ksym.lnode) ||
	       fp->aux->ksym.lnode.prev == LIST_POISON2;
}

struct bpf_binary_header *
bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image,
			  unsigned int alignment,
			  struct bpf_binary_header **rw_hdr,
			  u8 **rw_image,
			  bpf_jit_fill_hole_t bpf_fill_ill_insns);
int bpf_jit_binary_pack_finalize(struct bpf_prog *prog,
				 struct bpf_binary_header *ro_header,
				 struct bpf_binary_header *rw_header);
void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header,
			      struct bpf_binary_header *rw_header);

int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
				struct bpf_jit_poke_descriptor *poke);

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
	return true;
# else
	return false;
# endif
}

static inline bool ebpf_jit_enabled(void)
{
	return bpf_jit_enable && bpf_jit_is_ebpf();
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return fp->jited && bpf_jit_is_ebpf();
}

static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
	/* These are the prerequisites, should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!prog->jit_requested)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && bpf_capable())
		return false;

	return true;
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	/* There are a couple of corner cases where kallsyms should
	 * not be enabled f.e. on hardening.
	 */
	if (bpf_jit_harden)
		return false;
	if (!bpf_jit_kallsyms)
		return false;
	if (bpf_jit_kallsyms == 1)
		return true;

	return false;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym);

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	const char *ret = __bpf_address_lookup(addr, size, off, sym);

	if (ret && modname)
		*modname = NULL;
	return ret;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);

#else /* CONFIG_BPF_JIT */

static inline bool ebpf_jit_enabled(void)
{
	return false;
}

static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
	return false;
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return false;
}

static inline int
bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
			    struct bpf_jit_poke_descriptor *poke)
{
	return -ENOTSUPP;
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	return false;
}

static inline const char *
__bpf_address_lookup(unsigned long addr, unsigned long *size,
		     unsigned long *off, char *sym)
{
	return NULL;
}

static inline bool is_bpf_text_address(unsigned long addr)
{
	return false;
}

static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
				  char *type, char *sym)
{
	return -ERANGE;
}

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}

static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}

static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}

#endif /* CONFIG_BPF_JIT */

void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		fallthrough;
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

struct bpf_sock_addr_kern {
	struct sock *sk;
	struct sockaddr *uaddr;
	/* Temporary "register" to make indirect stores to nested structures
	 * defined above. We need three registers to make such a store, but
	 * only two (src and dst) are available at convert_ctx_access time
	 */
	u64 tmp_reg;
	void *t_ctx;	/* Attach type specific context. */
};

struct bpf_sock_ops_kern {
	struct	sock *sk;
	union {
		u32 args[4];
		u32 reply;
		u32 replylong[4];
	};
	struct sk_buff	*syn_skb;
	struct sk_buff	*skb;
	void	*skb_data_end;
	u8	op;
	u8	is_fullsock;
	u8	remaining_opt_len;
	u64	temp;			/* temp and everything after is not
					 * initialized to 0 before calling
					 * the BPF program. New fields that
					 * should be initialized to 0 should
					 * be inserted before temp.
					 * temp is scratch storage used by
					 * sock_ops_convert_ctx_access
					 * as temporary storage of a register.
					 */
};

struct bpf_sysctl_kern {
	struct ctl_table_header *head;
	struct ctl_table *table;
	void *cur_val;
	size_t cur_len;
	void *new_val;
	size_t new_len;
	int new_updated;
	int write;
	loff_t *ppos;
	/* Temporary "register" for indirect stores to ppos. */
	u64 tmp_reg;
};

#define BPF_SOCKOPT_KERN_BUF_SIZE	32
struct bpf_sockopt_buf {
	u8	data[BPF_SOCKOPT_KERN_BUF_SIZE];
};

struct bpf_sockopt_kern {
	struct sock	*sk;
	u8		*optval;
	u8		*optval_end;
	s32		level;
	s32		optname;
	s32		optlen;
	/* for retval in struct bpf_cg_run_ctx */
	struct task_struct *current_task;
	/* Temporary "register" for indirect stores to ppos. */
	u64 tmp_reg;
};

int copy_bpf_fprog_from_user(struct sock_fprog *dst, sockptr_t src, int len);

struct bpf_sk_lookup_kern {
	u16		family;
	u16		protocol;
	__be16		sport;
	u16		dport;
	struct {
		__be32 saddr;
		__be32 daddr;
	} v4;
	struct {
		const struct in6_addr *saddr;
		const struct in6_addr *daddr;
	} v6;
	struct sock	*selected_sk;
	u32		ingress_ifindex;
	bool		no_reuseport;
};

extern struct static_key_false bpf_sk_lookup_enabled;

/* Runners for BPF_SK_LOOKUP programs to invoke on socket lookup.
 *
 * Allowed return values for a BPF SK_LOOKUP program are SK_PASS and
 * SK_DROP. Their meaning is as follows:
 *
 *  SK_PASS && ctx.selected_sk != NULL: use selected_sk as lookup result
 *  SK_PASS && ctx.selected_sk == NULL: continue to htable-based socket lookup
 *  SK_DROP                           : terminate lookup with -ECONNREFUSED
 *
 * This macro aggregates return values and selected sockets from
 * multiple BPF programs according to following rules in order:
 *
 *  1. If any program returned SK_PASS and a non-NULL ctx.selected_sk,
 *     macro result is SK_PASS and last ctx.selected_sk is used.
 *  2. If any program returned SK_DROP return value,
 *     macro result is SK_DROP.
 *  3. Otherwise result is SK_PASS and ctx.selected_sk is NULL.
 *
 * Caller must ensure that the prog array is non-NULL, and that the
 * array as well as the programs it contains remain valid.
 */
#define BPF_PROG_SK_LOOKUP_RUN_ARRAY(array, ctx, func)			\
	({								\
		struct bpf_sk_lookup_kern *_ctx = &(ctx);		\
		struct bpf_prog_array_item *_item;			\
		struct sock *_selected_sk = NULL;			\
		bool _no_reuseport = false;				\
		struct bpf_prog *_prog;					\
		bool _all_pass = true;					\
		u32 _ret;						\
									\
		migrate_disable();					\
		_item = &(array)->items[0];				\
		while ((_prog = READ_ONCE(_item->prog))) {		\
			/* restore most recent selection */		\
			_ctx->selected_sk = _selected_sk;		\
			_ctx->no_reuseport = _no_reuseport;		\
									\
			_ret = func(_prog, _ctx);			\
			if (_ret == SK_PASS && _ctx->selected_sk) {	\
				/* remember last non-NULL socket */	\
				_selected_sk = _ctx->selected_sk;	\
				_no_reuseport = _ctx->no_reuseport;	\
			} else if (_ret == SK_DROP && _all_pass) {	\
				_all_pass = false;			\
			}						\
			_item++;					\
		}							\
		_ctx->selected_sk = _selected_sk;			\
		_ctx->no_reuseport = _no_reuseport;			\
		migrate_enable();					\
		_all_pass || _selected_sk ? SK_PASS : SK_DROP;		\
	})

static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol,
					const __be32 saddr, const __be16 sport,
					const __be32 daddr, const u16 dport,
					const int ifindex, struct sock **psk)
{
	struct bpf_prog_array *run_array;
	struct sock *selected_sk = NULL;
	bool no_reuseport = false;

	rcu_read_lock();
	run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
	if (run_array) {
		struct bpf_sk_lookup_kern ctx = {
			.family		= AF_INET,
			.protocol	= protocol,
			.v4.saddr	= saddr,
			.v4.daddr	= daddr,
			.sport		= sport,
			.dport		= dport,
			.ingress_ifindex	= ifindex,
		};
		u32 act;

		act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
		if (act == SK_PASS) {
			selected_sk = ctx.selected_sk;
			no_reuseport = ctx.no_reuseport;
		} else {
			selected_sk = ERR_PTR(-ECONNREFUSED);
		}
	}
	rcu_read_unlock();
	*psk = selected_sk;
	return no_reuseport;
}

#if IS_ENABLED(CONFIG_IPV6)
static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
					const struct in6_addr *saddr,
					const __be16 sport,
					const struct in6_addr *daddr,
					const u16 dport,
					const int ifindex, struct sock **psk)
{
	struct bpf_prog_array *run_array;
	struct sock *selected_sk = NULL;
	bool no_reuseport = false;

	rcu_read_lock();
	run_array = rcu_dereference(net->bpf.run_array[NETNS_BPF_SK_LOOKUP]);
	if (run_array) {
		struct bpf_sk_lookup_kern ctx = {
			.family		= AF_INET6,
			.protocol	= protocol,
			.v6.saddr	= saddr,
			.v6.daddr	= daddr,
			.sport		= sport,
			.dport		= dport,
			.ingress_ifindex	= ifindex,
		};
		u32 act;

		act = BPF_PROG_SK_LOOKUP_RUN_ARRAY(run_array, ctx, bpf_prog_run);
		if (act == SK_PASS) {
			selected_sk = ctx.selected_sk;
			no_reuseport = ctx.no_reuseport;
		} else {
			selected_sk = ERR_PTR(-ECONNREFUSED);
		}
	}
	rcu_read_unlock();
	*psk = selected_sk;
	return no_reuseport;
}
#endif /* IS_ENABLED(CONFIG_IPV6) */

static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u64 index,
						  u64 flags, const u64 flag_mask,
						  void *lookup_elem(struct bpf_map *map, u32 key))
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX;

	/* Lower bits of the flags are used as return code on lookup failure */
	if (unlikely(flags & ~(action_mask | flag_mask)))
		return XDP_ABORTED;

	ri->tgt_value = lookup_elem(map, index);
	if (unlikely(!ri->tgt_value) && !(flags & BPF_F_BROADCAST)) {
		/* If the lookup fails we want to clear out the state in the
		 * redirect_info struct completely, so that if an eBPF program
		 * performs multiple lookups, the last one always takes
		 * precedence.
		 */
		ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
		ri->map_type = BPF_MAP_TYPE_UNSPEC;
		return flags & action_mask;
	}

	ri->tgt_index = index;
	ri->map_id = map->id;
	ri->map_type = map->map_type;

	if (flags & BPF_F_BROADCAST) {
		WRITE_ONCE(ri->map, map);
		ri->flags = flags;
	} else {
		WRITE_ONCE(ri->map, NULL);
		ri->flags = 0;
	}

	return XDP_REDIRECT;
}

#endif /* __LINUX_FILTER_H__ */