/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <net/sch_generic.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
struct sock_reuseport;
struct ctl_table;
struct ctl_table_header;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_2	/* scratch reg */
#define BPF_REG_D	BPF_REG_8	/* data, callee-saved */
#define BPF_REG_H	BPF_REG_9	/* hlen, callee-saved */

/* Kernel hidden auxiliary/helper register. */
#define BPF_REG_AX		MAX_BPF_REG
#define MAX_BPF_EXT_REG		(MAX_BPF_REG + 1)
#define MAX_BPF_JIT_REG		MAX_BPF_EXT_REG

/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL	0xf0

/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS	0xe0

/* As per nm, we expose JITed images as text (code) section for
 * kallsyms. That way, tools like perf can find it to match
 * addresses.
 */
#define BPF_SYM_ELF_TYPE	't'

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code  = BPF_ALU64 | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU64 | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_MOV | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = IMM })

/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_MOV | BPF_X, \
		.dst_reg = DST, \
		.src_reg = DST, \
		.off   = 0, \
		.imm   = 1 })

static inline bool insn_is_zext(const struct bpf_insn *insn)
{
	return insn->code == (BPF_ALU | BPF_MOV | BPF_X) && insn->imm == 1;
}

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) \
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_LD | BPF_DW | BPF_IMM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = (__u32) (IMM) }), \
	((struct bpf_insn) { \
		.code  = 0, /* zero is reserved opcode */ \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)
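/* Example (illustrative sketch only, not part of the original header):
 * a 64-bit immediate does not fit into one 8-byte insn, so BPF_LD_IMM64()
 * and BPF_LD_MAP_FD() expand to *two* struct bpf_insn initializers and can
 * therefore only be used inside array initializers, e.g.:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_LD_MAP_FD(BPF_REG_1, map_fd),  (* map_fd: a user fd *)
 *		BPF_MOV64_IMM(BPF_REG_0, 0),
 *		BPF_EXIT_INSN(),
 *	};
 *
 * The BPF_PSEUDO_MAP_FD load is later rewritten by the kernel into the
 * actual map pointer before the program runs.
 */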

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \
		.dst_reg = 0, \
		.src_reg = SRC, \
		.off   = 0, \
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = OFF, \
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = OFF, \
		.imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = OFF, \
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \
	((struct bpf_insn) { \
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = OFF, \
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = OFF, \
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = OFF, \
		.imm   = IMM })

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = OFF, \
		.imm   = 0 })

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
	((struct bpf_insn) { \
		.code  = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
		.dst_reg = DST, \
		.src_reg = 0, \
		.off   = OFF, \
		.imm   = IMM })

/* Unconditional jumps, goto pc + off16 */

#define BPF_JMP_A(OFF) \
	((struct bpf_insn) { \
		.code  = BPF_JMP | BPF_JA, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off   = OFF, \
		.imm   = 0 })

/* Relative call */

#define BPF_CALL_REL(TGT) \
	((struct bpf_insn) { \
		.code  = BPF_JMP | BPF_CALL, \
		.dst_reg = 0, \
		.src_reg = BPF_PSEUDO_CALL, \
		.off   = 0, \
		.imm   = TGT })

/* Function call */

#define BPF_CAST_CALL(x) \
		((u64 (*)(u64, u64, u64, u64, u64))(x))

#define BPF_EMIT_CALL(FUNC) \
	((struct bpf_insn) { \
		.code  = BPF_JMP | BPF_CALL, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
	((struct bpf_insn) { \
		.code  = CODE, \
		.dst_reg = DST, \
		.src_reg = SRC, \
		.off   = OFF, \
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN() \
	((struct bpf_insn) { \
		.code  = BPF_JMP | BPF_EXIT, \
		.dst_reg = 0, \
		.src_reg = 0, \
		.off   = 0, \
		.imm   = 0 })
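/* Example (illustrative sketch only): taken together, the initializers above
 * let kernel code spell out small programs as plain arrays. A hypothetical
 * program that unconditionally returns 1 would be:
 *
 *	static const struct bpf_insn example_ret_one[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_EXIT_INSN(),
 *	};
 *
 * R0 carries the return value by convention; example_ret_one is a made-up
 * name used only for this sketch.
 */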

/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K) \
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF) \
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))

#define bytes_to_bpf_size(bytes) \
({ \
	int bpf_size = -EINVAL; \
\
	if (bytes == sizeof(u8)) \
		bpf_size = BPF_B; \
	else if (bytes == sizeof(u16)) \
		bpf_size = BPF_H; \
	else if (bytes == sizeof(u32)) \
		bpf_size = BPF_W; \
	else if (bytes == sizeof(u64)) \
		bpf_size = BPF_DW; \
\
	bpf_size; \
})

#define bpf_size_to_bytes(bpf_size) \
({ \
	int bytes = -EINVAL; \
\
	if (bpf_size == BPF_B) \
		bytes = sizeof(u8); \
	else if (bpf_size == BPF_H) \
		bytes = sizeof(u16); \
	else if (bpf_size == BPF_W) \
		bytes = sizeof(u32); \
	else if (bpf_size == BPF_DW) \
		bytes = sizeof(u64); \
\
	bytes; \
})

#define BPF_SIZEOF(type) \
({ \
	const int __size = bytes_to_bpf_size(sizeof(type)); \
	BUILD_BUG_ON(__size < 0); \
	__size; \
})

#define BPF_FIELD_SIZEOF(type, field) \
({ \
	const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
	BUILD_BUG_ON(__size < 0); \
	__size; \
})

#define BPF_LDST_BYTES(insn) \
({ \
	const int __size = bpf_size_to_bytes(BPF_SIZE((insn)->code)); \
	WARN_ON(__size < 0); \
	__size; \
})
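/* Example (illustrative only): the size helpers above translate between byte
 * counts and BPF_SIZE() encodings, e.g. bytes_to_bpf_size(sizeof(u32))
 * evaluates to BPF_W and bpf_size_to_bytes(BPF_DW) to 8, while BPF_SIZEOF()
 * and BPF_FIELD_SIZEOF() add a build-time check that the type or field
 * really has a 1/2/4/8 byte size.
 */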

#define __BPF_MAP_0(m, v, ...) v
#define __BPF_MAP_1(m, v, t, a, ...) m(t, a)
#define __BPF_MAP_2(m, v, t, a, ...) m(t, a), __BPF_MAP_1(m, v, __VA_ARGS__)
#define __BPF_MAP_3(m, v, t, a, ...) m(t, a), __BPF_MAP_2(m, v, __VA_ARGS__)
#define __BPF_MAP_4(m, v, t, a, ...) m(t, a), __BPF_MAP_3(m, v, __VA_ARGS__)
#define __BPF_MAP_5(m, v, t, a, ...) m(t, a), __BPF_MAP_4(m, v, __VA_ARGS__)

#define __BPF_REG_0(...) __BPF_PAD(5)
#define __BPF_REG_1(...) __BPF_MAP(1, __VA_ARGS__), __BPF_PAD(4)
#define __BPF_REG_2(...) __BPF_MAP(2, __VA_ARGS__), __BPF_PAD(3)
#define __BPF_REG_3(...) __BPF_MAP(3, __VA_ARGS__), __BPF_PAD(2)
#define __BPF_REG_4(...) __BPF_MAP(4, __VA_ARGS__), __BPF_PAD(1)
#define __BPF_REG_5(...) __BPF_MAP(5, __VA_ARGS__)

#define __BPF_MAP(n, ...) __BPF_MAP_##n(__VA_ARGS__)
#define __BPF_REG(n, ...) __BPF_REG_##n(__VA_ARGS__)

#define __BPF_CAST(t, a) \
	(__force t) \
	(__force \
	 typeof(__builtin_choose_expr(sizeof(t) == sizeof(unsigned long), \
				      (unsigned long)0, (t)0))) a
#define __BPF_V void
#define __BPF_N

#define __BPF_DECL_ARGS(t, a) t   a
#define __BPF_DECL_REGS(t, a) u64 a

#define __BPF_PAD(n) \
	__BPF_MAP(n, __BPF_DECL_ARGS, __BPF_N, u64, __ur_1, u64, __ur_2, \
		  u64, __ur_3, u64, __ur_4, u64, __ur_5)

#define BPF_CALL_x(x, name, ...) \
	static __always_inline \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
	u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
	{ \
		return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
	} \
	static __always_inline \
	u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))

#define BPF_CALL_0(name, ...)	BPF_CALL_x(0, name, __VA_ARGS__)
#define BPF_CALL_1(name, ...)	BPF_CALL_x(1, name, __VA_ARGS__)
#define BPF_CALL_2(name, ...)	BPF_CALL_x(2, name, __VA_ARGS__)
#define BPF_CALL_3(name, ...)	BPF_CALL_x(3, name, __VA_ARGS__)
#define BPF_CALL_4(name, ...)	BPF_CALL_x(4, name, __VA_ARGS__)
#define BPF_CALL_5(name, ...)	BPF_CALL_x(5, name, __VA_ARGS__)
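/* Example (illustrative sketch only; the helper name below is made up):
 * BPF_CALL_x() expands into a u64(u64, ..., u64) wrapper that follows the
 * BPF calling convention plus an always-inline, properly typed body, so a
 * two-argument helper would be written as:
 *
 *	BPF_CALL_2(bpf_example_add, u32, a, u32, b)
 *	{
 *		return a + b;
 *	}
 *
 * The resulting bpf_example_add() symbol can then be referenced from a
 * struct bpf_func_proto and emitted into programs with BPF_EMIT_CALL().
 */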

#define bpf_ctx_range(TYPE, MEMBER) \
	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#define bpf_ctx_range_till(TYPE, MEMBER1, MEMBER2) \
	offsetof(TYPE, MEMBER1) ... offsetofend(TYPE, MEMBER2) - 1
#if BITS_PER_LONG == 64
# define bpf_ctx_range_ptr(TYPE, MEMBER) \
	offsetof(TYPE, MEMBER) ... offsetofend(TYPE, MEMBER) - 1
#else
# define bpf_ctx_range_ptr(TYPE, MEMBER) \
	offsetof(TYPE, MEMBER) ... offsetof(TYPE, MEMBER) + 8 - 1
#endif /* BITS_PER_LONG == 64 */

#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
	({ \
		BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \
		*(PTR_SIZE) = (SIZE); \
		offsetof(TYPE, MEMBER); \
	})

#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;	/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

struct bpf_binary_header {
	u32 pages;
	/* Some arches need word alignment for their instructions */
	u8 image[] __aligned(4);
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	u16			jited:1,	/* Is our filter JIT'ed? */
				jit_requested:1,/* archs need to JIT the prog */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1,	/* Do we need dst entry? */
				blinded:1,	/* Was blinded */
				is_func:1,	/* program is a bpf function */
				kprobe_override:1, /* Do we override a kprobe? */
				has_callchain_buf:1; /* callchain buffer allocated? */
	enum bpf_prog_type	type;		/* Type of BPF program */
	enum bpf_attach_type	expected_attach_type; /* For some prog types */
	u32			len;		/* Number of filter blocks */
	u32			jited_len;	/* Size of jited insns in bytes */
	u8			tag[BPF_TAG_SIZE];
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	unsigned int		(*bpf_func)(const void *ctx,
					    const struct bpf_insn *insn);
	/* Instructions for interpreter */
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
	};
};

struct sk_filter {
	refcount_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);

#define BPF_PROG_RUN(prog, ctx)	({ \
	u32 ret; \
	cant_sleep(); \
	if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
		struct bpf_prog_stats *stats; \
		u64 start = sched_clock(); \
		ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
		stats = this_cpu_ptr(prog->aux->stats); \
		u64_stats_update_begin(&stats->syncp); \
		stats->cnt++; \
		stats->nsecs += sched_clock() - start; \
		u64_stats_update_end(&stats->syncp); \
	} else { \
		ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
	} \
	ret; })
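/* Usage note (illustrative, editor-added): BPF_PROG_RUN() must not be
 * invoked from a context that can sleep (hence the cant_sleep() check),
 * and callers are expected to keep the program alive for the duration of
 * the run, typically via rcu_read_lock() as noted for bpf_prog_run_xdp()
 * below. When the bpf_stats_enabled_key static key is on, run count and
 * cumulative runtime are accounted per CPU in prog->aux->stats.
 */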

#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_meta;
	void *data_end;
};

struct bpf_redirect_info {
	u32 ifindex;
	u32 flags;
	struct bpf_map *map;
	struct bpf_map *map_to_flush;
	u32 kern_flags;
};

DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);

/* flags for bpf_redirect_info kern_flags */
#define BPF_RI_F_RF_NO_DIRECT	BIT(0)	/* no napi_direct on return_frame */

/* Compute the linear packet data range [data, data_end) which
 * will be accessed by various program types (cls_bpf, act_bpf,
 * lwt, ...). Subsystems allowing direct data access must (!)
 * ensure that cb[] area can be written to when BPF program is
 * invoked (otherwise cb[] save/restore is necessary).
 */
static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
	cb->data_meta = skb->data - skb_metadata_len(skb);
	cb->data_end  = skb->data + skb_headlen(skb);
}

/* Similar to bpf_compute_data_pointers(), except that the original value
 * of cb->data_end is saved in *saved_data_end so that it can be restored
 * afterwards.
 */
static inline void bpf_compute_and_save_data_end(
	struct sk_buff *skb, void **saved_data_end)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	*saved_data_end = cb->data_end;
	cb->data_end = skb->data + skb_headlen(skb);
}

/* Restore data saved by bpf_compute_and_save_data_end(). */
static inline void bpf_restore_data_end(
	struct sk_buff *skb, void *saved_data_end)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	cb->data_end = saved_data_end;
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer meta
	 * data between tail calls. Since this also needs to work with
	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to be
	 * saved/restored so that protocol specific skb->cb[] data won't
	 * be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
		     FIELD_SIZEOF(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}

static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog,
					 struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = BPF_PROG_RUN(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u32 res;

	preempt_disable();
	res = __bpf_prog_run_save_cb(prog, skb);
	preempt_enable();
	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u32 res;

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	preempt_disable();
	res = BPF_PROG_RUN(prog, skb);
	preempt_enable();
	return res;
}

static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
					    struct xdp_buff *xdp)
{
	/* Caller needs to hold rcu_read_lock() (!), otherwise program
	 * can be released while still running, or map elements could be
	 * freed early while still having concurrent users. XDP fastpath
	 * already takes rcu_read_lock() when fetching the program, so
	 * it's not necessary here anymore.
	 */
	return BPF_PROG_RUN(prog, xdp);
}

static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
	return prog->len * sizeof(struct bpf_insn);
}

static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
	return round_up(bpf_prog_insn_size(prog) +
			sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}

static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* When classic BPF programs have been loaded and the arch
	 * does not have a classic BPF JIT (anymore), they have been
	 * converted via bpf_migrate_filter() to eBPF and thus always
	 * have an unspec program type.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

static inline u32 bpf_ctx_off_adjust_machine(u32 size)
{
	const u32 size_machine = sizeof(unsigned long);

	if (size > size_machine && size % size_machine == 0)
		size = size_machine;

	return size;
}

static inline bool
bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
{
	return size <= size_default && (size & (size - 1)) == 0;
}

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	set_vm_flush_reset_perms(fp);
	set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
	set_vm_flush_reset_perms(hdr);
	set_memory_ro((unsigned long)hdr, hdr->pages);
	set_memory_x((unsigned long)hdr, hdr->pages);
}

static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
{
	unsigned long real_start = (unsigned long)fp->bpf_func;
	unsigned long addr = real_start & PAGE_MASK;

	return (void *)addr;
}

int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap);
static inline int sk_filter(struct sock *sk, struct sk_buff *skb)
{
	return sk_filter_trim_cap(sk, skb, 1);
}

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

bool bpf_opcode_in_insntable(u8 code);

void bpf_prog_free_linfo(struct bpf_prog *prog);
void bpf_prog_fill_jited_linfo(struct bpf_prog *prog,
			       const u32 *insn_to_jit_off);
int bpf_prog_alloc_jited_linfo(struct bpf_prog *prog);
void bpf_prog_free_jited_linfo(struct bpf_prog *prog);
void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
#define __bpf_call_base_args \
	((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \
	 __bpf_call_base)

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
void bpf_jit_compile(struct bpf_prog *prog);
bool bpf_jit_needs_zext(void);
bool bpf_helper_changes_pkt_data(void *func);

static inline bool bpf_dump_raw_ok(void)
{
	/* Reconstruction of call-sites is dependent on kallsyms,
	 * thus make dump the same restriction.
	 */
	return kallsyms_show_value() == 1;
}

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);
int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt);

void bpf_clear_redirect_map(struct bpf_map *map);

static inline bool xdp_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_set_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
}

static inline void xdp_clear_return_frame_no_direct(void)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
}

static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
				 unsigned int pktlen)
{
	unsigned int len;

	if (unlikely(!(fwd->flags & IFF_UP)))
		return -ENETDOWN;

	len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
	if (pktlen > len)
		return -EMSGSIZE;

	return 0;
}

/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
 * same cpu context. Further for best results no more than a single map
 * for the do_redirect/do_flush pair should be used. This limitation is
 * because we only track one map and force a flush when the map changes.
 * This does not appear to be a real limitation for existing software.
 */
int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
			    struct xdp_buff *xdp, struct bpf_prog *prog);
int xdp_do_redirect(struct net_device *dev,
		    struct xdp_buff *xdp,
		    struct bpf_prog *prog);
void xdp_do_flush_map(void);

void bpf_warn_invalid_xdp_action(u32 act);

#ifdef CONFIG_INET
struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
				  struct bpf_prog *prog, struct sk_buff *skb,
				  u32 hash);
#else
static inline struct sock *
bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
		     struct bpf_prog *prog, struct sk_buff *skb,
		     u32 hash)
{
	return NULL;
}
#endif

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
extern int bpf_jit_kallsyms;
extern long bpf_jit_limit;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);
u64 bpf_jit_alloc_exec_limit(void);
void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);

int bpf_jit_get_func_addr(const struct bpf_prog *prog,
			  const struct bpf_insn *insn, bool extra_pass,
			  u64 *func_addr, bool *func_addr_fixed);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
	return true;
# else
	return false;
# endif
}

static inline bool ebpf_jit_enabled(void)
{
	return bpf_jit_enable && bpf_jit_is_ebpf();
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return fp->jited && bpf_jit_is_ebpf();
}

static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
{
	/* These are the prerequisites, should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!prog->jit_requested)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
		return false;

	return true;
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	/* There are a couple of corner cases where kallsyms should
	 * not be enabled f.e. on hardening.
	 */
	if (bpf_jit_harden)
		return false;
	if (!bpf_jit_kallsyms)
		return false;
	if (bpf_jit_kallsyms == 1)
		return true;

	return false;
}

const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
				 unsigned long *off, char *sym);
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		    char *sym);

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	const char *ret = __bpf_address_lookup(addr, size, off, sym);

	if (ret && modname)
		*modname = NULL;
	return ret;
}

void bpf_prog_kallsyms_add(struct bpf_prog *fp);
void bpf_prog_kallsyms_del(struct bpf_prog *fp);
void bpf_get_prog_name(const struct bpf_prog *prog, char *sym);

#else /* CONFIG_BPF_JIT */

static inline bool ebpf_jit_enabled(void)
{
	return false;
}

static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
	return false;
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}

static inline bool bpf_jit_kallsyms_enabled(void)
{
	return false;
}

static inline const char *
__bpf_address_lookup(unsigned long addr, unsigned long *size,
		     unsigned long *off, char *sym)
{
	return NULL;
}

static inline bool is_bpf_text_address(unsigned long addr)
{
	return false;
}

static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
				  char *type, char *sym)
{
	return -ERANGE;
}

static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
		   unsigned long *off, char **modname, char *sym)
{
	return NULL;
}

static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp)
{
}

static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
{
}

static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym)
{
	sym[0] = '\0';
}

#endif /* CONFIG_BPF_JIT */

void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE: \
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}
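/* Example (illustrative only): a classic filter instruction such as
 *
 *	BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU)
 *
 * is recognised by bpf_anc_helper() and returned as BPF_ANC | SKF_AD_CPU,
 * so the classic-to-eBPF converter can emit the corresponding ancillary
 * access instead of a plain packet load.
 */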
9f12fbe6 ZSL |
1139 | void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, |
1140 | int k, unsigned int size); | |
1141 | ||
1142 | static inline void *bpf_load_pointer(const struct sk_buff *skb, int k, | |
1143 | unsigned int size, void *buffer) | |
1144 | { | |
1145 | if (k >= 0) | |
1146 | return skb_header_pointer(skb, k, size, buffer); | |
1147 | ||
1148 | return bpf_internal_load_pointer_neg_helper(skb, k, size); | |
1149 | } | |
1150 | ||
ea02f941 MS |
1151 | static inline int bpf_tell_extensions(void) |
1152 | { | |
37692299 | 1153 | return SKF_AD_MAX; |
ea02f941 MS |
1154 | } |
1155 | ||
4fbac77d AI |
1156 | struct bpf_sock_addr_kern { |
1157 | struct sock *sk; | |
1158 | struct sockaddr *uaddr; | |
1159 | /* Temporary "register" to make indirect stores to nested structures | |
1160 | * defined above. We need three registers to make such a store, but | |
1161 | * only two (src and dst) are available at convert_ctx_access time | |
1162 | */ | |
1163 | u64 tmp_reg; | |
1cedee13 | 1164 | void *t_ctx; /* Attach type specific context. */ |
4fbac77d AI |
1165 | }; |
1166 | ||
40304b2a LB |
1167 | struct bpf_sock_ops_kern { |
1168 | struct sock *sk; | |
1169 | u32 op; | |
1170 | union { | |
de525be2 | 1171 | u32 args[4]; |
40304b2a LB |
1172 | u32 reply; |
1173 | u32 replylong[4]; | |
1174 | }; | |
f19397a5 | 1175 | u32 is_fullsock; |
b73042b8 LB |
1176 | u64 temp; /* temp and everything after is not |
1177 | * initialized to 0 before calling | |
1178 | * the BPF program. New fields that | |
1179 | * should be initialized to 0 should | |
1180 | * be inserted before temp. | |
1181 | * temp is scratch storage used by | |
1182 | * sock_ops_convert_ctx_access | |
1183 | * as temporary storage of a register. | |
1184 | */ | |
40304b2a LB |
1185 | }; |
1186 | ||
7b146ceb AI |
1187 | struct bpf_sysctl_kern { |
1188 | struct ctl_table_header *head; | |
1189 | struct ctl_table *table; | |
1d11b301 AI |
1190 | void *cur_val; |
1191 | size_t cur_len; | |
4e63acdf AI |
1192 | void *new_val; |
1193 | size_t new_len; | |
1194 | int new_updated; | |
7b146ceb | 1195 | int write; |
e1550bfe AI |
1196 | loff_t *ppos; |
1197 | /* Temporary "register" for indirect stores to ppos. */ | |
1198 | u64 tmp_reg; | |
7b146ceb AI |
1199 | }; |
1200 | ||
1da177e4 | 1201 | #endif /* __LINUX_FILTER_H__ */ |