/*
 * Linux Socket Filter Data Structures
 */
#ifndef __LINUX_FILTER_H__
#define __LINUX_FILTER_H__

#include <stdarg.h>

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
#include <linux/printk.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/capability.h>

#include <net/sch_generic.h>

#include <asm/cacheflush.h>

#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>

struct sk_buff;
struct sock;
struct seccomp_data;
struct bpf_prog_aux;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1	BPF_REG_1
#define BPF_REG_ARG2	BPF_REG_2
#define BPF_REG_ARG3	BPF_REG_3
#define BPF_REG_ARG4	BPF_REG_4
#define BPF_REG_ARG5	BPF_REG_5
#define BPF_REG_CTX	BPF_REG_6
#define BPF_REG_FP	BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	BPF_REG_0
#define BPF_REG_X	BPF_REG_7
#define BPF_REG_TMP	BPF_REG_8

/* Kernel hidden auxiliary/helper register for hardening step.
 * Only used by eBPF JITs. It's nothing more than a temporary
 * register that JITs use internally, only that here it's part
 * of eBPF instructions that have been rewritten for blinding
 * constants. See JIT pre-step in bpf_jit_blind_constants().
 */
#define BPF_REG_AX		MAX_BPF_REG
#define MAX_BPF_JIT_REG		(MAX_BPF_REG + 1)

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK	512

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = LEN })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */

#define BPF_LD_IND(SIZE, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,	\
		.dst_reg = 0,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Function call */

#define BPF_EMIT_CALL(FUNC)					\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_CALL,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((FUNC) - __bpf_call_base) })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

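/* For illustration only (a minimal sketch, not a definitive recipe): the
 * initializers above are meant for building instruction arrays directly,
 * e.g. a tiny "return 1" eBPF program could be expressed as:
 *
 *	struct bpf_insn prog[] = {
 *		BPF_MOV64_IMM(BPF_REG_0, 1),
 *		BPF_EXIT_INSN(),
 *	};
 *
 * R0 holds the return value when BPF_EXIT_INSN() is reached.
 */
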
/* Internal classic blocks for direct assignment */

#define __BPF_STMT(CODE, K)					\
	((struct sock_filter) BPF_STMT(CODE, K))

#define __BPF_JUMP(CODE, K, JT, JF)				\
	((struct sock_filter) BPF_JUMP(CODE, K, JT, JF))

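/* Likewise for illustration, a single classic BPF block built with these
 * wrappers, e.g. an unconditional "accept the whole packet" statement:
 *
 *	struct sock_filter f = __BPF_STMT(BPF_RET | BPF_K, 0xffffffff);
 */
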
#define bytes_to_bpf_size(bytes)				\
({								\
	int bpf_size = -EINVAL;					\
								\
	if (bytes == sizeof(u8))				\
		bpf_size = BPF_B;				\
	else if (bytes == sizeof(u16))				\
		bpf_size = BPF_H;				\
	else if (bytes == sizeof(u32))				\
		bpf_size = BPF_W;				\
	else if (bytes == sizeof(u64))				\
		bpf_size = BPF_DW;				\
								\
	bpf_size;						\
})

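/* For example, bytes_to_bpf_size(sizeof(u32)) evaluates to BPF_W, while any
 * width other than 1, 2, 4 or 8 bytes yields -EINVAL.
 */
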
#ifdef CONFIG_COMPAT
/* A struct sock_filter is architecture independent. */
struct compat_sock_fprog {
	u16		len;
	compat_uptr_t	filter;		/* struct sock_filter * */
};
#endif

struct sock_fprog_kern {
	u16			len;
	struct sock_filter	*filter;
};

struct bpf_binary_header {
	unsigned int pages;
	u8 image[];
};

struct bpf_prog {
	u16			pages;		/* Number of allocated pages */
	kmemcheck_bitfield_begin(meta);
	u16			jited:1,	/* Is our filter JIT'ed? */
				gpl_compatible:1, /* Is filter GPL compatible? */
				cb_access:1,	/* Is control block accessed? */
				dst_needed:1;	/* Do we need dst entry? */
	kmemcheck_bitfield_end(meta);
	u32			len;		/* Number of filter blocks */
	enum bpf_prog_type	type;		/* Type of BPF program */
	struct bpf_prog_aux	*aux;		/* Auxiliary fields */
	struct sock_fprog_kern	*orig_prog;	/* Original BPF program */
	unsigned int		(*bpf_func)(const struct sk_buff *skb,
					    const struct bpf_insn *filter);
	/* Instructions for interpreter */
	union {
		struct sock_filter	insns[0];
		struct bpf_insn		insnsi[0];
	};
};

struct sk_filter {
	atomic_t	refcnt;
	struct rcu_head	rcu;
	struct bpf_prog	*prog;
};

#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)

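/* For illustration, running a program over an skb then reduces to something
 * along the lines of:
 *
 *	u32 verdict = BPF_PROG_RUN(prog, skb);
 *
 * where prog->bpf_func points either at the interpreter or at a JIT image.
 */
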
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN

struct bpf_skb_data_end {
	struct qdisc_skb_cb qdisc_cb;
	void *data_end;
};

/* compute the linear packet data range [data, data_end) which
 * will be accessed by cls_bpf and act_bpf programs
 */
static inline void bpf_compute_data_end(struct sk_buff *skb)
{
	struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;

	BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
	cb->data_end = skb->data + skb_headlen(skb);
}

static inline u8 *bpf_skb_cb(struct sk_buff *skb)
{
	/* eBPF programs may read/write skb->cb[] area to transfer meta
	 * data between tail calls. Since this also needs to work with
	 * tc, that scratch memory is mapped to qdisc_skb_cb's data area.
	 *
	 * In some socket filter cases, the cb unfortunately needs to be
	 * saved/restored so that protocol specific skb->cb[] data won't
	 * be lost. In any case, due to unprivileged eBPF programs
	 * attached to sockets, we need to clear the bpf_skb_cb() area
	 * to not leak previous contents to user space.
	 */
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
	BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
		     FIELD_SIZEOF(struct qdisc_skb_cb, data));

	return qdisc_skb_cb(skb)->data;
}

static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog,
				       struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);
	u8 cb_saved[BPF_SKB_CB_LEN];
	u32 res;

	if (unlikely(prog->cb_access)) {
		memcpy(cb_saved, cb_data, sizeof(cb_saved));
		memset(cb_data, 0, sizeof(cb_saved));
	}

	res = BPF_PROG_RUN(prog, skb);

	if (unlikely(prog->cb_access))
		memcpy(cb_data, cb_saved, sizeof(cb_saved));

	return res;
}

static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
					struct sk_buff *skb)
{
	u8 *cb_data = bpf_skb_cb(skb);

	if (unlikely(prog->cb_access))
		memset(cb_data, 0, BPF_SKB_CB_LEN);

	return BPF_PROG_RUN(prog, skb);
}

static inline unsigned int bpf_prog_size(unsigned int proglen)
{
	return max(sizeof(struct bpf_prog),
		   offsetof(struct bpf_prog, insns[proglen]));
}

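/* For example, bpf_prog_size(2) covers the struct bpf_prog header plus room
 * for two instructions; the max() keeps the result from dropping below
 * sizeof(struct bpf_prog) itself (the proglen == 0 case).
 */
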
static inline bool bpf_prog_was_classic(const struct bpf_prog *prog)
{
	/* When classic BPF programs have been loaded and the arch
	 * does not have a classic BPF JIT (anymore), they have been
	 * converted via bpf_migrate_filter() to eBPF and thus always
	 * have an unspec program type.
	 */
	return prog->type == BPF_PROG_TYPE_UNSPEC;
}

#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
	set_memory_ro((unsigned long)fp, fp->pages);
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
	set_memory_rw((unsigned long)fp, fp->pages);
}
#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
}

static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
}
#endif /* CONFIG_DEBUG_SET_MODULE_RONX */

int sk_filter(struct sock *sk, struct sk_buff *skb);

struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err);
void bpf_prog_free(struct bpf_prog *fp);

struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags);
struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
				  gfp_t gfp_extra_flags);
void __bpf_prog_free(struct bpf_prog *fp);

static inline void bpf_prog_unlock_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_ro(fp);
	__bpf_prog_free(fp);
}

typedef int (*bpf_aux_classic_check_t)(struct sock_filter *filter,
				       unsigned int flen);

int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);

int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
		  unsigned int len);

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);

u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
bool bpf_helper_changes_skb_data(void *func);

struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
				       const struct bpf_insn *patch, u32 len);

#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;

typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);

struct bpf_binary_header *
bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
		     unsigned int alignment,
		     bpf_jit_fill_hole_t bpf_fill_ill_insns);
void bpf_jit_binary_free(struct bpf_binary_header *hdr);

void bpf_jit_compile(struct bpf_prog *fp);
void bpf_jit_free(struct bpf_prog *fp);

struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other);

static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
				u32 pass, void *image)
{
	pr_err("flen=%u proglen=%u pass=%u image=%pK from=%s pid=%d\n", flen,
	       proglen, pass, image, current->comm, task_pid_nr(current));

	if (image)
		print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_OFFSET,
			       16, 1, image, proglen, false);
}

static inline bool bpf_jit_is_ebpf(void)
{
# ifdef CONFIG_HAVE_EBPF_JIT
	return true;
# else
	return false;
# endif
}

static inline bool bpf_jit_blinding_enabled(void)
{
	/* These are the prerequisites, should someone ever have the
	 * idea to call blinding outside of them, we make sure to
	 * bail out.
	 */
	if (!bpf_jit_is_ebpf())
		return false;
	if (!bpf_jit_enable)
		return false;
	if (!bpf_jit_harden)
		return false;
	if (bpf_jit_harden == 1 && capable(CAP_SYS_ADMIN))
		return false;

	return true;
}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{
}

static inline void bpf_jit_free(struct bpf_prog *fp)
{
	bpf_prog_unlock_free(fp);
}
#endif /* CONFIG_BPF_JIT */

#define BPF_ANC		BIT(15)

static inline bool bpf_needs_clear_a(const struct sock_filter *first)
{
	switch (first->code) {
	case BPF_RET | BPF_K:
	case BPF_LD | BPF_W | BPF_LEN:
		return false;

	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
		if (first->k == SKF_AD_OFF + SKF_AD_ALU_XOR_X)
			return true;
		return false;

	default:
		return true;
	}
}

static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
{
	BUG_ON(ftest->code & BPF_ANC);

	switch (ftest->code) {
	case BPF_LD | BPF_W | BPF_ABS:
	case BPF_LD | BPF_H | BPF_ABS:
	case BPF_LD | BPF_B | BPF_ABS:
#define BPF_ANCILLARY(CODE)	case SKF_AD_OFF + SKF_AD_##CODE:	\
				return BPF_ANC | SKF_AD_##CODE
		switch (ftest->k) {
		BPF_ANCILLARY(PROTOCOL);
		BPF_ANCILLARY(PKTTYPE);
		BPF_ANCILLARY(IFINDEX);
		BPF_ANCILLARY(NLATTR);
		BPF_ANCILLARY(NLATTR_NEST);
		BPF_ANCILLARY(MARK);
		BPF_ANCILLARY(QUEUE);
		BPF_ANCILLARY(HATYPE);
		BPF_ANCILLARY(RXHASH);
		BPF_ANCILLARY(CPU);
		BPF_ANCILLARY(ALU_XOR_X);
		BPF_ANCILLARY(VLAN_TAG);
		BPF_ANCILLARY(VLAN_TAG_PRESENT);
		BPF_ANCILLARY(PAY_OFFSET);
		BPF_ANCILLARY(RANDOM);
		BPF_ANCILLARY(VLAN_TPID);
		}
		/* Fallthrough. */
	default:
		return ftest->code;
	}
}

void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
					   int k, unsigned int size);

static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
				     unsigned int size, void *buffer)
{
	if (k >= 0)
		return skb_header_pointer(skb, k, size, buffer);

	return bpf_internal_load_pointer_neg_helper(skb, k, size);
}

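/* For illustration, a negative k is interpreted relative to one of the
 * special SKF_* bases from uapi/linux/filter.h, e.g. something like
 *
 *	ptr = bpf_load_pointer(skb, SKF_NET_OFF + 12, 4, buffer);
 *
 * would fetch four bytes at offset 12 from the network header.
 */
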
static inline int bpf_tell_extensions(void)
{
	return SKF_AD_MAX;
}

#endif /* __LINUX_FILTER_H__ */
1da177e4 | 651 | #endif /* __LINUX_FILTER_H__ */ |