/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/filter.h>
#include <linux/moduleloader.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <asm/cacheflush.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)

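/*
 * The mapping below keeps the eBPF argument registers R1-R5 in the A64
 * argument registers x0-x4, so a call into an in-kernel helper needs no
 * argument shuffling; only the return value has to be copied back from
 * x0 into R0's home register (x7).
 */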
/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_FP,
	/* temporary register for internal BPF JIT */
	[TMP_REG_1] = A64_R(23),
	[TMP_REG_2] = A64_R(24),
};

struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int tmp_used;
	int body_offset;
	int *offset;
	u32 *image;
};

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}

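/*
 * Materialize a 64-bit immediate 16 bits at a time: MOVZ sets the low
 * halfword and clears the rest, then one MOVK per non-zero halfword
 * patches the remaining bits in place.  For example, val = 0x12345678
 * emits: MOVZ reg, #0x5678; MOVK reg, #0x1234, lsl #16.
 */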
static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}

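/*
 * Materialize a sign-extended 32-bit immediate.  Negative values go
 * through MOVN, which writes the bitwise NOT of its shifted operand, so
 * the upper halfword comes out as all-ones without an extra instruction.
 * For example, imm = -5 (0xfffffffb): hi == 0xffff, so a single
 * MOVN reg, #0x0004 yields 0xfffffffb.
 */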
static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}

static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to + 1];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from + 1] - 1;

	return to - from;
}

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->offset[ctx->prog->len - 1];
	int from = ctx->idx - ctx->body_offset;

	return to - from;
}

/* Stack must be a multiple of 16B */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

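/*
 * Stack layout set up by build_prologue, derived from the code below
 * (the A64 stack grows down; stack_size = STACK_ALIGN(MAX_BPF_STACK + 4),
 * e.g. 528 bytes for a 512-byte BPF stack):
 *
 *	high:	saved r6, r7
 *		saved r8, r9
 *		saved tmp1, tmp2	(only if ctx->tmp_used)
 *		4-byte skb_copy_bits() scratch area + alignment padding
 *		MAX_BPF_STACK bytes of BPF stack
 *	low:	new SP == BPF_REG_FP
 */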
static void build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 ra = bpf2a64[BPF_REG_A];
	const u8 rx = bpf2a64[BPF_REG_X];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	int stack_size = MAX_BPF_STACK;

	stack_size += 4; /* extra for skb_copy_bits buffer */
	stack_size = STACK_ALIGN(stack_size);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	if (ctx->tmp_used)
		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);

	/* Set up BPF stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);

	/* Set up frame pointer */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Clear registers A and X */
	emit_a64_mov_i64(ra, 0, ctx);
	emit_a64_mov_i64(rx, 0, ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	int stack_size = MAX_BPF_STACK;

	stack_size += 4; /* extra for skb_copy_bits buffer */
	stack_size = STACK_ALIGN(stack_size);

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);

	/* Restore callee-saved registers */
	if (ctx->tmp_used)
		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore frame pointer */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	u8 jmp_cond;
	s32 jmp_offset;

	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit(A64_UDIV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
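		/* AArch64 has no remainder instruction; compute
		 * dst % src as dst - (dst / src) * src.
		 */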
		ctx->tmp_used = 1;
		emit(A64_UDIV(is64, tmp, dst, src), ctx);
		emit(A64_MUL(is64, tmp, tmp, src), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
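		/* The conversion is a no-op when the host byte order
		 * already matches the requested one; otherwise the value
		 * is byte-swapped with REV16/REV32/REV64.
		 */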
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			break;
#else /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			break;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)
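/*
 * check_imm() verifies that a signed branch offset fits in the target
 * instruction's immediate field: A64 conditional branches (B.cond, CBZ)
 * encode a 19-bit signed word offset, unconditional B a 26-bit one.
 */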

	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

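		/* Load the helper's address (relative to __bpf_call_base)
		 * into tmp, set up a minimal frame record, branch and link
		 * through tmp, then copy the A64 return value (x0) into
		 * BPF_REG_0's home register.
		 */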
		ctx->tmp_used = 1;
		emit_a64_mov_i64(tmp, func, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		break;
	}
	/* function return */
	case BPF_JMP | BPF_EXIT:
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		goto notyet;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;

	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

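		/* Emit a call to bpf_load_pointer(skb, k, size, buffer),
		 * with the scratch buffer placed just above the BPF stack
		 * (fp + MAX_BPF_STACK).  A NULL return means the load
		 * failed; r0 is then zero, so branch to the epilogue to
		 * make the program return 0.
		 */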
		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		ret = build_insn(insn, ctx);
		if (ret)
			return ret;
	}

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct jit_ctx ctx;
	int image_size;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL)
		return;

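	/*
	 * While ctx.image is still NULL, emit() only advances ctx.idx, so
	 * the first pass over the program just measures it and records the
	 * A64 offset of every BPF instruction in ctx.offset[].
	 */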
	/* 1. Initial fake pass to compute ctx->idx and fill in ctx->offset. */
	if (build_body(&ctx))
		goto out;

	build_prologue(&ctx);

	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	ctx.image = module_alloc(image_size);
	if (unlikely(ctx.image == NULL))
		goto out;

	/* 2. Now, the actual pass. */

	ctx.idx = 0;
	build_prologue(&ctx);

	ctx.body_offset = ctx.idx;
	if (build_body(&ctx)) {
		module_free(NULL, ctx.image);
		goto out;
	}

	build_epilogue(&ctx);

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;

out:
	kfree(ctx.offset);
}

void bpf_jit_free(struct bpf_prog *prog)
{
	if (prog->jited)
		module_free(NULL, prog->bpf_func);

	kfree(prog);
}