/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "bpf_jit: " fmt

#include <linux/filter.h>
#include <linux/printk.h>
#include <linux/skbuff.h>
#include <linux/slab.h>

#include <asm/byteorder.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

#define TMP_REG_1 (MAX_BPF_REG + 0)
#define TMP_REG_2 (MAX_BPF_REG + 1)

/* Map BPF registers to A64 registers */
static const int bpf2a64[] = {
	/* return value from in-kernel function, and exit value from eBPF */
	[BPF_REG_0] = A64_R(7),
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = A64_R(0),
	[BPF_REG_2] = A64_R(1),
	[BPF_REG_3] = A64_R(2),
	[BPF_REG_4] = A64_R(3),
	[BPF_REG_5] = A64_R(4),
	/* callee saved registers that in-kernel function will preserve */
	[BPF_REG_6] = A64_R(19),
	[BPF_REG_7] = A64_R(20),
	[BPF_REG_8] = A64_R(21),
	[BPF_REG_9] = A64_R(22),
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = A64_R(25),
	/* temporary registers for internal BPF JIT */
	[TMP_REG_1] = A64_R(23),
	[TMP_REG_2] = A64_R(24),
};

struct jit_ctx {
	const struct bpf_prog *prog;
	int idx;
	int tmp_used;
	int epilogue_offset;
	int *offset;
	u32 *image;
};

static inline void emit(const u32 insn, struct jit_ctx *ctx)
{
	if (ctx->image != NULL)
		ctx->image[ctx->idx] = cpu_to_le32(insn);

	ctx->idx++;
}
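
/*
 * Note: during the initial sizing pass, ctx->image is NULL, so emit()
 * only advances ctx->idx; the instruction words are actually written
 * out in the second pass, once the image buffer has been allocated
 * (see bpf_int_jit_compile() below).
 */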

static inline void emit_a64_mov_i64(const int reg, const u64 val,
				    struct jit_ctx *ctx)
{
	u64 tmp = val;
	int shift = 0;

	emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx);
	tmp >>= 16;
	shift += 16;
	while (tmp) {
		if (tmp & 0xffff)
			emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
		tmp >>= 16;
		shift += 16;
	}
}
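
/*
 * For example, emit_a64_mov_i64(reg, 0x12345678, ctx) expands to
 * roughly (illustrative only):
 *
 *	movz	reg, #0x5678
 *	movk	reg, #0x1234, lsl #16
 *
 * All-zero 16-bit chunks above the first are skipped, so small
 * constants cost a single MOVZ.
 */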

static inline void emit_a64_mov_i(const int is64, const int reg,
				  const s32 val, struct jit_ctx *ctx)
{
	u16 hi = val >> 16;
	u16 lo = val & 0xffff;

	if (hi & 0x8000) {
		if (hi == 0xffff) {
			emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
		} else {
			emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
			emit(A64_MOVK(is64, reg, lo, 0), ctx);
		}
	} else {
		emit(A64_MOVZ(is64, reg, lo, 0), ctx);
		if (hi)
			emit(A64_MOVK(is64, reg, hi, 16), ctx);
	}
}
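
/*
 * Example of the MOVN path: for val = -5 (0xfffffffb), hi == 0xffff,
 * so a single MOVN with ~lo == 0x0004 suffices (illustrative only):
 *
 *	movn	reg, #0x4	// reg = ~0x4 = 0xfffffffb
 */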

static inline int bpf2a64_offset(int bpf_to, int bpf_from,
				 const struct jit_ctx *ctx)
{
	int to = ctx->offset[bpf_to];
	/* -1 to account for the Branch instruction */
	int from = ctx->offset[bpf_from] - 1;

	return to - from;
}
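
/*
 * Note: ctx->offset[i] records the A64 index just past the code emitted
 * for BPF instruction i (see build_body()). A64 branch offsets are
 * counted in instructions relative to the branch itself, and the branch
 * is the last instruction emitted for a BPF jump, hence the -1 above to
 * step back from "just past the branch" to the branch instruction.
 */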

static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;
	/* We are guaranteed to have aligned memory. */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
}
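
/*
 * Filling the image with AARCH64_BREAK_FAULT (a BRK instruction) serves
 * two purposes: a stray jump into the padding traps immediately, and
 * validate_code() below treats any leftover BRK word as proof that some
 * instruction was never emitted.
 */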

static inline int epilogue_offset(const struct jit_ctx *ctx)
{
	int to = ctx->epilogue_offset;
	int from = ctx->idx;

	return to - from;
}

/* Stack must be a multiple of 16 bytes */
#define STACK_ALIGN(sz) (((sz) + 15) & ~15)

#define _STACK_SIZE \
	(MAX_BPF_STACK \
	 + 4 /* extra for skb_copy_bits buffer */)

#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)

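/*
 * For example, assuming MAX_BPF_STACK is 512: _STACK_SIZE = 512 + 4 =
 * 516, which STACK_ALIGN rounds up to 528 so that SP stays 16-byte
 * aligned.
 */
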
static void build_prologue(struct jit_ctx *ctx)
{
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];

	/*
	 * BPF prog stack layout
	 *
	 *                         high
	 * original A64_SP =>   0:+-----+ BPF prologue
	 *                        |FP/LR|
	 * current A64_FP =>  -16:+-----+
	 *                        | ... | callee saved registers
	 *                        +-----+
	 *                        |     | x25/x26
	 * BPF fp register => -80:+-----+ <= (BPF_FP)
	 *                        |     |
	 *                        | ... | BPF prog stack
	 *                        |     |
	 *                        +-----+ <= (BPF_FP - MAX_BPF_STACK)
	 *                        |RSVD | JIT scratchpad
	 * current A64_SP =>      +-----+ <= (BPF_FP - STACK_SIZE)
	 *                        |     |
	 *                        | ... | Function call stack
	 *                        |     |
	 *                        +-----+
	 *                          low
	 *
	 */

	/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
	emit(A64_MOV(1, A64_FP, A64_SP), ctx);

	/* Save callee-saved registers */
	emit(A64_PUSH(r6, r7, A64_SP), ctx);
	emit(A64_PUSH(r8, r9, A64_SP), ctx);
	if (ctx->tmp_used)
		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);

	/* Save fp (x25) and x26. SP requires 16-byte alignment */
	emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);

	/* Set up BPF prog stack base register (x25) */
	emit(A64_MOV(1, fp, A64_SP), ctx);

	/* Set up function call stack */
	emit(A64_SUB_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	const u8 r0 = bpf2a64[BPF_REG_0];
	const u8 r6 = bpf2a64[BPF_REG_6];
	const u8 r7 = bpf2a64[BPF_REG_7];
	const u8 r8 = bpf2a64[BPF_REG_8];
	const u8 r9 = bpf2a64[BPF_REG_9];
	const u8 fp = bpf2a64[BPF_REG_FP];
	const u8 tmp1 = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];

	/* We're done with BPF stack */
	emit(A64_ADD_I(1, A64_SP, A64_SP, STACK_SIZE), ctx);

	/* Restore fp (x25) and x26 */
	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);

	/* Restore callee-saved registers, mirroring the prologue pushes */
	if (ctx->tmp_used)
		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
	emit(A64_POP(r8, r9, A64_SP), ctx);
	emit(A64_POP(r6, r7, A64_SP), ctx);

	/* Restore FP/LR registers */
	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

	/* Set return value */
	emit(A64_MOV(1, A64_R(0), r0), ctx);

	emit(A64_RET(A64_LR), ctx);
}

/* JITs an eBPF instruction.
 * Returns:
 * 0  - successfully JITed an 8-byte eBPF instruction.
 * >0 - successfully JITed a 16-byte eBPF instruction.
 * <0 - failed to JIT.
 */
static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 code = insn->code;
	const u8 dst = bpf2a64[insn->dst_reg];
	const u8 src = bpf2a64[insn->src_reg];
	const u8 tmp = bpf2a64[TMP_REG_1];
	const u8 tmp2 = bpf2a64[TMP_REG_2];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const int i = insn - ctx->prog->insnsi;
	const bool is64 = BPF_CLASS(code) == BPF_ALU64;
	u8 jmp_cond;
	s32 jmp_offset;

#define check_imm(bits, imm) do {				\
	if ((((imm) > 0) && ((imm) >> (bits))) ||		\
	    (((imm) < 0) && (~(imm) >> (bits)))) {		\
		pr_info("[%2d] imm=%d(0x%x) out of range\n",	\
			i, imm, imm);				\
		return -EINVAL;					\
	}							\
} while (0)
#define check_imm19(imm) check_imm(19, imm)
#define check_imm26(imm) check_imm(26, imm)

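	/*
	 * check_imm() rejects branch offsets that will not fit in the
	 * immediate field of the target instruction: imm19 for
	 * conditional branches and CBZ/CBNZ, imm26 for unconditional B.
	 */
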
	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		emit(A64_MOV(is64, dst, src), ctx);
		break;
	/* dst = dst OP src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit(A64_ADD(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit(A64_SUB(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit(A64_AND(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit(A64_ORR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit(A64_EOR(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit(A64_MUL(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];

		/* if (src == 0) return 0 */
		jmp_offset = 3; /* skip ahead to else path */
		check_imm19(jmp_offset);
		emit(A64_CBNZ(is64, src, jmp_offset), ctx);
		emit(A64_MOVZ(1, r0, 0, 0), ctx);
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		/* else */
		switch (BPF_OP(code)) {
		case BPF_DIV:
			emit(A64_UDIV(is64, dst, dst, src), ctx);
			break;
		case BPF_MOD:
			ctx->tmp_used = 1;
			emit(A64_UDIV(is64, tmp, dst, src), ctx);
			emit(A64_MUL(is64, tmp, tmp, src), ctx);
			emit(A64_SUB(is64, dst, dst, tmp), ctx);
			break;
		}
		break;
	}
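	/*
	 * Note: the MOD case above computes dst % src as
	 * dst - (dst / src) * src, since AArch64 has no integer
	 * remainder instruction.
	 */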
	case BPF_ALU | BPF_LSH | BPF_X:
	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit(A64_LSLV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_X:
	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit(A64_LSRV(is64, dst, dst, src), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_X:
	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit(A64_ASRV(is64, dst, dst, src), ctx);
		break;
	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		emit(A64_NEG(is64, dst, dst), ctx);
		break;
	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
	case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef CONFIG_CPU_BIG_ENDIAN
		if (BPF_SRC(code) == BPF_FROM_BE)
			goto emit_bswap_uxt;
#else  /* !CONFIG_CPU_BIG_ENDIAN */
		if (BPF_SRC(code) == BPF_FROM_LE)
			goto emit_bswap_uxt;
#endif
		switch (imm) {
		case 16:
			emit(A64_REV16(is64, dst, dst), ctx);
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			emit(A64_REV32(is64, dst, dst), ctx);
			/* upper 32 bits already cleared */
			break;
		case 64:
			emit(A64_REV64(dst, dst), ctx);
			break;
		}
		break;
emit_bswap_uxt:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit(A64_UXTH(is64, dst, dst), ctx);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit(A64_UXTW(is64, dst, dst), ctx);
			break;
		case 64:
			/* nop */
			break;
		}
		break;
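	/*
	 * On a little-endian kernel, BPF_FROM_BE above must really swap
	 * bytes (REV16/REV32/REV64), while BPF_FROM_LE is already in CPU
	 * order and only needs the zero-extension at emit_bswap_uxt; on
	 * a big-endian kernel the roles are reversed.
	 */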
	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		emit_a64_mov_i(is64, dst, imm, ctx);
		break;
	/* dst = dst OP imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ADD(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_AND(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_ORR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_EOR(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_MUL(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp, imm, ctx);
		emit(A64_UDIV(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(is64, tmp2, imm, ctx);
		emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
		emit(A64_MUL(is64, tmp, tmp, tmp2), ctx);
		emit(A64_SUB(is64, dst, dst, tmp), ctx);
		break;
	case BPF_ALU | BPF_LSH | BPF_K:
	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit(A64_LSL(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_RSH | BPF_K:
	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit(A64_LSR(is64, dst, dst, imm), ctx);
		break;
	case BPF_ALU | BPF_ARSH | BPF_K:
	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit(A64_ASR(is64, dst, dst, imm), ctx);
		break;

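	/*
	 * Note: the BPF_K ALU cases above first materialize imm into a
	 * temporary register with emit_a64_mov_i() and then reuse the
	 * register-register instruction forms; setting ctx->tmp_used
	 * makes the prologue save and restore x23/x24.
	 */
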
	/* JUMP off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;
	/* IF (dst COND src) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
		emit(A64_CMP(1, dst, src), ctx);
emit_cond_jmp:
		jmp_offset = bpf2a64_offset(i + off, i, ctx);
		check_imm19(jmp_offset);
		switch (BPF_OP(code)) {
		case BPF_JEQ:
			jmp_cond = A64_COND_EQ;
			break;
		case BPF_JGT:
			jmp_cond = A64_COND_HI;
			break;
		case BPF_JGE:
			jmp_cond = A64_COND_CS;
			break;
		case BPF_JSET:
		case BPF_JNE:
			jmp_cond = A64_COND_NE;
			break;
		case BPF_JSGT:
			jmp_cond = A64_COND_GT;
			break;
		case BPF_JSGE:
			jmp_cond = A64_COND_GE;
			break;
		default:
			return -EFAULT;
		}
		emit(A64_B_(jmp_cond, jmp_offset), ctx);
		break;
	case BPF_JMP | BPF_JSET | BPF_X:
		emit(A64_TST(1, dst, src), ctx);
		goto emit_cond_jmp;
	/* IF (dst COND imm) JUMP off */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_CMP(1, dst, tmp), ctx);
		goto emit_cond_jmp;
	case BPF_JMP | BPF_JSET | BPF_K:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, imm, ctx);
		emit(A64_TST(1, dst, tmp), ctx);
		goto emit_cond_jmp;
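	/*
	 * The BPF-to-A64 condition mapping above uses the unsigned
	 * conditions (HI, CS) for JGT/JGE and the signed ones (GT, GE)
	 * for JSGT/JSGE; JSET shares NE because TST sets the Z flag the
	 * same way CMP does.
	 */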
	/* function call */
	case BPF_JMP | BPF_CALL:
	{
		const u8 r0 = bpf2a64[BPF_REG_0];
		const u64 func = (u64)__bpf_call_base + imm;

		ctx->tmp_used = 1;
		emit_a64_mov_i64(tmp, func, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(tmp), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
		break;
	}
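	/*
	 * Note: for BPF_CALL, the verifier has already rewritten the
	 * helper id in imm as an offset from __bpf_call_base, so the
	 * absolute address is recovered by simple addition; the helper's
	 * return value is moved from x0 into BPF R0 (x7).
	 */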
	/* function return */
	case BPF_JMP | BPF_EXIT:
		/* Optimization: when the last instruction is EXIT,
		   simply fall through to the epilogue. */
		if (i == ctx->prog->len - 1)
			break;
		jmp_offset = epilogue_offset(ctx);
		check_imm26(jmp_offset);
		emit(A64_B(jmp_offset), ctx);
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
	{
		const struct bpf_insn insn1 = insn[1];
		u64 imm64;

		if (insn1.code != 0 || insn1.src_reg != 0 ||
		    insn1.dst_reg != 0 || insn1.off != 0) {
			/* Note: verifier in BPF core must catch invalid
			 * instructions.
			 */
			pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
			return -EINVAL;
		}

		imm64 = (u64)insn1.imm << 32 | (u32)imm;
		emit_a64_mov_i64(dst, imm64, ctx);

		return 1;
	}
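	/*
	 * BPF_LD_IMM64 spans two 8-byte BPF instructions: the low 32
	 * bits ride in the first insn's imm, the high 32 bits in the
	 * second's. Returning 1 tells build_body() to skip over the
	 * second half.
	 */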

	/* LDX: dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(dst, src, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_LDRH(dst, src, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_LDRB(dst, src, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_LDR64(dst, src, tmp), ctx);
			break;
		}
		break;

	/* ST: *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_DW:
		/* Load imm to a register then store it */
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp2, off, ctx);
		emit_a64_mov_i(1, tmp, imm, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(tmp, dst, tmp2), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(tmp, dst, tmp2), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(tmp, dst, tmp2), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(tmp, dst, tmp2), ctx);
			break;
		}
		break;

	/* STX: *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_DW:
		ctx->tmp_used = 1;
		emit_a64_mov_i(1, tmp, off, ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_STR32(src, dst, tmp), ctx);
			break;
		case BPF_H:
			emit(A64_STRH(src, dst, tmp), ctx);
			break;
		case BPF_B:
			emit(A64_STRB(src, dst, tmp), ctx);
			break;
		case BPF_DW:
			emit(A64_STR64(src, dst, tmp), ctx);
			break;
		}
		break;
	/* STX XADD: lock *(u32 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_W:
	/* STX XADD: lock *(u64 *)(dst + off) += src */
	case BPF_STX | BPF_XADD | BPF_DW:
		goto notyet;

	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
	case BPF_LD | BPF_ABS | BPF_W:
	case BPF_LD | BPF_ABS | BPF_H:
	case BPF_LD | BPF_ABS | BPF_B:
	/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + src + imm)) */
	case BPF_LD | BPF_IND | BPF_W:
	case BPF_LD | BPF_IND | BPF_H:
	case BPF_LD | BPF_IND | BPF_B:
	{
		const u8 r0 = bpf2a64[BPF_REG_0]; /* r0 = return value */
		const u8 r6 = bpf2a64[BPF_REG_6]; /* r6 = pointer to sk_buff */
		const u8 fp = bpf2a64[BPF_REG_FP];
		const u8 r1 = bpf2a64[BPF_REG_1]; /* r1: struct sk_buff *skb */
		const u8 r2 = bpf2a64[BPF_REG_2]; /* r2: int k */
		const u8 r3 = bpf2a64[BPF_REG_3]; /* r3: unsigned int size */
		const u8 r4 = bpf2a64[BPF_REG_4]; /* r4: void *buffer */
		const u8 r5 = bpf2a64[BPF_REG_5]; /* r5: void *(*func)(...) */
		int size;

		emit(A64_MOV(1, r1, r6), ctx);
		emit_a64_mov_i(0, r2, imm, ctx);
		if (BPF_MODE(code) == BPF_IND)
			emit(A64_ADD(0, r2, r2, src), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			size = 4;
			break;
		case BPF_H:
			size = 2;
			break;
		case BPF_B:
			size = 1;
			break;
		default:
			return -EINVAL;
		}
		emit_a64_mov_i64(r3, size, ctx);
		emit(A64_SUB_I(1, r4, fp, STACK_SIZE), ctx);
		emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx);
		emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
		emit(A64_MOV(1, A64_FP, A64_SP), ctx);
		emit(A64_BLR(r5), ctx);
		emit(A64_MOV(1, r0, A64_R(0)), ctx);
		emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

		jmp_offset = epilogue_offset(ctx);
		check_imm19(jmp_offset);
		emit(A64_CBZ(1, r0, jmp_offset), ctx);
		emit(A64_MOV(1, r5, r0), ctx);
		switch (BPF_SIZE(code)) {
		case BPF_W:
			emit(A64_LDR32(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV32(0, r0, r0), ctx);
#endif
			break;
		case BPF_H:
			emit(A64_LDRH(r0, r5, A64_ZR), ctx);
#ifndef CONFIG_CPU_BIG_ENDIAN
			emit(A64_REV16(0, r0, r0), ctx);
#endif
			break;
		case BPF_B:
			emit(A64_LDRB(r0, r5, A64_ZR), ctx);
			break;
		}
		break;
	}
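	/*
	 * bpf_load_pointer() returns either a pointer into the skb or,
	 * for non-linear data, into the small scratch buffer below the
	 * BPF stack that r4 points at; a NULL return (caught by the CBZ
	 * above, since r0 is then zero) makes the program return 0 via
	 * the epilogue.
	 */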
notyet:
		pr_info_once("*** NOT YET: opcode %02x ***\n", code);
		return -EFAULT;

	default:
		pr_err_once("unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;
}

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->prog;
	int i;

	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		ret = build_insn(insn, ctx);

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		if (ret > 0) {
			i++;
			continue;
		}
		if (ret)
			return ret;
	}

	return 0;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->idx; i++) {
		u32 a64_insn = le32_to_cpu(ctx->image[i]);

		if (a64_insn == AARCH64_BREAK_FAULT)
			return -1;
	}

	return 0;
}

static inline void bpf_flush_icache(void *start, void *end)
{
	flush_icache_range((unsigned long)start, (unsigned long)end);
}

void bpf_jit_compile(struct bpf_prog *prog)
{
	/* Nothing to do here. We support Internal BPF. */
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header;
	struct jit_ctx ctx;
	int image_size;
	u8 *image_ptr;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
	if (ctx.offset == NULL)
		return;

	/* 1. Initial fake pass to compute ctx->idx. */

	/* Fake pass to fill in ctx->offset and ctx->tmp_used. */
	if (build_body(&ctx))
		goto out;

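	/*
	 * Note: running the fake body pass before build_prologue() means
	 * ctx.tmp_used is already settled when the prologue decides
	 * whether to spill the temporary registers x23/x24, so both
	 * passes emit the same prologue.
	 */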
	build_prologue(&ctx);

	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	/* Now we know the actual image size. */
	image_size = sizeof(u32) * ctx.idx;
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL)
		goto out;

	/* 2. Now, the actual pass. */

	ctx.image = (u32 *)image_ptr;
	ctx.idx = 0;

	build_prologue(&ctx);

	if (build_body(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
	}

	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code. */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		goto out;
	}

	/* And we're done. */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, image_size, 2, ctx.image);

	bpf_flush_icache(header, ctx.image + ctx.idx);

	set_memory_ro((unsigned long)header, header->pages);
	prog->bpf_func = (void *)ctx.image;
	prog->jited = 1;
out:
	kfree(ctx.offset);
}

void bpf_jit_free(struct bpf_prog *prog)
{
	unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!prog->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(prog);
}