/*
 * bpf_jit_comp64.c: eBPF JIT compiler
 *
 * Copyright 2016 Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com>
 *                IBM Corporation
 *
 * Based on the powerpc classic BPF JIT compiler by Matt Evans
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/kprobes.h>
#include <linux/bpf.h>

#include "bpf_jit64.h"

static void bpf_jit_fill_ill_insns(void *area, unsigned int size)
{
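        /*
         * Seed the whole allocation with trap instructions so that a
         * stray branch into not-yet-written JIT space traps instead of
         * executing leftover bytes.
         */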
        memset32(area, BREAKPOINT_INSTRUCTION, size/4);
}

static inline void bpf_flush_icache(void *start, void *end)
{
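        /* Order the stores to the JIT image before the icache flush. */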
        smp_wmb();
        flush_icache_range((unsigned long)start, (unsigned long)end);
}

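/*
 * ctx->seen tracks usage by PPC register number: bit (31 - n) is set
 * when GPR n is used, hence the b2p[] translation from BPF register
 * numbers in the helpers below.
 */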
static inline bool bpf_is_seen_register(struct codegen_context *ctx, int i)
{
        return (ctx->seen & (1 << (31 - b2p[i])));
}

static inline void bpf_set_seen_register(struct codegen_context *ctx, int i)
{
        ctx->seen |= (1 << (31 - b2p[i]));
}

static inline bool bpf_has_stack_frame(struct codegen_context *ctx)
{
        /*
         * We only need a stack frame if:
         * - we call other functions (kernel helpers), or
         * - the bpf program uses its stack area
         * The latter condition is deduced from the usage of BPF_REG_FP
         */
        return ctx->seen & SEEN_FUNC || bpf_is_seen_register(ctx, BPF_REG_FP);
}

/*
 * When not setting up our own stackframe, the redzone usage is:
 *
 *              [       prev sp         ] <-------------
 *              [         ...           ]               |
 * sp (r1) ---> [    stack pointer      ] --------------
 *              [   nv gpr save area    ] 8*8
 *              [    tail_call_cnt      ] 8
 *              [    local_tmp_var      ] 8
 *              [   unused red zone     ] 208 bytes protected
 */
static int bpf_jit_stack_local(struct codegen_context *ctx)
{
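        /*
         * local_tmp_var sits below the 8*8-byte NVR save area and the
         * 8-byte tail_call_cnt, per the redzone layout above.
         */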
        if (bpf_has_stack_frame(ctx))
                return STACK_FRAME_MIN_SIZE + ctx->stack_size;
        else
                return -(BPF_PPC_STACK_SAVE + 16);
}

static int bpf_jit_stack_tailcallcnt(struct codegen_context *ctx)
{
        return bpf_jit_stack_local(ctx) + 8;
}

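/*
 * Non-volatile GPRs are saved 8 bytes apiece, counting down from r31 at
 * the top of the frame; without a stack frame they land in the redzone
 * save area shown above.
 */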
static int bpf_jit_stack_offsetof(struct codegen_context *ctx, int reg)
{
        if (reg >= BPF_PPC_NVR_MIN && reg < 32)
                return (bpf_has_stack_frame(ctx) ?
                        (BPF_PPC_STACKFRAME + ctx->stack_size) : 0)
                                - (8 * (32 - reg));

        pr_err("BPF JIT is asking about unknown registers");
        BUG();
}

static void bpf_jit_emit_skb_loads(u32 *image, struct codegen_context *ctx)
{
        /*
         * Load skb->len and skb->data_len
         * r3 points to skb
         */
        PPC_LWZ(b2p[SKB_HLEN_REG], 3, offsetof(struct sk_buff, len));
        PPC_LWZ(b2p[TMP_REG_1], 3, offsetof(struct sk_buff, data_len));
        /* header_len = len - data_len */
        PPC_SUB(b2p[SKB_HLEN_REG], b2p[SKB_HLEN_REG], b2p[TMP_REG_1]);

        /* skb->data pointer */
        PPC_BPF_LL(b2p[SKB_DATA_REG], 3, offsetof(struct sk_buff, data));
}

static void bpf_jit_build_prologue(u32 *image, struct codegen_context *ctx)
{
        int i;

        /*
         * Initialize tail_call_cnt if we do tail calls.
         * Otherwise, put in NOPs so that it can be skipped when we are
         * invoked through a tail call.
         */
        if (ctx->seen & SEEN_TAILCALL) {
                PPC_LI(b2p[TMP_REG_1], 0);
                /* this goes in the redzone */
                PPC_BPF_STL(b2p[TMP_REG_1], 1, -(BPF_PPC_STACK_SAVE + 8));
        } else {
                PPC_NOP();
                PPC_NOP();
        }

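/* The two prologue instructions above (8 bytes) are skipped on entry via tail call. */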
#define BPF_TAILCALL_PROLOGUE_SIZE 8

        if (bpf_has_stack_frame(ctx)) {
                /*
                 * We need a stack frame, but we don't necessarily need to
                 * save/restore LR unless we call other functions
                 */
                if (ctx->seen & SEEN_FUNC) {
                        EMIT(PPC_INST_MFLR | __PPC_RT(R0));
                        PPC_BPF_STL(0, 1, PPC_LR_STKOFF);
                }

                PPC_BPF_STLU(1, 1, -(BPF_PPC_STACKFRAME + ctx->stack_size));
        }

        /*
         * Back up non-volatile regs -- BPF registers 6-10
         * If we haven't created our own stack frame, we save these
         * in the protected zone below the previous stack frame
         */
        for (i = BPF_REG_6; i <= BPF_REG_10; i++)
                if (bpf_is_seen_register(ctx, i))
                        PPC_BPF_STL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

        /*
         * Save additional non-volatile regs if we cache skb
         * Also, setup skb data
         */
        if (ctx->seen & SEEN_SKB) {
                PPC_BPF_STL(b2p[SKB_HLEN_REG], 1,
                                bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
                PPC_BPF_STL(b2p[SKB_DATA_REG], 1,
                                bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
                bpf_jit_emit_skb_loads(image, ctx);
        }

        /* Setup frame pointer to point to the bpf stack area */
        if (bpf_is_seen_register(ctx, BPF_REG_FP))
                PPC_ADDI(b2p[BPF_REG_FP], 1,
                                STACK_FRAME_MIN_SIZE + ctx->stack_size);
}

static void bpf_jit_emit_common_epilogue(u32 *image, struct codegen_context *ctx)
{
        int i;

        /* Restore NVRs */
        for (i = BPF_REG_6; i <= BPF_REG_10; i++)
                if (bpf_is_seen_register(ctx, i))
                        PPC_BPF_LL(b2p[i], 1, bpf_jit_stack_offsetof(ctx, b2p[i]));

        /* Restore non-volatile registers used for skb cache */
        if (ctx->seen & SEEN_SKB) {
                PPC_BPF_LL(b2p[SKB_HLEN_REG], 1,
                                bpf_jit_stack_offsetof(ctx, b2p[SKB_HLEN_REG]));
                PPC_BPF_LL(b2p[SKB_DATA_REG], 1,
                                bpf_jit_stack_offsetof(ctx, b2p[SKB_DATA_REG]));
        }

        /* Tear down our stack frame */
        if (bpf_has_stack_frame(ctx)) {
                PPC_ADDI(1, 1, BPF_PPC_STACKFRAME + ctx->stack_size);
                if (ctx->seen & SEEN_FUNC) {
                        PPC_BPF_LL(0, 1, PPC_LR_STKOFF);
                        PPC_MTLR(0);
                }
        }
}

static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
{
        bpf_jit_emit_common_epilogue(image, ctx);

        /* Move result to r3 */
        PPC_MR(3, b2p[BPF_REG_0]);

        PPC_BLR();
}

static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func)
{
#ifdef PPC64_ELF_ABI_v1
        /* func points to the function descriptor */
        PPC_LI64(b2p[TMP_REG_2], func);
        /* Load actual entry point from function descriptor */
        PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0);
        /* ... and move it to LR */
        PPC_MTLR(b2p[TMP_REG_1]);
        /*
         * Load TOC from function descriptor at offset 8.
         * We can clobber r2 since we get called through a
         * function pointer (so caller will save/restore r2)
         * and since we don't use a TOC ourselves.
         */
        PPC_BPF_LL(2, b2p[TMP_REG_2], 8);
#else
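        /*
         * No function descriptor under ELFv2. Loading the target into
         * r12 also matches the convention that a function's global
         * entry point expects its own address in r12 (to derive the TOC).
         */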
        /* We can clobber r12 */
        PPC_FUNC_ADDR(12, func);
        PPC_MTLR(12);
#endif
        PPC_BLRL();
}

static void bpf_jit_emit_tail_call(u32 *image, struct codegen_context *ctx, u32 out)
{
        /*
         * By now, the eBPF program has already setup parameters in r3, r4 and r5
         * r3/BPF_REG_1 - pointer to ctx -- passed as is to the next bpf program
         * r4/BPF_REG_2 - pointer to bpf_array
         * r5/BPF_REG_3 - index in bpf_array
         */
        int b2p_bpf_array = b2p[BPF_REG_2];
        int b2p_index = b2p[BPF_REG_3];

        /*
         * if (index >= array->map.max_entries)
         *   goto out;
         */
        PPC_LWZ(b2p[TMP_REG_1], b2p_bpf_array, offsetof(struct bpf_array, map.max_entries));
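        /* index is 32-bit: clear the upper 32 bits before the compare */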
        PPC_RLWINM(b2p_index, b2p_index, 0, 0, 31);
        PPC_CMPLW(b2p_index, b2p[TMP_REG_1]);
        PPC_BCC(COND_GE, out);

        /*
         * if (tail_call_cnt > MAX_TAIL_CALL_CNT)
         *   goto out;
         */
        PPC_LD(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));
        PPC_CMPLWI(b2p[TMP_REG_1], MAX_TAIL_CALL_CNT);
        PPC_BCC(COND_GT, out);

        /*
         * tail_call_cnt++;
         */
        PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], 1);
        PPC_BPF_STL(b2p[TMP_REG_1], 1, bpf_jit_stack_tailcallcnt(ctx));

        /* prog = array->ptrs[index]; */
        PPC_MULI(b2p[TMP_REG_1], b2p_index, 8);
        PPC_ADD(b2p[TMP_REG_1], b2p[TMP_REG_1], b2p_bpf_array);
        PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_array, ptrs));

        /*
         * if (prog == NULL)
         *   goto out;
         */
        PPC_CMPLDI(b2p[TMP_REG_1], 0);
        PPC_BCC(COND_EQ, out);

        /* goto *(prog->bpf_func + prologue_size); */
        PPC_LD(b2p[TMP_REG_1], b2p[TMP_REG_1], offsetof(struct bpf_prog, bpf_func));
#ifdef PPC64_ELF_ABI_v1
        /* skip past the function descriptor */
        PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1],
                        FUNCTION_DESCR_SIZE + BPF_TAILCALL_PROLOGUE_SIZE);
#else
        PPC_ADDI(b2p[TMP_REG_1], b2p[TMP_REG_1], BPF_TAILCALL_PROLOGUE_SIZE);
#endif
        PPC_MTCTR(b2p[TMP_REG_1]);

        /* tear down stack, restore NVRs, ... */
        bpf_jit_emit_common_epilogue(image, ctx);

        PPC_BCTR();
        /* out: */
}

/* Assemble the body code between the prologue & epilogue */
static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                              struct codegen_context *ctx,
                              u32 *addrs)
{
        const struct bpf_insn *insn = fp->insnsi;
        int flen = fp->len;
        int i;

        /* Start of epilogue code - will only be valid 2nd pass onwards */
        u32 exit_addr = addrs[flen];

        for (i = 0; i < flen; i++) {
                u32 code = insn[i].code;
                u32 dst_reg = b2p[insn[i].dst_reg];
                u32 src_reg = b2p[insn[i].src_reg];
                s16 off = insn[i].off;
                s32 imm = insn[i].imm;
                u64 imm64;
                u8 *func;
                u32 true_cond;

                /*
                 * addrs[] maps a BPF bytecode address into a real offset from
                 * the start of the body code.
                 */
                addrs[i] = ctx->idx * 4;

                /*
                 * As an optimization, we note down which non-volatile registers
                 * are used so that we can only save/restore those in our
                 * prologue and epilogue. We do this here regardless of whether
                 * the actual BPF instruction uses src/dst registers or not
                 * (for instance, BPF_CALL does not use them). The expectation
                 * is that those instructions will have src_reg/dst_reg set to
                 * 0. Even otherwise, we just lose some prologue/epilogue
                 * optimization but everything else should work without
                 * any issues.
                 */
                if (dst_reg >= BPF_PPC_NVR_MIN && dst_reg < 32)
                        bpf_set_seen_register(ctx, insn[i].dst_reg);
                if (src_reg >= BPF_PPC_NVR_MIN && src_reg < 32)
                        bpf_set_seen_register(ctx, insn[i].src_reg);

                switch (code) {
                /*
                 * Arithmetic operations: ADD/SUB/MUL/DIV/MOD/NEG
                 */
                case BPF_ALU | BPF_ADD | BPF_X: /* (u32) dst += (u32) src */
                case BPF_ALU64 | BPF_ADD | BPF_X: /* dst += src */
                        PPC_ADD(dst_reg, dst_reg, src_reg);
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_SUB | BPF_X: /* (u32) dst -= (u32) src */
                case BPF_ALU64 | BPF_SUB | BPF_X: /* dst -= src */
                        PPC_SUB(dst_reg, dst_reg, src_reg);
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_ADD | BPF_K: /* (u32) dst += (u32) imm */
                case BPF_ALU | BPF_SUB | BPF_K: /* (u32) dst -= (u32) imm */
                case BPF_ALU64 | BPF_ADD | BPF_K: /* dst += imm */
                case BPF_ALU64 | BPF_SUB | BPF_K: /* dst -= imm */
                        if (BPF_OP(code) == BPF_SUB)
                                imm = -imm;
                        if (imm) {
                                if (imm >= -32768 && imm < 32768)
                                        PPC_ADDI(dst_reg, dst_reg, IMM_L(imm));
                                else {
                                        PPC_LI32(b2p[TMP_REG_1], imm);
                                        PPC_ADD(dst_reg, dst_reg, b2p[TMP_REG_1]);
                                }
                        }
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_MUL | BPF_X: /* (u32) dst *= (u32) src */
                case BPF_ALU64 | BPF_MUL | BPF_X: /* dst *= src */
                        if (BPF_CLASS(code) == BPF_ALU)
                                PPC_MULW(dst_reg, dst_reg, src_reg);
                        else
                                PPC_MULD(dst_reg, dst_reg, src_reg);
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_MUL | BPF_K: /* (u32) dst *= (u32) imm */
                case BPF_ALU64 | BPF_MUL | BPF_K: /* dst *= imm */
                        if (imm >= -32768 && imm < 32768)
                                PPC_MULI(dst_reg, dst_reg, IMM_L(imm));
                        else {
                                PPC_LI32(b2p[TMP_REG_1], imm);
                                if (BPF_CLASS(code) == BPF_ALU)
                                        PPC_MULW(dst_reg, dst_reg,
                                                 b2p[TMP_REG_1]);
                                else
                                        PPC_MULD(dst_reg, dst_reg,
                                                 b2p[TMP_REG_1]);
                        }
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_DIV | BPF_X: /* (u32) dst /= (u32) src */
                case BPF_ALU | BPF_MOD | BPF_X: /* (u32) dst %= (u32) src */
                        if (BPF_OP(code) == BPF_MOD) {
                                PPC_DIVWU(b2p[TMP_REG_1], dst_reg, src_reg);
                                PPC_MULW(b2p[TMP_REG_1], src_reg,
                                         b2p[TMP_REG_1]);
                                PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
                        } else
                                PPC_DIVWU(dst_reg, dst_reg, src_reg);
                        goto bpf_alu32_trunc;
                case BPF_ALU64 | BPF_DIV | BPF_X: /* dst /= src */
                case BPF_ALU64 | BPF_MOD | BPF_X: /* dst %= src */
                        if (BPF_OP(code) == BPF_MOD) {
                                PPC_DIVD(b2p[TMP_REG_1], dst_reg, src_reg);
                                PPC_MULD(b2p[TMP_REG_1], src_reg,
                                         b2p[TMP_REG_1]);
                                PPC_SUB(dst_reg, dst_reg, b2p[TMP_REG_1]);
                        } else
                                PPC_DIVD(dst_reg, dst_reg, src_reg);
                        break;
                case BPF_ALU | BPF_MOD | BPF_K: /* (u32) dst %= (u32) imm */
                case BPF_ALU | BPF_DIV | BPF_K: /* (u32) dst /= (u32) imm */
                case BPF_ALU64 | BPF_MOD | BPF_K: /* dst %= imm */
                case BPF_ALU64 | BPF_DIV | BPF_K: /* dst /= imm */
                        if (imm == 0)
                                return -EINVAL;
                        else if (imm == 1)
                                goto bpf_alu32_trunc;

                        PPC_LI32(b2p[TMP_REG_1], imm);
                        switch (BPF_CLASS(code)) {
                        case BPF_ALU:
                                if (BPF_OP(code) == BPF_MOD) {
                                        PPC_DIVWU(b2p[TMP_REG_2], dst_reg,
                                                  b2p[TMP_REG_1]);
                                        PPC_MULW(b2p[TMP_REG_1],
                                                 b2p[TMP_REG_1],
                                                 b2p[TMP_REG_2]);
                                        PPC_SUB(dst_reg, dst_reg,
                                                b2p[TMP_REG_1]);
                                } else
                                        PPC_DIVWU(dst_reg, dst_reg,
                                                  b2p[TMP_REG_1]);
                                break;
                        case BPF_ALU64:
                                if (BPF_OP(code) == BPF_MOD) {
                                        PPC_DIVD(b2p[TMP_REG_2], dst_reg,
                                                 b2p[TMP_REG_1]);
                                        PPC_MULD(b2p[TMP_REG_1],
                                                 b2p[TMP_REG_1],
                                                 b2p[TMP_REG_2]);
                                        PPC_SUB(dst_reg, dst_reg,
                                                b2p[TMP_REG_1]);
                                } else
                                        PPC_DIVD(dst_reg, dst_reg,
                                                 b2p[TMP_REG_1]);
                                break;
                        }
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_NEG: /* (u32) dst = -dst */
                case BPF_ALU64 | BPF_NEG: /* dst = -dst */
                        PPC_NEG(dst_reg, dst_reg);
                        goto bpf_alu32_trunc;

                /*
                 * Logical operations: AND/OR/XOR/[A]LSH/[A]RSH
                 */
                case BPF_ALU | BPF_AND | BPF_X: /* (u32) dst = dst & src */
                case BPF_ALU64 | BPF_AND | BPF_X: /* dst = dst & src */
                        PPC_AND(dst_reg, dst_reg, src_reg);
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_AND | BPF_K: /* (u32) dst = dst & imm */
                case BPF_ALU64 | BPF_AND | BPF_K: /* dst = dst & imm */
                        if (!IMM_H(imm))
                                PPC_ANDI(dst_reg, dst_reg, IMM_L(imm));
                        else {
                                /* Sign-extended */
                                PPC_LI32(b2p[TMP_REG_1], imm);
                                PPC_AND(dst_reg, dst_reg, b2p[TMP_REG_1]);
                        }
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_OR | BPF_X: /* dst = (u32) dst | (u32) src */
                case BPF_ALU64 | BPF_OR | BPF_X: /* dst = dst | src */
                        PPC_OR(dst_reg, dst_reg, src_reg);
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_OR | BPF_K: /* dst = (u32) dst | (u32) imm */
                case BPF_ALU64 | BPF_OR | BPF_K: /* dst = dst | imm */
                        if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
                                /* Sign-extended */
                                PPC_LI32(b2p[TMP_REG_1], imm);
                                PPC_OR(dst_reg, dst_reg, b2p[TMP_REG_1]);
                        } else {
                                if (IMM_L(imm))
                                        PPC_ORI(dst_reg, dst_reg, IMM_L(imm));
                                if (IMM_H(imm))
                                        PPC_ORIS(dst_reg, dst_reg, IMM_H(imm));
                        }
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_XOR | BPF_X: /* (u32) dst ^= src */
                case BPF_ALU64 | BPF_XOR | BPF_X: /* dst ^= src */
                        PPC_XOR(dst_reg, dst_reg, src_reg);
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_XOR | BPF_K: /* (u32) dst ^= (u32) imm */
                case BPF_ALU64 | BPF_XOR | BPF_K: /* dst ^= imm */
                        if (imm < 0 && BPF_CLASS(code) == BPF_ALU64) {
                                /* Sign-extended */
                                PPC_LI32(b2p[TMP_REG_1], imm);
                                PPC_XOR(dst_reg, dst_reg, b2p[TMP_REG_1]);
                        } else {
                                if (IMM_L(imm))
                                        PPC_XORI(dst_reg, dst_reg, IMM_L(imm));
                                if (IMM_H(imm))
                                        PPC_XORIS(dst_reg, dst_reg, IMM_H(imm));
                        }
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_LSH | BPF_X: /* (u32) dst <<= (u32) src */
                        /* slw clears top 32 bits */
                        PPC_SLW(dst_reg, dst_reg, src_reg);
                        break;
                case BPF_ALU64 | BPF_LSH | BPF_X: /* dst <<= src; */
                        PPC_SLD(dst_reg, dst_reg, src_reg);
                        break;
                case BPF_ALU | BPF_LSH | BPF_K: /* (u32) dst <<= (u32) imm */
                        /* with imm 0, we still need to clear top 32 bits */
                        PPC_SLWI(dst_reg, dst_reg, imm);
                        break;
                case BPF_ALU64 | BPF_LSH | BPF_K: /* dst <<= imm */
                        if (imm != 0)
                                PPC_SLDI(dst_reg, dst_reg, imm);
                        break;
                case BPF_ALU | BPF_RSH | BPF_X: /* (u32) dst >>= (u32) src */
                        PPC_SRW(dst_reg, dst_reg, src_reg);
                        break;
                case BPF_ALU64 | BPF_RSH | BPF_X: /* dst >>= src */
                        PPC_SRD(dst_reg, dst_reg, src_reg);
                        break;
                case BPF_ALU | BPF_RSH | BPF_K: /* (u32) dst >>= (u32) imm */
                        PPC_SRWI(dst_reg, dst_reg, imm);
                        break;
                case BPF_ALU64 | BPF_RSH | BPF_K: /* dst >>= imm */
                        if (imm != 0)
                                PPC_SRDI(dst_reg, dst_reg, imm);
                        break;
                case BPF_ALU64 | BPF_ARSH | BPF_X: /* (s64) dst >>= src */
                        PPC_SRAD(dst_reg, dst_reg, src_reg);
                        break;
                case BPF_ALU64 | BPF_ARSH | BPF_K: /* (s64) dst >>= imm */
                        if (imm != 0)
                                PPC_SRADI(dst_reg, dst_reg, imm);
                        break;

                /*
                 * MOV
                 */
                case BPF_ALU | BPF_MOV | BPF_X: /* (u32) dst = src */
                case BPF_ALU64 | BPF_MOV | BPF_X: /* dst = src */
                        PPC_MR(dst_reg, src_reg);
                        goto bpf_alu32_trunc;
                case BPF_ALU | BPF_MOV | BPF_K: /* (u32) dst = imm */
                case BPF_ALU64 | BPF_MOV | BPF_K: /* dst = (s64) imm */
                        PPC_LI32(dst_reg, imm);
                        if (imm < 0)
                                goto bpf_alu32_trunc;
                        break;

bpf_alu32_trunc:
                /* Truncate to 32-bits */
                if (BPF_CLASS(code) == BPF_ALU)
                        PPC_RLWINM(dst_reg, dst_reg, 0, 0, 31);
                break;

                /*
                 * BPF_FROM_BE/LE
                 */
                case BPF_ALU | BPF_END | BPF_FROM_LE:
                case BPF_ALU | BPF_END | BPF_FROM_BE:
#ifdef __BIG_ENDIAN__
                        if (BPF_SRC(code) == BPF_FROM_BE)
                                goto emit_clear;
#else /* !__BIG_ENDIAN__ */
                        if (BPF_SRC(code) == BPF_FROM_LE)
                                goto emit_clear;
#endif
                        switch (imm) {
                        case 16:
                                /* Rotate 8 bits left & mask with 0x0000ff00 */
                                PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 16, 23);
                                /* Rotate 8 bits right & insert LSB to reg */
                                PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 24, 31);
                                /* Move result back to dst_reg */
                                PPC_MR(dst_reg, b2p[TMP_REG_1]);
                                break;
                        case 32:
                                /*
                                 * Rotate word left by 8 bits:
                                 * 2 bytes are already in their final position
                                 * -- byte 2 and 4 (of bytes 1, 2, 3 and 4)
                                 */
                                PPC_RLWINM(b2p[TMP_REG_1], dst_reg, 8, 0, 31);
                                /* Rotate 24 bits and insert byte 1 */
                                PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 0, 7);
                                /* Rotate 24 bits and insert byte 3 */
                                PPC_RLWIMI(b2p[TMP_REG_1], dst_reg, 24, 16, 23);
                                PPC_MR(dst_reg, b2p[TMP_REG_1]);
                                break;
                        case 64:
                                /*
                                 * Way easier and faster(?) to store the value
                                 * into stack and then use ldbrx
                                 *
                                 * ctx->seen will be reliable in pass2, but
                                 * the instructions generated will remain the
                                 * same across all passes
                                 */
                                PPC_STD(dst_reg, 1, bpf_jit_stack_local(ctx));
                                PPC_ADDI(b2p[TMP_REG_1], 1, bpf_jit_stack_local(ctx));
                                PPC_LDBRX(dst_reg, 0, b2p[TMP_REG_1]);
                                break;
                        }
                        break;

emit_clear:
                        switch (imm) {
                        case 16:
                                /* zero-extend 16 bits into 64 bits */
                                PPC_RLDICL(dst_reg, dst_reg, 0, 48);
                                break;
                        case 32:
                                /* zero-extend 32 bits into 64 bits */
                                PPC_RLDICL(dst_reg, dst_reg, 0, 32);
                                break;
                        case 64:
                                /* nop */
                                break;
                        }
                        break;

                /*
                 * BPF_ST(X)
                 */
                case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src */
                case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */
                        if (BPF_CLASS(code) == BPF_ST) {
                                PPC_LI(b2p[TMP_REG_1], imm);
                                src_reg = b2p[TMP_REG_1];
                        }
                        PPC_STB(src_reg, dst_reg, off);
                        break;
                case BPF_STX | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = src */
                case BPF_ST | BPF_MEM | BPF_H: /* *(u16 *)(dst + off) = imm */
                        if (BPF_CLASS(code) == BPF_ST) {
                                PPC_LI(b2p[TMP_REG_1], imm);
                                src_reg = b2p[TMP_REG_1];
                        }
                        PPC_STH(src_reg, dst_reg, off);
                        break;
                case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */
                case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */
                        if (BPF_CLASS(code) == BPF_ST) {
                                PPC_LI32(b2p[TMP_REG_1], imm);
                                src_reg = b2p[TMP_REG_1];
                        }
                        PPC_STW(src_reg, dst_reg, off);
                        break;
                case BPF_STX | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = src */
                case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */
                        if (BPF_CLASS(code) == BPF_ST) {
                                PPC_LI32(b2p[TMP_REG_1], imm);
                                src_reg = b2p[TMP_REG_1];
                        }
                        PPC_STD(src_reg, dst_reg, off);
                        break;

                /*
                 * BPF_STX XADD (atomic_add)
                 */
                /* *(u32 *)(dst + off) += src */
                case BPF_STX | BPF_XADD | BPF_W:
                        /* Get EA into TMP_REG_1 */
                        PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
                        /* error if EA is not word-aligned */
                        PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
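                        /* if aligned, branch over the two-instruction error path (li + jmp) below */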
                        PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
                        PPC_LI(b2p[BPF_REG_0], 0);
                        PPC_JMP(exit_addr);
                        /* load value from memory into TMP_REG_2 */
                        PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
                        /* add value from src_reg into this */
                        PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
                        /* store result back */
                        PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
                        /* we're done if this succeeded */
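                        /* (7*4 bytes: branch past the retry sequence and the exit check below) */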
                        PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
                        /* otherwise, let's try once more */
                        PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
                        PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
                        PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
                        /* exit if the store was not successful */
                        PPC_LI(b2p[BPF_REG_0], 0);
                        PPC_BCC(COND_NE, exit_addr);
                        break;
                /* *(u64 *)(dst + off) += src */
                case BPF_STX | BPF_XADD | BPF_DW:
                        PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
                        /* error if EA is not doubleword-aligned */
                        PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
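                        /* if aligned, branch over the two-instruction error path (li + jmp) below */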
                        PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
                        PPC_LI(b2p[BPF_REG_0], 0);
                        PPC_JMP(exit_addr);
                        PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
                        PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
                        PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
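                        /* done if the stdcx. succeeded: branch past the retry and the exit check below */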
                        PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
                        PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
                        PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
                        PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
                        PPC_LI(b2p[BPF_REG_0], 0);
                        PPC_BCC(COND_NE, exit_addr);
                        break;

                /*
                 * BPF_LDX
                 */
                /* dst = *(u8 *)(ul) (src + off) */
                case BPF_LDX | BPF_MEM | BPF_B:
                        PPC_LBZ(dst_reg, src_reg, off);
                        break;
                /* dst = *(u16 *)(ul) (src + off) */
                case BPF_LDX | BPF_MEM | BPF_H:
                        PPC_LHZ(dst_reg, src_reg, off);
                        break;
                /* dst = *(u32 *)(ul) (src + off) */
                case BPF_LDX | BPF_MEM | BPF_W:
                        PPC_LWZ(dst_reg, src_reg, off);
                        break;
                /* dst = *(u64 *)(ul) (src + off) */
                case BPF_LDX | BPF_MEM | BPF_DW:
                        PPC_LD(dst_reg, src_reg, off);
                        break;

                /*
                 * Doubleword load
                 * 16 byte instruction that uses two 'struct bpf_insn'
                 */
                case BPF_LD | BPF_IMM | BPF_DW: /* dst = (u64) imm */
                        imm64 = ((u64)(u32) insn[i].imm) |
                                    (((u64)(u32) insn[i+1].imm) << 32);
                        /* Adjust for two bpf instructions */
                        addrs[++i] = ctx->idx * 4;
                        PPC_LI64(dst_reg, imm64);
                        break;

                /*
                 * Return/Exit
                 */
                case BPF_JMP | BPF_EXIT:
                        /*
                         * If this isn't the very last instruction, branch to
                         * the epilogue. If we _are_ the last instruction,
                         * we'll just fall through to the epilogue.
                         */
                        if (i != flen - 1)
                                PPC_JMP(exit_addr);
                        /* else fall through to the epilogue */
                        break;

                /*
                 * Call kernel helper
                 */
                case BPF_JMP | BPF_CALL:
                        ctx->seen |= SEEN_FUNC;
                        func = (u8 *) __bpf_call_base + imm;

                        /* Save skb pointer if we need to re-cache skb data */
                        if ((ctx->seen & SEEN_SKB) &&
                            bpf_helper_changes_pkt_data(func))
                                PPC_BPF_STL(3, 1, bpf_jit_stack_local(ctx));

                        bpf_jit_emit_func_call(image, ctx, (u64)func);

                        /* move return value from r3 to BPF_REG_0 */
                        PPC_MR(b2p[BPF_REG_0], 3);

                        /* refresh skb cache */
                        if ((ctx->seen & SEEN_SKB) &&
                            bpf_helper_changes_pkt_data(func)) {
                                /* reload skb pointer to r3 */
                                PPC_BPF_LL(3, 1, bpf_jit_stack_local(ctx));
                                bpf_jit_emit_skb_loads(image, ctx);
                        }
                        break;

                /*
                 * Jumps and branches
                 */
                case BPF_JMP | BPF_JA:
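                        /* BPF jump offsets are relative to the next instruction */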
                        PPC_JMP(addrs[i + 1 + off]);
                        break;

                case BPF_JMP | BPF_JGT | BPF_K:
                case BPF_JMP | BPF_JGT | BPF_X:
                case BPF_JMP | BPF_JSGT | BPF_K:
                case BPF_JMP | BPF_JSGT | BPF_X:
                        true_cond = COND_GT;
                        goto cond_branch;
                case BPF_JMP | BPF_JLT | BPF_K:
                case BPF_JMP | BPF_JLT | BPF_X:
                case BPF_JMP | BPF_JSLT | BPF_K:
                case BPF_JMP | BPF_JSLT | BPF_X:
                        true_cond = COND_LT;
                        goto cond_branch;
                case BPF_JMP | BPF_JGE | BPF_K:
                case BPF_JMP | BPF_JGE | BPF_X:
                case BPF_JMP | BPF_JSGE | BPF_K:
                case BPF_JMP | BPF_JSGE | BPF_X:
                        true_cond = COND_GE;
                        goto cond_branch;
                case BPF_JMP | BPF_JLE | BPF_K:
                case BPF_JMP | BPF_JLE | BPF_X:
                case BPF_JMP | BPF_JSLE | BPF_K:
                case BPF_JMP | BPF_JSLE | BPF_X:
                        true_cond = COND_LE;
                        goto cond_branch;
                case BPF_JMP | BPF_JEQ | BPF_K:
                case BPF_JMP | BPF_JEQ | BPF_X:
                        true_cond = COND_EQ;
                        goto cond_branch;
                case BPF_JMP | BPF_JNE | BPF_K:
                case BPF_JMP | BPF_JNE | BPF_X:
                        true_cond = COND_NE;
                        goto cond_branch;
                case BPF_JMP | BPF_JSET | BPF_K:
                case BPF_JMP | BPF_JSET | BPF_X:
                        true_cond = COND_NE;
                        /* Fall through */

cond_branch:
                        switch (code) {
                        case BPF_JMP | BPF_JGT | BPF_X:
                        case BPF_JMP | BPF_JLT | BPF_X:
                        case BPF_JMP | BPF_JGE | BPF_X:
                        case BPF_JMP | BPF_JLE | BPF_X:
                        case BPF_JMP | BPF_JEQ | BPF_X:
                        case BPF_JMP | BPF_JNE | BPF_X:
                                /* unsigned comparison */
                                PPC_CMPLD(dst_reg, src_reg);
                                break;
                        case BPF_JMP | BPF_JSGT | BPF_X:
                        case BPF_JMP | BPF_JSLT | BPF_X:
                        case BPF_JMP | BPF_JSGE | BPF_X:
                        case BPF_JMP | BPF_JSLE | BPF_X:
                                /* signed comparison */
                                PPC_CMPD(dst_reg, src_reg);
                                break;
                        case BPF_JMP | BPF_JSET | BPF_X:
                                PPC_AND_DOT(b2p[TMP_REG_1], dst_reg, src_reg);
                                break;
                        case BPF_JMP | BPF_JNE | BPF_K:
                        case BPF_JMP | BPF_JEQ | BPF_K:
                        case BPF_JMP | BPF_JGT | BPF_K:
                        case BPF_JMP | BPF_JLT | BPF_K:
                        case BPF_JMP | BPF_JGE | BPF_K:
                        case BPF_JMP | BPF_JLE | BPF_K:
                                /*
                                 * Need sign-extended load, so only positive
                                 * values can be used as imm in cmpldi
                                 */
                                if (imm >= 0 && imm < 32768)
                                        PPC_CMPLDI(dst_reg, imm);
                                else {
                                        /* sign-extending load */
                                        PPC_LI32(b2p[TMP_REG_1], imm);
                                        /* ... but unsigned comparison */
                                        PPC_CMPLD(dst_reg, b2p[TMP_REG_1]);
                                }
                                break;
                        case BPF_JMP | BPF_JSGT | BPF_K:
                        case BPF_JMP | BPF_JSLT | BPF_K:
                        case BPF_JMP | BPF_JSGE | BPF_K:
                        case BPF_JMP | BPF_JSLE | BPF_K:
                                /*
                                 * signed comparison, so any 16-bit value
                                 * can be used in cmpdi
                                 */
                                if (imm >= -32768 && imm < 32768)
                                        PPC_CMPDI(dst_reg, imm);
                                else {
                                        PPC_LI32(b2p[TMP_REG_1], imm);
                                        PPC_CMPD(dst_reg, b2p[TMP_REG_1]);
                                }
                                break;
                        case BPF_JMP | BPF_JSET | BPF_K:
                                /* andi does not sign-extend the immediate */
                                if (imm >= 0 && imm < 32768)
                                        /* PPC_ANDI is _only/always_ dot-form */
                                        PPC_ANDI(b2p[TMP_REG_1], dst_reg, imm);
                                else {
                                        PPC_LI32(b2p[TMP_REG_1], imm);
                                        PPC_AND_DOT(b2p[TMP_REG_1], dst_reg,
                                                    b2p[TMP_REG_1]);
                                }
                                break;
                        }
                        PPC_BCC(true_cond, addrs[i + 1 + off]);
                        break;

                /*
                 * Loads from packet header/data
                 * Assume 32-bit input value in imm and X (src_reg)
                 */

                /* Absolute loads */
                case BPF_LD | BPF_W | BPF_ABS:
                        func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_word);
                        goto common_load_abs;
                case BPF_LD | BPF_H | BPF_ABS:
                        func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_half);
                        goto common_load_abs;
                case BPF_LD | BPF_B | BPF_ABS:
                        func = (u8 *)CHOOSE_LOAD_FUNC(imm, sk_load_byte);
common_load_abs:
                        /*
                         * Load from [imm]
                         * Load into r4, which can just be passed onto
                         * skb load helpers as the second parameter
                         */
                        PPC_LI32(4, imm);
                        goto common_load;

                /* Indirect loads */
                case BPF_LD | BPF_W | BPF_IND:
                        func = (u8 *)sk_load_word;
                        goto common_load_ind;
                case BPF_LD | BPF_H | BPF_IND:
                        func = (u8 *)sk_load_half;
                        goto common_load_ind;
                case BPF_LD | BPF_B | BPF_IND:
                        func = (u8 *)sk_load_byte;
common_load_ind:
                        /*
                         * Load from [src_reg + imm]
                         * Treat src_reg as a 32-bit value
                         */
                        PPC_EXTSW(4, src_reg);
                        if (imm) {
                                if (imm >= -32768 && imm < 32768)
                                        PPC_ADDI(4, 4, IMM_L(imm));
                                else {
                                        PPC_LI32(b2p[TMP_REG_1], imm);
                                        PPC_ADD(4, 4, b2p[TMP_REG_1]);
                                }
                        }

common_load:
                        ctx->seen |= SEEN_SKB;
                        ctx->seen |= SEEN_FUNC;
                        bpf_jit_emit_func_call(image, ctx, (u64)func);

                        /*
                         * Helper returns 'lt' condition on error, and an
                         * appropriate return value in BPF_REG_0
                         */
                        PPC_BCC(COND_LT, exit_addr);
                        break;

                /*
                 * Tail call
                 */
                case BPF_JMP | BPF_TAIL_CALL:
                        ctx->seen |= SEEN_TAILCALL;
                        bpf_jit_emit_tail_call(image, ctx, addrs[i + 1]);
                        break;

                default:
                        /*
                         * The filter contains something cruel & unusual.
                         * We don't handle it, but also there shouldn't be
                         * anything missing from our list.
                         */
                        pr_err_ratelimited("eBPF filter opcode %04x (@%d) unsupported\n",
                                           code, i);
                        return -ENOTSUPP;
                }
        }

        /* Set end-of-body-code address for exit. */
        addrs[i] = ctx->idx * 4;

        return 0;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
{
        u32 proglen;
        u32 alloclen;
        u8 *image = NULL;
        u32 *code_base;
        u32 *addrs;
        struct codegen_context cgctx;
        int pass;
        int flen;
        struct bpf_binary_header *bpf_hdr;
        struct bpf_prog *org_fp = fp;
        struct bpf_prog *tmp_fp;
        bool bpf_blinded = false;

        if (!fp->jit_requested)
                return org_fp;

        tmp_fp = bpf_jit_blind_constants(org_fp);
        if (IS_ERR(tmp_fp))
                return org_fp;

        if (tmp_fp != org_fp) {
                bpf_blinded = true;
                fp = tmp_fp;
        }

        flen = fp->len;
        addrs = kzalloc((flen+1) * sizeof(*addrs), GFP_KERNEL);
        if (addrs == NULL) {
                fp = org_fp;
                goto out;
        }

        memset(&cgctx, 0, sizeof(struct codegen_context));

        /* Make sure that the stack is quadword aligned. */
        cgctx.stack_size = round_up(fp->aux->stack_depth, 16);

        /* Scouting faux-generate pass 0 */
        if (bpf_jit_build_body(fp, 0, &cgctx, addrs)) {
                /* We hit something illegal or unsupported. */
                fp = org_fp;
                goto out;
        }

        /*
         * Pretend to build prologue, given the features we've seen. This will
         * update cgctx.idx as it pretends to output instructions, then we can
         * calculate total size from idx.
         */
        bpf_jit_build_prologue(0, &cgctx);
        bpf_jit_build_epilogue(0, &cgctx);

        proglen = cgctx.idx * 4;
        alloclen = proglen + FUNCTION_DESCR_SIZE;

        bpf_hdr = bpf_jit_binary_alloc(alloclen, &image, 4,
                        bpf_jit_fill_ill_insns);
        if (!bpf_hdr) {
                fp = org_fp;
                goto out;
        }

        code_base = (u32 *)(image + FUNCTION_DESCR_SIZE);

        /* Code generation passes 1-2 */
        for (pass = 1; pass < 3; pass++) {
                /* Now build the prologue, body code & epilogue for real. */
                cgctx.idx = 0;
                bpf_jit_build_prologue(code_base, &cgctx);
                bpf_jit_build_body(fp, code_base, &cgctx, addrs);
                bpf_jit_build_epilogue(code_base, &cgctx);

                if (bpf_jit_enable > 1)
                        pr_info("Pass %d: shrink = %d, seen = 0x%x\n", pass,
                                proglen - (cgctx.idx * 4), cgctx.seen);
        }

        if (bpf_jit_enable > 1)
                /*
                 * Note that we output the base address of the code_base
                 * rather than image, since opcodes are in code_base.
                 */
                bpf_jit_dump(flen, proglen, pass, code_base);

#ifdef PPC64_ELF_ABI_v1
        /* Function descriptor nastiness: Address + TOC */
        ((u64 *)image)[0] = (u64)code_base;
        ((u64 *)image)[1] = local_paca->kernel_toc;
#endif

        fp->bpf_func = (void *)image;
        fp->jited = 1;
        fp->jited_len = alloclen;

        bpf_flush_icache(bpf_hdr, (u8 *)bpf_hdr + (bpf_hdr->pages * PAGE_SIZE));

out:
        kfree(addrs);

        if (bpf_blinded)
                bpf_jit_prog_release_other(fp, fp == org_fp ? tmp_fp : org_fp);

        return fp;
}

/* Overriding bpf_jit_free() as we don't set images read-only. */
void bpf_jit_free(struct bpf_prog *fp)
{
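        /*
         * The image starts in the same page(s) as the binary header
         * allocated by bpf_jit_binary_alloc(), so masking the image
         * address down to a page boundary recovers the header.
         */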
        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
        struct bpf_binary_header *bpf_hdr = (void *)addr;

        if (fp->jited)
                bpf_jit_binary_free(bpf_hdr);

        bpf_prog_unlock_free(fp);
}