1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2016-2018 Netronome Systems, Inc. */
4 #define pr_fmt(fmt) "NFP net bpf: " fmt
8 #include <linux/filter.h>
9 #include <linux/kernel.h>
10 #include <linux/pkt_cls.h>
11 #include <linux/reciprocal_div.h>
12 #include <linux/unistd.h>
15 #include "../nfp_asm.h"
16 #include "../nfp_net_ctrl.h"
18 /* --- NFP prog --- */
19 /* The "multiple entries" foreach macros provide pos and next<n> pointers.
20 * It's safe to modify the next pointers (but not pos).
22 #define nfp_for_each_insn_walk2(nfp_prog, pos, next) \
23 for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
24 next = list_next_entry(pos, l); \
25 &(nfp_prog)->insns != &pos->l && \
26 &(nfp_prog)->insns != &next->l; \
27 pos = nfp_meta_next(pos), \
28 next = nfp_meta_next(pos))
30 #define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2) \
31 for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
32 next = list_next_entry(pos, l), \
33 next2 = list_next_entry(next, l); \
34 &(nfp_prog)->insns != &pos->l && \
35 &(nfp_prog)->insns != &next->l && \
36 &(nfp_prog)->insns != &next2->l; \
37 pos = nfp_meta_next(pos), \
38 next = nfp_meta_next(pos), \
39 next2 = nfp_meta_next(next))
42 nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
44 return meta->l.prev != &nfp_prog->insns;
47 static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
49 if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
50 pr_warn("instruction limit reached (%u NFP instructions)\n",
52 nfp_prog->error = -ENOSPC;
56 nfp_prog->prog[nfp_prog->prog_len] = insn;
60 static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
62 return nfp_prog->prog_len;
66 nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
68 /* If there is a recorded error we may have dropped instructions;
69  * that doesn't have to be due to a translator bug, and the translation
70 * will fail anyway, so just return OK.
74 return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
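/* Note (illustrative, not in the original source): branch labels in this file
 * are hand-counted instruction offsets (e.g. "current offset + 14" in
 * adjust_head() below); this helper re-checks them after emission, so a
 * mismatch means the emitters produced a different instruction count than
 * the branch targets assumed.
 */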
77 /* --- Emitters --- */
79 __emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
80 u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
85 insn = FIELD_PREP(OP_CMD_A_SRC, areg) |
86 FIELD_PREP(OP_CMD_CTX, ctx) |
87 FIELD_PREP(OP_CMD_B_SRC, breg) |
88 FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
89 FIELD_PREP(OP_CMD_XFER, xfer) |
90 FIELD_PREP(OP_CMD_CNT, size) |
91 FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
92 FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
93 FIELD_PREP(OP_CMD_INDIR, indir) |
94 FIELD_PREP(OP_CMD_MODE, mode);
96 nfp_prog_push(nfp_prog, insn);
100 emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
101 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
103 struct nfp_insn_re_regs reg;
106	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
108 nfp_prog->error = err;
112 pr_err("cmd can't swap arguments\n");
113 nfp_prog->error = -EFAULT;
116 if (reg.dst_lmextn || reg.src_lmextn) {
117 pr_err("cmd can't use LMextn\n");
118 nfp_prog->error = -EFAULT;
122 __emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
127 emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
128 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
130 emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
134 emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
135 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
137 emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
141 __emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
142 enum br_ctx_signal_state css, u16 addr, u8 defer)
144 u16 addr_lo, addr_hi;
147 addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
148 addr_hi = addr != addr_lo;
151 FIELD_PREP(OP_BR_MASK, mask) |
152 FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
153 FIELD_PREP(OP_BR_CSS, css) |
154 FIELD_PREP(OP_BR_DEFBR, defer) |
155 FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
156 FIELD_PREP(OP_BR_ADDR_HI, addr_hi);
158 nfp_prog_push(nfp_prog, insn);
162 emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
163 enum nfp_relo_type relo)
165 if (mask == BR_UNC && defer > 2) {
166 pr_err("BUG: branch defer out of bounds %d\n", defer);
167 nfp_prog->error = -EFAULT;
171 __emit_br(nfp_prog, mask,
172 mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
173 BR_CSS_NONE, addr, defer);
175 nfp_prog->prog[nfp_prog->prog_len - 1] |=
176 FIELD_PREP(OP_RELO_TYPE, relo);
180 emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
182 emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
186 __emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
187 bool set, bool src_lmextn)
189 u16 addr_lo, addr_hi;
192 addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
193 addr_hi = addr != addr_lo;
195 insn = OP_BR_BIT_BASE |
196 FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
197 FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
198 FIELD_PREP(OP_BR_BIT_BV, set) |
199 FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
200 FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
201 FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
202 FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);
204 nfp_prog_push(nfp_prog, insn);
208 emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
209 u8 defer, bool set, enum nfp_relo_type relo)
211 struct nfp_insn_re_regs reg;
214	/* NOTE: The bit to test is specified as a rotation amount, such that
215 * the bit to test will be placed on the MSB of the result when
216 * doing a rotate right. For bit X, we need right rotate X + 1.
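	/* Illustration: for X = 5 the required right rotate is 6; bit 5 then
	 * lands in bit 31, the MSB that the branch-on-bit instruction samples.
	 */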
220	err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
222 nfp_prog->error = err;
226 __emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
229 nfp_prog->prog[nfp_prog->prog_len - 1] |=
230 FIELD_PREP(OP_RELO_TYPE, relo);
234 emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
236 emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
240 __emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
241 u8 defer, bool dst_lmextn, bool src_lmextn)
245 insn = OP_BR_ALU_BASE |
246 FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
247 FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
248 FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
249 FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
250 FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
251 FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);
253 nfp_prog_push(nfp_prog, insn);
256 static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
258 struct nfp_insn_ur_regs reg;
261	err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
263 nfp_prog->error = err;
267 __emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
272 __emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
273 enum immed_width width, bool invert,
274 enum immed_shift shift, bool wr_both,
275 bool dst_lmextn, bool src_lmextn)
279 insn = OP_IMMED_BASE |
280 FIELD_PREP(OP_IMMED_A_SRC, areg) |
281 FIELD_PREP(OP_IMMED_B_SRC, breg) |
282 FIELD_PREP(OP_IMMED_IMM, imm_hi) |
283 FIELD_PREP(OP_IMMED_WIDTH, width) |
284 FIELD_PREP(OP_IMMED_INV, invert) |
285 FIELD_PREP(OP_IMMED_SHIFT, shift) |
286 FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
287 FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
288 FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);
290 nfp_prog_push(nfp_prog, insn);
294 emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
295 enum immed_width width, bool invert, enum immed_shift shift)
297 struct nfp_insn_ur_regs reg;
300 if (swreg_type(dst) == NN_REG_IMM) {
301 nfp_prog->error = -EFAULT;
305	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
307 nfp_prog->error = err;
311 /* Use reg.dst when destination is No-Dest. */
312 __emit_immed(nfp_prog,
313 swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
314 reg.breg, imm >> 8, width, invert, shift,
315 reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
319 __emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
320 enum shf_sc sc, u8 shift,
321 u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
322 bool dst_lmextn, bool src_lmextn)
326 if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
327 nfp_prog->error = -EFAULT;
331	if (sc == SHF_SC_L_SHF)
332		shift = 32 - shift;
335 FIELD_PREP(OP_SHF_A_SRC, areg) |
336 FIELD_PREP(OP_SHF_SC, sc) |
337 FIELD_PREP(OP_SHF_B_SRC, breg) |
338 FIELD_PREP(OP_SHF_I8, i8) |
339 FIELD_PREP(OP_SHF_SW, sw) |
340 FIELD_PREP(OP_SHF_DST, dst) |
341 FIELD_PREP(OP_SHF_SHIFT, shift) |
342 FIELD_PREP(OP_SHF_OP, op) |
343 FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
344 FIELD_PREP(OP_SHF_WR_AB, wr_both) |
345 FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
346 FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);
348 nfp_prog_push(nfp_prog, insn);
352 emit_shf(struct nfp_prog *nfp_prog, swreg dst,
353 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
355 struct nfp_insn_re_regs reg;
358	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
360 nfp_prog->error = err;
364 __emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
365 reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
366 reg.dst_lmextn, reg.src_lmextn);
370 emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
371 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
373 if (sc == SHF_SC_R_ROT) {
374 pr_err("indirect shift is not allowed on rotation\n");
375 nfp_prog->error = -EFAULT;
379 emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
383 __emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
384 u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
385 bool dst_lmextn, bool src_lmextn)
390 FIELD_PREP(OP_ALU_A_SRC, areg) |
391 FIELD_PREP(OP_ALU_B_SRC, breg) |
392 FIELD_PREP(OP_ALU_DST, dst) |
393 FIELD_PREP(OP_ALU_SW, swap) |
394 FIELD_PREP(OP_ALU_OP, op) |
395 FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
396 FIELD_PREP(OP_ALU_WR_AB, wr_both) |
397 FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
398 FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);
400 nfp_prog_push(nfp_prog, insn);
404 emit_alu(struct nfp_prog *nfp_prog, swreg dst,
405 swreg lreg, enum alu_op op, swreg rreg)
407 struct nfp_insn_ur_regs reg;
410	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
412 nfp_prog->error = err;
416 __emit_alu(nfp_prog, reg.dst, reg.dst_ab,
417 reg.areg, op, reg.breg, reg.swap, reg.wr_both,
418 reg.dst_lmextn, reg.src_lmextn);
422 __emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
423 enum mul_type type, enum mul_step step, u16 breg, bool swap,
424 bool wr_both, bool dst_lmextn, bool src_lmextn)
429 FIELD_PREP(OP_MUL_A_SRC, areg) |
430 FIELD_PREP(OP_MUL_B_SRC, breg) |
431 FIELD_PREP(OP_MUL_STEP, step) |
432 FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
433 FIELD_PREP(OP_MUL_SW, swap) |
434 FIELD_PREP(OP_MUL_TYPE, type) |
435 FIELD_PREP(OP_MUL_WR_AB, wr_both) |
436 FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
437 FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);
439 nfp_prog_push(nfp_prog, insn);
443 emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
444 enum mul_step step, swreg rreg)
446 struct nfp_insn_ur_regs reg;
450 if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
451 nfp_prog->error = -EINVAL;
455 if (step == MUL_LAST || step == MUL_LAST_2) {
456		/* When the step number is MUL_LAST or MUL_LAST_2, the left
457		 * source is used as the destination.
459		err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
462		err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
467 nfp_prog->error = err;
471 __emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
472 reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
476 __emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
477 u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
478 bool zero, bool swap, bool wr_both,
479 bool dst_lmextn, bool src_lmextn)
484 FIELD_PREP(OP_LDF_A_SRC, areg) |
485 FIELD_PREP(OP_LDF_SC, sc) |
486 FIELD_PREP(OP_LDF_B_SRC, breg) |
487 FIELD_PREP(OP_LDF_I8, imm8) |
488 FIELD_PREP(OP_LDF_SW, swap) |
489 FIELD_PREP(OP_LDF_ZF, zero) |
490 FIELD_PREP(OP_LDF_BMASK, bmask) |
491 FIELD_PREP(OP_LDF_SHF, shift) |
492 FIELD_PREP(OP_LDF_WR_AB, wr_both) |
493 FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
494 FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);
496 nfp_prog_push(nfp_prog, insn);
500 emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
501 enum shf_sc sc, u8 shift, bool zero)
503 struct nfp_insn_re_regs reg;
506 /* Note: ld_field is special as it uses one of the src regs as dst */
507	err = swreg_to_restricted(dst, dst, src, &reg, true);
509 nfp_prog->error = err;
513 __emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
514 reg.i8, zero, reg.swap, reg.wr_both,
515 reg.dst_lmextn, reg.src_lmextn);
519 emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
520 enum shf_sc sc, u8 shift)
522 emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
526 __emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
527 bool dst_lmextn, bool src_lmextn)
531 insn = OP_LCSR_BASE |
532 FIELD_PREP(OP_LCSR_A_SRC, areg) |
533 FIELD_PREP(OP_LCSR_B_SRC, breg) |
534 FIELD_PREP(OP_LCSR_WRITE, wr) |
535 FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
536 FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
537 FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);
539 nfp_prog_push(nfp_prog, insn);
542 static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
544 struct nfp_insn_ur_regs reg;
547 /* This instruction takes immeds instead of reg_none() for the ignored
548 * operand, but we can't encode 2 immeds in one instr with our normal
549	 * swreg infra, so if the param is an immed, we encode it as reg_none() and
550 * copy the immed to both operands.
552 if (swreg_type(src) == NN_REG_IMM) {
553		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
556		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
559 nfp_prog->error = err;
563 __emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
564 false, reg.src_lmextn);
567 /* CSR value is read in following immed[gpr, 0] */
568 static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
570 __emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
573 static void emit_nop(struct nfp_prog *nfp_prog)
575 __emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
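	/* A nop is thus an immed whose destination is the immediate bank
	 * (UR_REG_IMM), i.e. effectively a write whose result is discarded.
	 */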
578 /* --- Wrappers --- */
579 static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
581	if (!(imm & 0xffff0000)) {
582		*val = imm;
583		*shift = IMMED_SHIFT_0B;
584	} else if (!(imm & 0xff0000ff)) {
585		*val = imm >> 8;
586		*shift = IMMED_SHIFT_1B;
587	} else if (!(imm & 0x0000ffff)) {
588		*val = imm >> 16;
589		*shift = IMMED_SHIFT_2B;
597 static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
599 enum immed_shift shift;
602 if (pack_immed(imm, &val, &shift)) {
603 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
604 } else if (pack_immed(~imm, &val, &shift)) {
605 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
607 emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
608 false, IMMED_SHIFT_0B);
609 emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
610 false, IMMED_SHIFT_2B);
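	/* Worked example (illustrative): imm = 0xab120000 packs as
	 * val = 0xab12 with IMMED_SHIFT_2B, so one immed instruction
	 * suffices; imm = 0x12345678 packs neither directly nor inverted,
	 * so the low half is loaded with IMMED_WIDTH_ALL and the high half
	 * is patched in by a second, word-width immed shifted by two bytes.
	 */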
615 wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
616 enum nfp_relo_type relo)
619 pr_err("relocation of a large immediate!\n");
620 nfp_prog->error = -EFAULT;
623 emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
625 nfp_prog->prog[nfp_prog->prog_len - 1] |=
626 FIELD_PREP(OP_RELO_TYPE, relo);
629 /* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
630  * If @imm is small enough, encode it directly in the operand and return;
631  * otherwise load @imm into a spare register and return its encoding.
633 static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
635 if (FIELD_FIT(UR_REG_IMM_MAX, imm))
638 wrp_immed(nfp_prog, tmp_reg, imm);
642 /* re_load_imm_any() - encode immediate or use tmp register (restricted)
643  * If @imm is small enough, encode it directly in the operand and return;
644  * otherwise load @imm into a spare register and return its encoding.
646 static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
648 if (FIELD_FIT(RE_REG_IMM_MAX, imm))
651 wrp_immed(nfp_prog, tmp_reg, imm);
655 static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
661 static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
663 emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
666 static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
668 wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
671 /* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
672 * result to @dst from low end.
675 wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
678 enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
679 u8 mask = (1 << field_len) - 1;
681 emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
684 /* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src, OR
685  * the result into @dst at @offset; the other bits of @dst are unchanged.
688 wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
689 u8 field_len, u8 offset)
691 enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
692 u8 mask = ((1 << field_len) - 1) << offset;
694 emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
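/* Illustration: field_len = 2, offset = 1 gives byte mask 0x6 and a left
 * shift of 8 (ld_field encodes "left shift by N" as 32 - N in its shift
 * field, matching the SHF_SC_L_SHF conversion in __emit_shf()), merging the
 * low two bytes of @src into bytes 1-2 of @dst.
 */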
698 addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
699 swreg *rega, swreg *regb)
701 if (offset == reg_imm(0)) {
702 *rega = reg_a(src_gpr);
703 *regb = reg_b(src_gpr + 1);
707 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
708 emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
710 *rega = imm_a(nfp_prog);
711 *regb = imm_b(nfp_prog);
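/* A 40-bit address lives in a GPR pair: low 32 bits in src_gpr, upper 8 bits
 * in src_gpr + 1. E.g. a base whose low word is 0xfffffff0 plus offset 0x20
 * overflows the low word; the ALU_OP_ADD/ALU_OP_ADD_C pair above folds the
 * carry into the upper word.
 */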
714 /* NFP has a Command Push Pull bus which supports bulk memory operations. */
715 static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
717 bool descending_seq = meta->ldst_gather_len < 0;
718 s16 len = abs(meta->ldst_gather_len);
724 off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
725 src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
726 src_base = reg_a(meta->insn.src_reg * 2);
727 xfer_num = round_up(len, 4) / 4;
730 addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
733	/* Set up PREV_ALU fields to override the memory read length. */
735 wrp_immed(nfp_prog, reg_none(),
736 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
738 /* Memory read from source addr into transfer-in registers. */
739 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
740 src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
741 src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);
743 /* Move from transfer-in to transfer-out. */
744 for (i = 0; i < xfer_num; i++)
745 wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));
747 off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));
750 /* Use single direct_ref write8. */
751 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
752 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
754 } else if (len <= 32 && IS_ALIGNED(len, 4)) {
755 /* Use single direct_ref write32. */
756 emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
757 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
759 } else if (len <= 32) {
760 /* Use single indirect_ref write8. */
761 wrp_immed(nfp_prog, reg_none(),
762 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
763 emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
764 reg_a(meta->paired_st->dst_reg * 2), off,
765 len - 1, CMD_CTX_SWAP);
766 } else if (IS_ALIGNED(len, 4)) {
767 /* Use single indirect_ref write32. */
768 wrp_immed(nfp_prog, reg_none(),
769 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
770 emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
771 reg_a(meta->paired_st->dst_reg * 2), off,
772 xfer_num - 1, CMD_CTX_SWAP);
773 } else if (len <= 40) {
774		/* Use one direct_ref write32 to write the first 32 bytes, then
775 * another direct_ref write8 to write the remaining bytes.
777 emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
778 reg_a(meta->paired_st->dst_reg * 2), off, 7,
781 off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
783 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
784 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
787		/* Use one indirect_ref write32 to write the 4-byte aligned length,
788 * then another direct_ref write8 to write the remaining bytes.
792 wrp_immed(nfp_prog, reg_none(),
793 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
794 emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
795 reg_a(meta->paired_st->dst_reg * 2), off,
796 xfer_num - 2, CMD_CTX_SWAP);
797 new_off = meta->paired_st->off + (xfer_num - 1) * 4;
798 off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
799 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
800 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
801 (len & 0x3) - 1, CMD_CTX_SWAP);
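	/* Worked example (illustrative): len = 43 gives xfer_num = 11; the
	 * branch above emits an indirect write32 of (xfer_num - 2 + 1) * 4 =
	 * 40 bytes, then a direct write8 of the remaining (len & 0x3) = 3
	 * bytes from the last transfer register.
	 */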
804	/* TODO: The following extra load is to make sure the data flow is
805	 * identical before and after we do the memory copy optimization.
807 * The load destination register is not guaranteed to be dead, so we
808 * need to make sure it is loaded with the value the same as before
809 * this transformation.
811	 * These extra loads could be removed once we have accurate register
812	 * usage information.
813	 */
814	if (descending_seq)
815		xfer_num = 0;
816	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
817		xfer_num = xfer_num - 1;
818	else
819		xfer_num = xfer_num - 2;
821	switch (BPF_SIZE(meta->insn.code)) {
822	case BPF_B:
823		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
824				reg_xfer(xfer_num), 1,
825				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
826		break;
827	case BPF_H:
828		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
829				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
830		break;
831	case BPF_W:
832		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
833			reg_xfer(0));
834		break;
835	case BPF_DW:
836		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
837			reg_xfer(xfer_num));
838		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
839			reg_xfer(xfer_num + 1));
840		break;
841	}
843 if (BPF_SIZE(meta->insn.code) != BPF_DW)
844 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
850 data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
855 /* We load the value from the address indicated in @offset and then
856 * shift out the data we don't need. Note: this is big endian!
859 shift = size < 4 ? 4 - size : 0;
861 emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
862 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);
866 emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
867 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
869 for (; i * 4 < size; i++)
870 wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
873 wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
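	/* E.g. a 2-byte load lands in the top bytes of the xfer register
	 * (big endian), so it is shifted right by (4 - 2) * 8 = 16 bits into
	 * the low end of the destination.
	 */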
879 data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
880 swreg lreg, swreg rreg, int size, enum cmd_mode mode)
885 /* We load the value from the address indicated in rreg + lreg and then
886 * mask out the data we don't need. Note: this is little endian!
889 mask = size < 4 ? GENMASK(size - 1, 0) : 0;
891 emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
892 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);
896 emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
897 reg_xfer(0), SHF_SC_NONE, 0, true);
899 for (; i * 4 < size; i++)
900 wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));
903 wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);
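	/* E.g. a 2-byte load here needs no shift: the data already sits in
	 * the low bytes, so byte mask GENMASK(1, 0) = 0x3 merges it in and
	 * zeroes the rest.
	 */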
909 data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
912 return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
917 data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
922	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);
924 return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
925 size, CMD_MODE_40b_BA);
929 construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
933 /* Calculate the true offset (src_reg + imm) */
934 tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
935 emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);
937	/* Check packet length (size guaranteed to fit because it's u8) */
938 emit_alu(nfp_prog, imm_a(nfp_prog),
939 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
940 emit_alu(nfp_prog, reg_none(),
941 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
942 emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
945 return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
948 static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
952 /* Check packet length */
953 tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
954 emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
955 emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);
958 tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
959 return data_ld(nfp_prog, tmp_reg, 0, size);
963 data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
968 for (i = 0; i * 4 < size; i++)
969 wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));
971 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
972 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);
978 data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
981 wrp_immed(nfp_prog, reg_xfer(0), imm);
983 wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);
985 emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
986 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);
992 (*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
993 unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
997 wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
998 unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
1001 bool should_inc = needs_inc && new_gpr && !last;
1008 if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
1013 /* Move the entire word */
1015 wrp_mov(nfp_prog, reg_both(dst),
1016 should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
1020 if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
1025 mask = (1 << size) - 1;
1028 if (WARN_ON_ONCE(mask > 0xf))
1031 shf = abs(src_byte - dst_byte) * 8;
1032 if (src_byte == dst_byte) {
1034 } else if (src_byte < dst_byte) {
1041	/* ld_field can address fewer indexes; if the offset is too large, do RMW.
1042	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
1044 if (idx <= RE_REG_LM_IDX_MAX) {
1045 reg = reg_lm(lm3 ? 3 : 0, idx);
1047 reg = imm_a(nfp_prog);
1048 /* If it's not the first part of the load and we start a new GPR
1049 * that means we are loading a second part of the LMEM word into
1050		 * a new GPR. IOW we've already read that LMEM word and
1051 * therefore it has been loaded into imm_a().
1053 if (first || !new_gpr)
1054 wrp_mov(nfp_prog, reg, reg_lm(0, idx));
1057 emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);
1060 wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
1066 wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
1067 unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
1070 bool should_inc = needs_inc && new_gpr && !last;
1077 if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
1082 /* Move the entire word */
1085 should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
1090 if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
1095 mask = (1 << size) - 1;
1098 if (WARN_ON_ONCE(mask > 0xf))
1101 shf = abs(src_byte - dst_byte) * 8;
1102 if (src_byte == dst_byte) {
1104 } else if (src_byte < dst_byte) {
1111	/* ld_field can address fewer indexes; if the offset is too large, do RMW.
1112	 * Because we RMW twice we waste 2 cycles on unaligned 8 byte writes.
1114 if (idx <= RE_REG_LM_IDX_MAX) {
1115 reg = reg_lm(lm3 ? 3 : 0, idx);
1117 reg = imm_a(nfp_prog);
1118		/* Only the first and last LMEM locations need RMW;
1119		 * the middle locations will be overwritten fully.
1122 wrp_mov(nfp_prog, reg, reg_lm(0, idx));
1125 emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);
1127 if (new_gpr || last) {
1128 if (idx > RE_REG_LM_IDX_MAX)
1129 wrp_mov(nfp_prog, reg_lm(0, idx), reg);
1131 wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
1138 mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1139 unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
1140 bool clr_gpr, lmem_step step)
1142 s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
1143 bool first = true, last;
1144 bool needs_inc = false;
1145 swreg stack_off_reg;
1151 if (meta->ptr_not_const ||
1152 meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
1153 /* Use of the last encountered ptr_off is OK, they all have
1154		 * the same alignment. We depend on the low bits of the value
1155		 * being discarded when written to the LMaddr register.
1157 stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
1158 stack_imm(nfp_prog));
1160 emit_alu(nfp_prog, imm_b(nfp_prog),
1161 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);
1164 } else if (off + size <= 64) {
1165 /* We can reach bottom 64B with LMaddr0 */
1167 } else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
1168 /* We have to set up a new pointer. If we know the offset
1169 * and the entire access falls into a single 32 byte aligned
1170 * window we won't have to increment the LM pointer.
1171		 * The 32 byte alignment is important because the offset is
1172		 * ORed in, not added, when doing *l$indexN[off].
1174 stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
1175 stack_imm(nfp_prog));
1176 emit_alu(nfp_prog, imm_b(nfp_prog),
1177 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);
1181 stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
1182 stack_imm(nfp_prog));
1184 emit_alu(nfp_prog, imm_b(nfp_prog),
1185 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);
1190 emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
1191	/* For size < 4 one delay slot will be filled by the zeroing of the upper half. */
1192 wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
1195 if (clr_gpr && size < 8)
1196 wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
1202 slice_size = min(size, 4 - gpr_byte);
1203 slice_end = min(off + slice_size, round_up(off + 1, 4));
1204 slice_size = slice_end - off;
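		/* Illustration (hand-traced, not in the original source): an
		 * 8-byte access at LMEM offset 6 is cut into four 2-byte
		 * slices -- each slice ends at the nearer of the GPR byte
		 * boundary (4 - gpr_byte) and the LMEM word boundary
		 * (round_up(off + 1, 4)).
		 */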
1206 last = slice_size == size;
1211 ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
1212 first, gpr != prev_gpr, last, lm3, needs_inc);
1219 gpr_byte += slice_size;
1220 if (gpr_byte >= 4) {
1233 wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
1237 if (alu_op == ALU_OP_AND) {
1239 wrp_immed(nfp_prog, reg_both(dst), 0);
1243 if (alu_op == ALU_OP_OR) {
1245 wrp_immed(nfp_prog, reg_both(dst), ~0U);
1249 if (alu_op == ALU_OP_XOR) {
1251 emit_alu(nfp_prog, reg_both(dst), reg_none(),
1252 ALU_OP_NOT, reg_b(dst));
1257 tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
1258 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
1262 wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1263 enum alu_op alu_op, bool skip)
1265 const struct bpf_insn *insn = &meta->insn;
1266 u64 imm = insn->imm; /* sign extend */
1269 meta->flags |= FLAG_INSN_SKIP_NOOP;
1273 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
1274 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);
1280 wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1283 u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
1285 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
1286 emit_alu(nfp_prog, reg_both(dst + 1),
1287 reg_a(dst + 1), alu_op, reg_b(src + 1));
1293 wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1296 const struct bpf_insn *insn = &meta->insn;
1298 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
1299 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
1305 wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1308 u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;
1310 emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
1311 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
1317 wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
1318 enum br_mask br_mask, u16 off)
1320 emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
1321 emit_br(nfp_prog, br_mask, off, 0);
1325 wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1326 enum alu_op alu_op, enum br_mask br_mask)
1328 const struct bpf_insn *insn = &meta->insn;
1330 wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
1331 insn->src_reg * 2, br_mask, insn->off);
1332 if (is_mbpf_jmp64(meta))
1333 wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
1334 insn->src_reg * 2 + 1, br_mask, insn->off);
1339 static const struct jmp_code_map {
1340	enum br_mask br_mask;
1341	bool swap;
1342 } jmp_code_map[] = {
1343 [BPF_JGT >> 4] = { BR_BLO, true },
1344 [BPF_JGE >> 4] = { BR_BHS, false },
1345 [BPF_JLT >> 4] = { BR_BLO, false },
1346 [BPF_JLE >> 4] = { BR_BHS, true },
1347 [BPF_JSGT >> 4] = { BR_BLT, true },
1348 [BPF_JSGE >> 4] = { BR_BGE, false },
1349 [BPF_JSLT >> 4] = { BR_BLT, false },
1350 [BPF_JSLE >> 4] = { BR_BGE, true },
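/* The second initializer is the swap flag: e.g. BPF_JGT dst, src has no
 * direct NFP branch mask, so the operands are swapped and the test becomes
 * "src < dst", i.e. BR_BLO ("branch if lower") with swapped sources.
 */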
1353 static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
1357 op = BPF_OP(meta->insn.code) >> 4;
1358	/* br_mask of 0 is BR_BEQ which we don't use in the jump code table */
1359 if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
1360 !jmp_code_map[op].br_mask,
1361 "no code found for jump instruction"))
1364 return &jmp_code_map[op];
1367 static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1369 const struct bpf_insn *insn = &meta->insn;
1370 u64 imm = insn->imm; /* sign extend */
1371 const struct jmp_code_map *code;
1372 enum alu_op alu_op, carry_op;
1373 u8 reg = insn->dst_reg * 2;
1376 code = nfp_jmp_code_get(meta);
1380 alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
1381 carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;
1383 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
1384	if (!code->swap)
1385		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
1386	else
1387		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));
1389 if (is_mbpf_jmp64(meta)) {
1390 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
1391		if (!code->swap)
1392			emit_alu(nfp_prog, reg_none(),
1393				 reg_a(reg + 1), carry_op, tmp_reg);
1394		else
1395			emit_alu(nfp_prog, reg_none(),
1396				 tmp_reg, carry_op, reg_a(reg + 1));
1399 emit_br(nfp_prog, code->br_mask, insn->off, 0);
1404 static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1406 const struct bpf_insn *insn = &meta->insn;
1407 const struct jmp_code_map *code;
1410 code = nfp_jmp_code_get(meta);
1414 areg = insn->dst_reg * 2;
1415 breg = insn->src_reg * 2;
1423 emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
1424 if (is_mbpf_jmp64(meta))
1425 emit_alu(nfp_prog, reg_none(),
1426 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
1427 emit_br(nfp_prog, code->br_mask, insn->off, 0);
1432 static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
1434 emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
1436 emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
1441 wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
1442 swreg rreg, bool gen_high_half)
1444 emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
1445 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
1446 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
1447 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
1448 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
1449 emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
1451 emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
1454 wrp_immed(nfp_prog, dst_hi, 0);
1458 wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
1461 emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
1462 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
1463 emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
1464 emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
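/* The NFP multiplier is stepped: a START instruction, the per-step partial
 * products, then MUL_LAST (plus MUL_LAST_2 for the high half) to read the
 * result out. 16x16 needs two steps versus four for 32x32, which is why
 * wrp_mul() below picks it whenever both operands' max values fit in 16 bits.
 */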
1468 wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
1469 bool gen_high_half, bool ropnd_from_reg)
1471 swreg multiplier, multiplicand, dst_hi, dst_lo;
1472 const struct bpf_insn *insn = &meta->insn;
1473 u32 lopnd_max, ropnd_max;
1476 dst_reg = insn->dst_reg;
1477 multiplicand = reg_a(dst_reg * 2);
1478 dst_hi = reg_both(dst_reg * 2 + 1);
1479 dst_lo = reg_both(dst_reg * 2);
1480 lopnd_max = meta->umax_dst;
1481 if (ropnd_from_reg) {
1482 multiplier = reg_b(insn->src_reg * 2);
1483 ropnd_max = meta->umax_src;
1485 u32 imm = insn->imm;
1487 multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
1490 if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
1491 wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
1494 wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);
1499 static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
1501	swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_b(dst);
1502 struct reciprocal_value_adv rvalue;
1506 if (imm > U32_MAX) {
1507 wrp_immed(nfp_prog, dst_both, 0);
1511 /* NOTE: because we are using "reciprocal_value_adv" which doesn't
1512	 * support "divisor > (1u << 31)", we need to JIT a separate NFP sequence
1513	 * to handle such a case; it is in fact equal to the result of the
1514	 * unsigned comparison "dst >= imm", calculated with the NFP sequence:
1517	 *  alu[--, dst, -, imm]
1518	 *  immed[imm, 0]
1519	 *  alu[dst, imm, +carry, 0]
1522 if (imm > 1U << 31) {
1523 swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
1525 emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
1526 wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
1527 emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
1532 rvalue = reciprocal_value_adv(imm, 32);
1534 if (rvalue.is_wide_m && !(imm & 1)) {
1535 pre_shift = fls(imm & -imm) - 1;
1536 rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
1540 magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
1541 if (imm == 1U << exp) {
1542		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
1543			 SHF_SC_R_SHF, exp);
1544 } else if (rvalue.is_wide_m) {
1545 wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
1547 emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
1549 emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
1551 emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
1553 emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
1554 SHF_SC_R_SHF, rvalue.sh - 1);
1557 emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
1558 dst_b, SHF_SC_R_SHF, pre_shift);
1559 wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
1560 emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
1561 dst_b, SHF_SC_R_SHF, rvalue.sh);
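	/* Worked examples (illustrative): imm = 8 hits the power-of-two case
	 * above and costs a single right shift by 3. An odd divisor such as 5
	 * takes this final branch: dst = mul_high(dst, rvalue.m) >> rvalue.sh
	 * (with a pre-shift first for even divisors whose magic is wide). The
	 * imm > 2^31 case earlier degenerates into the 0/1 result of
	 * "dst >= imm".
	 */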
1567 static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1569 swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
1570 struct nfp_bpf_cap_adjust_head *adjust_head;
1571 u32 ret_einval, end;
1573 adjust_head = &nfp_prog->bpf->adjust_head;
1575 /* Optimized version - 5 vs 14 cycles */
1576 if (nfp_prog->adjust_head_location != UINT_MAX) {
1577 if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
1580 emit_alu(nfp_prog, pptr_reg(nfp_prog),
1581 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
1582 emit_alu(nfp_prog, plen_reg(nfp_prog),
1583 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1584 emit_alu(nfp_prog, pv_len(nfp_prog),
1585 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1587 wrp_immed(nfp_prog, reg_both(0), 0);
1588 wrp_immed(nfp_prog, reg_both(1), 0);
1590 /* TODO: when adjust head is guaranteed to succeed we can
1591 * also eliminate the following if (r0 == 0) branch.
1597 ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
1598 end = ret_einval + 2;
1600 /* We need to use a temp because offset is just a part of the pkt ptr */
1601 emit_alu(nfp_prog, tmp,
1602 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));
1604 /* Validate result will fit within FW datapath constraints */
1605 emit_alu(nfp_prog, reg_none(),
1606 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
1607 emit_br(nfp_prog, BR_BLO, ret_einval, 0);
1608 emit_alu(nfp_prog, reg_none(),
1609 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
1610 emit_br(nfp_prog, BR_BLO, ret_einval, 0);
1612 /* Validate the length is at least ETH_HLEN */
1613 emit_alu(nfp_prog, tmp_len,
1614 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1615 emit_alu(nfp_prog, reg_none(),
1616 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
1617 emit_br(nfp_prog, BR_BMI, ret_einval, 0);
1619 /* Load the ret code */
1620 wrp_immed(nfp_prog, reg_both(0), 0);
1621 wrp_immed(nfp_prog, reg_both(1), 0);
1623 /* Modify the packet metadata */
1624 emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);
1626 /* Skip over the -EINVAL ret code (defer 2) */
1627 emit_br(nfp_prog, BR_UNC, end, 2);
1629 emit_alu(nfp_prog, plen_reg(nfp_prog),
1630 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1631 emit_alu(nfp_prog, pv_len(nfp_prog),
1632 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
1634 /* return -EINVAL target */
1635 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
1638 wrp_immed(nfp_prog, reg_both(0), -22);
1639 wrp_immed(nfp_prog, reg_both(1), ~0);
1641 if (!nfp_prog_confirm_current_offset(nfp_prog, end))
1647 static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1649 u32 ret_einval, end;
1652 BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));
1654 plen = imm_a(nfp_prog);
1655 delta = reg_a(2 * 2);
1657 ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
1658 end = nfp_prog_current_offset(nfp_prog) + 11;
1660 /* Calculate resulting length */
1661 emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
1662	/* delta == 0 is not allowed by the kernel, add must overflow to make
1663	 * length smaller.
1664	 */
1665 emit_br(nfp_prog, BR_BCC, ret_einval, 0);
1667 /* if (new_len < 14) then -EINVAL */
1668 emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
1669 emit_br(nfp_prog, BR_BMI, ret_einval, 0);
1671 emit_alu(nfp_prog, plen_reg(nfp_prog),
1672 plen_reg(nfp_prog), ALU_OP_ADD, delta);
1673 emit_alu(nfp_prog, pv_len(nfp_prog),
1674 pv_len(nfp_prog), ALU_OP_ADD, delta);
1676 emit_br(nfp_prog, BR_UNC, end, 2);
1677 wrp_immed(nfp_prog, reg_both(0), 0);
1678 wrp_immed(nfp_prog, reg_both(1), 0);
1680 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
1683 wrp_immed(nfp_prog, reg_both(0), -22);
1684 wrp_immed(nfp_prog, reg_both(1), ~0);
1686 if (!nfp_prog_confirm_current_offset(nfp_prog, end))
1693 map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1699	/* We only have to reload LM0 if the key is not at the start of the stack */
1700 lm_off = nfp_prog->stack_frame_depth;
1701 lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
1702 load_lm_ptr = meta->arg2.var_off || lm_off;
1704 /* Set LM0 to start of key */
1706 emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
1707 if (meta->func_id == BPF_FUNC_map_update_elem)
1708 emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);
1710 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
1712 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
1714 /* Load map ID into A0 */
1715 wrp_mov(nfp_prog, reg_a(0), reg_a(2));
1717 /* Load the return address into B0 */
1718 wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
1720 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
1723 /* Reset the LM0 pointer */
1727 emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
1728 wrp_nops(nfp_prog, 3);
1734 nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1736 __emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM);
1737 /* CSR value is read in following immed[gpr, 0] */
1738 emit_immed(nfp_prog, reg_both(0), 0,
1739 IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
1740 emit_immed(nfp_prog, reg_both(1), 0,
1741 IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
1746 nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1751 ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));
1753 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
1755 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
1758 /* Load ptr type into A1 */
1759 wrp_mov(nfp_prog, reg_a(1), ptr_type);
1761 /* Load the return address into B0 */
1762 wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);
1764 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
1771 nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1775 jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;
1777 /* Make sure the queue id fits into FW field */
1778 emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
1779 ALU_OP_AND_NOT_B, reg_imm(0xff));
1780 emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);
1782 /* Set the 'queue selected' bit and the queue value */
1783 emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
1784 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
1785 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
1786 emit_ld_field(nfp_prog,
1787 pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
1789	/* Delay slots end here; we will jump over the next instruction if the
1790	 * queue value fits into the field.
1792 emit_ld_field(nfp_prog,
1793 pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
1796 if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
1802 /* --- Callbacks --- */
1803 static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1805 const struct bpf_insn *insn = &meta->insn;
1806 u8 dst = insn->dst_reg * 2;
1807 u8 src = insn->src_reg * 2;
1809 if (insn->src_reg == BPF_REG_10) {
1810 swreg stack_depth_reg;
1812 stack_depth_reg = ur_load_imm_any(nfp_prog,
1813 nfp_prog->stack_frame_depth,
1814 stack_imm(nfp_prog));
1815 emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
1816 ALU_OP_ADD, stack_depth_reg);
1817 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
1819 wrp_reg_mov(nfp_prog, dst, src);
1820 wrp_reg_mov(nfp_prog, dst + 1, src + 1);
1826 static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1828 u64 imm = meta->insn.imm; /* sign extend */
1830 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
1831 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);
1836 static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1838 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
1841 static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1843 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
1846 static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1848 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
1851 static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1853 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
1856 static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1858 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
1861 static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1863 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
1866 static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1868 const struct bpf_insn *insn = &meta->insn;
1870 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
1871 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
1872 reg_b(insn->src_reg * 2));
1873 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
1874 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
1875 reg_b(insn->src_reg * 2 + 1));
1880 static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1882 const struct bpf_insn *insn = &meta->insn;
1883 u64 imm = insn->imm; /* sign extend */
1885 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
1886 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);
1891 static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1893 const struct bpf_insn *insn = &meta->insn;
1895 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
1896 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
1897 reg_b(insn->src_reg * 2));
1898 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
1899 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
1900 reg_b(insn->src_reg * 2 + 1));
1905 static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1907 const struct bpf_insn *insn = &meta->insn;
1908 u64 imm = insn->imm; /* sign extend */
1910 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
1911 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);
1916 static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1918 return wrp_mul(nfp_prog, meta, true, true);
1921 static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1923 return wrp_mul(nfp_prog, meta, true, false);
1926 static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1928 const struct bpf_insn *insn = &meta->insn;
1930 return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm);
1933 static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1935 /* NOTE: verifier hook has rejected cases for which verifier doesn't
1936 * know whether the source operand is constant or not.
1938 return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src);
1941 static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1943 const struct bpf_insn *insn = &meta->insn;
1945 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
1946 ALU_OP_SUB, reg_b(insn->dst_reg * 2));
1947 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
1948 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));
1954  * if shift_amt >= 32
1955  *	dst_high = dst_low << shift_amt[4:0]
1956  *	dst_low = 0
1957  * else
1958  *	dst_high = (dst_high, dst_low) >> (32 - shift_amt)
1959  *	dst_low = dst_low << shift_amt
1961 * The indirect shift will use the same logic at runtime.
1963 static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
1968 if (shift_amt < 32) {
1969 emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1),
1970			 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF,
1971			 32 - shift_amt);
1972 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
1973 reg_b(dst), SHF_SC_L_SHF, shift_amt);
1974 } else if (shift_amt == 32) {
1975 wrp_reg_mov(nfp_prog, dst + 1, dst);
1976 wrp_immed(nfp_prog, reg_both(dst), 0);
1977 } else if (shift_amt > 32) {
1978 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
1979 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32);
1980 wrp_immed(nfp_prog, reg_both(dst), 0);
1986 static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1988 const struct bpf_insn *insn = &meta->insn;
1989 u8 dst = insn->dst_reg * 2;
1991 return __shl_imm64(nfp_prog, dst, insn->imm);
1994 static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
1996	emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB,
1997		 reg_b(src));
1998 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0));
1999 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE,
2000 reg_b(dst), SHF_SC_R_DSHF);
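/* The "OR with 0" ALU instructions in this and the following helpers exist
 * only for their side effect: an indirect shift takes its shift amount from
 * the preceding ALU instruction (PREV_ALU, source A), here the 32 - shift_amt
 * value computed above.
 */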
2003 /* NOTE: for indirect left shift, HIGH part should be calculated first. */
2004 static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2006 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2007 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2008 reg_b(dst), SHF_SC_L_SHF);
2011 static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2013 shl_reg64_lt32_high(nfp_prog, dst, src);
2014 shl_reg64_lt32_low(nfp_prog, dst, src);
2017 static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2019 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2020 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
2021 reg_b(dst), SHF_SC_L_SHF);
2022 wrp_immed(nfp_prog, reg_both(dst), 0);
2025 static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2027 const struct bpf_insn *insn = &meta->insn;
2031 dst = insn->dst_reg * 2;
2032 umin = meta->umin_src;
2033 umax = meta->umax_src;
2035 return __shl_imm64(nfp_prog, dst, umin);
2037 src = insn->src_reg * 2;
2039 shl_reg64_lt32(nfp_prog, dst, src);
2040 } else if (umin >= 32) {
2041 shl_reg64_ge32(nfp_prog, dst, src);
2043 /* Generate different instruction sequences depending on runtime
2044 * value of shift amount.
2046 u16 label_ge32, label_end;
2048 label_ge32 = nfp_prog_current_offset(nfp_prog) + 7;
2049 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2051 shl_reg64_lt32_high(nfp_prog, dst, src);
2052 label_end = nfp_prog_current_offset(nfp_prog) + 6;
2053 emit_br(nfp_prog, BR_UNC, label_end, 2);
2054 /* shl_reg64_lt32_low packed in delay slot. */
2055 shl_reg64_lt32_low(nfp_prog, dst, src);
2057 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2059 shl_reg64_ge32(nfp_prog, dst, src);
2061 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2069  * if shift_amt >= 32
2070  *	dst_high = 0
2071  *	dst_low = dst_high >> shift_amt[4:0]
2072  * else
2073 * dst_high = dst_high >> shift_amt
2074 * dst_low = (dst_high, dst_low) >> shift_amt
2076 * The indirect shift will use the same logic at runtime.
2078 static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
2083 if (shift_amt < 32) {
2084 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
2085 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
2086 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
2087 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
2088 } else if (shift_amt == 32) {
2089 wrp_reg_mov(nfp_prog, dst, dst + 1);
2090 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2091 } else if (shift_amt > 32) {
2092 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2093 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
2094 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2100 static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2102 const struct bpf_insn *insn = &meta->insn;
2103 u8 dst = insn->dst_reg * 2;
2105 return __shr_imm64(nfp_prog, dst, insn->imm);
2108 /* NOTE: for indirect right shift, LOW part should be calculated first. */
2109 static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2111 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2112 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
2113 reg_b(dst + 1), SHF_SC_R_SHF);
2116 static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2118 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2119 emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
2120 reg_b(dst), SHF_SC_R_DSHF);
2123 static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2125 shr_reg64_lt32_low(nfp_prog, dst, src);
2126 shr_reg64_lt32_high(nfp_prog, dst, src);
2129 static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2131 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2132 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2133 reg_b(dst + 1), SHF_SC_R_SHF);
2134 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2137 static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2139 const struct bpf_insn *insn = &meta->insn;
2143 dst = insn->dst_reg * 2;
2144 umin = meta->umin_src;
2145 umax = meta->umax_src;
2147 return __shr_imm64(nfp_prog, dst, umin);
2149 src = insn->src_reg * 2;
2151 shr_reg64_lt32(nfp_prog, dst, src);
2152 } else if (umin >= 32) {
2153 shr_reg64_ge32(nfp_prog, dst, src);
2155 /* Generate different instruction sequences depending on runtime
2156 * value of shift amount.
2158 u16 label_ge32, label_end;
2160 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
2161 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2162 shr_reg64_lt32_low(nfp_prog, dst, src);
2163 label_end = nfp_prog_current_offset(nfp_prog) + 6;
2164 emit_br(nfp_prog, BR_UNC, label_end, 2);
2165 /* shr_reg64_lt32_high packed in delay slot. */
2166 shr_reg64_lt32_high(nfp_prog, dst, src);
2168 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2170 shr_reg64_ge32(nfp_prog, dst, src);
2172 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2179 /* Code logic is the same as __shr_imm64, except ashr requires the
2180  * signedness bit, supplied through the PREV_ALU result.
2182 static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
2187 if (shift_amt < 32) {
2188 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
2189 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
2190 /* Set signedness bit. */
2191 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
2193 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2194 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
2195 } else if (shift_amt == 32) {
2196 /* NOTE: this also helps setting signedness bit. */
2197 wrp_reg_mov(nfp_prog, dst, dst + 1);
2198 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2199 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2200 } else if (shift_amt > 32) {
2201 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
2203 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2204 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
2205 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2206 reg_b(dst + 1), SHF_SC_R_SHF, 31);
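/* Illustrative example: for shift_amt == 40 this emits
 * dst = (s32)dst_hi >> 8 and then dst_hi = (s32)dst_hi >> 31, so the
 * high word collapses to copies of the sign bit.
 */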
2212 static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2214 const struct bpf_insn *insn = &meta->insn;
2215 u8 dst = insn->dst_reg * 2;
2217 return __ashr_imm64(nfp_prog, dst, insn->imm);
2220 static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2222 /* NOTE: the first insn will set both indirect shift amount (source A)
2223 * and signedness bit (MSB of result).
2225 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
2226 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2227 reg_b(dst + 1), SHF_SC_R_SHF);
2230 static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2232 /* NOTE: this is the same as the logical shift because we don't need to
2233 * shift in the signedness bit when the shift amount is less than 32.
2235 return shr_reg64_lt32_low(nfp_prog, dst, src);
2238 static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2240 ashr_reg64_lt32_low(nfp_prog, dst, src);
2241 ashr_reg64_lt32_high(nfp_prog, dst, src);
2244 static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2246 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
2247 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2248 reg_b(dst + 1), SHF_SC_R_SHF);
2249 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2250 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2253 /* Like ashr_imm64, but need to use indirect shift. */
2254 static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2256 const struct bpf_insn *insn = &meta->insn;
2260 dst = insn->dst_reg * 2;
2261 umin = meta->umin_src;
2262 umax = meta->umax_src;
2264 return __ashr_imm64(nfp_prog, dst, umin);
2266 src = insn->src_reg * 2;
2268 ashr_reg64_lt32(nfp_prog, dst, src);
2269 } else if (umin >= 32) {
2270 ashr_reg64_ge32(nfp_prog, dst, src);
2272 u16 label_ge32, label_end;
2274 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
2275 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2276 ashr_reg64_lt32_low(nfp_prog, dst, src);
2277 label_end = nfp_prog_current_offset(nfp_prog) + 6;
2278 emit_br(nfp_prog, BR_UNC, label_end, 2);
2279 /* ashr_reg64_lt32_high packed in delay slot. */
2280 ashr_reg64_lt32_high(nfp_prog, dst, src);
2282 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2284 ashr_reg64_ge32(nfp_prog, dst, src);
2286 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2293 static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2295 const struct bpf_insn *insn = &meta->insn;
2297 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
2298 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2303 static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2305 const struct bpf_insn *insn = &meta->insn;
2307 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
2308 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2313 static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2315 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
2318 static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2320 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR);
2323 static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2325 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
2328 static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2330 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND);
2333 static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2335 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
2338 static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2340 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR);
2343 static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2345 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
2348 static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2350 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD);
2353 static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2355 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
2358 static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2360 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB);
2363 static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2365 return wrp_mul(nfp_prog, meta, false, true);
2368 static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2370 return wrp_mul(nfp_prog, meta, false, false);
2373 static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2375 return div_reg64(nfp_prog, meta);
2378 static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2380 return div_imm64(nfp_prog, meta);
2383 static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2385 u8 dst = meta->insn.dst_reg * 2;
2387 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
2388 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
2393 static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
2396 /* Set signedness bit (MSB of result). */
2397 emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR,
2399 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2400 reg_b(dst), SHF_SC_R_SHF, shift_amt);
2402 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2407 static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2409 const struct bpf_insn *insn = &meta->insn;
2413 dst = insn->dst_reg * 2;
2414 umin = meta->umin_src;
2415 umax = meta->umax_src;
2417 return __ashr_imm(nfp_prog, dst, umin);
2419 src = insn->src_reg * 2;
2420 /* NOTE: the first insn will set both indirect shift amount (source A)
2421 * and signedness bit (MSB of result).
2423 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst));
2424 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2425 reg_b(dst), SHF_SC_R_SHF);
2426 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2431 static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2433 const struct bpf_insn *insn = &meta->insn;
2434 u8 dst = insn->dst_reg * 2;
2436 return __ashr_imm(nfp_prog, dst, insn->imm);
2439 static int __shr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
2442 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2443 reg_b(dst), SHF_SC_R_SHF, shift_amt);
2444 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2448 static int shr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2450 const struct bpf_insn *insn = &meta->insn;
2451 u8 dst = insn->dst_reg * 2;
2453 return __shr_imm(nfp_prog, dst, insn->imm);
2456 static int shr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2458 const struct bpf_insn *insn = &meta->insn;
2462 dst = insn->dst_reg * 2;
2463 umin = meta->umin_src;
2464 umax = meta->umax_src;
2466 return __shr_imm(nfp_prog, dst, umin);
2468 src = insn->src_reg * 2;
2469 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2470 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2471 reg_b(dst), SHF_SC_R_SHF);
2472 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2476 static int __shl_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
2479 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2480 reg_b(dst), SHF_SC_L_SHF, shift_amt);
2481 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2485 static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2487 const struct bpf_insn *insn = &meta->insn;
2488 u8 dst = insn->dst_reg * 2;
2490 return __shl_imm(nfp_prog, dst, insn->imm);
2493 static int shl_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2495 const struct bpf_insn *insn = &meta->insn;
2499 dst = insn->dst_reg * 2;
2500 umin = meta->umin_src;
2501 umax = meta->umax_src;
2503 return __shl_imm(nfp_prog, dst, umin);
2505 src = insn->src_reg * 2;
2506 shl_reg64_lt32_low(nfp_prog, dst, src);
2507 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2511 static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2513 const struct bpf_insn *insn = &meta->insn;
2514 u8 gpr = insn->dst_reg * 2;
2516 switch (insn->imm) {
2518 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
2520 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
2523 wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
2526 wrp_end32(nfp_prog, reg_a(gpr), gpr);
2527 wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
2530 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));
2532 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
2533 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
2540 static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2542 struct nfp_insn_meta *prev = nfp_meta_prev(meta);
2546 dst = prev->insn.dst_reg * 2;
2547 imm_lo = prev->insn.imm;
2548 imm_hi = meta->insn.imm;
2550 wrp_immed(nfp_prog, reg_both(dst), imm_lo);
2552 /* mov is always 1 insn, load imm may be two, so try to use mov */
2553 if (imm_hi == imm_lo)
2554 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
2556 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);
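/* For example, the (hypothetical) immediate 0x1234567812345678 has
 * imm_hi == imm_lo, so the high word is produced by a single mov from
 * the low word instead of a second, possibly two-instruction, immed load.
 */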
2561 static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2563 meta->double_cb = imm_ld8_part2;
2567 static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2569 return construct_data_ld(nfp_prog, meta->insn.imm, 1);
2572 static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2574 return construct_data_ld(nfp_prog, meta->insn.imm, 2);
2577 static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2579 return construct_data_ld(nfp_prog, meta->insn.imm, 4);
2582 static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2584 return construct_data_ind_ld(nfp_prog, meta->insn.imm,
2585 meta->insn.src_reg * 2, 1);
2588 static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2590 return construct_data_ind_ld(nfp_prog, meta->insn.imm,
2591 meta->insn.src_reg * 2, 2);
2594 static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2596 return construct_data_ind_ld(nfp_prog, meta->insn.imm,
2597 meta->insn.src_reg * 2, 4);
2601 mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2602 unsigned int size, unsigned int ptr_off)
2604 return mem_op_stack(nfp_prog, meta, size, ptr_off,
2605 meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
2606 true, wrp_lmem_load);
2609 static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2612 swreg dst = reg_both(meta->insn.dst_reg * 2);
2614 switch (meta->insn.off) {
2615 case offsetof(struct __sk_buff, len):
2616 if (size != FIELD_SIZEOF(struct __sk_buff, len))
2618 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
2620 case offsetof(struct __sk_buff, data):
2621 if (size != FIELD_SIZEOF(struct __sk_buff, data))
2623 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
2625 case offsetof(struct __sk_buff, data_end):
2626 if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
2628 emit_alu(nfp_prog, dst,
2629 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
2635 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
2640 static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2643 swreg dst = reg_both(meta->insn.dst_reg * 2);
2645 switch (meta->insn.off) {
2646 case offsetof(struct xdp_md, data):
2647 if (size != FIELD_SIZEOF(struct xdp_md, data))
2649 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
2651 case offsetof(struct xdp_md, data_end):
2652 if (size != FIELD_SIZEOF(struct xdp_md, data_end))
2654 emit_alu(nfp_prog, dst,
2655 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
2661 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
2667 mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2672 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2674 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
2675 tmp_reg, meta->insn.dst_reg * 2, size);
2679 mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2684 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2686 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
2687 tmp_reg, meta->insn.dst_reg * 2, size);
2691 mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog,
2692 struct nfp_insn_meta *meta)
2694 s16 range_start = meta->pkt_cache.range_start;
2695 s16 range_end = meta->pkt_cache.range_end;
2696 swreg src_base, off;
2700 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog));
2701 src_base = reg_a(meta->insn.src_reg * 2);
2702 len = range_end - range_start;
2703 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH;
2705 indir = len > 8 * REG_WIDTH;
2706 /* Set up PREV_ALU for indirect mode. */
2708 wrp_immed(nfp_prog, reg_none(),
2709 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
2711 /* Cache memory into transfer-in registers. */
2712 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
2713 off, xfer_num - 1, CMD_CTX_SWAP, indir);
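/* Worked example, assuming REG_WIDTH == 4: an 18 byte cached range
 * needs xfer_num == 5 transfer registers and fits the direct encoding;
 * a 40 byte range needs xfer_num == 10 > 8, so the length must be
 * passed indirectly through PREV_ALU.
 */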
2717 mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
2718 struct nfp_insn_meta *meta,
2721 s16 range_start = meta->pkt_cache.range_start;
2722 s16 insn_off = meta->insn.off - range_start;
2723 swreg dst_lo, dst_hi, src_lo, src_mid;
2724 u8 dst_gpr = meta->insn.dst_reg * 2;
2725 u8 len_lo = size, len_mid = 0;
2726 u8 idx = insn_off / REG_WIDTH;
2727 u8 off = insn_off % REG_WIDTH;
2729 dst_hi = reg_both(dst_gpr + 1);
2730 dst_lo = reg_both(dst_gpr);
2731 src_lo = reg_xfer(idx);
2733 /* The read could span as many as three transfer registers. */
2734 if (size > REG_WIDTH - off) {
2735 /* Calculate the part in the second register. */
2736 len_lo = REG_WIDTH - off;
2737 len_mid = size - len_lo;
2739 /* Calculate the part in the third register. */
2740 if (size > 2 * REG_WIDTH - off)
2741 len_mid = REG_WIDTH;
2744 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);
2747 wrp_immed(nfp_prog, dst_hi, 0);
2751 src_mid = reg_xfer(idx + 1);
2753 if (size <= REG_WIDTH) {
2754 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
2755 wrp_immed(nfp_prog, dst_hi, 0);
2757 swreg src_hi = reg_xfer(idx + 2);
2759 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid,
2760 REG_WIDTH - len_lo, len_lo);
2761 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo,
2762 REG_WIDTH - len_lo);
2763 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo,
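/* Worked example, assuming REG_WIDTH == 4: an 8 byte read at off == 3
 * takes 1 byte from src_lo, 4 bytes from src_mid (3 into dst_lo, 1 into
 * dst_hi) and 3 bytes from src_hi - all three transfer registers.
 */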
2771 mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
2772 struct nfp_insn_meta *meta,
2775 swreg dst_lo, dst_hi, src_lo;
2778 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH;
2779 dst_gpr = meta->insn.dst_reg * 2;
2780 dst_hi = reg_both(dst_gpr + 1);
2781 dst_lo = reg_both(dst_gpr);
2782 src_lo = reg_xfer(idx);
2784 if (size < REG_WIDTH) {
2785 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
2786 wrp_immed(nfp_prog, dst_hi, 0);
2787 } else if (size == REG_WIDTH) {
2788 wrp_mov(nfp_prog, dst_lo, src_lo);
2789 wrp_immed(nfp_prog, dst_hi, 0);
2791 swreg src_hi = reg_xfer(idx + 1);
2793 wrp_mov(nfp_prog, dst_lo, src_lo);
2794 wrp_mov(nfp_prog, dst_hi, src_hi);
2801 mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog,
2802 struct nfp_insn_meta *meta, unsigned int size)
2804 u8 off = meta->insn.off - meta->pkt_cache.range_start;
2806 if (IS_ALIGNED(off, REG_WIDTH))
2807 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size);
2809 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size);
2813 mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2816 if (meta->ldst_gather_len)
2817 return nfp_cpp_memcpy(nfp_prog, meta);
2819 if (meta->ptr.type == PTR_TO_CTX) {
2820 if (nfp_prog->type == BPF_PROG_TYPE_XDP)
2821 return mem_ldx_xdp(nfp_prog, meta, size);
2823 return mem_ldx_skb(nfp_prog, meta, size);
2826 if (meta->ptr.type == PTR_TO_PACKET) {
2827 if (meta->pkt_cache.range_end) {
2828 if (meta->pkt_cache.do_init)
2829 mem_ldx_data_init_pktcache(nfp_prog, meta);
2831 return mem_ldx_data_from_pktcache(nfp_prog, meta, size);
2833 return mem_ldx_data(nfp_prog, meta, size);
2837 if (meta->ptr.type == PTR_TO_STACK)
2838 return mem_ldx_stack(nfp_prog, meta, size,
2839 meta->ptr.off + meta->ptr.var_off.value);
2841 if (meta->ptr.type == PTR_TO_MAP_VALUE)
2842 return mem_ldx_emem(nfp_prog, meta, size);
2847 static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2849 return mem_ldx(nfp_prog, meta, 1);
2852 static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2854 return mem_ldx(nfp_prog, meta, 2);
2857 static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2859 return mem_ldx(nfp_prog, meta, 4);
2862 static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2864 return mem_ldx(nfp_prog, meta, 8);
2868 mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2871 u64 imm = meta->insn.imm; /* sign extend */
2874 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2876 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
2880 static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2883 if (meta->ptr.type == PTR_TO_PACKET)
2884 return mem_st_data(nfp_prog, meta, size);
2889 static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2891 return mem_st(nfp_prog, meta, 1);
2894 static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2896 return mem_st(nfp_prog, meta, 2);
2899 static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2901 return mem_st(nfp_prog, meta, 4);
2904 static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2906 return mem_st(nfp_prog, meta, 8);
2910 mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2915 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2917 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
2918 meta->insn.src_reg * 2, size);
2922 mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2923 unsigned int size, unsigned int ptr_off)
2925 return mem_op_stack(nfp_prog, meta, size, ptr_off,
2926 meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
2927 false, wrp_lmem_store);
2930 static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2932 switch (meta->insn.off) {
2933 case offsetof(struct xdp_md, rx_queue_index):
2934 return nfp_queue_select(nfp_prog, meta);
2937 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */
2942 mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2945 if (meta->ptr.type == PTR_TO_PACKET)
2946 return mem_stx_data(nfp_prog, meta, size);
2948 if (meta->ptr.type == PTR_TO_STACK)
2949 return mem_stx_stack(nfp_prog, meta, size,
2950 meta->ptr.off + meta->ptr.var_off.value);
2955 static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2957 return mem_stx(nfp_prog, meta, 1);
2960 static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2962 return mem_stx(nfp_prog, meta, 2);
2965 static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2967 if (meta->ptr.type == PTR_TO_CTX)
2968 if (nfp_prog->type == BPF_PROG_TYPE_XDP)
2969 return mem_stx_xdp(nfp_prog, meta);
2970 return mem_stx(nfp_prog, meta, 4);
2973 static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2975 return mem_stx(nfp_prog, meta, 8);
2979 mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
2981 u8 dst_gpr = meta->insn.dst_reg * 2;
2982 u8 src_gpr = meta->insn.src_reg * 2;
2983 unsigned int full_add, out;
2984 swreg addra, addrb, off;
2986 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2988 /* We can fit 16 bits into the command immediate. If we know the immediate
2989 * is guaranteed to either always or never fit into 16 bits, we only
2990 * generate code to handle that particular case, otherwise we generate code for both. */
2993 out = nfp_prog_current_offset(nfp_prog);
2994 full_add = nfp_prog_current_offset(nfp_prog);
2996 if (meta->insn.off) {
3000 if (meta->xadd_maybe_16bit) {
3004 if (meta->xadd_over_16bit)
3006 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
3011 /* Generate the branch for choosing add_imm vs add */
3012 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
3013 swreg max_imm = imm_a(nfp_prog);
3015 wrp_immed(nfp_prog, max_imm, 0xffff);
3016 emit_alu(nfp_prog, reg_none(),
3017 max_imm, ALU_OP_SUB, reg_b(src_gpr));
3018 emit_alu(nfp_prog, reg_none(),
3019 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1));
3020 emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0);
3024 /* If the insn has an offset, add it to the address */
3025 if (!meta->insn.off) {
3026 addra = reg_a(dst_gpr);
3027 addrb = reg_b(dst_gpr + 1);
3029 emit_alu(nfp_prog, imma_a(nfp_prog),
3030 reg_a(dst_gpr), ALU_OP_ADD, off);
3031 emit_alu(nfp_prog, imma_b(nfp_prog),
3032 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0));
3033 addra = imma_a(nfp_prog);
3034 addrb = imma_b(nfp_prog);
3037 /* Generate the add_imm if 16 bits are possible */
3038 if (meta->xadd_maybe_16bit) {
3039 swreg prev_alu = imm_a(nfp_prog);
3041 wrp_immed(nfp_prog, prev_alu,
3042 FIELD_PREP(CMD_OVE_DATA, 2) |
3044 FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2));
3045 wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2);
3046 emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0,
3047 addra, addrb, 0, CMD_CTX_NO_SWAP);
3049 if (meta->xadd_over_16bit)
3050 emit_br(nfp_prog, BR_UNC, out, 0);
3053 if (!nfp_prog_confirm_current_offset(nfp_prog, full_add))
3056 /* Generate the add if 16 bits are not guaranteed */
3057 if (meta->xadd_over_16bit) {
3058 emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0,
3059 addra, addrb, is64 << 2,
3060 is64 ? CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1);
3062 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr));
3064 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1));
3067 if (!nfp_prog_confirm_current_offset(nfp_prog, out))
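/* A sketch of the worst case layout, when the immediate may or may not
 * fit into 16 bits:
 *
 *             imm > 0xffff ? br full_add (address add in defer slots)
 *             add_imm command
 *             br out
 *   full_add: add command (value movs in defer slots)
 *   out:
 */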
3073 static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3075 return mem_xadd(nfp_prog, meta, false);
3078 static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3080 return mem_xadd(nfp_prog, meta, true);
3083 static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3085 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
3090 static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3092 const struct bpf_insn *insn = &meta->insn;
3093 u64 imm = insn->imm; /* sign extend */
3094 swreg or1, or2, tmp_reg;
3096 or1 = reg_a(insn->dst_reg * 2);
3097 or2 = reg_b(insn->dst_reg * 2 + 1);
3100 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3101 emit_alu(nfp_prog, imm_a(nfp_prog),
3102 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
3103 or1 = imm_a(nfp_prog);
3107 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
3108 emit_alu(nfp_prog, imm_b(nfp_prog),
3109 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
3110 or2 = imm_b(nfp_prog);
3113 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
3114 emit_br(nfp_prog, BR_BEQ, insn->off, 0);
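/* A sketch of the emitted logic: each 32-bit half of dst is XORed with
 * the matching half of the immediate (or used as-is when that half of
 * the immediate is zero, since x ^ 0 == x), the two results are ORed,
 * and BR_BEQ fires when the combined result is zero.
 */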
3119 static int jeq32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3121 const struct bpf_insn *insn = &meta->insn;
3124 tmp_reg = ur_load_imm_any(nfp_prog, insn->imm, imm_b(nfp_prog));
3125 emit_alu(nfp_prog, reg_none(),
3126 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
3127 emit_br(nfp_prog, BR_BEQ, insn->off, 0);
3132 static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3134 const struct bpf_insn *insn = &meta->insn;
3135 u64 imm = insn->imm; /* sign extend */
3136 u8 dst_gpr = insn->dst_reg * 2;
3139 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3140 emit_alu(nfp_prog, imm_b(nfp_prog),
3141 reg_a(dst_gpr), ALU_OP_AND, tmp_reg);
3142 /* Upper word of the mask can only be 0 or ~0 from sign extension,
3143 * so either ignore it or OR the whole thing in.
3145 if (is_mbpf_jmp64(meta) && imm >> 32) {
3146 emit_alu(nfp_prog, reg_none(),
3147 reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog));
3149 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3154 static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3156 const struct bpf_insn *insn = &meta->insn;
3157 u64 imm = insn->imm; /* sign extend */
3158 bool is_jmp32 = is_mbpf_jmp32(meta);
3163 emit_alu(nfp_prog, reg_none(), reg_none(), ALU_OP_NONE,
3164 reg_b(insn->dst_reg * 2));
3166 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
3167 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
3168 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3172 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3173 emit_alu(nfp_prog, reg_none(),
3174 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
3175 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3180 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
3181 emit_alu(nfp_prog, reg_none(),
3182 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
3183 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3188 static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3190 const struct bpf_insn *insn = &meta->insn;
3192 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
3193 ALU_OP_XOR, reg_b(insn->src_reg * 2));
3194 if (is_mbpf_jmp64(meta)) {
3195 emit_alu(nfp_prog, imm_b(nfp_prog),
3196 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR,
3197 reg_b(insn->src_reg * 2 + 1));
3198 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR,
3201 emit_br(nfp_prog, BR_BEQ, insn->off, 0);
3206 static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3208 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
3211 static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3213 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
3217 bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3219 u32 ret_tgt, stack_depth, offset_br;
3222 stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN);
3223 /* Space for saving the return address is accounted for by the callee,
3224 * so stack_depth can be zero for the main function.
3227 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
3228 stack_imm(nfp_prog));
3229 emit_alu(nfp_prog, stack_reg(nfp_prog),
3230 stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg);
3231 emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
3232 NFP_CSR_ACT_LM_ADDR0);
3235 /* Two cases for jumping to the callee:
3237 * - If callee uses and needs to save R6~R9 then:
3238 * 1. Put the start offset of the callee into imm_b(). This will
3239 * require a fixup step, as we do not necessarily know this
3241 * 2. Put the return address from the callee to the caller into
3242 * register ret_reg().
3243 * 3. (After defer slots are consumed) Jump to the subroutine that
3244 * pushes the registers to the stack.
3245 * The subroutine acts as a trampoline, and returns to the address in
3246 * imm_b(), i.e. jumps to the callee.
3248 * - If callee does not need to save R6~R9 then just load return
3249 * address to the caller in ret_reg(), and jump to the callee
3252 * Using ret_reg() to pass the return address to the callee is a
3253 * convention set here. The callee can then push this address onto its
3254 * stack frame in its prologue. The advantages of passing the return
3255 * address through ret_reg(), instead of pushing it to the stack right
3256 * here, are the following:
3257 * - It looks cleaner.
3258 * - If the called function is called multiple times, we get lower stack usage.
3260 * - We save two no-op instructions that would otherwise have to be added
3261 * just before the emit_br() when the stack depth is non-zero.
3262 * - If we ever find a register to hold the return address during whole
3263 * execution of the callee, we will not have to push the return
3264 * address to the stack for leaf functions.
3266 if (!meta->jmp_dst) {
3267 pr_err("BUG: BPF-to-BPF call has no destination recorded\n");
3270 if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) {
3271 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
3272 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2,
3273 RELO_BR_GO_CALL_PUSH_REGS);
3274 offset_br = nfp_prog_current_offset(nfp_prog);
3275 wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
3277 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
3278 emit_br(nfp_prog, BR_UNC, meta->insn.imm, 1);
3279 offset_br = nfp_prog_current_offset(nfp_prog);
3281 wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
3283 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
3287 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
3288 stack_imm(nfp_prog));
3289 emit_alu(nfp_prog, stack_reg(nfp_prog),
3290 stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
3291 emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
3292 NFP_CSR_ACT_LM_ADDR0);
3293 wrp_nops(nfp_prog, 3);
3296 meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog);
3297 meta->num_insns_after_br -= offset_br;
3302 static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3304 switch (meta->insn.imm) {
3305 case BPF_FUNC_xdp_adjust_head:
3306 return adjust_head(nfp_prog, meta);
3307 case BPF_FUNC_xdp_adjust_tail:
3308 return adjust_tail(nfp_prog, meta);
3309 case BPF_FUNC_map_lookup_elem:
3310 case BPF_FUNC_map_update_elem:
3311 case BPF_FUNC_map_delete_elem:
3312 return map_call_stack_common(nfp_prog, meta);
3313 case BPF_FUNC_get_prandom_u32:
3314 return nfp_get_prandom_u32(nfp_prog, meta);
3315 case BPF_FUNC_perf_event_output:
3316 return nfp_perf_event_output(nfp_prog, meta);
3318 WARN_ONCE(1, "verifier allowed unsupported function\n");
3323 static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3325 if (is_mbpf_pseudo_call(meta))
3326 return bpf_to_bpf_call(nfp_prog, meta);
3328 return helper_call(nfp_prog, meta);
3331 static bool nfp_is_main_function(struct nfp_insn_meta *meta)
3333 return meta->subprog_idx == 0;
3336 static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3338 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
3344 nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3346 if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) {
3347 /* Pop R6~R9 from the stack via the related subroutine.
3348 * We loaded the return address to the caller into ret_reg().
3349 * This means that the subroutine does not come back here; we
3350 * make it jump back to the subprogram caller directly!
3352 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1,
3353 RELO_BR_GO_CALL_POP_REGS);
3354 /* Pop return address from the stack. */
3355 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
3357 /* Pop return address from the stack. */
3358 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
3359 /* Jump back to caller if no callee-saved registers were used
3360 * by the subprogram.
3362 emit_rtn(nfp_prog, ret_reg(nfp_prog), 0);
3368 static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3370 if (nfp_is_main_function(meta))
3371 return goto_out(nfp_prog, meta);
3373 return nfp_subprog_epilogue(nfp_prog, meta);
3376 static const instr_cb_t instr_cb[256] = {
3377 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
3378 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
3379 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64,
3380 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64,
3381 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64,
3382 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64,
3383 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64,
3384 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64,
3385 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64,
3386 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
3387 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
3388 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
3389 [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64,
3390 [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64,
3391 [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64,
3392 [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64,
3393 [BPF_ALU64 | BPF_NEG] = neg_reg64,
3394 [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64,
3395 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
3396 [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64,
3397 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
3398 [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64,
3399 [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64,
3400 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
3401 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm,
3402 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg,
3403 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm,
3404 [BPF_ALU | BPF_AND | BPF_X] = and_reg,
3405 [BPF_ALU | BPF_AND | BPF_K] = and_imm,
3406 [BPF_ALU | BPF_OR | BPF_X] = or_reg,
3407 [BPF_ALU | BPF_OR | BPF_K] = or_imm,
3408 [BPF_ALU | BPF_ADD | BPF_X] = add_reg,
3409 [BPF_ALU | BPF_ADD | BPF_K] = add_imm,
3410 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
3411 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
3412 [BPF_ALU | BPF_MUL | BPF_X] = mul_reg,
3413 [BPF_ALU | BPF_MUL | BPF_K] = mul_imm,
3414 [BPF_ALU | BPF_DIV | BPF_X] = div_reg,
3415 [BPF_ALU | BPF_DIV | BPF_K] = div_imm,
3416 [BPF_ALU | BPF_NEG] = neg_reg,
3417 [BPF_ALU | BPF_LSH | BPF_X] = shl_reg,
3418 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
3419 [BPF_ALU | BPF_RSH | BPF_X] = shr_reg,
3420 [BPF_ALU | BPF_RSH | BPF_K] = shr_imm,
3421 [BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg,
3422 [BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm,
3423 [BPF_ALU | BPF_END | BPF_X] = end_reg32,
3424 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
3425 [BPF_LD | BPF_ABS | BPF_B] = data_ld1,
3426 [BPF_LD | BPF_ABS | BPF_H] = data_ld2,
3427 [BPF_LD | BPF_ABS | BPF_W] = data_ld4,
3428 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
3429 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
3430 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
3431 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1,
3432 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2,
3433 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
3434 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8,
3435 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1,
3436 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
3437 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
3438 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
3439 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4,
3440 [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
3441 [BPF_ST | BPF_MEM | BPF_B] = mem_st1,
3442 [BPF_ST | BPF_MEM | BPF_H] = mem_st2,
3443 [BPF_ST | BPF_MEM | BPF_W] = mem_st4,
3444 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
3445 [BPF_JMP | BPF_JA | BPF_K] = jump,
3446 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
3447 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm,
3448 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm,
3449 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm,
3450 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm,
3451 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm,
3452 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm,
3453 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm,
3454 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm,
3455 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
3456 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
3457 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
3458 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg,
3459 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg,
3460 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg,
3461 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg,
3462 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg,
3463 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg,
3464 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg,
3465 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
3466 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
3467 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
3468 [BPF_JMP32 | BPF_JEQ | BPF_K] = jeq32_imm,
3469 [BPF_JMP32 | BPF_JGT | BPF_K] = cmp_imm,
3470 [BPF_JMP32 | BPF_JGE | BPF_K] = cmp_imm,
3471 [BPF_JMP32 | BPF_JLT | BPF_K] = cmp_imm,
3472 [BPF_JMP32 | BPF_JLE | BPF_K] = cmp_imm,
3473 [BPF_JMP32 | BPF_JSGT | BPF_K] = cmp_imm,
3474 [BPF_JMP32 | BPF_JSGE | BPF_K] = cmp_imm,
3475 [BPF_JMP32 | BPF_JSLT | BPF_K] = cmp_imm,
3476 [BPF_JMP32 | BPF_JSLE | BPF_K] = cmp_imm,
3477 [BPF_JMP32 | BPF_JSET | BPF_K] = jset_imm,
3478 [BPF_JMP32 | BPF_JNE | BPF_K] = jne_imm,
3479 [BPF_JMP32 | BPF_JEQ | BPF_X] = jeq_reg,
3480 [BPF_JMP32 | BPF_JGT | BPF_X] = cmp_reg,
3481 [BPF_JMP32 | BPF_JGE | BPF_X] = cmp_reg,
3482 [BPF_JMP32 | BPF_JLT | BPF_X] = cmp_reg,
3483 [BPF_JMP32 | BPF_JLE | BPF_X] = cmp_reg,
3484 [BPF_JMP32 | BPF_JSGT | BPF_X] = cmp_reg,
3485 [BPF_JMP32 | BPF_JSGE | BPF_X] = cmp_reg,
3486 [BPF_JMP32 | BPF_JSLT | BPF_X] = cmp_reg,
3487 [BPF_JMP32 | BPF_JSLE | BPF_X] = cmp_reg,
3488 [BPF_JMP32 | BPF_JSET | BPF_X] = jset_reg,
3489 [BPF_JMP32 | BPF_JNE | BPF_X] = jne_reg,
3490 [BPF_JMP | BPF_CALL] = call,
3491 [BPF_JMP | BPF_EXIT] = jmp_exit,
3494 /* --- Assembler logic --- */
3496 nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
3497 struct nfp_insn_meta *jmp_dst, u32 br_idx)
3499 if (immed_get_value(nfp_prog->prog[br_idx + 1])) {
3500 pr_err("BUG: failed to fix up callee register saving\n");
3504 immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off);
3509 static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
3511 struct nfp_insn_meta *meta, *jmp_dst;
3515 list_for_each_entry(meta, &nfp_prog->insns, l) {
3516 if (meta->flags & FLAG_INSN_SKIP_MASK)
3518 if (!is_mbpf_jmp(meta))
3520 if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
3521 !nfp_is_main_function(meta))
3523 if (is_mbpf_helper_call(meta))
3526 if (list_is_last(&meta->l, &nfp_prog->insns))
3527 br_idx = nfp_prog->last_bpf_off;
3529 br_idx = list_next_entry(meta, l)->off - 1;
3531 /* For BPF-to-BPF function call, a stack adjustment sequence is
3532 * generated after the return instruction. Therefore, we must
3533 * subtract the length of this sequence so that br_idx points
3534 * to where the "branch" NFP instruction is expected to be.
3536 if (is_mbpf_pseudo_call(meta))
3537 br_idx -= meta->num_insns_after_br;
3539 if (!nfp_is_br(nfp_prog->prog[br_idx])) {
3540 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
3541 br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
3545 if (meta->insn.code == (BPF_JMP | BPF_EXIT))
3548 /* Leave special branches for later */
3549 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
3550 RELO_BR_REL && !is_mbpf_pseudo_call(meta))
3553 if (!meta->jmp_dst) {
3554 pr_err("Non-exit jump doesn't have destination info recorded!!\n");
3558 jmp_dst = meta->jmp_dst;
3560 if (jmp_dst->flags & FLAG_INSN_SKIP_PREC_DEPENDENT) {
3561 pr_err("Branch landing on removed instruction!!\n");
3565 if (is_mbpf_pseudo_call(meta) &&
3566 nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) {
3567 err = nfp_fixup_immed_relo(nfp_prog, meta,
3573 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
3577 for (idx = meta->off; idx <= br_idx; idx++) {
3578 if (!nfp_is_br(nfp_prog->prog[idx]))
3580 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
3587 static void nfp_intro(struct nfp_prog *nfp_prog)
3589 wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
3590 emit_alu(nfp_prog, plen_reg(nfp_prog),
3591 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
3595 nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3597 /* Save return address into the stack. */
3598 wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog));
3602 nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3604 unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth;
3606 nfp_prog->stack_frame_depth = round_up(depth, 4);
3607 nfp_subprog_prologue(nfp_prog, meta);
3610 bool nfp_is_subprog_start(struct nfp_insn_meta *meta)
3612 return meta->flags & FLAG_INSN_IS_SUBPROG_START;
3615 static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
3617 /* TC direct-action mode:
3618 * 0,1 ok NOT SUPPORTED[1]
3619 * 2 drop 0x22 -> drop, count as stat1
3620 * 4,5 nuke 0x02 -> drop
3621 * 7 redir 0x44 -> redir, count as stat2
3622 * * unspec 0x11 -> pass, count as stat0
3624 * [1] We can't support OK and RECLASSIFY because we can't tell TC
3625 * the exact decision made. We are forced to support UNSPEC
3626 * to handle aborts so that's the only one we handle for passing
3627 * packets up the stack.
3629 /* Target for aborts */
3630 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
3632 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3634 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3635 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);
3637 /* Target for normal exits */
3638 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
3640 /* if R0 > 7 jump to abort */
3641 emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
3642 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
3643 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3645 wrp_immed(nfp_prog, reg_b(2), 0x41221211);
3646 wrp_immed(nfp_prog, reg_b(3), 0x41001211);
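/* reg_b(2) and reg_b(3) act as nibble lookup tables indexed by R0
 * through the indirect shift by R0 * 4 below. E.g. for R0 == 2 (drop)
 * nibble 2 of both tables is 2, which combines into the 0x22
 * "drop, count as stat1" action byte.
 */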
3648 emit_shf(nfp_prog, reg_a(1),
3649 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);
3651 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
3652 emit_shf(nfp_prog, reg_a(2),
3653 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
3655 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
3656 emit_shf(nfp_prog, reg_b(2),
3657 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);
3659 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3661 emit_shf(nfp_prog, reg_b(2),
3662 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
3663 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
3666 static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
3668 /* XDP return codes:
3669 * 0 aborted 0x82 -> drop, count as stat3
3670 * 1 drop 0x22 -> drop, count as stat1
3671 * 2 pass 0x11 -> pass, count as stat0
3672 * 3 tx 0x44 -> redir, count as stat2
3673 * * unknown 0x82 -> drop, count as stat3
3675 /* Target for aborts */
3676 nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
3678 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3680 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3681 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
3683 /* Target for normal exits */
3684 nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
3686 /* if R0 > 3 jump to abort */
3687 emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
3688 emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
3690 wrp_immed(nfp_prog, reg_b(2), 0x44112282);
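/* 0x44112282 is a byte lookup table indexed by R0 through the indirect
 * shift by R0 * 8 below: XDP_ABORTED(0) -> 0x82, XDP_DROP(1) -> 0x22,
 * XDP_PASS(2) -> 0x11, XDP_TX(3) -> 0x44, matching the table above.
 */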
3692 emit_shf(nfp_prog, reg_a(1),
3693 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);
3695 emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
3696 emit_shf(nfp_prog, reg_b(2),
3697 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
3699 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);
3701 wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
3702 emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
3705 static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog)
3709 for (idx = 1; idx < nfp_prog->subprog_cnt; idx++)
3710 if (nfp_prog->subprog[idx].needs_reg_push)
3716 static void nfp_push_callee_registers(struct nfp_prog *nfp_prog)
3720 /* Subroutine: Save all callee saved registers (R6 ~ R9).
3721 * imm_b() holds the return address.
3723 nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog);
3724 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
3725 u8 adj = (reg - BPF_REG_0) * 2;
3726 u8 idx = (reg - BPF_REG_6) * 2;
3728 /* The first slot in the stack frame is used to push the return
3729 * address in bpf_to_bpf_call(), start just after.
3731 wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj));
3733 if (reg == BPF_REG_8)
3734 /* Prepare to jump back, last 3 insns use defer slots */
3735 emit_rtn(nfp_prog, imm_b(nfp_prog), 3);
3737 wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1));
3741 static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog)
3745 /* Subroutine: Restore all callee saved registers (R6 ~ R9).
3746 * ret_reg() holds the return address.
3748 nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog);
3749 for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
3750 u8 adj = (reg - BPF_REG_0) * 2;
3751 u8 idx = (reg - BPF_REG_6) * 2;
3753 /* The first slot in the stack frame holds the return address,
3754 * start popping just after that.
3756 wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx));
3758 if (reg == BPF_REG_8)
3759 /* Prepare to jump back, last 3 insns use defer slots */
3760 emit_rtn(nfp_prog, ret_reg(nfp_prog), 3);
3762 wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1));
3766 static void nfp_outro(struct nfp_prog *nfp_prog)
3768 switch (nfp_prog->type) {
3769 case BPF_PROG_TYPE_SCHED_CLS:
3770 nfp_outro_tc_da(nfp_prog);
3772 case BPF_PROG_TYPE_XDP:
3773 nfp_outro_xdp(nfp_prog);
3779 if (!nfp_prog_needs_callee_reg_save(nfp_prog))
3782 nfp_push_callee_registers(nfp_prog);
3783 nfp_pop_callee_registers(nfp_prog);
3786 static int nfp_translate(struct nfp_prog *nfp_prog)
3788 struct nfp_insn_meta *meta;
3792 depth = nfp_prog->subprog[0].stack_depth;
3793 nfp_prog->stack_frame_depth = round_up(depth, 4);
3795 nfp_intro(nfp_prog);
3796 if (nfp_prog->error)
3797 return nfp_prog->error;
3799 list_for_each_entry(meta, &nfp_prog->insns, l) {
3800 instr_cb_t cb = instr_cb[meta->insn.code];
3802 meta->off = nfp_prog_current_offset(nfp_prog);
3804 if (nfp_is_subprog_start(meta)) {
3805 nfp_start_subprog(nfp_prog, meta);
3806 if (nfp_prog->error)
3807 return nfp_prog->error;
3810 if (meta->flags & FLAG_INSN_SKIP_MASK) {
3811 nfp_prog->n_translated++;
3815 if (nfp_meta_has_prev(nfp_prog, meta) &&
3816 nfp_meta_prev(meta)->double_cb)
3817 cb = nfp_meta_prev(meta)->double_cb;
3820 err = cb(nfp_prog, meta);
3823 if (nfp_prog->error)
3824 return nfp_prog->error;
3826 nfp_prog->n_translated++;
3829 nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;
3831 nfp_outro(nfp_prog);
3832 if (nfp_prog->error)
3833 return nfp_prog->error;
3835 wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
3836 if (nfp_prog->error)
3837 return nfp_prog->error;
3839 return nfp_fixup_branches(nfp_prog);
3842 /* --- Optimizations --- */
3843 static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
3845 struct nfp_insn_meta *meta;
3847 list_for_each_entry(meta, &nfp_prog->insns, l) {
3848 struct bpf_insn insn = meta->insn;
3850 /* Programs converted from cBPF start with register xoring */
3851 if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
3852 insn.src_reg == insn.dst_reg)
3855 /* Programs start with R6 = R1 but we ignore the skb pointer */
3856 if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
3857 insn.src_reg == 1 && insn.dst_reg == 6)
3858 meta->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
3860 /* Return as soon as something doesn't match */
3861 if (!(meta->flags & FLAG_INSN_SKIP_MASK))
3866 /* abs(insn.imm) will fit better into an unrestricted reg immediate -
3867 * convert an add/sub of a negative number into a sub/add of a positive one.
3869 static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
3871 struct nfp_insn_meta *meta;
3873 list_for_each_entry(meta, &nfp_prog->insns, l) {
3874 struct bpf_insn insn = meta->insn;
3876 if (meta->flags & FLAG_INSN_SKIP_MASK)
3879 if (!is_mbpf_alu(meta) && !is_mbpf_jmp(meta))
3881 if (BPF_SRC(insn.code) != BPF_K)
3886 if (is_mbpf_jmp(meta)) {
3887 switch (BPF_OP(insn.code)) {
3892 meta->jump_neg_op = true;
3898 if (BPF_OP(insn.code) == BPF_ADD)
3899 insn.code = BPF_CLASS(insn.code) | BPF_SUB;
3900 else if (BPF_OP(insn.code) == BPF_SUB)
3901 insn.code = BPF_CLASS(insn.code) | BPF_ADD;
3905 meta->insn.code = insn.code | BPF_K;
3908 meta->insn.imm = -insn.imm;
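/* For example, "BPF_ALU64 | BPF_ADD | BPF_K, imm = -4" is rewritten
 * here into "BPF_ALU64 | BPF_SUB | BPF_K, imm = 4".
 */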
3912 /* Remove masking after load since our load guarantees this is not needed */
3913 static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
3915 struct nfp_insn_meta *meta1, *meta2;
3916 const s32 exp_mask[] = {
3917 [BPF_B] = 0x000000ffU,
3918 [BPF_H] = 0x0000ffffU,
3919 [BPF_W] = 0xffffffffU,
3922 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
3923 struct bpf_insn insn, next;
3928 if (BPF_CLASS(insn.code) != BPF_LD)
3930 if (BPF_MODE(insn.code) != BPF_ABS &&
3931 BPF_MODE(insn.code) != BPF_IND)
3934 if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
3937 if (!exp_mask[BPF_SIZE(insn.code)])
3939 if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
3942 if (next.src_reg || next.dst_reg)
3945 if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
3948 meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
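/* For example, a classic BPF_H load followed by "r0 &= 0xffff": the
 * load already zero-extends the 16-bit value, so the AND is marked
 * FLAG_INSN_SKIP_PREC_DEPENDENT and never translated.
 */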
3952 static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
3954 struct nfp_insn_meta *meta1, *meta2, *meta3;
3956 nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
3957 struct bpf_insn insn, next1, next2;
3960 next1 = meta2->insn;
3961 next2 = meta3->insn;
3963 if (BPF_CLASS(insn.code) != BPF_LD)
3965 if (BPF_MODE(insn.code) != BPF_ABS &&
3966 BPF_MODE(insn.code) != BPF_IND)
3968 if (BPF_SIZE(insn.code) != BPF_W)
3971 if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
3972 next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
3973 !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
3974 next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
3977 if (next1.src_reg || next1.dst_reg ||
3978 next2.src_reg || next2.dst_reg)
3981 if (next1.imm != 0x20 || next2.imm != 0x20)
3984 if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
3985 meta3->flags & FLAG_INSN_IS_JUMP_DST)
3988 meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
3989 meta3->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
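/* For example, the common zero-extension idiom "r0 <<= 32; r0 >>= 32"
 * following a BPF_W classic load is redundant, because our load already
 * clears the upper 32 bits, so both shift insns are skipped.
 */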
3993 /* A load/store pair that forms a memory copy should look like the following:
3995 * ld_width R, [addr_src + offset_src]
3996 * st_width [addr_dest + offset_dest], R
3998 * The destination register of the load and the source register of the store
3999 * should be the same, and the load and store should operate at the same width.
4000 * If either addr_src or addr_dest is the stack pointer, we don't do the
4001 * CPP optimization, as the stack is modelled by registers on the NFP.
4004 curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
4005 struct nfp_insn_meta *st_meta)
4007 struct bpf_insn *ld = &ld_meta->insn;
4008 struct bpf_insn *st = &st_meta->insn;
4010 if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
4013 if (ld_meta->ptr.type != PTR_TO_PACKET &&
4014 ld_meta->ptr.type != PTR_TO_MAP_VALUE)
4017 if (st_meta->ptr.type != PTR_TO_PACKET)
4020 if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
4023 if (ld->dst_reg != st->src_reg)
4026 /* There is a jump to the store insn in this pair. */
4027 if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
4033 /* Currently, we only support chaining load/store pairs if:
4035 * - Their address base registers are the same.
4036 * - Their address offsets are in the same order.
4037 * - They operate at the same memory width.
4038 * - There is no jump into the middle of them.
4041 curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
4042 struct nfp_insn_meta *st_meta,
4043 struct bpf_insn *prev_ld,
4044 struct bpf_insn *prev_st)
4046 u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
4047 struct bpf_insn *ld = &ld_meta->insn;
4048 struct bpf_insn *st = &st_meta->insn;
4049 s16 prev_ld_off, prev_st_off;
4051 /* This pair is the start pair. */
4055 prev_size = BPF_LDST_BYTES(prev_ld);
4056 curr_size = BPF_LDST_BYTES(ld);
4057 prev_ld_base = prev_ld->src_reg;
4058 prev_st_base = prev_st->dst_reg;
4059 prev_ld_dst = prev_ld->dst_reg;
4060 prev_ld_off = prev_ld->off;
4061 prev_st_off = prev_st->off;
4063 if (ld->dst_reg != prev_ld_dst)
4066 if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
4069 if (curr_size != prev_size)
4072 /* There is a jump to the head of this pair. */
4073 if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
4076 /* Both in ascending order. */
4077 if (prev_ld_off + prev_size == ld->off &&
4078 prev_st_off + prev_size == st->off)
4081 /* Both in descending order. */
4082 if (ld->off + curr_size == prev_ld_off &&
4083 st->off + curr_size == prev_st_off)
4089 /* Return TRUE if a cross memory access happens. A cross memory access means
4090 * the store area overlaps the load area, so a later load might read the
4091 * value from a previous store; in that case we can't treat the sequence
4092 * as a memory copy.
4095 cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
4096 struct nfp_insn_meta *head_st_meta)
4098 s16 head_ld_off, head_st_off, ld_off;
4100 /* Different pointer types do not overlap. */
4101 if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
4104 /* load and store are both PTR_TO_PACKET, check ID info. */
4105 if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
4108 /* Canonicalize the offsets. Turn all of them against the original base register. */
4111 head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
4112 head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
4113 ld_off = ld->off + head_ld_meta->ptr.off;
4115 /* Ascending order cross. */
4116 if (ld_off > head_ld_off &&
4117 head_ld_off < head_st_off && ld_off >= head_st_off)
4120 /* Descending order cross. */
4121 if (ld_off < head_ld_off &&
4122 head_ld_off > head_st_off && ld_off <= head_st_off)
4128 /* This pass tries to identify the following instruction sequences.
4130 * load R, [regA + offA]
4131 * store [regB + offB], R
4132 * load R, [regA + offA + const_imm_A]
4133 * store [regB + offB + const_imm_A], R
4134 * load R, [regA + offA + 2 * const_imm_A]
4135 * store [regB + offB + 2 * const_imm_A], R
4138 * The above sequence is typically generated by the compiler when lowering
4139 * memcpy. The NFP prefers using CPP instructions to accelerate it.
4141 static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
4143 struct nfp_insn_meta *head_ld_meta = NULL;
4144 struct nfp_insn_meta *head_st_meta = NULL;
4145 struct nfp_insn_meta *meta1, *meta2;
4146 struct bpf_insn *prev_ld = NULL;
4147 struct bpf_insn *prev_st = NULL;
4150 nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
4151 struct bpf_insn *ld = &meta1->insn;
4152 struct bpf_insn *st = &meta2->insn;
4154 /* Reset record status if any of the following is true:
4155 * - The current insn pair is not load/store.
4156 * - The load/store pair doesn't chain with previous one.
4157 * - The chained load/store pair crossed with previous pair.
4158 * - The chained load/store pair has a total memory copy size
4159 * beyond 128 bytes, which is the maximum length a
4160 * single NFP CPP command can transfer.
4162 if (!curr_pair_is_memcpy(meta1, meta2) ||
4163 !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
4165 (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
4167 head_ld_meta->ldst_gather_len >= 128))) {
4172 s16 prev_ld_off = prev_ld->off;
4173 s16 prev_st_off = prev_st->off;
4174 s16 head_ld_off = head_ld_meta->insn.off;
4176 if (prev_ld_off < head_ld_off) {
4177 head_ld_meta->insn.off = prev_ld_off;
4178 head_st_meta->insn.off = prev_st_off;
4179 head_ld_meta->ldst_gather_len =
4180 -head_ld_meta->ldst_gather_len;
4183 head_ld_meta->paired_st = &head_st_meta->insn;
4184 head_st_meta->flags |=
4185 FLAG_INSN_SKIP_PREC_DEPENDENT;
4187 head_ld_meta->ldst_gather_len = 0;
4190 /* If the chain is ended by a load/store pair then this
4191 * could serve as the new head of the next chain.
4193 if (curr_pair_is_memcpy(meta1, meta2)) {
4194 head_ld_meta = meta1;
4195 head_st_meta = meta2;
4196 head_ld_meta->ldst_gather_len =
4198 meta1 = nfp_meta_next(meta1);
4199 meta2 = nfp_meta_next(meta2);
4204 head_ld_meta = NULL;
4205 head_st_meta = NULL;
4214 if (!head_ld_meta) {
4215 head_ld_meta = meta1;
4216 head_st_meta = meta2;
4218 meta1->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
4219 meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
4222 head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
4223 meta1 = nfp_meta_next(meta1);
4224 meta2 = nfp_meta_next(meta2);
4231 static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
4233 struct nfp_insn_meta *meta, *range_node = NULL;
4234 s16 range_start = 0, range_end = 0;
4235 bool cache_avail = false;
4236 struct bpf_insn *insn;
4237 s32 range_ptr_off = 0;
4238 u32 range_ptr_id = 0;
4240 list_for_each_entry(meta, &nfp_prog->insns, l) {
4241 if (meta->flags & FLAG_INSN_IS_JUMP_DST)
4242 cache_avail = false;
4244 if (meta->flags & FLAG_INSN_SKIP_MASK)
4249 if (is_mbpf_store_pkt(meta) ||
4250 insn->code == (BPF_JMP | BPF_CALL) ||
4251 is_mbpf_classic_store_pkt(meta) ||
4252 is_mbpf_classic_load(meta)) {
4253 cache_avail = false;
4257 if (!is_mbpf_load(meta))
4260 if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
4261 cache_avail = false;
4268 goto end_current_then_start_new;
4272 /* Check ID to make sure two reads share the same
4273 * variable offset against PTR_TO_PACKET, and check OFF
4274 * to make sure they also share the same constant offset.
4277 * OFFs don't really need to be the same, because they
4278 * are the constant offsets against PTR_TO_PACKET, so
4279 * for different OFFs, we could canonicalize them to
4280 * offsets against the original packet pointer. We don't support this.
4283 if (meta->ptr.id == range_ptr_id &&
4284 meta->ptr.off == range_ptr_off) {
4285 s16 new_start = range_start;
4286 s16 end, off = insn->off;
4287 s16 new_end = range_end;
4288 bool changed = false;
4290 if (off < range_start) {
4295 end = off + BPF_LDST_BYTES(insn);
4296 if (end > range_end) {
4304 if (new_end - new_start <= 64) {
4305 /* Install new range. */
4306 range_start = new_start;
4307 range_end = new_end;
4312 end_current_then_start_new:
4313 range_node->pkt_cache.range_start = range_start;
4314 range_node->pkt_cache.range_end = range_end;
4317 range_node->pkt_cache.do_init = true;
4318 range_ptr_id = range_node->ptr.id;
4319 range_ptr_off = range_node->ptr.off;
4320 range_start = insn->off;
4321 range_end = insn->off + BPF_LDST_BYTES(insn);
4325 range_node->pkt_cache.range_start = range_start;
4326 range_node->pkt_cache.range_end = range_end;
4329 list_for_each_entry(meta, &nfp_prog->insns, l) {
4330 if (meta->flags & FLAG_INSN_SKIP_MASK)
4333 if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
4334 if (meta->pkt_cache.do_init) {
4335 range_start = meta->pkt_cache.range_start;
4336 range_end = meta->pkt_cache.range_end;
4338 meta->pkt_cache.range_start = range_start;
4339 meta->pkt_cache.range_end = range_end;
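/* Worked example (assumed offsets, added for illustration): three loads
 * through the same packet pointer at offsets 12, 16 and 20, 4 bytes
 * each, merge into the range [12, 24).  Since 24 - 12 <= 64, the first
 * load becomes the range head (pkt_cache.do_init = true) and the
 * second loop above stamps [12, 24) into the two followers, which then
 * read from the cached window instead of issuing fresh memory reads.
 */
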
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_neg_add_sub(nfp_prog);
	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);
	nfp_bpf_opt_ldst_gather(nfp_prog);
	nfp_bpf_opt_pkt_cache(nfp_prog);

	return 0;
}

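/* Note on pass ordering (added commentary): nfp_bpf_opt_ldst_gather()
 * must run before nfp_bpf_opt_pkt_cache(), since the cache pass skips
 * any load with a non-zero ldst_gather_len and relies on that field
 * being final.
 */
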
static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	struct nfp_bpf_map *nfp_map;
	struct bpf_map *map;
	u32 id;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		if (meta1->flags & FLAG_INSN_SKIP_MASK ||
		    meta2->flags & FLAG_INSN_SKIP_MASK)
			continue;

		if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
		    meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
			continue;

		map = (void *)(unsigned long)((u32)meta1->insn.imm |
					      (u64)meta2->insn.imm << 32);
		if (bpf_map_offload_neutral(map)) {
			id = map->id;
		} else {
			nfp_map = map_to_offmap(map)->dev_priv;
			id = nfp_map->tid;
		}

		meta1->insn.imm = id;
		meta2->insn.imm = 0;
	}

	return 0;
}

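/* Background sketch (added commentary): a 64-bit immediate load is the
 * only double-word BPF instruction; the map pointer arrives split
 * across the pair, low 32 bits in meta1->insn.imm and high 32 bits in
 * meta2->insn.imm.  The pass above rebuilds the host pointer just long
 * enough to look up the map, then overwrites the pair with the small
 * table ID the firmware expects:
 *
 *	before:	imm[0] = lower32(map ptr)	imm[1] = upper32(map ptr)
 *	after:	imm[0] = id			imm[1] = 0
 */
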
static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
	__le64 *ustore = (__force __le64 *)prog;
	int i;

	for (i = 0; i < len; i++) {
		int err;

		err = nfp_ustore_check_valid_no_ecc(prog[i]);
		if (err)
			return err;

		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
	}

	return 0;
}

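/* Note (added commentary): the conversion is done in place -- prog[]
 * is reinterpreted as little-endian words once the ECC bits have been
 * folded into each instruction, so callers must treat the buffer as
 * ready-to-load microcode afterwards, not as host-order instructions.
 */
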
static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
{
	void *prog;

	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
	if (!prog)
		return;

	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
	kvfree(nfp_prog->prog);
	nfp_prog->prog = prog;
}

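/* Note (added commentary): trimming is best effort -- if the smaller
 * allocation fails the function returns silently and the program
 * simply keeps its original, larger buffer.
 */
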
int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
	int ret;

	ret = nfp_bpf_replace_map_ptrs(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		return -EINVAL;
	}

	nfp_bpf_prog_trim(nfp_prog);

	return ret;
}

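/* Note (added commentary): any translation failure is reported to the
 * stack uniformly as -EINVAL; the precise error code and the count of
 * successfully translated instructions only reach the kernel log.
 */
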
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct nfp_insn_meta *dst_meta;
		u64 code = meta->insn.code;
		unsigned int dst_idx;
		bool pseudo_call;

		if (!is_mbpf_jmp(meta))
			continue;
		if (BPF_OP(code) == BPF_EXIT)
			continue;
		if (is_mbpf_helper_call(meta))
			continue;

		/* If opcode is BPF_CALL at this point, this can only be a
		 * BPF-to-BPF call (a.k.a pseudo call).
		 */
		pseudo_call = BPF_OP(code) == BPF_CALL;

		if (pseudo_call)
			dst_idx = meta->n + 1 + meta->insn.imm;
		else
			dst_idx = meta->n + 1 + meta->insn.off;

		dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx);

		if (pseudo_call)
			dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;

		dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		meta->jmp_dst = dst_meta;
	}
}

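/* Worked example (assumed indices, added for illustration): for a
 * conditional jump at index 5 with off == 3, the destination is
 * 5 + 1 + 3 == 9; for a pseudo call the displacement lives in imm
 * instead of off, but the arithmetic is the same.  The destination
 * meta is flagged as a jump target (and as a subprogram start for
 * pseudo calls) so later passes know where basic blocks begin.
 */
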
bool nfp_bpf_supported_opcode(u8 code)
{
	return !!instr_cb[code];
}

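/* Note (added commentary): the relocation pass below deliberately works
 * on a copy of the instruction stream, since one nfp_prog may be
 * offloaded to several vNICs, each loading the code at a different
 * bv->start_off.  Relative branches are rebased by that offset,
 * absolute targets (abort, out, push/pop regs) are recomputed per
 * vNIC, and helper branches are patched to firmware-provided entry
 * points.
 */
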
void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{
	unsigned int i;
	u64 *prog;
	int err;

	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
		       GFP_KERNEL);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;
		u32 val;
		u16 off;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
		case RELO_NONE:
			continue;
		case RELO_BR_REL:
			br_add_offset(&prog[i], bv->start_off);
			break;
		case RELO_BR_GO_OUT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_out + bv->start_off);
			break;
		case RELO_BR_GO_ABORT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_abort + bv->start_off);
			break;
		case RELO_BR_GO_CALL_PUSH_REGS:
			if (!nfp_prog->tgt_call_push_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_push_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_GO_CALL_POP_REGS:
			if (!nfp_prog->tgt_call_pop_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_pop_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		case RELO_BR_HELPER:
			val = br_get_offset(prog[i]);
			val -= BR_OFF_RELO;
			switch (val) {
			case BPF_FUNC_map_lookup_elem:
				val = nfp_prog->bpf->helpers.map_lookup;
				break;
			case BPF_FUNC_map_update_elem:
				val = nfp_prog->bpf->helpers.map_update;
				break;
			case BPF_FUNC_map_delete_elem:
				val = nfp_prog->bpf->helpers.map_delete;
				break;
			case BPF_FUNC_perf_event_output:
				val = nfp_prog->bpf->helpers.perf_event_output;
				break;
			default:
				pr_err("relocation of unknown helper %d\n",
				       val);
				err = -EINVAL;
				goto err_free_prog;
			}
			br_set_offset(&prog[i], val);
			break;
		case RELO_IMMED_REL:
			immed_add_value(&prog[i], bv->start_off);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
	}

	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
	if (err)
		goto err_free_prog;

	return prog;

err_free_prog:
	kfree(prog);
	return ERR_PTR(err);
}