// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

#define pr_fmt(fmt) "NFP net bpf: " fmt

#include <linux/bug.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/pkt_cls.h>
#include <linux/reciprocal_div.h>
#include <linux/unistd.h>

#include "main.h"
#include "../nfp_asm.h"
#include "../nfp_net_ctrl.h"

/* --- NFP prog --- */
/* The foreach-"multiple"-entries macros provide pos and next<n> pointers.
 * It's safe to modify the next pointers (but not pos).
 */
#define nfp_for_each_insn_walk2(nfp_prog, pos, next)			\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos))

#define nfp_for_each_insn_walk3(nfp_prog, pos, next, next2)		\
	for (pos = list_first_entry(&(nfp_prog)->insns, typeof(*pos), l), \
	     next = list_next_entry(pos, l),				\
	     next2 = list_next_entry(next, l);				\
	     &(nfp_prog)->insns != &pos->l &&				\
	     &(nfp_prog)->insns != &next->l &&				\
	     &(nfp_prog)->insns != &next2->l;				\
	     pos = nfp_meta_next(pos),					\
	     next = nfp_meta_next(pos),					\
	     next2 = nfp_meta_next(next))

static bool
nfp_meta_has_prev(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	return meta->l.prev != &nfp_prog->insns;
}

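/* Append one 64-bit instruction word to the program image; if the allocation
 * is full, record -ENOSPC in nfp_prog->error, which is checked when the
 * translation completes.
 */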
static void nfp_prog_push(struct nfp_prog *nfp_prog, u64 insn)
{
	if (nfp_prog->__prog_alloc_len / sizeof(u64) == nfp_prog->prog_len) {
		pr_warn("instruction limit reached (%u NFP instructions)\n",
			nfp_prog->prog_len);
		nfp_prog->error = -ENOSPC;
		return;
	}

	nfp_prog->prog[nfp_prog->prog_len] = insn;
	nfp_prog->prog_len++;
}

static unsigned int nfp_prog_current_offset(struct nfp_prog *nfp_prog)
{
	return nfp_prog->prog_len;
}

static bool
nfp_prog_confirm_current_offset(struct nfp_prog *nfp_prog, unsigned int off)
{
	/* If there is a recorded error we may have dropped instructions;
	 * that doesn't have to be due to a translator bug, and the
	 * translation will fail anyway, so just return OK.
	 */
	if (nfp_prog->error)
		return true;
	return !WARN_ON_ONCE(nfp_prog_current_offset(nfp_prog) != off);
}

/* --- Emitters --- */
static void
__emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op,
	   u8 mode, u8 xfer, u8 areg, u8 breg, u8 size, enum cmd_ctx_swap ctx,
	   bool indir)
{
	u64 insn;

	insn =	FIELD_PREP(OP_CMD_A_SRC, areg) |
		FIELD_PREP(OP_CMD_CTX, ctx) |
		FIELD_PREP(OP_CMD_B_SRC, breg) |
		FIELD_PREP(OP_CMD_TOKEN, cmd_tgt_act[op].token) |
		FIELD_PREP(OP_CMD_XFER, xfer) |
		FIELD_PREP(OP_CMD_CNT, size) |
		FIELD_PREP(OP_CMD_SIG, ctx != CMD_CTX_NO_SWAP) |
		FIELD_PREP(OP_CMD_TGT_CMD, cmd_tgt_act[op].tgt_cmd) |
		FIELD_PREP(OP_CMD_INDIR, indir) |
		FIELD_PREP(OP_CMD_MODE, mode);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_cmd_any(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	     swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx, bool indir)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(reg_none(), lreg, rreg, &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}
	if (reg.swap) {
		pr_err("cmd can't swap arguments\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	if (reg.dst_lmextn || reg.src_lmextn) {
		pr_err("cmd can't use LMextn\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_cmd(nfp_prog, op, mode, xfer, reg.areg, reg.breg, size, ctx,
		   indir);
}

static void
emit_cmd(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	 swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, false);
}

static void
emit_cmd_indir(struct nfp_prog *nfp_prog, enum cmd_tgt_map op, u8 mode, u8 xfer,
	       swreg lreg, swreg rreg, u8 size, enum cmd_ctx_swap ctx)
{
	emit_cmd_any(nfp_prog, op, mode, xfer, lreg, rreg, size, ctx, true);
}

static void
__emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, enum br_ev_pip ev_pip,
	  enum br_ctx_signal_state css, u16 addr, u8 defer)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_ADDR_LO >> __bf_shf(OP_BR_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BASE |
		FIELD_PREP(OP_BR_MASK, mask) |
		FIELD_PREP(OP_BR_EV_PIP, ev_pip) |
		FIELD_PREP(OP_BR_CSS, css) |
		FIELD_PREP(OP_BR_DEFBR, defer) |
		FIELD_PREP(OP_BR_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_ADDR_HI, addr_hi);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_relo(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer,
	     enum nfp_relo_type relo)
{
	if (mask == BR_UNC && defer > 2) {
		pr_err("BUG: branch defer out of bounds %d\n", defer);
		nfp_prog->error = -EFAULT;
		return;
	}

	__emit_br(nfp_prog, mask,
		  mask != BR_UNC ? BR_EV_PIP_COND : BR_EV_PIP_UNCOND,
		  BR_CSS_NONE, addr, defer);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br(struct nfp_prog *nfp_prog, enum br_mask mask, u16 addr, u8 defer)
{
	emit_br_relo(nfp_prog, mask, addr, defer, RELO_BR_REL);
}

static void
__emit_br_bit(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 addr, u8 defer,
	      bool set, bool src_lmextn)
{
	u16 addr_lo, addr_hi;
	u64 insn;

	addr_lo = addr & (OP_BR_BIT_ADDR_LO >> __bf_shf(OP_BR_BIT_ADDR_LO));
	addr_hi = addr != addr_lo;

	insn = OP_BR_BIT_BASE |
		FIELD_PREP(OP_BR_BIT_A_SRC, areg) |
		FIELD_PREP(OP_BR_BIT_B_SRC, breg) |
		FIELD_PREP(OP_BR_BIT_BV, set) |
		FIELD_PREP(OP_BR_BIT_DEFBR, defer) |
		FIELD_PREP(OP_BR_BIT_ADDR_LO, addr_lo) |
		FIELD_PREP(OP_BR_BIT_ADDR_HI, addr_hi) |
		FIELD_PREP(OP_BR_BIT_SRC_LMEXTN, src_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_br_bit_relo(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr,
		 u8 defer, bool set, enum nfp_relo_type relo)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* NOTE: The bit to test is specified as a rotation amount, such that
	 * the bit to test will be placed on the MSB of the result when
	 * doing a rotate right.  For bit X, we need a right rotate of X + 1.
	 */
	bit += 1;

	err = swreg_to_restricted(reg_none(), src, reg_imm(bit), &reg, false);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_bit(nfp_prog, reg.areg, reg.breg, addr, defer, set,
		      reg.src_lmextn);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

static void
emit_br_bset(struct nfp_prog *nfp_prog, swreg src, u8 bit, u16 addr, u8 defer)
{
	emit_br_bit_relo(nfp_prog, src, bit, addr, defer, true, RELO_BR_REL);
}

static void
__emit_br_alu(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	      u8 defer, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_BR_ALU_BASE |
		FIELD_PREP(OP_BR_ALU_A_SRC, areg) |
		FIELD_PREP(OP_BR_ALU_B_SRC, breg) |
		FIELD_PREP(OP_BR_ALU_DEFBR, defer) |
		FIELD_PREP(OP_BR_ALU_IMM_HI, imm_hi) |
		FIELD_PREP(OP_BR_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_BR_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_rtn(struct nfp_prog *nfp_prog, swreg base, u8 defer)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(reg_none(), base, reg_imm(0), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_br_alu(nfp_prog, reg.areg, reg.breg, 0, defer, reg.dst_lmextn,
		      reg.src_lmextn);
}

static void
__emit_immed(struct nfp_prog *nfp_prog, u16 areg, u16 breg, u16 imm_hi,
	     enum immed_width width, bool invert,
	     enum immed_shift shift, bool wr_both,
	     bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_IMMED_BASE |
		FIELD_PREP(OP_IMMED_A_SRC, areg) |
		FIELD_PREP(OP_IMMED_B_SRC, breg) |
		FIELD_PREP(OP_IMMED_IMM, imm_hi) |
		FIELD_PREP(OP_IMMED_WIDTH, width) |
		FIELD_PREP(OP_IMMED_INV, invert) |
		FIELD_PREP(OP_IMMED_SHIFT, shift) |
		FIELD_PREP(OP_IMMED_WR_AB, wr_both) |
		FIELD_PREP(OP_IMMED_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_IMMED_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_immed(struct nfp_prog *nfp_prog, swreg dst, u16 imm,
	   enum immed_width width, bool invert, enum immed_shift shift)
{
	struct nfp_insn_ur_regs reg;
	int err;

	if (swreg_type(dst) == NN_REG_IMM) {
		nfp_prog->error = -EFAULT;
		return;
	}

	err = swreg_to_unrestricted(dst, dst, reg_imm(imm & 0xff), &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	/* Use reg.dst when destination is No-Dest. */
	__emit_immed(nfp_prog,
		     swreg_type(dst) == NN_REG_NONE ? reg.dst : reg.areg,
		     reg.breg, imm >> 8, width, invert, shift,
		     reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_shf(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   enum shf_sc sc, u8 shift,
	   u16 areg, enum shf_op op, u16 breg, bool i8, bool sw, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	if (!FIELD_FIT(OP_SHF_SHIFT, shift)) {
		nfp_prog->error = -EFAULT;
		return;
	}

	if (sc == SHF_SC_L_SHF)
		shift = 32 - shift;

	insn = OP_SHF_BASE |
		FIELD_PREP(OP_SHF_A_SRC, areg) |
		FIELD_PREP(OP_SHF_SC, sc) |
		FIELD_PREP(OP_SHF_B_SRC, breg) |
		FIELD_PREP(OP_SHF_I8, i8) |
		FIELD_PREP(OP_SHF_SW, sw) |
		FIELD_PREP(OP_SHF_DST, dst) |
		FIELD_PREP(OP_SHF_SHIFT, shift) |
		FIELD_PREP(OP_SHF_OP, op) |
		FIELD_PREP(OP_SHF_DST_AB, dst_ab) |
		FIELD_PREP(OP_SHF_WR_AB, wr_both) |
		FIELD_PREP(OP_SHF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_SHF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_shf(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc, u8 shift)
{
	struct nfp_insn_re_regs reg;
	int err;

	err = swreg_to_restricted(dst, lreg, rreg, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_shf(nfp_prog, reg.dst, reg.dst_ab, sc, shift,
		   reg.areg, op, reg.breg, reg.i8, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_shf_indir(struct nfp_prog *nfp_prog, swreg dst,
	       swreg lreg, enum shf_op op, swreg rreg, enum shf_sc sc)
{
	if (sc == SHF_SC_R_ROT) {
		pr_err("indirect shift is not allowed on rotation\n");
		nfp_prog->error = -EFAULT;
		return;
	}

	emit_shf(nfp_prog, dst, lreg, op, rreg, sc, 0);
}

static void
__emit_alu(struct nfp_prog *nfp_prog, u16 dst, enum alu_dst_ab dst_ab,
	   u16 areg, enum alu_op op, u16 breg, bool swap, bool wr_both,
	   bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_ALU_BASE |
		FIELD_PREP(OP_ALU_A_SRC, areg) |
		FIELD_PREP(OP_ALU_B_SRC, breg) |
		FIELD_PREP(OP_ALU_DST, dst) |
		FIELD_PREP(OP_ALU_SW, swap) |
		FIELD_PREP(OP_ALU_OP, op) |
		FIELD_PREP(OP_ALU_DST_AB, dst_ab) |
		FIELD_PREP(OP_ALU_WR_AB, wr_both) |
		FIELD_PREP(OP_ALU_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_ALU_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_alu(struct nfp_prog *nfp_prog, swreg dst,
	 swreg lreg, enum alu_op op, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	int err;

	err = swreg_to_unrestricted(dst, lreg, rreg, &reg);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_alu(nfp_prog, reg.dst, reg.dst_ab,
		   reg.areg, op, reg.breg, reg.swap, reg.wr_both,
		   reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_mul(struct nfp_prog *nfp_prog, enum alu_dst_ab dst_ab, u16 areg,
	   enum mul_type type, enum mul_step step, u16 breg, bool swap,
	   bool wr_both, bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_MUL_BASE |
		FIELD_PREP(OP_MUL_A_SRC, areg) |
		FIELD_PREP(OP_MUL_B_SRC, breg) |
		FIELD_PREP(OP_MUL_STEP, step) |
		FIELD_PREP(OP_MUL_DST_AB, dst_ab) |
		FIELD_PREP(OP_MUL_SW, swap) |
		FIELD_PREP(OP_MUL_TYPE, type) |
		FIELD_PREP(OP_MUL_WR_AB, wr_both) |
		FIELD_PREP(OP_MUL_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_MUL_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_mul(struct nfp_prog *nfp_prog, swreg lreg, enum mul_type type,
	 enum mul_step step, swreg rreg)
{
	struct nfp_insn_ur_regs reg;
	u16 areg;
	int err;

	if (type == MUL_TYPE_START && step != MUL_STEP_NONE) {
		nfp_prog->error = -EINVAL;
		return;
	}

	if (step == MUL_LAST || step == MUL_LAST_2) {
		/* When the step is MUL_LAST or MUL_LAST_2, the left source is
		 * used as the destination.
		 */
		err = swreg_to_unrestricted(lreg, reg_none(), rreg, &reg);
		areg = reg.dst;
	} else {
		err = swreg_to_unrestricted(reg_none(), lreg, rreg, &reg);
		areg = reg.areg;
	}

	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_mul(nfp_prog, reg.dst_ab, areg, type, step, reg.breg, reg.swap,
		   reg.wr_both, reg.dst_lmextn, reg.src_lmextn);
}

static void
__emit_ld_field(struct nfp_prog *nfp_prog, enum shf_sc sc,
		u8 areg, u8 bmask, u8 breg, u8 shift, bool imm8,
		bool zero, bool swap, bool wr_both,
		bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LDF_BASE |
		FIELD_PREP(OP_LDF_A_SRC, areg) |
		FIELD_PREP(OP_LDF_SC, sc) |
		FIELD_PREP(OP_LDF_B_SRC, breg) |
		FIELD_PREP(OP_LDF_I8, imm8) |
		FIELD_PREP(OP_LDF_SW, swap) |
		FIELD_PREP(OP_LDF_ZF, zero) |
		FIELD_PREP(OP_LDF_BMASK, bmask) |
		FIELD_PREP(OP_LDF_SHF, shift) |
		FIELD_PREP(OP_LDF_WR_AB, wr_both) |
		FIELD_PREP(OP_LDF_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LDF_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void
emit_ld_field_any(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
		  enum shf_sc sc, u8 shift, bool zero)
{
	struct nfp_insn_re_regs reg;
	int err;

	/* Note: ld_field is special as it uses one of the src regs as dst */
	err = swreg_to_restricted(dst, dst, src, &reg, true);
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_ld_field(nfp_prog, sc, reg.areg, bmask, reg.breg, shift,
			reg.i8, zero, reg.swap, reg.wr_both,
			reg.dst_lmextn, reg.src_lmextn);
}

static void
emit_ld_field(struct nfp_prog *nfp_prog, swreg dst, u8 bmask, swreg src,
	      enum shf_sc sc, u8 shift)
{
	emit_ld_field_any(nfp_prog, dst, bmask, src, sc, shift, false);
}

static void
__emit_lcsr(struct nfp_prog *nfp_prog, u16 areg, u16 breg, bool wr, u16 addr,
	    bool dst_lmextn, bool src_lmextn)
{
	u64 insn;

	insn = OP_LCSR_BASE |
		FIELD_PREP(OP_LCSR_A_SRC, areg) |
		FIELD_PREP(OP_LCSR_B_SRC, breg) |
		FIELD_PREP(OP_LCSR_WRITE, wr) |
		FIELD_PREP(OP_LCSR_ADDR, addr / 4) |
		FIELD_PREP(OP_LCSR_SRC_LMEXTN, src_lmextn) |
		FIELD_PREP(OP_LCSR_DST_LMEXTN, dst_lmextn);

	nfp_prog_push(nfp_prog, insn);
}

static void emit_csr_wr(struct nfp_prog *nfp_prog, swreg src, u16 addr)
{
	struct nfp_insn_ur_regs reg;
	int err;

	/* This instruction takes immeds instead of reg_none() for the ignored
	 * operand, but we can't encode two immeds in one instruction with our
	 * normal swreg infrastructure, so if the param is an immed we encode
	 * it as reg_none() and copy the immed to both operands.
	 */
	if (swreg_type(src) == NN_REG_IMM) {
		err = swreg_to_unrestricted(reg_none(), src, reg_none(), &reg);
		reg.breg = reg.areg;
	} else {
		err = swreg_to_unrestricted(reg_none(), src, reg_imm(0), &reg);
	}
	if (err) {
		nfp_prog->error = err;
		return;
	}

	__emit_lcsr(nfp_prog, reg.areg, reg.breg, true, addr,
		    false, reg.src_lmextn);
}

/* The CSR value is read in the following immed[gpr, 0] */
static void __emit_csr_rd(struct nfp_prog *nfp_prog, u16 addr)
{
	__emit_lcsr(nfp_prog, 0, 0, false, addr, false, false);
}

static void emit_nop(struct nfp_prog *nfp_prog)
{
	__emit_immed(nfp_prog, UR_REG_IMM, UR_REG_IMM, 0, 0, 0, 0, 0, 0, 0);
}

/* --- Wrappers --- */
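/* Try to express a 32-bit immediate as a 16-bit value byte-shifted by 0, 1
 * or 2 bytes, matching what a single immed instruction can encode.
 */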
static bool pack_immed(u32 imm, u16 *val, enum immed_shift *shift)
{
	if (!(imm & 0xffff0000)) {
		*val = imm;
		*shift = IMMED_SHIFT_0B;
	} else if (!(imm & 0xff0000ff)) {
		*val = imm >> 8;
		*shift = IMMED_SHIFT_1B;
	} else if (!(imm & 0x0000ffff)) {
		*val = imm >> 16;
		*shift = IMMED_SHIFT_2B;
	} else {
		return false;
	}

	return true;
}

596
b3f868df 597static void wrp_immed(struct nfp_prog *nfp_prog, swreg dst, u32 imm)
cd7df56e
JK
598{
599 enum immed_shift shift;
600 u16 val;
601
602 if (pack_immed(imm, &val, &shift)) {
603 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, false, shift);
604 } else if (pack_immed(~imm, &val, &shift)) {
605 emit_immed(nfp_prog, dst, val, IMMED_WIDTH_ALL, true, shift);
606 } else {
607 emit_immed(nfp_prog, dst, imm & 0xffff, IMMED_WIDTH_ALL,
608 false, IMMED_SHIFT_0B);
609 emit_immed(nfp_prog, dst, imm >> 16, IMMED_WIDTH_WORD,
610 false, IMMED_SHIFT_2B);
611 }
612}
613
static void
wrp_immed_relo(struct nfp_prog *nfp_prog, swreg dst, u32 imm,
	       enum nfp_relo_type relo)
{
	if (imm > 0xffff) {
		pr_err("relocation of a large immediate!\n");
		nfp_prog->error = -EFAULT;
		return;
	}
	emit_immed(nfp_prog, dst, imm, IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);

	nfp_prog->prog[nfp_prog->prog_len - 1] |=
		FIELD_PREP(OP_RELO_TYPE, relo);
}

/* ur_load_imm_any() - encode immediate or use tmp register (unrestricted)
 * If @imm is small enough, encode it directly in the operand and return it,
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg ur_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(UR_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

/* re_load_imm_any() - encode immediate or use tmp register (restricted)
 * If @imm is small enough, encode it directly in the operand and return it,
 * otherwise load @imm into a spare register and return its encoding.
 */
static swreg re_load_imm_any(struct nfp_prog *nfp_prog, u32 imm, swreg tmp_reg)
{
	if (FIELD_FIT(RE_REG_IMM_MAX, imm))
		return reg_imm(imm);

	wrp_immed(nfp_prog, tmp_reg, imm);
	return tmp_reg;
}

static void wrp_nops(struct nfp_prog *nfp_prog, unsigned int count)
{
	while (count--)
		emit_nop(nfp_prog);
}

static void wrp_mov(struct nfp_prog *nfp_prog, swreg dst, swreg src)
{
	emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, src);
}

static void wrp_reg_mov(struct nfp_prog *nfp_prog, u16 dst, u16 src)
{
	wrp_mov(nfp_prog, reg_both(dst), reg_b(src));
}

/* wrp_reg_subpart() - load @field_len bytes from @offset of @src, write the
 * result to @dst from the low end.
 */
static void
wrp_reg_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src, u8 field_len,
		u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_R_SHF : SHF_SC_NONE;
	u8 mask = (1 << field_len) - 1;

	emit_ld_field_any(nfp_prog, dst, mask, src, sc, offset * 8, true);
}

/* wrp_reg_or_subpart() - load @field_len bytes from the low end of @src, OR
 * the result into @dst at @offset; the other bits of @dst are unchanged.
 */
static void
wrp_reg_or_subpart(struct nfp_prog *nfp_prog, swreg dst, swreg src,
		   u8 field_len, u8 offset)
{
	enum shf_sc sc = offset ? SHF_SC_L_SHF : SHF_SC_NONE;
	u8 mask = ((1 << field_len) - 1) << offset;

	emit_ld_field(nfp_prog, dst, mask, src, sc, 32 - offset * 8);
}

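/* Compute the A/B operands for a 40-bit address access.  The pointer lives in
 * a pair of 32-bit GPRs; when @offset is non-zero the 64-bit add is done as
 * an ADD/ADD_C (carry) pair into the imm scratch registers.
 */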
static void
addr40_offset(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
	      swreg *rega, swreg *regb)
{
	if (offset == reg_imm(0)) {
		*rega = reg_a(src_gpr);
		*regb = reg_b(src_gpr + 1);
		return;
	}

	emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(src_gpr), ALU_OP_ADD, offset);
	emit_alu(nfp_prog, imm_b(nfp_prog), reg_b(src_gpr + 1), ALU_OP_ADD_C,
		 reg_imm(0));
	*rega = imm_a(nfp_prog);
	*regb = imm_b(nfp_prog);
}

/* The NFP has a Command Push Pull bus which supports bulk memory operations. */
static int nfp_cpp_memcpy(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool descending_seq = meta->ldst_gather_len < 0;
	s16 len = abs(meta->ldst_gather_len);
	swreg src_base, off;
	bool src_40bit_addr;
	unsigned int i;
	u8 xfer_num;

	off = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
	src_40bit_addr = meta->ptr.type == PTR_TO_MAP_VALUE;
	src_base = reg_a(meta->insn.src_reg * 2);
	xfer_num = round_up(len, 4) / 4;

	if (src_40bit_addr)
		addr40_offset(nfp_prog, meta->insn.src_reg * 2, off, &src_base,
			      &off);

	/* Set up PREV_ALU fields to override memory read length. */
	if (len > 32)
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));

	/* Memory read from source addr into transfer-in registers. */
	emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP,
		     src_40bit_addr ? CMD_MODE_40b_BA : CMD_MODE_32b, 0,
		     src_base, off, xfer_num - 1, CMD_CTX_SWAP, len > 32);

	/* Move from transfer-in to transfer-out. */
	for (i = 0; i < xfer_num; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_xfer(i));

	off = re_load_imm_any(nfp_prog, meta->paired_st->off, imm_b(nfp_prog));

	if (len <= 8) {
		/* Use a single direct_ref write8. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32 && IS_ALIGNED(len, 4)) {
		/* Use a single direct_ref write32. */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, xfer_num - 1,
			 CMD_CTX_SWAP);
	} else if (len <= 32) {
		/* Use a single indirect_ref write8. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, len - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       len - 1, CMD_CTX_SWAP);
	} else if (IS_ALIGNED(len, 4)) {
		/* Use a single indirect_ref write32. */
		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 1, CMD_CTX_SWAP);
	} else if (len <= 40) {
		/* Use one direct_ref write32 to write the first 32 bytes, then
		 * another direct_ref write8 to write the remaining bytes.
		 */
		emit_cmd(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			 reg_a(meta->paired_st->dst_reg * 2), off, 7,
			 CMD_CTX_SWAP);

		off = re_load_imm_any(nfp_prog, meta->paired_st->off + 32,
				      imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 8,
			 reg_a(meta->paired_st->dst_reg * 2), off, len - 33,
			 CMD_CTX_SWAP);
	} else {
		/* Use one indirect_ref write32 to write the 4-byte-aligned
		 * part, then another direct_ref write8 to write the remaining
		 * bytes.
		 */
		u8 new_off;

		wrp_immed(nfp_prog, reg_none(),
			  CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 2));
		emit_cmd_indir(nfp_prog, CMD_TGT_WRITE32_SWAP, CMD_MODE_32b, 0,
			       reg_a(meta->paired_st->dst_reg * 2), off,
			       xfer_num - 2, CMD_CTX_SWAP);
		new_off = meta->paired_st->off + (xfer_num - 1) * 4;
		off = re_load_imm_any(nfp_prog, new_off, imm_b(nfp_prog));
		emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b,
			 xfer_num - 1, reg_a(meta->paired_st->dst_reg * 2), off,
			 (len & 0x3) - 1, CMD_CTX_SWAP);
	}

	/* TODO: The following extra load is to make sure the data flow is
	 * identical before and after we do the memory copy optimization.
	 *
	 * The load destination register is not guaranteed to be dead, so we
	 * need to make sure it is loaded with the same value as before this
	 * transformation.
	 *
	 * These extra loads could be removed once we have accurate register
	 * usage information.
	 */
	if (descending_seq)
		xfer_num = 0;
	else if (BPF_SIZE(meta->insn.code) != BPF_DW)
		xfer_num = xfer_num - 1;
	else
		xfer_num = xfer_num - 2;

	switch (BPF_SIZE(meta->insn.code)) {
	case BPF_B:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 1,
				IS_ALIGNED(len, 4) ? 3 : (len & 3) - 1);
		break;
	case BPF_H:
		wrp_reg_subpart(nfp_prog, reg_both(meta->insn.dst_reg * 2),
				reg_xfer(xfer_num), 2, (len & 3) ^ 2);
		break;
	case BPF_W:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(0));
		break;
	case BPF_DW:
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2),
			reg_xfer(xfer_num));
		wrp_mov(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1),
			reg_xfer(xfer_num + 1));
		break;
	}

	if (BPF_SIZE(meta->insn.code) != BPF_DW)
		wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static int
data_ld(struct nfp_prog *nfp_prog, swreg offset, u8 dst_gpr, int size)
{
	unsigned int i;
	u16 shift, sz;

	/* We load the value from the address indicated in @offset and then
	 * shift out the data we don't need.  Note: this is big endian!
	 */
	sz = max(size, 4);
	shift = size < 4 ? 4 - size : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ8, CMD_MODE_32b, 0,
		 pptr_reg(nfp_prog), offset, sz - 1, CMD_CTX_SWAP);

	i = 0;
	if (shift)
		emit_shf(nfp_prog, reg_both(dst_gpr), reg_none(), SHF_OP_NONE,
			 reg_xfer(0), SHF_SC_R_SHF, shift * 8);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr,
		   swreg lreg, swreg rreg, int size, enum cmd_mode mode)
{
	unsigned int i;
	u8 mask, sz;

	/* We load the value from the address indicated in rreg + lreg and then
	 * mask out the data we don't need.  Note: this is little endian!
	 */
	sz = max(size, 4);
	mask = size < 4 ? GENMASK(size - 1, 0) : 0;

	emit_cmd(nfp_prog, CMD_TGT_READ32_SWAP, mode, 0,
		 lreg, rreg, sz / 4 - 1, CMD_CTX_SWAP);

	i = 0;
	if (mask)
		emit_ld_field_any(nfp_prog, reg_both(dst_gpr), mask,
				  reg_xfer(0), SHF_SC_NONE, 0, true);
	else
		for (; i * 4 < size; i++)
			wrp_mov(nfp_prog, reg_both(dst_gpr + i), reg_xfer(i));

	if (i < 2)
		wrp_immed(nfp_prog, reg_both(dst_gpr + 1), 0);

	return 0;
}

static int
data_ld_host_order_addr32(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	return data_ld_host_order(nfp_prog, dst_gpr, reg_a(src_gpr), offset,
				  size, CMD_MODE_32b);
}

static int
data_ld_host_order_addr40(struct nfp_prog *nfp_prog, u8 src_gpr, swreg offset,
			  u8 dst_gpr, u8 size)
{
	swreg rega, regb;

	addr40_offset(nfp_prog, src_gpr, offset, &rega, &regb);

	return data_ld_host_order(nfp_prog, dst_gpr, rega, regb,
				  size, CMD_MODE_40b_BA);
}

static int
construct_data_ind_ld(struct nfp_prog *nfp_prog, u16 offset, u16 src, u8 size)
{
	swreg tmp_reg;

	/* Calculate the true offset (src_reg + imm) */
	tmp_reg = ur_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	emit_alu(nfp_prog, imm_both(nfp_prog), reg_a(src), ALU_OP_ADD, tmp_reg);

	/* Check packet length (size guaranteed to fit b/c it's u8) */
	emit_alu(nfp_prog, imm_a(nfp_prog),
		 imm_a(nfp_prog), ALU_OP_ADD, reg_imm(size));
	emit_alu(nfp_prog, reg_none(),
		 plen_reg(nfp_prog), ALU_OP_SUB, imm_a(nfp_prog));
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	return data_ld(nfp_prog, imm_b(nfp_prog), 0, size);
}

static int construct_data_ld(struct nfp_prog *nfp_prog, u16 offset, u8 size)
{
	swreg tmp_reg;

	/* Check packet length */
	tmp_reg = ur_load_imm_any(nfp_prog, offset + size, imm_a(nfp_prog));
	emit_alu(nfp_prog, reg_none(), plen_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
	emit_br_relo(nfp_prog, BR_BLO, BR_OFF_RELO, 0, RELO_BR_GO_ABORT);

	/* Load data */
	tmp_reg = re_load_imm_any(nfp_prog, offset, imm_b(nfp_prog));
	return data_ld(nfp_prog, tmp_reg, 0, size);
}

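/* Stores stage the source GPRs (or the immediate) in the transfer-out
 * registers and then issue a single write8_swap command of @size bytes.
 */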
static int
data_stx_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		    u8 src_gpr, u8 size)
{
	unsigned int i;

	for (i = 0; i * 4 < size; i++)
		wrp_mov(nfp_prog, reg_xfer(i), reg_a(src_gpr + i));

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

static int
data_st_host_order(struct nfp_prog *nfp_prog, u8 dst_gpr, swreg offset,
		   u64 imm, u8 size)
{
	wrp_immed(nfp_prog, reg_xfer(0), imm);
	if (size == 8)
		wrp_immed(nfp_prog, reg_xfer(1), imm >> 32);

	emit_cmd(nfp_prog, CMD_TGT_WRITE8_SWAP, CMD_MODE_32b, 0,
		 reg_a(dst_gpr), offset, size - 1, CMD_CTX_SWAP);

	return 0;
}

typedef int
(*lmem_step)(struct nfp_prog *nfp_prog, u8 gpr, u8 gpr_byte, s32 off,
	     unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	     bool needs_inc);

static int
wrp_lmem_load(struct nfp_prog *nfp_prog, u8 dst, u8 dst_byte, s32 off,
	      unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	      bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, src_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(dst_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog, reg_both(dst),
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	src_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large
	 * do RMW.  Because we RMW twice we waste 2 cycles on unaligned
	 * 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* If it's not the first part of the load and we start a new
		 * GPR that means we are loading a second part of the LMEM word
		 * into a new GPR.  IOW we've already looked at that LMEM word
		 * and therefore it has been loaded into imm_a().
		 */
		if (first || !new_gpr)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field_any(nfp_prog, reg_both(dst), mask, reg, sc, shf, new_gpr);

	if (should_inc)
		wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));

	return 0;
}

static int
wrp_lmem_store(struct nfp_prog *nfp_prog, u8 src, u8 src_byte, s32 off,
	       unsigned int size, bool first, bool new_gpr, bool last, bool lm3,
	       bool needs_inc)
{
	bool should_inc = needs_inc && new_gpr && !last;
	u32 idx, dst_byte;
	enum shf_sc sc;
	swreg reg;
	int shf;
	u8 mask;

	if (WARN_ON_ONCE(src_byte + size > 4 || off % 4 + size > 4))
		return -EOPNOTSUPP;

	idx = off / 4;

	/* Move the entire word */
	if (size == 4) {
		wrp_mov(nfp_prog,
			should_inc ? reg_lm_inc(3) : reg_lm(lm3 ? 3 : 0, idx),
			reg_b(src));
		return 0;
	}

	if (WARN_ON_ONCE(lm3 && idx > RE_REG_LM_IDX_MAX))
		return -EOPNOTSUPP;

	dst_byte = off % 4;

	mask = (1 << size) - 1;
	mask <<= dst_byte;

	if (WARN_ON_ONCE(mask > 0xf))
		return -EOPNOTSUPP;

	shf = abs(src_byte - dst_byte) * 8;
	if (src_byte == dst_byte) {
		sc = SHF_SC_NONE;
	} else if (src_byte < dst_byte) {
		shf = 32 - shf;
		sc = SHF_SC_L_SHF;
	} else {
		sc = SHF_SC_R_SHF;
	}

	/* ld_field can address fewer indexes; if the offset is too large
	 * do RMW.  Because we RMW twice we waste 2 cycles on unaligned
	 * 8 byte writes.
	 */
	if (idx <= RE_REG_LM_IDX_MAX) {
		reg = reg_lm(lm3 ? 3 : 0, idx);
	} else {
		reg = imm_a(nfp_prog);
		/* Only the first and last LMEM locations are going to need
		 * RMW, the middle location will be overwritten fully.
		 */
		if (first || last)
			wrp_mov(nfp_prog, reg, reg_lm(0, idx));
	}

	emit_ld_field(nfp_prog, reg, mask, reg_b(src), sc, shf);

	if (new_gpr || last) {
		if (idx > RE_REG_LM_IDX_MAX)
			wrp_mov(nfp_prog, reg_lm(0, idx), reg);
		if (should_inc)
			wrp_mov(nfp_prog, reg_none(), reg_lm_inc(3));
	}

	return 0;
}

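/* Walk a stack access in at-most-4-byte slices, invoking @step for each
 * slice.  The bottom 64B of the stack are reachable directly via LMaddr0;
 * everything else goes through LMaddr3, either indexed (when the whole access
 * stays within one 32 byte window) or post-incremented.
 */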
static int
mem_op_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     unsigned int size, unsigned int ptr_off, u8 gpr, u8 ptr_gpr,
	     bool clr_gpr, lmem_step step)
{
	s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off;
	bool first = true, last;
	bool needs_inc = false;
	swreg stack_off_reg;
	u8 prev_gpr = 255;
	u32 gpr_byte = 0;
	bool lm3 = true;
	int ret;

	if (meta->ptr_not_const ||
	    meta->flags & FLAG_INSN_PTR_CALLER_STACK_FRAME) {
		/* Use of the last encountered ptr_off is OK, they all have
		 * the same alignment.  Depend on low bits of value being
		 * discarded when written to LMaddr register.
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, meta->insn.off,
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 reg_a(ptr_gpr), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	} else if (off + size <= 64) {
		/* We can reach bottom 64B with LMaddr0 */
		lm3 = false;
	} else if (round_down(off, 32) == round_down(off + size - 1, 32)) {
		/* We have to set up a new pointer.  If we know the offset
		 * and the entire access falls into a single 32 byte aligned
		 * window we won't have to increment the LM pointer.
		 * The 32 byte alignment is important because offset is ORed
		 * in, not added, when doing *l$indexN[off].
		 */
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 32),
						stack_imm(nfp_prog));
		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		off %= 32;
	} else {
		stack_off_reg = ur_load_imm_any(nfp_prog, round_down(off, 4),
						stack_imm(nfp_prog));

		emit_alu(nfp_prog, imm_b(nfp_prog),
			 stack_reg(nfp_prog), ALU_OP_ADD, stack_off_reg);

		needs_inc = true;
	}
	if (lm3) {
		emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3);
		/* For size < 4 one slot will be filled by zeroing of upper. */
		wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3);
	}

	if (clr_gpr && size < 8)
		wrp_immed(nfp_prog, reg_both(gpr + 1), 0);

	while (size) {
		u32 slice_end;
		u8 slice_size;

		slice_size = min(size, 4 - gpr_byte);
		slice_end = min(off + slice_size, round_up(off + 1, 4));
		slice_size = slice_end - off;

		last = slice_size == size;

		if (needs_inc)
			off %= 4;

		ret = step(nfp_prog, gpr, gpr_byte, off, slice_size,
			   first, gpr != prev_gpr, last, lm3, needs_inc);
		if (ret)
			return ret;

		prev_gpr = gpr;
		first = false;

		gpr_byte += slice_size;
		if (gpr_byte >= 4) {
			gpr_byte -= 4;
			gpr++;
		}

		size -= slice_size;
		off += slice_size;
	}

	return 0;
}

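/* Emit an ALU op with a 32-bit immediate, folding the trivial cases:
 * AND 0 / OR ~0 become plain immediate loads, AND ~0 / OR 0 / XOR 0 are
 * dropped entirely, and XOR ~0 becomes a NOT.
 */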
static void
wrp_alu_imm(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u32 imm)
{
	swreg tmp_reg;

	if (alu_op == ALU_OP_AND) {
		if (!imm)
			wrp_immed(nfp_prog, reg_both(dst), 0);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_OR) {
		if (!~imm)
			wrp_immed(nfp_prog, reg_both(dst), ~0U);
		if (!imm || !~imm)
			return;
	}
	if (alu_op == ALU_OP_XOR) {
		if (!~imm)
			emit_alu(nfp_prog, reg_both(dst), reg_none(),
				 ALU_OP_NOT, reg_b(dst));
		if (!imm || !~imm)
			return;
	}

	tmp_reg = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, tmp_reg);
}

static int
wrp_alu64_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */

	if (skip) {
		meta->flags |= FLAG_INSN_SKIP_NOOP;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, imm & ~0U);
	wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, alu_op, imm >> 32);

	return 0;
}

static int
wrp_alu64_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	emit_alu(nfp_prog, reg_both(dst + 1),
		 reg_a(dst + 1), alu_op, reg_b(src + 1));

	return 0;
}

static int
wrp_alu32_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op, bool skip)
{
	const struct bpf_insn *insn = &meta->insn;

	if (skip) {
		meta->flags |= FLAG_INSN_SKIP_NOOP;
		return 0;
	}

	wrp_alu_imm(nfp_prog, insn->dst_reg * 2, alu_op, insn->imm);
	wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);

	return 0;
}

static int
wrp_alu32_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	      enum alu_op alu_op)
{
	u8 dst = meta->insn.dst_reg * 2, src = meta->insn.src_reg * 2;

	emit_alu(nfp_prog, reg_both(dst), reg_a(dst), alu_op, reg_b(src));
	wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);

	return 0;
}

static void
wrp_test_reg_one(struct nfp_prog *nfp_prog, u8 dst, enum alu_op alu_op, u8 src,
		 enum br_mask br_mask, u16 off)
{
	emit_alu(nfp_prog, reg_none(), reg_a(dst), alu_op, reg_b(src));
	emit_br(nfp_prog, br_mask, off, 0);
}

static int
wrp_test_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	     enum alu_op alu_op, enum br_mask br_mask)
{
	const struct bpf_insn *insn = &meta->insn;

	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2, alu_op,
			 insn->src_reg * 2, br_mask, insn->off);
	wrp_test_reg_one(nfp_prog, insn->dst_reg * 2 + 1, alu_op,
			 insn->src_reg * 2 + 1, br_mask, insn->off);

	return 0;
}

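/* Map BPF jump conditions to NFP branch masks.  'swap' means the two operands
 * must be exchanged before the compare, e.g. BPF_JGT a > b is emitted as
 * b < a using the branch-if-lower mask.
 */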
static const struct jmp_code_map {
	enum br_mask br_mask;
	bool swap;
} jmp_code_map[] = {
	[BPF_JGT >> 4]	= { BR_BLO, true },
	[BPF_JGE >> 4]	= { BR_BHS, false },
	[BPF_JLT >> 4]	= { BR_BLO, false },
	[BPF_JLE >> 4]	= { BR_BHS, true },
	[BPF_JSGT >> 4]	= { BR_BLT, true },
	[BPF_JSGE >> 4]	= { BR_BGE, false },
	[BPF_JSLT >> 4]	= { BR_BLT, false },
	[BPF_JSLE >> 4]	= { BR_BGE, true },
};

static const struct jmp_code_map *nfp_jmp_code_get(struct nfp_insn_meta *meta)
{
	unsigned int op;

	op = BPF_OP(meta->insn.code) >> 4;
	/* br_mask of 0 is BR_BEQ which we don't use in the jump code table */
	if (WARN_ONCE(op >= ARRAY_SIZE(jmp_code_map) ||
		      !jmp_code_map[op].br_mask,
		      "no code found for jump instruction"))
		return NULL;

	return &jmp_code_map[op];
}

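/* 64-bit compares are built from a subtract of the low words followed by a
 * subtract-with-carry of the high words; both results go to reg_none() so
 * only the condition codes survive for the branch.
 */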
static int cmp_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	u64 imm = insn->imm; /* sign extend */
	const struct jmp_code_map *code;
	enum alu_op alu_op, carry_op;
	u8 reg = insn->dst_reg * 2;
	swreg tmp_reg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	alu_op = meta->jump_neg_op ? ALU_OP_ADD : ALU_OP_SUB;
	carry_op = meta->jump_neg_op ? ALU_OP_ADD_C : ALU_OP_SUB_C;

	tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(), reg_a(reg), alu_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(), tmp_reg, alu_op, reg_a(reg));

	tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
	if (!code->swap)
		emit_alu(nfp_prog, reg_none(),
			 reg_a(reg + 1), carry_op, tmp_reg);
	else
		emit_alu(nfp_prog, reg_none(),
			 tmp_reg, carry_op, reg_a(reg + 1));

	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

static int cmp_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	const struct bpf_insn *insn = &meta->insn;
	const struct jmp_code_map *code;
	u8 areg, breg;

	code = nfp_jmp_code_get(meta);
	if (!code)
		return -EINVAL;

	areg = insn->dst_reg * 2;
	breg = insn->src_reg * 2;

	if (code->swap) {
		areg ^= breg;
		breg ^= areg;
		areg ^= breg;
	}

	emit_alu(nfp_prog, reg_none(), reg_a(areg), ALU_OP_SUB, reg_b(breg));
	emit_alu(nfp_prog, reg_none(),
		 reg_a(areg + 1), ALU_OP_SUB_C, reg_b(breg + 1));
	emit_br(nfp_prog, code->br_mask, insn->off, 0);

	return 0;
}

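/* Byte swap the low 32 bits, e.g. 0xAABBCCDD -> 0xDDCCBBAA, using two masked
 * rotations: rotate right by 8 writing all four bytes, then rotate right by
 * 16 updating only bytes 0 and 2 (mask 0x5).
 */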
static void wrp_end32(struct nfp_prog *nfp_prog, swreg reg_in, u8 gpr_out)
{
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0xf, reg_in,
		      SHF_SC_R_ROT, 8);
	emit_ld_field(nfp_prog, reg_both(gpr_out), 0x5, reg_a(gpr_out),
		      SHF_SC_R_ROT, 16);
}

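/* Full 32x32 -> 64 multiply on the NFP multiplier: prime it with
 * MUL_TYPE_START, run the four 32x32 steps, then collect the low half with
 * MUL_LAST and, if requested, the high half with MUL_LAST_2.
 */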
static void
wrp_mul_u32(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg, bool gen_high_half)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_3, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_32x32, MUL_STEP_4, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_32x32, MUL_LAST, reg_none());
	if (gen_high_half)
		emit_mul(nfp_prog, dst_hi, MUL_TYPE_STEP_32x32, MUL_LAST_2,
			 reg_none());
	else
		wrp_immed(nfp_prog, dst_hi, 0);
}

static void
wrp_mul_u16(struct nfp_prog *nfp_prog, swreg dst_hi, swreg dst_lo, swreg lreg,
	    swreg rreg)
{
	emit_mul(nfp_prog, lreg, MUL_TYPE_START, MUL_STEP_NONE, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_1, rreg);
	emit_mul(nfp_prog, lreg, MUL_TYPE_STEP_16x16, MUL_STEP_2, rreg);
	emit_mul(nfp_prog, dst_lo, MUL_TYPE_STEP_16x16, MUL_LAST, reg_none());
}

static int
wrp_mul(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
	bool gen_high_half, bool ropnd_from_reg)
{
	swreg multiplier, multiplicand, dst_hi, dst_lo;
	const struct bpf_insn *insn = &meta->insn;
	u32 lopnd_max, ropnd_max;
	u8 dst_reg;

	dst_reg = insn->dst_reg;
	multiplicand = reg_a(dst_reg * 2);
	dst_hi = reg_both(dst_reg * 2 + 1);
	dst_lo = reg_both(dst_reg * 2);
	lopnd_max = meta->umax_dst;
	if (ropnd_from_reg) {
		multiplier = reg_b(insn->src_reg * 2);
		ropnd_max = meta->umax_src;
	} else {
		u32 imm = insn->imm;

		multiplier = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));
		ropnd_max = imm;
	}
	if (lopnd_max > U16_MAX || ropnd_max > U16_MAX)
		wrp_mul_u32(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier,
			    gen_high_half);
	else
		wrp_mul_u16(nfp_prog, dst_hi, dst_lo, multiplicand, multiplier);

	return 0;
}

static int wrp_div_imm(struct nfp_prog *nfp_prog, u8 dst, u64 imm)
{
	swreg dst_both = reg_both(dst), dst_a = reg_a(dst), dst_b = reg_a(dst);
	struct reciprocal_value_adv rvalue;
	u8 pre_shift, exp;
	swreg magic;

	if (imm > U32_MAX) {
		wrp_immed(nfp_prog, dst_both, 0);
		return 0;
	}

	/* NOTE: because we are using "reciprocal_value_adv" which doesn't
	 * support "divisor > (1u << 31)", we need to JIT a separate NFP
	 * sequence to handle such a case.  The result is then equal to the
	 * result of the unsigned comparison "dst >= imm", which can be
	 * calculated using the following NFP sequence:
	 *
	 *  alu[--, dst, -, imm]
	 *  immed[imm, 0]
	 *  alu[dst, imm, +carry, 0]
	 */
	if (imm > 1U << 31) {
		swreg tmp_b = ur_load_imm_any(nfp_prog, imm, imm_b(nfp_prog));

		emit_alu(nfp_prog, reg_none(), dst_a, ALU_OP_SUB, tmp_b);
		wrp_immed(nfp_prog, imm_a(nfp_prog), 0);
		emit_alu(nfp_prog, dst_both, imm_a(nfp_prog), ALU_OP_ADD_C,
			 reg_imm(0));
		return 0;
	}

	rvalue = reciprocal_value_adv(imm, 32);
	exp = rvalue.exp;
	if (rvalue.is_wide_m && !(imm & 1)) {
		pre_shift = fls(imm & -imm) - 1;
		rvalue = reciprocal_value_adv(imm >> pre_shift, 32 - pre_shift);
	} else {
		pre_shift = 0;
	}
	magic = ur_load_imm_any(nfp_prog, rvalue.m, imm_b(nfp_prog));
	if (imm == 1U << exp) {
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, exp);
	} else if (rvalue.is_wide_m) {
		wrp_mul_u32(nfp_prog, imm_both(nfp_prog), reg_none(), dst_a,
			    magic, true);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_SUB,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, 1);
		emit_alu(nfp_prog, dst_both, dst_a, ALU_OP_ADD,
			 imm_b(nfp_prog));
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE, dst_b,
			 SHF_SC_R_SHF, rvalue.sh - 1);
	} else {
		if (pre_shift)
			emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
				 dst_b, SHF_SC_R_SHF, pre_shift);
		wrp_mul_u32(nfp_prog, dst_both, reg_none(), dst_a, magic, true);
		emit_shf(nfp_prog, dst_both, reg_none(), SHF_OP_NONE,
			 dst_b, SHF_SC_R_SHF, rvalue.sh);
	}

	return 0;
}

static int adjust_head(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg tmp = imm_a(nfp_prog), tmp_len = imm_b(nfp_prog);
	struct nfp_bpf_cap_adjust_head *adjust_head;
	u32 ret_einval, end;

	adjust_head = &nfp_prog->bpf->adjust_head;

	/* Optimized version - 5 vs 14 cycles */
	if (nfp_prog->adjust_head_location != UINT_MAX) {
		if (WARN_ON_ONCE(nfp_prog->adjust_head_location != meta->n))
			return -EINVAL;

		emit_alu(nfp_prog, pptr_reg(nfp_prog),
			 reg_a(2 * 2), ALU_OP_ADD, pptr_reg(nfp_prog));
		emit_alu(nfp_prog, plen_reg(nfp_prog),
			 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
		emit_alu(nfp_prog, pv_len(nfp_prog),
			 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

		wrp_immed(nfp_prog, reg_both(0), 0);
		wrp_immed(nfp_prog, reg_both(1), 0);

		/* TODO: when adjust head is guaranteed to succeed we can
		 * also eliminate the following if (r0 == 0) branch.
		 */

		return 0;
	}

	ret_einval = nfp_prog_current_offset(nfp_prog) + 14;
	end = ret_einval + 2;

	/* We need to use a temp because offset is just a part of the pkt ptr */
	emit_alu(nfp_prog, tmp,
		 reg_a(2 * 2), ALU_OP_ADD_2B, pptr_reg(nfp_prog));

	/* Validate result will fit within FW datapath constraints */
	emit_alu(nfp_prog, reg_none(),
		 tmp, ALU_OP_SUB, reg_imm(adjust_head->off_min));
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);
	emit_alu(nfp_prog, reg_none(),
		 reg_imm(adjust_head->off_max), ALU_OP_SUB, tmp);
	emit_br(nfp_prog, BR_BLO, ret_einval, 0);

	/* Validate the length is at least ETH_HLEN */
	emit_alu(nfp_prog, tmp_len,
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, reg_none(),
		 tmp_len, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	/* Load the ret code */
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	/* Modify the packet metadata */
	emit_ld_field(nfp_prog, pptr_reg(nfp_prog), 0x3, tmp, SHF_SC_NONE, 0);

	/* Skip over the -EINVAL ret code (defer 2) */
	emit_br(nfp_prog, BR_UNC, end, 2);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_SUB, reg_a(2 * 2));

	/* return -EINVAL target */
	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

static int adjust_tail(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	u32 ret_einval, end;
	swreg plen, delta;

	BUILD_BUG_ON(plen_reg(nfp_prog) != reg_b(STATIC_REG_PKT_LEN));

	plen = imm_a(nfp_prog);
	delta = reg_a(2 * 2);

	ret_einval = nfp_prog_current_offset(nfp_prog) + 9;
	end = nfp_prog_current_offset(nfp_prog) + 11;

	/* Calculate resulting length */
	emit_alu(nfp_prog, plen, plen_reg(nfp_prog), ALU_OP_ADD, delta);
	/* delta == 0 is not allowed by the kernel, add must overflow to make
	 * length smaller.
	 */
	emit_br(nfp_prog, BR_BCC, ret_einval, 0);

	/* if (new_len < 14) then -EINVAL */
	emit_alu(nfp_prog, reg_none(), plen, ALU_OP_SUB, reg_imm(ETH_HLEN));
	emit_br(nfp_prog, BR_BMI, ret_einval, 0);

	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_ADD, delta);
	emit_alu(nfp_prog, pv_len(nfp_prog),
		 pv_len(nfp_prog), ALU_OP_ADD, delta);

	emit_br(nfp_prog, BR_UNC, end, 2);
	wrp_immed(nfp_prog, reg_both(0), 0);
	wrp_immed(nfp_prog, reg_both(1), 0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_einval))
		return -EINVAL;

	wrp_immed(nfp_prog, reg_both(0), -22);
	wrp_immed(nfp_prog, reg_both(1), ~0);

	if (!nfp_prog_confirm_current_offset(nfp_prog, end))
		return -EINVAL;

	return 0;
}

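/* Call a firmware map helper.  The ABI used here: LM0 points at the key (and
 * LM2 at the value for updates), the map ID is passed in A0, the return
 * address in B0, and the jump target is filled in by a RELO_BR_HELPER
 * relocation.
 */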
static int
map_call_stack_common(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	bool load_lm_ptr;
	u32 ret_tgt;
	s64 lm_off;

	/* We only have to reload LM0 if the key is not at start of stack */
	lm_off = nfp_prog->stack_frame_depth;
	lm_off += meta->arg2.reg.var_off.value + meta->arg2.reg.off;
	load_lm_ptr = meta->arg2.var_off || lm_off;

	/* Set LM0 to start of key */
	if (load_lm_ptr)
		emit_csr_wr(nfp_prog, reg_b(2 * 2), NFP_CSR_ACT_LM_ADDR0);
	if (meta->func_id == BPF_FUNC_map_update_elem)
		emit_csr_wr(nfp_prog, reg_b(3 * 2), NFP_CSR_ACT_LM_ADDR2);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);
	ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;

	/* Load map ID into A0 */
	wrp_mov(nfp_prog, reg_a(0), reg_a(2));

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	/* Reset the LM0 pointer */
	if (!load_lm_ptr)
		return 0;

	emit_csr_wr(nfp_prog, stack_reg(nfp_prog), NFP_CSR_ACT_LM_ADDR0);
	wrp_nops(nfp_prog, 3);

	return 0;
}

static int
nfp_get_prandom_u32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	__emit_csr_rd(nfp_prog, NFP_CSR_PSEUDO_RND_NUM);
	/* The CSR value is read in the following immed[gpr, 0] */
	emit_immed(nfp_prog, reg_both(0), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	emit_immed(nfp_prog, reg_both(1), 0,
		   IMMED_WIDTH_ALL, false, IMMED_SHIFT_0B);
	return 0;
}

static int
nfp_perf_event_output(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	swreg ptr_type;
	u32 ret_tgt;

	ptr_type = ur_load_imm_any(nfp_prog, meta->arg1.type, imm_a(nfp_prog));

	ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO + meta->func_id,
		     2, RELO_BR_HELPER);

	/* Load ptr type into A1 */
	wrp_mov(nfp_prog, reg_a(1), ptr_type);

	/* Load the return address into B0 */
	wrp_immed_relo(nfp_prog, reg_b(0), ret_tgt, RELO_IMMED_REL);

	if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
		return -EINVAL;

	return 0;
}

d985888f
JK
1771static int
1772nfp_queue_select(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1773{
1774 u32 jmp_tgt;
1775
1776 jmp_tgt = nfp_prog_current_offset(nfp_prog) + 5;
1777
1778	/* Make sure the queue id fits into the FW field */
1779 emit_alu(nfp_prog, reg_none(), reg_a(meta->insn.src_reg * 2),
1780 ALU_OP_AND_NOT_B, reg_imm(0xff));
1781 emit_br(nfp_prog, BR_BEQ, jmp_tgt, 2);
1782
1783 /* Set the 'queue selected' bit and the queue value */
1784 emit_shf(nfp_prog, pv_qsel_set(nfp_prog),
1785 pv_qsel_set(nfp_prog), SHF_OP_OR, reg_imm(1),
1786 SHF_SC_L_SHF, PKT_VEL_QSEL_SET_BIT);
1787 emit_ld_field(nfp_prog,
1788 pv_qsel_val(nfp_prog), 0x1, reg_b(meta->insn.src_reg * 2),
1789 SHF_SC_NONE, 0);
1790	/* Delay slots end here; we will jump over the next instruction if the
1791	 * queue value fits into the field.
1792 */
1793 emit_ld_field(nfp_prog,
1794 pv_qsel_val(nfp_prog), 0x1, reg_imm(NFP_NET_RXR_MAX),
1795 SHF_SC_NONE, 0);
1796
1797 if (!nfp_prog_confirm_current_offset(nfp_prog, jmp_tgt))
1798 return -EINVAL;
1799
1800 return 0;
1801}
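/* Illustrative trace of the sequence above (not from the driver): with the
 * source register holding queue id 0x02, id & ~0xff == 0, so the branch to
 * jmp_tgt is taken; the two defer-slot instructions still execute, setting
 * the 'queue selected' bit and the queue value 0x02, and the clamping
 * write of NFP_NET_RXR_MAX is skipped.  With id 0x100 the AND_NOT_B result
 * is non-zero, the branch falls through, and the queue value is
 * overwritten with the out-of-range marker NFP_NET_RXR_MAX.
 */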
1802
cd7df56e
JK
1803/* --- Callbacks --- */
1804static int mov_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1805{
1806 const struct bpf_insn *insn = &meta->insn;
b14157ee
JK
1807 u8 dst = insn->dst_reg * 2;
1808 u8 src = insn->src_reg * 2;
1809
1810 if (insn->src_reg == BPF_REG_10) {
1811 swreg stack_depth_reg;
cd7df56e 1812
b14157ee 1813 stack_depth_reg = ur_load_imm_any(nfp_prog,
1a7e62e6 1814 nfp_prog->stack_frame_depth,
b14157ee 1815 stack_imm(nfp_prog));
1a7e62e6
QM
1816 emit_alu(nfp_prog, reg_both(dst), stack_reg(nfp_prog),
1817 ALU_OP_ADD, stack_depth_reg);
b14157ee
JK
1818 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
1819 } else {
1820 wrp_reg_mov(nfp_prog, dst, src);
1821 wrp_reg_mov(nfp_prog, dst + 1, src + 1);
1822 }
cd7df56e
JK
1823
1824 return 0;
1825}
1826
1827static int mov_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1828{
1829 u64 imm = meta->insn.imm; /* sign extend */
1830
1831 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2), imm & ~0U);
1832 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), imm >> 32);
1833
1834 return 0;
1835}
1836
1837static int xor_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1838{
1839 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_XOR);
1840}
1841
1842static int xor_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1843{
1844 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_XOR, !meta->insn.imm);
1845}
1846
1847static int and_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1848{
1849 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_AND);
1850}
1851
1852static int and_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1853{
1854 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
1855}
1856
1857static int or_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1858{
1859 return wrp_alu64_reg(nfp_prog, meta, ALU_OP_OR);
1860}
1861
1862static int or_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1863{
1864 return wrp_alu64_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
1865}
1866
1867static int add_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1868{
1869 const struct bpf_insn *insn = &meta->insn;
1870
1871 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
1872 reg_a(insn->dst_reg * 2), ALU_OP_ADD,
1873 reg_b(insn->src_reg * 2));
1874 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
1875 reg_a(insn->dst_reg * 2 + 1), ALU_OP_ADD_C,
1876 reg_b(insn->src_reg * 2 + 1));
1877
1878 return 0;
1879}
1880
1881static int add_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1882{
1883 const struct bpf_insn *insn = &meta->insn;
1884 u64 imm = insn->imm; /* sign extend */
1885
1886 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_ADD, imm & ~0U);
1887 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_ADD_C, imm >> 32);
1888
1889 return 0;
1890}
1891
1892static int sub_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1893{
1894 const struct bpf_insn *insn = &meta->insn;
1895
1896 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2),
1897 reg_a(insn->dst_reg * 2), ALU_OP_SUB,
1898 reg_b(insn->src_reg * 2));
1899 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1),
1900 reg_a(insn->dst_reg * 2 + 1), ALU_OP_SUB_C,
1901 reg_b(insn->src_reg * 2 + 1));
1902
1903 return 0;
1904}
1905
1906static int sub_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1907{
1908 const struct bpf_insn *insn = &meta->insn;
1909 u64 imm = insn->imm; /* sign extend */
1910
1911 wrp_alu_imm(nfp_prog, insn->dst_reg * 2, ALU_OP_SUB, imm & ~0U);
1912 wrp_alu_imm(nfp_prog, insn->dst_reg * 2 + 1, ALU_OP_SUB_C, imm >> 32);
1913
1914 return 0;
1915}
1916
d3d23fdb
JW
1917static int mul_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1918{
1919 return wrp_mul(nfp_prog, meta, true, true);
1920}
1921
1922static int mul_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1923{
1924 return wrp_mul(nfp_prog, meta, true, false);
1925}
1926
2a952b03
JW
1927static int div_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1928{
1929 const struct bpf_insn *insn = &meta->insn;
1930
1931 return wrp_div_imm(nfp_prog, insn->dst_reg * 2, insn->imm);
1932}
1933
1934static int div_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1935{
1936	/* NOTE: the verifier hook has already rejected the cases where the
1937	 * verifier cannot prove the source operand is constant.
1938	 */
1939 return wrp_div_imm(nfp_prog, meta->insn.dst_reg * 2, meta->umin_src);
1940}
1941
254ef4d7
JW
1942static int neg_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
1943{
1944 const struct bpf_insn *insn = &meta->insn;
1945
1946 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2), reg_imm(0),
1947 ALU_OP_SUB, reg_b(insn->dst_reg * 2));
1948 emit_alu(nfp_prog, reg_both(insn->dst_reg * 2 + 1), reg_imm(0),
1949 ALU_OP_SUB_C, reg_b(insn->dst_reg * 2 + 1));
1950
1951 return 0;
1952}
1953
991f5b36
JW
1954/* Pseudo code:
1955 * if shift_amt >= 32
1956 * dst_high = dst_low << shift_amt[4:0]
1957 *     dst_low = 0
1958 * else
1959 * dst_high = (dst_high, dst_low) >> (32 - shift_amt)
1960 * dst_low = dst_low << shift_amt
1961 *
1962 * The indirect shift will use the same logic at runtime.
1963 */
1964static int __shl_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
1965{
1966 if (shift_amt < 32) {
1967 emit_shf(nfp_prog, reg_both(dst + 1), reg_a(dst + 1),
1968 SHF_OP_NONE, reg_b(dst), SHF_SC_R_DSHF,
1969 32 - shift_amt);
1970 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
1971 reg_b(dst), SHF_SC_L_SHF, shift_amt);
1972 } else if (shift_amt == 32) {
3cae1319
JK
1973 wrp_reg_mov(nfp_prog, dst + 1, dst);
1974 wrp_immed(nfp_prog, reg_both(dst), 0);
991f5b36
JW
1975 } else if (shift_amt > 32) {
1976 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
1977 reg_b(dst), SHF_SC_L_SHF, shift_amt - 32);
3cae1319
JK
1978 wrp_immed(nfp_prog, reg_both(dst), 0);
1979 }
cd7df56e
JK
1980
1981 return 0;
1982}
1983
991f5b36 1984static int shl_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
cd7df56e
JK
1985{
1986 const struct bpf_insn *insn = &meta->insn;
3cae1319
JK
1987 u8 dst = insn->dst_reg * 2;
1988
991f5b36
JW
1989 return __shl_imm64(nfp_prog, dst, insn->imm);
1990}
1991
1992static void shl_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
1993{
1994 emit_alu(nfp_prog, imm_both(nfp_prog), reg_imm(32), ALU_OP_SUB,
1995 reg_b(src));
1996 emit_alu(nfp_prog, reg_none(), imm_a(nfp_prog), ALU_OP_OR, reg_imm(0));
1997 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_a(dst + 1), SHF_OP_NONE,
1998 reg_b(dst), SHF_SC_R_DSHF);
1999}
2000
2001/* NOTE: for indirect left shift, HIGH part should be calculated first. */
2002static void shl_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2003{
2004 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2005 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2006 reg_b(dst), SHF_SC_L_SHF);
2007}
2008
2009static void shl_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2010{
2011 shl_reg64_lt32_high(nfp_prog, dst, src);
2012 shl_reg64_lt32_low(nfp_prog, dst, src);
2013}
2014
2015static void shl_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2016{
2017 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2018 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
2019 reg_b(dst), SHF_SC_L_SHF);
2020 wrp_immed(nfp_prog, reg_both(dst), 0);
2021}
2022
2023static int shl_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2024{
2025 const struct bpf_insn *insn = &meta->insn;
2026 u64 umin, umax;
2027 u8 dst, src;
2028
2029 dst = insn->dst_reg * 2;
662c5472
JW
2030 umin = meta->umin_src;
2031 umax = meta->umax_src;
991f5b36
JW
2032 if (umin == umax)
2033 return __shl_imm64(nfp_prog, dst, umin);
2034
2035 src = insn->src_reg * 2;
2036 if (umax < 32) {
2037 shl_reg64_lt32(nfp_prog, dst, src);
2038 } else if (umin >= 32) {
2039 shl_reg64_ge32(nfp_prog, dst, src);
2040 } else {
2041		/* Generate different instruction sequences depending on the
2042		 * runtime value of the shift amount.
2043 */
2044 u16 label_ge32, label_end;
2045
2046 label_ge32 = nfp_prog_current_offset(nfp_prog) + 7;
2047 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2048
2049 shl_reg64_lt32_high(nfp_prog, dst, src);
2050 label_end = nfp_prog_current_offset(nfp_prog) + 6;
2051 emit_br(nfp_prog, BR_UNC, label_end, 2);
2052 /* shl_reg64_lt32_low packed in delay slot. */
2053 shl_reg64_lt32_low(nfp_prog, dst, src);
2054
2055 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2056 return -EINVAL;
2057 shl_reg64_ge32(nfp_prog, dst, src);
2058
2059 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2060 return -EINVAL;
2061 }
2062
2063 return 0;
2064}
2065
2066/* Pseudo code:
2067 * if shift_amt >= 32
2068 *     dst_high = 0
2069 * dst_low = dst_high >> shift_amt[4:0]
2070 * else
2071 * dst_high = dst_high >> shift_amt
2072 * dst_low = (dst_high, dst_low) >> shift_amt
2073 *
2074 * The indirect shift will use the same logic at runtime.
2075 */
2076static int __shr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
2077{
2078 if (shift_amt < 32) {
2079 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
2080 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
2081 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
2082 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
2083 } else if (shift_amt == 32) {
3cae1319
JK
2084 wrp_reg_mov(nfp_prog, dst, dst + 1);
2085 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
991f5b36
JW
2086 } else if (shift_amt > 32) {
2087 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2088 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
3cae1319
JK
2089 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2090 }
cd7df56e
JK
2091
2092 return 0;
2093}
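/* Worked example for the shift_amt < 32 path above (illustrative only):
 * the 64-bit value 0x00000018_00000000 (dst + 1 = 0x18, dst = 0x0)
 * shifted right by 4 gives:
 *   dst     = (dst + 1, dst) >> 4 = 0x80000000
 *   dst + 1 = (dst + 1) >> 4      = 0x00000001
 * i.e. 0x00000001_80000000, the original value logically shifted right
 * by 4.
 */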
2094
991f5b36
JW
2095static int shr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2096{
2097 const struct bpf_insn *insn = &meta->insn;
2098 u8 dst = insn->dst_reg * 2;
2099
2100 return __shr_imm64(nfp_prog, dst, insn->imm);
2101}
2102
2103/* NOTE: for indirect right shift, LOW part should be calculated first. */
2104static void shr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2105{
2106 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2107 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_NONE,
2108 reg_b(dst + 1), SHF_SC_R_SHF);
2109}
2110
2111static void shr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2112{
2113 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2114 emit_shf_indir(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
2115 reg_b(dst), SHF_SC_R_DSHF);
2116}
2117
2118static void shr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2119{
2120 shr_reg64_lt32_low(nfp_prog, dst, src);
2121 shr_reg64_lt32_high(nfp_prog, dst, src);
2122}
2123
2124static void shr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2125{
2126 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_imm(0));
2127 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_NONE,
2128 reg_b(dst + 1), SHF_SC_R_SHF);
2129 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2130}
2131
2132static int shr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2133{
2134 const struct bpf_insn *insn = &meta->insn;
2135 u64 umin, umax;
2136 u8 dst, src;
2137
2138 dst = insn->dst_reg * 2;
662c5472
JW
2139 umin = meta->umin_src;
2140 umax = meta->umax_src;
991f5b36
JW
2141 if (umin == umax)
2142 return __shr_imm64(nfp_prog, dst, umin);
2143
2144 src = insn->src_reg * 2;
2145 if (umax < 32) {
2146 shr_reg64_lt32(nfp_prog, dst, src);
2147 } else if (umin >= 32) {
2148 shr_reg64_ge32(nfp_prog, dst, src);
2149 } else {
2150		/* Generate different instruction sequences depending on the
2151		 * runtime value of the shift amount.
2152 */
2153 u16 label_ge32, label_end;
2154
2155 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
2156 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2157 shr_reg64_lt32_low(nfp_prog, dst, src);
2158 label_end = nfp_prog_current_offset(nfp_prog) + 6;
2159 emit_br(nfp_prog, BR_UNC, label_end, 2);
2160 /* shr_reg64_lt32_high packed in delay slot. */
2161 shr_reg64_lt32_high(nfp_prog, dst, src);
2162
2163 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2164 return -EINVAL;
2165 shr_reg64_ge32(nfp_prog, dst, src);
2166
2167 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2168 return -EINVAL;
2169 }
2170
2171 return 0;
2172}
2173
f43d0f17
JW
2174/* Code logic is the same as __shr_imm64, except that ashr requires the
2175 * signedness bit, which is conveyed through the PREV_ALU result.
2176 */
c217abcc 2177static int __ashr_imm64(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
f43d0f17 2178{
c217abcc 2179 if (shift_amt < 32) {
f43d0f17 2180 emit_shf(nfp_prog, reg_both(dst), reg_a(dst + 1), SHF_OP_NONE,
c217abcc 2181 reg_b(dst), SHF_SC_R_DSHF, shift_amt);
f43d0f17
JW
2182 /* Set signedness bit. */
2183 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
2184 reg_imm(0));
2185 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
c217abcc
JW
2186 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt);
2187 } else if (shift_amt == 32) {
f43d0f17
JW
2188 /* NOTE: this also helps setting signedness bit. */
2189 wrp_reg_mov(nfp_prog, dst, dst + 1);
2190 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2191 reg_b(dst + 1), SHF_SC_R_SHF, 31);
c217abcc 2192 } else if (shift_amt > 32) {
f43d0f17
JW
2193 emit_alu(nfp_prog, reg_none(), reg_a(dst + 1), ALU_OP_OR,
2194 reg_imm(0));
2195 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
c217abcc 2196 reg_b(dst + 1), SHF_SC_R_SHF, shift_amt - 32);
f43d0f17
JW
2197 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2198 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2199 }
2200
2201 return 0;
2202}
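/* Worked example for the shift_amt < 32 path above (illustrative only):
 * -2^32, i.e. 0xffffffff_00000000 (dst + 1 = 0xffffffff, dst = 0x0),
 * arithmetically shifted right by 4 gives:
 *   dst     = (dst + 1, dst) >> 4    = 0xf0000000
 *   dst + 1 = (dst + 1) >> 4 (arith) = 0xffffffff
 * i.e. 0xffffffff_f0000000 == -2^28.  The PREV_ALU result produced by the
 * OR instruction is what makes the single-register ASHR replicate the
 * sign bit.
 */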
2203
c217abcc
JW
2204static int ashr_imm64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2205{
2206 const struct bpf_insn *insn = &meta->insn;
2207 u8 dst = insn->dst_reg * 2;
2208
2209 return __ashr_imm64(nfp_prog, dst, insn->imm);
2210}
2211
2212static void ashr_reg64_lt32_high(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2213{
2214 /* NOTE: the first insn will set both indirect shift amount (source A)
2215 * and signedness bit (MSB of result).
2216 */
2217 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
2218 emit_shf_indir(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2219 reg_b(dst + 1), SHF_SC_R_SHF);
2220}
2221
2222static void ashr_reg64_lt32_low(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2223{
2224	/* NOTE: it is the same as a logical shift because we don't need to
2225	 * shift in the signedness bit when the shift amount is less than 32.
2226 */
2227 return shr_reg64_lt32_low(nfp_prog, dst, src);
2228}
2229
2230static void ashr_reg64_lt32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2231{
2232 ashr_reg64_lt32_low(nfp_prog, dst, src);
2233 ashr_reg64_lt32_high(nfp_prog, dst, src);
2234}
2235
2236static void ashr_reg64_ge32(struct nfp_prog *nfp_prog, u8 dst, u8 src)
2237{
2238 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst + 1));
2239 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2240 reg_b(dst + 1), SHF_SC_R_SHF);
2241 emit_shf(nfp_prog, reg_both(dst + 1), reg_none(), SHF_OP_ASHR,
2242 reg_b(dst + 1), SHF_SC_R_SHF, 31);
2243}
2244
2245/* Like ashr_imm64, but need to use indirect shift. */
2246static int ashr_reg64(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2247{
2248 const struct bpf_insn *insn = &meta->insn;
2249 u64 umin, umax;
2250 u8 dst, src;
2251
2252 dst = insn->dst_reg * 2;
662c5472
JW
2253 umin = meta->umin_src;
2254 umax = meta->umax_src;
c217abcc
JW
2255 if (umin == umax)
2256 return __ashr_imm64(nfp_prog, dst, umin);
2257
2258 src = insn->src_reg * 2;
2259 if (umax < 32) {
2260 ashr_reg64_lt32(nfp_prog, dst, src);
2261 } else if (umin >= 32) {
2262 ashr_reg64_ge32(nfp_prog, dst, src);
2263 } else {
2264 u16 label_ge32, label_end;
2265
2266 label_ge32 = nfp_prog_current_offset(nfp_prog) + 6;
2267 emit_br_bset(nfp_prog, reg_a(src), 5, label_ge32, 0);
2268 ashr_reg64_lt32_low(nfp_prog, dst, src);
2269 label_end = nfp_prog_current_offset(nfp_prog) + 6;
2270 emit_br(nfp_prog, BR_UNC, label_end, 2);
2271 /* ashr_reg64_lt32_high packed in delay slot. */
2272 ashr_reg64_lt32_high(nfp_prog, dst, src);
2273
2274 if (!nfp_prog_confirm_current_offset(nfp_prog, label_ge32))
2275 return -EINVAL;
2276 ashr_reg64_ge32(nfp_prog, dst, src);
2277
2278 if (!nfp_prog_confirm_current_offset(nfp_prog, label_end))
2279 return -EINVAL;
2280 }
2281
2282 return 0;
2283}
2284
cd7df56e
JK
2285static int mov_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2286{
2287 const struct bpf_insn *insn = &meta->insn;
2288
2289 wrp_reg_mov(nfp_prog, insn->dst_reg * 2, insn->src_reg * 2);
2290 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2291
2292 return 0;
2293}
2294
2295static int mov_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2296{
2297 const struct bpf_insn *insn = &meta->insn;
2298
2299 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2), insn->imm);
2300 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2301
2302 return 0;
2303}
2304
2305static int xor_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2306{
2307 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_XOR);
2308}
2309
2310static int xor_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2311{
2312 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_XOR, !~meta->insn.imm);
2313}
2314
2315static int and_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2316{
2317 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_AND);
2318}
2319
2320static int and_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2321{
2322 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_AND, !~meta->insn.imm);
2323}
2324
2325static int or_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2326{
2327 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_OR);
2328}
2329
2330static int or_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2331{
2332 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_OR, !meta->insn.imm);
2333}
2334
2335static int add_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2336{
2337 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_ADD);
2338}
2339
2340static int add_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2341{
2342 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_ADD, !meta->insn.imm);
2343}
2344
2345static int sub_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2346{
2347 return wrp_alu32_reg(nfp_prog, meta, ALU_OP_SUB);
2348}
2349
2350static int sub_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2351{
2352 return wrp_alu32_imm(nfp_prog, meta, ALU_OP_SUB, !meta->insn.imm);
2353}
2354
d3d23fdb
JW
2355static int mul_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2356{
2357 return wrp_mul(nfp_prog, meta, false, true);
2358}
2359
2360static int mul_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2361{
2362 return wrp_mul(nfp_prog, meta, false, false);
2363}
2364
2a952b03
JW
2365static int div_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2366{
2367 return div_reg64(nfp_prog, meta);
2368}
2369
2370static int div_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2371{
2372 return div_imm64(nfp_prog, meta);
2373}
2374
254ef4d7
JW
2375static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2376{
2377 u8 dst = meta->insn.dst_reg * 2;
2378
2379 emit_alu(nfp_prog, reg_both(dst), reg_imm(0), ALU_OP_SUB, reg_b(dst));
2380 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
2381
2382 return 0;
2383}
2384
84708c13
JW
2385static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt)
2386{
2387 /* Set signedness bit (MSB of result). */
2388 emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, reg_imm(0));
2389 emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst),
2390 SHF_SC_R_SHF, shift_amt);
2391 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2392
2393 return 0;
2394}
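/* Worked example (illustrative only): a 32-bit arithmetic shift of
 * dst = 0x80000000 by 1 yields 0xc0000000, and the high word is cleared
 * to honour the BPF rule that 32-bit ALU ops zero-extend the destination.
 */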
2395
2396static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2397{
2398 const struct bpf_insn *insn = &meta->insn;
2399 u64 umin, umax;
2400 u8 dst, src;
2401
2402 dst = insn->dst_reg * 2;
2403 umin = meta->umin_src;
2404 umax = meta->umax_src;
2405 if (umin == umax)
2406 return __ashr_imm(nfp_prog, dst, umin);
2407
2408 src = insn->src_reg * 2;
2409 /* NOTE: the first insn will set both indirect shift amount (source A)
2410 * and signedness bit (MSB of result).
2411 */
2412 emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst));
2413 emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR,
2414 reg_b(dst), SHF_SC_R_SHF);
2415 wrp_immed(nfp_prog, reg_both(dst + 1), 0);
2416
2417 return 0;
2418}
2419
2420static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2421{
2422 const struct bpf_insn *insn = &meta->insn;
2423 u8 dst = insn->dst_reg * 2;
2424
2425 return __ashr_imm(nfp_prog, dst, insn->imm);
2426}
2427
cd7df56e
JK
2428static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2429{
2430 const struct bpf_insn *insn = &meta->insn;
2431
2432 if (!insn->imm)
2433 return 1; /* TODO: zero shift means indirect */
2434
2435 emit_shf(nfp_prog, reg_both(insn->dst_reg * 2),
2436 reg_none(), SHF_OP_NONE, reg_b(insn->dst_reg * 2),
2437 SHF_SC_L_SHF, insn->imm);
2438 wrp_immed(nfp_prog, reg_both(insn->dst_reg * 2 + 1), 0);
2439
2440 return 0;
2441}
2442
3119d1fd
JK
2443static int end_reg32(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2444{
2445 const struct bpf_insn *insn = &meta->insn;
2446 u8 gpr = insn->dst_reg * 2;
2447
2448 switch (insn->imm) {
2449 case 16:
2450 emit_ld_field(nfp_prog, reg_both(gpr), 0x9, reg_b(gpr),
2451 SHF_SC_R_ROT, 8);
2452 emit_ld_field(nfp_prog, reg_both(gpr), 0xe, reg_a(gpr),
2453 SHF_SC_R_SHF, 16);
2454
2455 wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
2456 break;
2457 case 32:
2458 wrp_end32(nfp_prog, reg_a(gpr), gpr);
2459 wrp_immed(nfp_prog, reg_both(gpr + 1), 0);
2460 break;
2461 case 64:
2462 wrp_mov(nfp_prog, imm_a(nfp_prog), reg_b(gpr + 1));
2463
2464 wrp_end32(nfp_prog, reg_a(gpr), gpr + 1);
2465 wrp_end32(nfp_prog, imm_a(nfp_prog), gpr);
2466 break;
2467 }
2468
2469 return 0;
2470}
2471
cd7df56e
JK
2472static int imm_ld8_part2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2473{
9f16c8ab
JK
2474 struct nfp_insn_meta *prev = nfp_meta_prev(meta);
2475 u32 imm_lo, imm_hi;
2476 u8 dst;
2477
2478 dst = prev->insn.dst_reg * 2;
2479 imm_lo = prev->insn.imm;
2480 imm_hi = meta->insn.imm;
2481
2482 wrp_immed(nfp_prog, reg_both(dst), imm_lo);
2483
2484 /* mov is always 1 insn, load imm may be two, so try to use mov */
2485 if (imm_hi == imm_lo)
2486 wrp_mov(nfp_prog, reg_both(dst + 1), reg_a(dst));
2487 else
2488 wrp_immed(nfp_prog, reg_both(dst + 1), imm_hi);
cd7df56e
JK
2489
2490 return 0;
2491}
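/* Illustrative example (not from the driver): for
 * BPF_LD | BPF_IMM | BPF_DW with the 64-bit constant 0x12345678_12345678
 * both halves equal 0x12345678, so the high word is emitted as a single
 * mov from the freshly written low word instead of another immed load,
 * which may cost two NFP instructions for a wide constant.
 */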
2492
2493static int imm_ld8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2494{
cd7df56e 2495 meta->double_cb = imm_ld8_part2;
cd7df56e
JK
2496 return 0;
2497}
2498
2499static int data_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2500{
2501 return construct_data_ld(nfp_prog, meta->insn.imm, 1);
2502}
2503
2504static int data_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2505{
2506 return construct_data_ld(nfp_prog, meta->insn.imm, 2);
2507}
2508
2509static int data_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2510{
2511 return construct_data_ld(nfp_prog, meta->insn.imm, 4);
2512}
2513
2514static int data_ind_ld1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2515{
2516 return construct_data_ind_ld(nfp_prog, meta->insn.imm,
0a793977 2517 meta->insn.src_reg * 2, 1);
cd7df56e
JK
2518}
2519
2520static int data_ind_ld2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2521{
2522 return construct_data_ind_ld(nfp_prog, meta->insn.imm,
0a793977 2523 meta->insn.src_reg * 2, 2);
cd7df56e
JK
2524}
2525
2526static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2527{
2528 return construct_data_ind_ld(nfp_prog, meta->insn.imm,
0a793977 2529 meta->insn.src_reg * 2, 4);
cd7df56e
JK
2530}
2531
a82b23fb
JK
2532static int
2533mem_ldx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
d3488480 2534 unsigned int size, unsigned int ptr_off)
a82b23fb 2535{
d3488480 2536 return mem_op_stack(nfp_prog, meta, size, ptr_off,
b14157ee
JK
2537 meta->insn.dst_reg * 2, meta->insn.src_reg * 2,
2538 true, wrp_lmem_load);
a82b23fb
JK
2539}
2540
943c57b9
JK
2541static int mem_ldx_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2542 u8 size)
cd7df56e 2543{
bfddbc8a
JK
2544 swreg dst = reg_both(meta->insn.dst_reg * 2);
2545
943c57b9 2546 switch (meta->insn.off) {
c6c580d7
JK
2547 case offsetof(struct __sk_buff, len):
2548 if (size != FIELD_SIZEOF(struct __sk_buff, len))
943c57b9 2549 return -EOPNOTSUPP;
bfddbc8a
JK
2550 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog));
2551 break;
c6c580d7
JK
2552 case offsetof(struct __sk_buff, data):
2553 if (size != FIELD_SIZEOF(struct __sk_buff, data))
bfddbc8a
JK
2554 return -EOPNOTSUPP;
2555 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
2556 break;
c6c580d7
JK
2557 case offsetof(struct __sk_buff, data_end):
2558 if (size != FIELD_SIZEOF(struct __sk_buff, data_end))
bfddbc8a
JK
2559 return -EOPNOTSUPP;
2560 emit_alu(nfp_prog, dst,
2561 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
943c57b9
JK
2562 break;
2563 default:
46c50518 2564 return -EOPNOTSUPP;
943c57b9
JK
2565 }
2566
2567 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
cd7df56e 2568
6d677075
JK
2569 return 0;
2570}
2571
943c57b9
JK
2572static int mem_ldx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2573 u8 size)
6d677075 2574{
b3f868df 2575 swreg dst = reg_both(meta->insn.dst_reg * 2);
6d677075 2576
943c57b9 2577 switch (meta->insn.off) {
c6c580d7
JK
2578 case offsetof(struct xdp_md, data):
2579 if (size != FIELD_SIZEOF(struct xdp_md, data))
2580 return -EOPNOTSUPP;
943c57b9
JK
2581 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog));
2582 break;
c6c580d7
JK
2583 case offsetof(struct xdp_md, data_end):
2584 if (size != FIELD_SIZEOF(struct xdp_md, data_end))
2585 return -EOPNOTSUPP;
943c57b9
JK
2586 emit_alu(nfp_prog, dst,
2587 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
2588 break;
2589 default:
2590 return -EOPNOTSUPP;
2591 }
6d677075 2592
943c57b9 2593 wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
6cd80b55 2594
cd7df56e
JK
2595 return 0;
2596}
2597
2ca71441
JK
2598static int
2599mem_ldx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2600 unsigned int size)
2601{
2602 swreg tmp_reg;
2603
2604 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2605
3dd43c33
JK
2606 return data_ld_host_order_addr32(nfp_prog, meta->insn.src_reg * 2,
2607 tmp_reg, meta->insn.dst_reg * 2, size);
2608}
2609
2610static int
2611mem_ldx_emem(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2612 unsigned int size)
2613{
2614 swreg tmp_reg;
2615
2616 tmp_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2617
2618 return data_ld_host_order_addr40(nfp_prog, meta->insn.src_reg * 2,
2619 tmp_reg, meta->insn.dst_reg * 2, size);
2ca71441
JK
2620}
2621
be759237
JW
2622static void
2623mem_ldx_data_init_pktcache(struct nfp_prog *nfp_prog,
2624 struct nfp_insn_meta *meta)
2625{
2626 s16 range_start = meta->pkt_cache.range_start;
2627 s16 range_end = meta->pkt_cache.range_end;
2628 swreg src_base, off;
2629 u8 xfer_num, len;
2630 bool indir;
2631
2632 off = re_load_imm_any(nfp_prog, range_start, imm_b(nfp_prog));
2633 src_base = reg_a(meta->insn.src_reg * 2);
2634 len = range_end - range_start;
2635 xfer_num = round_up(len, REG_WIDTH) / REG_WIDTH;
2636
2637 indir = len > 8 * REG_WIDTH;
2638 /* Setup PREV_ALU for indirect mode. */
2639 if (indir)
2640 wrp_immed(nfp_prog, reg_none(),
2641 CMD_OVE_LEN | FIELD_PREP(CMD_OV_LEN, xfer_num - 1));
2642
2643 /* Cache memory into transfer-in registers. */
2644 emit_cmd_any(nfp_prog, CMD_TGT_READ32_SWAP, CMD_MODE_32b, 0, src_base,
b556ddd9 2645 off, xfer_num - 1, CMD_CTX_SWAP, indir);
be759237
JW
2646}
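/* Illustrative sizing (assuming 4-byte transfer registers, i.e.
 * REG_WIDTH == 4): caching a 40-byte range gives xfer_num = 10, which
 * exceeds the 8-register limit of the plain encoding, so indir is true
 * and the length override CMD_OV_LEN = xfer_num - 1 = 9 is staged in
 * PREV_ALU for the indirect read command.
 */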
2647
91ff69e8
JW
2648static int
2649mem_ldx_data_from_pktcache_unaligned(struct nfp_prog *nfp_prog,
2650 struct nfp_insn_meta *meta,
2651 unsigned int size)
2652{
2653 s16 range_start = meta->pkt_cache.range_start;
2654 s16 insn_off = meta->insn.off - range_start;
2655 swreg dst_lo, dst_hi, src_lo, src_mid;
2656 u8 dst_gpr = meta->insn.dst_reg * 2;
2657 u8 len_lo = size, len_mid = 0;
2658 u8 idx = insn_off / REG_WIDTH;
2659 u8 off = insn_off % REG_WIDTH;
2660
2661 dst_hi = reg_both(dst_gpr + 1);
2662 dst_lo = reg_both(dst_gpr);
2663 src_lo = reg_xfer(idx);
2664
2665 /* The read length could involve as many as three registers. */
2666 if (size > REG_WIDTH - off) {
2667 /* Calculate the part in the second register. */
2668 len_lo = REG_WIDTH - off;
2669 len_mid = size - len_lo;
2670
2671 /* Calculate the part in the third register. */
2672 if (size > 2 * REG_WIDTH - off)
2673 len_mid = REG_WIDTH;
2674 }
2675
2676 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, len_lo, off);
2677
2678 if (!len_mid) {
2679 wrp_immed(nfp_prog, dst_hi, 0);
2680 return 0;
2681 }
2682
2683 src_mid = reg_xfer(idx + 1);
2684
2685 if (size <= REG_WIDTH) {
2686 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid, len_mid, len_lo);
2687 wrp_immed(nfp_prog, dst_hi, 0);
2688 } else {
2689 swreg src_hi = reg_xfer(idx + 2);
2690
2691 wrp_reg_or_subpart(nfp_prog, dst_lo, src_mid,
2692 REG_WIDTH - len_lo, len_lo);
2693 wrp_reg_subpart(nfp_prog, dst_hi, src_mid, len_lo,
2694 REG_WIDTH - len_lo);
2695 wrp_reg_or_subpart(nfp_prog, dst_hi, src_hi, REG_WIDTH - len_lo,
2696 len_lo);
2697 }
2698
2699 return 0;
2700}
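/* Worked example (illustrative, assuming REG_WIDTH == 4): an 8-byte load
 * at offset 3 into the cached range gives idx = 0, off = 3, len_lo = 1
 * and len_mid = REG_WIDTH = 4, so the value is stitched together from
 * three transfer registers: dst_lo = byte 3 of xfer0 | bytes 0-2 of
 * xfer1, and dst_hi = byte 3 of xfer1 | bytes 0-2 of xfer2.
 */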
2701
be759237
JW
2702static int
2703mem_ldx_data_from_pktcache_aligned(struct nfp_prog *nfp_prog,
2704 struct nfp_insn_meta *meta,
2705 unsigned int size)
2706{
2707 swreg dst_lo, dst_hi, src_lo;
2708 u8 dst_gpr, idx;
2709
2710 idx = (meta->insn.off - meta->pkt_cache.range_start) / REG_WIDTH;
2711 dst_gpr = meta->insn.dst_reg * 2;
2712 dst_hi = reg_both(dst_gpr + 1);
2713 dst_lo = reg_both(dst_gpr);
2714 src_lo = reg_xfer(idx);
2715
2716 if (size < REG_WIDTH) {
2717 wrp_reg_subpart(nfp_prog, dst_lo, src_lo, size, 0);
2718 wrp_immed(nfp_prog, dst_hi, 0);
2719 } else if (size == REG_WIDTH) {
2720 wrp_mov(nfp_prog, dst_lo, src_lo);
2721 wrp_immed(nfp_prog, dst_hi, 0);
2722 } else {
2723 swreg src_hi = reg_xfer(idx + 1);
2724
2725 wrp_mov(nfp_prog, dst_lo, src_lo);
2726 wrp_mov(nfp_prog, dst_hi, src_hi);
2727 }
2728
2729 return 0;
2730}
2731
2732static int
2733mem_ldx_data_from_pktcache(struct nfp_prog *nfp_prog,
2734 struct nfp_insn_meta *meta, unsigned int size)
2735{
2736 u8 off = meta->insn.off - meta->pkt_cache.range_start;
2737
91ff69e8
JW
2738 if (IS_ALIGNED(off, REG_WIDTH))
2739 return mem_ldx_data_from_pktcache_aligned(nfp_prog, meta, size);
be759237 2740
91ff69e8 2741 return mem_ldx_data_from_pktcache_unaligned(nfp_prog, meta, size);
be759237
JW
2742}
2743
2ca71441
JK
2744static int
2745mem_ldx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2746 unsigned int size)
2747{
9879a381
JW
2748 if (meta->ldst_gather_len)
2749 return nfp_cpp_memcpy(nfp_prog, meta);
2750
2ca71441 2751 if (meta->ptr.type == PTR_TO_CTX) {
012bb8a8 2752 if (nfp_prog->type == BPF_PROG_TYPE_XDP)
2ca71441
JK
2753 return mem_ldx_xdp(nfp_prog, meta, size);
2754 else
2755 return mem_ldx_skb(nfp_prog, meta, size);
2756 }
2757
be759237
JW
2758 if (meta->ptr.type == PTR_TO_PACKET) {
2759 if (meta->pkt_cache.range_end) {
2760 if (meta->pkt_cache.do_init)
2761 mem_ldx_data_init_pktcache(nfp_prog, meta);
2762
2763 return mem_ldx_data_from_pktcache(nfp_prog, meta, size);
2764 } else {
2765 return mem_ldx_data(nfp_prog, meta, size);
2766 }
2767 }
2ca71441 2768
a82b23fb 2769 if (meta->ptr.type == PTR_TO_STACK)
d3488480
JK
2770 return mem_ldx_stack(nfp_prog, meta, size,
2771 meta->ptr.off + meta->ptr.var_off.value);
3dd43c33
JK
2772
2773 if (meta->ptr.type == PTR_TO_MAP_VALUE)
2774 return mem_ldx_emem(nfp_prog, meta, size);
a82b23fb 2775
2ca71441
JK
2776 return -EOPNOTSUPP;
2777}
2778
2779static int mem_ldx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2780{
2781 return mem_ldx(nfp_prog, meta, 1);
2782}
2783
2784static int mem_ldx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2785{
2786 return mem_ldx(nfp_prog, meta, 2);
2787}
2788
6d677075
JK
2789static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2790{
2ca71441
JK
2791 return mem_ldx(nfp_prog, meta, 4);
2792}
2793
2794static int mem_ldx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2795{
2796 return mem_ldx(nfp_prog, meta, 8);
6d677075
JK
2797}
2798
e663fe38
JK
2799static int
2800mem_st_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2801 unsigned int size)
2802{
2803 u64 imm = meta->insn.imm; /* sign extend */
2804 swreg off_reg;
2805
2806 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2807
2808 return data_st_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
2809 imm, size);
2810}
2811
2812static int mem_st(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2813 unsigned int size)
19d0f54e 2814{
e663fe38
JK
2815 if (meta->ptr.type == PTR_TO_PACKET)
2816 return mem_st_data(nfp_prog, meta, size);
2817
46c50518 2818 return -EOPNOTSUPP;
19d0f54e
JK
2819}
2820
e663fe38
JK
2821static int mem_st1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2822{
2823 return mem_st(nfp_prog, meta, 1);
2824}
2825
2826static int mem_st2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2827{
2828 return mem_st(nfp_prog, meta, 2);
2829}
2830
2831static int mem_st4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2832{
2833 return mem_st(nfp_prog, meta, 4);
2834}
2835
2836static int mem_st8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
6d677075 2837{
e663fe38
JK
2838 return mem_st(nfp_prog, meta, 8);
2839}
2840
2841static int
2842mem_stx_data(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2843 unsigned int size)
2844{
2845 swreg off_reg;
2846
2847 off_reg = re_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2848
2849 return data_stx_host_order(nfp_prog, meta->insn.dst_reg * 2, off_reg,
2850 meta->insn.src_reg * 2, size);
2851}
2852
ee9133a8
JK
2853static int
2854mem_stx_stack(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
d3488480 2855 unsigned int size, unsigned int ptr_off)
ee9133a8 2856{
d3488480 2857 return mem_op_stack(nfp_prog, meta, size, ptr_off,
b14157ee
JK
2858 meta->insn.src_reg * 2, meta->insn.dst_reg * 2,
2859 false, wrp_lmem_store);
ee9133a8
JK
2860}
2861
d985888f
JK
2862static int mem_stx_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2863{
2864 switch (meta->insn.off) {
2865 case offsetof(struct xdp_md, rx_queue_index):
2866 return nfp_queue_select(nfp_prog, meta);
2867 }
2868
2869 WARN_ON_ONCE(1); /* verifier should have rejected bad accesses */
2870 return -EOPNOTSUPP;
2871}
2872
e663fe38
JK
2873static int
2874mem_stx(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
2875 unsigned int size)
2876{
2877 if (meta->ptr.type == PTR_TO_PACKET)
2878 return mem_stx_data(nfp_prog, meta, size);
2879
ee9133a8 2880 if (meta->ptr.type == PTR_TO_STACK)
d3488480
JK
2881 return mem_stx_stack(nfp_prog, meta, size,
2882 meta->ptr.off + meta->ptr.var_off.value);
ee9133a8 2883
46c50518 2884 return -EOPNOTSUPP;
6d677075
JK
2885}
2886
e663fe38
JK
2887static int mem_stx1(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2888{
2889 return mem_stx(nfp_prog, meta, 1);
2890}
2891
2892static int mem_stx2(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2893{
2894 return mem_stx(nfp_prog, meta, 2);
2895}
2896
6d677075
JK
2897static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2898{
d985888f
JK
2899 if (meta->ptr.type == PTR_TO_CTX)
2900 if (nfp_prog->type == BPF_PROG_TYPE_XDP)
2901 return mem_stx_xdp(nfp_prog, meta);
e663fe38
JK
2902 return mem_stx(nfp_prog, meta, 4);
2903}
2ca71441 2904
e663fe38
JK
2905static int mem_stx8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2906{
2907 return mem_stx(nfp_prog, meta, 8);
6d677075
JK
2908}
2909
dcb0c27f
JK
2910static int
2911mem_xadd(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, bool is64)
2912{
dcb0c27f
JK
2913 u8 dst_gpr = meta->insn.dst_reg * 2;
2914 u8 src_gpr = meta->insn.src_reg * 2;
41aed09c
JK
2915 unsigned int full_add, out;
2916 swreg addra, addrb, off;
dcb0c27f
JK
2917
2918 off = ur_load_imm_any(nfp_prog, meta->insn.off, imm_b(nfp_prog));
2919
41aed09c
JK
2920	/* We can fit 16 bits into the command immediate.  If we know the
2921	 * value is guaranteed to either always or never fit into 16 bits,
2922	 * we only generate code to handle that particular case; otherwise
2923	 * we generate code for both.
2924 */
2925 out = nfp_prog_current_offset(nfp_prog);
2926 full_add = nfp_prog_current_offset(nfp_prog);
2927
2928 if (meta->insn.off) {
2929 out += 2;
2930 full_add += 2;
2931 }
2932 if (meta->xadd_maybe_16bit) {
2933 out += 3;
2934 full_add += 3;
2935 }
2936 if (meta->xadd_over_16bit)
2937 out += 2 + is64;
2938 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
2939 out += 5;
2940 full_add += 5;
2941 }
2942
2943 /* Generate the branch for choosing add_imm vs add */
2944 if (meta->xadd_maybe_16bit && meta->xadd_over_16bit) {
2945 swreg max_imm = imm_a(nfp_prog);
2946
2947 wrp_immed(nfp_prog, max_imm, 0xffff);
2948 emit_alu(nfp_prog, reg_none(),
2949 max_imm, ALU_OP_SUB, reg_b(src_gpr));
2950 emit_alu(nfp_prog, reg_none(),
2951 reg_imm(0), ALU_OP_SUB_C, reg_b(src_gpr + 1));
2952 emit_br(nfp_prog, BR_BLO, full_add, meta->insn.off ? 2 : 0);
2953 /* defer for add */
2954 }
2955
dcb0c27f
JK
2956	/* If the insn has an offset, add it to the address */
2957 if (!meta->insn.off) {
2958 addra = reg_a(dst_gpr);
2959 addrb = reg_b(dst_gpr + 1);
2960 } else {
2961 emit_alu(nfp_prog, imma_a(nfp_prog),
2962 reg_a(dst_gpr), ALU_OP_ADD, off);
2963 emit_alu(nfp_prog, imma_b(nfp_prog),
2964 reg_a(dst_gpr + 1), ALU_OP_ADD_C, reg_imm(0));
2965 addra = imma_a(nfp_prog);
2966 addrb = imma_b(nfp_prog);
2967 }
2968
41aed09c
JK
2969 /* Generate the add_imm if 16 bits are possible */
2970 if (meta->xadd_maybe_16bit) {
2971 swreg prev_alu = imm_a(nfp_prog);
2972
2973 wrp_immed(nfp_prog, prev_alu,
2974 FIELD_PREP(CMD_OVE_DATA, 2) |
2975 CMD_OVE_LEN |
2976 FIELD_PREP(CMD_OV_LEN, 0x8 | is64 << 2));
2977 wrp_reg_or_subpart(nfp_prog, prev_alu, reg_b(src_gpr), 2, 2);
2978 emit_cmd_indir(nfp_prog, CMD_TGT_ADD_IMM, CMD_MODE_40b_BA, 0,
2979 addra, addrb, 0, CMD_CTX_NO_SWAP);
2980
2981 if (meta->xadd_over_16bit)
2982 emit_br(nfp_prog, BR_UNC, out, 0);
2983 }
2984
2985 if (!nfp_prog_confirm_current_offset(nfp_prog, full_add))
2986 return -EINVAL;
2987
2988 /* Generate the add if 16 bits are not guaranteed */
2989 if (meta->xadd_over_16bit) {
2990 emit_cmd(nfp_prog, CMD_TGT_ADD, CMD_MODE_40b_BA, 0,
2991 addra, addrb, is64 << 2,
2992 is64 ? CMD_CTX_SWAP_DEFER2 : CMD_CTX_SWAP_DEFER1);
2993
2994 wrp_mov(nfp_prog, reg_xfer(0), reg_a(src_gpr));
2995 if (is64)
2996 wrp_mov(nfp_prog, reg_xfer(1), reg_a(src_gpr + 1));
2997 }
2998
2999 if (!nfp_prog_confirm_current_offset(nfp_prog, out))
3000 return -EINVAL;
dcb0c27f
JK
3001
3002 return 0;
3003}
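/* Rough shape of the code generated above (illustrative, not a literal
 * listing): if the verifier proved the added value always fits in 16 bits
 * only the add_imm command is emitted; if it may exceed 16 bits only the
 * full add is emitted; if both are possible, a runtime compare against
 * 0xffff (SUB plus SUB_C across both words) branches to the full add,
 * and the add_imm path jumps over it to 'out'.
 */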
3004
3005static int mem_xadd4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3006{
3007 return mem_xadd(nfp_prog, meta, false);
3008}
3009
3010static int mem_xadd8(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3011{
3012 return mem_xadd(nfp_prog, meta, true);
3013}
3014
cd7df56e
JK
3015static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3016{
cd7df56e
JK
3017 emit_br(nfp_prog, BR_UNC, meta->insn.off, 0);
3018
3019 return 0;
3020}
3021
3022static int jeq_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3023{
3024 const struct bpf_insn *insn = &meta->insn;
3025 u64 imm = insn->imm; /* sign extend */
b3f868df
JK
3026 swreg or1, or2, tmp_reg;
3027
3028 or1 = reg_a(insn->dst_reg * 2);
3029 or2 = reg_b(insn->dst_reg * 2 + 1);
cd7df56e 3030
cd7df56e
JK
3031 if (imm & ~0U) {
3032 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3033 emit_alu(nfp_prog, imm_a(nfp_prog),
3034 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
3035 or1 = imm_a(nfp_prog);
3036 }
3037
3038 if (imm >> 32) {
3039 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
3040 emit_alu(nfp_prog, imm_b(nfp_prog),
3041 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
3042 or2 = imm_b(nfp_prog);
3043 }
3044
3045 emit_alu(nfp_prog, reg_none(), or1, ALU_OP_OR, or2);
3046 emit_br(nfp_prog, BR_BEQ, insn->off, 0);
3047
3048 return 0;
3049}
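/* Worked example (illustrative only): for JEQ dst, 5 the low word is
 * XORed with 5 and the untouched high word is used directly, so the final
 * OR-and-branch takes the jump iff (dst_lo ^ 5) | dst_hi == 0, i.e. iff
 * the full 64-bit register equals 5.
 */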
3050
cd7df56e
JK
3051static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3052{
3053 const struct bpf_insn *insn = &meta->insn;
3054 u64 imm = insn->imm; /* sign extend */
4987eacc 3055 u8 dst_gpr = insn->dst_reg * 2;
b3f868df 3056 swreg tmp_reg;
cd7df56e 3057
4987eacc
JK
3058 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3059 emit_alu(nfp_prog, imm_b(nfp_prog),
3060 reg_a(dst_gpr), ALU_OP_AND, tmp_reg);
3061 /* Upper word of the mask can only be 0 or ~0 from sign extension,
3062 * so either ignore it or OR the whole thing in.
3063 */
3064 if (imm >> 32)
cd7df56e 3065 emit_alu(nfp_prog, reg_none(),
4987eacc
JK
3066 reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog));
3067 emit_br(nfp_prog, BR_BNE, insn->off, 0);
cd7df56e
JK
3068
3069 return 0;
3070}
3071
3072static int jne_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3073{
3074 const struct bpf_insn *insn = &meta->insn;
3075 u64 imm = insn->imm; /* sign extend */
b3f868df 3076 swreg tmp_reg;
cd7df56e 3077
cd7df56e
JK
3078 if (!imm) {
3079 emit_alu(nfp_prog, reg_none(), reg_a(insn->dst_reg * 2),
3080 ALU_OP_OR, reg_b(insn->dst_reg * 2 + 1));
3081 emit_br(nfp_prog, BR_BNE, insn->off, 0);
82837370 3082 return 0;
cd7df56e
JK
3083 }
3084
3085 tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog));
3086 emit_alu(nfp_prog, reg_none(),
3087 reg_a(insn->dst_reg * 2), ALU_OP_XOR, tmp_reg);
3088 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3089
3090 tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog));
3091 emit_alu(nfp_prog, reg_none(),
3092 reg_a(insn->dst_reg * 2 + 1), ALU_OP_XOR, tmp_reg);
3093 emit_br(nfp_prog, BR_BNE, insn->off, 0);
3094
3095 return 0;
3096}
3097
3098static int jeq_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3099{
3100 const struct bpf_insn *insn = &meta->insn;
3101
cd7df56e
JK
3102 emit_alu(nfp_prog, imm_a(nfp_prog), reg_a(insn->dst_reg * 2),
3103 ALU_OP_XOR, reg_b(insn->src_reg * 2));
3104 emit_alu(nfp_prog, imm_b(nfp_prog), reg_a(insn->dst_reg * 2 + 1),
3105 ALU_OP_XOR, reg_b(insn->src_reg * 2 + 1));
3106 emit_alu(nfp_prog, reg_none(),
3107 imm_a(nfp_prog), ALU_OP_OR, imm_b(nfp_prog));
3108 emit_br(nfp_prog, BR_BEQ, insn->off, 0);
3109
3110 return 0;
3111}
3112
cd7df56e
JK
3113static int jset_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3114{
3115 return wrp_test_reg(nfp_prog, meta, ALU_OP_AND, BR_BNE);
3116}
3117
3118static int jne_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3119{
3120 return wrp_test_reg(nfp_prog, meta, ALU_OP_XOR, BR_BNE);
3121}
3122
389f263b
QM
3123static int
3124bpf_to_bpf_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3125{
bdf4c66f 3126 u32 ret_tgt, stack_depth, offset_br;
389f263b
QM
3127 swreg tmp_reg;
3128
3129 stack_depth = round_up(nfp_prog->stack_frame_depth, STACK_FRAME_ALIGN);
3130 /* Space for saving the return address is accounted for by the callee,
3131 * so stack_depth can be zero for the main function.
3132 */
3133 if (stack_depth) {
3134 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
3135 stack_imm(nfp_prog));
3136 emit_alu(nfp_prog, stack_reg(nfp_prog),
3137 stack_reg(nfp_prog), ALU_OP_ADD, tmp_reg);
3138 emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
3139 NFP_CSR_ACT_LM_ADDR0);
3140 }
3141
44549623
QM
3142 /* Two cases for jumping to the callee:
3143 *
3144 * - If callee uses and needs to save R6~R9 then:
389f263b
QM
3145 * 1. Put the start offset of the callee into imm_b(). This will
3146 * require a fixup step, as we do not necessarily know this
3147 * address yet.
3148 * 2. Put the return address from the callee to the caller into
3149 * register ret_reg().
3150 * 3. (After defer slots are consumed) Jump to the subroutine that
3151 * pushes the registers to the stack.
44549623
QM
3152 * The subroutine acts as a trampoline, and returns to the address in
3153 * imm_b(), i.e. jumps to the callee.
3154 *
3155 * - If callee does not need to save R6~R9 then just load return
3156 * address to the caller in ret_reg(), and jump to the callee
3157 * directly.
389f263b
QM
3158 *
3159	 * Passing the return address to the callee through ret_reg() is the
3160	 * convention set here. The callee can then push this address onto its
3161 * stack frame in its prologue. The advantages of passing the return
3162 * address through ret_reg(), instead of pushing it to the stack right
3163 * here, are the following:
3164 * - It looks cleaner.
3165	 * - If the called function is called multiple times, we get a lower
3166	 *   program size.
3167	 * - We save the two no-op instructions that would otherwise have to
3168	 *   be added just before the emit_br() when stack depth is not zero.
3169	 * - If we ever find a register to hold the return address during the
3170	 *   whole execution of the callee, we will not have to push the
3171	 *   return address to the stack for leaf functions.
3172 */
44549623
QM
3173 if (!meta->jmp_dst) {
3174 pr_err("BUG: BPF-to-BPF call has no destination recorded\n");
3175 return -ELOOP;
3176 }
3177 if (nfp_prog->subprog[meta->jmp_dst->subprog_idx].needs_reg_push) {
3178 ret_tgt = nfp_prog_current_offset(nfp_prog) + 3;
3179 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2,
3180 RELO_BR_GO_CALL_PUSH_REGS);
3181 offset_br = nfp_prog_current_offset(nfp_prog);
3182 wrp_immed_relo(nfp_prog, imm_b(nfp_prog), 0, RELO_IMMED_REL);
3183 } else {
3184 ret_tgt = nfp_prog_current_offset(nfp_prog) + 2;
e90287f3 3185 emit_br(nfp_prog, BR_UNC, meta->insn.imm, 1);
44549623
QM
3186 offset_br = nfp_prog_current_offset(nfp_prog);
3187 }
389f263b
QM
3188 wrp_immed_relo(nfp_prog, ret_reg(nfp_prog), ret_tgt, RELO_IMMED_REL);
3189
3190 if (!nfp_prog_confirm_current_offset(nfp_prog, ret_tgt))
3191 return -EINVAL;
3192
3193 if (stack_depth) {
3194 tmp_reg = ur_load_imm_any(nfp_prog, stack_depth,
3195 stack_imm(nfp_prog));
3196 emit_alu(nfp_prog, stack_reg(nfp_prog),
3197 stack_reg(nfp_prog), ALU_OP_SUB, tmp_reg);
3198 emit_csr_wr(nfp_prog, stack_reg(nfp_prog),
3199 NFP_CSR_ACT_LM_ADDR0);
3200 wrp_nops(nfp_prog, 3);
3201 }
3202
bdf4c66f
QM
3203 meta->num_insns_after_br = nfp_prog_current_offset(nfp_prog);
3204 meta->num_insns_after_br -= offset_br;
3205
389f263b
QM
3206 return 0;
3207}
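/* Rough sketch of the sequence emitted above for the needs_reg_push case
 * (illustrative, using the relocation names from the code):
 *   br_relo  RELO_BR_GO_CALL_PUSH_REGS, 2 defer slots
 *     immed[imm_b, 0]          ; fixed up later with the callee offset
 *     immed[ret_reg, ret_tgt]  ; return address handed to the callee
 *   ret_tgt:                   ; the callee eventually returns here
 * followed, when stack_depth is non-zero, by the stack pointer restore
 * and LM_ADDR0 rewrite that num_insns_after_br accounts for.
 */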
3208
3209static int helper_call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
2cb230bd
JK
3210{
3211 switch (meta->insn.imm) {
0d49eaf4
JK
3212 case BPF_FUNC_xdp_adjust_head:
3213 return adjust_head(nfp_prog, meta);
0c261593
JK
3214 case BPF_FUNC_xdp_adjust_tail:
3215 return adjust_tail(nfp_prog, meta);
77a3d311 3216 case BPF_FUNC_map_lookup_elem:
44d65a47 3217 case BPF_FUNC_map_update_elem:
bfee64de 3218 case BPF_FUNC_map_delete_elem:
fc448497 3219 return map_call_stack_common(nfp_prog, meta);
df4a37d8
JK
3220 case BPF_FUNC_get_prandom_u32:
3221 return nfp_get_prandom_u32(nfp_prog, meta);
9816dd35
JK
3222 case BPF_FUNC_perf_event_output:
3223 return nfp_perf_event_output(nfp_prog, meta);
2cb230bd
JK
3224 default:
3225 WARN_ONCE(1, "verifier allowed unsupported function\n");
3226 return -EOPNOTSUPP;
3227 }
3228}
3229
389f263b
QM
3230static int call(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3231{
3232 if (is_mbpf_pseudo_call(meta))
3233 return bpf_to_bpf_call(nfp_prog, meta);
3234 else
3235 return helper_call(nfp_prog, meta);
3236}
3237
3238static bool nfp_is_main_function(struct nfp_insn_meta *meta)
3239{
3240 return meta->subprog_idx == 0;
3241}
3242
cd7df56e
JK
3243static int goto_out(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3244{
e84797fe 3245 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 0, RELO_BR_GO_OUT);
cd7df56e
JK
3246
3247 return 0;
3248}
3249
389f263b
QM
3250static int
3251nfp_subprog_epilogue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3252{
44549623
QM
3253 if (nfp_prog->subprog[meta->subprog_idx].needs_reg_push) {
3254		/* Pop R6~R9 from the stack via the related subroutine.
3255		 * We loaded the return address to the caller into ret_reg(),
3256		 * so the subroutine does not come back here; we make it jump
3257		 * back to the subprogram caller directly!
3258 */
3259 emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 1,
3260 RELO_BR_GO_CALL_POP_REGS);
3261 /* Pop return address from the stack. */
3262 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
3263 } else {
3264 /* Pop return address from the stack. */
3265 wrp_mov(nfp_prog, ret_reg(nfp_prog), reg_lm(0, 0));
3266 /* Jump back to caller if no callee-saved registers were used
3267 * by the subprogram.
3268 */
3269 emit_rtn(nfp_prog, ret_reg(nfp_prog), 0);
3270 }
389f263b
QM
3271
3272 return 0;
3273}
3274
3275static int jmp_exit(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
3276{
3277 if (nfp_is_main_function(meta))
3278 return goto_out(nfp_prog, meta);
3279 else
3280 return nfp_subprog_epilogue(nfp_prog, meta);
3281}
3282
cd7df56e
JK
3283static const instr_cb_t instr_cb[256] = {
3284 [BPF_ALU64 | BPF_MOV | BPF_X] = mov_reg64,
3285 [BPF_ALU64 | BPF_MOV | BPF_K] = mov_imm64,
3286 [BPF_ALU64 | BPF_XOR | BPF_X] = xor_reg64,
3287 [BPF_ALU64 | BPF_XOR | BPF_K] = xor_imm64,
3288 [BPF_ALU64 | BPF_AND | BPF_X] = and_reg64,
3289 [BPF_ALU64 | BPF_AND | BPF_K] = and_imm64,
3290 [BPF_ALU64 | BPF_OR | BPF_X] = or_reg64,
3291 [BPF_ALU64 | BPF_OR | BPF_K] = or_imm64,
3292 [BPF_ALU64 | BPF_ADD | BPF_X] = add_reg64,
3293 [BPF_ALU64 | BPF_ADD | BPF_K] = add_imm64,
3294 [BPF_ALU64 | BPF_SUB | BPF_X] = sub_reg64,
3295 [BPF_ALU64 | BPF_SUB | BPF_K] = sub_imm64,
d3d23fdb
JW
3296 [BPF_ALU64 | BPF_MUL | BPF_X] = mul_reg64,
3297 [BPF_ALU64 | BPF_MUL | BPF_K] = mul_imm64,
2a952b03
JW
3298 [BPF_ALU64 | BPF_DIV | BPF_X] = div_reg64,
3299 [BPF_ALU64 | BPF_DIV | BPF_K] = div_imm64,
254ef4d7 3300 [BPF_ALU64 | BPF_NEG] = neg_reg64,
991f5b36 3301 [BPF_ALU64 | BPF_LSH | BPF_X] = shl_reg64,
cd7df56e 3302 [BPF_ALU64 | BPF_LSH | BPF_K] = shl_imm64,
991f5b36 3303 [BPF_ALU64 | BPF_RSH | BPF_X] = shr_reg64,
cd7df56e 3304 [BPF_ALU64 | BPF_RSH | BPF_K] = shr_imm64,
c217abcc 3305 [BPF_ALU64 | BPF_ARSH | BPF_X] = ashr_reg64,
f43d0f17 3306 [BPF_ALU64 | BPF_ARSH | BPF_K] = ashr_imm64,
cd7df56e
JK
3307 [BPF_ALU | BPF_MOV | BPF_X] = mov_reg,
3308 [BPF_ALU | BPF_MOV | BPF_K] = mov_imm,
3309 [BPF_ALU | BPF_XOR | BPF_X] = xor_reg,
3310 [BPF_ALU | BPF_XOR | BPF_K] = xor_imm,
3311 [BPF_ALU | BPF_AND | BPF_X] = and_reg,
3312 [BPF_ALU | BPF_AND | BPF_K] = and_imm,
3313 [BPF_ALU | BPF_OR | BPF_X] = or_reg,
3314 [BPF_ALU | BPF_OR | BPF_K] = or_imm,
3315 [BPF_ALU | BPF_ADD | BPF_X] = add_reg,
3316 [BPF_ALU | BPF_ADD | BPF_K] = add_imm,
3317 [BPF_ALU | BPF_SUB | BPF_X] = sub_reg,
3318 [BPF_ALU | BPF_SUB | BPF_K] = sub_imm,
d3d23fdb
JW
3319 [BPF_ALU | BPF_MUL | BPF_X] = mul_reg,
3320 [BPF_ALU | BPF_MUL | BPF_K] = mul_imm,
2a952b03
JW
3321 [BPF_ALU | BPF_DIV | BPF_X] = div_reg,
3322 [BPF_ALU | BPF_DIV | BPF_K] = div_imm,
254ef4d7 3323 [BPF_ALU | BPF_NEG] = neg_reg,
cd7df56e 3324 [BPF_ALU | BPF_LSH | BPF_K] = shl_imm,
84708c13
JW
3325 [BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg,
3326 [BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm,
3119d1fd 3327 [BPF_ALU | BPF_END | BPF_X] = end_reg32,
cd7df56e
JK
3328 [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8,
3329 [BPF_LD | BPF_ABS | BPF_B] = data_ld1,
3330 [BPF_LD | BPF_ABS | BPF_H] = data_ld2,
3331 [BPF_LD | BPF_ABS | BPF_W] = data_ld4,
3332 [BPF_LD | BPF_IND | BPF_B] = data_ind_ld1,
3333 [BPF_LD | BPF_IND | BPF_H] = data_ind_ld2,
3334 [BPF_LD | BPF_IND | BPF_W] = data_ind_ld4,
2ca71441
JK
3335 [BPF_LDX | BPF_MEM | BPF_B] = mem_ldx1,
3336 [BPF_LDX | BPF_MEM | BPF_H] = mem_ldx2,
cd7df56e 3337 [BPF_LDX | BPF_MEM | BPF_W] = mem_ldx4,
2ca71441 3338 [BPF_LDX | BPF_MEM | BPF_DW] = mem_ldx8,
e663fe38
JK
3339 [BPF_STX | BPF_MEM | BPF_B] = mem_stx1,
3340 [BPF_STX | BPF_MEM | BPF_H] = mem_stx2,
19d0f54e 3341 [BPF_STX | BPF_MEM | BPF_W] = mem_stx4,
e663fe38 3342 [BPF_STX | BPF_MEM | BPF_DW] = mem_stx8,
dcb0c27f
JK
3343 [BPF_STX | BPF_XADD | BPF_W] = mem_xadd4,
3344 [BPF_STX | BPF_XADD | BPF_DW] = mem_xadd8,
e663fe38
JK
3345 [BPF_ST | BPF_MEM | BPF_B] = mem_st1,
3346 [BPF_ST | BPF_MEM | BPF_H] = mem_st2,
3347 [BPF_ST | BPF_MEM | BPF_W] = mem_st4,
3348 [BPF_ST | BPF_MEM | BPF_DW] = mem_st8,
cd7df56e
JK
3349 [BPF_JMP | BPF_JA | BPF_K] = jump,
3350 [BPF_JMP | BPF_JEQ | BPF_K] = jeq_imm,
61dd8f00
JK
3351 [BPF_JMP | BPF_JGT | BPF_K] = cmp_imm,
3352 [BPF_JMP | BPF_JGE | BPF_K] = cmp_imm,
3353 [BPF_JMP | BPF_JLT | BPF_K] = cmp_imm,
3354 [BPF_JMP | BPF_JLE | BPF_K] = cmp_imm,
3355 [BPF_JMP | BPF_JSGT | BPF_K] = cmp_imm,
3356 [BPF_JMP | BPF_JSGE | BPF_K] = cmp_imm,
3357 [BPF_JMP | BPF_JSLT | BPF_K] = cmp_imm,
3358 [BPF_JMP | BPF_JSLE | BPF_K] = cmp_imm,
cd7df56e
JK
3359 [BPF_JMP | BPF_JSET | BPF_K] = jset_imm,
3360 [BPF_JMP | BPF_JNE | BPF_K] = jne_imm,
3361 [BPF_JMP | BPF_JEQ | BPF_X] = jeq_reg,
61dd8f00
JK
3362 [BPF_JMP | BPF_JGT | BPF_X] = cmp_reg,
3363 [BPF_JMP | BPF_JGE | BPF_X] = cmp_reg,
3364 [BPF_JMP | BPF_JLT | BPF_X] = cmp_reg,
3365 [BPF_JMP | BPF_JLE | BPF_X] = cmp_reg,
3366 [BPF_JMP | BPF_JSGT | BPF_X] = cmp_reg,
3367 [BPF_JMP | BPF_JSGE | BPF_X] = cmp_reg,
3368 [BPF_JMP | BPF_JSLT | BPF_X] = cmp_reg,
3369 [BPF_JMP | BPF_JSLE | BPF_X] = cmp_reg,
cd7df56e
JK
3370 [BPF_JMP | BPF_JSET | BPF_X] = jset_reg,
3371 [BPF_JMP | BPF_JNE | BPF_X] = jne_reg,
2cb230bd 3372 [BPF_JMP | BPF_CALL] = call,
389f263b 3373 [BPF_JMP | BPF_EXIT] = jmp_exit,
cd7df56e
JK
3374};
3375
cd7df56e 3376/* --- Assembler logic --- */
2178f3f0
QM
3377static int
3378nfp_fixup_immed_relo(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta,
3379 struct nfp_insn_meta *jmp_dst, u32 br_idx)
3380{
3381 if (immed_get_value(nfp_prog->prog[br_idx + 1])) {
3382 pr_err("BUG: failed to fix up callee register saving\n");
3383 return -EINVAL;
3384 }
3385
3386 immed_set_value(&nfp_prog->prog[br_idx + 1], jmp_dst->off);
3387
3388 return 0;
3389}
3390
cd7df56e
JK
3391static int nfp_fixup_branches(struct nfp_prog *nfp_prog)
3392{
5b674140 3393 struct nfp_insn_meta *meta, *jmp_dst;
854dc87d 3394 u32 idx, br_idx;
2178f3f0 3395 int err;
cd7df56e 3396
854dc87d 3397 list_for_each_entry(meta, &nfp_prog->insns, l) {
91a87a58 3398 if (meta->flags & FLAG_INSN_SKIP_MASK)
cd7df56e
JK
3399 continue;
3400 if (BPF_CLASS(meta->insn.code) != BPF_JMP)
3401 continue;
bdf4c66f
QM
3402 if (meta->insn.code == (BPF_JMP | BPF_EXIT) &&
3403 !nfp_is_main_function(meta))
3404 continue;
3405 if (is_mbpf_helper_call(meta))
3406 continue;
cd7df56e 3407
5b674140 3408 if (list_is_last(&meta->l, &nfp_prog->insns))
2314fe9e 3409 br_idx = nfp_prog->last_bpf_off;
5b674140 3410 else
2314fe9e 3411 br_idx = list_next_entry(meta, l)->off - 1;
854dc87d 3412
bdf4c66f
QM
3413 /* For BPF-to-BPF function call, a stack adjustment sequence is
3414 * generated after the return instruction. Therefore, we must
3415		 * subtract the length of this sequence to have br_idx pointing
3416 * to where the "branch" NFP instruction is expected to be.
3417 */
3418 if (is_mbpf_pseudo_call(meta))
3419 br_idx -= meta->num_insns_after_br;
3420
cd7df56e
JK
3421 if (!nfp_is_br(nfp_prog->prog[br_idx])) {
3422 pr_err("Fixup found block not ending in branch %d %02x %016llx!!\n",
3423 br_idx, meta->insn.code, nfp_prog->prog[br_idx]);
3424 return -ELOOP;
3425 }
bdf4c66f
QM
3426
3427 if (meta->insn.code == (BPF_JMP | BPF_EXIT))
3428 continue;
3429
cd7df56e 3430 /* Leave special branches for later */
2314fe9e 3431 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
2178f3f0 3432 RELO_BR_REL && !is_mbpf_pseudo_call(meta))
cd7df56e
JK
3433 continue;
3434
5b674140
JW
3435 if (!meta->jmp_dst) {
3436 pr_err("Non-exit jump doesn't have destination info recorded!!\n");
cd7df56e
JK
3437 return -ELOOP;
3438 }
3439
5b674140 3440 jmp_dst = meta->jmp_dst;
cd7df56e 3441
91a87a58 3442 if (jmp_dst->flags & FLAG_INSN_SKIP_PREC_DEPENDENT) {
cd7df56e
JK
3443 pr_err("Branch landing on removed instruction!!\n");
3444 return -ELOOP;
3445 }
3446
44549623
QM
3447 if (is_mbpf_pseudo_call(meta) &&
3448 nfp_prog->subprog[jmp_dst->subprog_idx].needs_reg_push) {
2178f3f0
QM
3449 err = nfp_fixup_immed_relo(nfp_prog, meta,
3450 jmp_dst, br_idx);
3451 if (err)
3452 return err;
3453 }
3454
3455 if (FIELD_GET(OP_RELO_TYPE, nfp_prog->prog[br_idx]) !=
3456 RELO_BR_REL)
3457 continue;
3458
2314fe9e 3459 for (idx = meta->off; idx <= br_idx; idx++) {
cd7df56e
JK
3460 if (!nfp_is_br(nfp_prog->prog[idx]))
3461 continue;
5b674140 3462 br_set_offset(&nfp_prog->prog[idx], jmp_dst->off);
cd7df56e
JK
3463 }
3464 }
3465
cd7df56e
JK
3466 return 0;
3467}
3468
static void nfp_intro(struct nfp_prog *nfp_prog)
{
	wrp_immed(nfp_prog, plen_reg(nfp_prog), GENMASK(13, 0));
	emit_alu(nfp_prog, plen_reg(nfp_prog),
		 plen_reg(nfp_prog), ALU_OP_AND, pv_len(nfp_prog));
}

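/* Editorial note (an added aside, not from the original source):
 * GENMASK(13, 0) is 0x3fff, so nfp_intro() masks the packet-vector length
 * field down to its low 14 bits before plen_reg() is used as the packet
 * length for the rest of the program.
 */
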
static void
nfp_subprog_prologue(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	/* Save return address into the stack. */
	wrp_mov(nfp_prog, reg_lm(0, 0), ret_reg(nfp_prog));
}

static void
nfp_start_subprog(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
	unsigned int depth = nfp_prog->subprog[meta->subprog_idx].stack_depth;

	nfp_prog->stack_frame_depth = round_up(depth, 4);
	nfp_subprog_prologue(nfp_prog, meta);
}

bool nfp_is_subprog_start(struct nfp_insn_meta *meta)
{
	return meta->flags & FLAG_INSN_IS_SUBPROG_START;
}

static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
{
	/* TC direct-action mode:
	 *   0,1  ok      NOT SUPPORTED[1]
	 *   2    drop    0x22 -> drop,  count as stat1
	 *   4,5  nuke    0x02 -> drop
	 *   7    redir   0x44 -> redir, count as stat2
	 *   *    unspec  0x11 -> pass,  count as stat0
	 *
	 * [1] We can't support OK and RECLASSIFY because we can't tell TC
	 *     the exact decision made.  We are forced to support UNSPEC
	 *     to handle aborts so that's the only one we handle for passing
	 *     packets up the stack.
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x11), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 7 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(7), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);

	wrp_immed(nfp_prog, reg_b(2), 0x41221211);
	wrp_immed(nfp_prog, reg_b(3), 0x41001211);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 2);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_a(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xf), SHF_OP_AND, reg_b(3), SHF_SC_R_SHF, 0);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	emit_shf(nfp_prog, reg_b(2),
		 reg_a(2), SHF_OP_OR, reg_b(2), SHF_SC_L_SHF, 4);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

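/* Worked example (added for illustration; values derived from the code
 * above rather than stated in the original comments): for R0 == 2 (drop)
 * the indirect shift amount is 2 << 2 == 8 bits, so
 *   low nibble:  (0x41221211 >> 8) & 0xf == 0x2
 *   high nibble: (0x41001211 >> 8) & 0xf == 0x2
 * and the final merge, (high << 4) | low, yields the 0x22 "drop, count
 * as stat1" result byte listed in the table at the top of
 * nfp_outro_tc_da().
 */
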
static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
{
	/* XDP return codes:
	 *   0  aborted  0x82 -> drop,  count as stat3
	 *   1  drop     0x22 -> drop,  count as stat1
	 *   2  pass     0x11 -> pass,  count as stat0
	 *   3  tx       0x44 -> redir, count as stat2
	 *   *  unknown  0x82 -> drop,  count as stat3
	 */
	/* Target for aborts */
	nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);

	/* Target for normal exits */
	nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);

	/* if R0 > 3 jump to abort */
	emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
	emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);

	wrp_immed(nfp_prog, reg_b(2), 0x44112282);

	emit_shf(nfp_prog, reg_a(1),
		 reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);

	emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
	emit_shf(nfp_prog, reg_b(2),
		 reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);

	emit_br_relo(nfp_prog, BR_UNC, BR_OFF_RELO, 2, RELO_BR_NEXT_PKT);

	wrp_mov(nfp_prog, reg_a(0), NFP_BPF_ABI_FLAGS);
	emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}

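/* Worked example (added for illustration): the per-return-code result
 * bytes live in the 0x44112282 constant, indexed by R0 << 3 bits.
 * For R0 == 1 (XDP_DROP) the extracted byte is
 * (0x44112282 >> 8) & 0xff == 0x22, matching the "drop, count as stat1"
 * row of the table above.
 */
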
static bool nfp_prog_needs_callee_reg_save(struct nfp_prog *nfp_prog)
{
	unsigned int idx;

	for (idx = 1; idx < nfp_prog->subprog_cnt; idx++)
		if (nfp_prog->subprog[idx].needs_reg_push)
			return true;

	return false;
}

static void nfp_push_callee_registers(struct nfp_prog *nfp_prog)
{
	u8 reg;

	/* Subroutine: Save all callee saved registers (R6 ~ R9).
	 * imm_b() holds the return address.
	 */
	nfp_prog->tgt_call_push_regs = nfp_prog_current_offset(nfp_prog);
	for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
		u8 adj = (reg - BPF_REG_0) * 2;
		u8 idx = (reg - BPF_REG_6) * 2;

		/* The first slot in the stack frame is used to push the return
		 * address in bpf_to_bpf_call(), start just after.
		 */
		wrp_mov(nfp_prog, reg_lm(0, 1 + idx), reg_b(adj));

		if (reg == BPF_REG_8)
			/* Prepare to jump back, last 3 insns use defer slots */
			emit_rtn(nfp_prog, imm_b(nfp_prog), 3);

		wrp_mov(nfp_prog, reg_lm(0, 1 + idx + 1), reg_b(adj + 1));
	}
}

static void nfp_pop_callee_registers(struct nfp_prog *nfp_prog)
{
	u8 reg;

	/* Subroutine: Restore all callee saved registers (R6 ~ R9).
	 * ret_reg() holds the return address.
	 */
	nfp_prog->tgt_call_pop_regs = nfp_prog_current_offset(nfp_prog);
	for (reg = BPF_REG_6; reg <= BPF_REG_9; reg++) {
		u8 adj = (reg - BPF_REG_0) * 2;
		u8 idx = (reg - BPF_REG_6) * 2;

		/* The first slot in the stack frame holds the return address,
		 * start popping just after that.
		 */
		wrp_mov(nfp_prog, reg_both(adj), reg_lm(0, 1 + idx));

		if (reg == BPF_REG_8)
			/* Prepare to jump back, last 3 insns use defer slots */
			emit_rtn(nfp_prog, ret_reg(nfp_prog), 3);

		wrp_mov(nfp_prog, reg_both(adj + 1), reg_lm(0, 1 + idx + 1));
	}
}

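/* Sketch of the resulting local-memory frame layout (reconstructed from
 * the two subroutines above; added as an illustration):
 *
 *   LM slot 0:    return address (pushed by bpf_to_bpf_call())
 *   LM slots 1,2: R6 low/high 32-bit halves
 *   LM slots 3,4: R7 low/high
 *   LM slots 5,6: R8 low/high
 *   LM slots 7,8: R9 low/high
 */
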
static void nfp_outro(struct nfp_prog *nfp_prog)
{
	switch (nfp_prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
		nfp_outro_tc_da(nfp_prog);
		break;
	case BPF_PROG_TYPE_XDP:
		nfp_outro_xdp(nfp_prog);
		break;
	default:
		WARN_ON(1);
	}

	if (!nfp_prog_needs_callee_reg_save(nfp_prog))
		return;

	nfp_push_callee_registers(nfp_prog);
	nfp_pop_callee_registers(nfp_prog);
}

static int nfp_translate(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;
	unsigned int depth;
	int err;

	depth = nfp_prog->subprog[0].stack_depth;
	nfp_prog->stack_frame_depth = round_up(depth, 4);

	nfp_intro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		instr_cb_t cb = instr_cb[meta->insn.code];

		meta->off = nfp_prog_current_offset(nfp_prog);

		if (nfp_is_subprog_start(meta)) {
			nfp_start_subprog(nfp_prog, meta);
			if (nfp_prog->error)
				return nfp_prog->error;
		}

		if (meta->flags & FLAG_INSN_SKIP_MASK) {
			nfp_prog->n_translated++;
			continue;
		}

		if (nfp_meta_has_prev(nfp_prog, meta) &&
		    nfp_meta_prev(meta)->double_cb)
			cb = nfp_meta_prev(meta)->double_cb;
		if (!cb)
			return -ENOENT;
		err = cb(nfp_prog, meta);
		if (err)
			return err;
		if (nfp_prog->error)
			return nfp_prog->error;

		nfp_prog->n_translated++;
	}

	nfp_prog->last_bpf_off = nfp_prog_current_offset(nfp_prog) - 1;

	nfp_outro(nfp_prog);
	if (nfp_prog->error)
		return nfp_prog->error;

	wrp_nops(nfp_prog, NFP_USTORE_PREFETCH_WINDOW);
	if (nfp_prog->error)
		return nfp_prog->error;

	return nfp_fixup_branches(nfp_prog);
}

/* --- Optimizations --- */
static void nfp_bpf_opt_reg_init(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		/* Programs converted from cBPF start with register xoring */
		if (insn.code == (BPF_ALU64 | BPF_XOR | BPF_X) &&
		    insn.src_reg == insn.dst_reg)
			continue;

		/* Programs start with R6 = R1 but we ignore the skb pointer */
		if (insn.code == (BPF_ALU64 | BPF_MOV | BPF_X) &&
		    insn.src_reg == 1 && insn.dst_reg == 6)
			meta->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;

		/* Return as soon as something doesn't match */
		if (!(meta->flags & FLAG_INSN_SKIP_MASK))
			return;
	}
}

/* abs(insn.imm) will fit better into the unrestricted reg immediate -
 * convert add/sub of a negative number into a sub/add of a positive one.
 */
static void nfp_bpf_opt_neg_add_sub(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct bpf_insn insn = meta->insn;

		if (meta->flags & FLAG_INSN_SKIP_MASK)
			continue;

		if (BPF_CLASS(insn.code) != BPF_ALU &&
		    BPF_CLASS(insn.code) != BPF_ALU64 &&
		    BPF_CLASS(insn.code) != BPF_JMP)
			continue;
		if (BPF_SRC(insn.code) != BPF_K)
			continue;
		if (insn.imm >= 0)
			continue;

		if (BPF_CLASS(insn.code) == BPF_JMP) {
			switch (BPF_OP(insn.code)) {
			case BPF_JGE:
			case BPF_JSGE:
			case BPF_JLT:
			case BPF_JSLT:
				meta->jump_neg_op = true;
				break;
			default:
				continue;
			}
		} else {
			if (BPF_OP(insn.code) == BPF_ADD)
				insn.code = BPF_CLASS(insn.code) | BPF_SUB;
			else if (BPF_OP(insn.code) == BPF_SUB)
				insn.code = BPF_CLASS(insn.code) | BPF_ADD;
			else
				continue;

			meta->insn.code = insn.code | BPF_K;
		}

		meta->insn.imm = -insn.imm;
	}
}

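/* Example of the rewrite performed above (added as an illustration):
 *
 *   r2 += -16    (BPF_ALU64 | BPF_ADD | BPF_K, imm == -16)
 * becomes
 *   r2 -= 16     (BPF_ALU64 | BPF_SUB | BPF_K, imm == 16)
 *
 * For the listed conditional jumps only the immediate is negated and
 * meta->jump_neg_op is set, so the comparison is later emitted with the
 * complementary ALU operation.
 */
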
/* Remove masking after load since our load guarantees this is not needed */
static void nfp_bpf_opt_ld_mask(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	const s32 exp_mask[] = {
		[BPF_B] = 0x000000ffU,
		[BPF_H] = 0x0000ffffU,
		[BPF_W] = 0xffffffffU,
	};

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn insn, next;

		insn = meta1->insn;
		next = meta2->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;

		if (next.code != (BPF_ALU64 | BPF_AND | BPF_K))
			continue;

		if (!exp_mask[BPF_SIZE(insn.code)])
			continue;
		if (exp_mask[BPF_SIZE(insn.code)] != next.imm)
			continue;

		if (next.src_reg || next.dst_reg)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
	}
}

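/* Example of a pair recognized by nfp_bpf_opt_ld_mask() (illustration):
 *
 *   r0 = *(u16 *)skb[off]    (BPF_LD | BPF_ABS | BPF_H)
 *   r0 &= 0xffff             (BPF_ALU64 | BPF_AND | BPF_K)
 *
 * The NFP data load already zero-extends the result, so the AND is
 * flagged FLAG_INSN_SKIP_PREC_DEPENDENT and never translated.
 */
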
static void nfp_bpf_opt_ld_shift(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2, *meta3;

	nfp_for_each_insn_walk3(nfp_prog, meta1, meta2, meta3) {
		struct bpf_insn insn, next1, next2;

		insn = meta1->insn;
		next1 = meta2->insn;
		next2 = meta3->insn;

		if (BPF_CLASS(insn.code) != BPF_LD)
			continue;
		if (BPF_MODE(insn.code) != BPF_ABS &&
		    BPF_MODE(insn.code) != BPF_IND)
			continue;
		if (BPF_SIZE(insn.code) != BPF_W)
			continue;

		if (!(next1.code == (BPF_LSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_RSH | BPF_K | BPF_ALU64)) &&
		    !(next1.code == (BPF_RSH | BPF_K | BPF_ALU64) &&
		      next2.code == (BPF_LSH | BPF_K | BPF_ALU64)))
			continue;

		if (next1.src_reg || next1.dst_reg ||
		    next2.src_reg || next2.dst_reg)
			continue;

		if (next1.imm != 0x20 || next2.imm != 0x20)
			continue;

		if (meta2->flags & FLAG_INSN_IS_JUMP_DST ||
		    meta3->flags & FLAG_INSN_IS_JUMP_DST)
			continue;

		meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
		meta3->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
	}
}

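/* Example of a triple recognized by nfp_bpf_opt_ld_shift()
 * (illustration):
 *
 *   r0 = *(u32 *)skb[off]    (BPF_LD | BPF_ABS | BPF_W)
 *   r0 <<= 32                (BPF_ALU64 | BPF_LSH | BPF_K, imm == 0x20)
 *   r0 >>= 32                (BPF_ALU64 | BPF_RSH | BPF_K, imm == 0x20)
 *
 * The shift pair only zero-extends the 32-bit load, which the NFP load
 * provides for free, so both shifts are skipped.
 */
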
/* A load/store pair that forms a memory copy should look like the
 * following:
 *
 *   ld_width R, [addr_src + offset_src]
 *   st_width [addr_dest + offset_dest], R
 *
 * The destination register of the load and the source register of the
 * store must be the same, and the load and store must use the same width.
 * If either addr_src or addr_dest is the stack pointer, we don't do the
 * CPP optimization as the stack is modelled by registers on the NFP.
 */
static bool
curr_pair_is_memcpy(struct nfp_insn_meta *ld_meta,
		    struct nfp_insn_meta *st_meta)
{
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;

	if (!is_mbpf_load(ld_meta) || !is_mbpf_store(st_meta))
		return false;

	if (ld_meta->ptr.type != PTR_TO_PACKET &&
	    ld_meta->ptr.type != PTR_TO_MAP_VALUE)
		return false;

	if (st_meta->ptr.type != PTR_TO_PACKET)
		return false;

	if (BPF_SIZE(ld->code) != BPF_SIZE(st->code))
		return false;

	if (ld->dst_reg != st->src_reg)
		return false;

	/* There is a jump to the store insn in this pair. */
	if (st_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	return true;
}

/* Currently, we only support chaining load/store pairs if:
 *
 * - Their address base registers are the same.
 * - Their address offsets are in the same order.
 * - They operate at the same memory width.
 * - There is no jump into the middle of them.
 */
static bool
curr_pair_chain_with_previous(struct nfp_insn_meta *ld_meta,
			      struct nfp_insn_meta *st_meta,
			      struct bpf_insn *prev_ld,
			      struct bpf_insn *prev_st)
{
	u8 prev_size, curr_size, prev_ld_base, prev_st_base, prev_ld_dst;
	struct bpf_insn *ld = &ld_meta->insn;
	struct bpf_insn *st = &st_meta->insn;
	s16 prev_ld_off, prev_st_off;

	/* This pair is the start pair. */
	if (!prev_ld)
		return true;

	prev_size = BPF_LDST_BYTES(prev_ld);
	curr_size = BPF_LDST_BYTES(ld);
	prev_ld_base = prev_ld->src_reg;
	prev_st_base = prev_st->dst_reg;
	prev_ld_dst = prev_ld->dst_reg;
	prev_ld_off = prev_ld->off;
	prev_st_off = prev_st->off;

	if (ld->dst_reg != prev_ld_dst)
		return false;

	if (ld->src_reg != prev_ld_base || st->dst_reg != prev_st_base)
		return false;

	if (curr_size != prev_size)
		return false;

	/* There is a jump to the head of this pair. */
	if (ld_meta->flags & FLAG_INSN_IS_JUMP_DST)
		return false;

	/* Both in ascending order. */
	if (prev_ld_off + prev_size == ld->off &&
	    prev_st_off + prev_size == st->off)
		return true;

	/* Both in descending order. */
	if (ld->off + curr_size == prev_ld_off &&
	    st->off + curr_size == prev_st_off)
		return true;

	return false;
}

/* Return TRUE if a cross memory access happens. A cross memory access
 * means the store area overlaps the load area, so a later load might read
 * a value written by a previous store; in that case we can't treat the
 * sequence as a memory copy.
 */
static bool
cross_mem_access(struct bpf_insn *ld, struct nfp_insn_meta *head_ld_meta,
		 struct nfp_insn_meta *head_st_meta)
{
	s16 head_ld_off, head_st_off, ld_off;

	/* Different pointer types do not overlap. */
	if (head_ld_meta->ptr.type != head_st_meta->ptr.type)
		return false;

	/* Load and store are both PTR_TO_PACKET, check ID info. */
	if (head_ld_meta->ptr.id != head_st_meta->ptr.id)
		return true;

	/* Canonicalize the offsets. Turn all of them against the original
	 * base register.
	 */
	head_ld_off = head_ld_meta->insn.off + head_ld_meta->ptr.off;
	head_st_off = head_st_meta->insn.off + head_st_meta->ptr.off;
	ld_off = ld->off + head_ld_meta->ptr.off;

	/* Ascending order cross. */
	if (ld_off > head_ld_off &&
	    head_ld_off < head_st_off && ld_off >= head_st_off)
		return true;

	/* Descending order cross. */
	if (ld_off < head_ld_off &&
	    head_ld_off > head_st_off && ld_off <= head_st_off)
		return true;

	return false;
}

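/* Concrete case of the ascending-order test above (added illustration):
 * with head_ld_off == 0, head_st_off == 2 and a later load at
 * ld_off == 2, the later load starts inside the area already written by
 * the head store, so the sequence is not a pure copy and gathering must
 * stop.
 */
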
/* This pass tries to identify the following instruction sequences:
 *
 *   load R, [regA + offA]
 *   store [regB + offB], R
 *   load R, [regA + offA + const_imm_A]
 *   store [regB + offB + const_imm_A], R
 *   load R, [regA + offA + 2 * const_imm_A]
 *   store [regB + offB + 2 * const_imm_A], R
 *   ...
 *
 * The sequence above is typically generated by the compiler when lowering
 * memcpy. The NFP prefers using CPP instructions to accelerate it.
 */
static void nfp_bpf_opt_ldst_gather(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *head_ld_meta = NULL;
	struct nfp_insn_meta *head_st_meta = NULL;
	struct nfp_insn_meta *meta1, *meta2;
	struct bpf_insn *prev_ld = NULL;
	struct bpf_insn *prev_st = NULL;
	u8 count = 0;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		struct bpf_insn *ld = &meta1->insn;
		struct bpf_insn *st = &meta2->insn;

		/* Reset record status if any of the following is true:
		 * - The current insn pair is not load/store.
		 * - The load/store pair doesn't chain with the previous one.
		 * - The chained load/store pair crossed with the previous
		 *   pair.
		 * - The chained load/store pair has a total size of memory
		 *   copy beyond 128 bytes, which is the maximum length a
		 *   single NFP CPP command can transfer.
		 */
		if (!curr_pair_is_memcpy(meta1, meta2) ||
		    !curr_pair_chain_with_previous(meta1, meta2, prev_ld,
						   prev_st) ||
		    (head_ld_meta && (cross_mem_access(ld, head_ld_meta,
						       head_st_meta) ||
				      head_ld_meta->ldst_gather_len >= 128))) {
			if (!count)
				continue;

			if (count > 1) {
				s16 prev_ld_off = prev_ld->off;
				s16 prev_st_off = prev_st->off;
				s16 head_ld_off = head_ld_meta->insn.off;

				if (prev_ld_off < head_ld_off) {
					head_ld_meta->insn.off = prev_ld_off;
					head_st_meta->insn.off = prev_st_off;
					head_ld_meta->ldst_gather_len =
						-head_ld_meta->ldst_gather_len;
				}

				head_ld_meta->paired_st = &head_st_meta->insn;
				head_st_meta->flags |=
					FLAG_INSN_SKIP_PREC_DEPENDENT;
			} else {
				head_ld_meta->ldst_gather_len = 0;
			}

			/* If the chain ended on a load/store pair, that pair
			 * could serve as the head of the next chain.
			 */
			if (curr_pair_is_memcpy(meta1, meta2)) {
				head_ld_meta = meta1;
				head_st_meta = meta2;
				head_ld_meta->ldst_gather_len =
					BPF_LDST_BYTES(ld);
				meta1 = nfp_meta_next(meta1);
				meta2 = nfp_meta_next(meta2);
				prev_ld = ld;
				prev_st = st;
				count = 1;
			} else {
				head_ld_meta = NULL;
				head_st_meta = NULL;
				prev_ld = NULL;
				prev_st = NULL;
				count = 0;
			}

			continue;
		}

		if (!head_ld_meta) {
			head_ld_meta = meta1;
			head_st_meta = meta2;
		} else {
			meta1->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
			meta2->flags |= FLAG_INSN_SKIP_PREC_DEPENDENT;
		}

		head_ld_meta->ldst_gather_len += BPF_LDST_BYTES(ld);
		meta1 = nfp_meta_next(meta1);
		meta2 = nfp_meta_next(meta2);
		prev_ld = ld;
		prev_st = st;
		count++;
	}
}

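/* Illustrative outcome (an added sketch, not stated in the source):
 * three chained 8-byte load/store pairs copying bytes 0..23 leave only
 * the head pair live, with head_ld_meta->ldst_gather_len == 24 and
 * head_ld_meta->paired_st pointing at the head store; the other two
 * pairs are flagged FLAG_INSN_SKIP_PREC_DEPENDENT so the code generator
 * can emit a single CPP transfer for the whole copy.
 */
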
static void nfp_bpf_opt_pkt_cache(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *range_node = NULL;
	s16 range_start = 0, range_end = 0;
	bool cache_avail = false;
	struct bpf_insn *insn;
	s32 range_ptr_off = 0;
	u32 range_ptr_id = 0;

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->flags & FLAG_INSN_IS_JUMP_DST)
			cache_avail = false;

		if (meta->flags & FLAG_INSN_SKIP_MASK)
			continue;

		insn = &meta->insn;

		if (is_mbpf_store_pkt(meta) ||
		    insn->code == (BPF_JMP | BPF_CALL) ||
		    is_mbpf_classic_store_pkt(meta) ||
		    is_mbpf_classic_load(meta)) {
			cache_avail = false;
			continue;
		}

		if (!is_mbpf_load(meta))
			continue;

		if (meta->ptr.type != PTR_TO_PACKET || meta->ldst_gather_len) {
			cache_avail = false;
			continue;
		}

		if (!cache_avail) {
			cache_avail = true;
			if (range_node)
				goto end_current_then_start_new;
			goto start_new;
		}

		/* Check ID to make sure two reads share the same
		 * variable offset against PTR_TO_PACKET, and check OFF
		 * to make sure they also share the same constant
		 * offset.
		 *
		 * OFFs don't really need to be the same, because they
		 * are the constant offsets against PTR_TO_PACKET, so
		 * for different OFFs, we could canonicalize them to
		 * offsets against the original packet pointer. We
		 * don't support this.
		 */
		if (meta->ptr.id == range_ptr_id &&
		    meta->ptr.off == range_ptr_off) {
			s16 new_start = range_start;
			s16 end, off = insn->off;
			s16 new_end = range_end;
			bool changed = false;

			if (off < range_start) {
				new_start = off;
				changed = true;
			}

			end = off + BPF_LDST_BYTES(insn);
			if (end > range_end) {
				new_end = end;
				changed = true;
			}

			if (!changed)
				continue;

			if (new_end - new_start <= 64) {
				/* Install new range. */
				range_start = new_start;
				range_end = new_end;
				continue;
			}
		}

end_current_then_start_new:
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
start_new:
		range_node = meta;
		range_node->pkt_cache.do_init = true;
		range_ptr_id = range_node->ptr.id;
		range_ptr_off = range_node->ptr.off;
		range_start = insn->off;
		range_end = insn->off + BPF_LDST_BYTES(insn);
	}

	if (range_node) {
		range_node->pkt_cache.range_start = range_start;
		range_node->pkt_cache.range_end = range_end;
	}

	list_for_each_entry(meta, &nfp_prog->insns, l) {
		if (meta->flags & FLAG_INSN_SKIP_MASK)
			continue;

		if (is_mbpf_load_pkt(meta) && !meta->ldst_gather_len) {
			if (meta->pkt_cache.do_init) {
				range_start = meta->pkt_cache.range_start;
				range_end = meta->pkt_cache.range_end;
			} else {
				meta->pkt_cache.range_start = range_start;
				meta->pkt_cache.range_end = range_end;
			}
		}
	}
}

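/* Example of the grouping performed above (added illustration): two
 * 4-byte loads from the same packet pointer (same ptr.id and ptr.off)
 * at insn->off 2 and 6 produce one range [2, 10). The first load gets
 * pkt_cache.do_init set and fills the cache; the second reads from the
 * cached data. Ranges are capped at 64 bytes by the
 * "new_end - new_start <= 64" check.
 */
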
static int nfp_bpf_optimize(struct nfp_prog *nfp_prog)
{
	nfp_bpf_opt_reg_init(nfp_prog);

	nfp_bpf_opt_neg_add_sub(nfp_prog);
	nfp_bpf_opt_ld_mask(nfp_prog);
	nfp_bpf_opt_ld_shift(nfp_prog);
	nfp_bpf_opt_ldst_gather(nfp_prog);
	nfp_bpf_opt_pkt_cache(nfp_prog);

	return 0;
}

static int nfp_bpf_replace_map_ptrs(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta1, *meta2;
	struct nfp_bpf_map *nfp_map;
	struct bpf_map *map;
	u32 id;

	nfp_for_each_insn_walk2(nfp_prog, meta1, meta2) {
		if (meta1->flags & FLAG_INSN_SKIP_MASK ||
		    meta2->flags & FLAG_INSN_SKIP_MASK)
			continue;

		if (meta1->insn.code != (BPF_LD | BPF_IMM | BPF_DW) ||
		    meta1->insn.src_reg != BPF_PSEUDO_MAP_FD)
			continue;

		map = (void *)(unsigned long)((u32)meta1->insn.imm |
					      (u64)meta2->insn.imm << 32);
		if (bpf_map_offload_neutral(map)) {
			id = map->id;
		} else {
			nfp_map = map_to_offmap(map)->dev_priv;
			id = nfp_map->tid;
		}

		meta1->insn.imm = id;
		meta2->insn.imm = 0;
	}

	return 0;
}

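/* Sketch of the rewrite above (added illustration): a two-slot
 * BPF_LD | BPF_IMM | BPF_DW instruction with src_reg == BPF_PSEUDO_MAP_FD
 * carries the host map pointer split across the two imm fields. After
 * this pass the first imm holds the NFP map table id (or the map id for
 * offload-neutral maps) and the second imm is zeroed, so the 64-bit
 * immediate the firmware sees is just the id.
 */
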
static int nfp_bpf_ustore_calc(u64 *prog, unsigned int len)
{
	__le64 *ustore = (__force __le64 *)prog;
	int i;

	for (i = 0; i < len; i++) {
		int err;

		err = nfp_ustore_check_valid_no_ecc(prog[i]);
		if (err)
			return err;

		ustore[i] = cpu_to_le64(nfp_ustore_calc_ecc_insn(prog[i]));
	}

	return 0;
}

static void nfp_bpf_prog_trim(struct nfp_prog *nfp_prog)
{
	void *prog;

	prog = kvmalloc_array(nfp_prog->prog_len, sizeof(u64), GFP_KERNEL);
	if (!prog)
		return;

	nfp_prog->__prog_alloc_len = nfp_prog->prog_len * sizeof(u64);
	memcpy(prog, nfp_prog->prog, nfp_prog->__prog_alloc_len);
	kvfree(nfp_prog->prog);
	nfp_prog->prog = prog;
}

int nfp_bpf_jit(struct nfp_prog *nfp_prog)
{
	int ret;

	ret = nfp_bpf_replace_map_ptrs(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_bpf_optimize(nfp_prog);
	if (ret)
		return ret;

	ret = nfp_translate(nfp_prog);
	if (ret) {
		pr_err("Translation failed with error %d (translated: %u)\n",
		       ret, nfp_prog->n_translated);
		return -EINVAL;
	}

	nfp_bpf_prog_trim(nfp_prog);

	return ret;
}

void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta;

	/* Another pass to record jump information. */
	list_for_each_entry(meta, &nfp_prog->insns, l) {
		struct nfp_insn_meta *dst_meta;
		u64 code = meta->insn.code;
		unsigned int dst_idx;
		bool pseudo_call;

		if (BPF_CLASS(code) != BPF_JMP)
			continue;
		if (BPF_OP(code) == BPF_EXIT)
			continue;
		if (is_mbpf_helper_call(meta))
			continue;

		/* If the opcode is BPF_CALL at this point, this can only be a
		 * BPF-to-BPF call (a.k.a. pseudo call).
		 */
		pseudo_call = BPF_OP(code) == BPF_CALL;

		if (pseudo_call)
			dst_idx = meta->n + 1 + meta->insn.imm;
		else
			dst_idx = meta->n + 1 + meta->insn.off;

		dst_meta = nfp_bpf_goto_meta(nfp_prog, meta, dst_idx);

		if (pseudo_call)
			dst_meta->flags |= FLAG_INSN_IS_SUBPROG_START;

		dst_meta->flags |= FLAG_INSN_IS_JUMP_DST;
		meta->jmp_dst = dst_meta;
	}
}

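/* Example of the destination computation above (added illustration):
 * BPF jump offsets are relative to the next instruction, so for
 * "if r0 == 0 goto +5" at instruction index 10 the destination index is
 * 10 + 1 + 5 == 16. A pseudo call uses the same arithmetic with
 * insn.imm in place of insn.off.
 */
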
bool nfp_bpf_supported_opcode(u8 code)
{
	return !!instr_cb[code];
}

void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv)
{
	unsigned int i;
	u64 *prog;
	int err;

	prog = kmemdup(nfp_prog->prog, nfp_prog->prog_len * sizeof(u64),
		       GFP_KERNEL);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < nfp_prog->prog_len; i++) {
		enum nfp_relo_type special;
		u32 val;
		u16 off;

		special = FIELD_GET(OP_RELO_TYPE, prog[i]);
		switch (special) {
		case RELO_NONE:
			continue;
		case RELO_BR_REL:
			br_add_offset(&prog[i], bv->start_off);
			break;
		case RELO_BR_GO_OUT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_out + bv->start_off);
			break;
		case RELO_BR_GO_ABORT:
			br_set_offset(&prog[i],
				      nfp_prog->tgt_abort + bv->start_off);
			break;
		case RELO_BR_GO_CALL_PUSH_REGS:
			if (!nfp_prog->tgt_call_push_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_push_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_GO_CALL_POP_REGS:
			if (!nfp_prog->tgt_call_pop_regs) {
				pr_err("BUG: failed to detect subprogram registers needs\n");
				err = -EINVAL;
				goto err_free_prog;
			}
			off = nfp_prog->tgt_call_pop_regs + bv->start_off;
			br_set_offset(&prog[i], off);
			break;
		case RELO_BR_NEXT_PKT:
			br_set_offset(&prog[i], bv->tgt_done);
			break;
		case RELO_BR_HELPER:
			val = br_get_offset(prog[i]);
			val -= BR_OFF_RELO;
			switch (val) {
			case BPF_FUNC_map_lookup_elem:
				val = nfp_prog->bpf->helpers.map_lookup;
				break;
			case BPF_FUNC_map_update_elem:
				val = nfp_prog->bpf->helpers.map_update;
				break;
			case BPF_FUNC_map_delete_elem:
				val = nfp_prog->bpf->helpers.map_delete;
				break;
			case BPF_FUNC_perf_event_output:
				val = nfp_prog->bpf->helpers.perf_event_output;
				break;
			default:
				pr_err("relocation of unknown helper %d\n",
				       val);
				err = -EINVAL;
				goto err_free_prog;
			}
			br_set_offset(&prog[i], val);
			break;
		case RELO_IMMED_REL:
			immed_add_value(&prog[i], bv->start_off);
			break;
		}

		prog[i] &= ~OP_RELO_TYPE;
	}

	err = nfp_bpf_ustore_calc(prog, nfp_prog->prog_len);
	if (err)
		goto err_free_prog;

	return prog;

err_free_prog:
	kfree(prog);
	return ERR_PTR(err);
}