// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2016-2018 Netronome Systems, Inc. */

/*
 * nfp_net_offload.c
 * Netronome network device driver: TC offload functions for PF and VF
 */

#define pr_fmt(fmt)	"NFP net bpf: " fmt

#include <linux/bpf.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/mm.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "main.h"
#include "../nfp_app.h"
#include "../nfp_net_ctrl.h"
#include "../nfp_net.h"

static int
nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		   struct bpf_map *map)
{
	struct nfp_bpf_neutral_map *record;
	int err;

	/* Map record paths are entered via ndo, update side is protected. */
	ASSERT_RTNL();

	/* Reuse path - other offloaded program is already tracking this map. */
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id,
					nfp_bpf_maps_neutral_params);
	if (record) {
		nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;
		record->count++;
		return 0;
	}

	/* Grab a single ref to the map for our record.  The prog destroy ndo
	 * happens after free_used_maps().
	 */
	map = bpf_map_inc(map, false);
	if (IS_ERR(map))
		return PTR_ERR(map);

	record = kmalloc(sizeof(*record), GFP_KERNEL);
	if (!record) {
		err = -ENOMEM;
		goto err_map_put;
	}

	record->ptr = map;
	record->map_id = map->id;
	record->count = 1;

	err = rhashtable_insert_fast(&bpf->maps_neutral, &record->l,
				     nfp_bpf_maps_neutral_params);
	if (err)
		goto err_free_rec;

	nfp_prog->map_records[nfp_prog->map_records_cnt++] = record;

	return 0;

err_free_rec:
	kfree(record);
err_map_put:
	bpf_map_put(map);
	return err;
}

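/* Drop this program's references to its neutral map records.  Records
 * which lose their last user are removed from the hash table and, after
 * an RCU grace period, the map reference is released and the record freed.
 */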
static void
nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog)
{
	bool freed = false;
	int i;

	ASSERT_RTNL();

	for (i = 0; i < nfp_prog->map_records_cnt; i++) {
		if (--nfp_prog->map_records[i]->count) {
			nfp_prog->map_records[i] = NULL;
			continue;
		}

		WARN_ON(rhashtable_remove_fast(&bpf->maps_neutral,
					       &nfp_prog->map_records[i]->l,
					       nfp_bpf_maps_neutral_params));
		freed = true;
	}

	if (freed) {
		synchronize_rcu();

		for (i = 0; i < nfp_prog->map_records_cnt; i++)
			if (nfp_prog->map_records[i]) {
				bpf_map_put(nfp_prog->map_records[i]->ptr);
				kfree(nfp_prog->map_records[i]);
			}
	}

	kfree(nfp_prog->map_records);
	nfp_prog->map_records = NULL;
	nfp_prog->map_records_cnt = 0;
}

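/* Walk the program's used maps and build records for the ones which stay
 * on the host ("neutral" maps); nfp_bpf_event_output() uses these records
 * to translate map IDs reported by the FW back to host map pointers.
 */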
static int
nfp_map_ptrs_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog,
		    struct bpf_prog *prog)
{
	int i, cnt, err;

	/* Quickly count the maps we will have to remember */
	cnt = 0;
	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i]))
			cnt++;
	if (!cnt)
		return 0;

	nfp_prog->map_records = kmalloc_array(cnt,
					      sizeof(nfp_prog->map_records[0]),
					      GFP_KERNEL);
	if (!nfp_prog->map_records)
		return -ENOMEM;

	for (i = 0; i < prog->aux->used_map_cnt; i++)
		if (bpf_map_offload_neutral(prog->aux->used_maps[i])) {
			err = nfp_map_ptr_record(bpf, nfp_prog,
						 prog->aux->used_maps[i]);
			if (err) {
				nfp_map_ptrs_forget(bpf, nfp_prog);
				return err;
			}
		}
	WARN_ON(cnt != nfp_prog->map_records_cnt);

	return 0;
}

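/* Copy the verifier's instruction stream into per-instruction metadata
 * nodes on nfp_prog->insns, then let the JIT pre-compute what it can.
 */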
static int
nfp_prog_prepare(struct nfp_prog *nfp_prog, const struct bpf_insn *prog,
		 unsigned int cnt)
{
	struct nfp_insn_meta *meta;
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		meta = kzalloc(sizeof(*meta), GFP_KERNEL);
		if (!meta)
			return -ENOMEM;

		meta->insn = prog[i];
		meta->n = i;
		if (is_mbpf_alu(meta)) {
			meta->umin_src = U64_MAX;
			meta->umin_dst = U64_MAX;
		}

		list_add_tail(&meta->l, &nfp_prog->insns);
	}

	nfp_bpf_jit_prepare(nfp_prog, cnt);

	return 0;
}

static void nfp_prog_free(struct nfp_prog *nfp_prog)
{
	struct nfp_insn_meta *meta, *tmp;

	kfree(nfp_prog->subprog);

	list_for_each_entry_safe(meta, tmp, &nfp_prog->insns, l) {
		list_del(&meta->l);
		kfree(meta);
	}
	kfree(nfp_prog);
}

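/* BPF_OFFLOAD_VERIFIER_PREP handler - allocate the device-private program
 * state and point the core at the NFP verifier callbacks before host
 * verification runs.
 */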
static int
nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn,
		      struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->verifier.prog;
	struct nfp_prog *nfp_prog;
	int ret;

	nfp_prog = kzalloc(sizeof(*nfp_prog), GFP_KERNEL);
	if (!nfp_prog)
		return -ENOMEM;
	prog->aux->offload->dev_priv = nfp_prog;

	INIT_LIST_HEAD(&nfp_prog->insns);
	nfp_prog->type = prog->type;
	nfp_prog->bpf = app->priv;

	ret = nfp_prog_prepare(nfp_prog, prog->insnsi, prog->len);
	if (ret)
		goto err_free;

	nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog);
	bpf->verifier.ops = &nfp_bpf_analyzer_ops;

	return 0;

err_free:
	nfp_prog_free(nfp_prog);

	return ret;
}

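/* BPF_OFFLOAD_TRANSLATE handler - JIT the program into NFP instructions,
 * sized to the FW's maximum program length, and record any host-side maps.
 */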
static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_instr;
	int err;

	max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);
	nfp_prog->__prog_alloc_len = max_instr * sizeof(u64);

	nfp_prog->prog = kvmalloc(nfp_prog->__prog_alloc_len, GFP_KERNEL);
	if (!nfp_prog->prog)
		return -ENOMEM;

	err = nfp_bpf_jit(nfp_prog);
	if (err)
		return err;

	prog->aux->offload->jited_len = nfp_prog->prog_len * sizeof(u64);
	prog->aux->offload->jited_image = nfp_prog->prog;

	return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog);
}

static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;

	kvfree(nfp_prog->prog);
	nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog);
	nfp_prog_free(nfp_prog);

	return 0;
}

/* Atomic engine requires values to be in big endian, we need to byte swap
 * the value words used with xadd.
 */
static void nfp_map_bpf_byte_swap(struct nfp_bpf_map *nfp_map, void *value)
{
	u32 *word = value;
	unsigned int i;

	for (i = 0; i < DIV_ROUND_UP(nfp_map->offmap->map.value_size, 4); i++)
		if (nfp_map->use_map[i] == NFP_MAP_USE_ATOMIC_CNT)
			word[i] = (__force u32)cpu_to_be32(word[i]);
}

static int
nfp_bpf_map_lookup_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value)
{
	int err;

	err = nfp_bpf_ctrl_lookup_entry(offmap, key, value);
	if (err)
		return err;

	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return 0;
}

static int
nfp_bpf_map_update_entry(struct bpf_offloaded_map *offmap,
			 void *key, void *value, u64 flags)
{
	nfp_map_bpf_byte_swap(offmap->dev_priv, value);
	return nfp_bpf_ctrl_update_entry(offmap, key, value, flags);
}

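/* A NULL key asks for the first entry in the map, otherwise return the
 * entry following @key.
 */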
static int
nfp_bpf_map_get_next_key(struct bpf_offloaded_map *offmap,
			 void *key, void *next_key)
{
	if (!key)
		return nfp_bpf_ctrl_getfirst_entry(offmap, next_key);
	return nfp_bpf_ctrl_getnext_entry(offmap, key, next_key);
}

static int
nfp_bpf_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
{
	if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
		return -EINVAL;
	return nfp_bpf_ctrl_del_entry(offmap, key);
}

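/* Map operations the stack invokes on offloaded maps; lookups and updates
 * go through the byte-swapping wrappers above.
 */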
static const struct bpf_map_dev_ops nfp_bpf_map_ops = {
	.map_get_next_key	= nfp_bpf_map_get_next_key,
	.map_lookup_elem	= nfp_bpf_map_lookup_entry,
	.map_update_elem	= nfp_bpf_map_update_entry,
	.map_delete_elem	= nfp_bpf_map_delete_elem,
};

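/* BPF_OFFLOAD_MAP_ALLOC handler - validate the requested map against the
 * capabilities the FW advertised (supported types, map and element counts,
 * key/value sizes) and allocate a FW-side table for it.
 */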
static int
nfp_bpf_map_alloc(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map;
	unsigned int use_map_size;
	long long int res;

	if (!bpf->maps.types)
		return -EOPNOTSUPP;

	if (offmap->map.map_flags ||
	    offmap->map.numa_node != NUMA_NO_NODE) {
		pr_info("map flags are not supported\n");
		return -EINVAL;
	}

	if (!(bpf->maps.types & 1 << offmap->map.map_type)) {
		pr_info("map type not supported\n");
		return -EOPNOTSUPP;
	}
	if (bpf->maps.max_maps == bpf->maps_in_use) {
		pr_info("too many maps for a device\n");
		return -ENOMEM;
	}
	if (bpf->maps.max_elems - bpf->map_elems_in_use <
	    offmap->map.max_entries) {
		pr_info("map with too many elements: %u, left: %u\n",
			offmap->map.max_entries,
			bpf->maps.max_elems - bpf->map_elems_in_use);
		return -ENOMEM;
	}

	if (round_up(offmap->map.key_size, 8) +
	    round_up(offmap->map.value_size, 8) > bpf->maps.max_elem_sz) {
		pr_info("map elements too large: %u, FW max element size (key+value): %u\n",
			round_up(offmap->map.key_size, 8) +
			round_up(offmap->map.value_size, 8),
			bpf->maps.max_elem_sz);
		return -ENOMEM;
	}
	if (offmap->map.key_size > bpf->maps.max_key_sz) {
		pr_info("map key size %u, FW max is %u\n",
			offmap->map.key_size, bpf->maps.max_key_sz);
		return -ENOMEM;
	}
	if (offmap->map.value_size > bpf->maps.max_val_sz) {
		pr_info("map value size %u, FW max is %u\n",
			offmap->map.value_size, bpf->maps.max_val_sz);
		return -ENOMEM;
	}

	use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) *
		       FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]);

	nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER);
	if (!nfp_map)
		return -ENOMEM;

	offmap->dev_priv = nfp_map;
	nfp_map->offmap = offmap;
	nfp_map->bpf = bpf;

	res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map);
	if (res < 0) {
		kfree(nfp_map);
		return res;
	}

	nfp_map->tid = res;
	offmap->dev_ops = &nfp_bpf_map_ops;
	bpf->maps_in_use++;
	bpf->map_elems_in_use += offmap->map.max_entries;
	list_add_tail(&nfp_map->l, &bpf->map_list);

	return 0;
}

static int
nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;

	nfp_bpf_ctrl_free_map(bpf, nfp_map);
	list_del_init(&nfp_map->l);
	bpf->map_elems_in_use -= offmap->map.max_entries;
	bpf->maps_in_use--;
	kfree(nfp_map);

	return 0;
}

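/* Entry point for the ->ndo_bpf() offload commands issued by the stack. */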
int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf)
{
	switch (bpf->command) {
	case BPF_OFFLOAD_VERIFIER_PREP:
		return nfp_bpf_verifier_prep(app, nn, bpf);
	case BPF_OFFLOAD_TRANSLATE:
		return nfp_bpf_translate(nn, bpf->offload.prog);
	case BPF_OFFLOAD_DESTROY:
		return nfp_bpf_destroy(nn, bpf->offload.prog);
	case BPF_OFFLOAD_MAP_ALLOC:
		return nfp_bpf_map_alloc(app->priv, bpf->offmap);
	case BPF_OFFLOAD_MAP_FREE:
		return nfp_bpf_map_free(app->priv, bpf->offmap);
	default:
		return -EINVAL;
	}
}

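/* Copy callback for bpf_event_output(), data is already in the kernel. */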
static unsigned long
nfp_bpf_perf_event_copy(void *dst, const void *src,
			unsigned long off, unsigned long len)
{
	memcpy(dst, src + off, len);
	return 0;
}

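/* Handle a perf event control message from the FW - validate the message,
 * map the reported map ID back to a host map, and emit the sample via
 * bpf_event_output().
 */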
int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
			 unsigned int len)
{
	struct cmsg_bpf_event *cbe = (void *)data;
	struct nfp_bpf_neutral_map *record;
	u32 pkt_size, data_size, map_id;
	u64 map_id_full;

	if (len < sizeof(struct cmsg_bpf_event))
		return -EINVAL;

	pkt_size = be32_to_cpu(cbe->pkt_size);
	data_size = be32_to_cpu(cbe->data_size);
	map_id_full = be64_to_cpu(cbe->map_ptr);
	map_id = map_id_full;

	if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
		return -EINVAL;
	if (cbe->hdr.ver != CMSG_MAP_ABI_VERSION)
		return -EINVAL;

	rcu_read_lock();
	record = rhashtable_lookup_fast(&bpf->maps_neutral, &map_id,
					nfp_bpf_maps_neutral_params);
	if (!record || map_id_full > U32_MAX) {
		rcu_read_unlock();
		cmsg_warn(bpf, "perf event: map id %lld (0x%llx) not recognized, dropping event\n",
			  map_id_full, map_id_full);
		return -EINVAL;
	}

	bpf_event_output(record->ptr, be32_to_cpu(cbe->cpu_id),
			 &cbe->data[round_up(pkt_size, 4)], data_size,
			 cbe->data, pkt_size, nfp_bpf_perf_event_copy);
	rcu_read_unlock();

	return 0;
}

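/* DMA-map the relocated program image and ask the FW to load it. */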
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
		 struct netlink_ext_ack *extack)
{
	struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
	unsigned int max_mtu;
	dma_addr_t dma_addr;
	void *img;
	int err;

	max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
	if (max_mtu < nn->dp.netdev->mtu) {
		NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary");
		return -EOPNOTSUPP;
	}

	img = nfp_bpf_relo_for_vnic(nfp_prog, nn->app_priv);
	if (IS_ERR(img))
		return PTR_ERR(img);

	dma_addr = dma_map_single(nn->dp.dev, img,
				  nfp_prog->prog_len * sizeof(u64),
				  DMA_TO_DEVICE);
	if (dma_mapping_error(nn->dp.dev, dma_addr)) {
		kfree(img);
		return -ENOMEM;
	}

	nn_writew(nn, NFP_NET_CFG_BPF_SIZE, nfp_prog->prog_len);
	nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, dma_addr);

	/* Load up the JITed code */
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while loading BPF");

	dma_unmap_single(nn->dp.dev, dma_addr, nfp_prog->prog_len * sizeof(u64),
			 DMA_TO_DEVICE);
	kfree(img);

	return err;
}

static void
nfp_net_bpf_start(struct nfp_net *nn, struct netlink_ext_ack *extack)
{
	int err;

	/* Enable passing packets through BPF function */
	nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
	err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
	if (err)
		NL_SET_ERR_MSG_MOD(extack,
				   "FW command error while enabling BPF");
}

static int nfp_net_bpf_stop(struct nfp_net *nn)
{
	if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
		return 0;

	nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
	nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

	return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}

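/* Attach, replace or detach an offloaded program on @nn.  Live replace is
 * only attempted when the FW advertises relocation support.
 */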
int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog,
			bool old_prog, struct netlink_ext_ack *extack)
{
	int err;

	if (prog && !bpf_offload_dev_match(prog, nn->dp.netdev))
		return -EINVAL;

	if (prog && old_prog) {
		u8 cap;

		cap = nn_readb(nn, NFP_NET_CFG_BPF_CAP);
		if (!(cap & NFP_NET_BPF_CAP_RELO)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "FW does not support live reload");
			return -EBUSY;
		}
	}

	/* Something else is loaded, different program type? */
	if (!old_prog && nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return -EBUSY;

	if (old_prog && !prog)
		return nfp_net_bpf_stop(nn);

	err = nfp_net_bpf_load(nn, prog, extack);
	if (err)
		return err;

	if (!old_prog)
		nfp_net_bpf_start(nn, extack);

	return 0;
}