/*
 * Copyright (C) 2017-2018 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/bpf.h>
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

#include "../nfp_app.h"
#include "../nfp_net.h"
#include "fw.h"
#include "main.h"
#define NFP_BPF_TAG_ALLOC_SPAN	(U16_MAX / 4)
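
/* Tags are handed out as a sliding window [tag_alloc_last, tag_alloc_next);
 * a tag stays busy until its reply is consumed, so the number of requests
 * in flight is capped at a quarter of the tag space.
 */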
static bool nfp_bpf_all_tags_busy(struct nfp_app_bpf *bpf)
{
	u16 used_tags;

	used_tags = bpf->tag_alloc_next - bpf->tag_alloc_last;

	return used_tags > NFP_BPF_TAG_ALLOC_SPAN;
}
static int nfp_bpf_alloc_tag(struct nfp_app_bpf *bpf)
{
	/* All FW communication for BPF is request-reply.  To make sure we
	 * don't reuse the message ID too early after timeout - limit the
	 * number of requests in flight.
	 */
	if (nfp_bpf_all_tags_busy(bpf)) {
		cmsg_warn(bpf, "all FW request contexts busy!\n");
		return -EAGAIN;
	}

	WARN_ON(__test_and_set_bit(bpf->tag_alloc_next, bpf->tag_allocator));
	return bpf->tag_alloc_next++;
}
static void nfp_bpf_free_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	WARN_ON(!__test_and_clear_bit(tag, bpf->tag_allocator));

	while (!test_bit(bpf->tag_alloc_last, bpf->tag_allocator) &&
	       bpf->tag_alloc_last != bpf->tag_alloc_next)
		bpf->tag_alloc_last++;
}
static struct sk_buff *
nfp_bpf_cmsg_alloc(struct nfp_app_bpf *bpf, unsigned int size)
{
	struct sk_buff *skb;

	skb = nfp_app_ctrl_msg_alloc(bpf->app, size, GFP_KERNEL);
	skb_put(skb, size);

	return skb;
}
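
/* Map operation messages carry a fixed request/reply header followed by
 * @n (key, value) pairs; the per-element sizes come from bpf->cmsg_key_sz
 * and bpf->cmsg_val_sz.
 */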
static unsigned int
nfp_bpf_cmsg_map_req_size(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_req_map_op);
	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

	return size;
}
static struct sk_buff *
nfp_bpf_cmsg_map_req_alloc(struct nfp_app_bpf *bpf, unsigned int n)
{
	return nfp_bpf_cmsg_alloc(bpf, nfp_bpf_cmsg_map_req_size(bpf, n));
}
static unsigned int
nfp_bpf_cmsg_map_reply_size(struct nfp_app_bpf *bpf, unsigned int n)
{
	unsigned int size;

	size = sizeof(struct cmsg_reply_map_op);
	size += (bpf->cmsg_key_sz + bpf->cmsg_val_sz) * n;

	return size;
}
static u8 nfp_bpf_cmsg_get_type(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return hdr->type;
}
static unsigned int nfp_bpf_cmsg_get_tag(struct sk_buff *skb)
{
	struct cmsg_hdr *hdr;

	hdr = (struct cmsg_hdr *)skb->data;

	return be16_to_cpu(hdr->tag);
}
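
/* Replies are stashed on @cmsg_replies by the RX handler; a waiter claims
 * its reply by tag and releases the tag in the same step.  Must be called
 * with the control channel lock held (hence the __ prefix).
 */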
static struct sk_buff *__nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	unsigned int msg_tag;
	struct sk_buff *skb;

	skb_queue_walk(&bpf->cmsg_replies, skb) {
		msg_tag = nfp_bpf_cmsg_get_tag(skb);
		if (msg_tag == tag) {
			nfp_bpf_free_tag(bpf, tag);
			__skb_unlink(skb, &bpf->cmsg_replies);
			return skb;
		}
	}

	return NULL;
}
static struct sk_buff *nfp_bpf_reply(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}
static struct sk_buff *nfp_bpf_reply_drop_tag(struct nfp_app_bpf *bpf, u16 tag)
{
	struct sk_buff *skb;

	nfp_ctrl_lock(bpf->app->ctrl);
	skb = __nfp_bpf_reply(bpf, tag);
	if (!skb)
		nfp_bpf_free_tag(bpf, tag);
	nfp_ctrl_unlock(bpf->app->ctrl);

	return skb;
}
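
/* Wait for the reply to @tag: busy-poll briefly for fast responses, then
 * sleep on @cmsg_wq.  On timeout or signal the tag is reclaimed so it can
 * be reused once any late reply has been dropped.
 */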
static struct sk_buff *
nfp_bpf_cmsg_wait_reply(struct nfp_app_bpf *bpf, enum nfp_bpf_cmsg_type type,
			int tag)
{
	struct sk_buff *skb;
	int i, err;

	for (i = 0; i < 50; i++) {
		udelay(4);
		skb = nfp_bpf_reply(bpf, tag);
		if (skb)
			return skb;
	}

	err = wait_event_interruptible_timeout(bpf->cmsg_wq,
					       skb = nfp_bpf_reply(bpf, tag),
					       msecs_to_jiffies(5000));
	/* We didn't get a response - try last time and atomically drop
	 * the tag even if no response is matched.
	 */
	if (!skb)
		skb = nfp_bpf_reply_drop_tag(bpf, tag);
	if (err < 0) {
		cmsg_warn(bpf, "%s waiting for response to 0x%02x: %d\n",
			  err == -ERESTARTSYS ? "interrupted" : "error",
			  type, err);
		return ERR_PTR(err);
	}
	if (!skb) {
		cmsg_warn(bpf, "timeout waiting for response to 0x%02x\n",
			  type);
		return ERR_PTR(-ETIMEDOUT);
	}

	return skb;
}
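
/* Send a request and wait for the matching reply: take the control lock,
 * allocate a tag, fill in the common header, transmit, then block until
 * the RX handler queues a reply carrying our tag.  The reply type and
 * length are validated here; a zero @reply_size defers length checks to
 * the caller.
 */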
static struct sk_buff *
nfp_bpf_cmsg_communicate(struct nfp_app_bpf *bpf, struct sk_buff *skb,
			 enum nfp_bpf_cmsg_type type, unsigned int reply_size)
{
	struct cmsg_hdr *hdr;
	int tag;

	nfp_ctrl_lock(bpf->app->ctrl);
	tag = nfp_bpf_alloc_tag(bpf);
	if (tag < 0) {
		nfp_ctrl_unlock(bpf->app->ctrl);
		dev_kfree_skb_any(skb);
		return ERR_PTR(tag);
	}

	hdr = (void *)skb->data;
	hdr->ver = CMSG_MAP_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(tag);

	__nfp_app_ctrl_tx(bpf->app, skb);

	nfp_ctrl_unlock(bpf->app->ctrl);

	skb = nfp_bpf_cmsg_wait_reply(bpf, type, tag);
	if (IS_ERR(skb))
		return skb;

	hdr = (struct cmsg_hdr *)skb->data;
	if (hdr->type != __CMSG_REPLY(type)) {
		cmsg_warn(bpf, "cmsg drop - wrong type 0x%02x != 0x%02lx!\n",
			  hdr->type, __CMSG_REPLY(type));
		goto err_free;
	}
	/* 0 reply_size means caller will do the validation */
	if (reply_size && skb->len != reply_size) {
		cmsg_warn(bpf, "cmsg drop - type 0x%02x wrong size %d != %d!\n",
			  type, skb->len, reply_size);
		goto err_free;
	}

	return skb;
err_free:
	dev_kfree_skb_any(skb);
	return ERR_PTR(-EIO);
}
static int
nfp_bpf_ctrl_rc_to_errno(struct nfp_app_bpf *bpf,
			 struct cmsg_reply_map_simple *reply)
{
	static const int res_table[] = {
		[CMSG_RC_SUCCESS]	= 0,
		[CMSG_RC_ERR_MAP_FD]	= -EBADFD,
		[CMSG_RC_ERR_MAP_NOENT]	= -ENOENT,
		[CMSG_RC_ERR_MAP_ERR]	= -EINVAL,
		[CMSG_RC_ERR_MAP_PARSE]	= -EIO,
		[CMSG_RC_ERR_MAP_EXIST]	= -EEXIST,
		[CMSG_RC_ERR_MAP_NOMEM]	= -ENOMEM,
		[CMSG_RC_ERR_MAP_E2BIG]	= -E2BIG,
	};
	u32 rc;

	rc = be32_to_cpu(reply->rc);
	if (rc >= ARRAY_SIZE(res_table)) {
		cmsg_warn(bpf, "FW responded with invalid status: %u\n", rc);
		return -EIO;
	}

	return res_table[rc];
}
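
/* Ask the FW to allocate a map table.  On success the FW-assigned table
 * id (tid) is returned; it identifies the map in all subsequent entry
 * operations and in the eventual free.
 */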
long long int
nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map)
{
	struct cmsg_reply_map_alloc_tbl *reply;
	struct cmsg_req_map_alloc_tbl *req;
	struct sk_buff *skb;
	u32 tid;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->key_size = cpu_to_be32(map->key_size);
	req->value_size = cpu_to_be32(map->value_size);
	req->max_entries = cpu_to_be32(map->max_entries);
	req->map_type = cpu_to_be32(map->map_type);
	req->map_flags = 0;

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_ALLOC,
				       sizeof(*reply));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	tid = be32_to_cpu(reply->tid);
	dev_consume_skb_any(skb);

	return tid;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}
void nfp_bpf_ctrl_free_map(struct nfp_app_bpf *bpf, struct nfp_bpf_map *nfp_map)
{
	struct cmsg_reply_map_free_tbl *reply;
	struct cmsg_req_map_free_tbl *req;
	struct sk_buff *skb;
	int err;

	skb = nfp_bpf_cmsg_alloc(bpf, sizeof(*req));
	if (!skb) {
		cmsg_warn(bpf, "leaking map - failed to allocate msg\n");
		return;
	}

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, CMSG_TYPE_MAP_FREE,
				       sizeof(*reply));
	if (IS_ERR(skb)) {
		cmsg_warn(bpf, "leaking map - I/O error\n");
		return;
	}

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		cmsg_warn(bpf, "leaking map - FW responded with: %d\n", err);

	dev_consume_skb_any(skb);
}
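
/* Request and reply payloads lay out @n elements as consecutive
 * (key, value) pairs, each key occupying @cmsg_key_sz bytes and each
 * value @cmsg_val_sz bytes.  The helpers below compute the offset of the
 * n-th key or value within that layout.
 */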
static void *
nfp_bpf_ctrl_req_key(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
		     unsigned int n)
{
	return &req->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}
static void *
nfp_bpf_ctrl_req_val(struct nfp_app_bpf *bpf, struct cmsg_req_map_op *req,
		     unsigned int n)
{
	return &req->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}
static void *
nfp_bpf_ctrl_reply_key(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
		       unsigned int n)
{
	return &reply->data[bpf->cmsg_key_sz * n + bpf->cmsg_val_sz * n];
}
static void *
nfp_bpf_ctrl_reply_val(struct nfp_app_bpf *bpf, struct cmsg_reply_map_op *reply,
		       unsigned int n)
{
	return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n];
}
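
/* Common implementation of a single-element map operation: build the
 * request, copy in whichever of @key/@value the operation uses, exchange
 * it with the FW, and copy out @out_key/@out_value from the reply.
 */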
static int
nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap,
		      enum nfp_bpf_cmsg_type op,
		      u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value)
{
	struct nfp_bpf_map *nfp_map = offmap->dev_priv;
	struct nfp_app_bpf *bpf = nfp_map->bpf;
	struct bpf_map *map = &offmap->map;
	struct cmsg_reply_map_op *reply;
	struct cmsg_req_map_op *req;
	struct sk_buff *skb;
	int err;

	/* FW messages have no space for more than 32 bits of flags */
	if (flags >> 32)
		return -EOPNOTSUPP;

	skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1);
	if (!skb)
		return -ENOMEM;

	req = (void *)skb->data;
	req->tid = cpu_to_be32(nfp_map->tid);
	req->count = cpu_to_be32(1);
	req->flags = cpu_to_be32(flags);

	/* Copy inputs */
	if (key)
		memcpy(nfp_bpf_ctrl_req_key(bpf, req, 0), key, map->key_size);
	if (value)
		memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value,
		       map->value_size);

	skb = nfp_bpf_cmsg_communicate(bpf, skb, op,
				       nfp_bpf_cmsg_map_reply_size(bpf, 1));
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	reply = (void *)skb->data;
	err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr);
	if (err)
		goto err_free;

	/* Copy outputs */
	if (out_key)
		memcpy(out_key, nfp_bpf_ctrl_reply_key(bpf, reply, 0),
		       map->key_size);
	if (out_value)
		memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0),
		       map->value_size);

	dev_consume_skb_any(skb);

	return 0;
err_free:
	dev_kfree_skb_any(skb);
	return err;
}
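
/* Thin wrappers mapping each map access to its control message type;
 * these back the kernel's offloaded map operations.
 */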
int nfp_bpf_ctrl_update_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value, u64 flags)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_UPDATE,
				     key, value, flags, NULL, NULL);
}
int nfp_bpf_ctrl_del_entry(struct bpf_offloaded_map *offmap, void *key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_DELETE,
				     key, NULL, 0, NULL, NULL);
}
int nfp_bpf_ctrl_lookup_entry(struct bpf_offloaded_map *offmap,
			      void *key, void *value)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_LOOKUP,
				     key, NULL, 0, NULL, value);
}
int nfp_bpf_ctrl_getfirst_entry(struct bpf_offloaded_map *offmap,
				void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETFIRST,
				     NULL, NULL, 0, next_key, NULL);
}
int nfp_bpf_ctrl_getnext_entry(struct bpf_offloaded_map *offmap,
			       void *key, void *next_key)
{
	return nfp_bpf_ctrl_entry_op(offmap, CMSG_TYPE_MAP_GETNEXT,
				     key, NULL, 0, next_key, NULL);
}
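
/* The control channel MTU must be large enough for a one-element map
 * request and reply on top of the default control message size.
 */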
unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf)
{
	return max3((unsigned int)NFP_NET_DEFAULT_MTU,
		    nfp_bpf_cmsg_map_req_size(bpf, 1),
		    nfp_bpf_cmsg_map_reply_size(bpf, 1));
}
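
/* RX handler for control messages.  BPF perf events are forwarded
 * straight to the event output path; everything else is treated as a
 * reply and queued for the waiter matching its tag.
 */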
void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_app_bpf *bpf = app->priv;
	unsigned int tag;

	if (unlikely(skb->len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", skb->len);
		goto err_free;
	}

	if (nfp_bpf_cmsg_get_type(skb) == CMSG_TYPE_BPF_EVENT) {
		if (!nfp_bpf_event_output(bpf, skb->data, skb->len))
			dev_consume_skb_any(skb);
		else
			dev_kfree_skb_any(skb);
		return;
	}

	nfp_ctrl_lock(bpf->app->ctrl);

	tag = nfp_bpf_cmsg_get_tag(skb);
	if (unlikely(!test_bit(tag, bpf->tag_allocator))) {
		cmsg_warn(bpf, "cmsg drop - no one is waiting for tag %u!\n",
			  tag);
		goto err_unlock;
	}

	__skb_queue_tail(&bpf->cmsg_replies, skb);
	wake_up_interruptible_all(&bpf->cmsg_wq);

	nfp_ctrl_unlock(bpf->app->ctrl);

	return;
err_unlock:
	nfp_ctrl_unlock(bpf->app->ctrl);
err_free:
	dev_kfree_skb_any(skb);
}
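
/* Raw-buffer variant of the RX handler: only BPF events are valid here,
 * since there is no skb to queue as a reply.
 */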
void
nfp_bpf_ctrl_msg_rx_raw(struct nfp_app *app, const void *data, unsigned int len)
{
	struct nfp_app_bpf *bpf = app->priv;
	const struct cmsg_hdr *hdr = data;

	if (unlikely(len < sizeof(struct cmsg_reply_map_simple))) {
		cmsg_warn(bpf, "cmsg drop - too short %d!\n", len);
		return;
	}

	if (hdr->type == CMSG_TYPE_BPF_EVENT)
		nfp_bpf_event_output(bpf, data, len);
	else
		cmsg_warn(bpf, "cmsg drop - msg type %d with raw buffer!\n",
			  hdr->type);
}