/*
 * Copyright (C) 2016 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

/* Netronome network device driver: TC offload functions for PF and VF */

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/list.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>

#include "nfp_bpf.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
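
/* Timer callback which periodically reads the hardware filter hit counters.
 * It re-arms itself for as long as the BPF offload is enabled; the control
 * word is checked under rx_filter_lock so that it cannot race with the stop
 * path and re-arm a timer that is being deleted.
 */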
void nfp_net_filter_stats_timer(unsigned long data)
{
        struct nfp_net *nn = (void *)data;
        struct nfp_stat_pair latest;

        spin_lock_bh(&nn->rx_filter_lock);

        if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                mod_timer(&nn->rx_filter_stats_timer,
                          jiffies + NFP_NET_STAT_POLL_IVL);

        spin_unlock_bh(&nn->rx_filter_lock);

        latest.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
        latest.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);

        if (latest.pkts != nn->rx_filter.pkts)
                nn->rx_filter_change = jiffies;

        nn->rx_filter = latest;
}
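
/* Resynchronize the software counters with the current hardware values so
 * that the next stats dump starts from a fresh baseline.
 */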
static void nfp_net_bpf_stats_reset(struct nfp_net *nn)
{
        nn->rx_filter.pkts = nn_readq(nn, NFP_NET_CFG_STATS_APP1_FRAMES);
        nn->rx_filter.bytes = nn_readq(nn, NFP_NET_CFG_STATS_APP1_BYTES);
        nn->rx_filter_prev = nn->rx_filter;
        nn->rx_filter_change = jiffies;
}
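
/* Push the packet/byte deltas accumulated since the last dump into the TC
 * action stats.  ETH_HLEN is subtracted per packet because the hardware
 * counts whole frames while the software path at TC ingress does not count
 * the Ethernet header, so the two are made to match.
 */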
static int
nfp_net_bpf_stats_update(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
        struct tc_action *a;
        LIST_HEAD(actions);
        u64 bytes, pkts;

        pkts = nn->rx_filter.pkts - nn->rx_filter_prev.pkts;
        bytes = nn->rx_filter.bytes - nn->rx_filter_prev.bytes;
        bytes -= pkts * ETH_HLEN;

        nn->rx_filter_prev = nn->rx_filter;

        preempt_disable();

        tcf_exts_to_list(cls_bpf->exts, &actions);
        list_for_each_entry(a, &actions, list)
                tcf_action_stats_update(a, bytes, pkts, nn->rx_filter_change);

        preempt_enable();

        return 0;
}
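
/* Map the classifier/action configuration onto one of the action types the
 * firmware can execute, or return -EOPNOTSUPP if the combination cannot be
 * offloaded.
 */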
static int
nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (!cls_bpf->exts)
                return NN_ACT_XDP;

        /* TC direct action */
        if (cls_bpf->exts_integrated) {
                if (tc_no_actions(cls_bpf->exts))
                        return NN_ACT_DIRECT;

                return -EOPNOTSUPP;
        }

        /* TC legacy mode */
        if (!tc_single_action(cls_bpf->exts))
                return -EOPNOTSUPP;

        tcf_exts_to_list(cls_bpf->exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a))
                        return NN_ACT_TC_DROP;

                if (is_tcf_mirred_egress_redirect(a) &&
                    tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
                        return NN_ACT_TC_REDIR;
        }

        return -EOPNOTSUPP;
}
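
/* Validate the request, allocate a DMA-coherent buffer and JIT the BPF
 * program into it.  On success the caller owns the buffer and is expected
 * to pass it to nfp_net_bpf_load_and_start(), which also frees it.
 */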
static int
nfp_net_bpf_offload_prepare(struct nfp_net *nn,
                            struct tc_cls_bpf_offload *cls_bpf,
                            struct nfp_bpf_result *res,
                            void **code, dma_addr_t *dma_addr, u16 max_instr)
{
        unsigned int code_sz = max_instr * sizeof(u64);
        enum nfp_bpf_action_type act;
        u16 start_off, done_off;
        unsigned int max_mtu;
        int ret;

        if (!IS_ENABLED(CONFIG_BPF_SYSCALL))
                return -EOPNOTSUPP;

        ret = nfp_net_bpf_get_act(nn, cls_bpf);
        if (ret < 0)
                return ret;
        act = ret;

        max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
        if (max_mtu < nn->dp.netdev->mtu) {
                nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
                return -EOPNOTSUPP;
        }

        start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
        done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

        *code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
        if (!*code)
                return -ENOMEM;

        ret = nfp_bpf_jit(cls_bpf->prog, *code, act, start_off, done_off,
                          max_instr, res);
        if (ret) {
                dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
                return ret;
        }

        return 0;
}
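
/* Hand the translated program to the firmware and enable the BPF control
 * bit.  The code buffer is freed right after the reconfig commands; the
 * device is assumed to have consumed the code by the time the UPDATE_BPF
 * reconfig completes.
 */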
static void
nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
                           void *code, dma_addr_t dma_addr,
                           unsigned int code_sz, unsigned int n_instr,
                           bool dense_mode)
{
        u64 bpf_addr = dma_addr;
        int err;

        nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);

        if (dense_mode)
                bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;

        nn_writew(nn, NFP_NET_CFG_BPF_SIZE, n_instr);
        nn_writeq(nn, NFP_NET_CFG_BPF_ADDR, bpf_addr);

        /* Load up the JITed code */
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_BPF);
        if (err)
                nn_err(nn, "FW command error while loading BPF: %d\n", err);

        /* Enable passing packets through BPF function */
        nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
        err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
        if (err)
                nn_err(nn, "FW command error while enabling BPF: %d\n", err);

        dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);

        nfp_net_bpf_stats_reset(nn);
        mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
}
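
/* Disable the BPF offload and stop stats polling.  Clearing the control
 * bit under rx_filter_lock guarantees the timer callback cannot re-arm
 * itself after del_timer_sync() returns.
 */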
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
        if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
                return 0;

        spin_lock_bh(&nn->rx_filter_lock);
        nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
        spin_unlock_bh(&nn->rx_filter_lock);
        nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);

        del_timer_sync(&nn->rx_filter_stats_timer);
        nn->dp.bpf_offload_skip_sw = 0;

        return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
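
/* Entry point for TC cls_bpf offload requests coming from the stack. */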
int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
        struct nfp_bpf_result res;
        dma_addr_t dma_addr;
        u16 max_instr;
        void *code;
        int err;

        max_instr = nn_readw(nn, NFP_NET_CFG_BPF_MAX_LEN);

        switch (cls_bpf->command) {
        case TC_CLSBPF_REPLACE:
                /* There is nothing stopping us from implementing seamless
                 * replace, but the simple loading method used by the firmware
                 * does not handle atomic replace (i.e. we have to stop the
                 * BPF offload and re-enable it).  Leaking in a few frames
                 * which didn't have BPF applied in the hardware should be
                 * fine if software fallback is available, though.
                 */
                if (nn->dp.bpf_offload_skip_sw)
                        return -EBUSY;

                err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
                                                  &dma_addr, max_instr);
                if (err)
                        return err;

                nfp_net_bpf_stop(nn);
                nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
                                           dma_addr, max_instr * sizeof(u64),
                                           res.n_instr, res.dense_mode);
                return 0;

        case TC_CLSBPF_ADD:
                if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
                        return -EBUSY;

                err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
                                                  &dma_addr, max_instr);
                if (err)
                        return err;

                nfp_net_bpf_load_and_start(nn, cls_bpf->gen_flags, code,
                                           dma_addr, max_instr * sizeof(u64),
                                           res.n_instr, res.dense_mode);
                return 0;

        case TC_CLSBPF_DESTROY:
                return nfp_net_bpf_stop(nn);

        case TC_CLSBPF_STATS:
                return nfp_net_bpf_stats_update(nn, cls_bpf);

        default:
                return -EOPNOTSUPP;
        }
}