net/mlx5e: Directly get flow_steering struct as input when init/cleanup ethtool steering
drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx5/fs.h>
#include "en.h"
#include "en/params.h"
#include "en/xsk/pool.h"

static int flow_type_to_traffic_type(u32 flow_type);

static u32 flow_type_mask(u32 flow_type)
{
	return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
}

struct mlx5e_ethtool_rule {
	struct list_head list;
	struct ethtool_rx_flow_spec flow_spec;
	struct mlx5_flow_handle *rule;
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_rss *rss;
};

static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
{
	if (!--eth_ft->num_rules) {
		mlx5_destroy_flow_table(eth_ft->ft);
		eth_ft->ft = NULL;
	}
}

#define MLX5E_ETHTOOL_L3_L4_PRIO 0
#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
#define MLX5E_ETHTOOL_NUM_GROUPS 10
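/* Select the ethtool flow table for this rule. L3/L4 flow types share the
 * l3_l4_ft[] tables and ETHER_FLOW uses the l2_ft[] tables; the table index
 * and steering priority are derived from the number of match tuples. The
 * underlying mlx5 table is created lazily on first use and reference-counted
 * per rule.
 */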
static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
						  struct ethtool_rx_flow_spec *fs,
						  int num_tuples)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft;
	int max_tuples;
	int table_size;
	int prio;

	switch (flow_type_mask(fs->flow_type)) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
		eth_ft = &ethtool->l3_l4_ft[prio];
		break;
	case IP_USER_FLOW:
	case IPV6_USER_FLOW:
		max_tuples = ETHTOOL_NUM_L3_L4_FTS;
		prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
		eth_ft = &ethtool->l3_l4_ft[prio];
		break;
	case ETHER_FLOW:
		max_tuples = ETHTOOL_NUM_L2_FTS;
		prio = max_tuples - num_tuples;
		eth_ft = &ethtool->l2_ft[prio];
		prio += MLX5E_ETHTOOL_L2_PRIO;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	eth_ft->num_rules++;
	if (eth_ft->ft)
		return eth_ft;

	ns = mlx5_get_flow_namespace(priv->mdev,
				     MLX5_FLOW_NAMESPACE_ETHTOOL);
	if (!ns)
		return ERR_PTR(-EOPNOTSUPP);

	table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
						       flow_table_properties_nic_receive.log_max_ft_size)),
			   MLX5E_ETHTOOL_NUM_ENTRIES);

	ft_attr.prio = prio;
	ft_attr.max_fte = table_size;
	ft_attr.autogroup.max_num_groups = MLX5E_ETHTOOL_NUM_GROUPS;
	ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(ft))
		return (void *)ft;

	eth_ft->ft = ft;
	return eth_ft;
}

static void mask_spec(u8 *mask, u8 *val, size_t size)
{
	unsigned int i;

	for (i = 0; i < size; i++, mask++, val++)
		*((u8 *)val) = *((u8 *)mask) & *((u8 *)val);
}

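/* The helpers below fill the flow table entry match buffers: headers_c holds
 * the match criteria (mask) and headers_v the match value, both laid out as
 * fte_match_set_lyr_2_4.
 */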
#define MLX5E_FTE_SET(header_p, fld, v)  \
	MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)

#define MLX5E_FTE_ADDR_OF(header_p, fld) \
	MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)

static void
set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
	__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{
	if (ip4src_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_v, sizeof(ip4src_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ip4src_m, sizeof(ip4src_m));
	}
	if (ip4dst_m) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_v, sizeof(ip4dst_v));
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ip4dst_m, sizeof(ip4dst_m));
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
}

static void
set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
	__be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
{
	u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);

	if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       ip6src_m, ip6_sz);
	}
	if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
		memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_v, ip6_sz);
		memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       ip6dst_m, ip6_sz);
	}

	MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
	MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
}

static void
set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
	}
	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
}

static void
set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
	__be16 pdst_m, __be16 pdst_v)
{
	if (psrc_m) {
		MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
		MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
	}

	if (pdst_m) {
		MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
		MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
	}

	MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
	MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
}

static void
parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
	struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec;

	set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
		l4_mask->ip4dst, l4_val->ip4dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;

	set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
		l3_mask->ip4dst, l3_val->ip4dst);

	if (l3_mask->proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
	}
}

static void
parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;

	set_ip6(headers_c, headers_v, l3_mask->ip6src,
		l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);

	if (l3_mask->l4_proto) {
		MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
		MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
	}
}

static void
parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
	struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec;

	set_ip6(headers_c, headers_v, l4_mask->ip6src,
		l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);

	set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
		l4_mask->pdst, l4_val->pdst);
}

static void
parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	struct ethhdr *eth_val = &fs->h_u.ether_spec;

	mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
	MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
	MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
}

static void
set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
{
	MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
	MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
	MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
}

static void
set_dmac(void *headers_c, void *headers_v,
	 unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
{
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
}

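/* Translate an ethtool_rx_flow_spec into mlx5 match criteria and match value
 * buffers, dispatching on the flow type and applying the optional VLAN
 * (FLOW_EXT) and destination MAC (FLOW_MAC_EXT) extensions.
 */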
static int set_flow_attrs(u32 *match_c, u32 *match_v,
			  struct ethtool_rx_flow_spec *fs)
{
	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					     outer_headers);
	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					     outer_headers);
	u32 flow_type = flow_type_mask(fs->flow_type);

	switch (flow_type) {
	case TCP_V4_FLOW:
		parse_tcp4(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V4_FLOW:
		parse_udp4(outer_headers_c, outer_headers_v, fs);
		break;
	case IP_USER_FLOW:
		parse_ip4(outer_headers_c, outer_headers_v, fs);
		break;
	case TCP_V6_FLOW:
		parse_tcp6(outer_headers_c, outer_headers_v, fs);
		break;
	case UDP_V6_FLOW:
		parse_udp6(outer_headers_c, outer_headers_v, fs);
		break;
	case IPV6_USER_FLOW:
		parse_ip6(outer_headers_c, outer_headers_v, fs);
		break;
	case ETHER_FLOW:
		parse_ether(outer_headers_c, outer_headers_v, fs);
		break;
	default:
		return -EINVAL;
	}

	if ((fs->flow_type & FLOW_EXT) &&
	    (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
		set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
		set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
			 fs->h_ext.h_dest);
	}

	return 0;
}

static void add_rule_to_list(struct mlx5e_priv *priv,
			     struct mlx5e_ethtool_rule *rule)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct list_head *head = &ethtool->rules;
	struct mlx5e_ethtool_rule *iter;

	list_for_each_entry(iter, &ethtool->rules, list) {
		if (iter->flow_spec.location > rule->flow_spec.location)
			break;
		head = &iter->list;
	}
	ethtool->tot_num_rules++;
	list_add(&rule->list, head);
}

static bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}

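/* Resolve the destination TIR number for a rule: either a TIR of the RSS
 * context named via FLOW_RSS, or the direct/XSK TIR of the channel encoded
 * in the ring cookie. For RSS rules a reference on the RSS context is taken
 * and kept in the ethtool rule.
 */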
static int flow_get_tirn(struct mlx5e_priv *priv,
			 struct mlx5e_ethtool_rule *eth_rule,
			 struct ethtool_rx_flow_spec *fs,
			 u32 rss_context, u32 *tirn)
{
	if (fs->flow_type & FLOW_RSS) {
		struct mlx5e_packet_merge_param pkt_merge_param;
		struct mlx5e_rss *rss;
		u32 flow_type;
		int err;
		int tt;

		rss = mlx5e_rx_res_rss_get(priv->rx_res, rss_context);
		if (!rss)
			return -ENOENT;

		flow_type = flow_type_mask(fs->flow_type);
		tt = flow_type_to_traffic_type(flow_type);
		if (tt < 0)
			return -EINVAL;

		pkt_merge_param = priv->channels.params.packet_merge;
		err = mlx5e_rss_obtain_tirn(rss, tt, &pkt_merge_param, false, tirn);
		if (err)
			return err;
		eth_rule->rss = rss;
		mlx5e_rss_refcnt_inc(eth_rule->rss);
	} else {
		struct mlx5e_params *params = &priv->channels.params;
		enum mlx5e_rq_group group;
		u16 ix;

		mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group);

		*tirn = group == MLX5E_RQ_GROUP_XSK ?
			mlx5e_rx_res_get_tirn_xsk(priv->rx_res, ix) :
			mlx5e_rx_res_get_tirn_direct(priv->rx_res, ix);
	}

	return 0;
}

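/* Build the flow spec for an ethtool rule and add it to the given flow table:
 * RX_CLS_FLOW_DISC becomes a drop action, anything else forwards to the TIR
 * resolved by flow_get_tirn().
 */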
static struct mlx5_flow_handle *
add_ethtool_flow_rule(struct mlx5e_priv *priv,
		      struct mlx5e_ethtool_rule *eth_rule,
		      struct mlx5_flow_table *ft,
		      struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND };
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);
	err = set_flow_attrs(spec->match_criteria, spec->match_value,
			     fs);
	if (err)
		goto free;

	if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
	} else {
		dst = kzalloc(sizeof(*dst), GFP_KERNEL);
		if (!dst) {
			err = -ENOMEM;
			goto free;
		}

		err = flow_get_tirn(priv, eth_rule, fs, rss_context, &dst->tir_num);
		if (err)
			goto free;

		dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
	spec->flow_context.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, dst ? 1 : 0);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
			   __func__, err);
		goto free;
	}
free:
	kvfree(spec);
	kfree(dst);
	return err ? ERR_PTR(err) : rule;
}

static void del_ethtool_rule(struct mlx5e_flow_steering *fs,
			     struct mlx5e_ethtool_rule *eth_rule)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);

	if (eth_rule->rule)
		mlx5_del_flow_rules(eth_rule->rule);
	if (eth_rule->rss)
		mlx5e_rss_refcnt_dec(eth_rule->rss);
	list_del(&eth_rule->list);
	ethtool->tot_num_rules--;
	put_flow_table(eth_rule->eth_ft);
	kfree(eth_rule);
}

static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
						    int location)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct mlx5e_ethtool_rule *iter;

	list_for_each_entry(iter, &ethtool->rules, list) {
		if (iter->flow_spec.location == location)
			return iter;
	}
	return NULL;
}

static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
						   int location)
{
	struct mlx5e_ethtool_rule *eth_rule;

	eth_rule = find_ethtool_rule(priv, location);
	if (eth_rule)
		del_ethtool_rule(priv->fs, eth_rule);

	eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
	if (!eth_rule)
		return ERR_PTR(-ENOMEM);

	add_rule_to_list(priv, eth_rule);
	return eth_rule;
}

#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)

#define all_ones(field) (field == (__force typeof(field))-1)
#define all_zeros_or_all_ones(field)		\
	((field) == 0 || (field) == (__force typeof(field))-1)

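/* The validate_* helpers check the per-flow-type masks of an
 * ethtool_rx_flow_spec and return the number of match tuples the rule will
 * consume, or a negative errno for fields this path does not support.
 */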
static int validate_ethter(struct ethtool_rx_flow_spec *fs)
{
	struct ethhdr *eth_mask = &fs->m_u.ether_spec;
	int ntuples = 0;

	if (!is_zero_ether_addr(eth_mask->h_dest))
		ntuples++;
	if (!is_zero_ether_addr(eth_mask->h_source))
		ntuples++;
	if (eth_mask->h_proto)
		ntuples++;
	return ntuples;
}

static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
	int ntuples = 0;

	if (l4_mask->tos)
		return -EINVAL;

	if (l4_mask->ip4src)
		ntuples++;
	if (l4_mask->ip4dst)
		ntuples++;
	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_ip4(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tos ||
	    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
		return -EINVAL;
	if (l3_mask->ip4src)
		ntuples++;
	if (l3_mask->ip4dst)
		ntuples++;
	if (l3_mask->proto)
		ntuples++;
	/* Flow is IPv4 */
	return ++ntuples;
}

static int validate_ip6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
	int ntuples = 0;

	if (l3_mask->l4_4_bytes || l3_mask->tclass)
		return -EINVAL;
	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l3_mask->ip6dst))
		ntuples++;
	if (l3_mask->l4_proto)
		ntuples++;
	/* Flow is IPv6 */
	return ++ntuples;
}

static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
	int ntuples = 0;

	if (l4_mask->tclass)
		return -EINVAL;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6src))
		ntuples++;

	if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
		ntuples++;

	if (l4_mask->psrc)
		ntuples++;
	if (l4_mask->pdst)
		ntuples++;
	/* Flow is TCP/UDP */
	return ++ntuples;
}

static int validate_vlan(struct ethtool_rx_flow_spec *fs)
{
	if (fs->m_ext.vlan_etype ||
	    fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
		return -EINVAL;

	if (fs->m_ext.vlan_tci &&
	    (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID))
		return -EINVAL;

	return 1;
}

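/* Validate the whole flow spec: the rule location, the ring cookie and the
 * per-flow-type masks. Returns the total number of match tuples (used to
 * select a flow table) or a negative errno.
 */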
static int validate_flow(struct mlx5e_priv *priv,
			 struct ethtool_rx_flow_spec *fs)
{
	int num_tuples = 0;
	int ret = 0;

	if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	if (fs->ring_cookie != RX_CLS_FLOW_DISC)
		if (!mlx5e_qid_validate(priv->profile, &priv->channels.params,
					fs->ring_cookie))
			return -EINVAL;

	switch (flow_type_mask(fs->flow_type)) {
	case ETHER_FLOW:
		num_tuples += validate_ethter(fs);
		break;
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
		ret = validate_tcpudp4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IP_USER_FLOW:
		ret = validate_ip4(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
		ret = validate_tcpudp6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	case IPV6_USER_FLOW:
		ret = validate_ip6(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
		break;
	default:
		return -ENOTSUPP;
	}
	if ((fs->flow_type & FLOW_EXT)) {
		ret = validate_vlan(fs);
		if (ret < 0)
			return ret;
		num_tuples += ret;
	}

	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest))
		num_tuples++;

	return num_tuples;
}

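/* ETHTOOL_SRXCLSRLINS handler: validate the spec, pick (or create) the
 * matching flow table, replace any existing rule at the same location and
 * program the new steering rule.
 */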
static int
mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
			   struct ethtool_rx_flow_spec *fs, u32 rss_context)
{
	struct mlx5e_ethtool_table *eth_ft;
	struct mlx5e_ethtool_rule *eth_rule;
	struct mlx5_flow_handle *rule;
	int num_tuples;
	int err;

	num_tuples = validate_flow(priv, fs);
	if (num_tuples <= 0) {
		netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
			    __func__, num_tuples);
		return num_tuples;
	}

	eth_ft = get_flow_table(priv, fs, num_tuples);
	if (IS_ERR(eth_ft))
		return PTR_ERR(eth_ft);

	eth_rule = get_ethtool_rule(priv, fs->location);
	if (IS_ERR(eth_rule)) {
		put_flow_table(eth_ft);
		return PTR_ERR(eth_rule);
	}

	eth_rule->flow_spec = *fs;
	eth_rule->eth_ft = eth_ft;

	rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto del_ethtool_rule;
	}

	eth_rule->rule = rule;

	return 0;

del_ethtool_rule:
	del_ethtool_rule(priv->fs, eth_rule);

	return err;
}

static int
mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{
	struct mlx5e_ethtool_rule *eth_rule;
	int err = 0;

	if (location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -ENOSPC;

	eth_rule = find_ethtool_rule(priv, location);
	if (!eth_rule) {
		err = -ENOENT;
		goto out;
	}

	del_ethtool_rule(priv->fs, eth_rule);
out:
	return err;
}

static int
mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
		       struct ethtool_rxnfc *info, int location)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	struct mlx5e_ethtool_rule *eth_rule;

	if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
		return -EINVAL;

	list_for_each_entry(eth_rule, &ethtool->rules, list) {
		int index;

		if (eth_rule->flow_spec.location != location)
			continue;
		if (!info)
			return 0;
		info->fs = eth_rule->flow_spec;
		if (!eth_rule->rss)
			return 0;
		index = mlx5e_rx_res_rss_index(priv->rx_res, eth_rule->rss);
		if (index < 0)
			return index;
		info->rss_context = index;
		return 0;
	}

	return -ENOENT;
}

static int
mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	int location = 0;
	int idx = 0;
	int err = 0;

	info->data = MAX_NUM_OF_ETHTOOL_RULES;
	while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
		err = mlx5e_ethtool_get_flow(priv, NULL, location);
		if (!err)
			rule_locs[idx++] = location;
		location++;
	}
	return err;
}

void mlx5e_ethtool_cleanup_steering(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);
	struct mlx5e_ethtool_rule *iter;
	struct mlx5e_ethtool_rule *temp;

	list_for_each_entry_safe(iter, temp, &ethtool->rules, list)
		del_ethtool_rule(fs, iter);
}

void mlx5e_ethtool_init_steering(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(fs);

	INIT_LIST_HEAD(&ethtool->rules);
}

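/* Map an ethtool flow type to the mlx5 traffic type used to index RSS TIRs;
 * returns -EINVAL for flow types that have no traffic-type equivalent.
 */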
static int flow_type_to_traffic_type(u32 flow_type)
{
	switch (flow_type) {
	case TCP_V4_FLOW:
		return MLX5_TT_IPV4_TCP;
	case TCP_V6_FLOW:
		return MLX5_TT_IPV6_TCP;
	case UDP_V4_FLOW:
		return MLX5_TT_IPV4_UDP;
	case UDP_V6_FLOW:
		return MLX5_TT_IPV6_UDP;
	case AH_V4_FLOW:
		return MLX5_TT_IPV4_IPSEC_AH;
	case AH_V6_FLOW:
		return MLX5_TT_IPV6_IPSEC_AH;
	case ESP_V4_FLOW:
		return MLX5_TT_IPV4_IPSEC_ESP;
	case ESP_V6_FLOW:
		return MLX5_TT_IPV6_IPSEC_ESP;
	case IPV4_FLOW:
		return MLX5_TT_IPV4;
	case IPV6_FLOW:
		return MLX5_TT_IPV6;
	default:
		return -EINVAL;
	}
}

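/* ETHTOOL_SRXFH/GRXFH handlers: configure and report which packet fields
 * (IP addresses, L4 ports) feed the RSS hash for TCP/UDP over IPv4/IPv6.
 */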
static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	u8 rx_hash_field = 0;
	int err;
	int tt;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt < 0)
		return tt;

	/* RSS does not support anything other than hashing to queues
	 * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest
	 * port.
	 */
	if (nfc->flow_type != TCP_V4_FLOW &&
	    nfc->flow_type != TCP_V6_FLOW &&
	    nfc->flow_type != UDP_V4_FLOW &&
	    nfc->flow_type != UDP_V6_FLOW)
		return -EOPNOTSUPP;

	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EOPNOTSUPP;

	if (nfc->data & RXH_IP_SRC)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP;
	if (nfc->data & RXH_IP_DST)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP;
	if (nfc->data & RXH_L4_B_0_1)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT;
	if (nfc->data & RXH_L4_B_2_3)
		rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT;

	mutex_lock(&priv->state_lock);
	err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, tt, rx_hash_field);
	mutex_unlock(&priv->state_lock);

	return err;
}

static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
				  struct ethtool_rxnfc *nfc)
{
	u32 hash_field = 0;
	int tt;

	tt = flow_type_to_traffic_type(nfc->flow_type);
	if (tt < 0)
		return tt;

	hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, tt);
	nfc->data = 0;

	if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP)
		nfc->data |= RXH_IP_SRC;
	if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP)
		nfc->data |= RXH_IP_DST;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT)
		nfc->data |= RXH_L4_B_0_1;
	if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT)
		nfc->data |= RXH_L4_B_2_3;

	return 0;
}

int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{
	int err = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		err = mlx5e_ethtool_flow_replace(priv, &cmd->fs, cmd->rss_context);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
		break;
	case ETHTOOL_SRXFH:
		err = mlx5e_set_rss_hash_opt(priv, cmd);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}

int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
			    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	struct mlx5e_ethtool_steering *ethtool = mlx5e_fs_get_ethtool(priv->fs);
	int err = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = ethtool->tot_num_rules;
		break;
	case ETHTOOL_GRXCLSRULE:
		err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
		break;
	case ETHTOOL_GRXCLSRLALL:
		err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		err = mlx5e_get_rss_hash_opt(priv, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
993