/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <linux/mlx5/fs.h>
36 struct mlx5e_ethtool_rule {
37 struct list_head list;
38 struct ethtool_rx_flow_spec flow_spec;
39 struct mlx5_flow_rule *rule;
40 struct mlx5e_ethtool_table *eth_ft;
43 static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
45 if (!--eth_ft->num_rules) {
46 mlx5_destroy_flow_table(eth_ft->ft);
51 #define MLX5E_ETHTOOL_L3_L4_PRIO 0
52 #define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
53 #define MLX5E_ETHTOOL_NUM_ENTRIES 64000
54 #define MLX5E_ETHTOOL_NUM_GROUPS 10
55 static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
56 struct ethtool_rx_flow_spec *fs,
59 struct mlx5e_ethtool_table *eth_ft;
60 struct mlx5_flow_namespace *ns;
61 struct mlx5_flow_table *ft;
66 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
69 max_tuples = ETHTOOL_NUM_L3_L4_FTS;
70 prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
71 eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
74 max_tuples = ETHTOOL_NUM_L3_L4_FTS;
75 prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
76 eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
79 max_tuples = ETHTOOL_NUM_L2_FTS;
80 prio = max_tuples - num_tuples;
81 eth_ft = &priv->fs.ethtool.l2_ft[prio];
82 prio += MLX5E_ETHTOOL_L2_PRIO;
85 return ERR_PTR(-EINVAL);
92 ns = mlx5_get_flow_namespace(priv->mdev,
93 MLX5_FLOW_NAMESPACE_ETHTOOL);
95 return ERR_PTR(-ENOTSUPP);
97 table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
98 flow_table_properties_nic_receive.log_max_ft_size)),
99 MLX5E_ETHTOOL_NUM_ENTRIES);
100 ft = mlx5_create_auto_grouped_flow_table(ns, prio,
102 MLX5E_ETHTOOL_NUM_GROUPS, 0);
110 static void mask_spec(u8 *mask, u8 *val, size_t size)
114 for (i = 0; i < size; i++, mask++, val++)
115 *((u8 *)val) = *((u8 *)mask) & *((u8 *)val);
118 static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
119 __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
122 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
123 src_ipv4_src_ipv6.ipv4_layout.ipv4),
124 &ip4src_v, sizeof(ip4src_v));
125 memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
126 src_ipv4_src_ipv6.ipv4_layout.ipv4),
127 0xff, sizeof(ip4src_m));
130 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
131 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
132 &ip4dst_v, sizeof(ip4dst_v));
133 memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
134 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
135 0xff, sizeof(ip4dst_m));
137 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
138 ethertype, ETH_P_IP);
139 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
143 static int set_flow_attrs(u32 *match_c, u32 *match_v,
144 struct ethtool_rx_flow_spec *fs)
146 void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
148 void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
150 u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
151 struct ethtool_tcpip4_spec *l4_mask;
152 struct ethtool_tcpip4_spec *l4_val;
153 struct ethtool_usrip4_spec *l3_mask;
154 struct ethtool_usrip4_spec *l3_val;
155 struct ethhdr *eth_val;
156 struct ethhdr *eth_mask;
160 l4_mask = &fs->m_u.tcp_ip4_spec;
161 l4_val = &fs->h_u.tcp_ip4_spec;
162 set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
163 l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
166 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
168 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
169 ntohs(l4_val->psrc));
172 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
174 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
175 ntohs(l4_val->pdst));
177 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
179 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
183 l4_mask = &fs->m_u.tcp_ip4_spec;
184 l4_val = &fs->h_u.tcp_ip4_spec;
185 set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
186 l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
189 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
191 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
192 ntohs(l4_val->psrc));
195 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
197 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
198 ntohs(l4_val->pdst));
200 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
202 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
206 l3_mask = &fs->m_u.usr_ip4_spec;
207 l3_val = &fs->h_u.usr_ip4_spec;
208 set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
209 l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
212 eth_mask = &fs->m_u.ether_spec;
213 eth_val = &fs->h_u.ether_spec;
215 mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
216 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
217 outer_headers_c, smac_47_16),
219 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
220 outer_headers_v, smac_47_16),
222 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
223 outer_headers_c, dmac_47_16),
225 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
226 outer_headers_v, dmac_47_16),
228 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
229 ntohs(eth_mask->h_proto));
230 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
231 ntohs(eth_val->h_proto));
237 if ((fs->flow_type & FLOW_EXT) &&
238 (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
239 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
241 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
243 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
245 MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
246 first_vid, ntohs(fs->h_ext.vlan_tci));
248 if (fs->flow_type & FLOW_MAC_EXT &&
249 !is_zero_ether_addr(fs->m_ext.h_dest)) {
250 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
251 outer_headers_c, dmac_47_16),
253 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
254 outer_headers_v, dmac_47_16),
261 static void add_rule_to_list(struct mlx5e_priv *priv,
262 struct mlx5e_ethtool_rule *rule)
264 struct mlx5e_ethtool_rule *iter;
265 struct list_head *head = &priv->fs.ethtool.rules;
267 list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
268 if (iter->flow_spec.location > rule->flow_spec.location)
272 priv->fs.ethtool.tot_num_rules++;
273 list_add(&rule->list, head);
276 static bool outer_header_zero(u32 *match_criteria)
278 int size = MLX5_ST_SZ_BYTES(fte_match_param);
279 char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
282 return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
287 static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
288 struct mlx5_flow_table *ft,
289 struct ethtool_rx_flow_spec *fs)
291 struct mlx5_flow_destination *dst = NULL;
292 struct mlx5_flow_spec *spec;
293 struct mlx5_flow_rule *rule;
297 spec = mlx5_vzalloc(sizeof(*spec));
299 return ERR_PTR(-ENOMEM);
300 err = set_flow_attrs(spec->match_criteria, spec->match_value,
305 if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
306 action = MLX5_FLOW_CONTEXT_ACTION_DROP;
308 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
314 dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
315 dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
316 action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
319 spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
320 rule = mlx5_add_flow_rule(ft, spec, action,
321 MLX5_FS_DEFAULT_FLOW_TAG, dst);
324 netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
331 return err ? ERR_PTR(err) : rule;
334 static void del_ethtool_rule(struct mlx5e_priv *priv,
335 struct mlx5e_ethtool_rule *eth_rule)
338 mlx5_del_flow_rule(eth_rule->rule);
339 list_del(ð_rule->list);
340 priv->fs.ethtool.tot_num_rules--;
341 put_flow_table(eth_rule->eth_ft);
345 static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
348 struct mlx5e_ethtool_rule *iter;
350 list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
351 if (iter->flow_spec.location == location)
357 static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
360 struct mlx5e_ethtool_rule *eth_rule;
362 eth_rule = find_ethtool_rule(priv, location);
364 del_ethtool_rule(priv, eth_rule);
366 eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
368 return ERR_PTR(-ENOMEM);
370 add_rule_to_list(priv, eth_rule);
374 #define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
376 #define all_ones(field) (field == (__force typeof(field))-1)
377 #define all_zeros_or_all_ones(field) \
378 ((field) == 0 || (field) == (__force typeof(field))-1)
380 static int validate_flow(struct mlx5e_priv *priv,
381 struct ethtool_rx_flow_spec *fs)
383 struct ethtool_tcpip4_spec *l4_mask;
384 struct ethtool_usrip4_spec *l3_mask;
385 struct ethhdr *eth_mask;
388 if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
391 if (fs->ring_cookie >= priv->params.num_channels &&
392 fs->ring_cookie != RX_CLS_FLOW_DISC)
395 switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
397 eth_mask = &fs->m_u.ether_spec;
398 if (!is_zero_ether_addr(eth_mask->h_dest))
400 if (!is_zero_ether_addr(eth_mask->h_source))
402 if (eth_mask->h_proto)
407 if (fs->m_u.tcp_ip4_spec.tos)
409 l4_mask = &fs->m_u.tcp_ip4_spec;
410 if (l4_mask->ip4src) {
411 if (!all_ones(l4_mask->ip4src))
415 if (l4_mask->ip4dst) {
416 if (!all_ones(l4_mask->ip4dst))
421 if (!all_ones(l4_mask->psrc))
426 if (!all_ones(l4_mask->pdst))
430 /* Flow is TCP/UDP */
434 l3_mask = &fs->m_u.usr_ip4_spec;
435 if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
436 fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
438 if (l3_mask->ip4src) {
439 if (!all_ones(l3_mask->ip4src))
443 if (l3_mask->ip4dst) {
444 if (!all_ones(l3_mask->ip4dst))
454 if ((fs->flow_type & FLOW_EXT)) {
455 if (fs->m_ext.vlan_etype ||
456 (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
459 if (fs->m_ext.vlan_tci) {
460 if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
466 if (fs->flow_type & FLOW_MAC_EXT &&
467 !is_zero_ether_addr(fs->m_ext.h_dest))
473 int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
474 struct ethtool_rx_flow_spec *fs)
476 struct mlx5e_ethtool_table *eth_ft;
477 struct mlx5e_ethtool_rule *eth_rule;
478 struct mlx5_flow_rule *rule;
482 num_tuples = validate_flow(priv, fs);
483 if (num_tuples <= 0) {
484 netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
488 eth_ft = get_flow_table(priv, fs, num_tuples);
490 return PTR_ERR(eth_ft);
492 eth_rule = get_ethtool_rule(priv, fs->location);
493 if (IS_ERR(eth_rule)) {
494 put_flow_table(eth_ft);
495 return PTR_ERR(eth_rule);
498 eth_rule->flow_spec = *fs;
499 eth_rule->eth_ft = eth_ft;
502 goto del_ethtool_rule;
504 rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
507 goto del_ethtool_rule;
510 eth_rule->rule = rule;
515 del_ethtool_rule(priv, eth_rule);
520 int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
523 struct mlx5e_ethtool_rule *eth_rule;
526 if (location >= MAX_NUM_OF_ETHTOOL_RULES)
529 eth_rule = find_ethtool_rule(priv, location);
535 del_ethtool_rule(priv, eth_rule);
540 int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
543 struct mlx5e_ethtool_rule *eth_rule;
545 if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
548 list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
549 if (eth_rule->flow_spec.location == location) {
550 info->fs = eth_rule->flow_spec;
558 int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
565 while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
566 err = mlx5e_ethtool_get_flow(priv, info, location);
568 rule_locs[idx++] = location;
574 void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
576 struct mlx5e_ethtool_rule *iter;
577 struct mlx5e_ethtool_rule *temp;
579 list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
580 del_ethtool_rule(priv, iter);
583 void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
585 INIT_LIST_HEAD(&priv->fs.ethtool.rules);