/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "lib/vxlan.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table *hairpin_ft;
	struct mlx5_fc *counter;
};

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_INGRESS	= MLX5E_TC_INGRESS,
	MLX5E_TC_FLOW_EGRESS	= MLX5E_TC_EGRESS,
	MLX5E_TC_FLOW_ESWITCH	= BIT(MLX5E_TC_FLOW_BASE),
	MLX5E_TC_FLOW_NIC	= BIT(MLX5E_TC_FLOW_BASE + 1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(MLX5E_TC_FLOW_BASE + 2),
	MLX5E_TC_FLOW_HAIRPIN	= BIT(MLX5E_TC_FLOW_BASE + 3),
	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
	MLX5E_TC_FLOW_SLOW	= BIT(MLX5E_TC_FLOW_BASE + 5),
};

#define MLX5E_TC_MAX_SPLITS 1

struct mlx5e_tc_flow {
	struct rhash_head	node;
	struct mlx5e_priv	*priv;
	u64			cookie;
	u16			flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	struct list_head	encap;   /* flows sharing the same encap ID */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct ip_tunnel_info tun_info;
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex;
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* flows sharing the same hairpin */
	struct list_head flows;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
};

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
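
/*
 * Modify-header (pedit) contexts are deduplicated: the raw HW action array
 * is hashed with jhash() as a cheap first-level lookup key, and an exact
 * memcmp() via cmp_mod_hdr_info() settles hash collisions. Two flows whose
 * pedit actions compile to byte-identical action arrays therefore end up
 * sharing a single mod_hdr_id in the device.
 */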

static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_key key;
	bool found = false;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		namespace = MLX5_FLOW_NAMESPACE_FDB;
		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	} else {
		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	}

	if (found)
		goto attach_flow;

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh)
		return -ENOMEM;

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	INIT_LIST_HEAD(&mh->flows);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err)
		goto out_err;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
	else
		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
	list_add(&flow->mod_hdr, &mh->flows);
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

out_err:
	kfree(mh);
	return err;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->mod_hdr.next;

	list_del(&flow->mod_hdr);

	/* no more flows referencing this mod hdr entry, release it */
	if (list_empty(next)) {
		struct mlx5e_mod_hdr_entry *mh;

		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
		hash_del(&mh->mod_hdr_hlist);
		kfree(mh);
	}
}

static struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));

		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
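
/*
 * Note: when the RSS hash function is XOR, the indirection table index is
 * bit-reversed (mlx5e_bits_invert) before the lookup, mirroring what the
 * regular RX path does so hairpin RSS spreads packets the same way.
 * E.g. with sz = 8 (ilog2(sz) = 3), i = 1 (0b001) maps to ix = 4 (0b100).
 */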

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}

	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_NUM_TT;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
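
/*
 * The hairpin hash key simply packs the peer vhca_id into the upper 16 bits
 * and the matched PCP prio into the low bits, e.g. (hypothetical values)
 * peer_vhca_id = 0x1234, prio = 3 gives a key of 0x12340003.
 */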

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
			return hpe;
	}

	return NULL;
}

#define UNKNOWN_MATCH_PRIO 8
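
/* VLAN PCP is a 3-bit field (valid values 0..7), so 8 is an out-of-band
 * marker used for flows that don't match on a specific priority.
 */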

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex;
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe)
		goto attach_flow;

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe)
		return -ENOMEM;

	INIT_LIST_HEAD(&hpe->flows);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* use one hairpin channel per each 50Gbps share of the total link speed */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
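
	/* e.g. on a 100Gbps port: max(100000, 50000) / 50000 = 2 hairpin
	 * channels; anything at or below 50Gbps collapses to one channel.
	 */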

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto create_hairpin_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

	hpe->hp = hp;
	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}
	list_add(&flow->hairpin, &hpe->flows);

	return 0;

create_hairpin_err:
	kfree(hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->hairpin.next;

	list_del(&flow->hairpin);

	/* no more hairpin flows for us, release the hairpin pair */
	if (list_empty(next)) {
		struct mlx5e_hairpin_entry *hpe;

		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);

		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   hpe->hp->pair->peer_mdev->priv.name);

		mlx5e_hairpin_destroy(hpe->hp);
		hash_del(&hpe->hairpin_hlist);
		kfree(hpe);
	}
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.reformat_id = 0,
		.flags = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	bool table_created = false;
	int err, dest_ix = 0;

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			goto err_add_hairpin_flow;
		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_fc_create;
		}
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			goto err_create_mod_hdr_id;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    MLX5E_TC_FT_LEVEL, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			err = PTR_ERR(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_add_rule;
	}

	return 0;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);
err_fc_create:
	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
	return err;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack);

static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->mirror_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	return rule;
}

static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;

	if (attr->mirror_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}

static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *slow_attr)
{
	struct mlx5_flow_handle *rule;

	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->mirror_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow->flags |= MLX5E_TC_FLOW_SLOW;

	return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_esw_flow_attr *slow_attr)
{
	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow->flags &= ~MLX5E_TC_FLOW_SLOW;
}
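
/*
 * Slow path rules forward packets to FDB_SLOW_PATH_CHAIN instead of applying
 * the full offloaded action list (typically resulting in software handling
 * via the representor). They are used while an encap rule can't be installed
 * yet, e.g. when there is no valid neighbour; mlx5e_tc_encap_flows_add() and
 * mlx5e_tc_encap_flows_del() below swap flows between the two forms as
 * neighbour state changes.
 */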

static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int err = 0, encap_err = 0;

	/* if prios are not supported, keep the old behaviour of using same prio
	 * for all offloaded rules.
	 */
	if (!mlx5_eswitch_prios_supported(esw))
		attr->prio = 1;

	if (attr->chain > max_chain) {
		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_max_prio_chain;
	}

	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_max_prio_chain;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     attr->parse_attr->mirred_ifindex);
		encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
					       out_dev, &encap_dev, flow,
					       extack);
		if (encap_err && encap_err != -EAGAIN) {
			err = encap_err;
			goto err_attach_encap;
		}
		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->out_rep[attr->out_count] = rpriv->rep;
		attr->out_mdev[attr->out_count++] = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_add_vlan;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			goto err_mod_hdr;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_create_counter;
		}

		attr->counter = counter;
	}

	/* we get here if (1) there's no error or when
	 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
	 */
	if (encap_err == -EAGAIN) {
		/* continue with goto slow path rule instead */
		struct mlx5_esw_flow_attr slow_attr;

		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
	} else {
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
	}

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_add_rule;
	}

	return 0;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_create_counter:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
		mlx5e_detach_encap(priv, flow);
err_attach_encap:
err_max_prio_chain:
	return err;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_esw_flow_attr slow_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		if (flow->flags & MLX5E_TC_FLOW_SLOW)
			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
		mlx5e_detach_encap(priv, flow);
		kvfree(attr->parse_attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw->dev, attr->counter);
}

void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
					 e->encap_size, e->encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		esw_attr = flow->esw_attr;
		esw_attr->encap_id = e->encap_id;
		spec = &esw_attr->parse_attr->spec;

		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
		flow->rule[0] = rule;
	}
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry(flow, &e->flows, encap) {
		spec = &flow->esw_attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
		flow->rule[0] = rule;
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
	}
}

static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}

void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = &nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5e_tc_get_counter(flow);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		if (neigh_used)
			break;
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			NL_SET_ERR_MSG_MOD(extack,
					   "port isn't an offloaded vxlan udp dport");
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		NL_SET_ERR_MSG_MOD(extack,
				   "IP tunnel decap offload supported only for vxlan, must set UDP dport");
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);

		if (mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*match_level = MLX5_MATCH_NONE;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer headers were already set by
		 * parse_tunnel_attr().
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		if (mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CVLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CVLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);

		if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
			*match_level = MLX5_MATCH_L2;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		/* the HW doesn't support frag first/later */
		if (mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);

		if (mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}

		if (mask->tos || mask->ttl)
			*match_level = MLX5_MATCH_L3;
	}

	/* ***  L3 attributes parsing up to here *** */

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transports are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*match_level = MLX5_MATCH_L4;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->key);
		struct flow_dissector_key_tcp *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(key->flags));

		if (mask->flags)
			*match_level = MLX5_MATCH_L4;
	}

	return 0;
}

static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 match_level;
	int err;

	err = __parse_cls_flower(priv, spec, f, &match_level);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < match_level)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow is not offloaded due to min inline setting");
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    match_level, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->match_level = match_level;
	else
		flow->nic_attr->match_level = match_level;

	return err;
}

struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
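
/*
 * pedit_header() resolves a TC pedit header type to the matching scratch
 * header inside struct pedit_headers, e.g.
 * pedit_header(masks, TCA_PEDIT_KEY_EX_HDR_TYPE_IP4) == (void *)&masks->ip4.
 */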

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask) /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}

struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

#define OFFLOAD(fw_field, size, field, off) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
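
/*
 * OFFLOAD() ties a device modify-header field id to the offset of the
 * corresponding bytes in struct pedit_headers, e.g. OFFLOAD(SIPV4, 4,
 * ip4.saddr, 0) expands to
 * {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr)}.
 */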

static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),

	OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
	OFFLOAD(SIPV4,  4, ip4.saddr, 0),
	OFFLOAD(DIPV4,  4, ip4.daddr, 0),

	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),

	OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
	OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),

	OFFLOAD(UDP_SPORT, 2, udp.source, 0),
	OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
};

/* On input, attr->num_mod_hdr_actions tells how many HW actions can be parsed
 * at most from the SW pedit action. On success, it says how many HW actions
 * were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last, next_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	__be32 mask_be32;
	__be16 mask_be16;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		field_bsize = f->size * BITS_PER_BYTE;

		if (field_bsize == 32) {
			mask_be32 = *(__be32 *)&mask;
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (field_bsize == 16) {
			mask_be16 = *(__be16 *)&mask;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		first = find_first_bit(&mask, field_bsize);
		next_z = find_next_zero_bit(&mask, field_bsize, first);
		last  = find_last_bit(&mask, field_bsize);
		if (first < next_z && next_z < last) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of a few sub-fields isn't supported");
			printk(KERN_WARNING "mlx5: rewrite of a few sub-fields (mask %lx) isn't offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, first);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}
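
/*
 * The first/next_z/last check above enforces that each offloaded rewrite
 * touches one contiguous run of bits, since a single HW set action encodes
 * just an offset and a length. E.g. (illustrative masks) 0x00ffff00 gives
 * first = 8, next_z = 24, last = 23 and is accepted, while 0xff00ff00 gives
 * first = 8, next_z = 16, last = 31 and is rejected.
 */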

static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* a single 32-bit pedit SW key can expand to as many as 16 HW actions */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}
static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct netlink_ext_ack *extack)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			NL_SET_ERR_MSG_MOD(extack,
					   "legacy pedit isn't offloaded");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr, extack);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack,
					   "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}

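/* Illustrative example (hypothetical interface and values) of a filter
 * whose pedit keys land in this parser, using the extended "ex" key format:
 *
 *   tc filter add dev eth0 ingress protocol ip flower \
 *       action pedit ex munge ip ttl set 63 pipe csum ip
 *
 * Legacy (non-"ex") pedit keys carry TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK and
 * are rejected above.
 */
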
static bool csum_offload_supported(struct mlx5e_priv *priv,
				   u32 action,
				   u32 update_flags,
				   struct netlink_ext_ack *extack)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* the HW recalcs checksums only when re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "TC csum action is only offloaded with pedit");
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload TC csum action for some header/s");
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}

static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct tcf_exts *exts,
					  struct netlink_ext_ack *extack)
{
	const struct tc_action *a;
	bool modify_ip_header;
	u8 htype, ip_proto;
	void *headers_v;
	u16 ethertype;
	int nkeys, i, k;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	tcf_exts_for_each_action(i, a, exts) {
		if (!is_tcf_pedit(a))
			continue;

		nkeys = tcf_pedit_nkeys(a);
		for (k = 0; k < nkeys; k++) {
			htype = tcf_pedit_htype(a, k);
			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
				modify_ip_header = true;
				break;
			}
		}
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of non TCP/UDP");
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}

out_ok:
	return true;
}

static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct tcf_exts *exts,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow,
				    struct netlink_ext_ack *extack)
{
	u32 actions;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		actions = flow->esw_attr->action;
	else
		actions = flow->nic_attr->action;

	if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
	    !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
		return false;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec, exts,
						     extack);

	return true;
}

static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *fmdev, *pmdev;
	u64 fsystem_guid, psystem_guid;

	fmdev = priv->mdev;
	pmdev = peer_priv->mdev;

	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);

	return (fsystem_guid == psystem_guid);
}

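/* Two netdevs count as "same HW" only when they report the same NIC system
 * image GUID, i.e. they are functions (PF/VFs) of one physical device; the
 * hairpin and merged-eswitch paths below rely on this test.
 */
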
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	u32 action = 0;
	int err, i;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, action,
						   tcf_csum_update_flags(a),
						   extack))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *peer_dev = tcf_mirred_dev(a);

			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex = peer_dev->ifindex;
				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "device is not on same HW, can't offload");
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Bad flow mark - only 16 bit is supported");
				return -EINVAL;
			}

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	attr->action = action;
	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}

static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   u8 *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = rt->dst.dev;

	if (!(*out_ttl))
		*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		(priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
		same_hw_devs(priv, peer_priv) &&
		MLX5_VPORT_MANAGER(peer_priv->mdev) &&
		(peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   u8 *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
					 fl6);
	if (ret < 0)
		return ret;

	if (!(*out_ttl))
		*out_ttl = ip6_dst_hoplimit(dst);

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  u8 tos, int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->tos = tos;
	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 4;
	ip->ihl = 5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

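/* Layout of the buffer gen_vxlan_header_ipv4() fills (50 bytes, matching
 * ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN):
 *
 *   | ethhdr (14) | iphdr (20) | udphdr (8) | vxlanhdr (8) |
 *
 * Fields not set here (IP tot_len/check, UDP source port and length) are
 * left zeroed for the device to complete, as with the IPv6 payload length
 * noted in the IPv6 variant below.
 */
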
static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  u8 tos, int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, tos, 0);
	/* the HW fills up the ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	u8 nud_state, tos, ttl;
	char *encap_header;
	int err;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	tos = tun_key->tos;
	ttl = tun_key->ttl;

	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to look up this entry in the neigh
	 * hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * its validity state, so that if we get a notification when the
	 * neigh changes its validity state, we'll find the relevant neigh
	 * in the hash table.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, tos, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}
	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
					 ipv4_encap_size, encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	u8 nud_state, tos, ttl;
	char *encap_header;
	int err;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	tos = tun_key->tos;
	ttl = tun_key->ttl;

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to look up this entry in the neigh
	 * hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * its validity state, so that if we get a notification when the
	 * neigh changes its validity state, we'll find the relevant neigh
	 * in the hash table.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, tos, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
					 ipv6_encap_size, encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}

bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv,
				    struct net_device *netdev)
{
	if (netif_is_vxlan(netdev) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
		return true;

	return false;
}

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		NL_SET_ERR_MSG_MOD(extack,
				   "must set udp dst port and not set udp src port");
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
	} else {
		NL_SET_ERR_MSG_MOD(extack,
				   "port isn't an offloaded vxlan udp dport");
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	/* must verify if encap is valid or not */
	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;
	else
		err = -EAGAIN;

	return err;

out_err:
	kfree(e);
	return err;
}

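/* Encap entries are deduplicated on the full ip_tunnel_key, so flows
 * encapsulating towards the same tunnel endpoint (same dst IP, VNI and
 * dport) share one reformat context (encap_id) and one neighbour entry.
 * While the neighbour is unresolved this returns -EAGAIN, and the caller
 * is expected to keep the flow in the slow path until a neighbour update
 * completes the encap.
 */
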
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct tc_action *a,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
{
	u8 vlan_idx = attr->total_vlan;

	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
		return -EOPNOTSUPP;

	if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
		} else {
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		}
	} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
		attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
		attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
		} else {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
			     tcf_vlan_push_prio(a)))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		}
	} else { /* action is TCA_VLAN_ACT_MODIFY */
		return -EOPNOTSUPP;
	}

	attr->total_vlan = vlan_idx + 1;

	return 0;
}

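/* Examples of actions this maps (illustrative tc syntax):
 *   "action vlan pop"                   -> MLX5_FLOW_CONTEXT_ACTION_VLAN_POP
 *   "action vlan push id 10 priority 3" -> MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH
 * A second push/pop on the same flow selects the *_2 variants and is only
 * accepted when the device supports MLX5_FS_VLAN_DEPTH (two) VLAN actions.
 */
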
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	bool encap = false;
	u32 action = 0;
	int err, i;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	attr->in_rep = rpriv->rep;
	attr->in_mdev = priv->mdev;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			attr->mirror_count = attr->out_count;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, action,
						   tcf_csum_update_flags(a),
						   extack))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = tcf_mirred_dev(a);
			if (!out_dev) {
				/* out_dev is NULL when filters with
				 * non-existing mirred device are replayed to
				 * the driver.
				 */
				return -EINVAL;
			}

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't support more output ports, can't offload forwarding");
				pr_err("can't support more than %d output ports, can't offload forwarding\n",
				       attr->out_count);
				return -EOPNOTSUPP;
			}

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev) ||
			    is_merged_eswitch_dev(priv, out_dev)) {
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep[attr->out_count] = rpriv->rep;
				attr->out_mdev[attr->out_count++] = out_priv->mdev;
			} else if (encap) {
				parse_attr->mirred_ifindex = out_dev->ifindex;
				parse_attr->tun_info = *info;
				attr->parse_attr = parse_attr;
				action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
					  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
				/* attr->out_rep is resolved when we handle encap */
			} else if (parse_attr->filter_dev != priv->netdev) {
				/* All mlx5 devices are called to configure
				 * high level device filters. Therefore, the
				 * *attempt* to install a filter on an invalid
				 * eswitch should not trigger an explicit error.
				 */
				return -EINVAL;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "devices are not on same switch HW, can't offload forwarding");
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}

			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;

			attr->mirror_count = attr->out_count;
			continue;
		}

		if (is_tcf_vlan(a)) {
			err = parse_tc_vlan_action(priv, a, attr, &action);
			if (err)
				return err;

			attr->mirror_count = attr->out_count;
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		if (is_tcf_gact_goto_chain(a)) {
			u32 dest_chain = tcf_gact_goto_chain_index(a);
			u32 max_chain = mlx5_eswitch_get_chain_range(esw);

			if (dest_chain <= attr->chain) {
				NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
				return -EOPNOTSUPP;
			}
			if (dest_chain > max_chain) {
				NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
				return -EOPNOTSUPP;
			}
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			attr->dest_chain = dest_chain;

			continue;
		}

		return -EINVAL;
	}

	attr->action = action;
	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static void get_flags(int flags, u16 *flow_flags)
{
	u16 __flow_flags = 0;

	if (flags & MLX5E_TC_INGRESS)
		__flow_flags |= MLX5E_TC_FLOW_INGRESS;
	if (flags & MLX5E_TC_EGRESS)
		__flow_flags |= MLX5E_TC_FLOW_EGRESS;

	*flow_flags = __flow_flags;
}

static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

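/* Flows are keyed by the unmodified cls_flower cookie, so the
 * configure/delete/stats entry points below resolve a flow with a single
 * rhashtable lookup on f->cookie.
 */
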
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return &uplink_rpriv->uplink_priv.tc_ht;
	}

	return &priv->fs.tc.ht;
}

static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct tc_cls_flower_offload *f, u16 flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}

static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct tc_cls_flower_offload *f,
		   u16 flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= MLX5E_TC_FLOW_ESWITCH;
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	flow->esw_attr->parse_attr = parse_attr;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, f);
	if (err)
		goto err_free;

	flow->esw_attr->chain = f->common.chain_index;
	flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
	err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	if (!(flow->esw_attr->action &
	      MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
		kvfree(parse_attr);

	*__flow = flow;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
out:
	return err;
}

static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct tc_cls_flower_offload *f,
		   u16 flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	/* multi-chain not supported for NIC rules */
	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
		return -EOPNOTSUPP;

	flow_flags |= MLX5E_TC_FLOW_NIC;
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, f);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	kvfree(parse_attr);
	*__flow = flow;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
out:
	return err;
}

static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct tc_cls_flower_offload *f,
		  int flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u16 flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}

int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct tc_cls_flower_offload *f, int flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv);
	struct mlx5e_tc_flow *flow;
	int err = 0;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		goto out;
	}

	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	return 0;

err_free:
	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
out:
	return err;
}

#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
		return true;

	return false;
}

int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv);
	struct mlx5e_tc_flow *flow;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv);
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5e_tc_get_counter(flow);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	return 0;
}

static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe;
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
		if (hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;
	}
}

static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}

int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err;

	hash_init(tc->mod_hdr_tbl);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	if (register_netdevice_notifier(&tc->netdevice_nb)) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
	}

	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier(&tc->netdevice_nb);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}

int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	return rhashtable_init(tc_ht, &tc_ht_params);
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
{
	struct rhashtable *tc_ht = get_tc_ht(priv);

	return atomic_read(&tc_ht->nelems);
}