net/mlx5: DR, moved all the SWS code into a separate directory
authorYevgeny Kliteynik <kliteyn@nvidia.com>
Thu, 31 Oct 2024 12:58:53 +0000 (14:58 +0200)
committerJakub Kicinski <kuba@kernel.org>
Sun, 3 Nov 2024 23:37:15 +0000 (15:37 -0800)
After adding HWS support in a separate folder, move all the SWS
code into its own folder as well.
Now the SWS and HWS implementations are located in their appropriate
folders:
 - steering/sws/
 - steering/hws/

Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Link: https://patch.msgid.link/20241031125856.530927-3-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
57 files changed:
drivers/net/ethernet/mellanox/mlx5/core/Makefile
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
drivers/net/ethernet/mellanox/mlx5/core/lib/smfs.h
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h [deleted file]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h [new file with mode: 0644]
drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h [new file with mode: 0644]

index 5912f7e614f9c0ba0781474f4d50f25ea7b6e6fd..42411fe772abbcf85e37c6b3eed03564a24784c5 100644 (file)
@@ -109,16 +109,29 @@ mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/ktls_stats.o \
                                   en_accel/fs_tcp.o en_accel/ktls.o en_accel/ktls_txrx.o \
                                   en_accel/ktls_tx.o en_accel/ktls_rx.o
 
-mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/dr_domain.o steering/dr_table.o \
-                                       steering/dr_matcher.o steering/dr_rule.o \
-                                       steering/dr_icm_pool.o steering/dr_buddy.o \
-                                       steering/dr_ste.o steering/dr_send.o \
-                                       steering/dr_ste_v0.o steering/dr_ste_v1.o \
-                                       steering/dr_ste_v2.o \
-                                       steering/dr_cmd.o steering/dr_fw.o \
-                                       steering/dr_action.o steering/fs_dr.o \
-                                       steering/dr_definer.o steering/dr_ptrn.o \
-                                       steering/dr_arg.o steering/dr_dbg.o lib/smfs.o
+#
+# SW Steering
+#
+mlx5_core-$(CONFIG_MLX5_SW_STEERING) += steering/sws/dr_domain.o \
+                                       steering/sws/dr_table.o \
+                                       steering/sws/dr_matcher.o \
+                                       steering/sws/dr_rule.o \
+                                       steering/sws/dr_icm_pool.o \
+                                       steering/sws/dr_buddy.o \
+                                       steering/sws/dr_ste.o \
+                                       steering/sws/dr_send.o \
+                                       steering/sws/dr_ste_v0.o \
+                                       steering/sws/dr_ste_v1.o \
+                                       steering/sws/dr_ste_v2.o \
+                                       steering/sws/dr_cmd.o \
+                                       steering/sws/dr_fw.o \
+                                       steering/sws/dr_action.o \
+                                       steering/sws/dr_definer.o \
+                                       steering/sws/dr_ptrn.o \
+                                       steering/sws/dr_arg.o \
+                                       steering/sws/dr_dbg.o \
+                                       steering/sws/fs_dr.o \
+                                       lib/smfs.o
 
 #
 # HW Steering
index b30976627c6b91d5c6eaaed422d993ad70e25d0d..bad2df0715ecc977162bb9bf3c37ab80d9e245b1 100644 (file)
@@ -37,7 +37,7 @@
 #include <linux/mlx5/fs.h>
 #include <linux/rhashtable.h>
 #include <linux/llist.h>
-#include <steering/fs_dr.h>
+#include <steering/sws/fs_dr.h>
 
 #define FDB_TC_MAX_CHAIN 3
 #define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
index 452d0df339acd49f3627b27336450c5c14a07fbe..404f3d4b6380ca9f7c7d3f7ffc5e42f7b6408765 100644 (file)
@@ -4,8 +4,8 @@
 #ifndef __MLX5_LIB_SMFS_H__
 #define __MLX5_LIB_SMFS_H__
 
-#include "steering/mlx5dr.h"
-#include "steering/dr_types.h"
+#include "steering/sws/mlx5dr.h"
+#include "steering/sws/dr_types.h"
 
 struct mlx5dr_matcher *
 mlx5_smfs_matcher_create(struct mlx5dr_table *table, u32 priority, struct mlx5_flow_spec *spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
deleted file mode 100644 (file)
index 2ebb61e..0000000
+++ /dev/null
@@ -1,2245 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#include "dr_types.h"
-#include "dr_ste.h"
-
-enum dr_action_domain {
-       DR_ACTION_DOMAIN_NIC_INGRESS,
-       DR_ACTION_DOMAIN_NIC_EGRESS,
-       DR_ACTION_DOMAIN_FDB_INGRESS,
-       DR_ACTION_DOMAIN_FDB_EGRESS,
-       DR_ACTION_DOMAIN_MAX,
-};
-
-enum dr_action_valid_state {
-       DR_ACTION_STATE_ERR,
-       DR_ACTION_STATE_NO_ACTION,
-       DR_ACTION_STATE_ENCAP,
-       DR_ACTION_STATE_DECAP,
-       DR_ACTION_STATE_MODIFY_HDR,
-       DR_ACTION_STATE_POP_VLAN,
-       DR_ACTION_STATE_PUSH_VLAN,
-       DR_ACTION_STATE_NON_TERM,
-       DR_ACTION_STATE_TERM,
-       DR_ACTION_STATE_ASO,
-       DR_ACTION_STATE_MAX,
-};
-
-static const char * const action_type_to_str[] = {
-       [DR_ACTION_TYP_TNL_L2_TO_L2] = "DR_ACTION_TYP_TNL_L2_TO_L2",
-       [DR_ACTION_TYP_L2_TO_TNL_L2] = "DR_ACTION_TYP_L2_TO_TNL_L2",
-       [DR_ACTION_TYP_TNL_L3_TO_L2] = "DR_ACTION_TYP_TNL_L3_TO_L2",
-       [DR_ACTION_TYP_L2_TO_TNL_L3] = "DR_ACTION_TYP_L2_TO_TNL_L3",
-       [DR_ACTION_TYP_DROP] = "DR_ACTION_TYP_DROP",
-       [DR_ACTION_TYP_QP] = "DR_ACTION_TYP_QP",
-       [DR_ACTION_TYP_FT] = "DR_ACTION_TYP_FT",
-       [DR_ACTION_TYP_CTR] = "DR_ACTION_TYP_CTR",
-       [DR_ACTION_TYP_TAG] = "DR_ACTION_TYP_TAG",
-       [DR_ACTION_TYP_MODIFY_HDR] = "DR_ACTION_TYP_MODIFY_HDR",
-       [DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT",
-       [DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN",
-       [DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN",
-       [DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
-       [DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
-       [DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
-       [DR_ACTION_TYP_ASO_FLOW_METER] = "DR_ACTION_TYP_ASO_FLOW_METER",
-       [DR_ACTION_TYP_RANGE] = "DR_ACTION_TYP_RANGE",
-       [DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
-};
-
-static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
-{
-       if (action_id > DR_ACTION_TYP_MAX)
-               action_id = DR_ACTION_TYP_MAX;
-       return action_type_to_str[action_id];
-}
-
-static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev)
-{
-       return (MLX5_CAP_GEN(dev, steering_format_version) < MLX5_STEERING_FORMAT_CONNECTX_6DX ||
-               MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
-               MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table));
-}
-
-static const enum dr_action_valid_state
-next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = {
-       [DR_ACTION_DOMAIN_NIC_INGRESS] = {
-               [DR_ACTION_STATE_NO_ACTION] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_NON_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
-                       [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_TNL_L3_TO_L2]    = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_DECAP] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_ENCAP] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_MODIFY_HDR] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_POP_VLAN] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_PUSH_VLAN] = {
-                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_NON_TERM] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_NON_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
-                       [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_TNL_L3_TO_L2]    = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_ASO] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_TERM] = {
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_TERM,
-               },
-       },
-       [DR_ACTION_DOMAIN_NIC_EGRESS] = {
-               [DR_ACTION_STATE_NO_ACTION] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_DECAP] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_ENCAP] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_MODIFY_HDR] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_POP_VLAN] = {
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_PUSH_VLAN] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_NON_TERM] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_ASO] = {
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ASO,
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-               },
-               [DR_ACTION_STATE_TERM] = {
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_TERM,
-               },
-       },
-       [DR_ACTION_DOMAIN_FDB_INGRESS] = {
-               [DR_ACTION_STATE_NO_ACTION] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
-                       [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_TNL_L3_TO_L2]    = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_DECAP] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_ENCAP] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_MODIFY_HDR] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_POP_VLAN] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_PUSH_VLAN] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_NON_TERM] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
-                       [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_TNL_L3_TO_L2]    = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_ASO] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_TERM] = {
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_TERM,
-               },
-       },
-       [DR_ACTION_DOMAIN_FDB_EGRESS] = {
-               [DR_ACTION_STATE_NO_ACTION] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_DECAP] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_ENCAP] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_MODIFY_HDR] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_POP_VLAN] = {
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_PUSH_VLAN] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_NON_TERM] = {
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_ASO] = {
-                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
-                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
-                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
-                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ASO,
-               },
-               [DR_ACTION_STATE_TERM] = {
-                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_TERM,
-               },
-       },
-};
-
-static int
-dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type,
-                                 enum mlx5dr_action_type *action_type)
-{
-       switch (reformat_type) {
-       case DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2:
-               *action_type = DR_ACTION_TYP_TNL_L2_TO_L2;
-               break;
-       case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2:
-               *action_type = DR_ACTION_TYP_L2_TO_TNL_L2;
-               break;
-       case DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2:
-               *action_type = DR_ACTION_TYP_TNL_L3_TO_L2;
-               break;
-       case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3:
-               *action_type = DR_ACTION_TYP_L2_TO_TNL_L3;
-               break;
-       case DR_ACTION_REFORMAT_TYP_INSERT_HDR:
-               *action_type = DR_ACTION_TYP_INSERT_HDR;
-               break;
-       case DR_ACTION_REFORMAT_TYP_REMOVE_HDR:
-               *action_type = DR_ACTION_TYP_REMOVE_HDR;
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-/* Apply the actions on the rule STE array starting from the last_ste.
- * Actions might require more than one STE, new_num_stes will return
- * the new size of the STEs array, rule with actions.
- */
-static void dr_actions_apply(struct mlx5dr_domain *dmn,
-                            enum mlx5dr_domain_nic_type nic_type,
-                            u8 *action_type_set,
-                            u8 *last_ste,
-                            struct mlx5dr_ste_actions_attr *attr,
-                            u32 *new_num_stes)
-{
-       struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
-       u32 added_stes = 0;
-
-       if (nic_type == DR_DOMAIN_NIC_TYPE_RX)
-               mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set,
-                                         last_ste, attr, &added_stes);
-       else
-               mlx5dr_ste_set_actions_tx(ste_ctx, dmn, action_type_set,
-                                         last_ste, attr, &added_stes);
-
-       *new_num_stes += added_stes;
-}
-
-static enum dr_action_domain
-dr_action_get_action_domain(enum mlx5dr_domain_type domain,
-                           enum mlx5dr_domain_nic_type nic_type)
-{
-       switch (domain) {
-       case MLX5DR_DOMAIN_TYPE_NIC_RX:
-               return DR_ACTION_DOMAIN_NIC_INGRESS;
-       case MLX5DR_DOMAIN_TYPE_NIC_TX:
-               return DR_ACTION_DOMAIN_NIC_EGRESS;
-       case MLX5DR_DOMAIN_TYPE_FDB:
-               if (nic_type == DR_DOMAIN_NIC_TYPE_RX)
-                       return DR_ACTION_DOMAIN_FDB_INGRESS;
-               return DR_ACTION_DOMAIN_FDB_EGRESS;
-       default:
-               WARN_ON(true);
-               return DR_ACTION_DOMAIN_MAX;
-       }
-}
-
-static
-int dr_action_validate_and_get_next_state(enum dr_action_domain action_domain,
-                                         u32 action_type,
-                                         u32 *state)
-{
-       u32 cur_state = *state;
-
-       /* Check action state machine is valid */
-       *state = next_action_state[action_domain][cur_state][action_type];
-
-       if (*state == DR_ACTION_STATE_ERR)
-               return -EOPNOTSUPP;
-
-       return 0;
-}
-
-static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
-                                     struct mlx5dr_action *dest_action,
-                                     u64 *final_icm_addr)
-{
-       int ret;
-
-       switch (dest_action->action_type) {
-       case DR_ACTION_TYP_FT:
-               /* Allow destination flow table only if table is a terminating
-                * table, since there is an *assumption* that in such case FW
-                * will recalculate the CS.
-                */
-               if (dest_action->dest_tbl->is_fw_tbl) {
-                       *final_icm_addr = dest_action->dest_tbl->fw_tbl.rx_icm_addr;
-               } else {
-                       mlx5dr_dbg(dmn,
-                                  "Destination FT should be terminating when modify TTL is used\n");
-                       return -EINVAL;
-               }
-               break;
-
-       case DR_ACTION_TYP_VPORT:
-               /* If destination is vport we will get the FW flow table
-                * that recalculates the CS and forwards to the vport.
-                */
-               ret = mlx5dr_domain_get_recalc_cs_ft_addr(dest_action->vport->dmn,
-                                                         dest_action->vport->caps->num,
-                                                         final_icm_addr);
-               if (ret) {
-                       mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
-                       return ret;
-               }
-               break;
-
-       default:
-               break;
-       }
-
-       return 0;
-}
-
-static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
-                                       struct mlx5dr_ste_actions_attr *attr,
-                                       bool rx_rule,
-                                       bool *recalc_cs_required)
-{
-       *recalc_cs_required = false;
-
-       /* if device supports csum recalculation - no adjustment needed */
-       if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
-               return;
-
-       /* no adjustment needed on TX rules */
-       if (!rx_rule)
-               return;
-
-       if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
-               /* Ignore the modify TTL action.
-                * It is always kept as last HW action.
-                */
-               attr->modify_actions--;
-               return;
-       }
-
-       if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
-               /* Due to a HW bug on some devices, modifying TTL on RX flows
-                * will cause an incorrect checksum calculation. In such cases
-                * we will use a FW table to recalculate the checksum.
-                */
-               *recalc_cs_required = true;
-}
-
-static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
-                                    struct mlx5dr_action *actions[],
-                                    int last_idx)
-{
-       int i;
-
-       for (i = 0; i <= last_idx; i++)
-               mlx5dr_err(dmn, "< %s (%d) > ",
-                          dr_action_id_to_str(actions[i]->action_type),
-                          actions[i]->action_type);
-}
-
-static int dr_action_get_dest_fw_tbl_addr(struct mlx5dr_matcher *matcher,
-                                         struct mlx5dr_action_dest_tbl *dest_tbl,
-                                         bool is_rx_rule,
-                                         u64 *final_icm_addr)
-{
-       struct mlx5dr_cmd_query_flow_table_details output;
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       int ret;
-
-       if (!dest_tbl->fw_tbl.rx_icm_addr) {
-               ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
-                                                 dest_tbl->fw_tbl.type,
-                                                 dest_tbl->fw_tbl.id,
-                                                 &output);
-               if (ret) {
-                       mlx5dr_err(dmn,
-                                  "Failed mlx5_cmd_query_flow_table ret: %d\n",
-                                  ret);
-                       return ret;
-               }
-
-               dest_tbl->fw_tbl.tx_icm_addr = output.sw_owner_icm_root_1;
-               dest_tbl->fw_tbl.rx_icm_addr = output.sw_owner_icm_root_0;
-       }
-
-       *final_icm_addr = is_rx_rule ? dest_tbl->fw_tbl.rx_icm_addr :
-                                      dest_tbl->fw_tbl.tx_icm_addr;
-       return 0;
-}
-
-static int dr_action_get_dest_sw_tbl_addr(struct mlx5dr_matcher *matcher,
-                                         struct mlx5dr_action_dest_tbl *dest_tbl,
-                                         bool is_rx_rule,
-                                         u64 *final_icm_addr)
-{
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_icm_chunk *chunk;
-
-       if (dest_tbl->tbl->dmn != dmn) {
-               mlx5dr_err(dmn,
-                          "Destination table belongs to a different domain\n");
-               return -EINVAL;
-       }
-
-       if (dest_tbl->tbl->level <= matcher->tbl->level) {
-               mlx5_core_dbg_once(dmn->mdev,
-                                  "Connecting table to a lower/same level destination table\n");
-               mlx5dr_dbg(dmn,
-                          "Connecting table at level %d to a destination table at level %d\n",
-                          matcher->tbl->level,
-                          dest_tbl->tbl->level);
-       }
-
-       chunk = is_rx_rule ? dest_tbl->tbl->rx.s_anchor->chunk :
-                            dest_tbl->tbl->tx.s_anchor->chunk;
-
-       *final_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
-       return 0;
-}
-
-static int dr_action_get_dest_tbl_addr(struct mlx5dr_matcher *matcher,
-                                      struct mlx5dr_action_dest_tbl *dest_tbl,
-                                      bool is_rx_rule,
-                                      u64 *final_icm_addr)
-{
-       if (dest_tbl->is_fw_tbl)
-               return dr_action_get_dest_fw_tbl_addr(matcher,
-                                                     dest_tbl,
-                                                     is_rx_rule,
-                                                     final_icm_addr);
-
-       return dr_action_get_dest_sw_tbl_addr(matcher,
-                                             dest_tbl,
-                                             is_rx_rule,
-                                             final_icm_addr);
-}
-
-#define WITH_VLAN_NUM_HW_ACTIONS 6
-
-int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
-                                struct mlx5dr_matcher_rx_tx *nic_matcher,
-                                struct mlx5dr_action *actions[],
-                                u32 num_actions,
-                                u8 *ste_arr,
-                                u32 *new_hw_ste_arr_sz)
-{
-       struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
-       bool rx_rule = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       u8 action_type_set[DR_ACTION_TYP_MAX] = {};
-       struct mlx5dr_ste_actions_attr attr = {};
-       struct mlx5dr_action *dest_action = NULL;
-       u32 state = DR_ACTION_STATE_NO_ACTION;
-       enum dr_action_domain action_domain;
-       bool recalc_cs_required = false;
-       u8 *last_ste;
-       int i, ret;
-
-       attr.gvmi = dmn->info.caps.gvmi;
-       attr.hit_gvmi = dmn->info.caps.gvmi;
-       attr.final_icm_addr = nic_dmn->default_icm_addr;
-       action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->type);
-
-       for (i = 0; i < num_actions; i++) {
-               struct mlx5dr_action *action;
-               int max_actions_type = 1;
-               u32 action_type;
-
-               action = actions[i];
-               action_type = action->action_type;
-
-               switch (action_type) {
-               case DR_ACTION_TYP_DROP:
-                       attr.final_icm_addr = nic_dmn->drop_icm_addr;
-                       attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
-                       break;
-               case DR_ACTION_TYP_FT:
-                       dest_action = action;
-                       ret = dr_action_get_dest_tbl_addr(matcher, action->dest_tbl,
-                                                         rx_rule, &attr.final_icm_addr);
-                       if (ret)
-                               return ret;
-                       break;
-               case DR_ACTION_TYP_RANGE:
-                       ret = dr_action_get_dest_tbl_addr(matcher,
-                                                         action->range->hit_tbl_action->dest_tbl,
-                                                         rx_rule, &attr.final_icm_addr);
-                       if (ret)
-                               return ret;
-
-                       ret = dr_action_get_dest_tbl_addr(matcher,
-                                                         action->range->miss_tbl_action->dest_tbl,
-                                                         rx_rule, &attr.range.miss_icm_addr);
-                       if (ret)
-                               return ret;
-
-                       attr.range.definer_id = action->range->definer_id;
-                       attr.range.min = action->range->min;
-                       attr.range.max = action->range->max;
-                       break;
-               case DR_ACTION_TYP_QP:
-                       mlx5dr_info(dmn, "Domain doesn't support QP\n");
-                       return -EOPNOTSUPP;
-               case DR_ACTION_TYP_CTR:
-                       attr.ctr_id = action->ctr->ctr_id +
-                               action->ctr->offset;
-                       break;
-               case DR_ACTION_TYP_TAG:
-                       attr.flow_tag = action->flow_tag->flow_tag;
-                       break;
-               case DR_ACTION_TYP_TNL_L2_TO_L2:
-                       break;
-               case DR_ACTION_TYP_TNL_L3_TO_L2:
-                       if (action->rewrite->ptrn && action->rewrite->arg) {
-                               attr.decap_index = mlx5dr_arg_get_obj_id(action->rewrite->arg);
-                               attr.decap_actions = action->rewrite->ptrn->num_of_actions;
-                               attr.decap_pat_idx = action->rewrite->ptrn->index;
-                       } else {
-                               attr.decap_index = action->rewrite->index;
-                               attr.decap_actions = action->rewrite->num_of_actions;
-                               attr.decap_with_vlan =
-                                       attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS;
-                               attr.decap_pat_idx = MLX5DR_INVALID_PATTERN_INDEX;
-                       }
-                       break;
-               case DR_ACTION_TYP_MODIFY_HDR:
-                       if (action->rewrite->single_action_opt) {
-                               attr.modify_actions = action->rewrite->num_of_actions;
-                               attr.single_modify_action = action->rewrite->data;
-                       } else {
-                               if (action->rewrite->ptrn && action->rewrite->arg) {
-                                       attr.modify_index =
-                                               mlx5dr_arg_get_obj_id(action->rewrite->arg);
-                                       attr.modify_actions = action->rewrite->ptrn->num_of_actions;
-                                       attr.modify_pat_idx = action->rewrite->ptrn->index;
-                               } else {
-                                       attr.modify_index = action->rewrite->index;
-                                       attr.modify_actions = action->rewrite->num_of_actions;
-                                       attr.modify_pat_idx = MLX5DR_INVALID_PATTERN_INDEX;
-                               }
-                       }
-                       if (action->rewrite->modify_ttl)
-                               dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
-                                                           &recalc_cs_required);
-                       break;
-               case DR_ACTION_TYP_L2_TO_TNL_L2:
-               case DR_ACTION_TYP_L2_TO_TNL_L3:
-                       if (rx_rule &&
-                           !(dmn->ste_ctx->actions_caps & DR_STE_CTX_ACTION_CAP_RX_ENCAP)) {
-                               mlx5dr_info(dmn, "Device doesn't support Encap on RX\n");
-                               return -EOPNOTSUPP;
-                       }
-                       attr.reformat.size = action->reformat->size;
-                       attr.reformat.id = action->reformat->id;
-                       break;
-               case DR_ACTION_TYP_SAMPLER:
-                       attr.final_icm_addr = rx_rule ? action->sampler->rx_icm_addr :
-                                                       action->sampler->tx_icm_addr;
-                       break;
-               case DR_ACTION_TYP_VPORT:
-                       if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
-                               /* can't go to uplink on RX rule - dropping instead */
-                               attr.final_icm_addr = nic_dmn->drop_icm_addr;
-                               attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
-                       } else {
-                               attr.hit_gvmi = action->vport->caps->vhca_gvmi;
-                               dest_action = action;
-                               attr.final_icm_addr = rx_rule ?
-                                                     action->vport->caps->icm_address_rx :
-                                                     action->vport->caps->icm_address_tx;
-                       }
-                       break;
-               case DR_ACTION_TYP_POP_VLAN:
-                       if (!rx_rule && !(dmn->ste_ctx->actions_caps &
-                                         DR_STE_CTX_ACTION_CAP_TX_POP)) {
-                               mlx5dr_dbg(dmn, "Device doesn't support POP VLAN action on TX\n");
-                               return -EOPNOTSUPP;
-                       }
-
-                       max_actions_type = MLX5DR_MAX_VLANS;
-                       attr.vlans.count++;
-                       break;
-               case DR_ACTION_TYP_PUSH_VLAN:
-                       if (rx_rule && !(dmn->ste_ctx->actions_caps &
-                                        DR_STE_CTX_ACTION_CAP_RX_PUSH)) {
-                               mlx5dr_dbg(dmn, "Device doesn't support PUSH VLAN action on RX\n");
-                               return -EOPNOTSUPP;
-                       }
-
-                       max_actions_type = MLX5DR_MAX_VLANS;
-                       if (attr.vlans.count == MLX5DR_MAX_VLANS) {
-                               mlx5dr_dbg(dmn, "Max VLAN push/pop count exceeded\n");
-                               return -EINVAL;
-                       }
-
-                       attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr;
-                       break;
-               case DR_ACTION_TYP_INSERT_HDR:
-               case DR_ACTION_TYP_REMOVE_HDR:
-                       attr.reformat.size = action->reformat->size;
-                       attr.reformat.id = action->reformat->id;
-                       attr.reformat.param_0 = action->reformat->param_0;
-                       attr.reformat.param_1 = action->reformat->param_1;
-                       break;
-               case DR_ACTION_TYP_ASO_FLOW_METER:
-                       attr.aso_flow_meter.obj_id = action->aso->obj_id;
-                       attr.aso_flow_meter.offset = action->aso->offset;
-                       attr.aso_flow_meter.dest_reg_id = action->aso->dest_reg_id;
-                       attr.aso_flow_meter.init_color = action->aso->init_color;
-                       break;
-               default:
-                       mlx5dr_err(dmn, "Unsupported action type %d\n", action_type);
-                       return -EINVAL;
-               }
-
-               /* Check action duplication */
-               if (++action_type_set[action_type] > max_actions_type) {
-                       mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n",
-                                  action_type, max_actions_type);
-                       return -EINVAL;
-               }
-
-               /* Check action state machine is valid */
-               if (dr_action_validate_and_get_next_state(action_domain,
-                                                         action_type,
-                                                         &state)) {
-                       mlx5dr_err(dmn, "Invalid action (gvmi: %d, is_rx: %d) sequence provided:",
-                                  attr.gvmi, rx_rule);
-                       dr_action_print_sequence(dmn, actions, i);
-                       return -EOPNOTSUPP;
-               }
-       }
-
-       *new_hw_ste_arr_sz = nic_matcher->num_of_builders;
-       last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
-
-       if (recalc_cs_required && dest_action) {
-               ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
-               if (ret) {
-                       mlx5dr_err(dmn,
-                                  "Failed to handle checksum recalculation err %d\n",
-                                  ret);
-                       return ret;
-               }
-       }
-
-       dr_actions_apply(dmn,
-                        nic_dmn->type,
-                        action_type_set,
-                        last_ste,
-                        &attr,
-                        new_hw_ste_arr_sz);
-
-       return 0;
-}
-
-static unsigned int action_size[DR_ACTION_TYP_MAX] = {
-       [DR_ACTION_TYP_TNL_L2_TO_L2] = sizeof(struct mlx5dr_action_reformat),
-       [DR_ACTION_TYP_L2_TO_TNL_L2] = sizeof(struct mlx5dr_action_reformat),
-       [DR_ACTION_TYP_TNL_L3_TO_L2] = sizeof(struct mlx5dr_action_rewrite),
-       [DR_ACTION_TYP_L2_TO_TNL_L3] = sizeof(struct mlx5dr_action_reformat),
-       [DR_ACTION_TYP_FT]           = sizeof(struct mlx5dr_action_dest_tbl),
-       [DR_ACTION_TYP_CTR]          = sizeof(struct mlx5dr_action_ctr),
-       [DR_ACTION_TYP_TAG]          = sizeof(struct mlx5dr_action_flow_tag),
-       [DR_ACTION_TYP_MODIFY_HDR]   = sizeof(struct mlx5dr_action_rewrite),
-       [DR_ACTION_TYP_VPORT]        = sizeof(struct mlx5dr_action_vport),
-       [DR_ACTION_TYP_PUSH_VLAN]    = sizeof(struct mlx5dr_action_push_vlan),
-       [DR_ACTION_TYP_INSERT_HDR]   = sizeof(struct mlx5dr_action_reformat),
-       [DR_ACTION_TYP_REMOVE_HDR]   = sizeof(struct mlx5dr_action_reformat),
-       [DR_ACTION_TYP_SAMPLER]      = sizeof(struct mlx5dr_action_sampler),
-       [DR_ACTION_TYP_ASO_FLOW_METER] = sizeof(struct mlx5dr_action_aso_flow_meter),
-       [DR_ACTION_TYP_RANGE]        = sizeof(struct mlx5dr_action_range),
-};
-
-static struct mlx5dr_action *
-dr_action_create_generic(enum mlx5dr_action_type action_type)
-{
-       struct mlx5dr_action *action;
-       int extra_size;
-
-       if (action_type < DR_ACTION_TYP_MAX)
-               extra_size = action_size[action_type];
-       else
-               return NULL;
-
-       action = kzalloc(sizeof(*action) + extra_size, GFP_KERNEL);
-       if (!action)
-               return NULL;
-
-       action->action_type = action_type;
-       refcount_set(&action->refcount, 1);
-       action->data = action + 1;
-
-       return action;
-}
-
-struct mlx5dr_action *mlx5dr_action_create_drop(void)
-{
-       return dr_action_create_generic(DR_ACTION_TYP_DROP);
-}
-
-struct mlx5dr_action *
-mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num)
-{
-       struct mlx5dr_action *action;
-
-       action = dr_action_create_generic(DR_ACTION_TYP_FT);
-       if (!action)
-               return NULL;
-
-       action->dest_tbl->is_fw_tbl = true;
-       action->dest_tbl->fw_tbl.dmn = dmn;
-       action->dest_tbl->fw_tbl.id = table_num;
-       action->dest_tbl->fw_tbl.type = FS_FT_FDB;
-       refcount_inc(&dmn->refcount);
-
-       return action;
-}
-
-struct mlx5dr_action *
-mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
-{
-       struct mlx5dr_action *action;
-
-       refcount_inc(&tbl->refcount);
-
-       action = dr_action_create_generic(DR_ACTION_TYP_FT);
-       if (!action)
-               goto dec_ref;
-
-       action->dest_tbl->tbl = tbl;
-
-       return action;
-
-dec_ref:
-       refcount_dec(&tbl->refcount);
-       return NULL;
-}
-
-static void dr_action_range_definer_fill(u16 *format_id,
-                                        u8 *dw_selectors,
-                                        u8 *byte_selectors,
-                                        u8 *match_mask)
-{
-       int i;
-
-       *format_id = MLX5_IFC_DEFINER_FORMAT_ID_SELECT;
-
-       dw_selectors[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
-
-       for (i = 1; i < MLX5_IFC_DEFINER_DW_SELECTORS_NUM; i++)
-               dw_selectors[i] = MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED;
-
-       for (i = 0; i < MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM; i++)
-               byte_selectors[i] = MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED;
-
-       MLX5_SET(match_definer_match_mask, match_mask,
-                match_dw_0, 0xffffUL << 16);
-}
-
-static int dr_action_create_range_definer(struct mlx5dr_action *action)
-{
-       u8 match_mask[MLX5_FLD_SZ_BYTES(match_definer, match_mask)] = {};
-       u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM] = {};
-       u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM] = {};
-       struct mlx5dr_domain *dmn = action->range->dmn;
-       u32 definer_id;
-       u16 format_id;
-       int ret;
-
-       dr_action_range_definer_fill(&format_id,
-                                    dw_selectors,
-                                    byte_selectors,
-                                    match_mask);
-
-       ret = mlx5dr_definer_get(dmn, format_id,
-                                dw_selectors, byte_selectors,
-                                match_mask, &definer_id);
-       if (ret)
-               return ret;
-
-       action->range->definer_id = definer_id;
-       return 0;
-}
-
-static void dr_action_destroy_range_definer(struct mlx5dr_action *action)
-{
-       mlx5dr_definer_put(action->range->dmn, action->range->definer_id);
-}
-
-struct mlx5dr_action *
-mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn,
-                                     u32 field,
-                                     struct mlx5_flow_table *hit_ft,
-                                     struct mlx5_flow_table *miss_ft,
-                                     u32 min,
-                                     u32 max)
-{
-       struct mlx5dr_action *action;
-       int ret;
-
-       if (!mlx5dr_supp_match_ranges(dmn->mdev)) {
-               mlx5dr_dbg(dmn, "SELECT definer support is needed for match range\n");
-               return NULL;
-       }
-
-       if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
-           min > 0xffff || max > 0xffff) {
-               mlx5dr_err(dmn, "Invalid match range parameters\n");
-               return NULL;
-       }
-
-       action = dr_action_create_generic(DR_ACTION_TYP_RANGE);
-       if (!action)
-               return NULL;
-
-       action->range->hit_tbl_action =
-               mlx5dr_is_fw_table(hit_ft) ?
-                       mlx5dr_action_create_dest_flow_fw_table(dmn, hit_ft) :
-                       mlx5dr_action_create_dest_table(hit_ft->fs_dr_table.dr_table);
-
-       if (!action->range->hit_tbl_action)
-               goto free_action;
-
-       action->range->miss_tbl_action =
-               mlx5dr_is_fw_table(miss_ft) ?
-                       mlx5dr_action_create_dest_flow_fw_table(dmn, miss_ft) :
-                       mlx5dr_action_create_dest_table(miss_ft->fs_dr_table.dr_table);
-
-       if (!action->range->miss_tbl_action)
-               goto free_hit_tbl_action;
-
-       action->range->min = min;
-       action->range->max = max;
-       action->range->dmn = dmn;
-
-       ret = dr_action_create_range_definer(action);
-       if (ret)
-               goto free_miss_tbl_action;
-
-       /* No need to increase refcount on domain for this action,
-        * the hit/miss table actions will do it internally.
-        */
-
-       return action;
-
-free_miss_tbl_action:
-       mlx5dr_action_destroy(action->range->miss_tbl_action);
-free_hit_tbl_action:
-       mlx5dr_action_destroy(action->range->hit_tbl_action);
-free_action:
-       kfree(action);
-
-       return NULL;
-}
-
-struct mlx5dr_action *
-mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
-                                  struct mlx5dr_action_dest *dests,
-                                  u32 num_of_dests,
-                                  bool ignore_flow_level,
-                                  u32 flow_source)
-{
-       struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
-       struct mlx5dr_action **ref_actions;
-       struct mlx5dr_action *action;
-       bool reformat_req = false;
-       bool is_ft_wire = false;
-       u16 num_dst_ft = 0;
-       u32 num_of_ref = 0;
-       u32 ref_act_cnt;
-       u16 last_dest;
-       int ret;
-       int i;
-
-       if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
-               mlx5dr_err(dmn, "Multiple destination support is for FDB only\n");
-               return NULL;
-       }
-
-       hw_dests = kcalloc(num_of_dests, sizeof(*hw_dests), GFP_KERNEL);
-       if (!hw_dests)
-               return NULL;
-
-       if (unlikely(check_mul_overflow(num_of_dests, 2u, &ref_act_cnt)))
-               goto free_hw_dests;
-
-       ref_actions = kcalloc(ref_act_cnt, sizeof(*ref_actions), GFP_KERNEL);
-       if (!ref_actions)
-               goto free_hw_dests;
-
-       for (i = 0; i < num_of_dests; i++) {
-               struct mlx5dr_action *reformat_action = dests[i].reformat;
-               struct mlx5dr_action *dest_action = dests[i].dest;
-
-               ref_actions[num_of_ref++] = dest_action;
-
-               switch (dest_action->action_type) {
-               case DR_ACTION_TYP_VPORT:
-                       hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
-                       hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
-                       hw_dests[i].vport.num = dest_action->vport->caps->num;
-                       hw_dests[i].vport.vhca_id = dest_action->vport->caps->vhca_gvmi;
-                       if (reformat_action) {
-                               reformat_req = true;
-                               hw_dests[i].vport.reformat_id =
-                                       reformat_action->reformat->id;
-                               ref_actions[num_of_ref++] = reformat_action;
-                               hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
-                       }
-                       break;
-
-               case DR_ACTION_TYP_FT:
-                       if (num_dst_ft &&
-                           !mlx5dr_action_supp_fwd_fdb_multi_ft(dmn->mdev)) {
-                               mlx5dr_dbg(dmn, "multiple FT destinations not supported\n");
-                               goto free_ref_actions;
-                       }
-                       num_dst_ft++;
-                       hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-                       if (dest_action->dest_tbl->is_fw_tbl) {
-                               hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id;
-                       } else {
-                               hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id;
-                               if (dest_action->dest_tbl->is_wire_ft) {
-                                       is_ft_wire = true;
-                                       last_dest = i;
-                               }
-                       }
-                       break;
-
-               default:
-                       mlx5dr_dbg(dmn, "Invalid multiple destinations action\n");
-                       goto free_ref_actions;
-               }
-       }
-
-       /* In multidest, the FW does the iterator in the RX except of the last
-        * one that done in the TX.
-        * So, if one of the ft target is wire, put it at the end of the dest list.
-        */
-       if (is_ft_wire && num_dst_ft > 1)
-               swap(hw_dests[last_dest], hw_dests[num_of_dests - 1]);
-
-       action = dr_action_create_generic(DR_ACTION_TYP_FT);
-       if (!action)
-               goto free_ref_actions;
-
-       ret = mlx5dr_fw_create_md_tbl(dmn,
-                                     hw_dests,
-                                     num_of_dests,
-                                     reformat_req,
-                                     &action->dest_tbl->fw_tbl.id,
-                                     &action->dest_tbl->fw_tbl.group_id,
-                                     ignore_flow_level,
-                                     flow_source);
-       if (ret)
-               goto free_action;
-
-       refcount_inc(&dmn->refcount);
-
-       for (i = 0; i < num_of_ref; i++)
-               refcount_inc(&ref_actions[i]->refcount);
-
-       action->dest_tbl->is_fw_tbl = true;
-       action->dest_tbl->fw_tbl.dmn = dmn;
-       action->dest_tbl->fw_tbl.type = FS_FT_FDB;
-       action->dest_tbl->fw_tbl.ref_actions = ref_actions;
-       action->dest_tbl->fw_tbl.num_of_ref_actions = num_of_ref;
-
-       kfree(hw_dests);
-
-       return action;
-
-free_action:
-       kfree(action);
-free_ref_actions:
-       kfree(ref_actions);
-free_hw_dests:
-       kfree(hw_dests);
-       return NULL;
-}
-
-struct mlx5dr_action *
-mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn,
-                                       struct mlx5_flow_table *ft)
-{
-       struct mlx5dr_action *action;
-
-       action = dr_action_create_generic(DR_ACTION_TYP_FT);
-       if (!action)
-               return NULL;
-
-       action->dest_tbl->is_fw_tbl = 1;
-       action->dest_tbl->fw_tbl.type = ft->type;
-       action->dest_tbl->fw_tbl.id = ft->id;
-       action->dest_tbl->fw_tbl.dmn = dmn;
-
-       refcount_inc(&dmn->refcount);
-
-       return action;
-}
-
-struct mlx5dr_action *
-mlx5dr_action_create_flow_counter(u32 counter_id)
-{
-       struct mlx5dr_action *action;
-
-       action = dr_action_create_generic(DR_ACTION_TYP_CTR);
-       if (!action)
-               return NULL;
-
-       action->ctr->ctr_id = counter_id;
-
-       return action;
-}
-
-struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value)
-{
-       struct mlx5dr_action *action;
-
-       action = dr_action_create_generic(DR_ACTION_TYP_TAG);
-       if (!action)
-               return NULL;
-
-       action->flow_tag->flow_tag = tag_value & 0xffffff;
-
-       return action;
-}
-
-struct mlx5dr_action *
-mlx5dr_action_create_flow_sampler(struct mlx5dr_domain *dmn, u32 sampler_id)
-{
-       struct mlx5dr_action *action;
-       u64 icm_rx, icm_tx;
-       int ret;
-
-       ret = mlx5dr_cmd_query_flow_sampler(dmn->mdev, sampler_id,
-                                           &icm_rx, &icm_tx);
-       if (ret)
-               return NULL;
-
-       action = dr_action_create_generic(DR_ACTION_TYP_SAMPLER);
-       if (!action)
-               return NULL;
-
-       action->sampler->dmn = dmn;
-       action->sampler->sampler_id = sampler_id;
-       action->sampler->rx_icm_addr = icm_rx;
-       action->sampler->tx_icm_addr = icm_tx;
-
-       refcount_inc(&dmn->refcount);
-       return action;
-}
-
-static int
-dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type,
-                                struct mlx5dr_domain *dmn,
-                                u8 reformat_param_0,
-                                u8 reformat_param_1,
-                                size_t data_sz,
-                                void *data)
-{
-       if (reformat_type == DR_ACTION_TYP_INSERT_HDR) {
-               if ((!data && data_sz) || (data && !data_sz) ||
-                   MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_size) < data_sz ||
-                   MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_offset) < reformat_param_1) {
-                       mlx5dr_dbg(dmn, "Invalid reformat parameters for INSERT_HDR\n");
-                       goto out_err;
-               }
-       } else if (reformat_type == DR_ACTION_TYP_REMOVE_HDR) {
-               if (data ||
-                   MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_size) < data_sz ||
-                   MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_offset) < reformat_param_1) {
-                       mlx5dr_dbg(dmn, "Invalid reformat parameters for REMOVE_HDR\n");
-                       goto out_err;
-               }
-       } else if (reformat_param_0 || reformat_param_1 ||
-                  reformat_type > DR_ACTION_TYP_REMOVE_HDR) {
-               mlx5dr_dbg(dmn, "Invalid reformat parameters\n");
-               goto out_err;
-       }
-
-       if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
-               return 0;
-
-       if (dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
-               if (reformat_type != DR_ACTION_TYP_TNL_L2_TO_L2 &&
-                   reformat_type != DR_ACTION_TYP_TNL_L3_TO_L2) {
-                       mlx5dr_dbg(dmn, "Action reformat type not support on RX domain\n");
-                       goto out_err;
-               }
-       } else if (dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
-               if (reformat_type != DR_ACTION_TYP_L2_TO_TNL_L2 &&
-                   reformat_type != DR_ACTION_TYP_L2_TO_TNL_L3) {
-                       mlx5dr_dbg(dmn, "Action reformat type not support on TX domain\n");
-                       goto out_err;
-               }
-       }
-
-       return 0;
-
-out_err:
-       return -EINVAL;
-}
-
-static int
-dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
-                                u8 reformat_param_0, u8 reformat_param_1,
-                                size_t data_sz, void *data,
-                                struct mlx5dr_action *action)
-{
-       u32 reformat_id;
-       int ret;
-
-       switch (action->action_type) {
-       case DR_ACTION_TYP_L2_TO_TNL_L2:
-       case DR_ACTION_TYP_L2_TO_TNL_L3:
-       {
-               enum mlx5_reformat_ctx_type rt;
-
-               if (action->action_type == DR_ACTION_TYP_L2_TO_TNL_L2)
-                       rt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
-               else
-                       rt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
-
-               ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, 0, 0,
-                                                    data_sz, data,
-                                                    &reformat_id);
-               if (ret)
-                       return ret;
-
-               action->reformat->id = reformat_id;
-               action->reformat->size = data_sz;
-               return 0;
-       }
-       case DR_ACTION_TYP_TNL_L2_TO_L2:
-       {
-               return 0;
-       }
-       case DR_ACTION_TYP_TNL_L3_TO_L2:
-       {
-               u8 *hw_actions;
-
-               hw_actions = kzalloc(DR_ACTION_CACHE_LINE_SIZE, GFP_KERNEL);
-               if (!hw_actions)
-                       return -ENOMEM;
-
-               ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
-                                                         data, data_sz,
-                                                         hw_actions,
-                                                         DR_ACTION_CACHE_LINE_SIZE,
-                                                         &action->rewrite->num_of_actions);
-               if (ret) {
-                       mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
-                       kfree(hw_actions);
-                       return ret;
-               }
-
-               action->rewrite->data = hw_actions;
-               action->rewrite->dmn = dmn;
-
-               ret = mlx5dr_ste_alloc_modify_hdr(action);
-               if (ret) {
-                       mlx5dr_dbg(dmn, "Failed preparing reformat data\n");
-                       kfree(hw_actions);
-                       return ret;
-               }
-               return 0;
-       }
-       case DR_ACTION_TYP_INSERT_HDR:
-               ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev,
-                                                    MLX5_REFORMAT_TYPE_INSERT_HDR,
-                                                    reformat_param_0,
-                                                    reformat_param_1,
-                                                    data_sz, data,
-                                                    &reformat_id);
-               if (ret)
-                       return ret;
-
-               action->reformat->id = reformat_id;
-               action->reformat->size = data_sz;
-               action->reformat->param_0 = reformat_param_0;
-               action->reformat->param_1 = reformat_param_1;
-               return 0;
-       case DR_ACTION_TYP_REMOVE_HDR:
-               action->reformat->id = 0;
-               action->reformat->size = data_sz;
-               action->reformat->param_0 = reformat_param_0;
-               action->reformat->param_1 = reformat_param_1;
-               return 0;
-       default:
-               mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type);
-               return -EINVAL;
-       }
-}
-
-#define CVLAN_ETHERTYPE 0x8100
-#define SVLAN_ETHERTYPE 0x88a8
-
-struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void)
-{
-       return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN);
-}
-
-struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn,
-                                                    __be32 vlan_hdr)
-{
-       u32 vlan_hdr_h = ntohl(vlan_hdr);
-       u16 ethertype = vlan_hdr_h >> 16;
-       struct mlx5dr_action *action;
-
-       if (ethertype != SVLAN_ETHERTYPE && ethertype != CVLAN_ETHERTYPE) {
-               mlx5dr_dbg(dmn, "Invalid vlan ethertype\n");
-               return NULL;
-       }
-
-       action = dr_action_create_generic(DR_ACTION_TYP_PUSH_VLAN);
-       if (!action)
-               return NULL;
-
-       action->push_vlan->vlan_hdr = vlan_hdr_h;
-       return action;
-}
-
-struct mlx5dr_action *
-mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
-                                    enum mlx5dr_action_reformat_type reformat_type,
-                                    u8 reformat_param_0,
-                                    u8 reformat_param_1,
-                                    size_t data_sz,
-                                    void *data)
-{
-       enum mlx5dr_action_type action_type;
-       struct mlx5dr_action *action;
-       int ret;
-
-       refcount_inc(&dmn->refcount);
-
-       /* General checks */
-       ret = dr_action_reformat_to_action_type(reformat_type, &action_type);
-       if (ret) {
-               mlx5dr_dbg(dmn, "Invalid reformat_type provided\n");
-               goto dec_ref;
-       }
-
-       ret = dr_action_verify_reformat_params(action_type, dmn,
-                                              reformat_param_0, reformat_param_1,
-                                              data_sz, data);
-       if (ret)
-               goto dec_ref;
-
-       action = dr_action_create_generic(action_type);
-       if (!action)
-               goto dec_ref;
-
-       action->reformat->dmn = dmn;
-
-       ret = dr_action_create_reformat_action(dmn,
-                                              reformat_param_0,
-                                              reformat_param_1,
-                                              data_sz,
-                                              data,
-                                              action);
-       if (ret) {
-               mlx5dr_dbg(dmn, "Failed creating reformat action %d\n", ret);
-               goto free_action;
-       }
-
-       return action;
-
-free_action:
-       kfree(action);
-dec_ref:
-       refcount_dec(&dmn->refcount);
-       return NULL;
-}
-
-static int
-dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
-                             __be64 *sw_action,
-                             __be64 *hw_action,
-                             const struct mlx5dr_ste_action_modify_field **ret_hw_info)
-{
-       const struct mlx5dr_ste_action_modify_field *hw_action_info;
-       u8 max_length;
-       u16 sw_field;
-       u32 data;
-
-       /* Get SW modify action data */
-       sw_field = MLX5_GET(set_action_in, sw_action, field);
-       data = MLX5_GET(set_action_in, sw_action, data);
-
-       /* Convert SW data to HW modify action format */
-       hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
-       if (!hw_action_info) {
-               mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
-               return -EINVAL;
-       }
-
-       max_length = hw_action_info->end - hw_action_info->start + 1;
-
-       mlx5dr_ste_set_action_add(dmn->ste_ctx,
-                                 hw_action,
-                                 hw_action_info->hw_field,
-                                 hw_action_info->start,
-                                 max_length,
-                                 data);
-
-       *ret_hw_info = hw_action_info;
-
-       return 0;
-}
-
-static int
-dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
-                             __be64 *sw_action,
-                             __be64 *hw_action,
-                             const struct mlx5dr_ste_action_modify_field **ret_hw_info)
-{
-       const struct mlx5dr_ste_action_modify_field *hw_action_info;
-       u8 offset, length, max_length;
-       u16 sw_field;
-       u32 data;
-
-       /* Get SW modify action data */
-       length = MLX5_GET(set_action_in, sw_action, length);
-       offset = MLX5_GET(set_action_in, sw_action, offset);
-       sw_field = MLX5_GET(set_action_in, sw_action, field);
-       data = MLX5_GET(set_action_in, sw_action, data);
-
-       /* Convert SW data to HW modify action format */
-       hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
-       if (!hw_action_info) {
-               mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
-               return -EINVAL;
-       }
-
-       /* PRM defines that length zero specific length of 32bits */
-       length = length ? length : 32;
-
-       max_length = hw_action_info->end - hw_action_info->start + 1;
-
-       if (length + offset > max_length) {
-               mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
-               return -EINVAL;
-       }
-
-       mlx5dr_ste_set_action_set(dmn->ste_ctx,
-                                 hw_action,
-                                 hw_action_info->hw_field,
-                                 hw_action_info->start + offset,
-                                 length,
-                                 data);
-
-       *ret_hw_info = hw_action_info;
-
-       return 0;
-}
-
-static int
-dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
-                              __be64 *sw_action,
-                              __be64 *hw_action,
-                              const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
-                              const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
-{
-       u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
-       const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
-       const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
-       u16 src_field, dst_field;
-
-       /* Get SW modify action data */
-       src_field = MLX5_GET(copy_action_in, sw_action, src_field);
-       dst_field = MLX5_GET(copy_action_in, sw_action, dst_field);
-       src_offset = MLX5_GET(copy_action_in, sw_action, src_offset);
-       dst_offset = MLX5_GET(copy_action_in, sw_action, dst_offset);
-       length = MLX5_GET(copy_action_in, sw_action, length);
-
-       /* Convert SW data to HW modify action format */
-       hw_src_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, src_field);
-       hw_dst_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, dst_field);
-       if (!hw_src_action_info || !hw_dst_action_info) {
-               mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
-               return -EINVAL;
-       }
-
-       /* PRM defines that length zero specific length of 32bits */
-       length = length ? length : 32;
-
-       src_max_length = hw_src_action_info->end -
-                        hw_src_action_info->start + 1;
-       dst_max_length = hw_dst_action_info->end -
-                        hw_dst_action_info->start + 1;
-
-       if (length + src_offset > src_max_length ||
-           length + dst_offset > dst_max_length) {
-               mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
-               return -EINVAL;
-       }
-
-       mlx5dr_ste_set_action_copy(dmn->ste_ctx,
-                                  hw_action,
-                                  hw_dst_action_info->hw_field,
-                                  hw_dst_action_info->start + dst_offset,
-                                  length,
-                                  hw_src_action_info->hw_field,
-                                  hw_src_action_info->start + src_offset);
-
-       *ret_dst_hw_info = hw_dst_action_info;
-       *ret_src_hw_info = hw_src_action_info;
-
-       return 0;
-}
-
-static int
-dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
-                         __be64 *sw_action,
-                         __be64 *hw_action,
-                         const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
-                         const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
-{
-       u8 action;
-       int ret;
-
-       *hw_action = 0;
-       *ret_src_hw_info = NULL;
-
-       /* Get SW modify action type */
-       action = MLX5_GET(set_action_in, sw_action, action_type);
-
-       switch (action) {
-       case MLX5_ACTION_TYPE_SET:
-               ret = dr_action_modify_sw_to_hw_set(dmn, sw_action,
-                                                   hw_action,
-                                                   ret_dst_hw_info);
-               break;
-
-       case MLX5_ACTION_TYPE_ADD:
-               ret = dr_action_modify_sw_to_hw_add(dmn, sw_action,
-                                                   hw_action,
-                                                   ret_dst_hw_info);
-               break;
-
-       case MLX5_ACTION_TYPE_COPY:
-               ret = dr_action_modify_sw_to_hw_copy(dmn, sw_action,
-                                                    hw_action,
-                                                    ret_dst_hw_info,
-                                                    ret_src_hw_info);
-               break;
-
-       default:
-               mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
-               ret = -EOPNOTSUPP;
-       }
-
-       return ret;
-}
-
-static int
-dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
-                                           const __be64 *sw_action)
-{
-       u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
-       struct mlx5dr_domain *dmn = action->rewrite->dmn;
-
-       if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
-               action->rewrite->allow_rx = 0;
-               if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
-                       mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
-                                  sw_field);
-                       return -EINVAL;
-               }
-       } else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
-               action->rewrite->allow_tx = 0;
-               if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
-                       mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
-                                  sw_field);
-                       return -EINVAL;
-               }
-       }
-
-       if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) {
-               mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int
-dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action,
-                                           const __be64 *sw_action)
-{
-       u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
-       struct mlx5dr_domain *dmn = action->rewrite->dmn;
-
-       if (sw_field != MLX5_ACTION_IN_FIELD_OUT_IP_TTL &&
-           sw_field != MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT &&
-           sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM &&
-           sw_field != MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM) {
-               mlx5dr_dbg(dmn, "Unsupported field %d for add action\n",
-                          sw_field);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int
-dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
-                                            const __be64 *sw_action)
-{
-       struct mlx5dr_domain *dmn = action->rewrite->dmn;
-       u16 sw_fields[2];
-       int i;
-
-       sw_fields[0] = MLX5_GET(copy_action_in, sw_action, src_field);
-       sw_fields[1] = MLX5_GET(copy_action_in, sw_action, dst_field);
-
-       for (i = 0; i < 2; i++) {
-               if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
-                       action->rewrite->allow_rx = 0;
-                       if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
-                               mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
-                                          sw_fields[i]);
-                               return -EINVAL;
-                       }
-               } else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
-                       action->rewrite->allow_tx = 0;
-                       if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
-                               mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
-                                          sw_fields[i]);
-                               return -EINVAL;
-                       }
-               }
-       }
-
-       if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) {
-               mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int
-dr_action_modify_check_field_limitation(struct mlx5dr_action *action,
-                                       const __be64 *sw_action)
-{
-       struct mlx5dr_domain *dmn = action->rewrite->dmn;
-       u8 action_type;
-       int ret;
-
-       action_type = MLX5_GET(set_action_in, sw_action, action_type);
-
-       switch (action_type) {
-       case MLX5_ACTION_TYPE_SET:
-               ret = dr_action_modify_check_set_field_limitation(action,
-                                                                 sw_action);
-               break;
-
-       case MLX5_ACTION_TYPE_ADD:
-               ret = dr_action_modify_check_add_field_limitation(action,
-                                                                 sw_action);
-               break;
-
-       case MLX5_ACTION_TYPE_COPY:
-               ret = dr_action_modify_check_copy_field_limitation(action,
-                                                                  sw_action);
-               break;
-
-       default:
-               mlx5dr_info(dmn, "Unsupported action %d modify action\n",
-                           action_type);
-               ret = -EOPNOTSUPP;
-       }
-
-       return ret;
-}
-
-static bool
-dr_action_modify_check_is_ttl_modify(const void *sw_action)
-{
-       u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
-
-       return sw_field == MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
-}
-
-static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
-                                           u32 max_hw_actions,
-                                           u32 num_sw_actions,
-                                           __be64 sw_actions[],
-                                           __be64 hw_actions[],
-                                           u32 *num_hw_actions,
-                                           bool *modify_ttl)
-{
-       const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
-       const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
-       struct mlx5dr_domain *dmn = action->rewrite->dmn;
-       __be64 *modify_ttl_sw_action = NULL;
-       int ret, i, hw_idx = 0;
-       __be64 *sw_action;
-       __be64 hw_action;
-       u16 hw_field = 0;
-       u32 l3_type = 0;
-       u32 l4_type = 0;
-
-       *modify_ttl = false;
-
-       action->rewrite->allow_rx = 1;
-       action->rewrite->allow_tx = 1;
-
-       for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
-               /* modify TTL is handled separately, as a last action */
-               if (i == num_sw_actions) {
-                       sw_action = modify_ttl_sw_action;
-                       modify_ttl_sw_action = NULL;
-               } else {
-                       sw_action = &sw_actions[i];
-               }
-
-               ret = dr_action_modify_check_field_limitation(action,
-                                                             sw_action);
-               if (ret)
-                       return ret;
-
-               if (!(*modify_ttl) &&
-                   dr_action_modify_check_is_ttl_modify(sw_action)) {
-                       modify_ttl_sw_action = sw_action;
-                       *modify_ttl = true;
-                       continue;
-               }
-
-               /* Convert SW action to HW action */
-               ret = dr_action_modify_sw_to_hw(dmn,
-                                               sw_action,
-                                               &hw_action,
-                                               &hw_dst_action_info,
-                                               &hw_src_action_info);
-               if (ret)
-                       return ret;
-
-               /* Due to a HW limitation we cannot modify 2 different L3 types */
-               if (l3_type && hw_dst_action_info->l3_type &&
-                   hw_dst_action_info->l3_type != l3_type) {
-                       mlx5dr_dbg(dmn, "Action list can't support two different L3 types\n");
-                       return -EINVAL;
-               }
-               if (hw_dst_action_info->l3_type)
-                       l3_type = hw_dst_action_info->l3_type;
-
-               /* Due to a HW limitation we cannot modify two different L4 types */
-               if (l4_type && hw_dst_action_info->l4_type &&
-                   hw_dst_action_info->l4_type != l4_type) {
-                       mlx5dr_dbg(dmn, "Action list can't support two different L4 types\n");
-                       return -EINVAL;
-               }
-               if (hw_dst_action_info->l4_type)
-                       l4_type = hw_dst_action_info->l4_type;
-
-               /* HW reads and executes two actions at once this means we
-                * need to create a gap if two actions access the same field
-                */
-               if ((hw_idx % 2) && (hw_field == hw_dst_action_info->hw_field ||
-                                    (hw_src_action_info &&
-                                     hw_field == hw_src_action_info->hw_field))) {
-                       /* Check if after gap insertion the total number of HW
-                        * modify actions doesn't exceeds the limit
-                        */
-                       hw_idx++;
-                       if (hw_idx >= max_hw_actions) {
-                               mlx5dr_dbg(dmn, "Modify header action number exceeds HW limit\n");
-                               return -EINVAL;
-                       }
-               }
-               hw_field = hw_dst_action_info->hw_field;
-
-               hw_actions[hw_idx] = hw_action;
-               hw_idx++;
-       }
-
-       /* if the resulting HW actions list is empty, add NOP action */
-       if (!hw_idx)
-               hw_idx++;
-
-       *num_hw_actions = hw_idx;
-
-       return 0;
-}
-
-static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
-                                         size_t actions_sz,
-                                         __be64 actions[],
-                                         struct mlx5dr_action *action)
-{
-       u32 max_hw_actions;
-       u32 num_hw_actions;
-       u32 num_sw_actions;
-       __be64 *hw_actions;
-       bool modify_ttl;
-       int ret;
-
-       num_sw_actions = actions_sz / DR_MODIFY_ACTION_SIZE;
-       max_hw_actions = mlx5dr_icm_pool_chunk_size_to_entries(DR_CHUNK_SIZE_16);
-
-       if (num_sw_actions > max_hw_actions) {
-               mlx5dr_dbg(dmn, "Max number of actions %d exceeds limit %d\n",
-                          num_sw_actions, max_hw_actions);
-               return -EINVAL;
-       }
-
-       hw_actions = kcalloc(1, max_hw_actions * DR_MODIFY_ACTION_SIZE, GFP_KERNEL);
-       if (!hw_actions)
-               return -ENOMEM;
-
-       ret = dr_actions_convert_modify_header(action,
-                                              max_hw_actions,
-                                              num_sw_actions,
-                                              actions,
-                                              hw_actions,
-                                              &num_hw_actions,
-                                              &modify_ttl);
-       if (ret)
-               goto free_hw_actions;
-
-       action->rewrite->modify_ttl = modify_ttl;
-       action->rewrite->data = (u8 *)hw_actions;
-       action->rewrite->num_of_actions = num_hw_actions;
-
-       if (num_hw_actions == 1 &&
-           dmn->info.caps.sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) {
-               action->rewrite->single_action_opt = true;
-       } else {
-               action->rewrite->single_action_opt = false;
-               ret = mlx5dr_ste_alloc_modify_hdr(action);
-               if (ret)
-                       goto free_hw_actions;
-       }
-
-       return 0;
-
-free_hw_actions:
-       kfree(hw_actions);
-       return ret;
-}
-
-struct mlx5dr_action *
-mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn,
-                                  u32 flags,
-                                  size_t actions_sz,
-                                  __be64 actions[])
-{
-       struct mlx5dr_action *action;
-       int ret = 0;
-
-       refcount_inc(&dmn->refcount);
-
-       if (actions_sz % DR_MODIFY_ACTION_SIZE) {
-               mlx5dr_dbg(dmn, "Invalid modify actions size provided\n");
-               goto dec_ref;
-       }
-
-       action = dr_action_create_generic(DR_ACTION_TYP_MODIFY_HDR);
-       if (!action)
-               goto dec_ref;
-
-       action->rewrite->dmn = dmn;
-
-       ret = dr_action_create_modify_action(dmn,
-                                            actions_sz,
-                                            actions,
-                                            action);
-       if (ret) {
-               mlx5dr_dbg(dmn, "Failed creating modify header action %d\n", ret);
-               goto free_action;
-       }
-
-       return action;
-
-free_action:
-       kfree(action);
-dec_ref:
-       refcount_dec(&dmn->refcount);
-       return NULL;
-}
-
-struct mlx5dr_action *
-mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
-                               u16 vport, u8 vhca_id_valid,
-                               u16 vhca_id)
-{
-       struct mlx5dr_cmd_vport_cap *vport_cap;
-       struct mlx5dr_domain *vport_dmn;
-       struct mlx5dr_action *action;
-       u8 peer_vport;
-
-       peer_vport = vhca_id_valid && mlx5_core_is_pf(dmn->mdev) &&
-               (vhca_id != dmn->info.caps.gvmi);
-       vport_dmn = peer_vport ? xa_load(&dmn->peer_dmn_xa, vhca_id) : dmn;
-       if (!vport_dmn) {
-               mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n");
-               return NULL;
-       }
-
-       if (vport_dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
-               mlx5dr_dbg(dmn, "Domain doesn't support vport actions\n");
-               return NULL;
-       }
-
-       vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, vport);
-       if (!vport_cap) {
-               mlx5dr_err(dmn,
-                          "Failed to get vport 0x%x caps - vport is disabled or invalid\n",
-                          vport);
-               return NULL;
-       }
-
-       action = dr_action_create_generic(DR_ACTION_TYP_VPORT);
-       if (!action)
-               return NULL;
-
-       action->vport->dmn = vport_dmn;
-       action->vport->caps = vport_cap;
-
-       return action;
-}
-
-struct mlx5dr_action *
-mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, u32 obj_id,
-                        u8 dest_reg_id, u8 aso_type,
-                        u8 init_color, u8 meter_id)
-{
-       struct mlx5dr_action *action;
-
-       if (aso_type != MLX5_EXE_ASO_FLOW_METER)
-               return NULL;
-
-       if (init_color > MLX5_FLOW_METER_COLOR_UNDEFINED)
-               return NULL;
-
-       action = dr_action_create_generic(DR_ACTION_TYP_ASO_FLOW_METER);
-       if (!action)
-               return NULL;
-
-       action->aso->obj_id = obj_id;
-       action->aso->offset = meter_id;
-       action->aso->dest_reg_id = dest_reg_id;
-       action->aso->init_color = init_color;
-       action->aso->dmn = dmn;
-
-       refcount_inc(&dmn->refcount);
-
-       return action;
-}
-
-u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action)
-{
-       return action->reformat->id;
-}
-
-int mlx5dr_action_destroy(struct mlx5dr_action *action)
-{
-       if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
-               return -EBUSY;
-
-       switch (action->action_type) {
-       case DR_ACTION_TYP_FT:
-               if (action->dest_tbl->is_fw_tbl)
-                       refcount_dec(&action->dest_tbl->fw_tbl.dmn->refcount);
-               else
-                       refcount_dec(&action->dest_tbl->tbl->refcount);
-
-               if (action->dest_tbl->is_fw_tbl &&
-                   action->dest_tbl->fw_tbl.num_of_ref_actions) {
-                       struct mlx5dr_action **ref_actions;
-                       int i;
-
-                       ref_actions = action->dest_tbl->fw_tbl.ref_actions;
-                       for (i = 0; i < action->dest_tbl->fw_tbl.num_of_ref_actions; i++)
-                               refcount_dec(&ref_actions[i]->refcount);
-
-                       kfree(ref_actions);
-
-                       mlx5dr_fw_destroy_md_tbl(action->dest_tbl->fw_tbl.dmn,
-                                                action->dest_tbl->fw_tbl.id,
-                                                action->dest_tbl->fw_tbl.group_id);
-               }
-               break;
-       case DR_ACTION_TYP_TNL_L2_TO_L2:
-       case DR_ACTION_TYP_REMOVE_HDR:
-               refcount_dec(&action->reformat->dmn->refcount);
-               break;
-       case DR_ACTION_TYP_TNL_L3_TO_L2:
-               mlx5dr_ste_free_modify_hdr(action);
-               kfree(action->rewrite->data);
-               refcount_dec(&action->rewrite->dmn->refcount);
-               break;
-       case DR_ACTION_TYP_L2_TO_TNL_L2:
-       case DR_ACTION_TYP_L2_TO_TNL_L3:
-       case DR_ACTION_TYP_INSERT_HDR:
-               mlx5dr_cmd_destroy_reformat_ctx((action->reformat->dmn)->mdev,
-                                               action->reformat->id);
-               refcount_dec(&action->reformat->dmn->refcount);
-               break;
-       case DR_ACTION_TYP_MODIFY_HDR:
-               if (!action->rewrite->single_action_opt)
-                       mlx5dr_ste_free_modify_hdr(action);
-               kfree(action->rewrite->data);
-               refcount_dec(&action->rewrite->dmn->refcount);
-               break;
-       case DR_ACTION_TYP_SAMPLER:
-               refcount_dec(&action->sampler->dmn->refcount);
-               break;
-       case DR_ACTION_TYP_ASO_FLOW_METER:
-               refcount_dec(&action->aso->dmn->refcount);
-               break;
-       case DR_ACTION_TYP_RANGE:
-               dr_action_destroy_range_definer(action);
-               mlx5dr_action_destroy(action->range->miss_tbl_action);
-               mlx5dr_action_destroy(action->range->hit_tbl_action);
-               break;
-       default:
-               break;
-       }
-
-       kfree(action);
-       return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_arg.c
deleted file mode 100644 (file)
index 01ed644..0000000
+++ /dev/null
@@ -1,273 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-
-#include "dr_types.h"
-
-#define DR_ICM_MODIFY_HDR_GRANULARITY_4K 12
-
-/* modify-header arg pool */
-enum dr_arg_chunk_size {
-       DR_ARG_CHUNK_SIZE_1,
-       DR_ARG_CHUNK_SIZE_MIN = DR_ARG_CHUNK_SIZE_1, /* keep updated when changing */
-       DR_ARG_CHUNK_SIZE_2,
-       DR_ARG_CHUNK_SIZE_3,
-       DR_ARG_CHUNK_SIZE_4,
-       DR_ARG_CHUNK_SIZE_MAX,
-};
-
-/* argument pool area */
-struct dr_arg_pool {
-       enum dr_arg_chunk_size log_chunk_size;
-       struct mlx5dr_domain *dmn;
-       struct list_head free_list;
-       struct mutex mutex; /* protect arg pool */
-};
-
-struct mlx5dr_arg_mgr {
-       struct mlx5dr_domain *dmn;
-       struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX];
-};
-
-static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool)
-{
-       struct mlx5dr_arg_obj *arg_obj, *tmp_arg;
-       struct list_head cur_list;
-       u16 object_range;
-       int num_of_objects;
-       u32 obj_id = 0;
-       int i, ret;
-
-       INIT_LIST_HEAD(&cur_list);
-
-       object_range =
-               pool->dmn->info.caps.log_header_modify_argument_granularity;
-
-       object_range =
-               max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity,
-                     DR_ICM_MODIFY_HDR_GRANULARITY_4K);
-       object_range =
-               min_t(u32, pool->dmn->info.caps.log_header_modify_argument_max_alloc,
-                     object_range);
-
-       if (pool->log_chunk_size > object_range) {
-               mlx5dr_err(pool->dmn, "Required chunk size (%d) is not supported\n",
-                          pool->log_chunk_size);
-               return -ENOMEM;
-       }
-
-       num_of_objects = (1 << (object_range - pool->log_chunk_size));
-       /* Only one devx object per range */
-       ret = mlx5dr_cmd_create_modify_header_arg(pool->dmn->mdev,
-                                                 object_range,
-                                                 pool->dmn->pdn,
-                                                 &obj_id);
-       if (ret) {
-               mlx5dr_err(pool->dmn, "failed allocating object with range: %d:\n",
-                          object_range);
-               return -EAGAIN;
-       }
-
-       for (i = 0; i < num_of_objects; i++) {
-               arg_obj = kzalloc(sizeof(*arg_obj), GFP_KERNEL);
-               if (!arg_obj) {
-                       ret = -ENOMEM;
-                       goto clean_arg_obj;
-               }
-
-               arg_obj->log_chunk_size = pool->log_chunk_size;
-
-               list_add_tail(&arg_obj->list_node, &cur_list);
-
-               arg_obj->obj_id = obj_id;
-               arg_obj->obj_offset = i * (1 << pool->log_chunk_size);
-       }
-       list_splice_tail_init(&cur_list, &pool->free_list);
-
-       return 0;
-
-clean_arg_obj:
-       mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, obj_id);
-       list_for_each_entry_safe(arg_obj, tmp_arg, &cur_list, list_node) {
-               list_del(&arg_obj->list_node);
-               kfree(arg_obj);
-       }
-       return ret;
-}
-
-static struct mlx5dr_arg_obj *dr_arg_pool_get_arg_obj(struct dr_arg_pool *pool)
-{
-       struct mlx5dr_arg_obj *arg_obj = NULL;
-       int ret;
-
-       mutex_lock(&pool->mutex);
-       if (list_empty(&pool->free_list)) {
-               ret = dr_arg_pool_alloc_objs(pool);
-               if (ret)
-                       goto out;
-       }
-
-       arg_obj = list_first_entry_or_null(&pool->free_list,
-                                          struct mlx5dr_arg_obj,
-                                          list_node);
-       WARN(!arg_obj, "couldn't get dr arg obj from pool");
-
-       if (arg_obj)
-               list_del_init(&arg_obj->list_node);
-
-out:
-       mutex_unlock(&pool->mutex);
-       return arg_obj;
-}
-
-static void dr_arg_pool_put_arg_obj(struct dr_arg_pool *pool,
-                                   struct mlx5dr_arg_obj *arg_obj)
-{
-       mutex_lock(&pool->mutex);
-       list_add(&arg_obj->list_node, &pool->free_list);
-       mutex_unlock(&pool->mutex);
-}
-
-static struct dr_arg_pool *dr_arg_pool_create(struct mlx5dr_domain *dmn,
-                                             enum dr_arg_chunk_size chunk_size)
-{
-       struct dr_arg_pool *pool;
-
-       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-       if (!pool)
-               return NULL;
-
-       pool->dmn = dmn;
-
-       INIT_LIST_HEAD(&pool->free_list);
-       mutex_init(&pool->mutex);
-
-       pool->log_chunk_size = chunk_size;
-       if (dr_arg_pool_alloc_objs(pool))
-               goto free_pool;
-
-       return pool;
-
-free_pool:
-       kfree(pool);
-
-       return NULL;
-}
-
-static void dr_arg_pool_destroy(struct dr_arg_pool *pool)
-{
-       struct mlx5dr_arg_obj *arg_obj, *tmp_arg;
-
-       list_for_each_entry_safe(arg_obj, tmp_arg, &pool->free_list, list_node) {
-               list_del(&arg_obj->list_node);
-               if (!arg_obj->obj_offset) /* the first in range */
-                       mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, arg_obj->obj_id);
-               kfree(arg_obj);
-       }
-
-       mutex_destroy(&pool->mutex);
-       kfree(pool);
-}
-
-static enum dr_arg_chunk_size dr_arg_get_chunk_size(u16 num_of_actions)
-{
-       if (num_of_actions <= 8)
-               return DR_ARG_CHUNK_SIZE_1;
-       if (num_of_actions <= 16)
-               return DR_ARG_CHUNK_SIZE_2;
-       if (num_of_actions <= 32)
-               return DR_ARG_CHUNK_SIZE_3;
-       if (num_of_actions <= 64)
-               return DR_ARG_CHUNK_SIZE_4;
-
-       return DR_ARG_CHUNK_SIZE_MAX;
-}
-
-u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj)
-{
-       return (arg_obj->obj_id + arg_obj->obj_offset);
-}
-
-struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr,
-                                         u16 num_of_actions,
-                                         u8 *data)
-{
-       u32 size = dr_arg_get_chunk_size(num_of_actions);
-       struct mlx5dr_arg_obj *arg_obj;
-       int ret;
-
-       if (size >= DR_ARG_CHUNK_SIZE_MAX)
-               return NULL;
-
-       arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);
-       if (!arg_obj) {
-               mlx5dr_err(mgr->dmn, "Failed allocating args object for modify header\n");
-               return NULL;
-       }
-
-       /* write it into the hw */
-       ret = mlx5dr_send_postsend_args(mgr->dmn,
-                                       mlx5dr_arg_get_obj_id(arg_obj),
-                                       num_of_actions, data);
-       if (ret) {
-               mlx5dr_err(mgr->dmn, "Failed writing args object\n");
-               goto put_obj;
-       }
-
-       return arg_obj;
-
-put_obj:
-       mlx5dr_arg_put_obj(mgr, arg_obj);
-       return NULL;
-}
-
-void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr,
-                       struct mlx5dr_arg_obj *arg_obj)
-{
-       dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);
-}
-
-struct mlx5dr_arg_mgr*
-mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn)
-{
-       struct mlx5dr_arg_mgr *pool_mgr;
-       int i;
-
-       if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
-               return NULL;
-
-       pool_mgr = kzalloc(sizeof(*pool_mgr), GFP_KERNEL);
-       if (!pool_mgr)
-               return NULL;
-
-       pool_mgr->dmn = dmn;
-
-       for (i = 0; i < DR_ARG_CHUNK_SIZE_MAX; i++) {
-               pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);
-               if (!pool_mgr->pools[i])
-                       goto clean_pools;
-       }
-
-       return pool_mgr;
-
-clean_pools:
-       for (i--; i >= 0; i--)
-               dr_arg_pool_destroy(pool_mgr->pools[i]);
-
-       kfree(pool_mgr);
-       return NULL;
-}
-
-void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr)
-{
-       struct dr_arg_pool **pools;
-       int i;
-
-       if (!mgr)
-               return;
-
-       pools = mgr->pools;
-       for (i = 0; i < DR_ARG_CHUNK_SIZE_MAX; i++)
-               dr_arg_pool_destroy(pools[i]);
-
-       kfree(mgr);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_buddy.c
deleted file mode 100644 (file)
index fe228d9..0000000
+++ /dev/null
@@ -1,168 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2004 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 - 2008 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2006 - 2007 Cisco Systems, Inc. All rights reserved.
- * Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
- */
-
-#include "dr_types.h"
-
-int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
-                     unsigned int max_order)
-{
-       int i;
-
-       buddy->max_order = max_order;
-
-       INIT_LIST_HEAD(&buddy->list_node);
-
-       buddy->bitmap = kcalloc(buddy->max_order + 1,
-                               sizeof(*buddy->bitmap),
-                               GFP_KERNEL);
-       buddy->num_free = kcalloc(buddy->max_order + 1,
-                                 sizeof(*buddy->num_free),
-                                 GFP_KERNEL);
-
-       if (!buddy->bitmap || !buddy->num_free)
-               goto err_free_all;
-
-       /* Allocating max_order bitmaps, one for each order */
-
-       for (i = 0; i <= buddy->max_order; ++i) {
-               unsigned int size = 1 << (buddy->max_order - i);
-
-               buddy->bitmap[i] = bitmap_zalloc(size, GFP_KERNEL);
-               if (!buddy->bitmap[i])
-                       goto err_out_free_each_bit_per_order;
-       }
-
-       /* In the beginning, we have only one order that is available for
-        * use (the biggest one), so mark the first bit in both bitmaps.
-        */
-
-       bitmap_set(buddy->bitmap[buddy->max_order], 0, 1);
-
-       buddy->num_free[buddy->max_order] = 1;
-
-       return 0;
-
-err_out_free_each_bit_per_order:
-       for (i = 0; i <= buddy->max_order; ++i)
-               bitmap_free(buddy->bitmap[i]);
-
-err_free_all:
-       kfree(buddy->num_free);
-       kfree(buddy->bitmap);
-       return -ENOMEM;
-}
-
-void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy)
-{
-       int i;
-
-       list_del(&buddy->list_node);
-
-       for (i = 0; i <= buddy->max_order; ++i)
-               bitmap_free(buddy->bitmap[i]);
-
-       kfree(buddy->num_free);
-       kfree(buddy->bitmap);
-}
-
-static int dr_buddy_find_free_seg(struct mlx5dr_icm_buddy_mem *buddy,
-                                 unsigned int start_order,
-                                 unsigned int *segment,
-                                 unsigned int *order)
-{
-       unsigned int seg, order_iter, m;
-
-       for (order_iter = start_order;
-            order_iter <= buddy->max_order; ++order_iter) {
-               if (!buddy->num_free[order_iter])
-                       continue;
-
-               m = 1 << (buddy->max_order - order_iter);
-               seg = find_first_bit(buddy->bitmap[order_iter], m);
-
-               if (WARN(seg >= m,
-                        "ICM Buddy: failed finding free mem for order %d\n",
-                        order_iter))
-                       return -ENOMEM;
-
-               break;
-       }
-
-       if (order_iter > buddy->max_order)
-               return -ENOMEM;
-
-       *segment = seg;
-       *order = order_iter;
-       return 0;
-}
-
-/**
- * mlx5dr_buddy_alloc_mem() - Update second level bitmap.
- * @buddy: Buddy to update.
- * @order: Order of the buddy to update.
- * @segment: Segment number.
- *
- * This function finds the first area of the ICM memory managed by this buddy.
- * It uses the data structures of the buddy system in order to find the first
- * area of free place, starting from the current order till the maximum order
- * in the system.
- *
- * Return: 0 when segment is set, non-zero error status otherwise.
- *
- * The function returns the location (segment) in the whole buddy ICM memory
- * area - the index of the memory segment that is available for use.
- */
-int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy,
-                          unsigned int order,
-                          unsigned int *segment)
-{
-       unsigned int seg, order_iter;
-       int err;
-
-       err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter);
-       if (err)
-               return err;
-
-       bitmap_clear(buddy->bitmap[order_iter], seg, 1);
-       --buddy->num_free[order_iter];
-
-       /* If we found free memory in some order that is bigger than the
-        * required order, we need to split every order between the required
-        * order and the order that we found into two parts, and mark accordingly.
-        */
-       while (order_iter > order) {
-               --order_iter;
-               seg <<= 1;
-               bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
-               ++buddy->num_free[order_iter];
-       }
-
-       seg <<= order;
-       *segment = seg;
-
-       return 0;
-}
-
-void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy,
-                          unsigned int seg, unsigned int order)
-{
-       seg >>= order;
-
-       /* Whenever a segment is free,
-        * the mem is added to the buddy that gave it.
-        */
-       while (test_bit(seg ^ 1, buddy->bitmap[order])) {
-               bitmap_clear(buddy->bitmap[order], seg ^ 1, 1);
-               --buddy->num_free[order];
-               seg >>= 1;
-               ++order;
-       }
-       bitmap_set(buddy->bitmap[order], seg, 1);
-
-       ++buddy->num_free[order];
-}
-
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
deleted file mode 100644 (file)
index baefb9a..0000000
+++ /dev/null
@@ -1,970 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#include "dr_types.h"
-
-int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
-                                      bool other_vport,
-                                      u16 vport_number,
-                                      u64 *icm_address_rx,
-                                      u64 *icm_address_tx)
-{
-       u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
-       u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
-       int err;
-
-       MLX5_SET(query_esw_vport_context_in, in, opcode,
-                MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
-       MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
-       MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
-
-       err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
-       if (err)
-               return err;
-
-       *icm_address_rx =
-               MLX5_GET64(query_esw_vport_context_out, out,
-                          esw_vport_context.sw_steering_vport_icm_address_rx);
-       *icm_address_tx =
-               MLX5_GET64(query_esw_vport_context_out, out,
-                          esw_vport_context.sw_steering_vport_icm_address_tx);
-       return 0;
-}
-
-int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
-                         u16 vport_number, u16 *gvmi)
-{
-       bool ec_vf_func = other_vport ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
-       u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
-       int out_size;
-       void *out;
-       int err;
-
-       out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
-       out = kzalloc(out_size, GFP_KERNEL);
-       if (!out)
-               return -ENOMEM;
-
-       MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
-       MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
-       MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
-       MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
-       MLX5_SET(query_hca_cap_in, in, op_mod,
-                MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
-                HCA_CAP_OPMOD_GET_CUR);
-
-       err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
-       if (err) {
-               kfree(out);
-               return err;
-       }
-
-       *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
-
-       kfree(out);
-       return 0;
-}
-
-int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
-                             struct mlx5dr_esw_caps *caps)
-{
-       caps->drop_icm_address_rx =
-               MLX5_CAP64_ESW_FLOWTABLE(mdev,
-                                        sw_steering_fdb_action_drop_icm_address_rx);
-       caps->drop_icm_address_tx =
-               MLX5_CAP64_ESW_FLOWTABLE(mdev,
-                                        sw_steering_fdb_action_drop_icm_address_tx);
-       caps->uplink_icm_address_rx =
-               MLX5_CAP64_ESW_FLOWTABLE(mdev,
-                                        sw_steering_uplink_icm_address_rx);
-       caps->uplink_icm_address_tx =
-               MLX5_CAP64_ESW_FLOWTABLE(mdev,
-                                        sw_steering_uplink_icm_address_tx);
-       caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
-       if (!caps->sw_owner_v2)
-               caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
-
-       return 0;
-}
-
-static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
-                                         u16 vport, bool *roce_en)
-{
-       u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
-       u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
-       int err;
-
-       MLX5_SET(query_nic_vport_context_in, in, opcode,
-                MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
-       MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
-       MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
-
-       err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (err)
-               return err;
-
-       *roce_en = MLX5_GET(query_nic_vport_context_out, out,
-                           nic_vport_context.roce_en);
-       return 0;
-}
-
-int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
-                           struct mlx5dr_cmd_caps *caps)
-{
-       bool roce_en;
-       int err;
-
-       caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
-       caps->eswitch_manager   = MLX5_CAP_GEN(mdev, eswitch_manager);
-       caps->gvmi              = MLX5_CAP_GEN(mdev, vhca_id);
-       caps->flex_protocols    = MLX5_CAP_GEN(mdev, flex_parser_protocols);
-       caps->sw_format_ver     = MLX5_CAP_GEN(mdev, steering_format_version);
-       caps->roce_caps.fl_rc_qp_when_roce_disabled =
-               MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
-
-       if (MLX5_CAP_GEN(mdev, roce)) {
-               err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
-               if (err)
-                       return err;
-
-               caps->roce_caps.roce_en = roce_en;
-               caps->roce_caps.fl_rc_qp_when_roce_disabled |=
-                       MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
-               caps->roce_caps.fl_rc_qp_when_roce_enabled =
-                       MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
-       }
-
-       caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
-
-       caps->support_modify_argument =
-               MLX5_CAP_GEN_64(mdev, general_obj_types) &
-               MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT;
-
-       if (caps->support_modify_argument) {
-               caps->log_header_modify_argument_granularity =
-                       MLX5_CAP_GEN(mdev, log_header_modify_argument_granularity);
-               caps->log_header_modify_argument_max_alloc =
-                       MLX5_CAP_GEN(mdev, log_header_modify_argument_max_alloc);
-       }
-
-       /* geneve_tlv_option_0_exist is the indication of
-        * STE support for lookup type flex_parser_ok
-        */
-       caps->flex_parser_ok_bits_supp =
-               MLX5_CAP_FLOWTABLE(mdev,
-                                  flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
-               caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
-               caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
-       }
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
-               caps->flex_parser_id_icmpv6_dw0 =
-                       MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
-               caps->flex_parser_id_icmpv6_dw1 =
-                       MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
-       }
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
-               caps->flex_parser_id_geneve_tlv_option_0 =
-                       MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
-               caps->flex_parser_id_mpls_over_gre =
-                       MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
-               caps->flex_parser_id_mpls_over_udp =
-                       MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
-               caps->flex_parser_id_gtpu_dw_0 =
-                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
-               caps->flex_parser_id_gtpu_teid =
-                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
-               caps->flex_parser_id_gtpu_dw_2 =
-                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);
-
-       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
-               caps->flex_parser_id_gtpu_first_ext_dw_0 =
-                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);
-
-       caps->nic_rx_drop_address =
-               MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
-       caps->nic_tx_drop_address =
-               MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
-       caps->nic_tx_allow_address =
-               MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
-
-       caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
-       caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
-
-       if (!caps->rx_sw_owner_v2)
-               caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
-       if (!caps->tx_sw_owner_v2)
-               caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
-
-       caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
-
-       caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
-       caps->hdr_modify_icm_addr =
-               MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
-
-       caps->log_modify_pattern_icm_size =
-               MLX5_CAP_DEV_MEM(mdev, log_header_modify_pattern_sw_icm_size);
-
-       caps->hdr_modify_pattern_icm_addr =
-               MLX5_CAP64_DEV_MEM(mdev, header_modify_pattern_sw_icm_start_address);
-
-       caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
-
-       caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
-
-       return 0;
-}
-
-int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
-                               enum fs_flow_table_type type,
-                               u32 table_id,
-                               struct mlx5dr_cmd_query_flow_table_details *output)
-{
-       u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
-       u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
-       int err;
-
-       MLX5_SET(query_flow_table_in, in, opcode,
-                MLX5_CMD_OP_QUERY_FLOW_TABLE);
-
-       MLX5_SET(query_flow_table_in, in, table_type, type);
-       MLX5_SET(query_flow_table_in, in, table_id, table_id);
-
-       err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
-       if (err)
-               return err;
-
-       output->status = MLX5_GET(query_flow_table_out, out, status);
-       output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
-
-       output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
-                                                flow_table_context.sws.sw_owner_icm_root_1);
-       output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
-                                                flow_table_context.sws.sw_owner_icm_root_0);
-
-       return 0;
-}
-
-int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
-                                 u32 sampler_id,
-                                 u64 *rx_icm_addr,
-                                 u64 *tx_icm_addr)
-{
-       u32 out[MLX5_ST_SZ_DW(query_sampler_obj_out)] = {};
-       u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
-       void *attr;
-       int ret;
-
-       MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
-                MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
-                MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
-       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);
-
-       ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-       if (ret)
-               return ret;
-
-       attr = MLX5_ADDR_OF(query_sampler_obj_out, out, sampler_object);
-
-       *rx_icm_addr = MLX5_GET64(sampler_obj, attr,
-                                 sw_steering_icm_address_rx);
-       *tx_icm_addr = MLX5_GET64(sampler_obj, attr,
-                                 sw_steering_icm_address_tx);
-
-       return 0;
-}
-
-int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
-{
-       u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
-
-       /* Skip SYNC in case the device is internal error state.
-        * Besides a device error, this also happens when we're
-        * in fast teardown
-        */
-       if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
-               return 0;
-
-       MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
-
-       return mlx5_cmd_exec_in(mdev, sync_steering, in);
-}
-
-int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
-                                       u32 table_type,
-                                       u32 table_id,
-                                       u32 group_id,
-                                       u32 modify_header_id,
-                                       u16 vport)
-{
-       u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
-       void *in_flow_context;
-       unsigned int inlen;
-       void *in_dests;
-       u32 *in;
-       int err;
-
-       inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
-               1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */
-
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
-       MLX5_SET(set_fte_in, in, table_type, table_type);
-       MLX5_SET(set_fte_in, in, table_id, table_id);
-
-       in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
-       MLX5_SET(flow_context, in_flow_context, group_id, group_id);
-       MLX5_SET(flow_context, in_flow_context, modify_header_id, modify_header_id);
-       MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
-       MLX5_SET(flow_context, in_flow_context, action,
-                MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
-                MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);
-
-       in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
-       MLX5_SET(dest_format_struct, in_dests, destination_type,
-                MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
-       MLX5_SET(dest_format_struct, in_dests, destination_id, vport);
-
-       err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
-       kvfree(in);
-
-       return err;
-}
-
-int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
-                                   u32 table_type,
-                                   u32 table_id)
-{
-       u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
-
-       MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
-       MLX5_SET(delete_fte_in, in, table_type, table_type);
-       MLX5_SET(delete_fte_in, in, table_id, table_id);
-
-       return mlx5_cmd_exec_in(mdev, delete_fte, in);
-}
-
-int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
-                                  u32 table_type,
-                                  u8 num_of_actions,
-                                  u64 *actions,
-                                  u32 *modify_header_id)
-{
-       u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
-       void *p_actions;
-       u32 inlen;
-       u32 *in;
-       int err;
-
-       inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
-                num_of_actions * sizeof(u64);
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       MLX5_SET(alloc_modify_header_context_in, in, opcode,
-                MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
-       MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
-       MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions);
-       p_actions = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
-       memcpy(p_actions, actions, num_of_actions * sizeof(u64));
-
-       err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
-       if (err)
-               goto out;
-
-       *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out,
-                                    modify_header_id);
-out:
-       kvfree(in);
-       return err;
-}
-
-int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
-                                    u32 modify_header_id)
-{
-       u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
-
-       MLX5_SET(dealloc_modify_header_context_in, in, opcode,
-                MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
-       MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
-                modify_header_id);
-
-       return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
-}
-
-int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
-                                      u32 table_type,
-                                      u32 table_id,
-                                      u32 *group_id)
-{
-       u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
-       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-       u32 *in;
-       int err;
-
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
-       MLX5_SET(create_flow_group_in, in, table_type, table_type);
-       MLX5_SET(create_flow_group_in, in, table_id, table_id);
-
-       err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
-       if (err)
-               goto out;
-
-       *group_id = MLX5_GET(create_flow_group_out, out, group_id);
-
-out:
-       kvfree(in);
-       return err;
-}
-
-int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
-                                 u32 table_type,
-                                 u32 table_id,
-                                 u32 group_id)
-{
-       u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
-
-       MLX5_SET(destroy_flow_group_in, in, opcode,
-                MLX5_CMD_OP_DESTROY_FLOW_GROUP);
-       MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
-       MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
-       MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
-
-       return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
-}
-
-int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
-                                struct mlx5dr_cmd_create_flow_table_attr *attr,
-                                u64 *fdb_rx_icm_addr,
-                                u32 *table_id)
-{
-       u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
-       u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
-       void *ft_mdev;
-       int err;
-
-       MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
-       MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
-       MLX5_SET(create_flow_table_in, in, uid, attr->uid);
-
-       ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
-       MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
-       MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
-       MLX5_SET(flow_table_context, ft_mdev, level, attr->level);
-
-       if (attr->sw_owner) {
-               /* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
-                * icm_addr_1 used for FDB TX
-                */
-               if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
-                       MLX5_SET64(flow_table_context, ft_mdev,
-                                  sws.sw_owner_icm_root_0, attr->icm_addr_rx);
-               } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
-                       MLX5_SET64(flow_table_context, ft_mdev,
-                                  sws.sw_owner_icm_root_0, attr->icm_addr_tx);
-               } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
-                       MLX5_SET64(flow_table_context, ft_mdev,
-                                  sws.sw_owner_icm_root_0, attr->icm_addr_rx);
-                       MLX5_SET64(flow_table_context, ft_mdev,
-                                  sws.sw_owner_icm_root_1, attr->icm_addr_tx);
-               }
-       }
-
-       MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
-                attr->decap_en);
-       MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
-                attr->reformat_en);
-
-       err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
-       if (err)
-               return err;
-
-       *table_id = MLX5_GET(create_flow_table_out, out, table_id);
-       if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
-           fdb_rx_icm_addr)
-               *fdb_rx_icm_addr =
-               (u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
-               (u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
-               (u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;
-
-       return 0;
-}
-
-int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
-                                 u32 table_id,
-                                 u32 table_type)
-{
-       u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
-
-       MLX5_SET(destroy_flow_table_in, in, opcode,
-                MLX5_CMD_OP_DESTROY_FLOW_TABLE);
-       MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
-       MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
-
-       return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
-}
-
-int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
-                                  enum mlx5_reformat_ctx_type rt,
-                                  u8 reformat_param_0,
-                                  u8 reformat_param_1,
-                                  size_t reformat_size,
-                                  void *reformat_data,
-                                  u32 *reformat_id)
-{
-       u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
-       size_t inlen, cmd_data_sz, cmd_total_sz;
-       void *prctx;
-       void *pdata;
-       void *in;
-       int err;
-
-       cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
-       cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
-                                       packet_reformat_context.reformat_data);
-       inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
-                MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
-
-       prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, packet_reformat_context);
-       pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
-
-       MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
-       MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, reformat_param_0);
-       MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1, reformat_param_1);
-       MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
-       if (reformat_data && reformat_size)
-               memcpy(pdata, reformat_data, reformat_size);
-
-       err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
-       if (err)
-               goto err_free_in;
-
-       *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
-
-err_free_in:
-       kvfree(in);
-       return err;
-}
-
-void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
-                                    u32 reformat_id)
-{
-       u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
-
-       MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
-                MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
-       MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
-                reformat_id);
-
-       mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
-}
-
-static void dr_cmd_set_definer_format(void *ptr, u16 format_id,
-                                     u8 *dw_selectors,
-                                     u8 *byte_selectors)
-{
-       if (format_id != MLX5_IFC_DEFINER_FORMAT_ID_SELECT)
-               return;
-
-       MLX5_SET(match_definer, ptr, format_select_dw0, dw_selectors[0]);
-       MLX5_SET(match_definer, ptr, format_select_dw1, dw_selectors[1]);
-       MLX5_SET(match_definer, ptr, format_select_dw2, dw_selectors[2]);
-       MLX5_SET(match_definer, ptr, format_select_dw3, dw_selectors[3]);
-       MLX5_SET(match_definer, ptr, format_select_dw4, dw_selectors[4]);
-       MLX5_SET(match_definer, ptr, format_select_dw5, dw_selectors[5]);
-       MLX5_SET(match_definer, ptr, format_select_dw6, dw_selectors[6]);
-       MLX5_SET(match_definer, ptr, format_select_dw7, dw_selectors[7]);
-       MLX5_SET(match_definer, ptr, format_select_dw8, dw_selectors[8]);
-
-       MLX5_SET(match_definer, ptr, format_select_byte0, byte_selectors[0]);
-       MLX5_SET(match_definer, ptr, format_select_byte1, byte_selectors[1]);
-       MLX5_SET(match_definer, ptr, format_select_byte2, byte_selectors[2]);
-       MLX5_SET(match_definer, ptr, format_select_byte3, byte_selectors[3]);
-       MLX5_SET(match_definer, ptr, format_select_byte4, byte_selectors[4]);
-       MLX5_SET(match_definer, ptr, format_select_byte5, byte_selectors[5]);
-       MLX5_SET(match_definer, ptr, format_select_byte6, byte_selectors[6]);
-       MLX5_SET(match_definer, ptr, format_select_byte7, byte_selectors[7]);
-}
-
-int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
-                             u16 format_id,
-                             u8 *dw_selectors,
-                             u8 *byte_selectors,
-                             u8 *match_mask,
-                             u32 *definer_id)
-{
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
-       u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
-       void *ptr;
-       int err;
-
-       ptr = MLX5_ADDR_OF(create_match_definer_in, in,
-                          general_obj_in_cmd_hdr);
-       MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
-                MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
-                MLX5_OBJ_TYPE_MATCH_DEFINER);
-
-       ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
-       MLX5_SET(match_definer, ptr, format_id, format_id);
-
-       dr_cmd_set_definer_format(ptr, format_id,
-                                 dw_selectors, byte_selectors);
-
-       ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
-       memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
-
-       err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-       if (err)
-               return err;
-
-       *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-
-       return 0;
-}
-
-void
-mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev, u32 definer_id)
-{
-       u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
-
-       MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
-       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
-
-       mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
-}
-
-int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
-                        u16 index, struct mlx5dr_cmd_gid_attr *attr)
-{
-       u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
-       u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
-       int err;
-
-       MLX5_SET(query_roce_address_in, in, opcode,
-                MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
-
-       MLX5_SET(query_roce_address_in, in, roce_address_index, index);
-       MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
-
-       err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
-       if (err)
-               return err;
-
-       memcpy(&attr->gid,
-              MLX5_ADDR_OF(query_roce_address_out,
-                           out, roce_address.source_l3_address),
-              sizeof(attr->gid));
-       memcpy(attr->mac,
-              MLX5_ADDR_OF(query_roce_address_out, out,
-                           roce_address.source_mac_47_32),
-              sizeof(attr->mac));
-
-       if (MLX5_GET(query_roce_address_out, out,
-                    roce_address.roce_version) == MLX5_ROCE_VERSION_2)
-               attr->roce_ver = MLX5_ROCE_VERSION_2;
-       else
-               attr->roce_ver = MLX5_ROCE_VERSION_1;
-
-       return 0;
-}
-
-int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
-                                       u16 log_obj_range, u32 pd,
-                                       u32 *obj_id)
-{
-       u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {};
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
-       void *attr;
-       int ret;
-
-       attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
-       MLX5_SET(general_obj_in_cmd_hdr, attr, opcode,
-                MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr, attr, obj_type,
-                MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
-       MLX5_SET(general_obj_in_cmd_hdr, attr,
-                op_param.create.log_obj_range, log_obj_range);
-
-       attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
-       MLX5_SET(modify_header_arg, attr, access_pd, pd);
-
-       ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-       if (ret)
-               return ret;
-
-       *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
-       return 0;
-}
-
-void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
-                                         u32 obj_id)
-{
-       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
-       u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
-
-       MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
-                MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
-       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
-                MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
-       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
-
-       mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-}
-
-static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
-                                       struct mlx5dr_cmd_fte_info *fte,
-                                       bool *extended_dest)
-{
-       int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
-       int num_fwd_destinations = 0;
-       int num_encap = 0;
-       int i;
-
-       *extended_dest = false;
-       if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
-               return 0;
-       for (i = 0; i < fte->dests_size; i++) {
-               if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
-                   fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE)
-                       continue;
-               if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
-                    fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
-                   fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
-                       num_encap++;
-               num_fwd_destinations++;
-       }
-
-       if (num_fwd_destinations > 1 && num_encap > 0)
-               *extended_dest = true;
-
-       if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
-               mlx5_core_warn(dev, "FW does not support extended destination");
-               return -EOPNOTSUPP;
-       }
-       if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
-               mlx5_core_warn(dev, "FW does not support more than %d encaps",
-                              1 << fw_log_max_fdb_encap_uplink);
-               return -EOPNOTSUPP;
-       }
-
-       return 0;
-}
-
-int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
-                      int opmod, int modify_mask,
-                      struct mlx5dr_cmd_ft_info *ft,
-                      u32 group_id,
-                      struct mlx5dr_cmd_fte_info *fte)
-{
-       u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
-       void *in_flow_context, *vlan;
-       bool extended_dest = false;
-       void *in_match_value;
-       unsigned int inlen;
-       int dst_cnt_size;
-       void *in_dests;
-       u32 *in;
-       int err;
-       int i;
-
-       if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
-               return -EOPNOTSUPP;
-
-       if (!extended_dest)
-               dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
-       else
-               dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
-
-       inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               return -ENOMEM;
-
-       MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
-       MLX5_SET(set_fte_in, in, op_mod, opmod);
-       MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
-       MLX5_SET(set_fte_in, in, table_type, ft->type);
-       MLX5_SET(set_fte_in, in, table_id, ft->id);
-       MLX5_SET(set_fte_in, in, flow_index, fte->index);
-       MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);
-       if (ft->vport) {
-               MLX5_SET(set_fte_in, in, vport_number, ft->vport);
-               MLX5_SET(set_fte_in, in, other_vport, 1);
-       }
-
-       in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
-       MLX5_SET(flow_context, in_flow_context, group_id, group_id);
-
-       MLX5_SET(flow_context, in_flow_context, flow_tag,
-                fte->flow_context.flow_tag);
-       MLX5_SET(flow_context, in_flow_context, flow_source,
-                fte->flow_context.flow_source);
-
-       MLX5_SET(flow_context, in_flow_context, extended_destination,
-                extended_dest);
-       if (extended_dest) {
-               u32 action;
-
-               action = fte->action.action &
-                       ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
-               MLX5_SET(flow_context, in_flow_context, action, action);
-       } else {
-               MLX5_SET(flow_context, in_flow_context, action,
-                        fte->action.action);
-               if (fte->action.pkt_reformat)
-                       MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
-                                fte->action.pkt_reformat->id);
-       }
-       if (fte->action.modify_hdr)
-               MLX5_SET(flow_context, in_flow_context, modify_header_id,
-                        fte->action.modify_hdr->id);
-
-       vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
-
-       MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
-       MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
-       MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
-
-       vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
-
-       MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
-       MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
-       MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
-
-       in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
-                                     match_value);
-       memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);
-
-       in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
-       if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-               int list_size = 0;
-
-               for (i = 0; i < fte->dests_size; i++) {
-                       enum mlx5_flow_destination_type type = fte->dest_arr[i].type;
-                       enum mlx5_ifc_flow_destination_type ifc_type;
-                       unsigned int id;
-
-                       if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
-                               continue;
-
-                       switch (type) {
-                       case MLX5_FLOW_DESTINATION_TYPE_NONE:
-                               continue;
-                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
-                               id = fte->dest_arr[i].ft_num;
-                               ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-                               break;
-                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
-                               id = fte->dest_arr[i].ft_id;
-                               ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-
-                               break;
-                       case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
-                       case MLX5_FLOW_DESTINATION_TYPE_VPORT:
-                               if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
-                                       id = fte->dest_arr[i].vport.num;
-                                       MLX5_SET(dest_format_struct, in_dests,
-                                                destination_eswitch_owner_vhca_id_valid,
-                                                !!(fte->dest_arr[i].vport.flags &
-                                                   MLX5_FLOW_DEST_VPORT_VHCA_ID));
-                                       ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
-                               } else {
-                                       id = 0;
-                                       ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
-                                       MLX5_SET(dest_format_struct, in_dests,
-                                                destination_eswitch_owner_vhca_id_valid, 1);
-                               }
-                               MLX5_SET(dest_format_struct, in_dests,
-                                        destination_eswitch_owner_vhca_id,
-                                        fte->dest_arr[i].vport.vhca_id);
-                               if (extended_dest && (fte->dest_arr[i].vport.flags &
-                                                   MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
-                                       MLX5_SET(dest_format_struct, in_dests,
-                                                packet_reformat,
-                                                !!(fte->dest_arr[i].vport.flags &
-                                                   MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
-                                       MLX5_SET(extended_dest_format, in_dests,
-                                                packet_reformat_id,
-                                                fte->dest_arr[i].vport.reformat_id);
-                               }
-                               break;
-                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
-                               id = fte->dest_arr[i].sampler_id;
-                               ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
-                               break;
-                       default:
-                               id = fte->dest_arr[i].tir_num;
-                               ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
-                       }
-
-                       MLX5_SET(dest_format_struct, in_dests, destination_type,
-                                ifc_type);
-                       MLX5_SET(dest_format_struct, in_dests, destination_id, id);
-                       in_dests += dst_cnt_size;
-                       list_size++;
-               }
-
-               MLX5_SET(flow_context, in_flow_context, destination_list_size,
-                        list_size);
-       }
-
-       if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
-               int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
-                                       log_max_flow_counter,
-                                       ft->type));
-               int list_size = 0;
-
-               for (i = 0; i < fte->dests_size; i++) {
-                       if (fte->dest_arr[i].type !=
-                           MLX5_FLOW_DESTINATION_TYPE_COUNTER)
-                               continue;
-
-                       MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
-                                fte->dest_arr[i].counter_id);
-                       in_dests += dst_cnt_size;
-                       list_size++;
-               }
-               if (list_size > max_list_size) {
-                       err = -EINVAL;
-                       goto err_out;
-               }
-
-               MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
-                        list_size);
-       }
-
-       err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
-err_out:
-       kvfree(in);
-       return err;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.c
deleted file mode 100644 (file)
index 030a577..0000000
+++ /dev/null
@@ -1,1186 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-
-#include <linux/debugfs.h>
-#include <linux/kernel.h>
-#include <linux/seq_file.h>
-#include <linux/version.h>
-#include "dr_types.h"
-
-#define DR_DBG_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
-
-enum dr_dump_rec_type {
-       DR_DUMP_REC_TYPE_DOMAIN = 3000,
-       DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER = 3001,
-       DR_DUMP_REC_TYPE_DOMAIN_INFO_DEV_ATTR = 3002,
-       DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT = 3003,
-       DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS = 3004,
-       DR_DUMP_REC_TYPE_DOMAIN_SEND_RING = 3005,
-
-       DR_DUMP_REC_TYPE_TABLE = 3100,
-       DR_DUMP_REC_TYPE_TABLE_RX = 3101,
-       DR_DUMP_REC_TYPE_TABLE_TX = 3102,
-
-       DR_DUMP_REC_TYPE_MATCHER = 3200,
-       DR_DUMP_REC_TYPE_MATCHER_MASK_DEPRECATED = 3201,
-       DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
-       DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
-       DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
-       DR_DUMP_REC_TYPE_MATCHER_MASK = 3205,
-
-       DR_DUMP_REC_TYPE_RULE = 3300,
-       DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
-       DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0 = 3302,
-       DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 = 3303,
-       DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1 = 3304,
-
-       DR_DUMP_REC_TYPE_ACTION_ENCAP_L2 = 3400,
-       DR_DUMP_REC_TYPE_ACTION_ENCAP_L3 = 3401,
-       DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR = 3402,
-       DR_DUMP_REC_TYPE_ACTION_DROP = 3403,
-       DR_DUMP_REC_TYPE_ACTION_QP = 3404,
-       DR_DUMP_REC_TYPE_ACTION_FT = 3405,
-       DR_DUMP_REC_TYPE_ACTION_CTR = 3406,
-       DR_DUMP_REC_TYPE_ACTION_TAG = 3407,
-       DR_DUMP_REC_TYPE_ACTION_VPORT = 3408,
-       DR_DUMP_REC_TYPE_ACTION_DECAP_L2 = 3409,
-       DR_DUMP_REC_TYPE_ACTION_DECAP_L3 = 3410,
-       DR_DUMP_REC_TYPE_ACTION_DEVX_TIR = 3411,
-       DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN = 3412,
-       DR_DUMP_REC_TYPE_ACTION_POP_VLAN = 3413,
-       DR_DUMP_REC_TYPE_ACTION_SAMPLER = 3415,
-       DR_DUMP_REC_TYPE_ACTION_INSERT_HDR = 3420,
-       DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421,
-       DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425,
-};
-
-static struct mlx5dr_dbg_dump_buff *
-mlx5dr_dbg_dump_data_init_new_buff(struct mlx5dr_dbg_dump_data *dump_data)
-{
-       struct mlx5dr_dbg_dump_buff *new_buff;
-
-       new_buff = kzalloc(sizeof(*new_buff), GFP_KERNEL);
-       if (!new_buff)
-               return NULL;
-
-       new_buff->buff = kvzalloc(MLX5DR_DEBUG_DUMP_BUFF_SIZE, GFP_KERNEL);
-       if (!new_buff->buff) {
-               kfree(new_buff);
-               return NULL;
-       }
-
-       INIT_LIST_HEAD(&new_buff->node);
-       list_add_tail(&new_buff->node, &dump_data->buff_list);
-
-       return new_buff;
-}
-
-static struct mlx5dr_dbg_dump_data *
-mlx5dr_dbg_create_dump_data(void)
-{
-       struct mlx5dr_dbg_dump_data *dump_data;
-
-       dump_data = kzalloc(sizeof(*dump_data), GFP_KERNEL);
-       if (!dump_data)
-               return NULL;
-
-       INIT_LIST_HEAD(&dump_data->buff_list);
-
-       if (!mlx5dr_dbg_dump_data_init_new_buff(dump_data)) {
-               kfree(dump_data);
-               return NULL;
-       }
-
-       return dump_data;
-}
-
-static void
-mlx5dr_dbg_destroy_dump_data(struct mlx5dr_dbg_dump_data *dump_data)
-{
-       struct mlx5dr_dbg_dump_buff *dump_buff, *tmp_buff;
-
-       if (!dump_data)
-               return;
-
-       list_for_each_entry_safe(dump_buff, tmp_buff, &dump_data->buff_list, node) {
-               kvfree(dump_buff->buff);
-               list_del(&dump_buff->node);
-               kfree(dump_buff);
-       }
-
-       kfree(dump_data);
-}
-
-static int
-mlx5dr_dbg_dump_data_print(struct seq_file *file, char *str, u32 size)
-{
-       struct mlx5dr_domain *dmn = file->private;
-       struct mlx5dr_dbg_dump_data *dump_data;
-       struct mlx5dr_dbg_dump_buff *buff;
-       u32 buff_capacity, write_size;
-       int remain_size, ret;
-
-       if (size >= MLX5DR_DEBUG_DUMP_BUFF_SIZE)
-               return -EINVAL;
-
-       dump_data = dmn->dump_info.dump_data;
-       buff = list_last_entry(&dump_data->buff_list,
-                              struct mlx5dr_dbg_dump_buff, node);
-
-       buff_capacity = (MLX5DR_DEBUG_DUMP_BUFF_SIZE - 1) - buff->index;
-       remain_size = buff_capacity - size;
-       write_size = (remain_size > 0) ? size : buff_capacity;
-
-       if (likely(write_size)) {
-               ret = snprintf(buff->buff + buff->index, write_size + 1, "%s", str);
-               if (ret < 0)
-                       return ret;
-
-               buff->index += write_size;
-       }
-
-       if (remain_size < 0) {
-               remain_size *= -1;
-               buff = mlx5dr_dbg_dump_data_init_new_buff(dump_data);
-               if (!buff)
-                       return -ENOMEM;
-
-               ret = snprintf(buff->buff, remain_size + 1, "%s", str + write_size);
-               if (ret < 0)
-                       return ret;
-
-               buff->index += remain_size;
-       }
-
-       return 0;
-}
-
-void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
-{
-       mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
-       list_add_tail(&tbl->dbg_node, &tbl->dmn->dbg_tbl_list);
-       mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
-}
-
-void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl)
-{
-       mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
-       list_del(&tbl->dbg_node);
-       mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
-}
-
-void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule)
-{
-       struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
-
-       mutex_lock(&dmn->dump_info.dbg_mutex);
-       list_add_tail(&rule->dbg_node, &rule->matcher->dbg_rule_list);
-       mutex_unlock(&dmn->dump_info.dbg_mutex);
-}
-
-void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule)
-{
-       struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
-
-       mutex_lock(&dmn->dump_info.dbg_mutex);
-       list_del(&rule->dbg_node);
-       mutex_unlock(&dmn->dump_info.dbg_mutex);
-}
-
-static u64 dr_dump_icm_to_idx(u64 icm_addr)
-{
-       return (icm_addr >> 6) & 0xffffffff;
-}
-
-#define DR_HEX_SIZE 256
-
-static void
-dr_dump_hex_print(char hex[DR_HEX_SIZE], char *src, u32 size)
-{
-       if (WARN_ON_ONCE(DR_HEX_SIZE < 2 * size + 1))
-               size = DR_HEX_SIZE / 2 - 1; /* truncate */
-
-       bin2hex(hex, src, size);
-       hex[2 * size] = 0; /* NULL-terminate */
-}
-
-static int
-dr_dump_rule_action_mem(struct seq_file *file, char *buff, const u64 rule_id,
-                       struct mlx5dr_rule_action_member *action_mem)
-{
-       struct mlx5dr_action *action = action_mem->action;
-       const u64 action_id = DR_DBG_PTR_TO_ID(action);
-       u64 hit_tbl_ptr, miss_tbl_ptr;
-       u32 hit_tbl_id, miss_tbl_id;
-       int ret;
-
-       switch (action->action_type) {
-       case DR_ACTION_TYP_DROP:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx\n",
-                              DR_DUMP_REC_TYPE_ACTION_DROP, action_id,
-                              rule_id);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_FT:
-               if (action->dest_tbl->is_fw_tbl)
-                       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                                      "%d,0x%llx,0x%llx,0x%x,0x%x\n",
-                                      DR_DUMP_REC_TYPE_ACTION_FT, action_id,
-                                      rule_id, action->dest_tbl->fw_tbl.id,
-                                      -1);
-               else
-                       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                                      "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
-                                      DR_DUMP_REC_TYPE_ACTION_FT, action_id,
-                                      rule_id, action->dest_tbl->tbl->table_id,
-                                      DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
-
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_CTR:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x\n",
-                              DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
-                              action->ctr->ctr_id + action->ctr->offset);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_TAG:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x\n",
-                              DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
-                              action->flow_tag->flow_tag);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_MODIFY_HDR:
-       {
-               struct mlx5dr_ptrn_obj *ptrn = action->rewrite->ptrn;
-               struct mlx5dr_arg_obj *arg = action->rewrite->arg;
-               u8 *rewrite_data = action->rewrite->data;
-               bool ptrn_arg;
-               int i;
-
-               ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg;
-
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
-                              DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
-                              rule_id, action->rewrite->index,
-                              action->rewrite->single_action_opt,
-                              ptrn_arg ? action->rewrite->num_of_actions : 0,
-                              ptrn_arg ? ptrn->index : 0,
-                              ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-
-               if (ptrn_arg) {
-                       for (i = 0; i < action->rewrite->num_of_actions; i++) {
-                               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                                              ",0x%016llx",
-                                              be64_to_cpu(((__be64 *)rewrite_data)[i]));
-                               if (ret < 0)
-                                       return ret;
-
-                               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-                               if (ret)
-                                       return ret;
-                       }
-               }
-
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "\n");
-               if (ret < 0)
-                       return ret;
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       }
-       case DR_ACTION_TYP_VPORT:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x\n",
-                              DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
-                              action->vport->caps->num);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_TNL_L2_TO_L2:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx\n",
-                              DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
-                              rule_id);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_TNL_L3_TO_L2:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x\n",
-                              DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
-                              rule_id,
-                              (action->rewrite->ptrn && action->rewrite->arg) ?
-                              mlx5dr_arg_get_obj_id(action->rewrite->arg) :
-                              action->rewrite->index);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_L2_TO_TNL_L2:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x\n",
-                              DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
-                              rule_id, action->reformat->id);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_L2_TO_TNL_L3:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x\n",
-                              DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
-                              rule_id, action->reformat->id);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_POP_VLAN:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx\n",
-                              DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
-                              rule_id);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_PUSH_VLAN:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x\n",
-                              DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
-                              rule_id, action->push_vlan->vlan_hdr);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_INSERT_HDR:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
-                              DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
-                              rule_id, action->reformat->id,
-                              action->reformat->param_0,
-                              action->reformat->param_1);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_REMOVE_HDR:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
-                              DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
-                              rule_id, action->reformat->id,
-                              action->reformat->param_0,
-                              action->reformat->param_1);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_SAMPLER:
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
-                              DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id,
-                              rule_id, 0, 0, action->sampler->sampler_id,
-                              action->sampler->rx_icm_addr,
-                              action->sampler->tx_icm_addr);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       case DR_ACTION_TYP_RANGE:
-               if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
-                       hit_tbl_id = action->range->hit_tbl_action->dest_tbl->fw_tbl.id;
-                       hit_tbl_ptr = 0;
-               } else {
-                       hit_tbl_id = action->range->hit_tbl_action->dest_tbl->tbl->table_id;
-                       hit_tbl_ptr =
-                               DR_DBG_PTR_TO_ID(action->range->hit_tbl_action->dest_tbl->tbl);
-               }
-
-               if (action->range->miss_tbl_action->dest_tbl->is_fw_tbl) {
-                       miss_tbl_id = action->range->miss_tbl_action->dest_tbl->fw_tbl.id;
-                       miss_tbl_ptr = 0;
-               } else {
-                       miss_tbl_id = action->range->miss_tbl_action->dest_tbl->tbl->table_id;
-                       miss_tbl_ptr =
-                               DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
-               }
-
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
-                              DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id,
-                              rule_id, hit_tbl_id, hit_tbl_ptr, miss_tbl_id,
-                              miss_tbl_ptr, action->range->definer_id);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-               break;
-       default:
-               return 0;
-       }
-
-       return 0;
-}
-
-static int
-dr_dump_rule_mem(struct seq_file *file, char *buff, struct mlx5dr_ste *ste,
-                bool is_rx, const u64 rule_id, u8 format_ver)
-{
-       char hw_ste_dump[DR_HEX_SIZE];
-       u32 mem_rec_type;
-       int ret;
-
-       if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) {
-               mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 :
-                                      DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0;
-       } else {
-               mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 :
-                                      DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1;
-       }
-
-       dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
-                         DR_STE_SIZE_REDUCED);
-
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                      "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
-                      dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)),
-                      rule_id, hw_ste_dump);
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-dr_dump_rule_rx_tx(struct seq_file *file, char *buff,
-                  struct mlx5dr_rule_rx_tx *rule_rx_tx,
-                  bool is_rx, const u64 rule_id, u8 format_ver)
-{
-       struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
-       struct mlx5dr_ste *curr_ste = rule_rx_tx->last_rule_ste;
-       int ret, i;
-
-       if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
-               return 0;
-
-       while (i--) {
-               ret = dr_dump_rule_mem(file, buff, ste_arr[i], is_rx, rule_id,
-                                      format_ver);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static noinline_for_stack int
-dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
-{
-       struct mlx5dr_rule_action_member *action_mem;
-       const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
-       char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
-       struct mlx5dr_rule_rx_tx *rx = &rule->rx;
-       struct mlx5dr_rule_rx_tx *tx = &rule->tx;
-       u8 format_ver;
-       int ret;
-
-       format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver;
-
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                      "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE,
-                      rule_id, DR_DBG_PTR_TO_ID(rule->matcher));
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       if (rx->nic_matcher) {
-               ret = dr_dump_rule_rx_tx(file, buff, rx, true, rule_id, format_ver);
-               if (ret < 0)
-                       return ret;
-       }
-
-       if (tx->nic_matcher) {
-               ret = dr_dump_rule_rx_tx(file, buff, tx, false, rule_id, format_ver);
-               if (ret < 0)
-                       return ret;
-       }
-
-       list_for_each_entry(action_mem, &rule->rule_actions_list, list) {
-               ret = dr_dump_rule_action_mem(file, buff, rule_id, action_mem);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int
-dr_dump_matcher_mask(struct seq_file *file, char *buff,
-                    struct mlx5dr_match_param *mask,
-                    u8 criteria, const u64 matcher_id)
-{
-       char dump[DR_HEX_SIZE];
-       int ret;
-
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "%d,0x%llx,",
-                      DR_DUMP_REC_TYPE_MATCHER_MASK, matcher_id);
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       if (criteria & DR_MATCHER_CRITERIA_OUTER) {
-               dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer));
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%s,", dump);
-       } else {
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
-       }
-
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       if (criteria & DR_MATCHER_CRITERIA_INNER) {
-               dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner));
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%s,", dump);
-       } else {
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
-       }
-
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       if (criteria & DR_MATCHER_CRITERIA_MISC) {
-               dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc));
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%s,", dump);
-       } else {
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
-       }
-
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       if (criteria & DR_MATCHER_CRITERIA_MISC2) {
-               dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2));
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%s,", dump);
-       } else {
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
-       }
-
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       if (criteria & DR_MATCHER_CRITERIA_MISC3) {
-               dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3));
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%s\n", dump);
-       } else {
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",\n");
-       }
-
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-dr_dump_matcher_builder(struct seq_file *file, char *buff,
-                       struct mlx5dr_ste_build *builder,
-                       u32 index, bool is_rx, const u64 matcher_id)
-{
-       int ret;
-
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                      "%d,0x%llx,%d,%d,0x%x\n",
-                      DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index,
-                      is_rx, builder->lu_type);
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-dr_dump_matcher_rx_tx(struct seq_file *file, char *buff, bool is_rx,
-                     struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
-                     const u64 matcher_id)
-{
-       enum dr_dump_rec_type rec_type;
-       u64 s_icm_addr, e_icm_addr;
-       int i, ret;
-
-       rec_type = is_rx ? DR_DUMP_REC_TYPE_MATCHER_RX :
-                          DR_DUMP_REC_TYPE_MATCHER_TX;
-
-       s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
-       e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                      "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
-                      rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
-                      matcher_id, matcher_rx_tx->num_of_builders,
-                      dr_dump_icm_to_idx(s_icm_addr),
-                      dr_dump_icm_to_idx(e_icm_addr));
-
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
-               ret = dr_dump_matcher_builder(file, buff,
-                                             &matcher_rx_tx->ste_builder[i],
-                                             i, is_rx, matcher_id);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static noinline_for_stack int
-dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
-{
-       struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
-       struct mlx5dr_matcher_rx_tx *tx = &matcher->tx;
-       char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
-       u64 matcher_id;
-       int ret;
-
-       matcher_id = DR_DBG_PTR_TO_ID(matcher);
-
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                      "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
-                      matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl),
-                      matcher->prio);
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       ret = dr_dump_matcher_mask(file, buff, &matcher->mask,
-                                  matcher->match_criteria, matcher_id);
-       if (ret < 0)
-               return ret;
-
-       if (rx->nic_tbl) {
-               ret = dr_dump_matcher_rx_tx(file, buff, true, rx, matcher_id);
-               if (ret < 0)
-                       return ret;
-       }
-
-       if (tx->nic_tbl) {
-               ret = dr_dump_matcher_rx_tx(file, buff, false, tx, matcher_id);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int
-dr_dump_matcher_all(struct seq_file *file, struct mlx5dr_matcher *matcher)
-{
-       struct mlx5dr_rule *rule;
-       int ret;
-
-       ret = dr_dump_matcher(file, matcher);
-       if (ret < 0)
-               return ret;
-
-       list_for_each_entry(rule, &matcher->dbg_rule_list, dbg_node) {
-               ret = dr_dump_rule(file, rule);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int
-dr_dump_table_rx_tx(struct seq_file *file, char *buff, bool is_rx,
-                   struct mlx5dr_table_rx_tx *table_rx_tx,
-                   const u64 table_id)
-{
-       enum dr_dump_rec_type rec_type;
-       u64 s_icm_addr;
-       int ret;
-
-       rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
-                          DR_DUMP_REC_TYPE_TABLE_TX;
-
-       s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                      "%d,0x%llx,0x%llx\n", rec_type, table_id,
-                      dr_dump_icm_to_idx(s_icm_addr));
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static noinline_for_stack int
-dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
-{
-       struct mlx5dr_table_rx_tx *rx = &table->rx;
-       struct mlx5dr_table_rx_tx *tx = &table->tx;
-       char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
-       int ret;
-
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                      "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
-                      DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
-                      table->table_type, table->level);
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       if (rx->nic_dmn) {
-               ret = dr_dump_table_rx_tx(file, buff, true, rx,
-                                         DR_DBG_PTR_TO_ID(table));
-               if (ret < 0)
-                       return ret;
-       }
-
-       if (tx->nic_dmn) {
-               ret = dr_dump_table_rx_tx(file, buff, false, tx,
-                                         DR_DBG_PTR_TO_ID(table));
-               if (ret < 0)
-                       return ret;
-       }
-       return 0;
-}
-
-static int dr_dump_table_all(struct seq_file *file, struct mlx5dr_table *tbl)
-{
-       struct mlx5dr_matcher *matcher;
-       int ret;
-
-       ret = dr_dump_table(file, tbl);
-       if (ret < 0)
-               return ret;
-
-       list_for_each_entry(matcher, &tbl->matcher_list, list_node) {
-               ret = dr_dump_matcher_all(file, matcher);
-               if (ret < 0)
-                       return ret;
-       }
-       return 0;
-}
-
-static int
-dr_dump_send_ring(struct seq_file *file, char *buff,
-                 struct mlx5dr_send_ring *ring,
-                 const u64 domain_id)
-{
-       int ret;
-
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                      "%d,0x%llx,0x%llx,0x%x,0x%x\n",
-                      DR_DUMP_REC_TYPE_DOMAIN_SEND_RING,
-                      DR_DBG_PTR_TO_ID(ring), domain_id,
-                      ring->cq->mcq.cqn, ring->qp->qpn);
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-dr_dump_domain_info_flex_parser(struct seq_file *file,
-                               char *buff,
-                               const char *flex_parser_name,
-                               const u8 flex_parser_value,
-                               const u64 domain_id)
-{
-       int ret;
-
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                      "%d,0x%llx,%s,0x%x\n",
-                      DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
-                      flex_parser_name, flex_parser_value);
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-static int
-dr_dump_domain_info_caps(struct seq_file *file, char *buff,
-                        struct mlx5dr_cmd_caps *caps,
-                        const u64 domain_id)
-{
-       struct mlx5dr_cmd_vport_cap *vport_caps;
-       unsigned long i, vports_num;
-       int ret;
-
-       xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps)
-               ; /* count the number of vports in xarray */
-
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                      "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
-                      DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
-                      caps->nic_rx_drop_address, caps->nic_tx_drop_address,
-                      caps->flex_protocols, vports_num, caps->eswitch_manager);
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) {
-               vport_caps = xa_load(&caps->vports.vports_caps_xa, i);
-
-               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                              "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
-                              DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT,
-                              domain_id, i, vport_caps->vport_gvmi,
-                              vport_caps->icm_address_rx,
-                              vport_caps->icm_address_tx);
-               if (ret < 0)
-                       return ret;
-
-               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-               if (ret)
-                       return ret;
-       }
-       return 0;
-}
-
-static int
-dr_dump_domain_info(struct seq_file *file, char *buff,
-                   struct mlx5dr_domain_info *info,
-                   const u64 domain_id)
-{
-       int ret;
-
-       ret = dr_dump_domain_info_caps(file, buff, &info->caps, domain_id);
-       if (ret < 0)
-               return ret;
-
-       ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw0",
-                                             info->caps.flex_parser_id_icmp_dw0,
-                                             domain_id);
-       if (ret < 0)
-               return ret;
-
-       ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw1",
-                                             info->caps.flex_parser_id_icmp_dw1,
-                                             domain_id);
-       if (ret < 0)
-               return ret;
-
-       ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw0",
-                                             info->caps.flex_parser_id_icmpv6_dw0,
-                                             domain_id);
-       if (ret < 0)
-               return ret;
-
-       ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw1",
-                                             info->caps.flex_parser_id_icmpv6_dw1,
-                                             domain_id);
-       if (ret < 0)
-               return ret;
-
-       return 0;
-}
-
-static noinline_for_stack int
-dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
-{
-       char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
-       u64 domain_id = DR_DBG_PTR_TO_ID(dmn);
-       int ret;
-
-       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
-                      "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
-                      DR_DUMP_REC_TYPE_DOMAIN,
-                      domain_id, dmn->type, dmn->info.caps.gvmi,
-                      dmn->info.supp_sw_steering,
-                      /* package version */
-                      LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
-                      LINUX_VERSION_SUBLEVEL,
-                      pci_name(dmn->mdev->pdev),
-                      0, /* domain flags */
-                      dmn->num_buddies[DR_ICM_TYPE_STE],
-                      dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
-                      dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
-       if (ret < 0)
-               return ret;
-
-       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
-       if (ret)
-               return ret;
-
-       ret = dr_dump_domain_info(file, buff, &dmn->info, domain_id);
-       if (ret < 0)
-               return ret;
-
-       if (dmn->info.supp_sw_steering) {
-               ret = dr_dump_send_ring(file, buff, dmn->send_ring, domain_id);
-               if (ret < 0)
-                       return ret;
-       }
-
-       return 0;
-}
-
-static int dr_dump_domain_all(struct seq_file *file, struct mlx5dr_domain *dmn)
-{
-       struct mlx5dr_table *tbl;
-       int ret;
-
-       mutex_lock(&dmn->dump_info.dbg_mutex);
-       mlx5dr_domain_lock(dmn);
-
-       ret = dr_dump_domain(file, dmn);
-       if (ret < 0)
-               goto unlock_mutex;
-
-       list_for_each_entry(tbl, &dmn->dbg_tbl_list, dbg_node) {
-               ret = dr_dump_table_all(file, tbl);
-               if (ret < 0)
-                       break;
-       }
-
-unlock_mutex:
-       mlx5dr_domain_unlock(dmn);
-       mutex_unlock(&dmn->dump_info.dbg_mutex);
-       return ret;
-}
-
-static void *
-dr_dump_start(struct seq_file *file, loff_t *pos)
-{
-       struct mlx5dr_domain *dmn = file->private;
-       struct mlx5dr_dbg_dump_data *dump_data;
-
-       if (atomic_read(&dmn->dump_info.state) != MLX5DR_DEBUG_DUMP_STATE_FREE) {
-               mlx5_core_warn(dmn->mdev, "Dump already in progress\n");
-               return ERR_PTR(-EBUSY);
-       }
-
-       atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS);
-       dump_data = dmn->dump_info.dump_data;
-
-       if (dump_data) {
-               return seq_list_start(&dump_data->buff_list, *pos);
-       } else if (*pos == 0) {
-               dump_data = mlx5dr_dbg_create_dump_data();
-               if (!dump_data)
-                       goto exit;
-
-               dmn->dump_info.dump_data = dump_data;
-               if (dr_dump_domain_all(file, dmn)) {
-                       mlx5dr_dbg_destroy_dump_data(dump_data);
-                       dmn->dump_info.dump_data = NULL;
-                       goto exit;
-               }
-
-               return seq_list_start(&dump_data->buff_list, *pos);
-       }
-
-exit:
-       atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
-       return NULL;
-}
-
-static void *
-dr_dump_next(struct seq_file *file, void *v, loff_t *pos)
-{
-       struct mlx5dr_domain *dmn = file->private;
-       struct mlx5dr_dbg_dump_data *dump_data;
-
-       dump_data = dmn->dump_info.dump_data;
-
-       return seq_list_next(v, &dump_data->buff_list, pos);
-}
-
-static void
-dr_dump_stop(struct seq_file *file, void *v)
-{
-       struct mlx5dr_domain *dmn = file->private;
-       struct mlx5dr_dbg_dump_data *dump_data;
-
-       if (v && IS_ERR(v))
-               return;
-
-       if (!v) {
-               dump_data = dmn->dump_info.dump_data;
-               if (dump_data) {
-                       mlx5dr_dbg_destroy_dump_data(dump_data);
-                       dmn->dump_info.dump_data = NULL;
-               }
-       }
-
-       atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
-}
-
-static int
-dr_dump_show(struct seq_file *file, void *v)
-{
-       struct mlx5dr_dbg_dump_buff *entry;
-
-       entry = list_entry(v, struct mlx5dr_dbg_dump_buff, node);
-       seq_printf(file, "%s", entry->buff);
-
-       return 0;
-}
-
-static const struct seq_operations dr_dump_sops = {
-       .start  = dr_dump_start,
-       .next   = dr_dump_next,
-       .stop   = dr_dump_stop,
-       .show   = dr_dump_show,
-};
-DEFINE_SEQ_ATTRIBUTE(dr_dump);
-
-void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
-{
-       struct mlx5_core_dev *dev = dmn->mdev;
-       char file_name[128];
-
-       if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
-               mlx5_core_warn(dev,
-                              "Steering dump is not supported for NIC RX/TX domains\n");
-               return;
-       }
-
-       dmn->dump_info.steering_debugfs =
-               debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
-       dmn->dump_info.fdb_debugfs =
-               debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs);
-
-       sprintf(file_name, "dmn_%p", dmn);
-       debugfs_create_file(file_name, 0444, dmn->dump_info.fdb_debugfs,
-                           dmn, &dr_dump_fops);
-
-       INIT_LIST_HEAD(&dmn->dbg_tbl_list);
-       mutex_init(&dmn->dump_info.dbg_mutex);
-}
-
-void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn)
-{
-       debugfs_remove_recursive(dmn->dump_info.steering_debugfs);
-       mutex_destroy(&dmn->dump_info.dbg_mutex);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_dbg.h
deleted file mode 100644 (file)
index 57c6b36..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-
-#define MLX5DR_DEBUG_DUMP_BUFF_SIZE (64 * 1024 * 1024)
-#define MLX5DR_DEBUG_DUMP_BUFF_LENGTH 512
-
-enum {
-       MLX5DR_DEBUG_DUMP_STATE_FREE,
-       MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS,
-};
-
-struct mlx5dr_dbg_dump_buff {
-       char *buff;
-       u32 index;
-       struct list_head node;
-};
-
-struct mlx5dr_dbg_dump_data {
-       struct list_head buff_list;
-};
-
-struct mlx5dr_dbg_dump_info {
-       struct mutex dbg_mutex; /* protect dbg lists */
-       struct dentry *steering_debugfs;
-       struct dentry *fdb_debugfs;
-       struct mlx5dr_dbg_dump_data *dump_data;
-       atomic_t state;
-};
-
-void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
-void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn);
-void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl);
-void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl);
-void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule);
-void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_definer.c
deleted file mode 100644 (file)
index d5ea977..0000000
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-
-#include "dr_types.h"
-#include "dr_ste.h"
-
-struct dr_definer_object {
-       u32 id;
-       u16 format_id;
-       u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM];
-       u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM];
-       u8 match_mask[DR_STE_SIZE_MATCH_TAG];
-       refcount_t refcount;
-};
-
-static bool dr_definer_compare(struct dr_definer_object *definer,
-                              u16 format_id, u8 *dw_selectors,
-                              u8 *byte_selectors, u8 *match_mask)
-{
-       int i;
-
-       if (definer->format_id != format_id)
-               return false;
-
-       for (i = 0; i < MLX5_IFC_DEFINER_DW_SELECTORS_NUM; i++)
-               if (definer->dw_selectors[i] != dw_selectors[i])
-                       return false;
-
-       for (i = 0; i < MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM; i++)
-               if (definer->byte_selectors[i] != byte_selectors[i])
-                       return false;
-
-       if (memcmp(definer->match_mask, match_mask, DR_STE_SIZE_MATCH_TAG))
-               return false;
-
-       return true;
-}
-
-static struct dr_definer_object *
-dr_definer_find_obj(struct mlx5dr_domain *dmn, u16 format_id,
-                   u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
-{
-       struct dr_definer_object *definer_obj;
-       unsigned long id;
-
-       xa_for_each(&dmn->definers_xa, id, definer_obj) {
-               if (dr_definer_compare(definer_obj, format_id,
-                                      dw_selectors, byte_selectors,
-                                      match_mask))
-                       return definer_obj;
-       }
-
-       return NULL;
-}
-
-static struct dr_definer_object *
-dr_definer_create_obj(struct mlx5dr_domain *dmn, u16 format_id,
-                     u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
-{
-       struct dr_definer_object *definer_obj;
-       int ret = 0;
-
-       definer_obj = kzalloc(sizeof(*definer_obj), GFP_KERNEL);
-       if (!definer_obj)
-               return NULL;
-
-       ret = mlx5dr_cmd_create_definer(dmn->mdev,
-                                       format_id,
-                                       dw_selectors,
-                                       byte_selectors,
-                                       match_mask,
-                                       &definer_obj->id);
-       if (ret)
-               goto err_free_definer_obj;
-
-       /* Definer ID can have 32 bits, but STE format
-        * supports only definers with 8 bit IDs.
-        */
-       if (definer_obj->id > 0xff) {
-               mlx5dr_err(dmn, "Unsupported definer ID (%d)\n", definer_obj->id);
-               goto err_destroy_definer;
-       }
-
-       definer_obj->format_id = format_id;
-       memcpy(definer_obj->dw_selectors, dw_selectors, sizeof(definer_obj->dw_selectors));
-       memcpy(definer_obj->byte_selectors, byte_selectors, sizeof(definer_obj->byte_selectors));
-       memcpy(definer_obj->match_mask, match_mask, sizeof(definer_obj->match_mask));
-
-       refcount_set(&definer_obj->refcount, 1);
-
-       ret = xa_insert(&dmn->definers_xa, definer_obj->id, definer_obj, GFP_KERNEL);
-       if (ret) {
-               mlx5dr_dbg(dmn, "Couldn't insert new definer into xarray (%d)\n", ret);
-               goto err_destroy_definer;
-       }
-
-       return definer_obj;
-
-err_destroy_definer:
-       mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
-err_free_definer_obj:
-       kfree(definer_obj);
-
-       return NULL;
-}
-
-static void dr_definer_destroy_obj(struct mlx5dr_domain *dmn,
-                                  struct dr_definer_object *definer_obj)
-{
-       mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
-       xa_erase(&dmn->definers_xa, definer_obj->id);
-       kfree(definer_obj);
-}
-
-int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
-                      u8 *dw_selectors, u8 *byte_selectors,
-                      u8 *match_mask, u32 *definer_id)
-{
-       struct dr_definer_object *definer_obj;
-       int ret = 0;
-
-       definer_obj = dr_definer_find_obj(dmn, format_id, dw_selectors,
-                                         byte_selectors, match_mask);
-       if (!definer_obj) {
-               definer_obj = dr_definer_create_obj(dmn, format_id,
-                                                   dw_selectors, byte_selectors,
-                                                   match_mask);
-               if (!definer_obj)
-                       return -ENOMEM;
-       } else {
-               refcount_inc(&definer_obj->refcount);
-       }
-
-       *definer_id = definer_obj->id;
-
-       return ret;
-}
-
-void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id)
-{
-       struct dr_definer_object *definer_obj;
-
-       definer_obj = xa_load(&dmn->definers_xa, definer_id);
-       if (!definer_obj) {
-               mlx5dr_err(dmn, "Definer ID %d not found\n", definer_id);
-               return;
-       }
-
-       if (refcount_dec_and_test(&definer_obj->refcount))
-               dr_definer_destroy_obj(dmn, definer_obj);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
deleted file mode 100644 (file)
index 3d74109..0000000
+++ /dev/null
@@ -1,579 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#include <linux/mlx5/eswitch.h>
-#include <linux/err.h>
-#include "dr_types.h"
-
-#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
-       ((dmn)->info.caps.dmn_type##_sw_owner ||        \
-        ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&    \
-         (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
-
-bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
-{
-       return dmn->info.caps.sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX &&
-              dmn->info.caps.support_modify_argument;
-}
-
-static int dr_domain_init_modify_header_resources(struct mlx5dr_domain *dmn)
-{
-       if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
-               return 0;
-
-       dmn->ptrn_mgr = mlx5dr_ptrn_mgr_create(dmn);
-       if (!dmn->ptrn_mgr) {
-               mlx5dr_err(dmn, "Couldn't create ptrn_mgr\n");
-               return -ENOMEM;
-       }
-
-       /* create argument pool */
-       dmn->arg_mgr = mlx5dr_arg_mgr_create(dmn);
-       if (!dmn->arg_mgr) {
-               mlx5dr_err(dmn, "Couldn't create arg_mgr\n");
-               goto free_modify_header_pattern;
-       }
-
-       return 0;
-
-free_modify_header_pattern:
-       mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
-       return -ENOMEM;
-}
-
-static void dr_domain_destroy_modify_header_resources(struct mlx5dr_domain *dmn)
-{
-       if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
-               return;
-
-       mlx5dr_arg_mgr_destroy(dmn->arg_mgr);
-       mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
-}
-
-static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
-{
-       /* Per vport cached FW FT for checksum recalculation, this
-        * recalculation is needed due to a HW bug in STEv0.
-        */
-       xa_init(&dmn->csum_fts_xa);
-}
-
-static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
-{
-       struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
-       unsigned long i;
-
-       xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
-               if (recalc_cs_ft)
-                       mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
-       }
-
-       xa_destroy(&dmn->csum_fts_xa);
-}
-
-int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
-                                       u16 vport_num,
-                                       u64 *rx_icm_addr)
-{
-       struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
-       int ret;
-
-       recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
-       if (!recalc_cs_ft) {
-               /* Table hasn't been created yet */
-               recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
-               if (!recalc_cs_ft)
-                       return -EINVAL;
-
-               ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
-                                     recalc_cs_ft, GFP_KERNEL));
-               if (ret)
-                       return ret;
-       }
-
-       *rx_icm_addr = recalc_cs_ft->rx_icm_addr;
-
-       return 0;
-}
-
-static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
-{
-       int ret;
-
-       dmn->chunks_kmem_cache = kmem_cache_create("mlx5_dr_chunks",
-                                                  sizeof(struct mlx5dr_icm_chunk), 0,
-                                                  SLAB_HWCACHE_ALIGN, NULL);
-       if (!dmn->chunks_kmem_cache) {
-               mlx5dr_err(dmn, "Couldn't create chunks kmem_cache\n");
-               return -ENOMEM;
-       }
-
-       dmn->htbls_kmem_cache = kmem_cache_create("mlx5_dr_htbls",
-                                                 sizeof(struct mlx5dr_ste_htbl), 0,
-                                                 SLAB_HWCACHE_ALIGN, NULL);
-       if (!dmn->htbls_kmem_cache) {
-               mlx5dr_err(dmn, "Couldn't create hash tables kmem_cache\n");
-               ret = -ENOMEM;
-               goto free_chunks_kmem_cache;
-       }
-
-       dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
-       if (!dmn->ste_icm_pool) {
-               mlx5dr_err(dmn, "Couldn't get icm memory\n");
-               ret = -ENOMEM;
-               goto free_htbls_kmem_cache;
-       }
-
-       dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
-       if (!dmn->action_icm_pool) {
-               mlx5dr_err(dmn, "Couldn't get action icm memory\n");
-               ret = -ENOMEM;
-               goto free_ste_icm_pool;
-       }
-
-       ret = mlx5dr_send_info_pool_create(dmn);
-       if (ret) {
-               mlx5dr_err(dmn, "Couldn't create send info pool\n");
-               goto free_action_icm_pool;
-       }
-
-       return 0;
-
-free_action_icm_pool:
-       mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
-free_ste_icm_pool:
-       mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
-free_htbls_kmem_cache:
-       kmem_cache_destroy(dmn->htbls_kmem_cache);
-free_chunks_kmem_cache:
-       kmem_cache_destroy(dmn->chunks_kmem_cache);
-
-       return ret;
-}
-
-static void dr_domain_uninit_mem_resources(struct mlx5dr_domain *dmn)
-{
-       mlx5dr_send_info_pool_destroy(dmn);
-       mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
-       mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
-       kmem_cache_destroy(dmn->htbls_kmem_cache);
-       kmem_cache_destroy(dmn->chunks_kmem_cache);
-}
-
-static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
-{
-       int ret;
-
-       dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
-       if (!dmn->ste_ctx) {
-               mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
-               return -EOPNOTSUPP;
-       }
-
-       ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
-       if (ret) {
-               mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
-               return ret;
-       }
-
-       dmn->uar = mlx5_get_uars_page(dmn->mdev);
-       if (IS_ERR(dmn->uar)) {
-               mlx5dr_err(dmn, "Couldn't allocate UAR\n");
-               ret = PTR_ERR(dmn->uar);
-               goto clean_pd;
-       }
-
-       ret = dr_domain_init_mem_resources(dmn);
-       if (ret) {
-               mlx5dr_err(dmn, "Couldn't create domain memory resources\n");
-               goto clean_uar;
-       }
-
-       ret = dr_domain_init_modify_header_resources(dmn);
-       if (ret) {
-               mlx5dr_err(dmn, "Couldn't create modify-header-resources\n");
-               goto clean_mem_resources;
-       }
-
-       ret = mlx5dr_send_ring_alloc(dmn);
-       if (ret) {
-               mlx5dr_err(dmn, "Couldn't create send-ring\n");
-               goto clean_modify_hdr;
-       }
-
-       return 0;
-
-clean_modify_hdr:
-       dr_domain_destroy_modify_header_resources(dmn);
-clean_mem_resources:
-       dr_domain_uninit_mem_resources(dmn);
-clean_uar:
-       mlx5_put_uars_page(dmn->mdev, dmn->uar);
-clean_pd:
-       mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
-
-       return ret;
-}
-
-static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
-{
-       mlx5dr_send_ring_free(dmn, dmn->send_ring);
-       dr_domain_destroy_modify_header_resources(dmn);
-       dr_domain_uninit_mem_resources(dmn);
-       mlx5_put_uars_page(dmn->mdev, dmn->uar);
-       mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
-}
-
-static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
-                                      struct mlx5dr_cmd_vport_cap *uplink_vport)
-{
-       struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
-
-       uplink_vport->num = MLX5_VPORT_UPLINK;
-       uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
-       uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
-       uplink_vport->vport_gvmi = 0;
-       uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
-}
-
-static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
-                                u16 vport_number,
-                                bool other_vport,
-                                struct mlx5dr_cmd_vport_cap *vport_caps)
-{
-       int ret;
-
-       ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
-                                                other_vport,
-                                                vport_number,
-                                                &vport_caps->icm_address_rx,
-                                                &vport_caps->icm_address_tx);
-       if (ret)
-               return ret;
-
-       ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
-                                   other_vport,
-                                   vport_number,
-                                   &vport_caps->vport_gvmi);
-       if (ret)
-               return ret;
-
-       vport_caps->num = vport_number;
-       vport_caps->vhca_gvmi = dmn->info.caps.gvmi;
-
-       return 0;
-}
-
-static int dr_domain_query_esw_mgr(struct mlx5dr_domain *dmn)
-{
-       return dr_domain_query_vport(dmn, 0, false,
-                                    &dmn->info.caps.vports.esw_manager_caps);
-}
-
-static void dr_domain_query_uplink(struct mlx5dr_domain *dmn)
-{
-       dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps);
-}
-
-static struct mlx5dr_cmd_vport_cap *
-dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
-{
-       struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
-       struct mlx5dr_cmd_vport_cap *vport_caps;
-       int ret;
-
-       vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
-       if (!vport_caps)
-               return NULL;
-
-       ret = dr_domain_query_vport(dmn, vport, true, vport_caps);
-       if (ret) {
-               kvfree(vport_caps);
-               return NULL;
-       }
-
-       ret = xa_insert(&caps->vports.vports_caps_xa, vport,
-                       vport_caps, GFP_KERNEL);
-       if (ret) {
-               mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
-               kvfree(vport_caps);
-               return ERR_PTR(ret);
-       }
-
-       return vport_caps;
-}
-
-static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport)
-{
-       struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
-
-       return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
-              (!caps->is_ecpf && vport == 0);
-}
-
-struct mlx5dr_cmd_vport_cap *
-mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
-{
-       struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
-       struct mlx5dr_cmd_vport_cap *vport_caps;
-
-       if (dr_domain_is_esw_mgr_vport(dmn, vport))
-               return &caps->vports.esw_manager_caps;
-
-       if (vport == MLX5_VPORT_UPLINK)
-               return &caps->vports.uplink_caps;
-
-vport_load:
-       vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
-       if (vport_caps)
-               return vport_caps;
-
-       vport_caps = dr_domain_add_vport_cap(dmn, vport);
-       if (PTR_ERR(vport_caps) == -EBUSY)
-               /* caps were already stored by another thread */
-               goto vport_load;
-
-       return vport_caps;
-}
-
-static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
-{
-       struct mlx5dr_cmd_vport_cap *vport_caps;
-       unsigned long i;
-
-       xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
-               vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
-               kvfree(vport_caps);
-       }
-}
-
-static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
-                                   struct mlx5dr_domain *dmn)
-{
-       int ret;
-
-       if (!dmn->info.caps.eswitch_manager)
-               return -EOPNOTSUPP;
-
-       ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
-       if (ret)
-               return ret;
-
-       dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
-       dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
-       dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
-       dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
-
-       xa_init(&dmn->info.caps.vports.vports_caps_xa);
-
-       /* Query eswitch manager and uplink vports only. Rest of the
-        * vports (vport 0, VFs and SFs) will be queried dynamically.
-        */
-
-       ret = dr_domain_query_esw_mgr(dmn);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
-               goto free_vports_caps_xa;
-       }
-
-       dr_domain_query_uplink(dmn);
-
-       return 0;
-
-free_vports_caps_xa:
-       xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
-
-       return ret;
-}
-
-static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
-                              struct mlx5dr_domain *dmn)
-{
-       struct mlx5dr_cmd_vport_cap *vport_cap;
-       int ret;
-
-       if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
-               mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
-               return -EOPNOTSUPP;
-       }
-
-       ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
-       if (ret)
-               return ret;
-
-       ret = dr_domain_query_fdb_caps(mdev, dmn);
-       if (ret)
-               return ret;
-
-       switch (dmn->type) {
-       case MLX5DR_DOMAIN_TYPE_NIC_RX:
-               if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
-                       return -ENOTSUPP;
-
-               dmn->info.supp_sw_steering = true;
-               dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
-               dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
-               dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
-               break;
-       case MLX5DR_DOMAIN_TYPE_NIC_TX:
-               if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
-                       return -ENOTSUPP;
-
-               dmn->info.supp_sw_steering = true;
-               dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
-               dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
-               dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
-               break;
-       case MLX5DR_DOMAIN_TYPE_FDB:
-               if (!dmn->info.caps.eswitch_manager)
-                       return -ENOTSUPP;
-
-               if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
-                       return -ENOTSUPP;
-
-               dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
-               dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
-               vport_cap = &dmn->info.caps.vports.esw_manager_caps;
-
-               dmn->info.supp_sw_steering = true;
-               dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
-               dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
-               dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
-               dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
-               break;
-       default:
-               mlx5dr_err(dmn, "Invalid domain\n");
-               ret = -EINVAL;
-               break;
-       }
-
-       return ret;
-}
-
-static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
-{
-       dr_domain_clear_vports(dmn);
-       xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
-}
-
-struct mlx5dr_domain *
-mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
-{
-       struct mlx5dr_domain *dmn;
-       int ret;
-
-       if (type > MLX5DR_DOMAIN_TYPE_FDB)
-               return NULL;
-
-       dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
-       if (!dmn)
-               return NULL;
-
-       dmn->mdev = mdev;
-       dmn->type = type;
-       refcount_set(&dmn->refcount, 1);
-       mutex_init(&dmn->info.rx.mutex);
-       mutex_init(&dmn->info.tx.mutex);
-       xa_init(&dmn->definers_xa);
-       xa_init(&dmn->peer_dmn_xa);
-
-       if (dr_domain_caps_init(mdev, dmn)) {
-               mlx5dr_err(dmn, "Failed init domain, no caps\n");
-               goto def_xa_destroy;
-       }
-
-       dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
-       dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
-                                           dmn->info.caps.log_icm_size);
-       dmn->info.max_log_modify_hdr_pattern_icm_sz =
-               min_t(u32, DR_CHUNK_SIZE_4K,
-                     dmn->info.caps.log_modify_pattern_icm_size);
-
-       if (!dmn->info.supp_sw_steering) {
-               mlx5dr_err(dmn, "SW steering is not supported\n");
-               goto uninit_caps;
-       }
-
-       /* Allocate resources */
-       ret = dr_domain_init_resources(dmn);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed init domain resources\n");
-               goto uninit_caps;
-       }
-
-       dr_domain_init_csum_recalc_fts(dmn);
-       mlx5dr_dbg_init_dump(dmn);
-       return dmn;
-
-uninit_caps:
-       dr_domain_caps_uninit(dmn);
-def_xa_destroy:
-       xa_destroy(&dmn->peer_dmn_xa);
-       xa_destroy(&dmn->definers_xa);
-       kfree(dmn);
-       return NULL;
-}
-
-/* Assure synchronization of the device steering tables with updates made by SW
- * insertion.
- */
-int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
-{
-       int ret = 0;
-
-       if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
-               mlx5dr_domain_lock(dmn);
-               ret = mlx5dr_send_ring_force_drain(dmn);
-               mlx5dr_domain_unlock(dmn);
-               if (ret) {
-                       mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
-                                  flags, ret);
-                       return ret;
-               }
-       }
-
-       if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
-               ret = mlx5dr_cmd_sync_steering(dmn->mdev);
-
-       return ret;
-}
-
-int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
-{
-       if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
-               return -EBUSY;
-
-       /* make sure resources are not used by the hardware */
-       mlx5dr_cmd_sync_steering(dmn->mdev);
-       mlx5dr_dbg_uninit_dump(dmn);
-       dr_domain_uninit_csum_recalc_fts(dmn);
-       dr_domain_uninit_resources(dmn);
-       dr_domain_caps_uninit(dmn);
-       xa_destroy(&dmn->peer_dmn_xa);
-       xa_destroy(&dmn->definers_xa);
-       mutex_destroy(&dmn->info.tx.mutex);
-       mutex_destroy(&dmn->info.rx.mutex);
-       kfree(dmn);
-       return 0;
-}
-
-void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
-                           struct mlx5dr_domain *peer_dmn,
-                           u16 peer_vhca_id)
-{
-       struct mlx5dr_domain *peer;
-
-       mlx5dr_domain_lock(dmn);
-
-       peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
-       if (peer)
-               refcount_dec(&peer->refcount);
-
-       WARN_ON(xa_err(xa_store(&dmn->peer_dmn_xa, peer_vhca_id, peer_dmn, GFP_KERNEL)));
-
-       peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
-       if (peer)
-               refcount_inc(&peer->refcount);
-
-       mlx5dr_domain_unlock(dmn);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
deleted file mode 100644 (file)
index f05ef0c..0000000
+++ /dev/null
@@ -1,171 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#include <linux/types.h>
-#include "dr_types.h"
-
-struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num)
-{
-       struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
-       struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
-       u32 table_id, group_id, modify_hdr_id;
-       u64 rx_icm_addr, modify_ttl_action;
-       int ret;
-
-       recalc_cs_ft = kzalloc(sizeof(*recalc_cs_ft), GFP_KERNEL);
-       if (!recalc_cs_ft)
-               return NULL;
-
-       ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
-       ft_attr.level = dmn->info.caps.max_ft_level - 1;
-       ft_attr.term_tbl = true;
-
-       ret = mlx5dr_cmd_create_flow_table(dmn->mdev,
-                                          &ft_attr,
-                                          &rx_icm_addr,
-                                          &table_id);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed creating TTL W/A FW flow table %d\n", ret);
-               goto free_ttl_tbl;
-       }
-
-       ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,
-                                                MLX5_FLOW_TABLE_TYPE_FDB,
-                                                table_id, &group_id);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed creating TTL W/A FW flow group %d\n", ret);
-               goto destroy_flow_table;
-       }
-
-       /* Modify TTL action by adding zero to trigger CS recalculation */
-       modify_ttl_action = 0;
-       MLX5_SET(set_action_in, &modify_ttl_action, action_type, MLX5_ACTION_TYPE_ADD);
-       MLX5_SET(set_action_in, &modify_ttl_action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
-
-       ret = mlx5dr_cmd_alloc_modify_header(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB, 1,
-                                            &modify_ttl_action,
-                                            &modify_hdr_id);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed modify header TTL %d\n", ret);
-               goto destroy_flow_group;
-       }
-
-       ret = mlx5dr_cmd_set_fte_modify_and_vport(dmn->mdev,
-                                                 MLX5_FLOW_TABLE_TYPE_FDB,
-                                                 table_id, group_id, modify_hdr_id,
-                                                 vport_num);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed setting TTL W/A flow table entry %d\n", ret);
-               goto dealloc_modify_header;
-       }
-
-       recalc_cs_ft->modify_hdr_id = modify_hdr_id;
-       recalc_cs_ft->rx_icm_addr = rx_icm_addr;
-       recalc_cs_ft->table_id = table_id;
-       recalc_cs_ft->group_id = group_id;
-
-       return recalc_cs_ft;
-
-dealloc_modify_header:
-       mlx5dr_cmd_dealloc_modify_header(dmn->mdev, modify_hdr_id);
-destroy_flow_group:
-       mlx5dr_cmd_destroy_flow_group(dmn->mdev,
-                                     MLX5_FLOW_TABLE_TYPE_FDB,
-                                     table_id, group_id);
-destroy_flow_table:
-       mlx5dr_cmd_destroy_flow_table(dmn->mdev, table_id, MLX5_FLOW_TABLE_TYPE_FDB);
-free_ttl_tbl:
-       kfree(recalc_cs_ft);
-       return NULL;
-}
-
-void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
-                                   struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft)
-{
-       mlx5dr_cmd_del_flow_table_entry(dmn->mdev,
-                                       MLX5_FLOW_TABLE_TYPE_FDB,
-                                       recalc_cs_ft->table_id);
-       mlx5dr_cmd_dealloc_modify_header(dmn->mdev, recalc_cs_ft->modify_hdr_id);
-       mlx5dr_cmd_destroy_flow_group(dmn->mdev,
-                                     MLX5_FLOW_TABLE_TYPE_FDB,
-                                     recalc_cs_ft->table_id,
-                                     recalc_cs_ft->group_id);
-       mlx5dr_cmd_destroy_flow_table(dmn->mdev,
-                                     recalc_cs_ft->table_id,
-                                     MLX5_FLOW_TABLE_TYPE_FDB);
-
-       kfree(recalc_cs_ft);
-}
-
-int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
-                           struct mlx5dr_cmd_flow_destination_hw_info *dest,
-                           int num_dest,
-                           bool reformat_req,
-                           u32 *tbl_id,
-                           u32 *group_id,
-                           bool ignore_flow_level,
-                           u32 flow_source)
-{
-       struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
-       struct mlx5dr_cmd_fte_info fte_info = {};
-       u32 val[MLX5_ST_SZ_DW_MATCH_PARAM] = {};
-       struct mlx5dr_cmd_ft_info ft_info = {};
-       int ret;
-
-       ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
-       ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
-                             MLX5_FT_MAX_MULTIPATH_LEVEL);
-       ft_attr.reformat_en = reformat_req;
-       ft_attr.decap_en = reformat_req;
-
-       ret = mlx5dr_cmd_create_flow_table(dmn->mdev, &ft_attr, NULL, tbl_id);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed creating multi dest FW flow table %d\n", ret);
-               return ret;
-       }
-
-       ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,
-                                                MLX5_FLOW_TABLE_TYPE_FDB,
-                                                *tbl_id, group_id);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed creating multi dest FW flow group %d\n", ret);
-               goto free_flow_table;
-       }
-
-       ft_info.id = *tbl_id;
-       ft_info.type = FS_FT_FDB;
-       fte_info.action.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
-       fte_info.dests_size = num_dest;
-       fte_info.val = val;
-       fte_info.dest_arr = dest;
-       fte_info.ignore_flow_level = ignore_flow_level;
-       fte_info.flow_context.flow_source = flow_source;
-
-       ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed setting fte into table %d\n", ret);
-               goto free_flow_group;
-       }
-
-       return 0;
-
-free_flow_group:
-       mlx5dr_cmd_destroy_flow_group(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
-                                     *tbl_id, *group_id);
-free_flow_table:
-       mlx5dr_cmd_destroy_flow_table(dmn->mdev, *tbl_id,
-                                     MLX5_FLOW_TABLE_TYPE_FDB);
-       return ret;
-}
-
-void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn,
-                             u32 tbl_id, u32 group_id)
-{
-       mlx5dr_cmd_del_flow_table_entry(dmn->mdev, FS_FT_FDB, tbl_id);
-       mlx5dr_cmd_destroy_flow_group(dmn->mdev,
-                                     MLX5_FLOW_TABLE_TYPE_FDB,
-                                     tbl_id, group_id);
-       mlx5dr_cmd_destroy_flow_table(dmn->mdev, tbl_id,
-                                     MLX5_FLOW_TABLE_TYPE_FDB);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
deleted file mode 100644 (file)
index 0b5af9f..0000000
+++ /dev/null
@@ -1,576 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#include "dr_types.h"
-
-#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
-#define DR_ICM_POOL_STE_HOT_MEM_PERCENT 25
-#define DR_ICM_POOL_MODIFY_HDR_PTRN_HOT_MEM_PERCENT 50
-#define DR_ICM_POOL_MODIFY_ACTION_HOT_MEM_PERCENT 90
-
-struct mlx5dr_icm_hot_chunk {
-       struct mlx5dr_icm_buddy_mem *buddy_mem;
-       unsigned int seg;
-       enum mlx5dr_icm_chunk_size size;
-};
-
-struct mlx5dr_icm_pool {
-       enum mlx5dr_icm_type icm_type;
-       enum mlx5dr_icm_chunk_size max_log_chunk_sz;
-       struct mlx5dr_domain *dmn;
-       struct kmem_cache *chunks_kmem_cache;
-
-       /* memory management */
-       struct mutex mutex; /* protect the ICM pool and ICM buddy */
-       struct list_head buddy_mem_list;
-
-       /* Hardware may be accessing this memory but at some future,
-        * undetermined time, it might cease to do so.
-        * sync_ste command sets them free.
-        */
-       struct mlx5dr_icm_hot_chunk *hot_chunks_arr;
-       u32 hot_chunks_num;
-       u64 hot_memory_size;
-       /* hot memory size threshold for triggering sync */
-       u64 th;
-};
-
-struct mlx5dr_icm_dm {
-       u32 obj_id;
-       enum mlx5_sw_icm_type type;
-       phys_addr_t addr;
-       size_t length;
-};
-
-struct mlx5dr_icm_mr {
-       u32 mkey;
-       struct mlx5dr_icm_dm dm;
-       struct mlx5dr_domain *dmn;
-       size_t length;
-       u64 icm_start_addr;
-};
-
-static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
-                                u32 pd, u64 length, u64 start_addr, int mode,
-                                u32 *mkey)
-{
-       u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
-       u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
-       void *mkc;
-
-       mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
-
-       MLX5_SET(mkc, mkc, access_mode_1_0, mode);
-       MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
-       MLX5_SET(mkc, mkc, lw, 1);
-       MLX5_SET(mkc, mkc, lr, 1);
-       if (mode == MLX5_MKC_ACCESS_MODE_SW_ICM) {
-               MLX5_SET(mkc, mkc, rw, 1);
-               MLX5_SET(mkc, mkc, rr, 1);
-       }
-
-       MLX5_SET64(mkc, mkc, len, length);
-       MLX5_SET(mkc, mkc, pd, pd);
-       MLX5_SET(mkc, mkc, qpn, 0xffffff);
-       MLX5_SET64(mkc, mkc, start_addr, start_addr);
-
-       return mlx5_core_create_mkey(mdev, mkey, in, inlen);
-}
-
-u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
-{
-       u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
-
-       return (u64)offset * chunk->seg;
-}
-
-u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
-{
-       return chunk->buddy_mem->icm_mr->mkey;
-}
-
-u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
-{
-       u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
-
-       return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
-}
-
-u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
-{
-       return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
-                       chunk->buddy_mem->pool->icm_type);
-}
-
-u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
-{
-       return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
-}
-
-static struct mlx5dr_icm_mr *
-dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
-{
-       struct mlx5_core_dev *mdev = pool->dmn->mdev;
-       enum mlx5_sw_icm_type dm_type = 0;
-       struct mlx5dr_icm_mr *icm_mr;
-       size_t log_align_base = 0;
-       int err;
-
-       icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
-       if (!icm_mr)
-               return NULL;
-
-       icm_mr->dmn = pool->dmn;
-
-       icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
-                                                              pool->icm_type);
-
-       switch (pool->icm_type) {
-       case DR_ICM_TYPE_STE:
-               dm_type = MLX5_SW_ICM_TYPE_STEERING;
-               log_align_base = ilog2(icm_mr->dm.length);
-               break;
-       case DR_ICM_TYPE_MODIFY_ACTION:
-               dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
-               /* Align base is 64B */
-               log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
-               break;
-       case DR_ICM_TYPE_MODIFY_HDR_PTRN:
-               dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
-               /* Align base is 64B */
-               log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
-               break;
-       default:
-               WARN_ON(pool->icm_type);
-       }
-
-       icm_mr->dm.type = dm_type;
-
-       err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
-                                  log_align_base, 0, &icm_mr->dm.addr,
-                                  &icm_mr->dm.obj_id);
-       if (err) {
-               mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
-               goto free_icm_mr;
-       }
-
-       /* Register device memory */
-       err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
-                                   icm_mr->dm.length,
-                                   icm_mr->dm.addr,
-                                   MLX5_MKC_ACCESS_MODE_SW_ICM,
-                                   &icm_mr->mkey);
-       if (err) {
-               mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
-               goto free_dm;
-       }
-
-       icm_mr->icm_start_addr = icm_mr->dm.addr;
-
-       if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) {
-               mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n",
-                          log_align_base);
-               goto free_mkey;
-       }
-
-       return icm_mr;
-
-free_mkey:
-       mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
-free_dm:
-       mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
-                              icm_mr->dm.addr, icm_mr->dm.obj_id);
-free_icm_mr:
-       kvfree(icm_mr);
-       return NULL;
-}
-
-static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
-{
-       struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;
-       struct mlx5dr_icm_dm *dm = &icm_mr->dm;
-
-       mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
-       mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
-                              dm->addr, dm->obj_id);
-       kvfree(icm_mr);
-}
-
-static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
-{
-       /* We support only one type of STE size, both for ConnectX-5 and later
-        * devices. Once the support for match STE which has a larger tag is
-        * added (32B instead of 16B), the STE size for devices later than
-        * ConnectX-5 needs to account for that.
-        */
-       return DR_STE_SIZE_REDUCED;
-}
-
-static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
-{
-       int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
-       struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
-       int ste_size = dr_icm_buddy_get_ste_size(buddy);
-       int index = offset / DR_STE_SIZE;
-
-       chunk->ste_arr = &buddy->ste_arr[index];
-       chunk->miss_list = &buddy->miss_list[index];
-       chunk->hw_ste_arr = buddy->hw_ste_arr + index * ste_size;
-
-       memset(chunk->hw_ste_arr, 0, num_of_entries * ste_size);
-       memset(chunk->ste_arr, 0,
-              num_of_entries * sizeof(chunk->ste_arr[0]));
-}
-
-static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
-{
-       int num_of_entries =
-               mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
-
-       buddy->ste_arr = kvcalloc(num_of_entries,
-                                 sizeof(struct mlx5dr_ste), GFP_KERNEL);
-       if (!buddy->ste_arr)
-               return -ENOMEM;
-
-       /* Preallocate full STE size on non-ConnectX-5 devices since
-        * we need to support both full and reduced with the same cache.
-        */
-       buddy->hw_ste_arr = kvcalloc(num_of_entries,
-                                    dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
-       if (!buddy->hw_ste_arr)
-               goto free_ste_arr;
-
-       buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
-       if (!buddy->miss_list)
-               goto free_hw_ste_arr;
-
-       return 0;
-
-free_hw_ste_arr:
-       kvfree(buddy->hw_ste_arr);
-free_ste_arr:
-       kvfree(buddy->ste_arr);
-       return -ENOMEM;
-}
-
-static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
-{
-       kvfree(buddy->ste_arr);
-       kvfree(buddy->hw_ste_arr);
-       kvfree(buddy->miss_list);
-}
-
-static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
-{
-       struct mlx5dr_icm_buddy_mem *buddy;
-       struct mlx5dr_icm_mr *icm_mr;
-
-       icm_mr = dr_icm_pool_mr_create(pool);
-       if (!icm_mr)
-               return -ENOMEM;
-
-       buddy = kvzalloc(sizeof(*buddy), GFP_KERNEL);
-       if (!buddy)
-               goto free_mr;
-
-       if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz))
-               goto err_free_buddy;
-
-       buddy->icm_mr = icm_mr;
-       buddy->pool = pool;
-
-       if (pool->icm_type == DR_ICM_TYPE_STE) {
-               /* Reduce allocations by preallocating and reusing the STE structures */
-               if (dr_icm_buddy_init_ste_cache(buddy))
-                       goto err_cleanup_buddy;
-       }
-
-       /* add it to the -start- of the list in order to search in it first */
-       list_add(&buddy->list_node, &pool->buddy_mem_list);
-
-       pool->dmn->num_buddies[pool->icm_type]++;
-
-       return 0;
-
-err_cleanup_buddy:
-       mlx5dr_buddy_cleanup(buddy);
-err_free_buddy:
-       kvfree(buddy);
-free_mr:
-       dr_icm_pool_mr_destroy(icm_mr);
-       return -ENOMEM;
-}
-
-static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
-{
-       enum mlx5dr_icm_type icm_type = buddy->pool->icm_type;
-
-       dr_icm_pool_mr_destroy(buddy->icm_mr);
-
-       mlx5dr_buddy_cleanup(buddy);
-
-       if (icm_type == DR_ICM_TYPE_STE)
-               dr_icm_buddy_cleanup_ste_cache(buddy);
-
-       buddy->pool->dmn->num_buddies[icm_type]--;
-
-       kvfree(buddy);
-}
-
-static void
-dr_icm_chunk_init(struct mlx5dr_icm_chunk *chunk,
-                 struct mlx5dr_icm_pool *pool,
-                 enum mlx5dr_icm_chunk_size chunk_size,
-                 struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
-                 unsigned int seg)
-{
-       int offset;
-
-       chunk->seg = seg;
-       chunk->size = chunk_size;
-       chunk->buddy_mem = buddy_mem_pool;
-
-       if (pool->icm_type == DR_ICM_TYPE_STE) {
-               offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
-               dr_icm_chunk_ste_init(chunk, offset);
-       }
-
-       buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
-}
-
-static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
-{
-       return pool->hot_memory_size > pool->th;
-}
-
-static void dr_icm_pool_clear_hot_chunks_arr(struct mlx5dr_icm_pool *pool)
-{
-       struct mlx5dr_icm_hot_chunk *hot_chunk;
-       u32 i, num_entries;
-
-       for (i = 0; i < pool->hot_chunks_num; i++) {
-               hot_chunk = &pool->hot_chunks_arr[i];
-               num_entries = mlx5dr_icm_pool_chunk_size_to_entries(hot_chunk->size);
-               mlx5dr_buddy_free_mem(hot_chunk->buddy_mem,
-                                     hot_chunk->seg, ilog2(num_entries));
-               hot_chunk->buddy_mem->used_memory -=
-                       mlx5dr_icm_pool_chunk_size_to_byte(hot_chunk->size,
-                                                          pool->icm_type);
-       }
-
-       pool->hot_chunks_num = 0;
-       pool->hot_memory_size = 0;
-}
-
-static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
-{
-       struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
-       int err;
-
-       err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
-       if (err) {
-               mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err);
-               return err;
-       }
-
-       dr_icm_pool_clear_hot_chunks_arr(pool);
-
-       list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
-               if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE)
-                       dr_icm_buddy_destroy(buddy);
-       }
-
-       return 0;
-}
-
-static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool,
-                                        enum mlx5dr_icm_chunk_size chunk_size,
-                                        struct mlx5dr_icm_buddy_mem **buddy,
-                                        unsigned int *seg)
-{
-       struct mlx5dr_icm_buddy_mem *buddy_mem_pool;
-       bool new_mem = false;
-       int err;
-
-alloc_buddy_mem:
-       /* find the next free place from the buddy list */
-       list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) {
-               err = mlx5dr_buddy_alloc_mem(buddy_mem_pool,
-                                            chunk_size, seg);
-               if (!err)
-                       goto found;
-
-               if (WARN_ON(new_mem)) {
-                       /* We have new memory pool, first in the list */
-                       mlx5dr_err(pool->dmn,
-                                  "No memory for order: %d\n",
-                                  chunk_size);
-                       goto out;
-               }
-       }
-
-       /* no more available allocators in that pool, create new */
-       err = dr_icm_buddy_create(pool);
-       if (err) {
-               mlx5dr_err(pool->dmn,
-                          "Failed creating buddy for order %d\n",
-                          chunk_size);
-               goto out;
-       }
-
-       /* mark we have new memory, first in list */
-       new_mem = true;
-       goto alloc_buddy_mem;
-
-found:
-       *buddy = buddy_mem_pool;
-out:
-       return err;
-}
-
-/* Allocate an ICM chunk, each chunk holds a piece of ICM memory and
- * also memory used for HW STE management for optimizations.
- */
-struct mlx5dr_icm_chunk *
-mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
-                      enum mlx5dr_icm_chunk_size chunk_size)
-{
-       struct mlx5dr_icm_chunk *chunk = NULL;
-       struct mlx5dr_icm_buddy_mem *buddy;
-       unsigned int seg;
-       int ret;
-
-       if (chunk_size > pool->max_log_chunk_sz)
-               return NULL;
-
-       mutex_lock(&pool->mutex);
-       /* find mem, get back the relevant buddy pool and seg in that mem */
-       ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg);
-       if (ret)
-               goto out;
-
-       chunk = kmem_cache_alloc(pool->chunks_kmem_cache, GFP_KERNEL);
-       if (!chunk)
-               goto out_err;
-
-       dr_icm_chunk_init(chunk, pool, chunk_size, buddy, seg);
-
-       goto out;
-
-out_err:
-       mlx5dr_buddy_free_mem(buddy, seg, chunk_size);
-out:
-       mutex_unlock(&pool->mutex);
-       return chunk;
-}
-
-void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
-{
-       struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
-       struct mlx5dr_icm_pool *pool = buddy->pool;
-       struct mlx5dr_icm_hot_chunk *hot_chunk;
-       struct kmem_cache *chunks_cache;
-
-       chunks_cache = pool->chunks_kmem_cache;
-
-       /* move the chunk to the waiting chunks array, AKA "hot" memory */
-       mutex_lock(&pool->mutex);
-
-       pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
-
-       hot_chunk = &pool->hot_chunks_arr[pool->hot_chunks_num++];
-       hot_chunk->buddy_mem = chunk->buddy_mem;
-       hot_chunk->seg = chunk->seg;
-       hot_chunk->size = chunk->size;
-
-       kmem_cache_free(chunks_cache, chunk);
-
-       /* Check if we have chunks that are waiting for sync-ste */
-       if (dr_icm_pool_is_sync_required(pool))
-               dr_icm_pool_sync_all_buddy_pools(pool);
-
-       mutex_unlock(&pool->mutex);
-}
-
-struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool)
-{
-       return kmem_cache_alloc(pool->dmn->htbls_kmem_cache, GFP_KERNEL);
-}
-
-void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl)
-{
-       kmem_cache_free(pool->dmn->htbls_kmem_cache, htbl);
-}
-
-struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
-                                              enum mlx5dr_icm_type icm_type)
-{
-       u32 num_of_chunks, entry_size;
-       struct mlx5dr_icm_pool *pool;
-       u32 max_hot_size = 0;
-
-       pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
-       if (!pool)
-               return NULL;
-
-       pool->dmn = dmn;
-       pool->icm_type = icm_type;
-       pool->chunks_kmem_cache = dmn->chunks_kmem_cache;
-
-       INIT_LIST_HEAD(&pool->buddy_mem_list);
-       mutex_init(&pool->mutex);
-
-       switch (icm_type) {
-       case DR_ICM_TYPE_STE:
-               pool->max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
-               max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
-                                                                 pool->icm_type) *
-                              DR_ICM_POOL_STE_HOT_MEM_PERCENT / 100;
-               break;
-       case DR_ICM_TYPE_MODIFY_ACTION:
-               pool->max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
-               max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
-                                                                 pool->icm_type) *
-                              DR_ICM_POOL_MODIFY_ACTION_HOT_MEM_PERCENT / 100;
-               break;
-       case DR_ICM_TYPE_MODIFY_HDR_PTRN:
-               pool->max_log_chunk_sz = dmn->info.max_log_modify_hdr_pattern_icm_sz;
-               max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
-                                                                 pool->icm_type) *
-                              DR_ICM_POOL_MODIFY_HDR_PTRN_HOT_MEM_PERCENT / 100;
-               break;
-       default:
-               WARN_ON(icm_type);
-       }
-
-       entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type);
-
-       num_of_chunks = DIV_ROUND_UP(max_hot_size, entry_size) + 1;
-       pool->th = max_hot_size;
-
-       pool->hot_chunks_arr = kvcalloc(num_of_chunks,
-                                       sizeof(struct mlx5dr_icm_hot_chunk),
-                                       GFP_KERNEL);
-       if (!pool->hot_chunks_arr)
-               goto free_pool;
-
-       return pool;
-
-free_pool:
-       kvfree(pool);
-       return NULL;
-}
-
-void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
-{
-       struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
-
-       dr_icm_pool_clear_hot_chunks_arr(pool);
-
-       list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node)
-               dr_icm_buddy_destroy(buddy);
-
-       kvfree(pool->hot_chunks_arr);
-       mutex_destroy(&pool->mutex);
-       kvfree(pool);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
deleted file mode 100644 (file)
index 0726848..0000000
+++ /dev/null
@@ -1,1108 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#include "dr_types.h"
-
-static bool dr_mask_is_smac_set(struct mlx5dr_match_spec *spec)
-{
-       return (spec->smac_47_16 || spec->smac_15_0);
-}
-
-static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
-{
-       return (spec->dmac_47_16 || spec->dmac_15_0);
-}
-
-static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
-{
-       return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
-               spec->ip_ecn || spec->ip_dscp);
-}
-
-static bool dr_mask_is_tcp_udp_base_set(struct mlx5dr_match_spec *spec)
-{
-       return (spec->tcp_sport || spec->tcp_dport ||
-               spec->udp_sport || spec->udp_dport);
-}
-
-static bool dr_mask_is_ipv4_set(struct mlx5dr_match_spec *spec)
-{
-       return (spec->dst_ip_31_0 || spec->src_ip_31_0);
-}
-
-static bool dr_mask_is_ipv4_5_tuple_set(struct mlx5dr_match_spec *spec)
-{
-       return (dr_mask_is_l3_base_set(spec) ||
-               dr_mask_is_tcp_udp_base_set(spec) ||
-               dr_mask_is_ipv4_set(spec));
-}
-
-static bool dr_mask_is_eth_l2_tnl_set(struct mlx5dr_match_misc *misc)
-{
-       return misc->vxlan_vni;
-}
-
-static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
-{
-       return spec->ttl_hoplimit;
-}
-
-static bool dr_mask_is_ipv4_ihl_set(struct mlx5dr_match_spec *spec)
-{
-       return spec->ipv4_ihl;
-}
-
-#define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) (_spec.first_vid || \
-       (_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \
-       (_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \
-       (_spec).ethertype || (_spec).ip_version || \
-       (_misc)._inner_outer##_second_vid || \
-       (_misc)._inner_outer##_second_cfi || \
-       (_misc)._inner_outer##_second_prio || \
-       (_misc)._inner_outer##_second_cvlan_tag || \
-       (_misc)._inner_outer##_second_svlan_tag)
-
-#define DR_MASK_IS_ETH_L4_SET(_spec, _misc, _inner_outer) ( \
-       dr_mask_is_l3_base_set(&(_spec)) || \
-       dr_mask_is_tcp_udp_base_set(&(_spec)) || \
-       dr_mask_is_ttl_set(&(_spec)) || \
-       (_misc)._inner_outer##_ipv6_flow_label)
-
-#define DR_MASK_IS_ETH_L4_MISC_SET(_misc3, _inner_outer) ( \
-       (_misc3)._inner_outer##_tcp_seq_num || \
-       (_misc3)._inner_outer##_tcp_ack_num)
-
-#define DR_MASK_IS_FIRST_MPLS_SET(_misc2, _inner_outer) ( \
-       (_misc2)._inner_outer##_first_mpls_label || \
-       (_misc2)._inner_outer##_first_mpls_exp || \
-       (_misc2)._inner_outer##_first_mpls_s_bos || \
-       (_misc2)._inner_outer##_first_mpls_ttl)
-
-static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc)
-{
-       return (misc->gre_key_h || misc->gre_key_l ||
-               misc->gre_protocol || misc->gre_c_present ||
-               misc->gre_k_present || misc->gre_s_present);
-}
-
-#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
-       (_misc)->outer_first_mpls_over_gre_label || \
-       (_misc)->outer_first_mpls_over_gre_exp || \
-       (_misc)->outer_first_mpls_over_gre_s_bos || \
-       (_misc)->outer_first_mpls_over_gre_ttl)
-
-#define DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
-       (_misc)->outer_first_mpls_over_udp_label || \
-       (_misc)->outer_first_mpls_over_udp_exp || \
-       (_misc)->outer_first_mpls_over_udp_s_bos || \
-       (_misc)->outer_first_mpls_over_udp_ttl)
-
-static bool
-dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
-{
-       return (misc3->outer_vxlan_gpe_vni ||
-               misc3->outer_vxlan_gpe_next_protocol ||
-               misc3->outer_vxlan_gpe_flags);
-}
-
-static bool
-dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
-{
-       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
-              (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
-}
-
-static bool
-dr_mask_is_tnl_vxlan_gpe(struct mlx5dr_match_param *mask,
-                        struct mlx5dr_domain *dmn)
-{
-       return dr_mask_is_vxlan_gpe_set(&mask->misc3) &&
-              dr_matcher_supp_vxlan_gpe(&dmn->info.caps);
-}
-
-static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
-{
-       return misc->geneve_vni ||
-              misc->geneve_oam ||
-              misc->geneve_protocol_type ||
-              misc->geneve_opt_len;
-}
-
-static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3)
-{
-       return misc3->geneve_tlv_option_0_data;
-}
-
-static bool
-dr_matcher_supp_flex_parser_ok(struct mlx5dr_cmd_caps *caps)
-{
-       return caps->flex_parser_ok_bits_supp;
-}
-
-static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *misc,
-                                                   struct mlx5dr_domain *dmn)
-{
-       return dr_matcher_supp_flex_parser_ok(&dmn->info.caps) &&
-              misc->geneve_tlv_option_0_exist;
-}
-
-static bool
-dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
-{
-       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
-              (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
-}
-
-static bool
-dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
-                     struct mlx5dr_domain *dmn)
-{
-       return dr_mask_is_tnl_geneve_set(&mask->misc) &&
-              dr_matcher_supp_tnl_geneve(&dmn->info.caps);
-}
-
-static bool dr_mask_is_tnl_gtpu_set(struct mlx5dr_match_misc3 *misc3)
-{
-       return misc3->gtpu_msg_flags || misc3->gtpu_msg_type || misc3->gtpu_teid;
-}
-
-static bool dr_matcher_supp_tnl_gtpu(struct mlx5dr_cmd_caps *caps)
-{
-       return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED;
-}
-
-static bool dr_mask_is_tnl_gtpu(struct mlx5dr_match_param *mask,
-                               struct mlx5dr_domain *dmn)
-{
-       return dr_mask_is_tnl_gtpu_set(&mask->misc3) &&
-              dr_matcher_supp_tnl_gtpu(&dmn->info.caps);
-}
-
-static int dr_matcher_supp_tnl_gtpu_dw_0(struct mlx5dr_cmd_caps *caps)
-{
-       return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED;
-}
-
-static bool dr_mask_is_tnl_gtpu_dw_0(struct mlx5dr_match_param *mask,
-                                    struct mlx5dr_domain *dmn)
-{
-       return mask->misc3.gtpu_dw_0 &&
-              dr_matcher_supp_tnl_gtpu_dw_0(&dmn->info.caps);
-}
-
-static int dr_matcher_supp_tnl_gtpu_teid(struct mlx5dr_cmd_caps *caps)
-{
-       return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED;
-}
-
-static bool dr_mask_is_tnl_gtpu_teid(struct mlx5dr_match_param *mask,
-                                    struct mlx5dr_domain *dmn)
-{
-       return mask->misc3.gtpu_teid &&
-              dr_matcher_supp_tnl_gtpu_teid(&dmn->info.caps);
-}
-
-static int dr_matcher_supp_tnl_gtpu_dw_2(struct mlx5dr_cmd_caps *caps)
-{
-       return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED;
-}
-
-static bool dr_mask_is_tnl_gtpu_dw_2(struct mlx5dr_match_param *mask,
-                                    struct mlx5dr_domain *dmn)
-{
-       return mask->misc3.gtpu_dw_2 &&
-              dr_matcher_supp_tnl_gtpu_dw_2(&dmn->info.caps);
-}
-
-static int dr_matcher_supp_tnl_gtpu_first_ext(struct mlx5dr_cmd_caps *caps)
-{
-       return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED;
-}
-
-static bool dr_mask_is_tnl_gtpu_first_ext(struct mlx5dr_match_param *mask,
-                                         struct mlx5dr_domain *dmn)
-{
-       return mask->misc3.gtpu_first_ext_dw_0 &&
-              dr_matcher_supp_tnl_gtpu_first_ext(&dmn->info.caps);
-}
-
-static bool dr_mask_is_tnl_gtpu_flex_parser_0(struct mlx5dr_match_param *mask,
-                                             struct mlx5dr_domain *dmn)
-{
-       struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
-
-       return (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_0) &&
-               dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
-              (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_teid) &&
-               dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
-              (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_2) &&
-               dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
-              (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
-               dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
-}
-
-static bool dr_mask_is_tnl_gtpu_flex_parser_1(struct mlx5dr_match_param *mask,
-                                             struct mlx5dr_domain *dmn)
-{
-       struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
-
-       return (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_0) &&
-               dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
-              (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_teid) &&
-               dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
-              (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_2) &&
-               dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
-              (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
-               dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
-}
-
-static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
-                                   struct mlx5dr_domain *dmn)
-{
-       return dr_mask_is_tnl_gtpu_flex_parser_0(mask, dmn) ||
-              dr_mask_is_tnl_gtpu_flex_parser_1(mask, dmn) ||
-              dr_mask_is_tnl_gtpu(mask, dmn);
-}
-
-static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
-{
-       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
-              (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
-}
-
-static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
-{
-       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
-              (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
-}
-
-static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
-{
-       return (misc3->icmpv6_type || misc3->icmpv6_code ||
-               misc3->icmpv6_header_data);
-}
-
-static bool dr_mask_is_icmp(struct mlx5dr_match_param *mask,
-                           struct mlx5dr_domain *dmn)
-{
-       if (DR_MASK_IS_ICMPV4_SET(&mask->misc3))
-               return dr_matcher_supp_icmp_v4(&dmn->info.caps);
-       else if (dr_mask_is_icmpv6_set(&mask->misc3))
-               return dr_matcher_supp_icmp_v6(&dmn->info.caps);
-
-       return false;
-}
-
-static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2)
-{
-       return misc2->metadata_reg_a;
-}
-
-static bool dr_mask_is_reg_c_0_3_set(struct mlx5dr_match_misc2 *misc2)
-{
-       return (misc2->metadata_reg_c_0 || misc2->metadata_reg_c_1 ||
-               misc2->metadata_reg_c_2 || misc2->metadata_reg_c_3);
-}
-
-static bool dr_mask_is_reg_c_4_7_set(struct mlx5dr_match_misc2 *misc2)
-{
-       return (misc2->metadata_reg_c_4 || misc2->metadata_reg_c_5 ||
-               misc2->metadata_reg_c_6 || misc2->metadata_reg_c_7);
-}
-
-static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
-{
-       return (misc->source_sqn || misc->source_port);
-}
-
-static bool dr_mask_is_flex_parser_id_0_3_set(u32 flex_parser_id,
-                                             u32 flex_parser_value)
-{
-       if (flex_parser_id)
-               return flex_parser_id <= DR_STE_MAX_FLEX_0_ID;
-
-       /* Using flex_parser 0 means that id is zero, thus value must be set. */
-       return flex_parser_value;
-}
-
-static bool dr_mask_is_flex_parser_0_3_set(struct mlx5dr_match_misc4 *misc4)
-{
-       return (dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_0,
-                                                 misc4->prog_sample_field_value_0) ||
-               dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_1,
-                                                 misc4->prog_sample_field_value_1) ||
-               dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_2,
-                                                 misc4->prog_sample_field_value_2) ||
-               dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_3,
-                                                 misc4->prog_sample_field_value_3));
-}
-
-static bool dr_mask_is_flex_parser_id_4_7_set(u32 flex_parser_id)
-{
-       return flex_parser_id > DR_STE_MAX_FLEX_0_ID &&
-              flex_parser_id <= DR_STE_MAX_FLEX_1_ID;
-}
-
-static bool dr_mask_is_flex_parser_4_7_set(struct mlx5dr_match_misc4 *misc4)
-{
-       return (dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_0) ||
-               dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_1) ||
-               dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_2) ||
-               dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_3));
-}
-
-static int dr_matcher_supp_tnl_mpls_over_gre(struct mlx5dr_cmd_caps *caps)
-{
-       return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED;
-}
-
-static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask,
-                                        struct mlx5dr_domain *dmn)
-{
-       return DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(&mask->misc2) &&
-              dr_matcher_supp_tnl_mpls_over_gre(&dmn->info.caps);
-}
-
-static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps)
-{
-       return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
-}
-
-static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
-                                        struct mlx5dr_domain *dmn)
-{
-       return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) &&
-              dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps);
-}
-
-static bool dr_mask_is_tnl_header_0_1_set(struct mlx5dr_match_misc5 *misc5)
-{
-       return misc5->tunnel_header_0 || misc5->tunnel_header_1;
-}
-
-int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
-                                  struct mlx5dr_matcher_rx_tx *nic_matcher,
-                                  enum mlx5dr_ipv outer_ipv,
-                                  enum mlx5dr_ipv inner_ipv)
-{
-       nic_matcher->ste_builder =
-               nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
-       nic_matcher->num_of_builders =
-               nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];
-
-       if (!nic_matcher->num_of_builders) {
-               mlx5dr_dbg(matcher->tbl->dmn,
-                          "Rule not supported on this matcher due to IP related fields\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
-                                      struct mlx5dr_matcher_rx_tx *nic_matcher,
-                                      enum mlx5dr_ipv outer_ipv,
-                                      enum mlx5dr_ipv inner_ipv)
-{
-       struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
-       struct mlx5dr_match_param mask = {};
-       bool allow_empty_match = false;
-       struct mlx5dr_ste_build *sb;
-       bool inner, rx;
-       int idx = 0;
-       int ret, i;
-
-       sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
-       rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
-
-       /* Create a temporary mask to track and clear used mask fields */
-       if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER)
-               mask.outer = matcher->mask.outer;
-
-       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC)
-               mask.misc = matcher->mask.misc;
-
-       if (matcher->match_criteria & DR_MATCHER_CRITERIA_INNER)
-               mask.inner = matcher->mask.inner;
-
-       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC2)
-               mask.misc2 = matcher->mask.misc2;
-
-       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3)
-               mask.misc3 = matcher->mask.misc3;
-
-       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4)
-               mask.misc4 = matcher->mask.misc4;
-
-       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC5)
-               mask.misc5 = matcher->mask.misc5;
-
-       ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
-                                        &matcher->mask, NULL);
-       if (ret)
-               return ret;
-
-       /* Optimize RX pipe by reducing source port match, since
-        * the FDB RX part is connected only to the wire.
-        */
-       if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
-           rx && mask.misc.source_port) {
-               mask.misc.source_port = 0;
-               mask.misc.source_eswitch_owner_vhca_id = 0;
-               allow_empty_match = true;
-       }
-
-       /* Outer */
-       if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
-                                      DR_MATCHER_CRITERIA_MISC |
-                                      DR_MATCHER_CRITERIA_MISC2 |
-                                      DR_MATCHER_CRITERIA_MISC3 |
-                                      DR_MATCHER_CRITERIA_MISC5)) {
-               inner = false;
-
-               if (dr_mask_is_wqe_metadata_set(&mask.misc2))
-                       mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
-                                                        &mask, inner, rx);
-
-               if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
-                       mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
-                                                   &mask, inner, rx);
-
-               if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
-                       mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
-                                                   &mask, inner, rx);
-
-               if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
-                   (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
-                    dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
-                       mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
-                                                     &mask, dmn, inner, rx);
-               }
-
-               if (dr_mask_is_smac_set(&mask.outer) &&
-                   dr_mask_is_dmac_set(&mask.outer)) {
-                       mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
-                                                       &mask, inner, rx);
-               }
-
-               if (dr_mask_is_smac_set(&mask.outer))
-                       mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
-                                                   &mask, inner, rx);
-
-               if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
-                       mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
-                                                   &mask, inner, rx);
-
-               if (outer_ipv == DR_RULE_IPV6) {
-                       if (DR_MASK_IS_DST_IP_SET(&mask.outer))
-                               mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
-                                                                &mask, inner, rx);
-
-                       if (DR_MASK_IS_SRC_IP_SET(&mask.outer))
-                               mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
-                                                                &mask, inner, rx);
-
-                       if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
-                               mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
-                                                               &mask, inner, rx);
-               } else {
-                       if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
-                               mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
-                                                                    &mask, inner, rx);
-
-                       if (dr_mask_is_ttl_set(&mask.outer) ||
-                           dr_mask_is_ipv4_ihl_set(&mask.outer))
-                               mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
-                                                                 &mask, inner, rx);
-               }
-
-               if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
-                       mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
-                                                      &mask, inner, rx);
-               else if (dr_mask_is_tnl_geneve(&mask, dmn)) {
-                       mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
-                                                   &mask, inner, rx);
-                       if (dr_mask_is_tnl_geneve_tlv_opt(&mask.misc3))
-                               mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++],
-                                                                   &mask, &dmn->info.caps,
-                                                                   inner, rx);
-                       if (dr_mask_is_tnl_geneve_tlv_opt_exist_set(&mask.misc, dmn))
-                               mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(ste_ctx, &sb[idx++],
-                                                                         &mask, &dmn->info.caps,
-                                                                         inner, rx);
-               } else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) {
-                       if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn))
-                               mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++],
-                                                                       &mask, &dmn->info.caps,
-                                                                       inner, rx);
-
-                       if (dr_mask_is_tnl_gtpu_flex_parser_1(&mask, dmn))
-                               mlx5dr_ste_build_tnl_gtpu_flex_parser_1(ste_ctx, &sb[idx++],
-                                                                       &mask, &dmn->info.caps,
-                                                                       inner, rx);
-
-                       if (dr_mask_is_tnl_gtpu(&mask, dmn))
-                               mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++],
-                                                         &mask, inner, rx);
-               } else if (dr_mask_is_tnl_header_0_1_set(&mask.misc5)) {
-                       mlx5dr_ste_build_tnl_header_0_1(ste_ctx, &sb[idx++],
-                                                       &mask, inner, rx);
-               }
-
-               if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
-                       mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
-                                                    &mask, inner, rx);
-
-               if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
-                       mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
-                                             &mask, inner, rx);
-
-               if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
-                       mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
-                                                          &mask, &dmn->info.caps,
-                                                          inner, rx);
-               else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
-                       mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
-                                                          &mask, &dmn->info.caps,
-                                                          inner, rx);
-
-               if (dr_mask_is_icmp(&mask, dmn))
-                       mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
-                                             &mask, &dmn->info.caps,
-                                             inner, rx);
-
-               if (dr_mask_is_tnl_gre_set(&mask.misc))
-                       mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
-                                                &mask, inner, rx);
-       }
-
-       /* Inner */
-       if (matcher->match_criteria & (DR_MATCHER_CRITERIA_INNER |
-                                      DR_MATCHER_CRITERIA_MISC |
-                                      DR_MATCHER_CRITERIA_MISC2 |
-                                      DR_MATCHER_CRITERIA_MISC3)) {
-               inner = true;
-
-               if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
-                       mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
-                                                   &mask, inner, rx);
-
-               if (dr_mask_is_smac_set(&mask.inner) &&
-                   dr_mask_is_dmac_set(&mask.inner)) {
-                       mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
-                                                       &mask, inner, rx);
-               }
-
-               if (dr_mask_is_smac_set(&mask.inner))
-                       mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
-                                                   &mask, inner, rx);
-
-               if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
-                       mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
-                                                   &mask, inner, rx);
-
-               if (inner_ipv == DR_RULE_IPV6) {
-                       if (DR_MASK_IS_DST_IP_SET(&mask.inner))
-                               mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
-                                                                &mask, inner, rx);
-
-                       if (DR_MASK_IS_SRC_IP_SET(&mask.inner))
-                               mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
-                                                                &mask, inner, rx);
-
-                       if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
-                               mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
-                                                               &mask, inner, rx);
-               } else {
-                       if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
-                               mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
-                                                                    &mask, inner, rx);
-
-                       if (dr_mask_is_ttl_set(&mask.inner) ||
-                           dr_mask_is_ipv4_ihl_set(&mask.inner))
-                               mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
-                                                                 &mask, inner, rx);
-               }
-
-               if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
-                       mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
-                                                    &mask, inner, rx);
-
-               if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
-                       mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
-                                             &mask, inner, rx);
-
-               if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
-                       mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
-                                                          &mask, &dmn->info.caps,
-                                                          inner, rx);
-               else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
-                       mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
-                                                          &mask, &dmn->info.caps,
-                                                          inner, rx);
-       }
-
-       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4) {
-               if (dr_mask_is_flex_parser_0_3_set(&mask.misc4))
-                       mlx5dr_ste_build_flex_parser_0(ste_ctx, &sb[idx++],
-                                                      &mask, false, rx);
-
-               if (dr_mask_is_flex_parser_4_7_set(&mask.misc4))
-                       mlx5dr_ste_build_flex_parser_1(ste_ctx, &sb[idx++],
-                                                      &mask, false, rx);
-       }
-
-       /* Empty matcher, takes all */
-       if ((!idx && allow_empty_match) ||
-           matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
-               mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
-
-       if (idx == 0) {
-               mlx5dr_err(dmn, "Cannot generate any valid rules from mask\n");
-               return -EINVAL;
-       }
-
-       /* Check that all mask fields were consumed */
-       for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
-               if (((u8 *)&mask)[i] != 0) {
-                       mlx5dr_dbg(dmn, "Mask contains unsupported parameters\n");
-                       return -EOPNOTSUPP;
-               }
-       }
-
-       nic_matcher->ste_builder = sb;
-       nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv] = idx;
-
-       return 0;
-}
-
-static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
-                                 struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
-                                 struct mlx5dr_matcher_rx_tx *next_nic_matcher,
-                                 struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
-{
-       struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl;
-       struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
-       struct mlx5dr_htbl_connect_info info;
-       struct mlx5dr_ste_htbl *prev_htbl;
-       int ret;
-
-       /* Connect end anchor hash table to next_htbl or to the default address */
-       if (next_nic_matcher) {
-               info.type = CONNECT_HIT;
-               info.hit_next_htbl = next_nic_matcher->s_htbl;
-       } else {
-               info.type = CONNECT_MISS;
-               info.miss_icm_addr = nic_tbl->default_icm_addr;
-       }
-       ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
-                                               curr_nic_matcher->e_anchor,
-                                               &info, info.type == CONNECT_HIT);
-       if (ret)
-               return ret;
-
-       /* Connect start hash table to end anchor */
-       info.type = CONNECT_MISS;
-       info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(curr_nic_matcher->e_anchor->chunk);
-       ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
-                                               curr_nic_matcher->s_htbl,
-                                               &info, false);
-       if (ret)
-               return ret;
-
-       /* Connect previous hash table to matcher start hash table */
-       if (prev_nic_matcher)
-               prev_htbl = prev_nic_matcher->e_anchor;
-       else
-               prev_htbl = nic_tbl->s_anchor;
-
-       info.type = CONNECT_HIT;
-       info.hit_next_htbl = curr_nic_matcher->s_htbl;
-       ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_htbl,
-                                               &info, true);
-       if (ret)
-               return ret;
-
-       /* Update the pointing ste and next hash table */
-       curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->chunk->ste_arr;
-       prev_htbl->chunk->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
-
-       if (next_nic_matcher) {
-               next_nic_matcher->s_htbl->pointing_ste =
-                       curr_nic_matcher->e_anchor->chunk->ste_arr;
-               curr_nic_matcher->e_anchor->chunk->ste_arr[0].next_htbl =
-                       next_nic_matcher->s_htbl;
-       }
-
-       return 0;
-}
-
-int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
-                                 struct mlx5dr_matcher_rx_tx *nic_matcher)
-{
-       struct mlx5dr_matcher_rx_tx *next_nic_matcher, *prev_nic_matcher, *tmp_nic_matcher;
-       struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
-       bool first = true;
-       int ret;
-
-       /* If the nic matcher is already on its parent nic table list,
-        * then it is already connected to the chain of nic matchers.
-        */
-       if (!list_empty(&nic_matcher->list_node))
-               return 0;
-
-       next_nic_matcher = NULL;
-       list_for_each_entry(tmp_nic_matcher, &nic_tbl->nic_matcher_list, list_node) {
-               if (tmp_nic_matcher->prio >= nic_matcher->prio) {
-                       next_nic_matcher = tmp_nic_matcher;
-                       break;
-               }
-               first = false;
-       }
-
-       prev_nic_matcher = NULL;
-       if (next_nic_matcher && !first)
-               prev_nic_matcher = list_prev_entry(next_nic_matcher, list_node);
-       else if (!first)
-               prev_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
-                                                  struct mlx5dr_matcher_rx_tx,
-                                                  list_node);
-
-       ret = dr_nic_matcher_connect(dmn, nic_matcher,
-                                    next_nic_matcher, prev_nic_matcher);
-       if (ret)
-               return ret;
-
-       if (prev_nic_matcher)
-               list_add(&nic_matcher->list_node, &prev_nic_matcher->list_node);
-       else if (next_nic_matcher)
-               list_add_tail(&nic_matcher->list_node, &next_nic_matcher->list_node);
-       else
-               list_add(&nic_matcher->list_node, &nic_matcher->nic_tbl->nic_matcher_list);
-
-       return ret;
-}
-
-static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher)
-{
-       mlx5dr_htbl_put(nic_matcher->s_htbl);
-       mlx5dr_htbl_put(nic_matcher->e_anchor);
-}
-
-static void dr_matcher_uninit_fdb(struct mlx5dr_matcher *matcher)
-{
-       dr_matcher_uninit_nic(&matcher->rx);
-       dr_matcher_uninit_nic(&matcher->tx);
-}
-
-static void dr_matcher_uninit(struct mlx5dr_matcher *matcher)
-{
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-
-       switch (dmn->type) {
-       case MLX5DR_DOMAIN_TYPE_NIC_RX:
-               dr_matcher_uninit_nic(&matcher->rx);
-               break;
-       case MLX5DR_DOMAIN_TYPE_NIC_TX:
-               dr_matcher_uninit_nic(&matcher->tx);
-               break;
-       case MLX5DR_DOMAIN_TYPE_FDB:
-               dr_matcher_uninit_fdb(matcher);
-               break;
-       default:
-               WARN_ON(true);
-               break;
-       }
-}
-
-static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
-                                          struct mlx5dr_matcher_rx_tx *nic_matcher)
-{
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-
-       dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV4);
-       dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV6);
-       dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV4);
-       dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);
-
-       if (!nic_matcher->ste_builder) {
-               mlx5dr_err(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
-                              struct mlx5dr_matcher_rx_tx *nic_matcher)
-{
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       int ret;
-
-       nic_matcher->prio = matcher->prio;
-       INIT_LIST_HEAD(&nic_matcher->list_node);
-
-       ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
-       if (ret)
-               return ret;
-
-       nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
-                                                     DR_CHUNK_SIZE_1,
-                                                     MLX5DR_STE_LU_TYPE_DONT_CARE,
-                                                     0);
-       if (!nic_matcher->e_anchor)
-               return -ENOMEM;
-
-       nic_matcher->s_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
-                                                   DR_CHUNK_SIZE_1,
-                                                   nic_matcher->ste_builder[0].lu_type,
-                                                   nic_matcher->ste_builder[0].byte_mask);
-       if (!nic_matcher->s_htbl) {
-               ret = -ENOMEM;
-               goto free_e_htbl;
-       }
-
-       /* make sure the tables exist while empty */
-       mlx5dr_htbl_get(nic_matcher->s_htbl);
-       mlx5dr_htbl_get(nic_matcher->e_anchor);
-
-       return 0;
-
-free_e_htbl:
-       mlx5dr_ste_htbl_free(nic_matcher->e_anchor);
-       return ret;
-}
-
-static int dr_matcher_init_fdb(struct mlx5dr_matcher *matcher)
-{
-       int ret;
-
-       ret = dr_matcher_init_nic(matcher, &matcher->rx);
-       if (ret)
-               return ret;
-
-       ret = dr_matcher_init_nic(matcher, &matcher->tx);
-       if (ret)
-               goto uninit_nic_rx;
-
-       return 0;
-
-uninit_nic_rx:
-       dr_matcher_uninit_nic(&matcher->rx);
-       return ret;
-}
-
-static int dr_matcher_copy_param(struct mlx5dr_matcher *matcher,
-                                struct mlx5dr_match_parameters *mask)
-{
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_match_parameters consumed_mask;
-       int i, ret = 0;
-
-       if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
-               mlx5dr_err(dmn, "Invalid match criteria attribute\n");
-               return -EINVAL;
-       }
-
-       if (mask) {
-               if (mask->match_sz > DR_SZ_MATCH_PARAM) {
-                       mlx5dr_err(dmn, "Invalid match size attribute\n");
-                       return -EINVAL;
-               }
-
-               consumed_mask.match_buf = kzalloc(mask->match_sz, GFP_KERNEL);
-               if (!consumed_mask.match_buf)
-                       return -ENOMEM;
-
-               consumed_mask.match_sz = mask->match_sz;
-               memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz);
-               mlx5dr_ste_copy_param(matcher->match_criteria,
-                                     &matcher->mask, &consumed_mask, true);
-
-               /* Check that all mask data was consumed */
-               for (i = 0; i < consumed_mask.match_sz; i++) {
-                       if (!((u8 *)consumed_mask.match_buf)[i])
-                               continue;
-
-                       mlx5dr_dbg(dmn,
-                                  "Match param mask contains unsupported parameters\n");
-                       ret = -EOPNOTSUPP;
-                       break;
-               }
-
-               kfree(consumed_mask.match_buf);
-       }
-
-       return ret;
-}
-
-static int dr_matcher_init(struct mlx5dr_matcher *matcher,
-                          struct mlx5dr_match_parameters *mask)
-{
-       struct mlx5dr_table *tbl = matcher->tbl;
-       struct mlx5dr_domain *dmn = tbl->dmn;
-       int ret;
-
-       ret = dr_matcher_copy_param(matcher, mask);
-       if (ret)
-               return ret;
-
-       switch (dmn->type) {
-       case MLX5DR_DOMAIN_TYPE_NIC_RX:
-               matcher->rx.nic_tbl = &tbl->rx;
-               ret = dr_matcher_init_nic(matcher, &matcher->rx);
-               break;
-       case MLX5DR_DOMAIN_TYPE_NIC_TX:
-               matcher->tx.nic_tbl = &tbl->tx;
-               ret = dr_matcher_init_nic(matcher, &matcher->tx);
-               break;
-       case MLX5DR_DOMAIN_TYPE_FDB:
-               matcher->rx.nic_tbl = &tbl->rx;
-               matcher->tx.nic_tbl = &tbl->tx;
-               ret = dr_matcher_init_fdb(matcher);
-               break;
-       default:
-               WARN_ON(true);
-               ret = -EINVAL;
-       }
-
-       return ret;
-}
-
-static void dr_matcher_add_to_dbg_list(struct mlx5dr_matcher *matcher)
-{
-       mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
-       list_add(&matcher->list_node, &matcher->tbl->matcher_list);
-       mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
-}
-
-static void dr_matcher_remove_from_dbg_list(struct mlx5dr_matcher *matcher)
-{
-       mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
-       list_del(&matcher->list_node);
-       mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
-}
-
-struct mlx5dr_matcher *
-mlx5dr_matcher_create(struct mlx5dr_table *tbl,
-                     u32 priority,
-                     u8 match_criteria_enable,
-                     struct mlx5dr_match_parameters *mask)
-{
-       struct mlx5dr_matcher *matcher;
-       int ret;
-
-       refcount_inc(&tbl->refcount);
-
-       matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
-       if (!matcher)
-               goto dec_ref;
-
-       matcher->tbl = tbl;
-       matcher->prio = priority;
-       matcher->match_criteria = match_criteria_enable;
-       refcount_set(&matcher->refcount, 1);
-       INIT_LIST_HEAD(&matcher->list_node);
-       INIT_LIST_HEAD(&matcher->dbg_rule_list);
-
-       mlx5dr_domain_lock(tbl->dmn);
-
-       ret = dr_matcher_init(matcher, mask);
-       if (ret)
-               goto free_matcher;
-
-       dr_matcher_add_to_dbg_list(matcher);
-
-       mlx5dr_domain_unlock(tbl->dmn);
-
-       return matcher;
-
-free_matcher:
-       mlx5dr_domain_unlock(tbl->dmn);
-       kfree(matcher);
-dec_ref:
-       refcount_dec(&tbl->refcount);
-       return NULL;
-}
-
-static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
-                                    struct mlx5dr_table_rx_tx *nic_tbl,
-                                    struct mlx5dr_matcher_rx_tx *next_nic_matcher,
-                                    struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
-{
-       struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
-       struct mlx5dr_htbl_connect_info info;
-       struct mlx5dr_ste_htbl *prev_anchor;
-
-       if (prev_nic_matcher)
-               prev_anchor = prev_nic_matcher->e_anchor;
-       else
-               prev_anchor = nic_tbl->s_anchor;
-
-       /* Connect previous anchor hash table to next matcher or to the default address */
-       if (next_nic_matcher) {
-               info.type = CONNECT_HIT;
-               info.hit_next_htbl = next_nic_matcher->s_htbl;
-               next_nic_matcher->s_htbl->pointing_ste = prev_anchor->chunk->ste_arr;
-               prev_anchor->chunk->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
-       } else {
-               info.type = CONNECT_MISS;
-               info.miss_icm_addr = nic_tbl->default_icm_addr;
-               prev_anchor->chunk->ste_arr[0].next_htbl = NULL;
-       }
-
-       return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
-                                                &info, true);
-}
-
-int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
-                                      struct mlx5dr_matcher_rx_tx *nic_matcher)
-{
-       struct mlx5dr_matcher_rx_tx *prev_nic_matcher, *next_nic_matcher;
-       struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
-       int ret;
-
-       /* If the nic matcher is not on its parent nic table list,
-        * then it is detached - no need to disconnect it.
-        */
-       if (list_empty(&nic_matcher->list_node))
-               return 0;
-
-       if (list_is_last(&nic_matcher->list_node, &nic_tbl->nic_matcher_list))
-               next_nic_matcher = NULL;
-       else
-               next_nic_matcher = list_next_entry(nic_matcher, list_node);
-
-       if (nic_matcher->list_node.prev == &nic_tbl->nic_matcher_list)
-               prev_nic_matcher = NULL;
-       else
-               prev_nic_matcher = list_prev_entry(nic_matcher, list_node);
-
-       ret = dr_matcher_disconnect_nic(dmn, nic_tbl, next_nic_matcher, prev_nic_matcher);
-       if (ret)
-               return ret;
-
-       list_del_init(&nic_matcher->list_node);
-       return 0;
-}
-
-int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
-{
-       struct mlx5dr_table *tbl = matcher->tbl;
-
-       if (WARN_ON_ONCE(refcount_read(&matcher->refcount) > 1))
-               return -EBUSY;
-
-       mlx5dr_domain_lock(tbl->dmn);
-
-       dr_matcher_remove_from_dbg_list(matcher);
-       dr_matcher_uninit(matcher);
-       refcount_dec(&matcher->tbl->refcount);
-
-       mlx5dr_domain_unlock(tbl->dmn);
-       kfree(matcher);
-
-       return 0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
deleted file mode 100644 (file)
index 8ca534e..0000000
+++ /dev/null
@@ -1,244 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-
-#include "dr_types.h"
-#include "mlx5_ifc_dr_ste_v1.h"
-
-enum dr_ptrn_modify_hdr_action_id {
-       DR_PTRN_MODIFY_HDR_ACTION_ID_NOP = 0x00,
-       DR_PTRN_MODIFY_HDR_ACTION_ID_COPY = 0x05,
-       DR_PTRN_MODIFY_HDR_ACTION_ID_SET = 0x06,
-       DR_PTRN_MODIFY_HDR_ACTION_ID_ADD = 0x07,
-       DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE = 0x0a,
-};
-
-struct mlx5dr_ptrn_mgr {
-       struct mlx5dr_domain *dmn;
-       struct mlx5dr_icm_pool *ptrn_icm_pool;
-       /* cache for modify_header ptrn */
-       struct list_head ptrn_list;
-       struct mutex modify_hdr_mutex; /* protect the pattern cache */
-};
-
-/* Cache structure and functions */
-static bool dr_ptrn_compare_modify_hdr(size_t cur_num_of_actions,
-                                      __be64 cur_hw_actions[],
-                                      size_t num_of_actions,
-                                      __be64 hw_actions[])
-{
-       int i;
-
-       if (cur_num_of_actions != num_of_actions)
-               return false;
-
-       for (i = 0; i < num_of_actions; i++) {
-               u8 action_id =
-                       MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);
-
-               if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_COPY) {
-                       if (hw_actions[i] != cur_hw_actions[i])
-                               return false;
-               } else {
-                       if ((__force __be32)hw_actions[i] !=
-                           (__force __be32)cur_hw_actions[i])
-                               return false;
-               }
-       }
-
-       return true;
-}
-
-static struct mlx5dr_ptrn_obj *
-dr_ptrn_find_cached_pattern(struct mlx5dr_ptrn_mgr *mgr,
-                           size_t num_of_actions,
-                           __be64 hw_actions[])
-{
-       struct mlx5dr_ptrn_obj *cached_pattern;
-       struct mlx5dr_ptrn_obj *tmp;
-
-       list_for_each_entry_safe(cached_pattern, tmp, &mgr->ptrn_list, list) {
-               if (dr_ptrn_compare_modify_hdr(cached_pattern->num_of_actions,
-                                              (__be64 *)cached_pattern->data,
-                                              num_of_actions,
-                                              hw_actions)) {
-                       /* Put this pattern in the head of the list,
-                        * as we will probably use it more.
-                        */
-                       list_del_init(&cached_pattern->list);
-                       list_add(&cached_pattern->list, &mgr->ptrn_list);
-                       return cached_pattern;
-               }
-       }
-
-       return NULL;
-}
-
-static struct mlx5dr_ptrn_obj *
-dr_ptrn_alloc_pattern(struct mlx5dr_ptrn_mgr *mgr,
-                     u16 num_of_actions, u8 *data)
-{
-       struct mlx5dr_ptrn_obj *pattern;
-       struct mlx5dr_icm_chunk *chunk;
-       u32 chunk_size;
-       u32 index;
-
-       chunk_size = ilog2(roundup_pow_of_two(num_of_actions));
-       /* HW modify action index granularity is at least 64B */
-       chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);
-
-       chunk = mlx5dr_icm_alloc_chunk(mgr->ptrn_icm_pool, chunk_size);
-       if (!chunk)
-               return NULL;
-
-       index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
-                mgr->dmn->info.caps.hdr_modify_pattern_icm_addr) /
-               DR_ACTION_CACHE_LINE_SIZE;
-
-       pattern = kzalloc(sizeof(*pattern), GFP_KERNEL);
-       if (!pattern)
-               goto free_chunk;
-
-       pattern->data = kzalloc(num_of_actions * DR_MODIFY_ACTION_SIZE *
-                               sizeof(*pattern->data), GFP_KERNEL);
-       if (!pattern->data)
-               goto free_pattern;
-
-       memcpy(pattern->data, data, num_of_actions * DR_MODIFY_ACTION_SIZE);
-       pattern->chunk = chunk;
-       pattern->index = index;
-       pattern->num_of_actions = num_of_actions;
-
-       list_add(&pattern->list, &mgr->ptrn_list);
-       refcount_set(&pattern->refcount, 1);
-
-       return pattern;
-
-free_pattern:
-       kfree(pattern);
-free_chunk:
-       mlx5dr_icm_free_chunk(chunk);
-       return NULL;
-}
-
-static void
-dr_ptrn_free_pattern(struct mlx5dr_ptrn_obj *pattern)
-{
-       list_del(&pattern->list);
-       mlx5dr_icm_free_chunk(pattern->chunk);
-       kfree(pattern->data);
-       kfree(pattern);
-}
-
-struct mlx5dr_ptrn_obj *
-mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
-                             u16 num_of_actions,
-                             u8 *data)
-{
-       struct mlx5dr_ptrn_obj *pattern;
-       u64 *hw_actions;
-       u8 action_id;
-       int i;
-
-       mutex_lock(&mgr->modify_hdr_mutex);
-       pattern = dr_ptrn_find_cached_pattern(mgr,
-                                             num_of_actions,
-                                             (__be64 *)data);
-       if (!pattern) {
-               /* Alloc and add new pattern to cache */
-               pattern = dr_ptrn_alloc_pattern(mgr, num_of_actions, data);
-               if (!pattern)
-                       goto out_unlock;
-
-               hw_actions = (u64 *)pattern->data;
-               /* Here we mask the pattern data to create a valid pattern
-                * since we do an OR operation between the arg and pattern
-                */
-               for (i = 0; i < num_of_actions; i++) {
-                       action_id = MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);
-
-                       if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_SET ||
-                           action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_ADD ||
-                           action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE)
-                               MLX5_SET(ste_double_action_set_v1, &hw_actions[i], inline_data, 0);
-               }
-
-               if (mlx5dr_send_postsend_pattern(mgr->dmn, pattern->chunk,
-                                                num_of_actions, pattern->data)) {
-                       refcount_dec(&pattern->refcount);
-                       goto free_pattern;
-               }
-       } else {
-               refcount_inc(&pattern->refcount);
-       }
-
-       mutex_unlock(&mgr->modify_hdr_mutex);
-
-       return pattern;
-
-free_pattern:
-       dr_ptrn_free_pattern(pattern);
-out_unlock:
-       mutex_unlock(&mgr->modify_hdr_mutex);
-       return NULL;
-}
-
-void
-mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
-                             struct mlx5dr_ptrn_obj *pattern)
-{
-       mutex_lock(&mgr->modify_hdr_mutex);
-
-       if (refcount_dec_and_test(&pattern->refcount))
-               dr_ptrn_free_pattern(pattern);
-
-       mutex_unlock(&mgr->modify_hdr_mutex);
-}
-
-struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)
-{
-       struct mlx5dr_ptrn_mgr *mgr;
-
-       if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
-               return NULL;
-
-       mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
-       if (!mgr)
-               return NULL;
-
-       mgr->dmn = dmn;
-       mgr->ptrn_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_HDR_PTRN);
-       if (!mgr->ptrn_icm_pool) {
-               mlx5dr_err(dmn, "Couldn't get modify-header-pattern memory\n");
-               goto free_mgr;
-       }
-
-       INIT_LIST_HEAD(&mgr->ptrn_list);
-       mutex_init(&mgr->modify_hdr_mutex);
-
-       return mgr;
-
-free_mgr:
-       kfree(mgr);
-       return NULL;
-}
-
-void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr)
-{
-       struct mlx5dr_ptrn_obj *pattern;
-       struct mlx5dr_ptrn_obj *tmp;
-
-       if (!mgr)
-               return;
-
-       WARN_ON(!list_empty(&mgr->ptrn_list));
-
-       list_for_each_entry_safe(pattern, tmp, &mgr->ptrn_list, list) {
-               list_del(&pattern->list);
-               kfree(pattern->data);
-               kfree(pattern);
-       }
-
-       mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool);
-       mutex_destroy(&mgr->modify_hdr_mutex);
-       kfree(mgr);
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
deleted file mode 100644 (file)
index d1db04b..0000000
+++ /dev/null
@@ -1,1377 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#include "dr_types.h"
-
-#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN < 2048)
-/* don't try to optimize STE allocation if the stack is too constaraining */
-#define DR_RULE_MAX_STES_OPTIMIZED 0
-#else
-#define DR_RULE_MAX_STES_OPTIMIZED 2
-#endif
-#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
-
-static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
-                                      enum mlx5dr_domain_nic_type nic_type,
-                                      struct mlx5dr_ste *new_last_ste,
-                                      struct list_head *miss_list,
-                                      struct list_head *send_list)
-{
-       struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
-       struct mlx5dr_ste_send_info *ste_info_last;
-       struct mlx5dr_ste *last_ste;
-
-       /* The new entry will be inserted after the last */
-       last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
-       WARN_ON(!last_ste);
-
-       ste_info_last = mlx5dr_send_info_alloc(dmn, nic_type);
-       if (!ste_info_last)
-               return -ENOMEM;
-
-       mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
-                                mlx5dr_ste_get_icm_addr(new_last_ste));
-       list_add_tail(&new_last_ste->miss_list_node, miss_list);
-
-       mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
-                                                 0, mlx5dr_ste_get_hw_ste(last_ste),
-                                                 ste_info_last, send_list, true);
-
-       return 0;
-}
-
-static void dr_rule_set_last_ste_miss_addr(struct mlx5dr_matcher *matcher,
-                                          struct mlx5dr_matcher_rx_tx *nic_matcher,
-                                          u8 *hw_ste)
-{
-       struct mlx5dr_ste_ctx *ste_ctx = matcher->tbl->dmn->ste_ctx;
-       u64 icm_addr;
-
-       if (mlx5dr_ste_is_miss_addr_set(ste_ctx, hw_ste))
-               return;
-
-       icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
-       mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
-}
-
-static struct mlx5dr_ste *
-dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
-                             struct mlx5dr_matcher_rx_tx *nic_matcher,
-                             u8 *hw_ste)
-{
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_ste_htbl *new_htbl;
-       struct mlx5dr_ste *ste;
-
-       /* Create new table for miss entry */
-       new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
-                                        DR_CHUNK_SIZE_1,
-                                        MLX5DR_STE_LU_TYPE_DONT_CARE,
-                                        0);
-       if (!new_htbl) {
-               mlx5dr_dbg(dmn, "Failed allocating collision table\n");
-               return NULL;
-       }
-
-       /* One and only entry, never grows */
-       ste = new_htbl->chunk->ste_arr;
-       dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
-       mlx5dr_htbl_get(new_htbl);
-
-       return ste;
-}
-
-static struct mlx5dr_ste *
-dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
-                              struct mlx5dr_matcher_rx_tx *nic_matcher,
-                              u8 *hw_ste,
-                              struct mlx5dr_ste *orig_ste)
-{
-       struct mlx5dr_ste *ste;
-
-       ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
-       if (!ste) {
-               mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
-               return NULL;
-       }
-
-       ste->ste_chain_location = orig_ste->ste_chain_location;
-       ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;
-
-       /* In collision entry, all members share the same miss_list_head */
-       ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);
-
-       /* Next table */
-       if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
-                                       DR_CHUNK_SIZE_1)) {
-               mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
-               goto free_tbl;
-       }
-
-       return ste;
-
-free_tbl:
-       mlx5dr_ste_free(ste, matcher, nic_matcher);
-       return NULL;
-}
-
-static int
-dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
-                                     struct mlx5dr_domain *dmn)
-{
-       int ret;
-
-       list_del(&ste_info->send_list);
-
-       /* Copy data to ste, only reduced size or control, the last 16B (mask)
-        * is already written to the hw.
-        */
-       if (ste_info->size == DR_STE_SIZE_CTRL)
-               memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
-                      ste_info->data, DR_STE_SIZE_CTRL);
-       else
-               memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
-                      ste_info->data, DR_STE_SIZE_REDUCED);
-
-       ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
-                                      ste_info->size, ste_info->offset);
-       if (ret)
-               goto out;
-
-out:
-       mlx5dr_send_info_free(ste_info);
-       return ret;
-}
-
-static int dr_rule_send_update_list(struct list_head *send_ste_list,
-                                   struct mlx5dr_domain *dmn,
-                                   bool is_reverse)
-{
-       struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
-       int ret;
-
-       if (is_reverse) {
-               list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
-                                                send_ste_list, send_list) {
-                       ret = dr_rule_handle_one_ste_in_update_list(ste_info,
-                                                                   dmn);
-                       if (ret)
-                               return ret;
-               }
-       } else {
-               list_for_each_entry_safe(ste_info, tmp_ste_info,
-                                        send_ste_list, send_list) {
-                       ret = dr_rule_handle_one_ste_in_update_list(ste_info,
-                                                                   dmn);
-                       if (ret)
-                               return ret;
-               }
-       }
-
-       return 0;
-}
-
-static struct mlx5dr_ste *
-dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
-{
-       struct mlx5dr_ste *ste;
-
-       if (list_empty(miss_list))
-               return NULL;
-
-       /* Check if hw_ste is present in the list */
-       list_for_each_entry(ste, miss_list, miss_list_node) {
-               if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
-                       return ste;
-       }
-
-       return NULL;
-}
-
-static struct mlx5dr_ste *
-dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
-                               struct mlx5dr_matcher_rx_tx *nic_matcher,
-                               struct list_head *update_list,
-                               struct mlx5dr_ste *col_ste,
-                               u8 *hw_ste)
-{
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_ste *new_ste;
-       int ret;
-
-       new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
-       if (!new_ste)
-               return NULL;
-
-       /* Update collision pointing STE */
-       new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;
-
-       /* In collision entry, all members share the same miss_list_head */
-       new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);
-
-       /* Update the previous from the list */
-       ret = dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
-                                         new_ste, mlx5dr_ste_get_miss_list(col_ste),
-                                         update_list);
-       if (ret) {
-               mlx5dr_dbg(dmn, "Failed update dup entry\n");
-               goto err_exit;
-       }
-
-       return new_ste;
-
-err_exit:
-       mlx5dr_ste_free(new_ste, matcher, nic_matcher);
-       return NULL;
-}
-
-static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
-                                        struct mlx5dr_matcher_rx_tx *nic_matcher,
-                                        struct mlx5dr_ste *cur_ste,
-                                        struct mlx5dr_ste *new_ste)
-{
-       new_ste->next_htbl = cur_ste->next_htbl;
-       new_ste->ste_chain_location = cur_ste->ste_chain_location;
-
-       if (new_ste->next_htbl)
-               new_ste->next_htbl->pointing_ste = new_ste;
-
-       /* We need to copy the refcount since this ste
-        * may have been traversed several times
-        */
-       new_ste->refcount = cur_ste->refcount;
-
-       /* Link old STEs rule to the new ste */
-       mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false);
-}
-
-static struct mlx5dr_ste *
-dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
-                       struct mlx5dr_matcher_rx_tx *nic_matcher,
-                       struct mlx5dr_ste *cur_ste,
-                       struct mlx5dr_ste_htbl *new_htbl,
-                       struct list_head *update_list)
-{
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_ste_send_info *ste_info;
-       bool use_update_list = false;
-       u8 hw_ste[DR_STE_SIZE] = {};
-       struct mlx5dr_ste *new_ste;
-       int new_idx;
-       u8 sb_idx;
-
-       /* Copy STE mask from the matcher */
-       sb_idx = cur_ste->ste_chain_location - 1;
-       mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);
-
-       /* Copy STE control and tag */
-       memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
-       dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
-
-       new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
-       new_ste = &new_htbl->chunk->ste_arr[new_idx];
-
-       if (mlx5dr_ste_is_not_used(new_ste)) {
-               mlx5dr_htbl_get(new_htbl);
-               list_add_tail(&new_ste->miss_list_node,
-                             mlx5dr_ste_get_miss_list(new_ste));
-       } else {
-               new_ste = dr_rule_rehash_handle_collision(matcher,
-                                                         nic_matcher,
-                                                         update_list,
-                                                         new_ste,
-                                                         hw_ste);
-               if (!new_ste) {
-                       mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
-                                  new_idx);
-                       return NULL;
-               }
-               new_htbl->ctrl.num_of_collisions++;
-               use_update_list = true;
-       }
-
-       memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);
-
-       new_htbl->ctrl.num_of_valid_entries++;
-
-       if (use_update_list) {
-               ste_info = mlx5dr_send_info_alloc(dmn,
-                                                 nic_matcher->nic_tbl->nic_dmn->type);
-               if (!ste_info)
-                       goto err_exit;
-
-               mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
-                                                         hw_ste, ste_info,
-                                                         update_list, true);
-       }
-
-       dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);
-
-       return new_ste;
-
-err_exit:
-       mlx5dr_ste_free(new_ste, matcher, nic_matcher);
-       return NULL;
-}
-
-static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
-                                        struct mlx5dr_matcher_rx_tx *nic_matcher,
-                                        struct list_head *cur_miss_list,
-                                        struct mlx5dr_ste_htbl *new_htbl,
-                                        struct list_head *update_list)
-{
-       struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;
-
-       if (list_empty(cur_miss_list))
-               return 0;
-
-       list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
-               new_ste = dr_rule_rehash_copy_ste(matcher,
-                                                 nic_matcher,
-                                                 cur_ste,
-                                                 new_htbl,
-                                                 update_list);
-               if (!new_ste)
-                       goto err_insert;
-
-               list_del(&cur_ste->miss_list_node);
-               mlx5dr_htbl_put(cur_ste->htbl);
-       }
-       return 0;
-
-err_insert:
-       mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
-       WARN_ON(true);
-       return -EINVAL;
-}
-
-static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
-                                   struct mlx5dr_matcher_rx_tx *nic_matcher,
-                                   struct mlx5dr_ste_htbl *cur_htbl,
-                                   struct mlx5dr_ste_htbl *new_htbl,
-                                   struct list_head *update_list)
-{
-       struct mlx5dr_ste *cur_ste;
-       int cur_entries;
-       int err = 0;
-       int i;
-
-       cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);
-
-       if (cur_entries < 1) {
-               mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
-               return -EINVAL;
-       }
-
-       for (i = 0; i < cur_entries; i++) {
-               cur_ste = &cur_htbl->chunk->ste_arr[i];
-               if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
-                       continue;
-
-               err = dr_rule_rehash_copy_miss_list(matcher,
-                                                   nic_matcher,
-                                                   mlx5dr_ste_get_miss_list(cur_ste),
-                                                   new_htbl,
-                                                   update_list);
-               if (err)
-                       goto clean_copy;
-
-               /* In order to decrease the number of allocated ste_send_info
-                * structs, send the current table row now.
-                */
-               err = dr_rule_send_update_list(update_list, matcher->tbl->dmn, false);
-               if (err) {
-                       mlx5dr_dbg(matcher->tbl->dmn, "Failed updating table to HW\n");
-                       goto clean_copy;
-               }
-       }
-
-clean_copy:
-       return err;
-}
-
-static struct mlx5dr_ste_htbl *
-dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
-                   struct mlx5dr_rule_rx_tx *nic_rule,
-                   struct mlx5dr_ste_htbl *cur_htbl,
-                   u8 ste_location,
-                   struct list_head *update_list,
-                   enum mlx5dr_icm_chunk_size new_size)
-{
-       struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
-       struct mlx5dr_matcher *matcher = rule->matcher;
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_matcher_rx_tx *nic_matcher;
-       struct mlx5dr_ste_send_info *ste_info;
-       struct mlx5dr_htbl_connect_info info;
-       struct mlx5dr_domain_rx_tx *nic_dmn;
-       u8 formatted_ste[DR_STE_SIZE] = {};
-       LIST_HEAD(rehash_table_send_list);
-       struct mlx5dr_ste *ste_to_update;
-       struct mlx5dr_ste_htbl *new_htbl;
-       int err;
-
-       nic_matcher = nic_rule->nic_matcher;
-       nic_dmn = nic_matcher->nic_tbl->nic_dmn;
-
-       ste_info = mlx5dr_send_info_alloc(dmn,
-                                         nic_matcher->nic_tbl->nic_dmn->type);
-       if (!ste_info)
-               return NULL;
-
-       new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
-                                        new_size,
-                                        cur_htbl->lu_type,
-                                        cur_htbl->byte_mask);
-       if (!new_htbl) {
-               mlx5dr_err(dmn, "Failed to allocate new hash table\n");
-               goto free_ste_info;
-       }
-
-       /* Write new table to HW */
-       info.type = CONNECT_MISS;
-       info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
-       mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
-                                    dmn->info.caps.gvmi,
-                                    nic_dmn->type,
-                                    new_htbl,
-                                    formatted_ste,
-                                    &info);
-
-       new_htbl->pointing_ste = cur_htbl->pointing_ste;
-       new_htbl->pointing_ste->next_htbl = new_htbl;
-       err = dr_rule_rehash_copy_htbl(matcher,
-                                      nic_matcher,
-                                      cur_htbl,
-                                      new_htbl,
-                                      &rehash_table_send_list);
-       if (err)
-               goto free_new_htbl;
-
-       if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
-                                     nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
-               mlx5dr_err(dmn, "Failed writing table to HW\n");
-               goto free_new_htbl;
-       }
-
-       /* Writing to the hw is done in regular order of rehash_table_send_list,
-        * in order to have the origin data written before the miss address of
-        * collision entries, if exists.
-        */
-       if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
-               mlx5dr_err(dmn, "Failed updating table to HW\n");
-               goto free_ste_list;
-       }
-
-       /* Connect previous hash table to current */
-       if (ste_location == 1) {
-               /* The previous table is an anchor, anchors size is always one STE */
-               struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;
-
-               /* On matcher s_anchor we keep an extra refcount */
-               mlx5dr_htbl_get(new_htbl);
-               mlx5dr_htbl_put(cur_htbl);
-
-               nic_matcher->s_htbl = new_htbl;
-
-               /* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
-                * (48B len) which works only on first 32B
-                */
-               mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
-                                       prev_htbl->chunk->hw_ste_arr,
-                                       mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
-                                       mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));
-
-               ste_to_update = &prev_htbl->chunk->ste_arr[0];
-       } else {
-               mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
-                                                    mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
-                                                    new_htbl);
-               ste_to_update = cur_htbl->pointing_ste;
-       }
-
-       mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
-                                                 0, mlx5dr_ste_get_hw_ste(ste_to_update),
-                                                 ste_info, update_list, false);
-
-       return new_htbl;
-
-free_ste_list:
-       /* Clean all ste_info's from the new table */
-       list_for_each_entry_safe(del_ste_info, tmp_ste_info,
-                                &rehash_table_send_list, send_list) {
-               list_del(&del_ste_info->send_list);
-               mlx5dr_send_info_free(del_ste_info);
-       }
-
-free_new_htbl:
-       mlx5dr_ste_htbl_free(new_htbl);
-free_ste_info:
-       mlx5dr_send_info_free(ste_info);
-       mlx5dr_info(dmn, "Failed creating rehash table\n");
-       return NULL;
-}
-
-static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
-                                             struct mlx5dr_rule_rx_tx *nic_rule,
-                                             struct mlx5dr_ste_htbl *cur_htbl,
-                                             u8 ste_location,
-                                             struct list_head *update_list)
-{
-       struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
-       enum mlx5dr_icm_chunk_size new_size;
-
-       new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
-       new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);
-
-       if (new_size == cur_htbl->chunk->size)
-               return NULL; /* Skip rehash, we already at the max size */
-
-       return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
-                                  update_list, new_size);
-}
-
-static struct mlx5dr_ste *
-dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
-                        struct mlx5dr_matcher_rx_tx *nic_matcher,
-                        struct mlx5dr_ste *ste,
-                        u8 *hw_ste,
-                        struct list_head *miss_list,
-                        struct list_head *send_list)
-{
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_ste_send_info *ste_info;
-       struct mlx5dr_ste *new_ste;
-
-       ste_info = mlx5dr_send_info_alloc(dmn,
-                                         nic_matcher->nic_tbl->nic_dmn->type);
-       if (!ste_info)
-               return NULL;
-
-       new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
-       if (!new_ste)
-               goto free_send_info;
-
-       if (dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
-                                       new_ste, miss_list, send_list)) {
-               mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
-               goto err_exit;
-       }
-
-       mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
-                                                 ste_info, send_list, false);
-
-       ste->htbl->ctrl.num_of_collisions++;
-       ste->htbl->ctrl.num_of_valid_entries++;
-
-       return new_ste;
-
-err_exit:
-       mlx5dr_ste_free(new_ste, matcher, nic_matcher);
-free_send_info:
-       mlx5dr_send_info_free(ste_info);
-       return NULL;
-}
-
-static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
-{
-       struct mlx5dr_rule_action_member *action_mem;
-       struct mlx5dr_rule_action_member *tmp;
-
-       list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
-               list_del(&action_mem->list);
-               refcount_dec(&action_mem->action->refcount);
-               kvfree(action_mem);
-       }
-}
-
-static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
-                                     size_t num_actions,
-                                     struct mlx5dr_action *actions[])
-{
-       struct mlx5dr_rule_action_member *action_mem;
-       int i;
-
-       for (i = 0; i < num_actions; i++) {
-               action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
-               if (!action_mem)
-                       goto free_action_members;
-
-               action_mem->action = actions[i];
-               INIT_LIST_HEAD(&action_mem->list);
-               list_add_tail(&action_mem->list, &rule->rule_actions_list);
-               refcount_inc(&action_mem->action->refcount);
-       }
-
-       return 0;
-
-free_action_members:
-       dr_rule_remove_action_members(rule);
-       return -ENOMEM;
-}
-
-void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
-                                struct mlx5dr_ste *ste,
-                                bool force)
-{
-       /* Update rule member is usually done for the last STE or during rule
-        * creation to recover from mid-creation failure (for this peruse the
-        * force flag is used)
-        */
-       if (ste->next_htbl && !force)
-               return;
-
-       /* Update is required since each rule keeps track of its last STE */
-       ste->rule_rx_tx = nic_rule;
-       nic_rule->last_rule_ste = ste;
-}
-
-static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste)
-{
-       struct mlx5dr_ste *first_ste;
-
-       first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste),
-                                    struct mlx5dr_ste, miss_list_node);
-
-       return first_ste->htbl->pointing_ste;
-}
-
-int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
-                                        struct mlx5dr_ste *curr_ste,
-                                        int *num_of_stes)
-{
-       bool first = false;
-
-       *num_of_stes = 0;
-
-       if (!curr_ste)
-               return -ENOENT;
-
-       /* Iterate from last to first */
-       while (!first) {
-               first = curr_ste->ste_chain_location == 1;
-               ste_arr[*num_of_stes] = curr_ste;
-               *num_of_stes += 1;
-               curr_ste = dr_rule_get_pointed_ste(curr_ste);
-       }
-
-       return 0;
-}
-
-static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
-                                      struct mlx5dr_rule_rx_tx *nic_rule)
-{
-       struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
-       struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;
-       int i;
-
-       if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
-               return;
-
-       while (i--)
-               mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher);
-}
-
-static u16 dr_get_bits_per_mask(u16 byte_mask)
-{
-       u16 bits = 0;
-
-       while (byte_mask) {
-               byte_mask = byte_mask & (byte_mask - 1);
-               bits++;
-       }
-
-       return bits;
-}
-
-static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
-                                     struct mlx5dr_domain *dmn,
-                                     struct mlx5dr_domain_rx_tx *nic_dmn)
-{
-       struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
-       int threshold;
-
-       if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
-               return false;
-
-       if (!mlx5dr_ste_htbl_may_grow(htbl))
-               return false;
-
-       if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
-               return false;
-
-       threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
-       if (ctrl->num_of_collisions >= threshold &&
-           (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold)
-               return true;
-
-       return false;
-}
-
-static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
-                                     struct mlx5dr_rule_rx_tx *nic_rule,
-                                     struct list_head *send_ste_list,
-                                     struct mlx5dr_ste *last_ste,
-                                     u8 *hw_ste_arr,
-                                     u32 new_hw_ste_arr_sz)
-{
-       struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
-       struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
-       u8 num_of_builders = nic_matcher->num_of_builders;
-       struct mlx5dr_matcher *matcher = rule->matcher;
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       u8 *curr_hw_ste, *prev_hw_ste;
-       struct mlx5dr_ste *action_ste;
-       int i, k;
-
-       /* Two cases:
-        * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste
-        * 2. num_of_builders is less then new_hw_ste_arr_sz, new ste was added
-        *    to support the action.
-        */
-
-       for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
-               curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
-               prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
-               action_ste = dr_rule_create_collision_htbl(matcher,
-                                                          nic_matcher,
-                                                          curr_hw_ste);
-               if (!action_ste)
-                       return -ENOMEM;
-
-               mlx5dr_ste_get(action_ste);
-
-               action_ste->htbl->pointing_ste = last_ste;
-               last_ste->next_htbl = action_ste->htbl;
-               last_ste = action_ste;
-
-               /* While free ste we go over the miss list, so add this ste to the list */
-               list_add_tail(&action_ste->miss_list_node,
-                             mlx5dr_ste_get_miss_list(action_ste));
-
-               ste_info_arr[k] = mlx5dr_send_info_alloc(dmn,
-                                                        nic_matcher->nic_tbl->nic_dmn->type);
-               if (!ste_info_arr[k])
-                       goto err_exit;
-
-               /* Point current ste to the new action */
-               mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
-                                                    prev_hw_ste,
-                                                    action_ste->htbl);
-
-               mlx5dr_rule_set_last_member(nic_rule, action_ste, true);
-
-               mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
-                                                         curr_hw_ste,
-                                                         ste_info_arr[k],
-                                                         send_ste_list, false);
-       }
-
-       last_ste->next_htbl = NULL;
-
-       return 0;
-
-err_exit:
-       mlx5dr_ste_put(action_ste, matcher, nic_matcher);
-       return -ENOMEM;
-}
-
-static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
-                                     struct mlx5dr_matcher_rx_tx *nic_matcher,
-                                     struct mlx5dr_ste_htbl *cur_htbl,
-                                     struct mlx5dr_ste *ste,
-                                     u8 ste_location,
-                                     u8 *hw_ste,
-                                     struct list_head *miss_list,
-                                     struct list_head *send_list)
-{
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_ste_send_info *ste_info;
-
-       /* Take ref on table, only on first time this ste is used */
-       mlx5dr_htbl_get(cur_htbl);
-
-       /* new entry -> new branch */
-       list_add_tail(&ste->miss_list_node, miss_list);
-
-       dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
-
-       ste->ste_chain_location = ste_location;
-
-       ste_info = mlx5dr_send_info_alloc(dmn,
-                                         nic_matcher->nic_tbl->nic_dmn->type);
-       if (!ste_info)
-               goto clean_ste_setting;
-
-       if (mlx5dr_ste_create_next_htbl(matcher,
-                                       nic_matcher,
-                                       ste,
-                                       hw_ste,
-                                       DR_CHUNK_SIZE_1)) {
-               mlx5dr_dbg(dmn, "Failed allocating table\n");
-               goto clean_ste_info;
-       }
-
-       cur_htbl->ctrl.num_of_valid_entries++;
-
-       mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
-                                                 ste_info, send_list, false);
-
-       return 0;
-
-clean_ste_info:
-       mlx5dr_send_info_free(ste_info);
-clean_ste_setting:
-       list_del_init(&ste->miss_list_node);
-       mlx5dr_htbl_put(cur_htbl);
-
-       return -ENOMEM;
-}
-
-static struct mlx5dr_ste *
-dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
-                         struct mlx5dr_rule_rx_tx *nic_rule,
-                         struct list_head *send_ste_list,
-                         struct mlx5dr_ste_htbl *cur_htbl,
-                         u8 *hw_ste,
-                         u8 ste_location,
-                         struct mlx5dr_ste_htbl **put_htbl)
-{
-       struct mlx5dr_matcher *matcher = rule->matcher;
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_matcher_rx_tx *nic_matcher;
-       struct mlx5dr_domain_rx_tx *nic_dmn;
-       struct mlx5dr_ste_htbl *new_htbl;
-       struct mlx5dr_ste *matched_ste;
-       struct list_head *miss_list;
-       bool skip_rehash = false;
-       struct mlx5dr_ste *ste;
-       int index;
-
-       nic_matcher = nic_rule->nic_matcher;
-       nic_dmn = nic_matcher->nic_tbl->nic_dmn;
-
-again:
-       index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
-       miss_list = &cur_htbl->chunk->miss_list[index];
-       ste = &cur_htbl->chunk->ste_arr[index];
-
-       if (mlx5dr_ste_is_not_used(ste)) {
-               if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
-                                              ste, ste_location,
-                                              hw_ste, miss_list,
-                                              send_ste_list))
-                       return NULL;
-       } else {
-               /* Hash table index in use, check if this ste is in the miss list */
-               matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
-               if (matched_ste) {
-                       /* If it is last STE in the chain, and has the same tag
-                        * it means that all the previous stes are the same,
-                        * if so, this rule is duplicated.
-                        */
-                       if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
-                               return matched_ste;
-
-                       mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
-               }
-
-               if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
-                       /* Hash table index in use, try to resize of the hash */
-                       skip_rehash = true;
-
-                       /* Hold the table till we update.
-                        * Release in dr_rule_create_rule()
-                        */
-                       *put_htbl = cur_htbl;
-                       mlx5dr_htbl_get(cur_htbl);
-
-                       new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
-                                                 ste_location, send_ste_list);
-                       if (!new_htbl) {
-                               mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
-                                          cur_htbl->chunk->size);
-                               mlx5dr_htbl_put(cur_htbl);
-                       } else {
-                               cur_htbl = new_htbl;
-                       }
-                       goto again;
-               } else {
-                       /* Hash table index in use, add another collision (miss) */
-                       ste = dr_rule_handle_collision(matcher,
-                                                      nic_matcher,
-                                                      ste,
-                                                      hw_ste,
-                                                      miss_list,
-                                                      send_ste_list);
-                       if (!ste) {
-                               mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
-                                          index);
-                               return NULL;
-                       }
-               }
-       }
-       return ste;
-}
-
-static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
-                                     u32 s_idx, u32 e_idx)
-{
-       u32 i;
-
-       for (i = s_idx; i < e_idx; i++) {
-               if (value[i] & ~mask[i]) {
-                       pr_info("Rule parameters contains a value not specified by mask\n");
-                       return false;
-               }
-       }
-       return true;
-}
-
-static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
-                          struct mlx5dr_match_parameters *value,
-                          struct mlx5dr_match_param *param)
-{
-       u8 match_criteria = matcher->match_criteria;
-       size_t value_size = value->match_sz;
-       u8 *mask_p = (u8 *)&matcher->mask;
-       u8 *param_p = (u8 *)param;
-       u32 s_idx, e_idx;
-
-       if (!value_size ||
-           (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
-               mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
-               return false;
-       }
-
-       mlx5dr_ste_copy_param(matcher->match_criteria, param, value, false);
-
-       if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
-               s_idx = offsetof(struct mlx5dr_match_param, outer);
-               e_idx = min(s_idx + sizeof(param->outer), value_size);
-
-               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-                       mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
-                       return false;
-               }
-       }
-
-       if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
-               s_idx = offsetof(struct mlx5dr_match_param, misc);
-               e_idx = min(s_idx + sizeof(param->misc), value_size);
-
-               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-                       mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
-                       return false;
-               }
-       }
-
-       if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
-               s_idx = offsetof(struct mlx5dr_match_param, inner);
-               e_idx = min(s_idx + sizeof(param->inner), value_size);
-
-               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-                       mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
-                       return false;
-               }
-       }
-
-       if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
-               s_idx = offsetof(struct mlx5dr_match_param, misc2);
-               e_idx = min(s_idx + sizeof(param->misc2), value_size);
-
-               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-                       mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
-                       return false;
-               }
-       }
-
-       if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
-               s_idx = offsetof(struct mlx5dr_match_param, misc3);
-               e_idx = min(s_idx + sizeof(param->misc3), value_size);
-
-               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-                       mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
-                       return false;
-               }
-       }
-
-       if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
-               s_idx = offsetof(struct mlx5dr_match_param, misc4);
-               e_idx = min(s_idx + sizeof(param->misc4), value_size);
-
-               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-                       mlx5dr_err(matcher->tbl->dmn,
-                                  "Rule misc4 parameters contains a value not specified by mask\n");
-                       return false;
-               }
-       }
-
-       if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
-               s_idx = offsetof(struct mlx5dr_match_param, misc5);
-               e_idx = min(s_idx + sizeof(param->misc5), value_size);
-
-               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
-                       mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contains a value not specified by mask\n");
-                       return false;
-               }
-       }
-       return true;
-}
-
-static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
-                                   struct mlx5dr_rule_rx_tx *nic_rule)
-{
-       /* Check if this nic rule was actually created, or was it skipped
-        * and only the other type of the RX/TX nic rule was created.
-        */
-       if (!nic_rule->last_rule_ste)
-               return 0;
-
-       mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
-       dr_rule_clean_rule_members(rule, nic_rule);
-
-       nic_rule->nic_matcher->rules--;
-       if (!nic_rule->nic_matcher->rules)
-               mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
-                                                  nic_rule->nic_matcher);
-
-       mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
-
-       return 0;
-}
-
-static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
-{
-       dr_rule_destroy_rule_nic(rule, &rule->rx);
-       dr_rule_destroy_rule_nic(rule, &rule->tx);
-       return 0;
-}
-
-static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
-{
-       struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
-
-       mlx5dr_dbg_rule_del(rule);
-
-       switch (dmn->type) {
-       case MLX5DR_DOMAIN_TYPE_NIC_RX:
-               dr_rule_destroy_rule_nic(rule, &rule->rx);
-               break;
-       case MLX5DR_DOMAIN_TYPE_NIC_TX:
-               dr_rule_destroy_rule_nic(rule, &rule->tx);
-               break;
-       case MLX5DR_DOMAIN_TYPE_FDB:
-               dr_rule_destroy_rule_fdb(rule);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       dr_rule_remove_action_members(rule);
-       kfree(rule);
-       return 0;
-}
-
-static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
-{
-       if (spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6)
-               return DR_RULE_IPV6;
-
-       return DR_RULE_IPV4;
-}
-
-static bool dr_rule_skip(enum mlx5dr_domain_type domain,
-                        enum mlx5dr_domain_nic_type nic_type,
-                        struct mlx5dr_match_param *mask,
-                        struct mlx5dr_match_param *value,
-                        u32 flow_source)
-{
-       bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
-
-       if (domain != MLX5DR_DOMAIN_TYPE_FDB)
-               return false;
-
-       if (mask->misc.source_port) {
-               if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
-                       return true;
-
-               if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
-                       return true;
-       }
-
-       if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
-               return true;
-
-       if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
-               return true;
-
-       return false;
-}
-
-static int
-dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
-                       struct mlx5dr_rule_rx_tx *nic_rule,
-                       struct mlx5dr_match_param *param,
-                       size_t num_actions,
-                       struct mlx5dr_action *actions[])
-{
-       u8 hw_ste_arr_optimized[DR_RULE_MAX_STE_CHAIN_OPTIMIZED * DR_STE_SIZE] = {};
-       struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
-       struct mlx5dr_matcher *matcher = rule->matcher;
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_matcher_rx_tx *nic_matcher;
-       struct mlx5dr_domain_rx_tx *nic_dmn;
-       struct mlx5dr_ste_htbl *htbl = NULL;
-       struct mlx5dr_ste_htbl *cur_htbl;
-       struct mlx5dr_ste *ste = NULL;
-       LIST_HEAD(send_ste_list);
-       bool hw_ste_arr_is_opt;
-       u8 *hw_ste_arr = NULL;
-       u32 new_hw_ste_arr_sz;
-       int ret, i;
-
-       nic_matcher = nic_rule->nic_matcher;
-       nic_dmn = nic_matcher->nic_tbl->nic_dmn;
-
-       if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param,
-                        rule->flow_source))
-               return 0;
-
-       mlx5dr_domain_nic_lock(nic_dmn);
-
-       ret = mlx5dr_matcher_select_builders(matcher,
-                                            nic_matcher,
-                                            dr_rule_get_ipv(&param->outer),
-                                            dr_rule_get_ipv(&param->inner));
-       if (ret)
-               goto err_unlock;
-
-       hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
-       if (likely(hw_ste_arr_is_opt)) {
-               hw_ste_arr = hw_ste_arr_optimized;
-       } else {
-               hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
-                                    DR_STE_SIZE, GFP_KERNEL);
-
-               if (!hw_ste_arr) {
-                       ret = -ENOMEM;
-                       goto err_unlock;
-               }
-       }
-
-       ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
-       if (ret)
-               goto free_hw_ste;
-
-       /* Set the tag values inside the ste array */
-       ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
-       if (ret)
-               goto remove_from_nic_tbl;
-
-       /* Set the actions values/addresses inside the ste array */
-       ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
-                                          num_actions, hw_ste_arr,
-                                          &new_hw_ste_arr_sz);
-       if (ret)
-               goto remove_from_nic_tbl;
-
-       cur_htbl = nic_matcher->s_htbl;
-
-       /* Go over the array of STEs, and build dr_ste accordingly.
-        * The loop is over only the builders which are equal or less to the
-        * number of stes, in case we have actions that lives in other stes.
-        */
-       for (i = 0; i < nic_matcher->num_of_builders; i++) {
-               /* Calculate CRC and keep new ste entry */
-               u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);
-
-               ste = dr_rule_handle_ste_branch(rule,
-                                               nic_rule,
-                                               &send_ste_list,
-                                               cur_htbl,
-                                               cur_hw_ste_ent,
-                                               i + 1,
-                                               &htbl);
-               if (!ste) {
-                       mlx5dr_err(dmn, "Failed creating next branch\n");
-                       ret = -ENOENT;
-                       goto free_rule;
-               }
-
-               cur_htbl = ste->next_htbl;
-
-               mlx5dr_ste_get(ste);
-               mlx5dr_rule_set_last_member(nic_rule, ste, true);
-       }
-
-       /* Connect actions */
-       ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
-                                        ste, hw_ste_arr, new_hw_ste_arr_sz);
-       if (ret) {
-               mlx5dr_dbg(dmn, "Failed apply actions\n");
-               goto free_rule;
-       }
-       ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed sending ste!\n");
-               goto free_rule;
-       }
-
-       if (htbl)
-               mlx5dr_htbl_put(htbl);
-
-       nic_matcher->rules++;
-
-       mlx5dr_domain_nic_unlock(nic_dmn);
-
-       if (unlikely(!hw_ste_arr_is_opt))
-               kfree(hw_ste_arr);
-
-       return 0;
-
-free_rule:
-       dr_rule_clean_rule_members(rule, nic_rule);
-       /* Clean all ste_info's */
-       list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
-               list_del(&ste_info->send_list);
-               mlx5dr_send_info_free(ste_info);
-       }
-
-remove_from_nic_tbl:
-       if (!nic_matcher->rules)
-               mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
-
-free_hw_ste:
-       if (!hw_ste_arr_is_opt)
-               kfree(hw_ste_arr);
-
-err_unlock:
-       mlx5dr_domain_nic_unlock(nic_dmn);
-
-       return ret;
-}
-
-static int
-dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
-                       struct mlx5dr_match_param *param,
-                       size_t num_actions,
-                       struct mlx5dr_action *actions[])
-{
-       struct mlx5dr_match_param copy_param = {};
-       int ret;
-
-       /* Copy match_param since they will be consumed during the first
-        * nic_rule insertion.
-        */
-       memcpy(&copy_param, param, sizeof(struct mlx5dr_match_param));
-
-       ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
-                                     num_actions, actions);
-       if (ret)
-               return ret;
-
-       ret = dr_rule_create_rule_nic(rule, &rule->tx, &copy_param,
-                                     num_actions, actions);
-       if (ret)
-               goto destroy_rule_nic_rx;
-
-       return 0;
-
-destroy_rule_nic_rx:
-       dr_rule_destroy_rule_nic(rule, &rule->rx);
-       return ret;
-}
-
-static struct mlx5dr_rule *
-dr_rule_create_rule(struct mlx5dr_matcher *matcher,
-                   struct mlx5dr_match_parameters *value,
-                   size_t num_actions,
-                   struct mlx5dr_action *actions[],
-                   u32 flow_source)
-{
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_match_param param = {};
-       struct mlx5dr_rule *rule;
-       int ret;
-
-       if (!dr_rule_verify(matcher, value, &param))
-               return NULL;
-
-       rule = kzalloc(sizeof(*rule), GFP_KERNEL);
-       if (!rule)
-               return NULL;
-
-       rule->matcher = matcher;
-       rule->flow_source = flow_source;
-       INIT_LIST_HEAD(&rule->rule_actions_list);
-
-       ret = dr_rule_add_action_members(rule, num_actions, actions);
-       if (ret)
-               goto free_rule;
-
-       switch (dmn->type) {
-       case MLX5DR_DOMAIN_TYPE_NIC_RX:
-               rule->rx.nic_matcher = &matcher->rx;
-               ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
-                                             num_actions, actions);
-               break;
-       case MLX5DR_DOMAIN_TYPE_NIC_TX:
-               rule->tx.nic_matcher = &matcher->tx;
-               ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
-                                             num_actions, actions);
-               break;
-       case MLX5DR_DOMAIN_TYPE_FDB:
-               rule->rx.nic_matcher = &matcher->rx;
-               rule->tx.nic_matcher = &matcher->tx;
-               ret = dr_rule_create_rule_fdb(rule, &param,
-                                             num_actions, actions);
-               break;
-       default:
-               ret = -EINVAL;
-               break;
-       }
-
-       if (ret)
-               goto remove_action_members;
-
-       INIT_LIST_HEAD(&rule->dbg_node);
-       mlx5dr_dbg_rule_add(rule);
-       return rule;
-
-remove_action_members:
-       dr_rule_remove_action_members(rule);
-free_rule:
-       kfree(rule);
-       mlx5dr_err(dmn, "Failed creating rule\n");
-       return NULL;
-}
-
-struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
-                                      struct mlx5dr_match_parameters *value,
-                                      size_t num_actions,
-                                      struct mlx5dr_action *actions[],
-                                      u32 flow_source)
-{
-       struct mlx5dr_rule *rule;
-
-       refcount_inc(&matcher->refcount);
-
-       rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
-       if (!rule)
-               refcount_dec(&matcher->refcount);
-
-       return rule;
-}
-
-int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
-{
-       struct mlx5dr_matcher *matcher = rule->matcher;
-       int ret;
-
-       ret = dr_rule_destroy_rule(rule);
-       if (!ret)
-               refcount_dec(&matcher->refcount);
-
-       return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
deleted file mode 100644 (file)
index 6fa06ba..0000000
+++ /dev/null
@@ -1,1368 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#include <linux/smp.h>
-#include "dr_types.h"
-
-#define QUEUE_SIZE 128
-#define SIGNAL_PER_DIV_QUEUE 16
-#define TH_NUMS_TO_DRAIN 2
-#define DR_SEND_INFO_POOL_SIZE 1000
-
-enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
-
-struct dr_data_seg {
-       u64 addr;
-       u32 length;
-       u32 lkey;
-       unsigned int send_flags;
-};
-
-enum send_info_type {
-       WRITE_ICM = 0,
-       GTA_ARG   = 1,
-};
-
-struct postsend_info {
-       enum send_info_type type;
-       struct dr_data_seg write;
-       struct dr_data_seg read;
-       u64 remote_addr;
-       u32 rkey;
-};
-
-struct dr_qp_rtr_attr {
-       struct mlx5dr_cmd_gid_attr dgid_attr;
-       enum ib_mtu mtu;
-       u32 qp_num;
-       u16 port_num;
-       u8 min_rnr_timer;
-       u8 sgid_index;
-       u16 udp_src_port;
-       u8 fl:1;
-};
-
-struct dr_qp_rts_attr {
-       u8 timeout;
-       u8 retry_cnt;
-       u8 rnr_retry;
-};
-
-struct dr_qp_init_attr {
-       u32 cqn;
-       u32 pdn;
-       u32 max_send_wr;
-       struct mlx5_uars_page *uar;
-       u8 isolate_vl_tc:1;
-};
-
-struct mlx5dr_send_info_pool_obj {
-       struct mlx5dr_ste_send_info ste_send_info;
-       struct mlx5dr_send_info_pool *pool;
-       struct list_head list_node;
-};
-
-struct mlx5dr_send_info_pool {
-       struct list_head free_list;
-};
-
-static int dr_send_info_pool_fill(struct mlx5dr_send_info_pool *pool)
-{
-       struct mlx5dr_send_info_pool_obj *pool_obj, *tmp_pool_obj;
-       int i;
-
-       for (i = 0; i < DR_SEND_INFO_POOL_SIZE; i++) {
-               pool_obj = kzalloc(sizeof(*pool_obj), GFP_KERNEL);
-               if (!pool_obj)
-                       goto clean_pool;
-
-               pool_obj->pool = pool;
-               list_add_tail(&pool_obj->list_node, &pool->free_list);
-       }
-
-       return 0;
-
-clean_pool:
-       list_for_each_entry_safe(pool_obj, tmp_pool_obj, &pool->free_list, list_node) {
-               list_del(&pool_obj->list_node);
-               kfree(pool_obj);
-       }
-
-       return -ENOMEM;
-}
-
-static void dr_send_info_pool_destroy(struct mlx5dr_send_info_pool *pool)
-{
-       struct mlx5dr_send_info_pool_obj *pool_obj, *tmp_pool_obj;
-
-       list_for_each_entry_safe(pool_obj, tmp_pool_obj, &pool->free_list, list_node) {
-               list_del(&pool_obj->list_node);
-               kfree(pool_obj);
-       }
-
-       kfree(pool);
-}
-
-void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn)
-{
-       dr_send_info_pool_destroy(dmn->send_info_pool_tx);
-       dr_send_info_pool_destroy(dmn->send_info_pool_rx);
-}
-
-static struct mlx5dr_send_info_pool *dr_send_info_pool_create(void)
-{
-       struct mlx5dr_send_info_pool *pool;
-       int ret;
-
-       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-       if (!pool)
-               return NULL;
-
-       INIT_LIST_HEAD(&pool->free_list);
-
-       ret = dr_send_info_pool_fill(pool);
-       if (ret) {
-               kfree(pool);
-               return NULL;
-       }
-
-       return pool;
-}
-
-int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn)
-{
-       dmn->send_info_pool_rx = dr_send_info_pool_create();
-       if (!dmn->send_info_pool_rx)
-               return -ENOMEM;
-
-       dmn->send_info_pool_tx = dr_send_info_pool_create();
-       if (!dmn->send_info_pool_tx) {
-               dr_send_info_pool_destroy(dmn->send_info_pool_rx);
-               return -ENOMEM;
-       }
-
-       return 0;
-}
-
-struct mlx5dr_ste_send_info
-*mlx5dr_send_info_alloc(struct mlx5dr_domain *dmn,
-                       enum mlx5dr_domain_nic_type nic_type)
-{
-       struct mlx5dr_send_info_pool_obj *pool_obj;
-       struct mlx5dr_send_info_pool *pool;
-       int ret;
-
-       pool = nic_type == DR_DOMAIN_NIC_TYPE_RX ? dmn->send_info_pool_rx :
-                                                  dmn->send_info_pool_tx;
-
-       if (unlikely(list_empty(&pool->free_list))) {
-               ret = dr_send_info_pool_fill(pool);
-               if (ret)
-                       return NULL;
-       }
-
-       pool_obj = list_first_entry_or_null(&pool->free_list,
-                                           struct mlx5dr_send_info_pool_obj,
-                                           list_node);
-
-       if (likely(pool_obj)) {
-               list_del_init(&pool_obj->list_node);
-       } else {
-               WARN_ONCE(!pool_obj, "Failed getting ste send info obj from pool");
-               return NULL;
-       }
-
-       return &pool_obj->ste_send_info;
-}
-
-void mlx5dr_send_info_free(struct mlx5dr_ste_send_info *ste_send_info)
-{
-       struct mlx5dr_send_info_pool_obj *pool_obj;
-
-       pool_obj = container_of(ste_send_info,
-                               struct mlx5dr_send_info_pool_obj,
-                               ste_send_info);
-
-       list_add(&pool_obj->list_node, &pool_obj->pool->free_list);
-}
-
-static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
-{
-       unsigned int idx;
-       u8 opcode;
-
-       opcode = get_cqe_opcode(cqe64);
-       if (opcode == MLX5_CQE_REQ_ERR) {
-               idx = be16_to_cpu(cqe64->wqe_counter) &
-                       (dr_cq->qp->sq.wqe_cnt - 1);
-               dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
-       } else if (opcode == MLX5_CQE_RESP_ERR) {
-               ++dr_cq->qp->sq.cc;
-       } else {
-               idx = be16_to_cpu(cqe64->wqe_counter) &
-                       (dr_cq->qp->sq.wqe_cnt - 1);
-               dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
-
-               return CQ_OK;
-       }
-
-       return CQ_POLL_ERR;
-}
-
-static int dr_cq_poll_one(struct mlx5dr_cq *dr_cq)
-{
-       struct mlx5_cqe64 *cqe64;
-       int err;
-
-       cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq);
-       if (!cqe64) {
-               if (unlikely(dr_cq->mdev->state ==
-                            MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
-                       mlx5_core_dbg_once(dr_cq->mdev,
-                                          "Polling CQ while device is shutting down\n");
-                       return CQ_POLL_ERR;
-               }
-               return CQ_EMPTY;
-       }
-
-       mlx5_cqwq_pop(&dr_cq->wq);
-       err = dr_parse_cqe(dr_cq, cqe64);
-       mlx5_cqwq_update_db_record(&dr_cq->wq);
-
-       return err;
-}
-
-static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
-{
-       int npolled;
-       int err = 0;
-
-       for (npolled = 0; npolled < ne; ++npolled) {
-               err = dr_cq_poll_one(dr_cq);
-               if (err != CQ_OK)
-                       break;
-       }
-
-       return err == CQ_POLL_ERR ? err : npolled;
-}
-
-static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
-                                        struct dr_qp_init_attr *attr)
-{
-       u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
-       u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
-       struct mlx5_wq_param wqp;
-       struct mlx5dr_qp *dr_qp;
-       int inlen;
-       void *qpc;
-       void *in;
-       int err;
-
-       dr_qp = kzalloc(sizeof(*dr_qp), GFP_KERNEL);
-       if (!dr_qp)
-               return NULL;
-
-       wqp.buf_numa_node = mdev->priv.numa_node;
-       wqp.db_numa_node = mdev->priv.numa_node;
-
-       dr_qp->rq.pc = 0;
-       dr_qp->rq.cc = 0;
-       dr_qp->rq.wqe_cnt = 256;
-       dr_qp->sq.pc = 0;
-       dr_qp->sq.cc = 0;
-       dr_qp->sq.head = 0;
-       dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr);
-
-       MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
-       MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
-       MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
-       err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
-                               &dr_qp->wq_ctrl);
-       if (err) {
-               mlx5_core_warn(mdev, "Can't create QP WQ\n");
-               goto err_wq;
-       }
-
-       dr_qp->sq.wqe_head = kcalloc(dr_qp->sq.wqe_cnt,
-                                    sizeof(dr_qp->sq.wqe_head[0]),
-                                    GFP_KERNEL);
-
-       if (!dr_qp->sq.wqe_head) {
-               mlx5_core_warn(mdev, "Can't allocate wqe head\n");
-               goto err_wqe_head;
-       }
-
-       inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
-               MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
-               dr_qp->wq_ctrl.buf.npages;
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in) {
-               err = -ENOMEM;
-               goto err_in;
-       }
-
-       qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
-       MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
-       MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
-       MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc);
-       MLX5_SET(qpc, qpc, pd, attr->pdn);
-       MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
-       MLX5_SET(qpc, qpc, log_page_size,
-                dr_qp->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
-       MLX5_SET(qpc, qpc, fre, 1);
-       MLX5_SET(qpc, qpc, rlky, 1);
-       MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
-       MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
-       MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
-       MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
-       MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
-       MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
-       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
-       MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma);
-       if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
-               MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
-       mlx5_fill_page_frag_array(&dr_qp->wq_ctrl.buf,
-                                 (__be64 *)MLX5_ADDR_OF(create_qp_in,
-                                                        in, pas));
-
-       MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
-       err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
-       dr_qp->qpn = MLX5_GET(create_qp_out, out, qpn);
-       kvfree(in);
-       if (err)
-               goto err_in;
-       dr_qp->uar = attr->uar;
-
-       return dr_qp;
-
-err_in:
-       kfree(dr_qp->sq.wqe_head);
-err_wqe_head:
-       mlx5_wq_destroy(&dr_qp->wq_ctrl);
-err_wq:
-       kfree(dr_qp);
-       return NULL;
-}
-
-static void dr_destroy_qp(struct mlx5_core_dev *mdev,
-                         struct mlx5dr_qp *dr_qp)
-{
-       u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
-
-       MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
-       MLX5_SET(destroy_qp_in, in, qpn, dr_qp->qpn);
-       mlx5_cmd_exec_in(mdev, destroy_qp, in);
-
-       kfree(dr_qp->sq.wqe_head);
-       mlx5_wq_destroy(&dr_qp->wq_ctrl);
-       kfree(dr_qp);
-}
-
-static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
-{
-       dma_wmb();
-       *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xffff);
-
-       /* After wmb() the hw aware of new work */
-       wmb();
-
-       mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET);
-}
-
-static void
-dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
-                                       u32 remote_addr,
-                                       struct dr_data_seg *data_seg,
-                                       int *size)
-{
-       struct mlx5_wqe_header_modify_argument_update_seg *wq_arg_seg;
-       struct mlx5_wqe_flow_update_ctrl_seg *wq_flow_seg;
-
-       wq_ctrl->general_id = cpu_to_be32(remote_addr);
-       wq_flow_seg = (void *)(wq_ctrl + 1);
-
-       /* mlx5_wqe_flow_update_ctrl_seg - all reserved */
-       memset(wq_flow_seg, 0, sizeof(*wq_flow_seg));
-       wq_arg_seg = (void *)(wq_flow_seg + 1);
-
-       memcpy(wq_arg_seg->argument_list,
-              (void *)(uintptr_t)data_seg->addr,
-              data_seg->length);
-
-       *size = (sizeof(*wq_ctrl) +      /* WQE ctrl segment */
-                sizeof(*wq_flow_seg) +  /* WQE flow update ctrl seg - reserved */
-                sizeof(*wq_arg_seg)) /  /* WQE hdr modify arg seg - data */
-               MLX5_SEND_WQE_DS;
-}
-
-static void
-dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
-                                 u64 remote_addr,
-                                 u32 rkey,
-                                 struct dr_data_seg *data_seg,
-                                 unsigned int *size)
-{
-       struct mlx5_wqe_raddr_seg *wq_raddr;
-       struct mlx5_wqe_data_seg *wq_dseg;
-
-       wq_raddr = (void *)(wq_ctrl + 1);
-
-       wq_raddr->raddr = cpu_to_be64(remote_addr);
-       wq_raddr->rkey = cpu_to_be32(rkey);
-       wq_raddr->reserved = 0;
-
-       wq_dseg = (void *)(wq_raddr + 1);
-
-       wq_dseg->byte_count = cpu_to_be32(data_seg->length);
-       wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
-       wq_dseg->addr = cpu_to_be64(data_seg->addr);
-
-       *size = (sizeof(*wq_ctrl) +    /* WQE ctrl segment */
-                sizeof(*wq_dseg) +    /* WQE data segment */
-                sizeof(*wq_raddr)) /  /* WQE remote addr segment */
-               MLX5_SEND_WQE_DS;
-}
-
-static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
-                           struct dr_data_seg *data_seg)
-{
-       wq_ctrl->signature = 0;
-       wq_ctrl->rsvd[0] = 0;
-       wq_ctrl->rsvd[1] = 0;
-       wq_ctrl->fm_ce_se = data_seg->send_flags & IB_SEND_SIGNALED ?
-                               MLX5_WQE_CTRL_CQ_UPDATE : 0;
-       wq_ctrl->imm = 0;
-}
-
-static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
-                            u32 rkey, struct dr_data_seg *data_seg,
-                            u32 opcode, bool notify_hw)
-{
-       struct mlx5_wqe_ctrl_seg *wq_ctrl;
-       int opcode_mod = 0;
-       unsigned int size;
-       unsigned int idx;
-
-       idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);
-
-       wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
-       dr_set_ctrl_seg(wq_ctrl, data_seg);
-
-       switch (opcode) {
-       case MLX5_OPCODE_RDMA_READ:
-       case MLX5_OPCODE_RDMA_WRITE:
-               dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
-                                                 rkey, data_seg, &size);
-               break;
-       case MLX5_OPCODE_FLOW_TBL_ACCESS:
-               opcode_mod = MLX5_CMD_OP_MOD_UPDATE_HEADER_MODIFY_ARGUMENT;
-               dr_rdma_handle_flow_access_arg_segments(wq_ctrl, remote_addr,
-                                                       data_seg, &size);
-               break;
-       default:
-               WARN(true, "illegal opcode %d", opcode);
-               return;
-       }
-
-       /* --------------------------------------------------------
-        * |opcode_mod (8 bit)|wqe_index (16 bits)| opcod (8 bits)|
-        * --------------------------------------------------------
-        */
-       wq_ctrl->opmod_idx_opcode =
-               cpu_to_be32((opcode_mod << 24) |
-                           ((dr_qp->sq.pc & 0xffff) << 8) |
-                           opcode);
-       wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8);
-
-       dr_qp->sq.pc += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
-       dr_qp->sq.wqe_head[idx] = dr_qp->sq.head++;
-
-       if (notify_hw)
-               dr_cmd_notify_hw(dr_qp, wq_ctrl);
-}
-
-static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
-{
-       if (send_info->type == WRITE_ICM) {
-               dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
-                                &send_info->write, MLX5_OPCODE_RDMA_WRITE, false);
-               dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
-                                &send_info->read, MLX5_OPCODE_RDMA_READ, true);
-       } else { /* GTA_ARG */
-               dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
-                                &send_info->write, MLX5_OPCODE_FLOW_TBL_ACCESS, true);
-       }
-
-}
-
-/**
- * mlx5dr_send_fill_and_append_ste_send_info: Add data to be sent
- * with send_list parameters:
- *
- *     @ste:       The data that attached to this specific ste
- *     @size:      of data to write
- *     @offset:    of the data from start of the hw_ste entry
- *     @data:      data
- *     @ste_info:  ste to be sent with send_list
- *     @send_list: to append into it
- *     @copy_data: if true indicates that the data should be kept because
- *                 it's not backuped any where (like in re-hash).
- *                 if false, it lets the data to be updated after
- *                 it was added to the list.
- */
-void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
-                                              u16 offset, u8 *data,
-                                              struct mlx5dr_ste_send_info *ste_info,
-                                              struct list_head *send_list,
-                                              bool copy_data)
-{
-       ste_info->size = size;
-       ste_info->ste = ste;
-       ste_info->offset = offset;
-
-       if (copy_data) {
-               memcpy(ste_info->data_cont, data, size);
-               ste_info->data = ste_info->data_cont;
-       } else {
-               ste_info->data = data;
-       }
-
-       list_add_tail(&ste_info->send_list, send_list);
-}
-
-/* The function tries to consume one wc each time, unless the queue is full, in
- * that case, which means that the hw is behind the sw in a full queue len
- * the function will drain the cq till it empty.
- */
-static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
-                               struct mlx5dr_send_ring *send_ring)
-{
-       bool is_drain = false;
-       int ne;
-
-       if (send_ring->pending_wqe < send_ring->signal_th)
-               return 0;
-
-       /* Queue is full start drain it */
-       if (send_ring->pending_wqe >=
-           dmn->send_ring->signal_th * TH_NUMS_TO_DRAIN)
-               is_drain = true;
-
-       do {
-               ne = dr_poll_cq(send_ring->cq, 1);
-               if (unlikely(ne < 0)) {
-                       mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited",
-                                           send_ring->qp->qpn);
-                       send_ring->err_state = true;
-                       return ne;
-               } else if (ne == 1) {
-                       send_ring->pending_wqe -= send_ring->signal_th;
-               }
-       } while (ne == 1 ||
-                (is_drain && send_ring->pending_wqe  >= send_ring->signal_th));
-
-       return 0;
-}
-
-static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
-                                   struct postsend_info *send_info)
-{
-       send_ring->pending_wqe++;
-
-       if (send_ring->pending_wqe % send_ring->signal_th == 0)
-               send_info->write.send_flags |= IB_SEND_SIGNALED;
-       else
-               send_info->write.send_flags = 0;
-}
-
-static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
-                                  struct mlx5dr_send_ring *send_ring,
-                                  struct postsend_info *send_info)
-{
-       u32 buff_offset;
-
-       if (send_info->write.length > dmn->info.max_inline_size) {
-               buff_offset = (send_ring->tx_head &
-                              (dmn->send_ring->signal_th - 1)) *
-                             send_ring->max_post_send_size;
-               /* Copy to ring mr */
-               memcpy(send_ring->buf + buff_offset,
-                      (void *)(uintptr_t)send_info->write.addr,
-                      send_info->write.length);
-               send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset;
-               send_info->write.lkey = send_ring->mr->mkey;
-
-               send_ring->tx_head++;
-       }
-
-       send_ring->pending_wqe++;
-
-       if (send_ring->pending_wqe % send_ring->signal_th == 0)
-               send_info->write.send_flags |= IB_SEND_SIGNALED;
-
-       send_ring->pending_wqe++;
-       send_info->read.length = send_info->write.length;
-
-       /* Read into dedicated sync buffer */
-       send_info->read.addr = (uintptr_t)send_ring->sync_mr->dma_addr;
-       send_info->read.lkey = send_ring->sync_mr->mkey;
-
-       if (send_ring->pending_wqe % send_ring->signal_th == 0)
-               send_info->read.send_flags = IB_SEND_SIGNALED;
-       else
-               send_info->read.send_flags = 0;
-}
-
-static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
-                             struct mlx5dr_send_ring *send_ring,
-                             struct postsend_info *send_info)
-{
-       if (send_info->type == WRITE_ICM)
-               dr_fill_write_icm_segs(dmn, send_ring, send_info);
-       else /* args */
-               dr_fill_write_args_segs(send_ring, send_info);
-}
-
-static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
-                               struct postsend_info *send_info)
-{
-       struct mlx5dr_send_ring *send_ring = dmn->send_ring;
-       int ret;
-
-       if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
-                    send_ring->err_state)) {
-               mlx5_core_dbg_once(dmn->mdev,
-                                  "Skipping post send: QP err state: %d, device state: %d\n",
-                                  send_ring->err_state, dmn->mdev->state);
-               return 0;
-       }
-
-       spin_lock(&send_ring->lock);
-
-       ret = dr_handle_pending_wc(dmn, send_ring);
-       if (ret)
-               goto out_unlock;
-
-       dr_fill_data_segs(dmn, send_ring, send_info);
-       dr_post_send(send_ring->qp, send_info);
-
-out_unlock:
-       spin_unlock(&send_ring->lock);
-       return ret;
-}
-
-static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
-                                  struct mlx5dr_ste_htbl *htbl,
-                                  u8 **data,
-                                  u32 *byte_size,
-                                  int *iterations,
-                                  int *num_stes)
-{
-       u32 chunk_byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
-       int alloc_size;
-
-       if (chunk_byte_size > dmn->send_ring->max_post_send_size) {
-               *iterations = chunk_byte_size / dmn->send_ring->max_post_send_size;
-               *byte_size = dmn->send_ring->max_post_send_size;
-               alloc_size = *byte_size;
-               *num_stes = *byte_size / DR_STE_SIZE;
-       } else {
-               *iterations = 1;
-               *num_stes = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
-               alloc_size = *num_stes * DR_STE_SIZE;
-       }
-
-       *data = kvzalloc(alloc_size, GFP_KERNEL);
-       if (!*data)
-               return -ENOMEM;
-
-       return 0;
-}
-
-/**
- * mlx5dr_send_postsend_ste: write size bytes into offset from the hw cm.
- *
- *     @dmn:    Domain
- *     @ste:    The ste struct that contains the data (at
- *              least part of it)
- *     @data:   The real data to send size data
- *     @size:   for writing.
- *     @offset: The offset from the icm mapped data to
- *              start write to this for write only part of the
- *              buffer.
- *
- * Return: 0 on success.
- */
-int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
-                            u8 *data, u16 size, u16 offset)
-{
-       struct postsend_info send_info = {};
-
-       mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, data, size);
-
-       send_info.write.addr = (uintptr_t)data;
-       send_info.write.length = size;
-       send_info.write.lkey = 0;
-       send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
-       send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk);
-
-       return dr_postsend_icm_data(dmn, &send_info);
-}
-
-int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
-                             struct mlx5dr_ste_htbl *htbl,
-                             u8 *formatted_ste, u8 *mask)
-{
-       u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
-       int num_stes_per_iter;
-       int iterations;
-       u8 *data;
-       int ret;
-       int i;
-       int j;
-
-       ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
-                                     &iterations, &num_stes_per_iter);
-       if (ret)
-               return ret;
-
-       mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, formatted_ste, DR_STE_SIZE);
-
-       /* Send the data iteration times */
-       for (i = 0; i < iterations; i++) {
-               u32 ste_index = i * (byte_size / DR_STE_SIZE);
-               struct postsend_info send_info = {};
-
-               /* Copy all ste's on the data buffer
-                * need to add the bit_mask
-                */
-               for (j = 0; j < num_stes_per_iter; j++) {
-                       struct mlx5dr_ste *ste = &htbl->chunk->ste_arr[ste_index + j];
-                       u32 ste_off = j * DR_STE_SIZE;
-
-                       if (mlx5dr_ste_is_not_used(ste)) {
-                               memcpy(data + ste_off,
-                                      formatted_ste, DR_STE_SIZE);
-                       } else {
-                               /* Copy data */
-                               memcpy(data + ste_off,
-                                      htbl->chunk->hw_ste_arr +
-                                      DR_STE_SIZE_REDUCED * (ste_index + j),
-                                      DR_STE_SIZE_REDUCED);
-                               /* Copy bit_mask */
-                               memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
-                                      mask, DR_STE_SIZE_MASK);
-                               /* Only when we have mask we need to re-arrange the STE */
-                               mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx,
-                                                               data + (j * DR_STE_SIZE),
-                                                               DR_STE_SIZE);
-                       }
-               }
-
-               send_info.write.addr = (uintptr_t)data;
-               send_info.write.length = byte_size;
-               send_info.write.lkey = 0;
-               send_info.remote_addr =
-                       mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
-               send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
-
-               ret = dr_postsend_icm_data(dmn, &send_info);
-               if (ret)
-                       goto out_free;
-       }
-
-out_free:
-       kvfree(data);
-       return ret;
-}
-
-/* Initialize htble with default STEs */
-int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
-                                       struct mlx5dr_ste_htbl *htbl,
-                                       u8 *ste_init_data,
-                                       bool update_hw_ste)
-{
-       u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
-       int iterations;
-       int num_stes;
-       u8 *copy_dst;
-       u8 *data;
-       int ret;
-       int i;
-
-       ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
-                                     &iterations, &num_stes);
-       if (ret)
-               return ret;
-
-       if (update_hw_ste) {
-               /* Copy the reduced STE to hash table ste_arr */
-               for (i = 0; i < num_stes; i++) {
-                       copy_dst = htbl->chunk->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
-                       memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
-               }
-       }
-
-       mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, ste_init_data, DR_STE_SIZE);
-
-       /* Copy the same STE on the data buffer */
-       for (i = 0; i < num_stes; i++) {
-               copy_dst = data + i * DR_STE_SIZE;
-               memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
-       }
-
-       /* Send the data iteration times */
-       for (i = 0; i < iterations; i++) {
-               u8 ste_index = i * (byte_size / DR_STE_SIZE);
-               struct postsend_info send_info = {};
-
-               send_info.write.addr = (uintptr_t)data;
-               send_info.write.length = byte_size;
-               send_info.write.lkey = 0;
-               send_info.remote_addr =
-                       mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
-               send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
-
-               ret = dr_postsend_icm_data(dmn, &send_info);
-               if (ret)
-                       goto out_free;
-       }
-
-out_free:
-       kvfree(data);
-       return ret;
-}
-
-int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
-                               struct mlx5dr_action *action)
-{
-       struct postsend_info send_info = {};
-
-       send_info.write.addr = (uintptr_t)action->rewrite->data;
-       send_info.write.length = action->rewrite->num_of_actions *
-                                DR_MODIFY_ACTION_SIZE;
-       send_info.write.lkey = 0;
-       send_info.remote_addr =
-               mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk);
-       send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk);
-
-       return dr_postsend_icm_data(dmn, &send_info);
-}
-
-int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
-                                struct mlx5dr_icm_chunk *chunk,
-                                u16 num_of_actions,
-                                u8 *data)
-{
-       struct postsend_info send_info = {};
-       int ret;
-
-       send_info.write.addr = (uintptr_t)data;
-       send_info.write.length = num_of_actions * DR_MODIFY_ACTION_SIZE;
-       send_info.remote_addr = mlx5dr_icm_pool_get_chunk_mr_addr(chunk);
-       send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(chunk);
-
-       ret = dr_postsend_icm_data(dmn, &send_info);
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id,
-                             u16 num_of_actions, u8 *actions_data)
-{
-       int data_len, iter = 0, cur_sent;
-       u64 addr;
-       int ret;
-
-       addr = (uintptr_t)actions_data;
-       data_len = num_of_actions * DR_MODIFY_ACTION_SIZE;
-
-       do {
-               struct postsend_info send_info = {};
-
-               send_info.type = GTA_ARG;
-               send_info.write.addr = addr;
-               cur_sent = min_t(u32, data_len, DR_ACTION_CACHE_LINE_SIZE);
-               send_info.write.length = cur_sent;
-               send_info.write.lkey = 0;
-               send_info.remote_addr = arg_id + iter;
-
-               ret = dr_postsend_icm_data(dmn, &send_info);
-               if (ret)
-                       goto out;
-
-               iter++;
-               addr += cur_sent;
-               data_len -= cur_sent;
-       } while (data_len > 0);
-
-out:
-       return ret;
-}
-
-static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
-                                struct mlx5dr_qp *dr_qp,
-                                int port)
-{
-       u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
-       void *qpc;
-
-       qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
-
-       MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, port);
-       MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
-       MLX5_SET(qpc, qpc, rre, 1);
-       MLX5_SET(qpc, qpc, rwe, 1);
-
-       MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
-       MLX5_SET(rst2init_qp_in, in, qpn, dr_qp->qpn);
-
-       return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
-}
-
-static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
-                                   struct mlx5dr_qp *dr_qp,
-                                   struct dr_qp_rts_attr *attr)
-{
-       u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
-       void *qpc;
-
-       qpc  = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);
-
-       MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
-
-       MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
-       MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
-       MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
-
-       MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
-       MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
-
-       return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
-}
-
-static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
-                                    struct mlx5dr_qp *dr_qp,
-                                    struct dr_qp_rtr_attr *attr)
-{
-       u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
-       void *qpc;
-
-       qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);
-
-       MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);
-
-       MLX5_SET(qpc, qpc, mtu, attr->mtu);
-       MLX5_SET(qpc, qpc, log_msg_max, DR_CHUNK_SIZE_MAX - 1);
-       MLX5_SET(qpc, qpc, remote_qpn, attr->qp_num);
-       memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
-              attr->dgid_attr.mac, sizeof(attr->dgid_attr.mac));
-       memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
-              attr->dgid_attr.gid, sizeof(attr->dgid_attr.gid));
-       MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
-                attr->sgid_index);
-
-       if (attr->dgid_attr.roce_ver == MLX5_ROCE_VERSION_2)
-               MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
-                        attr->udp_src_port);
-
-       MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
-       MLX5_SET(qpc, qpc, primary_address_path.fl, attr->fl);
-       MLX5_SET(qpc, qpc, min_rnr_nak, 1);
-
-       MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
-       MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);
-
-       return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
-}
-
-static bool dr_send_allow_fl(struct mlx5dr_cmd_caps *caps)
-{
-       /* Check whether RC RoCE QP creation with force loopback is allowed.
-        * There are two separate capability bits for this:
-        *  - force loopback when RoCE is enabled
-        *  - force loopback when RoCE is disabled
-        */
-       return ((caps->roce_caps.roce_en &&
-                caps->roce_caps.fl_rc_qp_when_roce_enabled) ||
-               (!caps->roce_caps.roce_en &&
-                caps->roce_caps.fl_rc_qp_when_roce_disabled));
-}
-
-static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
-{
-       struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
-       struct dr_qp_rts_attr rts_attr = {};
-       struct dr_qp_rtr_attr rtr_attr = {};
-       enum ib_mtu mtu = IB_MTU_1024;
-       u16 gid_index = 0;
-       int port = 1;
-       int ret;
-
-       /* Init */
-       ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed modify QP rst2init\n");
-               return ret;
-       }
-
-       /* RTR */
-       rtr_attr.mtu            = mtu;
-       rtr_attr.qp_num         = dr_qp->qpn;
-       rtr_attr.min_rnr_timer  = 12;
-       rtr_attr.port_num       = port;
-       rtr_attr.udp_src_port   = dmn->info.caps.roce_min_src_udp;
-
-       /* If QP creation with force loopback is allowed, then there
-        * is no need for GID index when creating the QP.
-        * Otherwise we query GID attributes and use GID index.
-        */
-       rtr_attr.fl = dr_send_allow_fl(&dmn->info.caps);
-       if (!rtr_attr.fl) {
-               ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index,
-                                          &rtr_attr.dgid_attr);
-               if (ret)
-                       return ret;
-
-               rtr_attr.sgid_index = gid_index;
-       }
-
-       ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
-               return ret;
-       }
-
-       /* RTS */
-       rts_attr.timeout        = 14;
-       rts_attr.retry_cnt      = 7;
-       rts_attr.rnr_retry      = 7;
-
-       ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed modify QP rtr2rts\n");
-               return ret;
-       }
-
-       return 0;
-}
-
-static void dr_cq_complete(struct mlx5_core_cq *mcq,
-                          struct mlx5_eqe *eqe)
-{
-       pr_err("CQ completion CQ: #%u\n", mcq->cqn);
-}
-
-static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
-                                     struct mlx5_uars_page *uar,
-                                     size_t ncqe)
-{
-       u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};
-       u32 out[MLX5_ST_SZ_DW(create_cq_out)];
-       struct mlx5_wq_param wqp;
-       struct mlx5_cqe64 *cqe;
-       struct mlx5dr_cq *cq;
-       int inlen, err, eqn;
-       void *cqc, *in;
-       __be64 *pas;
-       int vector;
-       u32 i;
-
-       cq = kzalloc(sizeof(*cq), GFP_KERNEL);
-       if (!cq)
-               return NULL;
-
-       ncqe = roundup_pow_of_two(ncqe);
-       MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));
-
-       wqp.buf_numa_node = mdev->priv.numa_node;
-       wqp.db_numa_node = mdev->priv.numa_node;
-
-       err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq,
-                              &cq->wq_ctrl);
-       if (err)
-               goto out;
-
-       for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
-               cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
-               cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
-       }
-
-       inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-               sizeof(u64) * cq->wq_ctrl.buf.npages;
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if (!in)
-               goto err_cqwq;
-
-       vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
-       err = mlx5_comp_eqn_get(mdev, vector, &eqn);
-       if (err) {
-               kvfree(in);
-               goto err_cqwq;
-       }
-
-       cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
-       MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
-       MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
-       MLX5_SET(cqc, cqc, uar_page, uar->index);
-       MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
-                MLX5_ADAPTER_PAGE_SHIFT);
-       MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
-
-       pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
-       mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
-
-       cq->mcq.comp  = dr_cq_complete;
-
-       err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
-       kvfree(in);
-
-       if (err)
-               goto err_cqwq;
-
-       cq->mcq.cqe_sz = 64;
-       cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
-       cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
-       *cq->mcq.set_ci_db = 0;
-
-       /* set no-zero value, in order to avoid the HW to run db-recovery on
-        * CQ that used in polling mode.
-        */
-       *cq->mcq.arm_db = cpu_to_be32(2 << 28);
-
-       cq->mcq.vector = 0;
-       cq->mcq.uar = uar;
-       cq->mdev = mdev;
-
-       return cq;
-
-err_cqwq:
-       mlx5_wq_destroy(&cq->wq_ctrl);
-out:
-       kfree(cq);
-       return NULL;
-}
-
-static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq)
-{
-       mlx5_core_destroy_cq(mdev, &cq->mcq);
-       mlx5_wq_destroy(&cq->wq_ctrl);
-       kfree(cq);
-}
-
-static int dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
-{
-       u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
-       void *mkc;
-
-       mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
-       MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
-       MLX5_SET(mkc, mkc, a, 1);
-       MLX5_SET(mkc, mkc, rw, 1);
-       MLX5_SET(mkc, mkc, rr, 1);
-       MLX5_SET(mkc, mkc, lw, 1);
-       MLX5_SET(mkc, mkc, lr, 1);
-
-       MLX5_SET(mkc, mkc, pd, pdn);
-       MLX5_SET(mkc, mkc, length64, 1);
-       MLX5_SET(mkc, mkc, qpn, 0xffffff);
-
-       return mlx5_core_create_mkey(mdev, mkey, in, sizeof(in));
-}
-
-static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
-                                  u32 pdn, void *buf, size_t size)
-{
-       struct mlx5dr_mr *mr = kzalloc(sizeof(*mr), GFP_KERNEL);
-       struct device *dma_device;
-       dma_addr_t dma_addr;
-       int err;
-
-       if (!mr)
-               return NULL;
-
-       dma_device = mlx5_core_dma_dev(mdev);
-       dma_addr = dma_map_single(dma_device, buf, size,
-                                 DMA_BIDIRECTIONAL);
-       err = dma_mapping_error(dma_device, dma_addr);
-       if (err) {
-               mlx5_core_warn(mdev, "Can't dma buf\n");
-               kfree(mr);
-               return NULL;
-       }
-
-       err = dr_create_mkey(mdev, pdn, &mr->mkey);
-       if (err) {
-               mlx5_core_warn(mdev, "Can't create mkey\n");
-               dma_unmap_single(dma_device, dma_addr, size,
-                                DMA_BIDIRECTIONAL);
-               kfree(mr);
-               return NULL;
-       }
-
-       mr->dma_addr = dma_addr;
-       mr->size = size;
-       mr->addr = buf;
-
-       return mr;
-}
-
-static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
-{
-       mlx5_core_destroy_mkey(mdev, mr->mkey);
-       dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size,
-                        DMA_BIDIRECTIONAL);
-       kfree(mr);
-}
-
-int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
-{
-       struct dr_qp_init_attr init_attr = {};
-       int cq_size;
-       int size;
-       int ret;
-
-       dmn->send_ring = kzalloc(sizeof(*dmn->send_ring), GFP_KERNEL);
-       if (!dmn->send_ring)
-               return -ENOMEM;
-
-       cq_size = QUEUE_SIZE + 1;
-       dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
-       if (!dmn->send_ring->cq) {
-               mlx5dr_err(dmn, "Failed creating CQ\n");
-               ret = -ENOMEM;
-               goto free_send_ring;
-       }
-
-       init_attr.cqn = dmn->send_ring->cq->mcq.cqn;
-       init_attr.pdn = dmn->pdn;
-       init_attr.uar = dmn->uar;
-       init_attr.max_send_wr = QUEUE_SIZE;
-
-       /* Isolated VL is applicable only if force loopback is supported */
-       if (dr_send_allow_fl(&dmn->info.caps))
-               init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc;
-
-       spin_lock_init(&dmn->send_ring->lock);
-
-       dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
-       if (!dmn->send_ring->qp)  {
-               mlx5dr_err(dmn, "Failed creating QP\n");
-               ret = -ENOMEM;
-               goto clean_cq;
-       }
-
-       dmn->send_ring->cq->qp = dmn->send_ring->qp;
-
-       dmn->info.max_send_wr = QUEUE_SIZE;
-       dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
-                                       DR_STE_SIZE);
-
-       dmn->send_ring->signal_th = dmn->info.max_send_wr /
-               SIGNAL_PER_DIV_QUEUE;
-
-       /* Prepare qp to be used */
-       ret = dr_prepare_qp_to_rts(dmn);
-       if (ret)
-               goto clean_qp;
-
-       dmn->send_ring->max_post_send_size =
-               mlx5dr_icm_pool_chunk_size_to_byte(DR_CHUNK_SIZE_1K,
-                                                  DR_ICM_TYPE_STE);
-
-       /* Allocating the max size as a buffer for writing */
-       size = dmn->send_ring->signal_th * dmn->send_ring->max_post_send_size;
-       dmn->send_ring->buf = kzalloc(size, GFP_KERNEL);
-       if (!dmn->send_ring->buf) {
-               ret = -ENOMEM;
-               goto clean_qp;
-       }
-
-       dmn->send_ring->buf_size = size;
-
-       dmn->send_ring->mr = dr_reg_mr(dmn->mdev,
-                                      dmn->pdn, dmn->send_ring->buf, size);
-       if (!dmn->send_ring->mr) {
-               ret = -ENOMEM;
-               goto free_mem;
-       }
-
-       dmn->send_ring->sync_buff = kzalloc(dmn->send_ring->max_post_send_size,
-                                           GFP_KERNEL);
-       if (!dmn->send_ring->sync_buff) {
-               ret = -ENOMEM;
-               goto clean_mr;
-       }
-
-       dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
-                                           dmn->pdn, dmn->send_ring->sync_buff,
-                                           dmn->send_ring->max_post_send_size);
-       if (!dmn->send_ring->sync_mr) {
-               ret = -ENOMEM;
-               goto free_sync_mem;
-       }
-
-       return 0;
-
-free_sync_mem:
-       kfree(dmn->send_ring->sync_buff);
-clean_mr:
-       dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);
-free_mem:
-       kfree(dmn->send_ring->buf);
-clean_qp:
-       dr_destroy_qp(dmn->mdev, dmn->send_ring->qp);
-clean_cq:
-       dr_destroy_cq(dmn->mdev, dmn->send_ring->cq);
-free_send_ring:
-       kfree(dmn->send_ring);
-
-       return ret;
-}
-
-void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
-                          struct mlx5dr_send_ring *send_ring)
-{
-       dr_destroy_qp(dmn->mdev, send_ring->qp);
-       dr_destroy_cq(dmn->mdev, send_ring->cq);
-       dr_dereg_mr(dmn->mdev, send_ring->sync_mr);
-       dr_dereg_mr(dmn->mdev, send_ring->mr);
-       kfree(send_ring->buf);
-       kfree(send_ring->sync_buff);
-       kfree(send_ring);
-}
-
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
-{
-       struct mlx5dr_send_ring *send_ring = dmn->send_ring;
-       struct postsend_info send_info = {};
-       u8 data[DR_STE_SIZE];
-       int num_of_sends_req;
-       int ret;
-       int i;
-
-       /* Sending this amount of requests makes sure we will get drain */
-       num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2;
-
-       /* Send fake requests forcing the last to be signaled */
-       send_info.write.addr = (uintptr_t)data;
-       send_info.write.length = DR_STE_SIZE;
-       send_info.write.lkey = 0;
-       /* Using the sync_mr in order to write/read */
-       send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
-       send_info.rkey = send_ring->sync_mr->mkey;
-
-       for (i = 0; i < num_of_sends_req; i++) {
-               ret = dr_postsend_icm_data(dmn, &send_info);
-               if (ret)
-                       return ret;
-       }
-
-       spin_lock(&send_ring->lock);
-       ret = dr_handle_pending_wc(dmn, send_ring);
-       spin_unlock(&send_ring->lock);
-
-       return ret;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
deleted file mode 100644 (file)
index e94fbb0..0000000
+++ /dev/null
@@ -1,1463 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#include <linux/types.h>
-#include <linux/crc32.h>
-#include "dr_ste.h"
-
-struct dr_hw_ste_format {
-       u8 ctrl[DR_STE_SIZE_CTRL];
-       u8 tag[DR_STE_SIZE_TAG];
-       u8 mask[DR_STE_SIZE_MASK];
-};
-
-static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
-{
-       u32 crc = crc32(0, input_data, length);
-
-       return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
-                           ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
-}
-
-bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
-{
-       return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
-}
-
-u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
-{
-       u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
-       struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
-       u8 masked[DR_STE_SIZE_TAG] = {};
-       u32 crc32, index;
-       u16 bit;
-       int i;
-
-       /* Don't calculate CRC if the result is predicted */
-       if (num_entries == 1 || htbl->byte_mask == 0)
-               return 0;
-
-       /* Mask tag using byte mask, bit per byte */
-       bit = 1 << (DR_STE_SIZE_TAG - 1);
-       for (i = 0; i < DR_STE_SIZE_TAG; i++) {
-               if (htbl->byte_mask & bit)
-                       masked[i] = hw_ste->tag[i];
-
-               bit = bit >> 1;
-       }
-
-       crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
-       index = crc32 & (num_entries - 1);
-
-       return index;
-}
-
-u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
-{
-       u16 byte_mask = 0;
-       int i;
-
-       for (i = 0; i < DR_STE_SIZE_MASK; i++) {
-               byte_mask = byte_mask << 1;
-               if (bit_mask[i] == 0xff)
-                       byte_mask |= 1;
-       }
-       return byte_mask;
-}
-
-static u8 *dr_ste_get_tag(u8 *hw_ste_p)
-{
-       struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
-
-       return hw_ste->tag;
-}
-
-void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
-{
-       struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
-
-       memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
-}
-
-static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
-{
-       memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
-       memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
-}
-
-static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
-{
-       hw_ste->tag[0] = 0xdc;
-       hw_ste->mask[0] = 0;
-}
-
-bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx,
-                                u8 *hw_ste_p)
-{
-       if (!ste_ctx->is_miss_addr_set)
-               return false;
-
-       /* check if miss address is already set for this type of STE */
-       return ste_ctx->is_miss_addr_set(hw_ste_p);
-}
-
-void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
-                             u8 *hw_ste_p, u64 miss_addr)
-{
-       ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
-}
-
-static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
-                                   u8 *hw_ste, u64 miss_addr)
-{
-       ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
-       ste_ctx->set_miss_addr(hw_ste, miss_addr);
-       dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
-}
-
-void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
-                            u8 *hw_ste, u64 icm_addr, u32 ht_size)
-{
-       ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
-}
-
-u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
-{
-       u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
-       u32 index = ste - ste->htbl->chunk->ste_arr;
-
-       return base_icm_addr + DR_STE_SIZE * index;
-}
-
-u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
-{
-       u32 index = ste - ste->htbl->chunk->ste_arr;
-
-       return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
-}
-
-u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
-{
-       u64 index = ste - ste->htbl->chunk->ste_arr;
-
-       return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
-}
-
-struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
-{
-       u32 index = ste - ste->htbl->chunk->ste_arr;
-
-       return &ste->htbl->chunk->miss_list[index];
-}
-
-static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
-                                  u8 *hw_ste,
-                                  struct mlx5dr_ste_htbl *next_htbl)
-{
-       struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
-
-       ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
-       ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
-       ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
-                             mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));
-
-       dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste);
-}
-
-bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
-                               u8 ste_location)
-{
-       return ste_location == nic_matcher->num_of_builders;
-}
-
-/* Replace relevant fields, except of:
- * htbl - keep the origin htbl
- * miss_list + list - already took the src from the list.
- * icm_addr/mr_addr - depends on the hosting table.
- *
- * Before:
- * | a | -> | b | -> | c | ->
- *
- * After:
- * | a | -> | c | ->
- * While the data that was in b copied to a.
- */
-static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
-{
-       memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src),
-              DR_STE_SIZE_REDUCED);
-       dst->next_htbl = src->next_htbl;
-       if (dst->next_htbl)
-               dst->next_htbl->pointing_ste = dst;
-
-       dst->refcount = src->refcount;
-}
-
-/* Free ste which is the head and the only one in miss_list */
-static void
-dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
-                      struct mlx5dr_ste *ste,
-                      struct mlx5dr_matcher_rx_tx *nic_matcher,
-                      struct mlx5dr_ste_send_info *ste_info_head,
-                      struct list_head *send_ste_list,
-                      struct mlx5dr_ste_htbl *stats_tbl)
-{
-       u8 tmp_data_ste[DR_STE_SIZE] = {};
-       u64 miss_addr;
-
-       miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
-
-       /* Use temp ste because dr_ste_always_miss_addr
-        * touches bit_mask area which doesn't exist at ste->hw_ste.
-        * Need to use a full-sized (DR_STE_SIZE) hw_ste.
-        */
-       memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
-       dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr);
-       memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED);
-
-       list_del_init(&ste->miss_list_node);
-
-       /* Write full STE size in order to have "always_miss" */
-       mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
-                                                 0, tmp_data_ste,
-                                                 ste_info_head,
-                                                 send_ste_list,
-                                                 true /* Copy data */);
-
-       stats_tbl->ctrl.num_of_valid_entries--;
-}
-
-/* Free ste which is the head but NOT the only one in miss_list:
- * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
- */
-static void
-dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
-                       struct mlx5dr_ste *ste,
-                       struct mlx5dr_ste *next_ste,
-                       struct mlx5dr_ste_send_info *ste_info_head,
-                       struct list_head *send_ste_list,
-                       struct mlx5dr_ste_htbl *stats_tbl)
-
-{
-       struct mlx5dr_ste_htbl *next_miss_htbl;
-       u8 hw_ste[DR_STE_SIZE] = {};
-       int sb_idx;
-
-       next_miss_htbl = next_ste->htbl;
-
-       /* Remove from the miss_list the next_ste before copy */
-       list_del_init(&next_ste->miss_list_node);
-
-       /* Move data from next into ste */
-       dr_ste_replace(ste, next_ste);
-
-       /* Update the rule on STE change */
-       mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);
-
-       /* Copy all 64 hw_ste bytes */
-       memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
-       sb_idx = ste->ste_chain_location - 1;
-       mlx5dr_ste_set_bit_mask(hw_ste,
-                               nic_matcher->ste_builder[sb_idx].bit_mask);
-
-       /* Del the htbl that contains the next_ste.
-        * The origin htbl stay with the same number of entries.
-        */
-       mlx5dr_htbl_put(next_miss_htbl);
-
-       mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
-                                                 0, hw_ste,
-                                                 ste_info_head,
-                                                 send_ste_list,
-                                                 true /* Copy data */);
-
-       stats_tbl->ctrl.num_of_collisions--;
-       stats_tbl->ctrl.num_of_valid_entries--;
-}
-
-/* Free ste that is located in the middle of the miss list:
- * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
- */
-static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
-                                    struct mlx5dr_ste *ste,
-                                    struct mlx5dr_ste_send_info *ste_info,
-                                    struct list_head *send_ste_list,
-                                    struct mlx5dr_ste_htbl *stats_tbl)
-{
-       struct mlx5dr_ste *prev_ste;
-       u64 miss_addr;
-
-       prev_ste = list_prev_entry(ste, miss_list_node);
-       if (WARN_ON(!prev_ste))
-               return;
-
-       miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
-       ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);
-
-       mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
-                                                 mlx5dr_ste_get_hw_ste(prev_ste),
-                                                 ste_info, send_ste_list,
-                                                 true /* Copy data*/);
-
-       list_del_init(&ste->miss_list_node);
-
-       stats_tbl->ctrl.num_of_valid_entries--;
-       stats_tbl->ctrl.num_of_collisions--;
-}
-
-void mlx5dr_ste_free(struct mlx5dr_ste *ste,
-                    struct mlx5dr_matcher *matcher,
-                    struct mlx5dr_matcher_rx_tx *nic_matcher)
-{
-       struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
-       struct mlx5dr_ste_send_info ste_info_head;
-       struct mlx5dr_ste *next_ste, *first_ste;
-       bool put_on_origin_table = true;
-       struct mlx5dr_ste_htbl *stats_tbl;
-       LIST_HEAD(send_ste_list);
-
-       first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
-                                    struct mlx5dr_ste, miss_list_node);
-       stats_tbl = first_ste->htbl;
-
-       /* Two options:
-        * 1. ste is head:
-        *      a. head ste is the only ste in the miss list
-        *      b. head ste is not the only ste in the miss-list
-        * 2. ste is not head
-        */
-       if (first_ste == ste) { /* Ste is the head */
-               struct mlx5dr_ste *last_ste;
-
-               last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
-                                          struct mlx5dr_ste, miss_list_node);
-               if (last_ste == first_ste)
-                       next_ste = NULL;
-               else
-                       next_ste = list_next_entry(ste, miss_list_node);
-
-               if (!next_ste) {
-                       /* One and only entry in the list */
-                       dr_ste_remove_head_ste(ste_ctx, ste,
-                                              nic_matcher,
-                                              &ste_info_head,
-                                              &send_ste_list,
-                                              stats_tbl);
-               } else {
-                       /* First but not only entry in the list */
-                       dr_ste_replace_head_ste(nic_matcher, ste,
-                                               next_ste, &ste_info_head,
-                                               &send_ste_list, stats_tbl);
-                       put_on_origin_table = false;
-               }
-       } else { /* Ste in the middle of the list */
-               dr_ste_remove_middle_ste(ste_ctx, ste,
-                                        &ste_info_head, &send_ste_list,
-                                        stats_tbl);
-       }
-
-       /* Update HW */
-       list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
-                                &send_ste_list, send_list) {
-               list_del(&cur_ste_info->send_list);
-               mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
-                                        cur_ste_info->data, cur_ste_info->size,
-                                        cur_ste_info->offset);
-       }
-
-       if (put_on_origin_table)
-               mlx5dr_htbl_put(ste->htbl);
-}
-
-bool mlx5dr_ste_equal_tag(void *src, void *dst)
-{
-       struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
-       struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
-
-       return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
-}
-
-void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
-                                         u8 *hw_ste,
-                                         struct mlx5dr_ste_htbl *next_htbl)
-{
-       u64 icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(next_htbl->chunk);
-       u32 num_entries =
-               mlx5dr_icm_pool_get_chunk_num_of_entries(next_htbl->chunk);
-
-       ste_ctx->set_hit_addr(hw_ste, icm_addr, num_entries);
-}
-
-void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
-                                    u8 *hw_ste_p, u32 ste_size)
-{
-       if (ste_ctx->prepare_for_postsend)
-               ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
-}
-
-/* Init one ste as a pattern for ste data array */
-void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
-                                 u16 gvmi,
-                                 enum mlx5dr_domain_nic_type nic_type,
-                                 struct mlx5dr_ste_htbl *htbl,
-                                 u8 *formatted_ste,
-                                 struct mlx5dr_htbl_connect_info *connect_info)
-{
-       bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
-       u8 tmp_hw_ste[DR_STE_SIZE] = {0};
-
-       ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
-
-       /* Use temp ste because dr_ste_always_miss_addr/hit_htbl
-        * touches bit_mask area which doesn't exist at ste->hw_ste.
-        * Need to use a full-sized (DR_STE_SIZE) hw_ste.
-        */
-       memcpy(tmp_hw_ste, formatted_ste, DR_STE_SIZE_REDUCED);
-       if (connect_info->type == CONNECT_HIT)
-               dr_ste_always_hit_htbl(ste_ctx, tmp_hw_ste,
-                                      connect_info->hit_next_htbl);
-       else
-               dr_ste_always_miss_addr(ste_ctx, tmp_hw_ste,
-                                       connect_info->miss_icm_addr);
-       memcpy(formatted_ste, tmp_hw_ste, DR_STE_SIZE_REDUCED);
-}
-
-int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
-                                     struct mlx5dr_domain_rx_tx *nic_dmn,
-                                     struct mlx5dr_ste_htbl *htbl,
-                                     struct mlx5dr_htbl_connect_info *connect_info,
-                                     bool update_hw_ste)
-{
-       u8 formatted_ste[DR_STE_SIZE] = {};
-
-       mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
-                                    dmn->info.caps.gvmi,
-                                    nic_dmn->type,
-                                    htbl,
-                                    formatted_ste,
-                                    connect_info);
-
-       return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
-}
-
-int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
-                               struct mlx5dr_matcher_rx_tx *nic_matcher,
-                               struct mlx5dr_ste *ste,
-                               u8 *cur_hw_ste,
-                               enum mlx5dr_icm_chunk_size log_table_size)
-{
-       struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
-       struct mlx5dr_htbl_connect_info info;
-       struct mlx5dr_ste_htbl *next_htbl;
-
-       if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
-               u16 next_lu_type;
-               u16 byte_mask;
-
-               next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
-               byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);
-
-               next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
-                                                 log_table_size,
-                                                 next_lu_type,
-                                                 byte_mask);
-               if (!next_htbl) {
-                       mlx5dr_dbg(dmn, "Failed allocating table\n");
-                       return -ENOMEM;
-               }
-
-               /* Write new table to HW */
-               info.type = CONNECT_MISS;
-               info.miss_icm_addr =
-                       mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
-               if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
-                                                     &info, false)) {
-                       mlx5dr_info(dmn, "Failed writing table to HW\n");
-                       goto free_table;
-               }
-
-               mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
-                                                    cur_hw_ste, next_htbl);
-               ste->next_htbl = next_htbl;
-               next_htbl->pointing_ste = ste;
-       }
-
-       return 0;
-
-free_table:
-       mlx5dr_ste_htbl_free(next_htbl);
-       return -ENOENT;
-}
-
-struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
-                                             enum mlx5dr_icm_chunk_size chunk_size,
-                                             u16 lu_type, u16 byte_mask)
-{
-       struct mlx5dr_icm_chunk *chunk;
-       struct mlx5dr_ste_htbl *htbl;
-       u32 num_entries;
-       int i;
-
-       htbl = mlx5dr_icm_pool_alloc_htbl(pool);
-       if (!htbl)
-               return NULL;
-
-       chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
-       if (!chunk)
-               goto out_free_htbl;
-
-       htbl->chunk = chunk;
-       htbl->lu_type = lu_type;
-       htbl->byte_mask = byte_mask;
-       htbl->refcount = 0;
-       htbl->pointing_ste = NULL;
-       htbl->ctrl.num_of_valid_entries = 0;
-       htbl->ctrl.num_of_collisions = 0;
-       num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
-
-       for (i = 0; i < num_entries; i++) {
-               struct mlx5dr_ste *ste = &chunk->ste_arr[i];
-
-               ste->htbl = htbl;
-               ste->refcount = 0;
-               INIT_LIST_HEAD(&ste->miss_list_node);
-               INIT_LIST_HEAD(&chunk->miss_list[i]);
-       }
-
-       return htbl;
-
-out_free_htbl:
-       mlx5dr_icm_pool_free_htbl(pool, htbl);
-       return NULL;
-}
-
-int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
-{
-       struct mlx5dr_icm_pool *pool = htbl->chunk->buddy_mem->pool;
-
-       if (htbl->refcount)
-               return -EBUSY;
-
-       mlx5dr_icm_free_chunk(htbl->chunk);
-       mlx5dr_icm_pool_free_htbl(pool, htbl);
-
-       return 0;
-}
-
-void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
-                              struct mlx5dr_domain *dmn,
-                              u8 *action_type_set,
-                              u8 *hw_ste_arr,
-                              struct mlx5dr_ste_actions_attr *attr,
-                              u32 *added_stes)
-{
-       ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
-                               hw_ste_arr, attr, added_stes);
-}
-
-void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
-                              struct mlx5dr_domain *dmn,
-                              u8 *action_type_set,
-                              u8 *hw_ste_arr,
-                              struct mlx5dr_ste_actions_attr *attr,
-                              u32 *added_stes)
-{
-       ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
-                               hw_ste_arr, attr, added_stes);
-}
-
-const struct mlx5dr_ste_action_modify_field *
-mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
-{
-       const struct mlx5dr_ste_action_modify_field *hw_field;
-
-       if (sw_field >= ste_ctx->modify_field_arr_sz)
-               return NULL;
-
-       hw_field = &ste_ctx->modify_field_arr[sw_field];
-       if (!hw_field->end && !hw_field->start)
-               return NULL;
-
-       return hw_field;
-}
-
-void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
-                              __be64 *hw_action,
-                              u8 hw_field,
-                              u8 shifter,
-                              u8 length,
-                              u32 data)
-{
-       ste_ctx->set_action_set((u8 *)hw_action,
-                               hw_field, shifter, length, data);
-}
-
-void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
-                              __be64 *hw_action,
-                              u8 hw_field,
-                              u8 shifter,
-                              u8 length,
-                              u32 data)
-{
-       ste_ctx->set_action_add((u8 *)hw_action,
-                               hw_field, shifter, length, data);
-}
-
-void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
-                               __be64 *hw_action,
-                               u8 dst_hw_field,
-                               u8 dst_shifter,
-                               u8 dst_len,
-                               u8 src_hw_field,
-                               u8 src_shifter)
-{
-       ste_ctx->set_action_copy((u8 *)hw_action,
-                                dst_hw_field, dst_shifter, dst_len,
-                                src_hw_field, src_shifter);
-}
-
-int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
-                                       void *data, u32 data_sz,
-                                       u8 *hw_action, u32 hw_action_sz,
-                                       u16 *used_hw_action_num)
-{
-       /* Only Ethernet frame is supported, with VLAN (18) or without (14) */
-       if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
-               return -EINVAL;
-
-       return ste_ctx->set_action_decap_l3_list(data, data_sz,
-                                                hw_action, hw_action_sz,
-                                                used_hw_action_num);
-}
-
-static int
-dr_ste_alloc_modify_hdr_chunk(struct mlx5dr_action *action)
-{
-       struct mlx5dr_domain *dmn = action->rewrite->dmn;
-       u32 chunk_size;
-       int ret;
-
-       chunk_size = ilog2(roundup_pow_of_two(action->rewrite->num_of_actions));
-
-       /* HW modify action index granularity is at least 64B */
-       chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);
-
-       action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
-                                                       chunk_size);
-       if (!action->rewrite->chunk)
-               return -ENOMEM;
-
-       action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(action->rewrite->chunk) -
-                                 dmn->info.caps.hdr_modify_icm_addr) /
-                                DR_ACTION_CACHE_LINE_SIZE;
-
-       ret = mlx5dr_send_postsend_action(action->rewrite->dmn, action);
-       if (ret)
-               goto free_chunk;
-
-       return 0;
-
-free_chunk:
-       mlx5dr_icm_free_chunk(action->rewrite->chunk);
-       return -ENOMEM;
-}
-
-static void dr_ste_free_modify_hdr_chunk(struct mlx5dr_action *action)
-{
-       mlx5dr_icm_free_chunk(action->rewrite->chunk);
-}
-
-int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action)
-{
-       struct mlx5dr_domain *dmn = action->rewrite->dmn;
-
-       if (mlx5dr_domain_is_support_ptrn_arg(dmn))
-               return dmn->ste_ctx->alloc_modify_hdr_chunk(action);
-
-       return dr_ste_alloc_modify_hdr_chunk(action);
-}
-
-void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action)
-{
-       struct mlx5dr_domain *dmn = action->rewrite->dmn;
-
-       if (mlx5dr_domain_is_support_ptrn_arg(dmn))
-               return dmn->ste_ctx->dealloc_modify_hdr_chunk(action);
-
-       return dr_ste_free_modify_hdr_chunk(action);
-}
-
-static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
-                                      struct mlx5dr_match_spec *spec)
-{
-       if (spec->ip_version) {
-               if (spec->ip_version != 0xf) {
-                       mlx5dr_err(dmn,
-                                  "Partial ip_version mask with src/dst IP is not supported\n");
-                       return -EINVAL;
-               }
-       } else if (spec->ethertype != 0xffff &&
-                  (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
-               mlx5dr_err(dmn,
-                          "Partial/no ethertype mask with src/dst IP is not supported\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
-                              u8 match_criteria,
-                              struct mlx5dr_match_param *mask,
-                              struct mlx5dr_match_param *value)
-{
-       if (value)
-               return 0;
-
-       if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
-               if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
-                       mlx5dr_err(dmn,
-                                  "Partial mask source_port is not supported\n");
-                       return -EINVAL;
-               }
-               if (mask->misc.source_eswitch_owner_vhca_id &&
-                   mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
-                       mlx5dr_err(dmn,
-                                  "Partial mask source_eswitch_owner_vhca_id is not supported\n");
-                       return -EINVAL;
-               }
-       }
-
-       if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
-           dr_ste_build_pre_check_spec(dmn, &mask->outer))
-               return -EINVAL;
-
-       if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
-           dr_ste_build_pre_check_spec(dmn, &mask->inner))
-               return -EINVAL;
-
-       return 0;
-}
-
-int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
-                            struct mlx5dr_matcher_rx_tx *nic_matcher,
-                            struct mlx5dr_match_param *value,
-                            u8 *ste_arr)
-{
-       struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
-       bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
-       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
-       struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
-       struct mlx5dr_ste_build *sb;
-       int ret, i;
-
-       ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
-                                        &matcher->mask, value);
-       if (ret)
-               return ret;
-
-       sb = nic_matcher->ste_builder;
-       for (i = 0; i < nic_matcher->num_of_builders; i++) {
-               ste_ctx->ste_init(ste_arr,
-                                 sb->lu_type,
-                                 is_rx,
-                                 dmn->info.caps.gvmi);
-
-               mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
-
-               ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
-               if (ret)
-                       return ret;
-
-               /* Connect the STEs */
-               if (i < (nic_matcher->num_of_builders - 1)) {
-                       /* Need the next builder for these fields,
-                        * not relevant for the last ste in the chain.
-                        */
-                       sb++;
-                       ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
-                       ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
-               }
-               ste_arr += DR_STE_SIZE;
-       }
-       return 0;
-}
-
-#define IFC_GET_CLR(typ, p, fld, clear) ({ \
-       void *__p = (p); \
-       u32 __t = MLX5_GET(typ, __p, fld); \
-       if (clear) \
-               MLX5_SET(typ, __p, fld, 0); \
-       __t; \
-})
-
-#define memcpy_and_clear(to, from, len, clear) ({ \
-       void *__to = (to), *__from = (from); \
-       size_t __len = (len); \
-       memcpy(__to, __from, __len); \
-       if (clear) \
-               memset(__from, 0, __len); \
-})
-
-static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
-{
-       spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
-       spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
-       spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
-       spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
-       spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);
-
-       spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
-       spec->source_eswitch_owner_vhca_id =
-               IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);
-
-       spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
-       spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
-       spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
-       spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
-       spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
-       spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);
-
-       spec->outer_second_cvlan_tag =
-               IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
-       spec->inner_second_cvlan_tag =
-               IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
-       spec->outer_second_svlan_tag =
-               IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
-       spec->inner_second_svlan_tag =
-               IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
-       spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);
-
-       spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
-       spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);
-
-       spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);
-
-       spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
-       spec->geneve_tlv_option_0_exist =
-               IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
-       spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);
-
-       spec->outer_ipv6_flow_label =
-               IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);
-
-       spec->inner_ipv6_flow_label =
-               IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);
-
-       spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
-       spec->geneve_protocol_type =
-               IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);
-
-       spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
-}
-
-static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
-{
-       __be32 raw_ip[4];
-
-       spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);
-
-       spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
-       spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);
-
-       spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);
-
-       spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
-       spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
-       spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
-       spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);
-
-       spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
-       spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
-       spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
-       spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
-       spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
-       spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
-       spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
-       spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
-       spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
-       spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);
-
-       spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr);
-       spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);
-
-       spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
-       spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);
-
-       memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
-                                             src_ipv4_src_ipv6.ipv6_layout.ipv6),
-                        sizeof(raw_ip), clr);
-
-       spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
-       spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
-       spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
-       spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);
-
-       memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
-                                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-                        sizeof(raw_ip), clr);
-
-       spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
-       spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
-       spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
-       spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
-}
-
-static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
-{
-       spec->outer_first_mpls_label =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
-       spec->outer_first_mpls_exp =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
-       spec->outer_first_mpls_s_bos =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
-       spec->outer_first_mpls_ttl =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
-       spec->inner_first_mpls_label =
-               IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
-       spec->inner_first_mpls_exp =
-               IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
-       spec->inner_first_mpls_s_bos =
-               IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
-       spec->inner_first_mpls_ttl =
-               IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
-       spec->outer_first_mpls_over_gre_label =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
-       spec->outer_first_mpls_over_gre_exp =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
-       spec->outer_first_mpls_over_gre_s_bos =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
-       spec->outer_first_mpls_over_gre_ttl =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
-       spec->outer_first_mpls_over_udp_label =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
-       spec->outer_first_mpls_over_udp_exp =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
-       spec->outer_first_mpls_over_udp_s_bos =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
-       spec->outer_first_mpls_over_udp_ttl =
-               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
-       spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
-       spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
-       spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
-       spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
-       spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
-       spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
-       spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
-       spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
-       spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
-}
-
-static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
-{
-       spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
-       spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
-       spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
-       spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
-       spec->outer_vxlan_gpe_vni =
-               IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
-       spec->outer_vxlan_gpe_next_protocol =
-               IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
-       spec->outer_vxlan_gpe_flags =
-               IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
-       spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
-       spec->icmpv6_header_data =
-               IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
-       spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
-       spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
-       spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
-       spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
-       spec->geneve_tlv_option_0_data =
-               IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
-       spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
-       spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
-       spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
-       spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
-       spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
-       spec->gtpu_first_ext_dw_0 =
-               IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
-}
-
-static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
-{
-       spec->prog_sample_field_id_0 =
-               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
-       spec->prog_sample_field_value_0 =
-               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
-       spec->prog_sample_field_id_1 =
-               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
-       spec->prog_sample_field_value_1 =
-               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
-       spec->prog_sample_field_id_2 =
-               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
-       spec->prog_sample_field_value_2 =
-               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
-       spec->prog_sample_field_id_3 =
-               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
-       spec->prog_sample_field_value_3 =
-               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
-}
-
-static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
-{
-       spec->macsec_tag_0 =
-               IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
-       spec->macsec_tag_1 =
-               IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
-       spec->macsec_tag_2 =
-               IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
-       spec->macsec_tag_3 =
-               IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
-       spec->tunnel_header_0 =
-               IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
-       spec->tunnel_header_1 =
-               IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
-       spec->tunnel_header_2 =
-               IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
-       spec->tunnel_header_3 =
-               IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
-}
-
-void mlx5dr_ste_copy_param(u8 match_criteria,
-                          struct mlx5dr_match_param *set_param,
-                          struct mlx5dr_match_parameters *mask,
-                          bool clr)
-{
-       u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
-       u8 *data = (u8 *)mask->match_buf;
-       size_t param_location;
-       void *buff;
-
-       if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
-               if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
-                       memcpy(tail_param, data, mask->match_sz);
-                       buff = tail_param;
-               } else {
-                       buff = mask->match_buf;
-               }
-               dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
-       }
-       param_location = sizeof(struct mlx5dr_match_spec);
-
-       if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
-               if (mask->match_sz < param_location +
-                   sizeof(struct mlx5dr_match_misc)) {
-                       memcpy(tail_param, data + param_location,
-                              mask->match_sz - param_location);
-                       buff = tail_param;
-               } else {
-                       buff = data + param_location;
-               }
-               dr_ste_copy_mask_misc(buff, &set_param->misc, clr);
-       }
-       param_location += sizeof(struct mlx5dr_match_misc);
-
-       if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
-               if (mask->match_sz < param_location +
-                   sizeof(struct mlx5dr_match_spec)) {
-                       memcpy(tail_param, data + param_location,
-                              mask->match_sz - param_location);
-                       buff = tail_param;
-               } else {
-                       buff = data + param_location;
-               }
-               dr_ste_copy_mask_spec(buff, &set_param->inner, clr);
-       }
-       param_location += sizeof(struct mlx5dr_match_spec);
-
-       if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
-               if (mask->match_sz < param_location +
-                   sizeof(struct mlx5dr_match_misc2)) {
-                       memcpy(tail_param, data + param_location,
-                              mask->match_sz - param_location);
-                       buff = tail_param;
-               } else {
-                       buff = data + param_location;
-               }
-               dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr);
-       }
-
-       param_location += sizeof(struct mlx5dr_match_misc2);
-
-       if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
-               if (mask->match_sz < param_location +
-                   sizeof(struct mlx5dr_match_misc3)) {
-                       memcpy(tail_param, data + param_location,
-                              mask->match_sz - param_location);
-                       buff = tail_param;
-               } else {
-                       buff = data + param_location;
-               }
-               dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr);
-       }
-
-       param_location += sizeof(struct mlx5dr_match_misc3);
-
-       if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
-               if (mask->match_sz < param_location +
-                   sizeof(struct mlx5dr_match_misc4)) {
-                       memcpy(tail_param, data + param_location,
-                              mask->match_sz - param_location);
-                       buff = tail_param;
-               } else {
-                       buff = data + param_location;
-               }
-               dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
-       }
-
-       param_location += sizeof(struct mlx5dr_match_misc4);
-
-       if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
-               if (mask->match_sz < param_location +
-                   sizeof(struct mlx5dr_match_misc5)) {
-                       memcpy(tail_param, data + param_location,
-                              mask->match_sz - param_location);
-                       buff = tail_param;
-               } else {
-                       buff = data + param_location;
-               }
-               dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
-       }
-}
-
-void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
-                                    struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask,
-                                    bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_eth_l2_src_dst_init(sb, mask);
-}
-
-void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
-                                     struct mlx5dr_ste_build *sb,
-                                     struct mlx5dr_match_param *mask,
-                                     bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
-}
-
-void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
-                                     struct mlx5dr_ste_build *sb,
-                                     struct mlx5dr_match_param *mask,
-                                     bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
-}
-
-void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
-                                         struct mlx5dr_ste_build *sb,
-                                         struct mlx5dr_match_param *mask,
-                                         bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
-}
-
-void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask,
-                                bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_eth_l2_src_init(sb, mask);
-}
-
-void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask,
-                                bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_eth_l2_dst_init(sb, mask);
-}
-
-void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask, bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_eth_l2_tnl_init(sb, mask);
-}
-
-void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
-                                      struct mlx5dr_ste_build *sb,
-                                      struct mlx5dr_match_param *mask,
-                                      bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
-}
-
-void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
-                                    struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask,
-                                    bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
-}
-
-static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
-                                            struct mlx5dr_ste_build *sb,
-                                            u8 *tag)
-{
-       return 0;
-}
-
-void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
-{
-       sb->rx = rx;
-       sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
-       sb->byte_mask = 0;
-       sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
-}
-
-void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
-                          struct mlx5dr_ste_build *sb,
-                          struct mlx5dr_match_param *mask,
-                          bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_mpls_init(sb, mask);
-}
-
-void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
-                             struct mlx5dr_ste_build *sb,
-                             struct mlx5dr_match_param *mask,
-                             bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_tnl_gre_init(sb, mask);
-}
-
-void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
-                                       struct mlx5dr_ste_build *sb,
-                                       struct mlx5dr_match_param *mask,
-                                       struct mlx5dr_cmd_caps *caps,
-                                       bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       sb->caps = caps;
-       return ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
-}
-
-void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
-                                       struct mlx5dr_ste_build *sb,
-                                       struct mlx5dr_match_param *mask,
-                                       struct mlx5dr_cmd_caps *caps,
-                                       bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       sb->caps = caps;
-       return ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
-}
-
-void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
-                          struct mlx5dr_ste_build *sb,
-                          struct mlx5dr_match_param *mask,
-                          struct mlx5dr_cmd_caps *caps,
-                          bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       sb->caps = caps;
-       ste_ctx->build_icmp_init(sb, mask);
-}
-
-void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
-                                     struct mlx5dr_ste_build *sb,
-                                     struct mlx5dr_match_param *mask,
-                                     bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_general_purpose_init(sb, mask);
-}
-
-void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
-                                 struct mlx5dr_ste_build *sb,
-                                 struct mlx5dr_match_param *mask,
-                                 bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_eth_l4_misc_init(sb, mask);
-}
-
-void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
-                                   struct mlx5dr_ste_build *sb,
-                                   struct mlx5dr_match_param *mask,
-                                   bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
-}
-
-void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask,
-                                bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_tnl_geneve_init(sb, mask);
-}
-
-void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
-                                        struct mlx5dr_ste_build *sb,
-                                        struct mlx5dr_match_param *mask,
-                                        struct mlx5dr_cmd_caps *caps,
-                                        bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->caps = caps;
-       sb->inner = inner;
-       ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
-}
-
-void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
-                                              struct mlx5dr_ste_build *sb,
-                                              struct mlx5dr_match_param *mask,
-                                              struct mlx5dr_cmd_caps *caps,
-                                              bool inner, bool rx)
-{
-       if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
-               return;
-
-       sb->rx = rx;
-       sb->caps = caps;
-       sb->inner = inner;
-       ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
-}
-
-void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
-                              struct mlx5dr_ste_build *sb,
-                              struct mlx5dr_match_param *mask,
-                              bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_tnl_gtpu_init(sb, mask);
-}
-
-void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
-                                            struct mlx5dr_ste_build *sb,
-                                            struct mlx5dr_match_param *mask,
-                                            struct mlx5dr_cmd_caps *caps,
-                                            bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->caps = caps;
-       sb->inner = inner;
-       ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
-}
-
-void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
-                                            struct mlx5dr_ste_build *sb,
-                                            struct mlx5dr_match_param *mask,
-                                            struct mlx5dr_cmd_caps *caps,
-                                            bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->caps = caps;
-       sb->inner = inner;
-       ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
-}
-
-void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask,
-                                bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_register_0_init(sb, mask);
-}
-
-void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask,
-                                bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_register_1_init(sb, mask);
-}
-
-void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
-                                  struct mlx5dr_ste_build *sb,
-                                  struct mlx5dr_match_param *mask,
-                                  struct mlx5dr_domain *dmn,
-                                  bool inner, bool rx)
-{
-       /* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
-       sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
-
-       sb->rx = rx;
-       sb->dmn = dmn;
-       sb->inner = inner;
-       ste_ctx->build_src_gvmi_qpn_init(sb, mask);
-}
-
-void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
-                                   struct mlx5dr_ste_build *sb,
-                                   struct mlx5dr_match_param *mask,
-                                   bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_flex_parser_0_init(sb, mask);
-}
-
-void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
-                                   struct mlx5dr_ste_build *sb,
-                                   struct mlx5dr_match_param *mask,
-                                   bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_flex_parser_1_init(sb, mask);
-}
-
-void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
-                                    struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask,
-                                    bool inner, bool rx)
-{
-       sb->rx = rx;
-       sb->inner = inner;
-       ste_ctx->build_tnl_header_0_1_init(sb, mask);
-}
-
-struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
-{
-       if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
-               return mlx5dr_ste_get_ctx_v0();
-       else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX)
-               return mlx5dr_ste_get_ctx_v1();
-       else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
-               return mlx5dr_ste_get_ctx_v2();
-
-       return NULL;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h
deleted file mode 100644 (file)
index 54a6619..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
-
-#ifndef        _DR_STE_
-#define        _DR_STE_
-
-#include "dr_types.h"
-
-#define STE_IPV4 0x1
-#define STE_IPV6 0x2
-#define STE_TCP 0x1
-#define STE_UDP 0x2
-#define STE_SPI 0x3
-#define IP_VERSION_IPV4 0x4
-#define IP_VERSION_IPV6 0x6
-#define STE_SVLAN 0x1
-#define STE_CVLAN 0x2
-#define HDR_LEN_L2_MACS   0xC
-#define HDR_LEN_L2_VLAN   0x4
-#define HDR_LEN_L2_ETHER  0x2
-#define HDR_LEN_L2        (HDR_LEN_L2_MACS + HDR_LEN_L2_ETHER)
-#define HDR_LEN_L2_W_VLAN (HDR_LEN_L2 + HDR_LEN_L2_VLAN)
-
-/* Set to STE a specific value using DR_STE_SET */
-#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
-       if ((spec)->s_fname) { \
-               MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
-               (spec)->s_fname = 0; \
-       } \
-} while (0)
-
-/* Set to STE spec->s_fname to tag->t_fname set spec->s_fname as used */
-#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
-       DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
-
-/* Set to STE -1 to tag->t_fname and set spec->s_fname as used */
-#define DR_STE_SET_ONES(lookup_type, tag, t_fname, spec, s_fname) \
-       DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, -1)
-
-#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
-       MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
-       MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
-       MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
-       MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
-       MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
-       MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
-       MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
-       MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
-       MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
-} while (0)
-
-#define DR_STE_SET_MPLS(lookup_type, mask, in_out, tag) do { \
-       struct mlx5dr_match_misc2 *_mask = mask; \
-       u8 *_tag = tag; \
-       DR_STE_SET_TAG(lookup_type, _tag, mpls0_label, _mask, \
-                      in_out##_first_mpls_label);\
-       DR_STE_SET_TAG(lookup_type, _tag, mpls0_s_bos, _mask, \
-                      in_out##_first_mpls_s_bos); \
-       DR_STE_SET_TAG(lookup_type, _tag, mpls0_exp, _mask, \
-                      in_out##_first_mpls_exp); \
-       DR_STE_SET_TAG(lookup_type, _tag, mpls0_ttl, _mask, \
-                      in_out##_first_mpls_ttl); \
-} while (0)
-
-#define DR_STE_SET_FLEX_PARSER_FIELD(tag, fname, caps, spec) do { \
-       u8 parser_id = (caps)->flex_parser_id_##fname; \
-       u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id); \
-       *(__be32 *)parser_ptr = cpu_to_be32((spec)->fname);\
-       (spec)->fname = 0;\
-} while (0)
-
-#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
-       (_misc)->outer_first_mpls_over_gre_label || \
-       (_misc)->outer_first_mpls_over_gre_exp || \
-       (_misc)->outer_first_mpls_over_gre_s_bos || \
-       (_misc)->outer_first_mpls_over_gre_ttl)
-
-#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
-       (_misc)->outer_first_mpls_over_udp_label || \
-       (_misc)->outer_first_mpls_over_udp_exp || \
-       (_misc)->outer_first_mpls_over_udp_s_bos || \
-       (_misc)->outer_first_mpls_over_udp_ttl)
-
-enum dr_ste_action_modify_type_l3 {
-       DR_STE_ACTION_MDFY_TYPE_L3_NONE = 0x0,
-       DR_STE_ACTION_MDFY_TYPE_L3_IPV4 = 0x1,
-       DR_STE_ACTION_MDFY_TYPE_L3_IPV6 = 0x2,
-};
-
-enum dr_ste_action_modify_type_l4 {
-       DR_STE_ACTION_MDFY_TYPE_L4_NONE = 0x0,
-       DR_STE_ACTION_MDFY_TYPE_L4_TCP  = 0x1,
-       DR_STE_ACTION_MDFY_TYPE_L4_UDP  = 0x2,
-};
-
-enum {
-       HDR_MPLS_OFFSET_LABEL   = 12,
-       HDR_MPLS_OFFSET_EXP     = 9,
-       HDR_MPLS_OFFSET_S_BOS   = 8,
-       HDR_MPLS_OFFSET_TTL     = 0,
-};
-
-u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
-
-static inline u8 *
-dr_ste_calc_flex_parser_offset(u8 *tag, u8 parser_id)
-{
-       /* Calculate tag byte offset based on flex parser id */
-       return tag + 4 * (3 - (parser_id % 4));
-}
-
-#define DR_STE_CTX_BUILDER(fname) \
-       ((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
-                                struct mlx5dr_match_param *mask))
-
-struct mlx5dr_ste_ctx {
-       /* Builders */
-       void DR_STE_CTX_BUILDER(eth_l2_src_dst);
-       void DR_STE_CTX_BUILDER(eth_l3_ipv6_src);
-       void DR_STE_CTX_BUILDER(eth_l3_ipv6_dst);
-       void DR_STE_CTX_BUILDER(eth_l3_ipv4_5_tuple);
-       void DR_STE_CTX_BUILDER(eth_l2_src);
-       void DR_STE_CTX_BUILDER(eth_l2_dst);
-       void DR_STE_CTX_BUILDER(eth_l2_tnl);
-       void DR_STE_CTX_BUILDER(eth_l3_ipv4_misc);
-       void DR_STE_CTX_BUILDER(eth_ipv6_l3_l4);
-       void DR_STE_CTX_BUILDER(mpls);
-       void DR_STE_CTX_BUILDER(tnl_gre);
-       void DR_STE_CTX_BUILDER(tnl_mpls);
-       void DR_STE_CTX_BUILDER(tnl_mpls_over_gre);
-       void DR_STE_CTX_BUILDER(tnl_mpls_over_udp);
-       void DR_STE_CTX_BUILDER(icmp);
-       void DR_STE_CTX_BUILDER(general_purpose);
-       void DR_STE_CTX_BUILDER(eth_l4_misc);
-       void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
-       void DR_STE_CTX_BUILDER(tnl_geneve);
-       void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt);
-       void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt_exist);
-       void DR_STE_CTX_BUILDER(register_0);
-       void DR_STE_CTX_BUILDER(register_1);
-       void DR_STE_CTX_BUILDER(src_gvmi_qpn);
-       void DR_STE_CTX_BUILDER(flex_parser_0);
-       void DR_STE_CTX_BUILDER(flex_parser_1);
-       void DR_STE_CTX_BUILDER(tnl_gtpu);
-       void DR_STE_CTX_BUILDER(tnl_header_0_1);
-       void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0);
-       void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1);
-
-       /* Getters and Setters */
-       void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
-                        bool is_rx, u16 gvmi);
-       void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
-       u16  (*get_next_lu_type)(u8 *hw_ste_p);
-       bool (*is_miss_addr_set)(u8 *hw_ste_p);
-       void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
-       u64  (*get_miss_addr)(u8 *hw_ste_p);
-       void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
-       void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask);
-       u16  (*get_byte_mask)(u8 *hw_ste_p);
-
-       /* Actions */
-       u32 actions_caps;
-       void (*set_actions_rx)(struct mlx5dr_domain *dmn,
-                              u8 *action_type_set,
-                              u32 actions_caps,
-                              u8 *hw_ste_arr,
-                              struct mlx5dr_ste_actions_attr *attr,
-                              u32 *added_stes);
-       void (*set_actions_tx)(struct mlx5dr_domain *dmn,
-                              u8 *action_type_set,
-                              u32 actions_caps,
-                              u8 *hw_ste_arr,
-                              struct mlx5dr_ste_actions_attr *attr,
-                              u32 *added_stes);
-       u32 modify_field_arr_sz;
-       const struct mlx5dr_ste_action_modify_field *modify_field_arr;
-       void (*set_action_set)(u8 *hw_action,
-                              u8 hw_field,
-                              u8 shifter,
-                              u8 length,
-                              u32 data);
-       void (*set_action_add)(u8 *hw_action,
-                              u8 hw_field,
-                              u8 shifter,
-                              u8 length,
-                              u32 data);
-       void (*set_action_copy)(u8 *hw_action,
-                               u8 dst_hw_field,
-                               u8 dst_shifter,
-                               u8 dst_len,
-                               u8 src_hw_field,
-                               u8 src_shifter);
-       int (*set_action_decap_l3_list)(void *data,
-                                       u32 data_sz,
-                                       u8 *hw_action,
-                                       u32 hw_action_sz,
-                                       u16 *used_hw_action_num);
-       int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action);
-       void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action);
-
-       /* Send */
-       void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
-};
-
-struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
-struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
-struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
-
-#endif  /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
deleted file mode 100644 (file)
index e9f6c7e..0000000
+++ /dev/null
@@ -1,1962 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
-
-#include <linux/types.h>
-#include <linux/crc32.h>
-#include "dr_ste.h"
-
-#define SVLAN_ETHERTYPE                0x88a8
-#define DR_STE_ENABLE_FLOW_TAG BIT(31)
-
-enum dr_ste_v0_entry_type {
-       DR_STE_TYPE_TX          = 1,
-       DR_STE_TYPE_RX          = 2,
-       DR_STE_TYPE_MODIFY_PKT  = 6,
-};
-
-enum dr_ste_v0_action_tunl {
-       DR_STE_TUNL_ACTION_NONE         = 0,
-       DR_STE_TUNL_ACTION_ENABLE       = 1,
-       DR_STE_TUNL_ACTION_DECAP        = 2,
-       DR_STE_TUNL_ACTION_L3_DECAP     = 3,
-       DR_STE_TUNL_ACTION_POP_VLAN     = 4,
-};
-
-enum dr_ste_v0_action_type {
-       DR_STE_ACTION_TYPE_PUSH_VLAN    = 1,
-       DR_STE_ACTION_TYPE_ENCAP_L3     = 3,
-       DR_STE_ACTION_TYPE_ENCAP        = 4,
-};
-
-enum dr_ste_v0_action_mdfy_op {
-       DR_STE_ACTION_MDFY_OP_COPY      = 0x1,
-       DR_STE_ACTION_MDFY_OP_SET       = 0x2,
-       DR_STE_ACTION_MDFY_OP_ADD       = 0x3,
-};
-
-#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
-       ((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \
-                  (rx) ? DR_STE_V0_LU_TYPE_##lookup_type##_D : \
-                         DR_STE_V0_LU_TYPE_##lookup_type##_O)
-
-enum {
-       DR_STE_V0_LU_TYPE_NOP                           = 0x00,
-       DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP               = 0x05,
-       DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I             = 0x0a,
-       DR_STE_V0_LU_TYPE_ETHL2_DST_O                   = 0x06,
-       DR_STE_V0_LU_TYPE_ETHL2_DST_I                   = 0x07,
-       DR_STE_V0_LU_TYPE_ETHL2_DST_D                   = 0x1b,
-       DR_STE_V0_LU_TYPE_ETHL2_SRC_O                   = 0x08,
-       DR_STE_V0_LU_TYPE_ETHL2_SRC_I                   = 0x09,
-       DR_STE_V0_LU_TYPE_ETHL2_SRC_D                   = 0x1c,
-       DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O               = 0x36,
-       DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I               = 0x37,
-       DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D               = 0x38,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O              = 0x0d,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I              = 0x0e,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D              = 0x1e,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O              = 0x0f,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I              = 0x10,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D              = 0x1f,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O          = 0x11,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I          = 0x12,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D          = 0x20,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O             = 0x29,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I             = 0x2a,
-       DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D             = 0x2b,
-       DR_STE_V0_LU_TYPE_ETHL4_O                       = 0x13,
-       DR_STE_V0_LU_TYPE_ETHL4_I                       = 0x14,
-       DR_STE_V0_LU_TYPE_ETHL4_D                       = 0x21,
-       DR_STE_V0_LU_TYPE_ETHL4_MISC_O                  = 0x2c,
-       DR_STE_V0_LU_TYPE_ETHL4_MISC_I                  = 0x2d,
-       DR_STE_V0_LU_TYPE_ETHL4_MISC_D                  = 0x2e,
-       DR_STE_V0_LU_TYPE_MPLS_FIRST_O                  = 0x15,
-       DR_STE_V0_LU_TYPE_MPLS_FIRST_I                  = 0x24,
-       DR_STE_V0_LU_TYPE_MPLS_FIRST_D                  = 0x25,
-       DR_STE_V0_LU_TYPE_GRE                           = 0x16,
-       DR_STE_V0_LU_TYPE_FLEX_PARSER_0                 = 0x22,
-       DR_STE_V0_LU_TYPE_FLEX_PARSER_1                 = 0x23,
-       DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER        = 0x19,
-       DR_STE_V0_LU_TYPE_GENERAL_PURPOSE               = 0x18,
-       DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0          = 0x2f,
-       DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1          = 0x30,
-       DR_STE_V0_LU_TYPE_TUNNEL_HEADER                 = 0x34,
-       DR_STE_V0_LU_TYPE_DONT_CARE                     = MLX5DR_STE_LU_TYPE_DONT_CARE,
-};
-
-enum {
-       DR_STE_V0_ACTION_MDFY_FLD_L2_0          = 0,
-       DR_STE_V0_ACTION_MDFY_FLD_L2_1          = 1,
-       DR_STE_V0_ACTION_MDFY_FLD_L2_2          = 2,
-       DR_STE_V0_ACTION_MDFY_FLD_L3_0          = 3,
-       DR_STE_V0_ACTION_MDFY_FLD_L3_1          = 4,
-       DR_STE_V0_ACTION_MDFY_FLD_L3_2          = 5,
-       DR_STE_V0_ACTION_MDFY_FLD_L3_3          = 6,
-       DR_STE_V0_ACTION_MDFY_FLD_L3_4          = 7,
-       DR_STE_V0_ACTION_MDFY_FLD_L4_0          = 8,
-       DR_STE_V0_ACTION_MDFY_FLD_L4_1          = 9,
-       DR_STE_V0_ACTION_MDFY_FLD_MPLS          = 10,
-       DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0      = 11,
-       DR_STE_V0_ACTION_MDFY_FLD_REG_0         = 12,
-       DR_STE_V0_ACTION_MDFY_FLD_REG_1         = 13,
-       DR_STE_V0_ACTION_MDFY_FLD_REG_2         = 14,
-       DR_STE_V0_ACTION_MDFY_FLD_REG_3         = 15,
-       DR_STE_V0_ACTION_MDFY_FLD_L4_2          = 16,
-       DR_STE_V0_ACTION_MDFY_FLD_FLEX_0        = 17,
-       DR_STE_V0_ACTION_MDFY_FLD_FLEX_1        = 18,
-       DR_STE_V0_ACTION_MDFY_FLD_FLEX_2        = 19,
-       DR_STE_V0_ACTION_MDFY_FLD_FLEX_3        = 20,
-       DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1      = 21,
-       DR_STE_V0_ACTION_MDFY_FLD_METADATA      = 22,
-       DR_STE_V0_ACTION_MDFY_FLD_RESERVED      = 23,
-};
-
-static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = {
-       [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
-               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15,
-       },
-};
-
-static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type)
-{
-       MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
-}
-
-static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p)
-{
-       return MLX5_GET(ste_general, hw_ste_p, entry_type);
-}
-
-static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
-{
-       u64 index = miss_addr >> 6;
-
-       /* Miss address for TX and RX STEs located in the same offsets */
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
-}
-
-static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
-{
-       u64 index =
-               ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6) |
-                ((u64)MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32)) << 26);
-
-       return index << 6;
-}
-
-static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
-{
-       MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask);
-}
-
-static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p)
-{
-       return MLX5_GET(ste_general, hw_ste_p, byte_mask);
-}
-
-static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type)
-{
-       MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
-}
-
-static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
-{
-       MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type);
-}
-
-static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p)
-{
-       return MLX5_GET(ste_general, hw_ste_p, next_lu_type);
-}
-
-static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
-{
-       MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
-}
-
-static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
-{
-       u64 index = (icm_addr >> 5) | ht_size;
-
-       MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27);
-       MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
-}
-
-static void dr_ste_v0_init_full(u8 *hw_ste_p, u16 lu_type,
-                               enum dr_ste_v0_entry_type entry_type, u16 gvmi)
-{
-       dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
-       dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
-       dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
-
-       /* Set GVMI once, this is the same for RX/TX
-        * bits 63_48 of next table base / miss address encode the next GVMI
-        */
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
-}
-
-static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
-                          bool is_rx, u16 gvmi)
-{
-       enum dr_ste_v0_entry_type entry_type;
-
-       entry_type = is_rx ? DR_STE_TYPE_RX : DR_STE_TYPE_TX;
-       dr_ste_v0_init_full(hw_ste_p, lu_type, entry_type, gvmi);
-}
-
-static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
-{
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
-                DR_STE_ENABLE_FLOW_TAG | flow_tag);
-}
-
-static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
-{
-       /* This can be used for both rx_steering_mult and for sx_transmit */
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
-}
-
-static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p)
-{
-       MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
-}
-
-static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
-                                      bool go_back)
-{
-       MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
-                DR_STE_ACTION_TYPE_PUSH_VLAN);
-       MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
-       /* Due to HW limitation we need to set this bit, otherwise reformat +
-        * push vlan will not work.
-        */
-       if (go_back)
-               dr_ste_v0_set_go_back_bit(hw_ste_p);
-}
-
-static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id,
-                                  int size, bool encap_l3)
-{
-       MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
-                encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
-       /* The hardware expects here size in words (2 byte) */
-       MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
-       MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
-}
-
-static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
-{
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
-                DR_STE_TUNL_ACTION_DECAP);
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
-}
-
-static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
-{
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
-                DR_STE_TUNL_ACTION_POP_VLAN);
-}
-
-static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
-{
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
-                DR_STE_TUNL_ACTION_L3_DECAP);
-       MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
-       MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
-}
-
-static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
-                                         u32 re_write_index)
-{
-       MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
-                num_of_actions);
-       MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
-                re_write_index);
-}
-
-static void dr_ste_v0_arr_init_next(u8 **last_ste,
-                                   u32 *added_stes,
-                                   enum dr_ste_v0_entry_type entry_type,
-                                   u16 gvmi)
-{
-       (*added_stes)++;
-       *last_ste += DR_STE_SIZE;
-       dr_ste_v0_init_full(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
-                           entry_type, gvmi);
-}
-
-static void
-dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
-                        u8 *action_type_set,
-                        u32 actions_caps,
-                        u8 *last_ste,
-                        struct mlx5dr_ste_actions_attr *attr,
-                        u32 *added_stes)
-{
-       bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
-               action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
-
-       /* We want to make sure the modify header comes before L2
-        * encapsulation. The reason for that is that we support
-        * modify headers for outer headers only
-        */
-       if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
-               dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
-               dr_ste_v0_set_rewrite_actions(last_ste,
-                                             attr->modify_actions,
-                                             attr->modify_index);
-       }
-
-       if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
-               int i;
-
-               for (i = 0; i < attr->vlans.count; i++) {
-                       if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
-                               dr_ste_v0_arr_init_next(&last_ste,
-                                                       added_stes,
-                                                       DR_STE_TYPE_TX,
-                                                       attr->gvmi);
-
-                       dr_ste_v0_set_tx_push_vlan(last_ste,
-                                                  attr->vlans.headers[i],
-                                                  encap);
-               }
-       }
-
-       if (encap) {
-               /* Modify header and encapsulation require a different STEs.
-                * Since modify header STE format doesn't support encapsulation
-                * tunneling_action.
-                */
-               if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
-                   action_type_set[DR_ACTION_TYP_PUSH_VLAN])
-                       dr_ste_v0_arr_init_next(&last_ste,
-                                               added_stes,
-                                               DR_STE_TYPE_TX,
-                                               attr->gvmi);
-
-               dr_ste_v0_set_tx_encap(last_ste,
-                                      attr->reformat.id,
-                                      attr->reformat.size,
-                                      action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
-               /* Whenever prio_tag_required enabled, we can be sure that the
-                * previous table (ACL) already push vlan to our packet,
-                * And due to HW limitation we need to set this bit, otherwise
-                * push vlan + reformat will not work.
-                */
-               if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
-                       dr_ste_v0_set_go_back_bit(last_ste);
-       }
-
-       if (action_type_set[DR_ACTION_TYP_CTR])
-               dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
-
-       dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
-       dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
-}
-
-static void
-dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
-                        u8 *action_type_set,
-                        u32 actions_caps,
-                        u8 *last_ste,
-                        struct mlx5dr_ste_actions_attr *attr,
-                        u32 *added_stes)
-{
-       if (action_type_set[DR_ACTION_TYP_CTR])
-               dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
-
-       if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
-               dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
-               dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
-               dr_ste_v0_set_rewrite_actions(last_ste,
-                                             attr->decap_actions,
-                                             attr->decap_index);
-       }
-
-       if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
-               dr_ste_v0_set_rx_decap(last_ste);
-
-       if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
-               int i;
-
-               for (i = 0; i < attr->vlans.count; i++) {
-                       if (i ||
-                           action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
-                           action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
-                               dr_ste_v0_arr_init_next(&last_ste,
-                                                       added_stes,
-                                                       DR_STE_TYPE_RX,
-                                                       attr->gvmi);
-
-                       dr_ste_v0_set_rx_pop_vlan(last_ste);
-               }
-       }
-
-       if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
-               if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
-                       dr_ste_v0_arr_init_next(&last_ste,
-                                               added_stes,
-                                               DR_STE_TYPE_MODIFY_PKT,
-                                               attr->gvmi);
-               else
-                       dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
-
-               dr_ste_v0_set_rewrite_actions(last_ste,
-                                             attr->modify_actions,
-                                             attr->modify_index);
-       }
-
-       if (action_type_set[DR_ACTION_TYP_TAG]) {
-               if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
-                       dr_ste_v0_arr_init_next(&last_ste,
-                                               added_stes,
-                                               DR_STE_TYPE_RX,
-                                               attr->gvmi);
-
-               dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
-       }
-
-       dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
-       dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
-}
-
-static void dr_ste_v0_set_action_set(u8 *hw_action,
-                                    u8 hw_field,
-                                    u8 shifter,
-                                    u8 length,
-                                    u32 data)
-{
-       length = (length == 32) ? 0 : length;
-       MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET);
-       MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
-       MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
-       MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
-       MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
-}
-
-static void dr_ste_v0_set_action_add(u8 *hw_action,
-                                    u8 hw_field,
-                                    u8 shifter,
-                                    u8 length,
-                                    u32 data)
-{
-       length = (length == 32) ? 0 : length;
-       MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD);
-       MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
-       MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
-       MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
-       MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
-}
-
-static void dr_ste_v0_set_action_copy(u8 *hw_action,
-                                     u8 dst_hw_field,
-                                     u8 dst_shifter,
-                                     u8 dst_len,
-                                     u8 src_hw_field,
-                                     u8 src_shifter)
-{
-       MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY);
-       MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field);
-       MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter);
-       MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len);
-       MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field);
-       MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter);
-}
-
-#define DR_STE_DECAP_L3_MIN_ACTION_NUM 5
-
-static int
-dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz,
-                                  u8 *hw_action, u32 hw_action_sz,
-                                  u16 *used_hw_action_num)
-{
-       struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
-       u32 hw_action_num;
-       int required_actions;
-       u32 hdr_fld_4b;
-       u16 hdr_fld_2b;
-       u16 vlan_type;
-       bool vlan;
-
-       vlan = (data_sz != HDR_LEN_L2);
-       hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set);
-       required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan;
-
-       if (hw_action_num < required_actions)
-               return -ENOMEM;
-
-       /* dmac_47_16 */
-       MLX5_SET(dr_action_hw_set, hw_action,
-                opcode, DR_STE_ACTION_MDFY_OP_SET);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_length, 0);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_left_shifter, 16);
-       hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                inline_data, hdr_fld_4b);
-       hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
-
-       /* smac_47_16 */
-       MLX5_SET(dr_action_hw_set, hw_action,
-                opcode, DR_STE_ACTION_MDFY_OP_SET);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_length, 0);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
-       MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16);
-       hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
-                     MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
-       MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
-       hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
-
-       /* dmac_15_0 */
-       MLX5_SET(dr_action_hw_set, hw_action,
-                opcode, DR_STE_ACTION_MDFY_OP_SET);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_length, 16);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_left_shifter, 0);
-       hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                inline_data, hdr_fld_2b);
-       hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
-
-       /* ethertype + (optional) vlan */
-       MLX5_SET(dr_action_hw_set, hw_action,
-                opcode, DR_STE_ACTION_MDFY_OP_SET);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_left_shifter, 32);
-       if (!vlan) {
-               hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
-               MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
-               MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
-       } else {
-               hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
-               vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
-               hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
-               hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
-               MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
-               MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18);
-       }
-       hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
-
-       /* smac_15_0 */
-       MLX5_SET(dr_action_hw_set, hw_action,
-                opcode, DR_STE_ACTION_MDFY_OP_SET);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_length, 16);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
-       MLX5_SET(dr_action_hw_set, hw_action,
-                destination_left_shifter, 0);
-       hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
-       MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
-       hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
-
-       if (vlan) {
-               MLX5_SET(dr_action_hw_set, hw_action,
-                        opcode, DR_STE_ACTION_MDFY_OP_SET);
-               hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
-               MLX5_SET(dr_action_hw_set, hw_action,
-                        inline_data, hdr_fld_2b);
-               MLX5_SET(dr_action_hw_set, hw_action,
-                        destination_length, 16);
-               MLX5_SET(dr_action_hw_set, hw_action,
-                        destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
-               MLX5_SET(dr_action_hw_set, hw_action,
-                        destination_left_shifter, 0);
-       }
-
-       *used_hw_action_num = required_actions;
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
-                                       bool inner, u8 *bit_mask)
-{
-       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
-
-       if (mask->smac_47_16 || mask->smac_15_0) {
-               MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
-                        mask->smac_47_16 >> 16);
-               MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
-                        mask->smac_47_16 << 16 | mask->smac_15_0);
-               mask->smac_47_16 = 0;
-               mask->smac_15_0 = 0;
-       }
-
-       DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
-       DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
-       DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
-       DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
-
-       if (mask->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
-               mask->cvlan_tag = 0;
-       } else if (mask->svlan_tag) {
-               MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
-               mask->svlan_tag = 0;
-       }
-}
-
-static int
-dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
-                                  struct mlx5dr_ste_build *sb,
-                                  u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
-
-       if (spec->smac_47_16 || spec->smac_15_0) {
-               MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
-                        spec->smac_47_16 >> 16);
-               MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
-                        spec->smac_47_16 << 16 | spec->smac_15_0);
-               spec->smac_47_16 = 0;
-               spec->smac_15_0 = 0;
-       }
-
-       if (spec->ip_version) {
-               if (spec->ip_version == IP_VERSION_IPV4) {
-                       MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
-                       spec->ip_version = 0;
-               } else if (spec->ip_version == IP_VERSION_IPV6) {
-                       MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
-                       spec->ip_version = 0;
-               } else {
-                       return -EINVAL;
-               }
-       }
-
-       DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
-       DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
-       DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
-
-       if (spec->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
-               spec->cvlan_tag = 0;
-       } else if (spec->svlan_tag) {
-               MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
-               spec->svlan_tag = 0;
-       }
-       return 0;
-}
-
-static void
-dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
-                                   struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag;
-}
-
-static int
-dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
-                                   struct mlx5dr_ste_build *sb,
-                                   u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
-       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
-       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
-       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag;
-}
-
-static int
-dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
-                                   struct mlx5dr_ste_build *sb,
-                                   u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
-       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
-       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
-       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag;
-}
-
-static int
-dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
-                                       struct mlx5dr_ste_build *sb,
-                                       u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
-
-       if (spec->tcp_flags) {
-               DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
-               spec->tcp_flags = 0;
-       }
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
-                                        struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag;
-}
-
-static void
-dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
-                                          bool inner, u8 *bit_mask)
-{
-       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-       struct mlx5dr_match_misc *misc_mask = &value->misc;
-
-       DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
-       DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
-       DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio);
-       DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
-       DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
-       DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version);
-
-       if (mask->svlan_tag || mask->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
-               mask->cvlan_tag = 0;
-               mask->svlan_tag = 0;
-       }
-
-       if (inner) {
-               if (misc_mask->inner_second_cvlan_tag ||
-                   misc_mask->inner_second_svlan_tag) {
-                       MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
-                       misc_mask->inner_second_cvlan_tag = 0;
-                       misc_mask->inner_second_svlan_tag = 0;
-               }
-
-               DR_STE_SET_TAG(eth_l2_src, bit_mask,
-                              second_vlan_id, misc_mask, inner_second_vid);
-               DR_STE_SET_TAG(eth_l2_src, bit_mask,
-                              second_cfi, misc_mask, inner_second_cfi);
-               DR_STE_SET_TAG(eth_l2_src, bit_mask,
-                              second_priority, misc_mask, inner_second_prio);
-       } else {
-               if (misc_mask->outer_second_cvlan_tag ||
-                   misc_mask->outer_second_svlan_tag) {
-                       MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
-                       misc_mask->outer_second_cvlan_tag = 0;
-                       misc_mask->outer_second_svlan_tag = 0;
-               }
-
-               DR_STE_SET_TAG(eth_l2_src, bit_mask,
-                              second_vlan_id, misc_mask, outer_second_vid);
-               DR_STE_SET_TAG(eth_l2_src, bit_mask,
-                              second_cfi, misc_mask, outer_second_cfi);
-               DR_STE_SET_TAG(eth_l2_src, bit_mask,
-                              second_priority, misc_mask, outer_second_prio);
-       }
-}
-
-static int
-dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
-                                     bool inner, u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
-       struct mlx5dr_match_misc *misc_spec = &value->misc;
-
-       DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
-       DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
-       DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
-       DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
-       DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
-
-       if (spec->ip_version) {
-               if (spec->ip_version == IP_VERSION_IPV4) {
-                       MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
-                       spec->ip_version = 0;
-               } else if (spec->ip_version == IP_VERSION_IPV6) {
-                       MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
-                       spec->ip_version = 0;
-               } else {
-                       return -EINVAL;
-               }
-       }
-
-       if (spec->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
-               spec->cvlan_tag = 0;
-       } else if (spec->svlan_tag) {
-               MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
-               spec->svlan_tag = 0;
-       }
-
-       if (inner) {
-               if (misc_spec->inner_second_cvlan_tag) {
-                       MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
-                       misc_spec->inner_second_cvlan_tag = 0;
-               } else if (misc_spec->inner_second_svlan_tag) {
-                       MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
-                       misc_spec->inner_second_svlan_tag = 0;
-               }
-
-               DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
-               DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
-               DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
-       } else {
-               if (misc_spec->outer_second_cvlan_tag) {
-                       MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
-                       misc_spec->outer_second_cvlan_tag = 0;
-               } else if (misc_spec->outer_second_svlan_tag) {
-                       MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
-                       misc_spec->outer_second_svlan_tag = 0;
-               }
-               DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
-               DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
-               DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
-       }
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
-                                   bool inner, u8 *bit_mask)
-{
-       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
-       DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
-
-       dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
-}
-
-static int
-dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
-                              struct mlx5dr_ste_build *sb,
-                              u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
-       DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
-
-       return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
-}
-
-static void
-dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
-                               struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
-       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag;
-}
-
-static void
-dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
-                                   struct mlx5dr_ste_build *sb,
-                                   u8 *bit_mask)
-{
-       struct mlx5dr_match_spec *mask = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
-
-       dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask);
-}
-
-static int
-dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
-                              struct mlx5dr_ste_build *sb,
-                              u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
-
-       return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
-}
-
-static void
-dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
-                               struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag;
-}
-
-static void
-dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
-                                   bool inner, u8 *bit_mask)
-{
-       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-       struct mlx5dr_match_misc *misc = &value->misc;
-
-       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
-       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
-       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
-       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
-       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
-       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
-       DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
-
-       if (misc->vxlan_vni) {
-               MLX5_SET(ste_eth_l2_tnl, bit_mask,
-                        l2_tunneling_network_id, (misc->vxlan_vni << 8));
-               misc->vxlan_vni = 0;
-       }
-
-       if (mask->svlan_tag || mask->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
-               mask->cvlan_tag = 0;
-               mask->svlan_tag = 0;
-       }
-}
-
-static int
-dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
-                              struct mlx5dr_ste_build *sb,
-                              u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-       struct mlx5dr_match_misc *misc = &value->misc;
-
-       DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
-       DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
-       DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
-       DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
-       DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
-       DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
-
-       if (misc->vxlan_vni) {
-               MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
-                        (misc->vxlan_vni << 8));
-               misc->vxlan_vni = 0;
-       }
-
-       if (spec->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
-               spec->cvlan_tag = 0;
-       } else if (spec->svlan_tag) {
-               MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
-               spec->svlan_tag = 0;
-       }
-
-       if (spec->ip_version) {
-               if (spec->ip_version == IP_VERSION_IPV4) {
-                       MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
-                       spec->ip_version = 0;
-               } else if (spec->ip_version == IP_VERSION_IPV6) {
-                       MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
-                       spec->ip_version = 0;
-               } else {
-                       return -EINVAL;
-               }
-       }
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
-                               struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag;
-}
-
-static int
-dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
-                                    struct mlx5dr_ste_build *sb,
-                                    u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
-       DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, ihl, spec, ipv4_ihl);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
-                                     struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag;
-}
-
-static int
-dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
-                                  struct mlx5dr_ste_build *sb,
-                                  u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-       struct mlx5dr_match_misc *misc = &value->misc;
-
-       DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
-       DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
-       DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
-       DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
-       DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
-       DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
-       DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
-       DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
-       DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
-
-       if (sb->inner)
-               DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, inner_ipv6_flow_label);
-       else
-               DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, outer_ipv6_flow_label);
-
-       if (spec->tcp_flags) {
-               DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
-               spec->tcp_flags = 0;
-       }
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
-                                   struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag;
-}
-
-static int
-dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value,
-                        struct mlx5dr_ste_build *sb,
-                        u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
-       if (sb->inner)
-               DR_STE_SET_MPLS(mpls, misc2, inner, tag);
-       else
-               DR_STE_SET_MPLS(mpls, misc2, outer, tag);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb,
-                         struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag;
-}
-
-static int
-dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value,
-                           struct mlx5dr_ste_build *sb,
-                           u8 *tag)
-{
-       struct  mlx5dr_match_misc *misc = &value->misc;
-
-       DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
-
-       DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
-       DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
-       DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
-
-       DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
-
-       DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
-                            struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V0_LU_TYPE_GRE;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag;
-}
-
-static int
-dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
-                            struct mlx5dr_ste_build *sb,
-                            u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
-       u32 mpls_hdr;
-
-       if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
-               mpls_hdr = misc_2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
-               misc_2->outer_first_mpls_over_gre_label = 0;
-               mpls_hdr |= misc_2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
-               misc_2->outer_first_mpls_over_gre_exp = 0;
-               mpls_hdr |= misc_2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
-               misc_2->outer_first_mpls_over_gre_s_bos = 0;
-               mpls_hdr |= misc_2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
-               misc_2->outer_first_mpls_over_gre_ttl = 0;
-       } else {
-               mpls_hdr = misc_2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
-               misc_2->outer_first_mpls_over_udp_label = 0;
-               mpls_hdr |= misc_2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
-               misc_2->outer_first_mpls_over_udp_exp = 0;
-               mpls_hdr |= misc_2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
-               misc_2->outer_first_mpls_over_udp_s_bos = 0;
-               mpls_hdr |= misc_2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
-               misc_2->outer_first_mpls_over_udp_ttl = 0;
-       }
-
-       MLX5_SET(ste_flex_parser_0, tag, flex_parser_3, mpls_hdr);
-       return 0;
-}
-
-static void
-dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
-                             struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
-}
-
-static int
-dr_ste_v0_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
-                                     struct mlx5dr_ste_build *sb,
-                                     u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-       u8 *parser_ptr;
-       u8 parser_id;
-       u32 mpls_hdr;
-
-       mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
-       misc2->outer_first_mpls_over_udp_label = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
-       misc2->outer_first_mpls_over_udp_exp = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
-       misc2->outer_first_mpls_over_udp_s_bos = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
-       misc2->outer_first_mpls_over_udp_ttl = 0;
-
-       parser_id = sb->caps->flex_parser_id_mpls_over_udp;
-       parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
-       *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
-                                      struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
-       /* STEs with lookup type FLEX_PARSER_{0/1} includes
-        * flex parsers_{0-3}/{4-7} respectively.
-        */
-       sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
-                     DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
-                     DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
-
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_udp_tag;
-}
-
-static int
-dr_ste_v0_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
-                                     struct mlx5dr_ste_build *sb,
-                                     u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-       u8 *parser_ptr;
-       u8 parser_id;
-       u32 mpls_hdr;
-
-       mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
-       misc2->outer_first_mpls_over_gre_label = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
-       misc2->outer_first_mpls_over_gre_exp = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
-       misc2->outer_first_mpls_over_gre_s_bos = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
-       misc2->outer_first_mpls_over_gre_ttl = 0;
-
-       parser_id = sb->caps->flex_parser_id_mpls_over_gre;
-       parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
-       *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
-                                      struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
-
-       /* STEs with lookup type FLEX_PARSER_{0/1} includes
-        * flex parsers_{0-3}/{4-7} respectively.
-        */
-       sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
-                     DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
-                     DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
-
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_gre_tag;
-}
-
-#define ICMP_TYPE_OFFSET_FIRST_DW      24
-#define ICMP_CODE_OFFSET_FIRST_DW      16
-
-static int
-dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
-                        struct mlx5dr_ste_build *sb,
-                        u8 *tag)
-{
-       struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
-       u32 *icmp_header_data;
-       int dw0_location;
-       int dw1_location;
-       u8 *parser_ptr;
-       u8 *icmp_type;
-       u8 *icmp_code;
-       bool is_ipv4;
-       u32 icmp_hdr;
-
-       is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
-       if (is_ipv4) {
-               icmp_header_data        = &misc_3->icmpv4_header_data;
-               icmp_type               = &misc_3->icmpv4_type;
-               icmp_code               = &misc_3->icmpv4_code;
-               dw0_location            = sb->caps->flex_parser_id_icmp_dw0;
-               dw1_location            = sb->caps->flex_parser_id_icmp_dw1;
-       } else {
-               icmp_header_data        = &misc_3->icmpv6_header_data;
-               icmp_type               = &misc_3->icmpv6_type;
-               icmp_code               = &misc_3->icmpv6_code;
-               dw0_location            = sb->caps->flex_parser_id_icmpv6_dw0;
-               dw1_location            = sb->caps->flex_parser_id_icmpv6_dw1;
-       }
-
-       parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw0_location);
-       icmp_hdr = (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
-                  (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW);
-       *(__be32 *)parser_ptr = cpu_to_be32(icmp_hdr);
-       *icmp_code = 0;
-       *icmp_type = 0;
-
-       parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw1_location);
-       *(__be32 *)parser_ptr = cpu_to_be32(*icmp_header_data);
-       *icmp_header_data = 0;
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
-                         struct mlx5dr_match_param *mask)
-{
-       u8 parser_id;
-       bool is_ipv4;
-
-       dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
-
-       /* STEs with lookup type FLEX_PARSER_{0/1} includes
-        * flex parsers_{0-3}/{4-7} respectively.
-        */
-       is_ipv4 = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
-       parser_id = is_ipv4 ? sb->caps->flex_parser_id_icmp_dw0 :
-                   sb->caps->flex_parser_id_icmpv6_dw0;
-       sb->lu_type = parser_id > DR_STE_MAX_FLEX_0_ID ?
-                     DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
-                     DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
-}
-
-static int
-dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value,
-                                   struct mlx5dr_ste_build *sb,
-                                   u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
-
-       DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
-                      misc_2, metadata_reg_a);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag;
-}
-
-static int
-dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
-                               struct mlx5dr_ste_build *sb,
-                               u8 *tag)
-{
-       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
-       if (sb->inner) {
-               DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
-               DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
-       } else {
-               DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
-               DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
-       }
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag;
-}
-
-static int
-dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
-                                             struct mlx5dr_ste_build *sb,
-                                             u8 *tag)
-{
-       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
-       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
-                      outer_vxlan_gpe_flags, misc3,
-                      outer_vxlan_gpe_flags);
-       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
-                      outer_vxlan_gpe_next_protocol, misc3,
-                      outer_vxlan_gpe_next_protocol);
-       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
-                      outer_vxlan_gpe_vni, misc3,
-                      outer_vxlan_gpe_vni);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
-                                              struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
-       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag;
-}
-
-static int
-dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
-                                          struct mlx5dr_ste_build *sb,
-                                          u8 *tag)
-{
-       struct mlx5dr_match_misc *misc = &value->misc;
-
-       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
-                      geneve_protocol_type, misc, geneve_protocol_type);
-       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
-                      geneve_oam, misc, geneve_oam);
-       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
-                      geneve_opt_len, misc, geneve_opt_len);
-       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
-                      geneve_vni, misc, geneve_vni);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
-       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag;
-}
-
-static int
-dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value,
-                              struct mlx5dr_ste_build *sb,
-                              u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
-       DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
-       DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
-       DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
-       DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb,
-                               struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag;
-}
-
-static int
-dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value,
-                              struct mlx5dr_ste_build *sb,
-                              u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
-       DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
-       DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
-       DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
-       DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb,
-                               struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag;
-}
-
-static void
-dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
-                                     u8 *bit_mask)
-{
-       struct mlx5dr_match_misc *misc_mask = &value->misc;
-
-       DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
-       DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
-       misc_mask->source_eswitch_owner_vhca_id = 0;
-}
-
-static int
-dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
-                                struct mlx5dr_ste_build *sb,
-                                u8 *tag)
-{
-       struct mlx5dr_match_misc *misc = &value->misc;
-       int id = misc->source_eswitch_owner_vhca_id;
-       struct mlx5dr_cmd_vport_cap *vport_cap;
-       struct mlx5dr_domain *dmn = sb->dmn;
-       struct mlx5dr_domain *vport_dmn;
-       u8 *bit_mask = sb->bit_mask;
-       struct mlx5dr_domain *peer;
-       bool source_gvmi_set;
-
-       DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
-
-       if (sb->vhca_id_valid) {
-               peer = xa_load(&dmn->peer_dmn_xa, id);
-               /* Find port GVMI based on the eswitch_owner_vhca_id */
-               if (id == dmn->info.caps.gvmi)
-                       vport_dmn = dmn;
-               else if (peer && (id == peer->info.caps.gvmi))
-                       vport_dmn = peer;
-               else
-                       return -EINVAL;
-
-               misc->source_eswitch_owner_vhca_id = 0;
-       } else {
-               vport_dmn = dmn;
-       }
-
-       source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
-       if (source_gvmi_set) {
-               vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn,
-                                                       misc->source_port);
-               if (!vport_cap) {
-                       mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
-                                  misc->source_port);
-                       return -EINVAL;
-               }
-
-               if (vport_cap->vport_gvmi)
-                       MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
-
-               misc->source_port = 0;
-       }
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
-                                 struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
-}
-
-static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
-                                     u32 *misc4_field_value,
-                                     bool *parser_is_used,
-                                     u8 *tag)
-{
-       u32 id = *misc4_field_id;
-       u8 *parser_ptr;
-
-       if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
-               return;
-
-       parser_is_used[id] = true;
-       parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
-
-       *(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
-       *misc4_field_id = 0;
-       *misc4_field_value = 0;
-}
-
-static int dr_ste_v0_build_flex_parser_tag(struct mlx5dr_match_param *value,
-                                          struct mlx5dr_ste_build *sb,
-                                          u8 *tag)
-{
-       struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
-       bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
-
-       dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
-                                 &misc_4_mask->prog_sample_field_value_0,
-                                 parser_is_used, tag);
-
-       dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
-                                 &misc_4_mask->prog_sample_field_value_1,
-                                 parser_is_used, tag);
-
-       dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
-                                 &misc_4_mask->prog_sample_field_value_2,
-                                 parser_is_used, tag);
-
-       dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
-                                 &misc_4_mask->prog_sample_field_value_3,
-                                 parser_is_used, tag);
-
-       return 0;
-}
-
-static void dr_ste_v0_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
-                                              struct mlx5dr_match_param *mask)
-{
-       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
-       dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
-}
-
-static void dr_ste_v0_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
-                                              struct mlx5dr_match_param *mask)
-{
-       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
-       dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
-}
-
-static int
-dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
-                                                  struct mlx5dr_ste_build *sb,
-                                                  u8 *tag)
-{
-       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-       u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
-       u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
-
-       MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
-                misc3->geneve_tlv_option_0_data);
-       misc3->geneve_tlv_option_0_data = 0;
-
-       return 0;
-}
-
-static void
-dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
-                                                   struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
-
-       /* STEs with lookup type FLEX_PARSER_{0/1} includes
-        * flex parsers_{0-3}/{4-7} respectively.
-        */
-       sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
-               DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
-               DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
-
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag;
-}
-
-static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
-                                                   struct mlx5dr_ste_build *sb,
-                                                   u8 *tag)
-{
-       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
-       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
-                      gtpu_msg_flags, misc3,
-                      gtpu_msg_flags);
-       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
-                      gtpu_msg_type, misc3,
-                      gtpu_msg_type);
-       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
-                      gtpu_teid, misc3,
-                      gtpu_teid);
-
-       return 0;
-}
-
-static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
-                                                     struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_gtpu_tag;
-}
-
-static int
-dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
-                                          struct mlx5dr_ste_build *sb,
-                                          u8 *tag)
-{
-       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
-       return 0;
-}
-
-static void
-dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag;
-}
-
-static int
-dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
-                                          struct mlx5dr_ste_build *sb,
-                                          u8 *tag)
-{
-       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
-       return 0;
-}
-
-static void
-dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
-{
-       dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
-}
-
-static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
-                                             struct mlx5dr_ste_build *sb,
-                                             u8 *tag)
-{
-       struct mlx5dr_match_misc5 *misc5 = &value->misc5;
-
-       DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
-       DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
-
-       return 0;
-}
-
-static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
-                                               struct mlx5dr_match_param *mask)
-{
-       sb->lu_type = DR_STE_V0_LU_TYPE_TUNNEL_HEADER;
-       dr_ste_v0_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
-}
-
-static struct mlx5dr_ste_ctx ste_ctx_v0 = {
-       /* Builders */
-       .build_eth_l2_src_dst_init      = &dr_ste_v0_build_eth_l2_src_dst_init,
-       .build_eth_l3_ipv6_src_init     = &dr_ste_v0_build_eth_l3_ipv6_src_init,
-       .build_eth_l3_ipv6_dst_init     = &dr_ste_v0_build_eth_l3_ipv6_dst_init,
-       .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
-       .build_eth_l2_src_init          = &dr_ste_v0_build_eth_l2_src_init,
-       .build_eth_l2_dst_init          = &dr_ste_v0_build_eth_l2_dst_init,
-       .build_eth_l2_tnl_init          = &dr_ste_v0_build_eth_l2_tnl_init,
-       .build_eth_l3_ipv4_misc_init    = &dr_ste_v0_build_eth_l3_ipv4_misc_init,
-       .build_eth_ipv6_l3_l4_init      = &dr_ste_v0_build_eth_ipv6_l3_l4_init,
-       .build_mpls_init                = &dr_ste_v0_build_mpls_init,
-       .build_tnl_gre_init             = &dr_ste_v0_build_tnl_gre_init,
-       .build_tnl_mpls_init            = &dr_ste_v0_build_tnl_mpls_init,
-       .build_tnl_mpls_over_udp_init   = &dr_ste_v0_build_tnl_mpls_over_udp_init,
-       .build_tnl_mpls_over_gre_init   = &dr_ste_v0_build_tnl_mpls_over_gre_init,
-       .build_icmp_init                = &dr_ste_v0_build_icmp_init,
-       .build_general_purpose_init     = &dr_ste_v0_build_general_purpose_init,
-       .build_eth_l4_misc_init         = &dr_ste_v0_build_eth_l4_misc_init,
-       .build_tnl_vxlan_gpe_init       = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
-       .build_tnl_geneve_init          = &dr_ste_v0_build_flex_parser_tnl_geneve_init,
-       .build_tnl_geneve_tlv_opt_init  = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init,
-       .build_register_0_init          = &dr_ste_v0_build_register_0_init,
-       .build_register_1_init          = &dr_ste_v0_build_register_1_init,
-       .build_src_gvmi_qpn_init        = &dr_ste_v0_build_src_gvmi_qpn_init,
-       .build_flex_parser_0_init       = &dr_ste_v0_build_flex_parser_0_init,
-       .build_flex_parser_1_init       = &dr_ste_v0_build_flex_parser_1_init,
-       .build_tnl_gtpu_init            = &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
-       .build_tnl_header_0_1_init      = &dr_ste_v0_build_tnl_header_0_1_init,
-       .build_tnl_gtpu_flex_parser_0_init   = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
-       .build_tnl_gtpu_flex_parser_1_init   = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,
-
-       /* Getters and Setters */
-       .ste_init                       = &dr_ste_v0_init,
-       .set_next_lu_type               = &dr_ste_v0_set_next_lu_type,
-       .get_next_lu_type               = &dr_ste_v0_get_next_lu_type,
-       .set_miss_addr                  = &dr_ste_v0_set_miss_addr,
-       .get_miss_addr                  = &dr_ste_v0_get_miss_addr,
-       .set_hit_addr                   = &dr_ste_v0_set_hit_addr,
-       .set_byte_mask                  = &dr_ste_v0_set_byte_mask,
-       .get_byte_mask                  = &dr_ste_v0_get_byte_mask,
-
-       /* Actions */
-       .actions_caps                   = DR_STE_CTX_ACTION_CAP_NONE,
-       .set_actions_rx                 = &dr_ste_v0_set_actions_rx,
-       .set_actions_tx                 = &dr_ste_v0_set_actions_tx,
-       .modify_field_arr_sz            = ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
-       .modify_field_arr               = dr_ste_v0_action_modify_field_arr,
-       .set_action_set                 = &dr_ste_v0_set_action_set,
-       .set_action_add                 = &dr_ste_v0_set_action_add,
-       .set_action_copy                = &dr_ste_v0_set_action_copy,
-       .set_action_decap_l3_list       = &dr_ste_v0_set_action_decap_l3_list,
-};
-
-struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void)
-{
-       return &ste_ctx_v0;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
deleted file mode 100644 (file)
index 1d49704..0000000
+++ /dev/null
@@ -1,2341 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
-
-#include <linux/types.h>
-#include "mlx5_ifc_dr_ste_v1.h"
-#include "dr_ste_v1.h"
-
-#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
-       ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
-                  DR_STE_V1_LU_TYPE_##lookup_type##_O)
-
-enum dr_ste_v1_entry_format {
-       DR_STE_V1_TYPE_BWC_BYTE = 0x0,
-       DR_STE_V1_TYPE_BWC_DW   = 0x1,
-       DR_STE_V1_TYPE_MATCH    = 0x2,
-       DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
-};
-
-/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
-enum {
-       DR_STE_V1_LU_TYPE_NOP                           = 0x0000,
-       DR_STE_V1_LU_TYPE_ETHL2_TNL                     = 0x0002,
-       DR_STE_V1_LU_TYPE_IBL3_EXT                      = 0x0102,
-       DR_STE_V1_LU_TYPE_ETHL2_O                       = 0x0003,
-       DR_STE_V1_LU_TYPE_IBL4                          = 0x0103,
-       DR_STE_V1_LU_TYPE_ETHL2_I                       = 0x0004,
-       DR_STE_V1_LU_TYPE_SRC_QP_GVMI                   = 0x0104,
-       DR_STE_V1_LU_TYPE_ETHL2_SRC_O                   = 0x0005,
-       DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O               = 0x0105,
-       DR_STE_V1_LU_TYPE_ETHL2_SRC_I                   = 0x0006,
-       DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I               = 0x0106,
-       DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O          = 0x0007,
-       DR_STE_V1_LU_TYPE_IPV6_DES_O                    = 0x0107,
-       DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I          = 0x0008,
-       DR_STE_V1_LU_TYPE_IPV6_DES_I                    = 0x0108,
-       DR_STE_V1_LU_TYPE_ETHL4_O                       = 0x0009,
-       DR_STE_V1_LU_TYPE_IPV6_SRC_O                    = 0x0109,
-       DR_STE_V1_LU_TYPE_ETHL4_I                       = 0x000a,
-       DR_STE_V1_LU_TYPE_IPV6_SRC_I                    = 0x010a,
-       DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O               = 0x000b,
-       DR_STE_V1_LU_TYPE_MPLS_O                        = 0x010b,
-       DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I               = 0x000c,
-       DR_STE_V1_LU_TYPE_MPLS_I                        = 0x010c,
-       DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O             = 0x000d,
-       DR_STE_V1_LU_TYPE_GRE                           = 0x010d,
-       DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER        = 0x000e,
-       DR_STE_V1_LU_TYPE_GENERAL_PURPOSE               = 0x010e,
-       DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I             = 0x000f,
-       DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0          = 0x010f,
-       DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1          = 0x0110,
-       DR_STE_V1_LU_TYPE_FLEX_PARSER_OK                = 0x0011,
-       DR_STE_V1_LU_TYPE_FLEX_PARSER_0                 = 0x0111,
-       DR_STE_V1_LU_TYPE_FLEX_PARSER_1                 = 0x0112,
-       DR_STE_V1_LU_TYPE_ETHL4_MISC_O                  = 0x0113,
-       DR_STE_V1_LU_TYPE_ETHL4_MISC_I                  = 0x0114,
-       DR_STE_V1_LU_TYPE_INVALID                       = 0x00ff,
-       DR_STE_V1_LU_TYPE_DONT_CARE                     = MLX5DR_STE_LU_TYPE_DONT_CARE,
-};
-
-enum dr_ste_v1_header_anchors {
-       DR_STE_HEADER_ANCHOR_START_OUTER                = 0x00,
-       DR_STE_HEADER_ANCHOR_1ST_VLAN                   = 0x02,
-       DR_STE_HEADER_ANCHOR_IPV6_IPV4                  = 0x07,
-       DR_STE_HEADER_ANCHOR_INNER_MAC                  = 0x13,
-       DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4            = 0x19,
-};
-
-enum dr_ste_v1_action_size {
-       DR_STE_ACTION_SINGLE_SZ = 4,
-       DR_STE_ACTION_DOUBLE_SZ = 8,
-       DR_STE_ACTION_TRIPLE_SZ = 12,
-};
-
-enum dr_ste_v1_action_insert_ptr_attr {
-       DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0,  /* Regular push header (e.g. push vlan) */
-       DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
-       DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2,   /* IPsec */
-};
-
-enum dr_ste_v1_action_id {
-       DR_STE_V1_ACTION_ID_NOP                         = 0x00,
-       DR_STE_V1_ACTION_ID_COPY                        = 0x05,
-       DR_STE_V1_ACTION_ID_SET                         = 0x06,
-       DR_STE_V1_ACTION_ID_ADD                         = 0x07,
-       DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE              = 0x08,
-       DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER     = 0x09,
-       DR_STE_V1_ACTION_ID_INSERT_INLINE               = 0x0a,
-       DR_STE_V1_ACTION_ID_INSERT_POINTER              = 0x0b,
-       DR_STE_V1_ACTION_ID_FLOW_TAG                    = 0x0c,
-       DR_STE_V1_ACTION_ID_QUEUE_ID_SEL                = 0x0d,
-       DR_STE_V1_ACTION_ID_ACCELERATED_LIST            = 0x0e,
-       DR_STE_V1_ACTION_ID_MODIFY_LIST                 = 0x0f,
-       DR_STE_V1_ACTION_ID_ASO                         = 0x12,
-       DR_STE_V1_ACTION_ID_TRAILER                     = 0x13,
-       DR_STE_V1_ACTION_ID_COUNTER_ID                  = 0x14,
-       DR_STE_V1_ACTION_ID_MAX                         = 0x21,
-       /* use for special cases */
-       DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3            = 0x22,
-};
-
-enum {
-       DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0              = 0x00,
-       DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1              = 0x01,
-       DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2              = 0x02,
-       DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0          = 0x08,
-       DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1          = 0x09,
-       DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0              = 0x0e,
-       DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0              = 0x18,
-       DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1              = 0x19,
-       DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0            = 0x40,
-       DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1            = 0x41,
-       DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0        = 0x44,
-       DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1        = 0x45,
-       DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2        = 0x46,
-       DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3        = 0x47,
-       DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0        = 0x4c,
-       DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1        = 0x4d,
-       DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2        = 0x4e,
-       DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3        = 0x4f,
-       DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0            = 0x5e,
-       DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1            = 0x5f,
-       DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0           = 0x6f,
-       DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1           = 0x70,
-       DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE        = 0x7b,
-       DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE          = 0x7c,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0          = 0x8c,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1          = 0x8d,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0          = 0x8e,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1          = 0x8f,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0          = 0x90,
-       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1          = 0x91,
-};
-
-enum dr_ste_v1_aso_ctx_type {
-       DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
-};
-
-static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
-       [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
-               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
-       },
-};
-
-static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
-{
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
-}
-
-bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p)
-{
-       u8 entry_type = MLX5_GET(ste_match_bwc_v1, hw_ste_p, entry_format);
-
-       /* unlike MATCH STE, for MATCH_RANGES STE both hit and miss addresses
-        * are part of the action, so they both set as part of STE init
-        */
-       return entry_type == DR_STE_V1_TYPE_MATCH_RANGES;
-}
-
-void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
-{
-       u64 index = miss_addr >> 6;
-
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26);
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
-}
-
-u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
-{
-       u64 index =
-               ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
-                ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
-
-       return index << 6;
-}
-
-void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
-{
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
-}
-
-u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
-{
-       return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
-}
-
-static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
-{
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
-}
-
-void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
-{
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
-}
-
-u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
-{
-       u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
-       u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
-
-       return (mode << 8 | index);
-}
-
-static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
-{
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
-}
-
-void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
-{
-       u64 index = (icm_addr >> 5) | ht_size;
-
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27);
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
-}
-
-void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi)
-{
-       dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
-       dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
-
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
-}
-
-void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size)
-{
-       u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
-       u8 *mask = tag + DR_STE_SIZE_TAG;
-       u8 tmp_tag[DR_STE_SIZE_TAG] = {};
-
-       if (ste_size == DR_STE_SIZE_CTRL)
-               return;
-
-       WARN_ON(ste_size != DR_STE_SIZE);
-
-       /* Backup tag */
-       memcpy(tmp_tag, tag, DR_STE_SIZE_TAG);
-
-       /* Swap mask and tag  both are the same size */
-       memcpy(tag, mask, DR_STE_SIZE_MASK);
-       memcpy(mask, tmp_tag, DR_STE_SIZE_TAG);
-}
-
-static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
-{
-       MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
-                DR_STE_V1_ACTION_ID_FLOW_TAG);
-       MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
-}
-
-static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
-{
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
-}
-
-static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
-{
-       MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
-}
-
-static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
-                               u32 reformat_id, int size)
-{
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
-                DR_STE_V1_ACTION_ID_INSERT_POINTER);
-       /* The hardware expects here size in words (2 byte) */
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
-                DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
-       dr_ste_v1_set_reparse(hw_ste_p);
-}
-
-static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
-                                    u32 reformat_id,
-                                    u8 anchor, u8 offset,
-                                    int size)
-{
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
-                action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);
-
-       /* The hardware expects here size and offset in words (2 byte) */
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);
-
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
-                DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
-
-       dr_ste_v1_set_reparse(hw_ste_p);
-}
-
-static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
-                                    u8 anchor, u8 offset,
-                                    int size)
-{
-       MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
-                action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
-       MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor);
-
-       /* The hardware expects here size and offset in words (2 byte) */
-       MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2);
-       MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2);
-
-       dr_ste_v1_set_reparse(hw_ste_p);
-}
-
-static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
-                                   u32 vlan_hdr)
-{
-       MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
-                action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
-       /* The hardware expects offset to vlan header in words (2 byte) */
-       MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
-                start_offset, HDR_LEN_L2_MACS >> 1);
-       MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
-                inline_data, vlan_hdr);
-
-       dr_ste_v1_set_reparse(hw_ste_p);
-}
-
-static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
-{
-       MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
-                action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
-       MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
-                start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
-       /* The hardware expects here size in words (2 byte) */
-       MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
-                remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
-
-       dr_ste_v1_set_reparse(hw_ste_p);
-}
-
-static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
-                                  u8 *frst_s_action,
-                                  u8 *scnd_d_action,
-                                  u32 reformat_id,
-                                  int size)
-{
-       /* Remove L2 headers */
-       MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
-                DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
-       MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
-                DR_STE_HEADER_ANCHOR_IPV6_IPV4);
-
-       /* Encapsulate with given reformat ID */
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
-                DR_STE_V1_ACTION_ID_INSERT_POINTER);
-       /* The hardware expects here size in words (2 byte) */
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
-       MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
-                DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
-
-       dr_ste_v1_set_reparse(hw_ste_p);
-}
-
-static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
-{
-       MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
-                DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
-       MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
-       MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);
-       MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
-                DR_STE_HEADER_ANCHOR_INNER_MAC);
-
-       dr_ste_v1_set_reparse(hw_ste_p);
-}
-
-static void dr_ste_v1_set_accelerated_rewrite_actions(u8 *hw_ste_p,
-                                                     u8 *d_action,
-                                                     u16 num_of_actions,
-                                                     u32 rewrite_pattern,
-                                                     u32 rewrite_args,
-                                                     u8 *action_data)
-{
-       if (action_data) {
-               memcpy(d_action, action_data, DR_MODIFY_ACTION_SIZE);
-       } else {
-               MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
-                        action_id, DR_STE_V1_ACTION_ID_ACCELERATED_LIST);
-               MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
-                        modify_actions_pattern_pointer, rewrite_pattern);
-               MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
-                        number_of_modify_actions, num_of_actions);
-               MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
-                        modify_actions_argument_pointer, rewrite_args);
-       }
-
-       dr_ste_v1_set_reparse(hw_ste_p);
-}
-
-static void dr_ste_v1_set_basic_rewrite_actions(u8 *hw_ste_p,
-                                               u8 *s_action,
-                                               u16 num_of_actions,
-                                               u32 rewrite_index)
-{
-       MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
-                DR_STE_V1_ACTION_ID_MODIFY_LIST);
-       MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
-                num_of_actions);
-       MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
-                rewrite_index);
-
-       dr_ste_v1_set_reparse(hw_ste_p);
-}
-
-static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
-                                         u8 *action,
-                                         u16 num_of_actions,
-                                         u32 rewrite_pattern,
-                                         u32 rewrite_args,
-                                         u8 *action_data)
-{
-       if (rewrite_pattern != MLX5DR_INVALID_PATTERN_INDEX)
-               return dr_ste_v1_set_accelerated_rewrite_actions(hw_ste_p,
-                                                                action,
-                                                                num_of_actions,
-                                                                rewrite_pattern,
-                                                                rewrite_args,
-                                                                action_data);
-
-       /* fall back to the code that doesn't support accelerated modify header */
-       return dr_ste_v1_set_basic_rewrite_actions(hw_ste_p,
-                                                  action,
-                                                  num_of_actions,
-                                                  rewrite_args);
-}
-
-static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
-                                        u32 object_id,
-                                        u32 offset,
-                                        u8 dest_reg_id,
-                                        u8 init_color)
-{
-       MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
-                DR_STE_V1_ACTION_ID_ASO);
-       MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
-                object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
-       /* Convert reg_c index to HW 64bit index */
-       MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
-                (dest_reg_id - 1) / 2);
-       MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
-                DR_STE_V1_ASO_CTX_TYPE_POLICERS);
-       MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
-                offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
-       MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
-                init_color);
-}
-
-static void dr_ste_v1_set_match_range_pkt_len(u8 *hw_ste_p, u32 definer_id,
-                                             u32 min, u32 max)
-{
-       MLX5_SET(ste_match_ranges_v1, hw_ste_p, match_definer_ctx_idx, definer_id);
-
-       /* When the STE will be sent, its mask and tags will be swapped in
-        * dr_ste_v1_prepare_for_postsend(). This, however, is match range STE
-        * which doesn't have mask, and shouldn't have mask/tag swapped.
-        * We're using the common utilities functions to send this STE, so need
-        * to allow for this swapping - place the values in the corresponding
-        * locations to allow flipping them when writing to ICM.
-        *
-        * min/max_value_2 corresponds to match_dw_0 in its definer.
-        * To allow mask/tag swapping, writing the min/max_2 to min/max_0.
-        *
-        * Pkt len is 2 bytes that are stored in the higher section of the DW.
-        */
-       MLX5_SET(ste_match_ranges_v1, hw_ste_p, min_value_0, min << 16);
-       MLX5_SET(ste_match_ranges_v1, hw_ste_p, max_value_0, max << 16);
-}
-
-static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
-                                         u32 *added_stes,
-                                         u16 gvmi)
-{
-       u8 *action;
-
-       (*added_stes)++;
-       *last_ste += DR_STE_SIZE;
-       dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
-       dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);
-
-       action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
-       memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
-}
-
-static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
-                                               u32 *added_stes,
-                                               u16 gvmi)
-{
-       dr_ste_v1_arr_init_next_match(last_ste, added_stes, gvmi);
-       dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
-}
-
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
-                             u8 *action_type_set,
-                             u32 actions_caps,
-                             u8 *last_ste,
-                             struct mlx5dr_ste_actions_attr *attr,
-                             u32 *added_stes)
-{
-       u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
-       u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
-       bool allow_modify_hdr = true;
-       bool allow_encap = true;
-
-       if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
-               if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
-                                                     attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1,
-                                             last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-               }
-               dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
-               action_sz -= DR_STE_ACTION_SINGLE_SZ;
-               action += DR_STE_ACTION_SINGLE_SZ;
-
-               /* Check if vlan_pop and modify_hdr on same STE is supported */
-               if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
-                       allow_modify_hdr = false;
-       }
-
-       if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
-               if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
-                                                     attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1,
-                                             last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-               }
-               dr_ste_v1_set_rewrite_actions(last_ste, action,
-                                             attr->modify_actions,
-                                             attr->modify_pat_idx,
-                                             attr->modify_index,
-                                             attr->single_modify_action);
-               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
-               action += DR_STE_ACTION_DOUBLE_SZ;
-               allow_encap = false;
-       }
-
-       if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
-               int i;
-
-               for (i = 0; i < attr->vlans.count; i++) {
-                       if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
-                               dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                               action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                               action_sz = DR_STE_ACTION_TRIPLE_SZ;
-                               allow_encap = true;
-                       }
-                       dr_ste_v1_set_push_vlan(last_ste, action,
-                                               attr->vlans.headers[i]);
-                       action_sz -= DR_STE_ACTION_DOUBLE_SZ;
-                       action += DR_STE_ACTION_DOUBLE_SZ;
-               }
-       }
-
-       if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
-               if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-                       allow_encap = true;
-               }
-               dr_ste_v1_set_encap(last_ste, action,
-                                   attr->reformat.id,
-                                   attr->reformat.size);
-               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
-               action += DR_STE_ACTION_DOUBLE_SZ;
-       } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
-               u8 *d_action;
-
-               if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-               }
-               d_action = action + DR_STE_ACTION_SINGLE_SZ;
-
-               dr_ste_v1_set_encap_l3(last_ste,
-                                      action, d_action,
-                                      attr->reformat.id,
-                                      attr->reformat.size);
-               action_sz -= DR_STE_ACTION_TRIPLE_SZ;
-               action += DR_STE_ACTION_TRIPLE_SZ;
-       } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
-               if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-               }
-               dr_ste_v1_set_insert_hdr(last_ste, action,
-                                        attr->reformat.id,
-                                        attr->reformat.param_0,
-                                        attr->reformat.param_1,
-                                        attr->reformat.size);
-               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
-               action += DR_STE_ACTION_DOUBLE_SZ;
-       } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
-               if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-               }
-               dr_ste_v1_set_remove_hdr(last_ste, action,
-                                        attr->reformat.param_0,
-                                        attr->reformat.param_1,
-                                        attr->reformat.size);
-               action_sz -= DR_STE_ACTION_SINGLE_SZ;
-               action += DR_STE_ACTION_SINGLE_SZ;
-       }
-
-       if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
-               if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-               }
-               dr_ste_v1_set_aso_flow_meter(action,
-                                            attr->aso_flow_meter.obj_id,
-                                            attr->aso_flow_meter.offset,
-                                            attr->aso_flow_meter.dest_reg_id,
-                                            attr->aso_flow_meter.init_color);
-               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
-               action += DR_STE_ACTION_DOUBLE_SZ;
-       }
-
-       if (action_type_set[DR_ACTION_TYP_RANGE]) {
-               /* match ranges requires a new STE of its own type */
-               dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
-               dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
-
-               /* we do not support setting any action on the match ranges STE */
-               action_sz = 0;
-
-               dr_ste_v1_set_match_range_pkt_len(last_ste,
-                                                 attr->range.definer_id,
-                                                 attr->range.min,
-                                                 attr->range.max);
-       }
-
-       /* set counter ID on the last STE to adhere to DMFS behavior */
-       if (action_type_set[DR_ACTION_TYP_CTR])
-               dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
-
-       dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
-       dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
-}
-
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
-                             u8 *action_type_set,
-                             u32 actions_caps,
-                             u8 *last_ste,
-                             struct mlx5dr_ste_actions_attr *attr,
-                             u32 *added_stes)
-{
-       u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
-       u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
-       bool allow_modify_hdr = true;
-       bool allow_ctr = true;
-
-       if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
-               dr_ste_v1_set_rewrite_actions(last_ste, action,
-                                             attr->decap_actions,
-                                             attr->decap_pat_idx,
-                                             attr->decap_index,
-                                             NULL);
-               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
-               action += DR_STE_ACTION_DOUBLE_SZ;
-               allow_modify_hdr = false;
-               allow_ctr = false;
-       } else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
-               dr_ste_v1_set_rx_decap(last_ste, action);
-               action_sz -= DR_STE_ACTION_SINGLE_SZ;
-               action += DR_STE_ACTION_SINGLE_SZ;
-               allow_modify_hdr = false;
-               allow_ctr = false;
-       }
-
-       if (action_type_set[DR_ACTION_TYP_TAG]) {
-               if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-                       allow_modify_hdr = true;
-                       allow_ctr = true;
-               }
-               dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
-               action_sz -= DR_STE_ACTION_SINGLE_SZ;
-               action += DR_STE_ACTION_SINGLE_SZ;
-       }
-
-       if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
-               if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
-                   !allow_modify_hdr) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-               }
-
-               dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
-               action_sz -= DR_STE_ACTION_SINGLE_SZ;
-               action += DR_STE_ACTION_SINGLE_SZ;
-               allow_ctr = false;
-
-               /* Check if vlan_pop and modify_hdr on same STE is supported */
-               if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
-                       allow_modify_hdr = false;
-       }
-
-       if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
-               /* Modify header and decapsulation must use different STEs */
-               if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-                       allow_modify_hdr = true;
-                       allow_ctr = true;
-               }
-               dr_ste_v1_set_rewrite_actions(last_ste, action,
-                                             attr->modify_actions,
-                                             attr->modify_pat_idx,
-                                             attr->modify_index,
-                                             attr->single_modify_action);
-               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
-               action += DR_STE_ACTION_DOUBLE_SZ;
-       }
-
-       if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
-               int i;
-
-               for (i = 0; i < attr->vlans.count; i++) {
-                       if (action_sz < DR_STE_ACTION_DOUBLE_SZ ||
-                           !allow_modify_hdr) {
-                               dr_ste_v1_arr_init_next_match(&last_ste,
-                                                             added_stes,
-                                                             attr->gvmi);
-                               action = MLX5_ADDR_OF(ste_mask_and_match_v1,
-                                                     last_ste, action);
-                               action_sz = DR_STE_ACTION_TRIPLE_SZ;
-                       }
-                       dr_ste_v1_set_push_vlan(last_ste, action,
-                                               attr->vlans.headers[i]);
-                       action_sz -= DR_STE_ACTION_DOUBLE_SZ;
-                       action += DR_STE_ACTION_DOUBLE_SZ;
-               }
-       }
-
-       if (action_type_set[DR_ACTION_TYP_CTR]) {
-               /* Counter action set after decap and before insert_hdr
-                * to exclude decaped / encaped header respectively.
-                */
-               if (!allow_ctr) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-                       allow_modify_hdr = true;
-               }
-               dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
-               allow_ctr = false;
-       }
-
-       if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
-               if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-               }
-               dr_ste_v1_set_encap(last_ste, action,
-                                   attr->reformat.id,
-                                   attr->reformat.size);
-               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
-               action += DR_STE_ACTION_DOUBLE_SZ;
-               allow_modify_hdr = false;
-       } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
-               u8 *d_action;
-
-               if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-               }
-
-               d_action = action + DR_STE_ACTION_SINGLE_SZ;
-
-               dr_ste_v1_set_encap_l3(last_ste,
-                                      action, d_action,
-                                      attr->reformat.id,
-                                      attr->reformat.size);
-               action_sz -= DR_STE_ACTION_TRIPLE_SZ;
-               allow_modify_hdr = false;
-       } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
-               /* Modify header, decap, and encap must use different STEs */
-               if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-               }
-               dr_ste_v1_set_insert_hdr(last_ste, action,
-                                        attr->reformat.id,
-                                        attr->reformat.param_0,
-                                        attr->reformat.param_1,
-                                        attr->reformat.size);
-               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
-               action += DR_STE_ACTION_DOUBLE_SZ;
-               allow_modify_hdr = false;
-       } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
-               if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-                       allow_modify_hdr = true;
-                       allow_ctr = true;
-               }
-               dr_ste_v1_set_remove_hdr(last_ste, action,
-                                        attr->reformat.param_0,
-                                        attr->reformat.param_1,
-                                        attr->reformat.size);
-               action_sz -= DR_STE_ACTION_SINGLE_SZ;
-               action += DR_STE_ACTION_SINGLE_SZ;
-       }
-
-       if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
-               if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
-                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
-                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
-                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
-               }
-               dr_ste_v1_set_aso_flow_meter(action,
-                                            attr->aso_flow_meter.obj_id,
-                                            attr->aso_flow_meter.offset,
-                                            attr->aso_flow_meter.dest_reg_id,
-                                            attr->aso_flow_meter.init_color);
-               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
-               action += DR_STE_ACTION_DOUBLE_SZ;
-       }
-
-       if (action_type_set[DR_ACTION_TYP_RANGE]) {
-               /* match ranges requires a new STE of its own type */
-               dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
-               dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
-
-               /* we do not support setting any action on the match ranges STE */
-               action_sz = 0;
-
-               dr_ste_v1_set_match_range_pkt_len(last_ste,
-                                                 attr->range.definer_id,
-                                                 attr->range.min,
-                                                 attr->range.max);
-       }
-
-       dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
-       dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
-}
-
-void dr_ste_v1_set_action_set(u8 *d_action,
-                             u8 hw_field,
-                             u8 shifter,
-                             u8 length,
-                             u32 data)
-{
-       shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
-       MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
-       MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field);
-       MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
-       MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
-       MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
-}
-
-void dr_ste_v1_set_action_add(u8 *d_action,
-                             u8 hw_field,
-                             u8 shifter,
-                             u8 length,
-                             u32 data)
-{
-       shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
-       MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
-       MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field);
-       MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter);
-       MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length);
-       MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
-}
-
-void dr_ste_v1_set_action_copy(u8 *d_action,
-                              u8 dst_hw_field,
-                              u8 dst_shifter,
-                              u8 dst_len,
-                              u8 src_hw_field,
-                              u8 src_shifter)
-{
-       dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
-       src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
-       MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY);
-       MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field);
-       MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter);
-       MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len);
-       MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field);
-       MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
-}
-
-#define DR_STE_DECAP_L3_ACTION_NUM     8
-#define DR_STE_L2_HDR_MAX_SZ           20
-
-int dr_ste_v1_set_action_decap_l3_list(void *data,
-                                      u32 data_sz,
-                                      u8 *hw_action,
-                                      u32 hw_action_sz,
-                                      u16 *used_hw_action_num)
-{
-       u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
-       void *data_ptr = padded_data;
-       u16 used_actions = 0;
-       u32 inline_data_sz;
-       u32 i;
-
-       if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
-               return -EINVAL;
-
-       inline_data_sz =
-               MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
-
-       /* Add an alignment padding  */
-       memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
-
-       /* Remove L2L3 outer headers */
-       MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
-                DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
-       MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
-       MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
-       MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
-                DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
-       hw_action += DR_STE_ACTION_DOUBLE_SZ;
-       used_actions++; /* Remove and NOP are a single double action */
-
-       /* Point to the last dword of the header */
-       data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
-
-       /* Add the new header using inline action 4Byte at a time, the header
-        * is added in reversed order to the beginning of the packet to avoid
-        * incorrect parsing by the HW. Since header is 14B or 18B an extra
-        * two bytes are padded and later removed.
-        */
-       for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
-               void *addr_inline;
-
-               MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
-                        DR_STE_V1_ACTION_ID_INSERT_INLINE);
-               /* The hardware expects here offset to words (2 bytes) */
-               MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
-
-               /* Copy bytes one by one to avoid endianness problem */
-               addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
-                                          hw_action, inline_data);
-               memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
-               hw_action += DR_STE_ACTION_DOUBLE_SZ;
-               used_actions++;
-       }
-
-       /* Remove first 2 extra bytes */
-       MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
-                DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
-       MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
-       /* The hardware expects here size in words (2 bytes) */
-       MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
-       used_actions++;
-
-       *used_hw_action_num = used_actions;
-
-       return 0;
-}
-
-static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
-                                                   bool inner, u8 *bit_mask)
-{
-       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
-
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);
-
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
-       DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);
-
-       if (mask->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
-               mask->cvlan_tag = 0;
-       } else if (mask->svlan_tag) {
-               MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
-               mask->svlan_tag = 0;
-       }
-}
-
-static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
-                                             struct mlx5dr_ste_build *sb,
-                                             u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
-
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);
-
-       if (spec->ip_version == IP_VERSION_IPV4) {
-               MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
-               spec->ip_version = 0;
-       } else if (spec->ip_version == IP_VERSION_IPV6) {
-               MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
-               spec->ip_version = 0;
-       } else if (spec->ip_version) {
-               return -EINVAL;
-       }
-
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
-       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);
-
-       if (spec->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
-               spec->cvlan_tag = 0;
-       } else if (spec->svlan_tag) {
-               MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
-               spec->svlan_tag = 0;
-       }
-       return 0;
-}
-
-void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
-                                        struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
-}
-
-static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
-                                              struct mlx5dr_ste_build *sb,
-                                              u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
-       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
-       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
-       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
-
-       return 0;
-}
-
-void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
-                                         struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
-}
-
-static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
-                                              struct mlx5dr_ste_build *sb,
-                                              u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
-       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
-       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
-       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
-
-       return 0;
-}
-
-void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
-                                         struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
-}
-
-static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
-                                                  struct mlx5dr_ste_build *sb,
-                                                  u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
-       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);
-
-       if (spec->tcp_flags) {
-               DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
-               spec->tcp_flags = 0;
-       }
-
-       return 0;
-}
-
-void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
-                                             struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
-}
-
-static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
-                                                      bool inner, u8 *bit_mask)
-{
-       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-       struct mlx5dr_match_misc *misc_mask = &value->misc;
-
-       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
-       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
-       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
-       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
-       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
-       DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);
-
-       if (mask->svlan_tag || mask->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
-               mask->cvlan_tag = 0;
-               mask->svlan_tag = 0;
-       }
-
-       if (inner) {
-               if (misc_mask->inner_second_cvlan_tag ||
-                   misc_mask->inner_second_svlan_tag) {
-                       MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
-                       misc_mask->inner_second_cvlan_tag = 0;
-                       misc_mask->inner_second_svlan_tag = 0;
-               }
-
-               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
-                              second_vlan_id, misc_mask, inner_second_vid);
-               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
-                              second_cfi, misc_mask, inner_second_cfi);
-               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
-                              second_priority, misc_mask, inner_second_prio);
-       } else {
-               if (misc_mask->outer_second_cvlan_tag ||
-                   misc_mask->outer_second_svlan_tag) {
-                       MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
-                       misc_mask->outer_second_cvlan_tag = 0;
-                       misc_mask->outer_second_svlan_tag = 0;
-               }
-
-               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
-                              second_vlan_id, misc_mask, outer_second_vid);
-               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
-                              second_cfi, misc_mask, outer_second_cfi);
-               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
-                              second_priority, misc_mask, outer_second_prio);
-       }
-}
-
-static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
-                                                bool inner, u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
-       struct mlx5dr_match_misc *misc_spec = &value->misc;
-
-       DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
-       DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
-       DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
-       DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
-       DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);
-
-       if (spec->ip_version == IP_VERSION_IPV4) {
-               MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
-               spec->ip_version = 0;
-       } else if (spec->ip_version == IP_VERSION_IPV6) {
-               MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
-               spec->ip_version = 0;
-       } else if (spec->ip_version) {
-               return -EINVAL;
-       }
-
-       if (spec->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
-               spec->cvlan_tag = 0;
-       } else if (spec->svlan_tag) {
-               MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
-               spec->svlan_tag = 0;
-       }
-
-       if (inner) {
-               if (misc_spec->inner_second_cvlan_tag) {
-                       MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
-                       misc_spec->inner_second_cvlan_tag = 0;
-               } else if (misc_spec->inner_second_svlan_tag) {
-                       MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
-                       misc_spec->inner_second_svlan_tag = 0;
-               }
-
-               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
-               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
-               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
-       } else {
-               if (misc_spec->outer_second_cvlan_tag) {
-                       MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
-                       misc_spec->outer_second_cvlan_tag = 0;
-               } else if (misc_spec->outer_second_svlan_tag) {
-                       MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
-                       misc_spec->outer_second_svlan_tag = 0;
-               }
-               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
-               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
-               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
-       }
-
-       return 0;
-}
-
-static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
-                                               bool inner, u8 *bit_mask)
-{
-       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
-       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);
-
-       dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
-}
-
-static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
-                                         struct mlx5dr_ste_build *sb,
-                                         u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
-       DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);
-
-       return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
-}
-
-void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
-}
-
-static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
-                                               bool inner, u8 *bit_mask)
-{
-       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
-
-       dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
-}
-
-static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
-                                         struct mlx5dr_ste_build *sb,
-                                         u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
-
-       return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
-}
-
-void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
-}
-
-static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
-                                               bool inner, u8 *bit_mask)
-{
-       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
-       struct mlx5dr_match_misc *misc = &value->misc;
-
-       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
-       DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);
-
-       if (misc->vxlan_vni) {
-               MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
-                        l2_tunneling_network_id, (misc->vxlan_vni << 8));
-               misc->vxlan_vni = 0;
-       }
-
-       if (mask->svlan_tag || mask->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
-               mask->cvlan_tag = 0;
-               mask->svlan_tag = 0;
-       }
-}
-
-static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
-                                         struct mlx5dr_ste_build *sb,
-                                         u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-       struct mlx5dr_match_misc *misc = &value->misc;
-
-       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
-       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);
-
-       if (misc->vxlan_vni) {
-               MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
-                        (misc->vxlan_vni << 8));
-               misc->vxlan_vni = 0;
-       }
-
-       if (spec->cvlan_tag) {
-               MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
-               spec->cvlan_tag = 0;
-       } else if (spec->svlan_tag) {
-               MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
-               spec->svlan_tag = 0;
-       }
-
-       if (spec->ip_version == IP_VERSION_IPV4) {
-               MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
-               spec->ip_version = 0;
-       } else if (spec->ip_version == IP_VERSION_IPV6) {
-               MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
-               spec->ip_version = 0;
-       } else if (spec->ip_version) {
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
-}
-
-static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
-                                               struct mlx5dr_ste_build *sb,
-                                               u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-
-       DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
-       DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl);
-
-       return 0;
-}
-
-void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
-                                          struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
-}
-
-static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
-                                             struct mlx5dr_ste_build *sb,
-                                             u8 *tag)
-{
-       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
-       struct mlx5dr_match_misc *misc = &value->misc;
-
-       DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
-       DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
-       DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
-       DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
-       DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
-       DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
-       DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
-       DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
-       DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);
-
-       if (sb->inner)
-               DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
-       else
-               DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);
-
-       if (spec->tcp_flags) {
-               DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
-               spec->tcp_flags = 0;
-       }
-
-       return 0;
-}
-
-void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
-                                        struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
-}
-
-static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
-                                   struct mlx5dr_ste_build *sb,
-                                   u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
-       if (sb->inner)
-               DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);
-       else
-               DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);
-
-       return 0;
-}
-
-void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
-                              struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
-}
-
-static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
-                                      struct mlx5dr_ste_build *sb,
-                                      u8 *tag)
-{
-       struct  mlx5dr_match_misc *misc = &value->misc;
-
-       DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
-       DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
-       DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
-       DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);
-
-       DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
-       DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);
-
-       return 0;
-}
-
-void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
-                                 struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
-}
-
-static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
-                                       struct mlx5dr_ste_build *sb,
-                                       u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
-       if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
-               DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
-                              misc2, outer_first_mpls_over_gre_label);
-
-               DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
-                              misc2, outer_first_mpls_over_gre_exp);
-
-               DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
-                              misc2, outer_first_mpls_over_gre_s_bos);
-
-               DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
-                              misc2, outer_first_mpls_over_gre_ttl);
-       } else {
-               DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
-                              misc2, outer_first_mpls_over_udp_label);
-
-               DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
-                              misc2, outer_first_mpls_over_udp_exp);
-
-               DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
-                              misc2, outer_first_mpls_over_udp_s_bos);
-
-               DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
-                              misc2, outer_first_mpls_over_udp_ttl);
-       }
-
-       return 0;
-}
-
-void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
-                                  struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
-}
-
-static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
-                                                struct mlx5dr_ste_build *sb,
-                                                u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-       u8 *parser_ptr;
-       u8 parser_id;
-       u32 mpls_hdr;
-
-       mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
-       misc2->outer_first_mpls_over_udp_label = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
-       misc2->outer_first_mpls_over_udp_exp = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
-       misc2->outer_first_mpls_over_udp_s_bos = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
-       misc2->outer_first_mpls_over_udp_ttl = 0;
-
-       parser_id = sb->caps->flex_parser_id_mpls_over_udp;
-       parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
-       *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
-
-       return 0;
-}
-
-void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
-
-       /* STEs with lookup type FLEX_PARSER_{0/1} includes
-        * flex parsers_{0-3}/{4-7} respectively.
-        */
-       sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
-                     DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
-                     DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
-
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag;
-}
-
-static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
-                                                struct mlx5dr_ste_build *sb,
-                                                u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-       u8 *parser_ptr;
-       u8 parser_id;
-       u32 mpls_hdr;
-
-       mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
-       misc2->outer_first_mpls_over_gre_label = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
-       misc2->outer_first_mpls_over_gre_exp = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
-       misc2->outer_first_mpls_over_gre_s_bos = 0;
-       mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
-       misc2->outer_first_mpls_over_gre_ttl = 0;
-
-       parser_id = sb->caps->flex_parser_id_mpls_over_gre;
-       parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
-       *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
-
-       return 0;
-}
-
-void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
-
-       /* STEs with lookup type FLEX_PARSER_{0/1} includes
-        * flex parsers_{0-3}/{4-7} respectively.
-        */
-       sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
-                     DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
-                     DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
-
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag;
-}
-
-static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
-                                   struct mlx5dr_ste_build *sb,
-                                   u8 *tag)
-{
-       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-       bool is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc3);
-       u32 *icmp_header_data;
-       u8 *icmp_type;
-       u8 *icmp_code;
-
-       if (is_ipv4) {
-               icmp_header_data        = &misc3->icmpv4_header_data;
-               icmp_type               = &misc3->icmpv4_type;
-               icmp_code               = &misc3->icmpv4_code;
-       } else {
-               icmp_header_data        = &misc3->icmpv6_header_data;
-               icmp_type               = &misc3->icmpv6_type;
-               icmp_code               = &misc3->icmpv6_code;
-       }
-
-       MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *icmp_header_data);
-       MLX5_SET(ste_icmp_v1, tag, icmp_type, *icmp_type);
-       MLX5_SET(ste_icmp_v1, tag, icmp_code, *icmp_code);
-
-       *icmp_header_data = 0;
-       *icmp_type = 0;
-       *icmp_code = 0;
-
-       return 0;
-}
-
-void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
-                              struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
-}
-
-static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
-                                              struct mlx5dr_ste_build *sb,
-                                              u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
-       DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
-                      misc2, metadata_reg_a);
-
-       return 0;
-}
-
-void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
-                                         struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
-}
-
-static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
-                                          struct mlx5dr_ste_build *sb,
-                                          u8 *tag)
-{
-       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
-       if (sb->inner) {
-               DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num);
-               DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num);
-       } else {
-               DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num);
-               DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num);
-       }
-
-       return 0;
-}
-
-void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
-                                     struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
-}
-
-static int
-dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
-                                             struct mlx5dr_ste_build *sb,
-                                             u8 *tag)
-{
-       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
-       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
-                      outer_vxlan_gpe_flags, misc3,
-                      outer_vxlan_gpe_flags);
-       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
-                      outer_vxlan_gpe_next_protocol, misc3,
-                      outer_vxlan_gpe_next_protocol);
-       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
-                      outer_vxlan_gpe_vni, misc3,
-                      outer_vxlan_gpe_vni);
-
-       return 0;
-}
-
-void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
-                                                   struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
-}
-
-static int
-dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
-                                          struct mlx5dr_ste_build *sb,
-                                          u8 *tag)
-{
-       struct mlx5dr_match_misc *misc = &value->misc;
-
-       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
-                      geneve_protocol_type, misc, geneve_protocol_type);
-       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
-                      geneve_oam, misc, geneve_oam);
-       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
-                      geneve_opt_len, misc, geneve_opt_len);
-       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
-                      geneve_vni, misc, geneve_vni);
-
-       return 0;
-}
-
-void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
-                                                struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
-}
-
-static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
-                                             struct mlx5dr_ste_build *sb,
-                                             u8 *tag)
-{
-       struct mlx5dr_match_misc5 *misc5 = &value->misc5;
-
-       DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
-       DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
-
-       return 0;
-}
-
-void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
-                                        struct mlx5dr_match_param *mask)
-{
-       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
-       dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag;
-}
-
-static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
-                                         struct mlx5dr_ste_build *sb,
-                                         u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
-       DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
-       DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
-       DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
-       DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
-
-       return 0;
-}
-
-void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
-}
-
-static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
-                                         struct mlx5dr_ste_build *sb,
-                                         u8 *tag)
-{
-       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
-
-       DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
-       DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
-       DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
-       DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
-
-       return 0;
-}
-
-void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
-}
-
-static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
-                                                 u8 *bit_mask)
-{
-       struct mlx5dr_match_misc *misc_mask = &value->misc;
-
-       DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
-       DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
-       misc_mask->source_eswitch_owner_vhca_id = 0;
-}
-
-static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
-                                           struct mlx5dr_ste_build *sb,
-                                           u8 *tag)
-{
-       struct mlx5dr_match_misc *misc = &value->misc;
-       int id = misc->source_eswitch_owner_vhca_id;
-       struct mlx5dr_cmd_vport_cap *vport_cap;
-       struct mlx5dr_domain *dmn = sb->dmn;
-       struct mlx5dr_domain *vport_dmn;
-       u8 *bit_mask = sb->bit_mask;
-       struct mlx5dr_domain *peer;
-
-       DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
-
-       if (sb->vhca_id_valid) {
-               peer = xa_load(&dmn->peer_dmn_xa, id);
-               /* Find port GVMI based on the eswitch_owner_vhca_id */
-               if (id == dmn->info.caps.gvmi)
-                       vport_dmn = dmn;
-               else if (peer && (id == peer->info.caps.gvmi))
-                       vport_dmn = peer;
-               else
-                       return -EINVAL;
-
-               misc->source_eswitch_owner_vhca_id = 0;
-       } else {
-               vport_dmn = dmn;
-       }
-
-       if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
-               return 0;
-
-       vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
-       if (!vport_cap) {
-               mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
-                          misc->source_port);
-               return -EINVAL;
-       }
-
-       if (vport_cap->vport_gvmi)
-               MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);
-
-       misc->source_port = 0;
-       return 0;
-}
-
-void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
-                                      struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
-}
-
-static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
-                                     u32 *misc4_field_value,
-                                     bool *parser_is_used,
-                                     u8 *tag)
-{
-       u32 id = *misc4_field_id;
-       u8 *parser_ptr;
-
-       if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
-               return;
-
-       parser_is_used[id] = true;
-       parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
-
-       *(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
-       *misc4_field_id = 0;
-       *misc4_field_value = 0;
-}
-
-static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
-                                          struct mlx5dr_ste_build *sb,
-                                          u8 *tag)
-{
-       struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
-       bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
-
-       dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
-                                 &misc_4_mask->prog_sample_field_value_0,
-                                 parser_is_used, tag);
-
-       dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
-                                 &misc_4_mask->prog_sample_field_value_1,
-                                 parser_is_used, tag);
-
-       dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
-                                 &misc_4_mask->prog_sample_field_value_2,
-                                 parser_is_used, tag);
-
-       dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
-                                 &misc_4_mask->prog_sample_field_value_3,
-                                 parser_is_used, tag);
-
-       return 0;
-}
-
-void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
-                                       struct mlx5dr_match_param *mask)
-{
-       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
-       dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
-}
-
-void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
-                                       struct mlx5dr_match_param *mask)
-{
-       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
-       dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
-}
-
-static int
-dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
-                                                  struct mlx5dr_ste_build *sb,
-                                                  u8 *tag)
-{
-       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-       u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
-       u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
-
-       MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
-                misc3->geneve_tlv_option_0_data);
-       misc3->geneve_tlv_option_0_data = 0;
-
-       return 0;
-}
-
-void
-dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
-                                                   struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
-
-       /* STEs with lookup type FLEX_PARSER_{0/1} includes
-        * flex parsers_{0-3}/{4-7} respectively.
-        */
-       sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
-                     DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
-                     DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
-
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
-}
-
-static int
-dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
-                                                        struct mlx5dr_ste_build *sb,
-                                                        u8 *tag)
-{
-       u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
-       struct mlx5dr_match_misc *misc = &value->misc;
-
-       if (misc->geneve_tlv_option_0_exist) {
-               MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id);
-               misc->geneve_tlv_option_0_exist = 0;
-       }
-
-       return 0;
-}
-
-void
-dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
-                                                         struct mlx5dr_match_param *mask)
-{
-       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK;
-       dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask);
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag;
-}
-
-static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
-                                                   struct mlx5dr_ste_build *sb,
-                                                   u8 *tag)
-{
-       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
-
-       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc3, gtpu_msg_flags);
-       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc3, gtpu_msg_type);
-       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc3, gtpu_teid);
-
-       return 0;
-}
-
-void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
-                                              struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
-}
-
-static int
-dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
-                                          struct mlx5dr_ste_build *sb,
-                                          u8 *tag)
-{
-       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
-       return 0;
-}
-
-void
-dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
-}
-
-static int
-dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
-                                          struct mlx5dr_ste_build *sb,
-                                          u8 *tag)
-{
-       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
-       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
-               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
-       return 0;
-}
-
-void
-dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask)
-{
-       dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
-
-       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
-       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
-       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
-}
-
-int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
-{
-       struct mlx5dr_ptrn_mgr *ptrn_mgr;
-       int ret;
-
-       ptrn_mgr = action->rewrite->dmn->ptrn_mgr;
-       if (!ptrn_mgr)
-               return -EOPNOTSUPP;
-
-       action->rewrite->arg = mlx5dr_arg_get_obj(action->rewrite->dmn->arg_mgr,
-                                                 action->rewrite->num_of_actions,
-                                                 action->rewrite->data);
-       if (!action->rewrite->arg) {
-               mlx5dr_err(action->rewrite->dmn, "Failed allocating args for modify header\n");
-               return -EAGAIN;
-       }
-
-       action->rewrite->ptrn =
-               mlx5dr_ptrn_cache_get_pattern(ptrn_mgr,
-                                             action->rewrite->num_of_actions,
-                                             action->rewrite->data);
-       if (!action->rewrite->ptrn) {
-               mlx5dr_err(action->rewrite->dmn, "Failed to get pattern\n");
-               ret = -EAGAIN;
-               goto put_arg;
-       }
-
-       return 0;
-
-put_arg:
-       mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
-                          action->rewrite->arg);
-       return ret;
-}
-
-void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
-{
-       mlx5dr_ptrn_cache_put_pattern(action->rewrite->dmn->ptrn_mgr,
-                                     action->rewrite->ptrn);
-       mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
-                          action->rewrite->arg);
-}
-
-static struct mlx5dr_ste_ctx ste_ctx_v1 = {
-       /* Builders */
-       .build_eth_l2_src_dst_init      = &dr_ste_v1_build_eth_l2_src_dst_init,
-       .build_eth_l3_ipv6_src_init     = &dr_ste_v1_build_eth_l3_ipv6_src_init,
-       .build_eth_l3_ipv6_dst_init     = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
-       .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
-       .build_eth_l2_src_init          = &dr_ste_v1_build_eth_l2_src_init,
-       .build_eth_l2_dst_init          = &dr_ste_v1_build_eth_l2_dst_init,
-       .build_eth_l2_tnl_init          = &dr_ste_v1_build_eth_l2_tnl_init,
-       .build_eth_l3_ipv4_misc_init    = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
-       .build_eth_ipv6_l3_l4_init      = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
-       .build_mpls_init                = &dr_ste_v1_build_mpls_init,
-       .build_tnl_gre_init             = &dr_ste_v1_build_tnl_gre_init,
-       .build_tnl_mpls_init            = &dr_ste_v1_build_tnl_mpls_init,
-       .build_tnl_mpls_over_udp_init   = &dr_ste_v1_build_tnl_mpls_over_udp_init,
-       .build_tnl_mpls_over_gre_init   = &dr_ste_v1_build_tnl_mpls_over_gre_init,
-       .build_icmp_init                = &dr_ste_v1_build_icmp_init,
-       .build_general_purpose_init     = &dr_ste_v1_build_general_purpose_init,
-       .build_eth_l4_misc_init         = &dr_ste_v1_build_eth_l4_misc_init,
-       .build_tnl_vxlan_gpe_init       = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
-       .build_tnl_geneve_init          = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
-       .build_tnl_geneve_tlv_opt_init  = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
-       .build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
-       .build_register_0_init          = &dr_ste_v1_build_register_0_init,
-       .build_register_1_init          = &dr_ste_v1_build_register_1_init,
-       .build_src_gvmi_qpn_init        = &dr_ste_v1_build_src_gvmi_qpn_init,
-       .build_flex_parser_0_init       = &dr_ste_v1_build_flex_parser_0_init,
-       .build_flex_parser_1_init       = &dr_ste_v1_build_flex_parser_1_init,
-       .build_tnl_gtpu_init            = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
-       .build_tnl_header_0_1_init      = &dr_ste_v1_build_tnl_header_0_1_init,
-       .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
-       .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
-
-       /* Getters and Setters */
-       .ste_init                       = &dr_ste_v1_init,
-       .set_next_lu_type               = &dr_ste_v1_set_next_lu_type,
-       .get_next_lu_type               = &dr_ste_v1_get_next_lu_type,
-       .is_miss_addr_set               = &dr_ste_v1_is_miss_addr_set,
-       .set_miss_addr                  = &dr_ste_v1_set_miss_addr,
-       .get_miss_addr                  = &dr_ste_v1_get_miss_addr,
-       .set_hit_addr                   = &dr_ste_v1_set_hit_addr,
-       .set_byte_mask                  = &dr_ste_v1_set_byte_mask,
-       .get_byte_mask                  = &dr_ste_v1_get_byte_mask,
-       /* Actions */
-       .actions_caps                   = DR_STE_CTX_ACTION_CAP_TX_POP |
-                                         DR_STE_CTX_ACTION_CAP_RX_PUSH |
-                                         DR_STE_CTX_ACTION_CAP_RX_ENCAP |
-                                         DR_STE_CTX_ACTION_CAP_POP_MDFY,
-       .set_actions_rx                 = &dr_ste_v1_set_actions_rx,
-       .set_actions_tx                 = &dr_ste_v1_set_actions_tx,
-       .modify_field_arr_sz            = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
-       .modify_field_arr               = dr_ste_v1_action_modify_field_arr,
-       .set_action_set                 = &dr_ste_v1_set_action_set,
-       .set_action_add                 = &dr_ste_v1_set_action_add,
-       .set_action_copy                = &dr_ste_v1_set_action_copy,
-       .set_action_decap_l3_list       = &dr_ste_v1_set_action_decap_l3_list,
-       .alloc_modify_hdr_chunk         = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
-       .dealloc_modify_hdr_chunk       = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
-       /* Send */
-       .prepare_for_postsend           = &dr_ste_v1_prepare_for_postsend,
-};
-
-struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void)
-{
-       return &ste_ctx_v1;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.h
deleted file mode 100644 (file)
index e2fc698..0000000
+++ /dev/null
@@ -1,97 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-
-#ifndef        _DR_STE_V1_
-#define        _DR_STE_V1_
-
-#include "dr_types.h"
-#include "dr_ste.h"
-
-bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
-void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
-u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
-void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
-u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p);
-void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type);
-u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
-void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
-void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
-void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
-void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
-                             u32 actions_caps, u8 *last_ste,
-                             struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
-void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
-                             u32 actions_caps, u8 *last_ste,
-                             struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
-void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
-                             u8 length, u32 data);
-void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter,
-                             u8 length, u32 data);
-void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
-                              u8 dst_len, u8 src_hw_field, u8 src_shifter);
-int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
-                                      u32 hw_action_sz, u16 *used_hw_action_num);
-int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
-void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
-void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
-                                        struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
-                                         struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
-                                         struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
-                                             struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
-                                          struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
-                                        struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
-                              struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
-                                 struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
-                                  struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
-                                           struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
-                              struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
-                                         struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
-                                     struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
-                                                   struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
-                                                struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
-                                        struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
-                                      struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
-                                       struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
-                                       struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
-                                                        struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
-                                                              struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
-                                              struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
-                                                struct mlx5dr_match_param *mask);
-void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
-                                                struct mlx5dr_match_param *mask);
-
-#endif  /* _DR_STE_V1_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v2.c
deleted file mode 100644 (file)
index 808b013..0000000
+++ /dev/null
@@ -1,234 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
-
-#include "dr_ste_v1.h"
-
-enum {
-       DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0              = 0x00,
-       DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1              = 0x01,
-       DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2              = 0x02,
-       DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0          = 0x08,
-       DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1          = 0x09,
-       DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0              = 0x0e,
-       DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0              = 0x18,
-       DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1              = 0x19,
-       DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0            = 0x40,
-       DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1            = 0x41,
-       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0        = 0x44,
-       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1        = 0x45,
-       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2        = 0x46,
-       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3        = 0x47,
-       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0        = 0x4c,
-       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1        = 0x4d,
-       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2        = 0x4e,
-       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3        = 0x4f,
-       DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0            = 0x5e,
-       DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1            = 0x5f,
-       DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0           = 0x6f,
-       DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1           = 0x70,
-       DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE        = 0x7b,
-       DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE          = 0x7c,
-       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0          = 0x90,
-       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1          = 0x91,
-       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0          = 0x92,
-       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1          = 0x93,
-       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0          = 0x94,
-       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1          = 0x95,
-};
-
-static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
-       [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
-               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
-               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
-       },
-       [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
-               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
-       },
-};
-
-static struct mlx5dr_ste_ctx ste_ctx_v2 = {
-       /* Builders */
-       .build_eth_l2_src_dst_init      = &dr_ste_v1_build_eth_l2_src_dst_init,
-       .build_eth_l3_ipv6_src_init     = &dr_ste_v1_build_eth_l3_ipv6_src_init,
-       .build_eth_l3_ipv6_dst_init     = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
-       .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
-       .build_eth_l2_src_init          = &dr_ste_v1_build_eth_l2_src_init,
-       .build_eth_l2_dst_init          = &dr_ste_v1_build_eth_l2_dst_init,
-       .build_eth_l2_tnl_init          = &dr_ste_v1_build_eth_l2_tnl_init,
-       .build_eth_l3_ipv4_misc_init    = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
-       .build_eth_ipv6_l3_l4_init      = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
-       .build_mpls_init                = &dr_ste_v1_build_mpls_init,
-       .build_tnl_gre_init             = &dr_ste_v1_build_tnl_gre_init,
-       .build_tnl_mpls_init            = &dr_ste_v1_build_tnl_mpls_init,
-       .build_tnl_mpls_over_udp_init   = &dr_ste_v1_build_tnl_mpls_over_udp_init,
-       .build_tnl_mpls_over_gre_init   = &dr_ste_v1_build_tnl_mpls_over_gre_init,
-       .build_icmp_init                = &dr_ste_v1_build_icmp_init,
-       .build_general_purpose_init     = &dr_ste_v1_build_general_purpose_init,
-       .build_eth_l4_misc_init         = &dr_ste_v1_build_eth_l4_misc_init,
-       .build_tnl_vxlan_gpe_init       = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
-       .build_tnl_geneve_init          = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
-       .build_tnl_geneve_tlv_opt_init  = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
-       .build_tnl_geneve_tlv_opt_exist_init =
-                                 &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
-       .build_register_0_init          = &dr_ste_v1_build_register_0_init,
-       .build_register_1_init          = &dr_ste_v1_build_register_1_init,
-       .build_src_gvmi_qpn_init        = &dr_ste_v1_build_src_gvmi_qpn_init,
-       .build_flex_parser_0_init       = &dr_ste_v1_build_flex_parser_0_init,
-       .build_flex_parser_1_init       = &dr_ste_v1_build_flex_parser_1_init,
-       .build_tnl_gtpu_init            = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
-       .build_tnl_header_0_1_init      = &dr_ste_v1_build_tnl_header_0_1_init,
-       .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
-       .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
-
-       /* Getters and Setters */
-       .ste_init                       = &dr_ste_v1_init,
-       .set_next_lu_type               = &dr_ste_v1_set_next_lu_type,
-       .get_next_lu_type               = &dr_ste_v1_get_next_lu_type,
-       .is_miss_addr_set               = &dr_ste_v1_is_miss_addr_set,
-       .set_miss_addr                  = &dr_ste_v1_set_miss_addr,
-       .get_miss_addr                  = &dr_ste_v1_get_miss_addr,
-       .set_hit_addr                   = &dr_ste_v1_set_hit_addr,
-       .set_byte_mask                  = &dr_ste_v1_set_byte_mask,
-       .get_byte_mask                  = &dr_ste_v1_get_byte_mask,
-
-       /* Actions */
-       .actions_caps                   = DR_STE_CTX_ACTION_CAP_TX_POP |
-                                         DR_STE_CTX_ACTION_CAP_RX_PUSH |
-                                         DR_STE_CTX_ACTION_CAP_RX_ENCAP,
-       .set_actions_rx                 = &dr_ste_v1_set_actions_rx,
-       .set_actions_tx                 = &dr_ste_v1_set_actions_tx,
-       .modify_field_arr_sz            = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
-       .modify_field_arr               = dr_ste_v2_action_modify_field_arr,
-       .set_action_set                 = &dr_ste_v1_set_action_set,
-       .set_action_add                 = &dr_ste_v1_set_action_add,
-       .set_action_copy                = &dr_ste_v1_set_action_copy,
-       .set_action_decap_l3_list       = &dr_ste_v1_set_action_decap_l3_list,
-       .alloc_modify_hdr_chunk         = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
-       .dealloc_modify_hdr_chunk       = &dr_ste_v1_free_modify_hdr_ptrn_arg,
-
-       /* Send */
-       .prepare_for_postsend           = &dr_ste_v1_prepare_for_postsend,
-};
-
-struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void)
-{
-       return &ste_ctx_v2;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_table.c
deleted file mode 100644 (file)
index 69294a6..0000000
+++ /dev/null
@@ -1,319 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies. */
-
-#include "dr_types.h"
-
-static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
-                                       struct mlx5dr_table_rx_tx *nic_tbl,
-                                       struct mlx5dr_action *action)
-{
-       struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL;
-       struct mlx5dr_htbl_connect_info info;
-       struct mlx5dr_ste_htbl *last_htbl;
-       struct mlx5dr_icm_chunk *chunk;
-       int ret;
-
-       if (!list_empty(&nic_tbl->nic_matcher_list))
-               last_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
-                                                  struct mlx5dr_matcher_rx_tx,
-                                                  list_node);
-
-       if (last_nic_matcher)
-               last_htbl = last_nic_matcher->e_anchor;
-       else
-               last_htbl = nic_tbl->s_anchor;
-
-       if (action) {
-               chunk = nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
-                       action->dest_tbl->tbl->rx.s_anchor->chunk :
-                       action->dest_tbl->tbl->tx.s_anchor->chunk;
-               nic_tbl->default_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
-       } else {
-               nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr;
-       }
-
-       info.type = CONNECT_MISS;
-       info.miss_icm_addr = nic_tbl->default_icm_addr;
-
-       ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_tbl->nic_dmn,
-                                               last_htbl, &info, true);
-       if (ret)
-               mlx5dr_dbg(dmn, "Failed to set NIC RX/TX miss action, ret %d\n", ret);
-
-       return ret;
-}
-
-int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
-                                struct mlx5dr_action *action)
-{
-       int ret = -EOPNOTSUPP;
-
-       if (action && action->action_type != DR_ACTION_TYP_FT)
-               return -EOPNOTSUPP;
-
-       mlx5dr_domain_lock(tbl->dmn);
-
-       if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||
-           tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
-               ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->rx, action);
-               if (ret)
-                       goto out;
-       }
-
-       if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||
-           tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
-               ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->tx, action);
-               if (ret)
-                       goto out;
-       }
-
-       if (ret)
-               goto out;
-
-       /* Release old action */
-       if (tbl->miss_action)
-               refcount_dec(&tbl->miss_action->refcount);
-
-       /* Set new miss action */
-       tbl->miss_action = action;
-       if (tbl->miss_action)
-               refcount_inc(&action->refcount);
-
-out:
-       mlx5dr_domain_unlock(tbl->dmn);
-       return ret;
-}
-
-static void dr_table_uninit_nic(struct mlx5dr_table_rx_tx *nic_tbl)
-{
-       mlx5dr_htbl_put(nic_tbl->s_anchor);
-}
-
-static void dr_table_uninit_fdb(struct mlx5dr_table *tbl)
-{
-       dr_table_uninit_nic(&tbl->rx);
-       dr_table_uninit_nic(&tbl->tx);
-}
-
-static void dr_table_uninit(struct mlx5dr_table *tbl)
-{
-       mlx5dr_domain_lock(tbl->dmn);
-
-       switch (tbl->dmn->type) {
-       case MLX5DR_DOMAIN_TYPE_NIC_RX:
-               dr_table_uninit_nic(&tbl->rx);
-               break;
-       case MLX5DR_DOMAIN_TYPE_NIC_TX:
-               dr_table_uninit_nic(&tbl->tx);
-               break;
-       case MLX5DR_DOMAIN_TYPE_FDB:
-               dr_table_uninit_fdb(tbl);
-               break;
-       default:
-               WARN_ON(true);
-               break;
-       }
-
-       mlx5dr_domain_unlock(tbl->dmn);
-}
-
-static int dr_table_init_nic(struct mlx5dr_domain *dmn,
-                            struct mlx5dr_table_rx_tx *nic_tbl)
-{
-       struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
-       struct mlx5dr_htbl_connect_info info;
-       int ret;
-
-       INIT_LIST_HEAD(&nic_tbl->nic_matcher_list);
-
-       nic_tbl->default_icm_addr = nic_dmn->default_icm_addr;
-
-       nic_tbl->s_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
-                                                 DR_CHUNK_SIZE_1,
-                                                 MLX5DR_STE_LU_TYPE_DONT_CARE,
-                                                 0);
-       if (!nic_tbl->s_anchor) {
-               mlx5dr_err(dmn, "Failed allocating htbl\n");
-               return -ENOMEM;
-       }
-
-       info.type = CONNECT_MISS;
-       info.miss_icm_addr = nic_dmn->default_icm_addr;
-       ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
-                                               nic_tbl->s_anchor,
-                                               &info, true);
-       if (ret) {
-               mlx5dr_err(dmn, "Failed int and send htbl\n");
-               goto free_s_anchor;
-       }
-
-       mlx5dr_htbl_get(nic_tbl->s_anchor);
-
-       return 0;
-
-free_s_anchor:
-       mlx5dr_ste_htbl_free(nic_tbl->s_anchor);
-       return ret;
-}
-
-static int dr_table_init_fdb(struct mlx5dr_table *tbl)
-{
-       int ret;
-
-       ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
-       if (ret)
-               return ret;
-
-       ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
-       if (ret)
-               goto destroy_rx;
-
-       return 0;
-
-destroy_rx:
-       dr_table_uninit_nic(&tbl->rx);
-       return ret;
-}
-
-static int dr_table_init(struct mlx5dr_table *tbl)
-{
-       int ret = 0;
-
-       INIT_LIST_HEAD(&tbl->matcher_list);
-
-       mlx5dr_domain_lock(tbl->dmn);
-
-       switch (tbl->dmn->type) {
-       case MLX5DR_DOMAIN_TYPE_NIC_RX:
-               tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_RX;
-               tbl->rx.nic_dmn = &tbl->dmn->info.rx;
-               ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
-               break;
-       case MLX5DR_DOMAIN_TYPE_NIC_TX:
-               tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_TX;
-               tbl->tx.nic_dmn = &tbl->dmn->info.tx;
-               ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
-               break;
-       case MLX5DR_DOMAIN_TYPE_FDB:
-               tbl->table_type = MLX5_FLOW_TABLE_TYPE_FDB;
-               tbl->rx.nic_dmn = &tbl->dmn->info.rx;
-               tbl->tx.nic_dmn = &tbl->dmn->info.tx;
-               ret = dr_table_init_fdb(tbl);
-               break;
-       default:
-               WARN_ON(true);
-               break;
-       }
-
-       mlx5dr_domain_unlock(tbl->dmn);
-
-       return ret;
-}
-
-static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
-{
-       return mlx5dr_cmd_destroy_flow_table(tbl->dmn->mdev,
-                                            tbl->table_id,
-                                            tbl->table_type);
-}
-
-static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl, u16 uid)
-{
-       bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
-       bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
-       struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
-       u64 icm_addr_rx = 0;
-       u64 icm_addr_tx = 0;
-       int ret;
-
-       if (tbl->rx.s_anchor)
-               icm_addr_rx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->rx.s_anchor->chunk);
-
-       if (tbl->tx.s_anchor)
-               icm_addr_tx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->tx.s_anchor->chunk);
-
-       ft_attr.table_type = tbl->table_type;
-       ft_attr.icm_addr_rx = icm_addr_rx;
-       ft_attr.icm_addr_tx = icm_addr_tx;
-       ft_attr.level = tbl->dmn->info.caps.max_ft_level - 1;
-       ft_attr.sw_owner = true;
-       ft_attr.decap_en = en_decap;
-       ft_attr.reformat_en = en_encap;
-       ft_attr.uid = uid;
-
-       ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr,
-                                          NULL, &tbl->table_id);
-
-       return ret;
-}
-
-struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level,
-                                        u32 flags, u16 uid)
-{
-       struct mlx5dr_table *tbl;
-       int ret;
-
-       refcount_inc(&dmn->refcount);
-
-       tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
-       if (!tbl)
-               goto dec_ref;
-
-       tbl->dmn = dmn;
-       tbl->level = level;
-       tbl->flags = flags;
-       refcount_set(&tbl->refcount, 1);
-
-       ret = dr_table_init(tbl);
-       if (ret)
-               goto free_tbl;
-
-       ret = dr_table_create_sw_owned_tbl(tbl, uid);
-       if (ret)
-               goto uninit_tbl;
-
-       INIT_LIST_HEAD(&tbl->dbg_node);
-       mlx5dr_dbg_tbl_add(tbl);
-       return tbl;
-
-uninit_tbl:
-       dr_table_uninit(tbl);
-free_tbl:
-       kfree(tbl);
-dec_ref:
-       refcount_dec(&dmn->refcount);
-       return NULL;
-}
-
-int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
-{
-       int ret;
-
-       if (WARN_ON_ONCE(refcount_read(&tbl->refcount) > 1))
-               return -EBUSY;
-
-       mlx5dr_dbg_tbl_del(tbl);
-       ret = dr_table_destroy_sw_owned_tbl(tbl);
-       if (ret)
-               mlx5dr_err(tbl->dmn, "Failed to destroy sw owned table\n");
-
-       dr_table_uninit(tbl);
-
-       if (tbl->miss_action)
-               refcount_dec(&tbl->miss_action->refcount);
-
-       refcount_dec(&tbl->dmn->refcount);
-       kfree(tbl);
-
-       return ret;
-}
-
-u32 mlx5dr_table_get_id(struct mlx5dr_table *tbl)
-{
-       return tbl->table_id;
-}
-
-struct mlx5dr_table *mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft)
-{
-       return ft->fs_dr_table.dr_table;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
deleted file mode 100644 (file)
index 7618c61..0000000
+++ /dev/null
@@ -1,1599 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2019, Mellanox Technologies */
-
-#ifndef        _DR_TYPES_
-#define        _DR_TYPES_
-
-#include <linux/mlx5/vport.h>
-#include <linux/refcount.h>
-#include "fs_core.h"
-#include "wq.h"
-#include "lib/mlx5.h"
-#include "mlx5_ifc_dr.h"
-#include "mlx5dr.h"
-#include "dr_dbg.h"
-
-#define DR_RULE_MAX_STES 18
-#define DR_ACTION_MAX_STES 5
-#define DR_STE_SVLAN 0x1
-#define DR_STE_CVLAN 0x2
-#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
-#define DR_NUM_OF_FLEX_PARSERS 8
-#define DR_STE_MAX_FLEX_0_ID 3
-#define DR_STE_MAX_FLEX_1_ID 7
-#define DR_ACTION_CACHE_LINE_SIZE 64
-
-#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
-#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
-#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
-
-struct mlx5dr_ptrn_mgr;
-struct mlx5dr_arg_mgr;
-struct mlx5dr_arg_obj;
-
-static inline bool dr_is_flex_parser_0_id(u8 parser_id)
-{
-       return parser_id <= DR_STE_MAX_FLEX_0_ID;
-}
-
-static inline bool dr_is_flex_parser_1_id(u8 parser_id)
-{
-       return parser_id > DR_STE_MAX_FLEX_0_ID;
-}
-
-enum mlx5dr_icm_chunk_size {
-       DR_CHUNK_SIZE_1,
-       DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
-       DR_CHUNK_SIZE_2,
-       DR_CHUNK_SIZE_4,
-       DR_CHUNK_SIZE_8,
-       DR_CHUNK_SIZE_16,
-       DR_CHUNK_SIZE_32,
-       DR_CHUNK_SIZE_64,
-       DR_CHUNK_SIZE_128,
-       DR_CHUNK_SIZE_256,
-       DR_CHUNK_SIZE_512,
-       DR_CHUNK_SIZE_1K,
-       DR_CHUNK_SIZE_2K,
-       DR_CHUNK_SIZE_4K,
-       DR_CHUNK_SIZE_8K,
-       DR_CHUNK_SIZE_16K,
-       DR_CHUNK_SIZE_32K,
-       DR_CHUNK_SIZE_64K,
-       DR_CHUNK_SIZE_128K,
-       DR_CHUNK_SIZE_256K,
-       DR_CHUNK_SIZE_512K,
-       DR_CHUNK_SIZE_1024K,
-       DR_CHUNK_SIZE_2048K,
-       DR_CHUNK_SIZE_MAX,
-};
-
-enum mlx5dr_icm_type {
-       DR_ICM_TYPE_STE,
-       DR_ICM_TYPE_MODIFY_ACTION,
-       DR_ICM_TYPE_MODIFY_HDR_PTRN,
-       DR_ICM_TYPE_MAX,
-};
-
-static inline enum mlx5dr_icm_chunk_size
-mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
-{
-       chunk += 2;
-       if (chunk < DR_CHUNK_SIZE_MAX)
-               return chunk;
-
-       return DR_CHUNK_SIZE_MAX;
-}
-
-enum {
-       DR_STE_SIZE = 64,
-       DR_STE_SIZE_CTRL = 32,
-       DR_STE_SIZE_MATCH_TAG = 32,
-       DR_STE_SIZE_TAG = 16,
-       DR_STE_SIZE_MASK = 16,
-       DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
-};
-
-enum mlx5dr_ste_ctx_action_cap {
-       DR_STE_CTX_ACTION_CAP_NONE = 0,
-       DR_STE_CTX_ACTION_CAP_TX_POP   = 1 << 0,
-       DR_STE_CTX_ACTION_CAP_RX_PUSH  = 1 << 1,
-       DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2,
-       DR_STE_CTX_ACTION_CAP_POP_MDFY = 1 << 3,
-};
-
-enum {
-       DR_MODIFY_ACTION_SIZE = 8,
-};
-
-enum mlx5dr_matcher_criteria {
-       DR_MATCHER_CRITERIA_EMPTY = 0,
-       DR_MATCHER_CRITERIA_OUTER = 1 << 0,
-       DR_MATCHER_CRITERIA_MISC = 1 << 1,
-       DR_MATCHER_CRITERIA_INNER = 1 << 2,
-       DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
-       DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
-       DR_MATCHER_CRITERIA_MISC4 = 1 << 5,
-       DR_MATCHER_CRITERIA_MISC5 = 1 << 6,
-       DR_MATCHER_CRITERIA_MAX = 1 << 7,
-};
-
-enum mlx5dr_action_type {
-       DR_ACTION_TYP_TNL_L2_TO_L2,
-       DR_ACTION_TYP_L2_TO_TNL_L2,
-       DR_ACTION_TYP_TNL_L3_TO_L2,
-       DR_ACTION_TYP_L2_TO_TNL_L3,
-       DR_ACTION_TYP_DROP,
-       DR_ACTION_TYP_QP,
-       DR_ACTION_TYP_FT,
-       DR_ACTION_TYP_CTR,
-       DR_ACTION_TYP_TAG,
-       DR_ACTION_TYP_MODIFY_HDR,
-       DR_ACTION_TYP_VPORT,
-       DR_ACTION_TYP_POP_VLAN,
-       DR_ACTION_TYP_PUSH_VLAN,
-       DR_ACTION_TYP_INSERT_HDR,
-       DR_ACTION_TYP_REMOVE_HDR,
-       DR_ACTION_TYP_SAMPLER,
-       DR_ACTION_TYP_ASO_FLOW_METER,
-       DR_ACTION_TYP_RANGE,
-       DR_ACTION_TYP_MAX,
-};
-
-enum mlx5dr_ipv {
-       DR_RULE_IPV4,
-       DR_RULE_IPV6,
-       DR_RULE_IPV_MAX,
-};
-
-struct mlx5dr_icm_pool;
-struct mlx5dr_icm_chunk;
-struct mlx5dr_icm_buddy_mem;
-struct mlx5dr_ste_htbl;
-struct mlx5dr_match_param;
-struct mlx5dr_cmd_caps;
-struct mlx5dr_rule_rx_tx;
-struct mlx5dr_matcher_rx_tx;
-struct mlx5dr_ste_ctx;
-struct mlx5dr_send_info_pool;
-struct mlx5dr_icm_hot_chunk;
-
-struct mlx5dr_ste {
-       /* refcount: indicates the num of rules that using this ste */
-       u32 refcount;
-
-       /* this ste is part of a rule, located in ste's chain */
-       u8 ste_chain_location;
-
-       /* attached to the miss_list head at each htbl entry */
-       struct list_head miss_list_node;
-
-       /* this ste is member of htbl */
-       struct mlx5dr_ste_htbl *htbl;
-
-       struct mlx5dr_ste_htbl *next_htbl;
-
-       /* The rule this STE belongs to */
-       struct mlx5dr_rule_rx_tx *rule_rx_tx;
-};
-
-struct mlx5dr_ste_htbl_ctrl {
-       /* total number of valid entries belonging to this hash table. This
-        * includes the non collision and collision entries
-        */
-       unsigned int num_of_valid_entries;
-
-       /* total number of collisions entries attached to this table */
-       unsigned int num_of_collisions;
-};
-
-struct mlx5dr_ste_htbl {
-       u16 lu_type;
-       u16 byte_mask;
-       u32 refcount;
-       struct mlx5dr_icm_chunk *chunk;
-       struct mlx5dr_ste *pointing_ste;
-       struct mlx5dr_ste_htbl_ctrl ctrl;
-};
-
-struct mlx5dr_ste_send_info {
-       struct mlx5dr_ste *ste;
-       struct list_head send_list;
-       u16 size;
-       u16 offset;
-       u8 data_cont[DR_STE_SIZE];
-       u8 *data;
-};
-
-void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
-                                              u16 offset, u8 *data,
-                                              struct mlx5dr_ste_send_info *ste_info,
-                                              struct list_head *send_list,
-                                              bool copy_data);
-
-struct mlx5dr_ste_build {
-       u8 inner:1;
-       u8 rx:1;
-       u8 vhca_id_valid:1;
-       struct mlx5dr_domain *dmn;
-       struct mlx5dr_cmd_caps *caps;
-       u16 lu_type;
-       u16 byte_mask;
-       u8 bit_mask[DR_STE_SIZE_MASK];
-       int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
-                                 struct mlx5dr_ste_build *sb,
-                                 u8 *tag);
-};
-
-struct mlx5dr_ste_htbl *
-mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
-                     enum mlx5dr_icm_chunk_size chunk_size,
-                     u16 lu_type, u16 byte_mask);
-
-int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
-
-static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
-{
-       htbl->refcount--;
-       if (!htbl->refcount)
-               mlx5dr_ste_htbl_free(htbl);
-}
-
-static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
-{
-       htbl->refcount++;
-}
-
-/* STE utils */
-u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
-bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx, u8 *hw_ste_p);
-void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
-                             u8 *hw_ste, u64 miss_addr);
-void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
-                            u8 *hw_ste, u64 icm_addr, u32 ht_size);
-void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
-                                         u8 *hw_ste,
-                                         struct mlx5dr_ste_htbl *next_htbl);
-void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
-bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
-                               u8 ste_location);
-u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
-u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
-struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
-
-#define MLX5DR_MAX_VLANS 2
-#define MLX5DR_INVALID_PATTERN_INDEX 0xffffffff
-
-struct mlx5dr_ste_actions_attr {
-       u32     modify_index;
-       u32     modify_pat_idx;
-       u16     modify_actions;
-       u8      *single_modify_action;
-       u32     decap_index;
-       u32     decap_pat_idx;
-       u16     decap_actions;
-       u8      decap_with_vlan:1;
-       u64     final_icm_addr;
-       u32     flow_tag;
-       u32     ctr_id;
-       u16     gvmi;
-       u16     hit_gvmi;
-       struct {
-               u32     id;
-               u32     size;
-               u8      param_0;
-               u8      param_1;
-       } reformat;
-       struct {
-               int     count;
-               u32     headers[MLX5DR_MAX_VLANS];
-       } vlans;
-
-       struct {
-               u32 obj_id;
-               u32 offset;
-               u8 dest_reg_id;
-               u8 init_color;
-       } aso_flow_meter;
-
-       struct {
-               u64     miss_icm_addr;
-               u32     definer_id;
-               u32     min;
-               u32     max;
-       } range;
-};
-
-void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
-                              struct mlx5dr_domain *dmn,
-                              u8 *action_type_set,
-                              u8 *last_ste,
-                              struct mlx5dr_ste_actions_attr *attr,
-                              u32 *added_stes);
-void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
-                              struct mlx5dr_domain *dmn,
-                              u8 *action_type_set,
-                              u8 *last_ste,
-                              struct mlx5dr_ste_actions_attr *attr,
-                              u32 *added_stes);
-
-void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
-                              __be64 *hw_action,
-                              u8 hw_field,
-                              u8 shifter,
-                              u8 length,
-                              u32 data);
-void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
-                              __be64 *hw_action,
-                              u8 hw_field,
-                              u8 shifter,
-                              u8 length,
-                              u32 data);
-void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
-                               __be64 *hw_action,
-                               u8 dst_hw_field,
-                               u8 dst_shifter,
-                               u8 dst_len,
-                               u8 src_hw_field,
-                               u8 src_shifter);
-int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
-                                       void *data,
-                                       u32 data_sz,
-                                       u8 *hw_action,
-                                       u32 hw_action_sz,
-                                       u16 *used_hw_action_num);
-int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action);
-void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action);
-
-const struct mlx5dr_ste_action_modify_field *
-mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);
-
-struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version);
-void mlx5dr_ste_free(struct mlx5dr_ste *ste,
-                    struct mlx5dr_matcher *matcher,
-                    struct mlx5dr_matcher_rx_tx *nic_matcher);
-static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
-                                 struct mlx5dr_matcher *matcher,
-                                 struct mlx5dr_matcher_rx_tx *nic_matcher)
-{
-       ste->refcount--;
-       if (!ste->refcount)
-               mlx5dr_ste_free(ste, matcher, nic_matcher);
-}
-
-/* initial as 0, increased only when ste appears in a new rule */
-static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
-{
-       ste->refcount++;
-}
-
-static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
-{
-       return !ste->refcount;
-}
-
-bool mlx5dr_ste_equal_tag(void *src, void *dst);
-int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
-                               struct mlx5dr_matcher_rx_tx *nic_matcher,
-                               struct mlx5dr_ste *ste,
-                               u8 *cur_hw_ste,
-                               enum mlx5dr_icm_chunk_size log_table_size);
-
-/* STE build functions */
-int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
-                              u8 match_criteria,
-                              struct mlx5dr_match_param *mask,
-                              struct mlx5dr_match_param *value);
-int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
-                            struct mlx5dr_matcher_rx_tx *nic_matcher,
-                            struct mlx5dr_match_param *value,
-                            u8 *ste_arr);
-void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
-                                    struct mlx5dr_ste_build *builder,
-                                    struct mlx5dr_match_param *mask,
-                                    bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
-                                         struct mlx5dr_ste_build *sb,
-                                         struct mlx5dr_match_param *mask,
-                                         bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
-                                      struct mlx5dr_ste_build *sb,
-                                      struct mlx5dr_match_param *mask,
-                                      bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
-                                     struct mlx5dr_ste_build *sb,
-                                     struct mlx5dr_match_param *mask,
-                                     bool inner, bool rx);
-void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
-                                     struct mlx5dr_ste_build *sb,
-                                     struct mlx5dr_match_param *mask,
-                                     bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask,
-                                bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask,
-                                bool inner, bool rx);
-void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask,
-                                bool inner, bool rx);
-void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
-                                    struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask,
-                                    bool inner, bool rx);
-void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
-                                 struct mlx5dr_ste_build *sb,
-                                 struct mlx5dr_match_param *mask,
-                                 bool inner, bool rx);
-void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
-                             struct mlx5dr_ste_build *sb,
-                             struct mlx5dr_match_param *mask,
-                             bool inner, bool rx);
-void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
-                          struct mlx5dr_ste_build *sb,
-                          struct mlx5dr_match_param *mask,
-                          bool inner, bool rx);
-void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
-                                       struct mlx5dr_ste_build *sb,
-                                       struct mlx5dr_match_param *mask,
-                                       struct mlx5dr_cmd_caps *caps,
-                                       bool inner, bool rx);
-void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
-                                       struct mlx5dr_ste_build *sb,
-                                       struct mlx5dr_match_param *mask,
-                                       struct mlx5dr_cmd_caps *caps,
-                                       bool inner, bool rx);
-void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
-                          struct mlx5dr_ste_build *sb,
-                          struct mlx5dr_match_param *mask,
-                          struct mlx5dr_cmd_caps *caps,
-                          bool inner, bool rx);
-void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
-                                   struct mlx5dr_ste_build *sb,
-                                   struct mlx5dr_match_param *mask,
-                                   bool inner, bool rx);
-void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask,
-                                bool inner, bool rx);
-void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
-                                        struct mlx5dr_ste_build *sb,
-                                        struct mlx5dr_match_param *mask,
-                                        struct mlx5dr_cmd_caps *caps,
-                                        bool inner, bool rx);
-void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
-                                              struct mlx5dr_ste_build *sb,
-                                              struct mlx5dr_match_param *mask,
-                                              struct mlx5dr_cmd_caps *caps,
-                                              bool inner, bool rx);
-void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
-                              struct mlx5dr_ste_build *sb,
-                              struct mlx5dr_match_param *mask,
-                              bool inner, bool rx);
-void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
-                                            struct mlx5dr_ste_build *sb,
-                                            struct mlx5dr_match_param *mask,
-                                            struct mlx5dr_cmd_caps *caps,
-                                            bool inner, bool rx);
-void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
-                                            struct mlx5dr_ste_build *sb,
-                                            struct mlx5dr_match_param *mask,
-                                            struct mlx5dr_cmd_caps *caps,
-                                            bool inner, bool rx);
-void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
-                                    struct mlx5dr_ste_build *sb,
-                                    struct mlx5dr_match_param *mask,
-                                    bool inner, bool rx);
-void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
-                                     struct mlx5dr_ste_build *sb,
-                                     struct mlx5dr_match_param *mask,
-                                     bool inner, bool rx);
-void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask,
-                                bool inner, bool rx);
-void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
-                                struct mlx5dr_ste_build *sb,
-                                struct mlx5dr_match_param *mask,
-                                bool inner, bool rx);
-void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
-                                  struct mlx5dr_ste_build *sb,
-                                  struct mlx5dr_match_param *mask,
-                                  struct mlx5dr_domain *dmn,
-                                  bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
-                                   struct mlx5dr_ste_build *sb,
-                                   struct mlx5dr_match_param *mask,
-                                   bool inner, bool rx);
-void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
-                                   struct mlx5dr_ste_build *sb,
-                                   struct mlx5dr_match_param *mask,
-                                   bool inner, bool rx);
-void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
-
-/* Actions utils */
-int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
-                                struct mlx5dr_matcher_rx_tx *nic_matcher,
-                                struct mlx5dr_action *actions[],
-                                u32 num_actions,
-                                u8 *ste_arr,
-                                u32 *new_hw_ste_arr_sz);
-
-struct mlx5dr_match_spec {
-       u32 smac_47_16;         /* Source MAC address of incoming packet */
-       /* Incoming packet Ethertype - this is the Ethertype
-        * following the last VLAN tag of the packet
-        */
-       u32 smac_15_0:16;       /* Source MAC address of incoming packet */
-       u32 ethertype:16;
-
-       u32 dmac_47_16;         /* Destination MAC address of incoming packet */
-
-       u32 dmac_15_0:16;       /* Destination MAC address of incoming packet */
-       /* Priority of first VLAN tag in the incoming packet.
-        * Valid only when cvlan_tag==1 or svlan_tag==1
-        */
-       u32 first_prio:3;
-       /* CFI bit of first VLAN tag in the incoming packet.
-        * Valid only when cvlan_tag==1 or svlan_tag==1
-        */
-       u32 first_cfi:1;
-       /* VLAN ID of first VLAN tag in the incoming packet.
-        * Valid only when cvlan_tag==1 or svlan_tag==1
-        */
-       u32 first_vid:12;
-
-       u32 ip_protocol:8;      /* IP protocol */
-       /* Differentiated Services Code Point derived from
-        * Traffic Class/TOS field of IPv6/v4
-        */
-       u32 ip_dscp:6;
-       /* Explicit Congestion Notification derived from
-        * Traffic Class/TOS field of IPv6/v4
-        */
-       u32 ip_ecn:2;
-       /* The first vlan in the packet is c-vlan (0x8100).
-        * cvlan_tag and svlan_tag cannot be set together
-        */
-       u32 cvlan_tag:1;
-       /* The first vlan in the packet is s-vlan (0x8a88).
-        * cvlan_tag and svlan_tag cannot be set together
-        */
-       u32 svlan_tag:1;
-       u32 frag:1;             /* Packet is an IP fragment */
-       u32 ip_version:4;       /* IP version */
-       /* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
-        *             Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
-        */
-       u32 tcp_flags:9;
-
-       /* TCP source port.;tcp and udp sport/dport are mutually exclusive */
-       u32 tcp_sport:16;
-       /* TCP destination port.
-        * tcp and udp sport/dport are mutually exclusive
-        */
-       u32 tcp_dport:16;
-
-       u32 reserved_auto1:16;
-       u32 ipv4_ihl:4;
-       u32 reserved_auto2:4;
-       u32 ttl_hoplimit:8;
-
-       /* UDP source port.;tcp and udp sport/dport are mutually exclusive */
-       u32 udp_sport:16;
-       /* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
-       u32 udp_dport:16;
-
-       /* IPv6 source address of incoming packets
-        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
-        * This field should be qualified by an appropriate ethertype
-        */
-       u32 src_ip_127_96;
-       /* IPv6 source address of incoming packets
-        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
-        * This field should be qualified by an appropriate ethertype
-        */
-       u32 src_ip_95_64;
-       /* IPv6 source address of incoming packets
-        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
-        * This field should be qualified by an appropriate ethertype
-        */
-       u32 src_ip_63_32;
-       /* IPv6 source address of incoming packets
-        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
-        * This field should be qualified by an appropriate ethertype
-        */
-       u32 src_ip_31_0;
-       /* IPv6 destination address of incoming packets
-        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
-        * This field should be qualified by an appropriate ethertype
-        */
-       u32 dst_ip_127_96;
-       /* IPv6 destination address of incoming packets
-        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
-        * This field should be qualified by an appropriate ethertype
-        */
-       u32 dst_ip_95_64;
-       /* IPv6 destination address of incoming packets
-        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
-        * This field should be qualified by an appropriate ethertype
-        */
-       u32 dst_ip_63_32;
-       /* IPv6 destination address of incoming packets
-        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
-        * This field should be qualified by an appropriate ethertype
-        */
-       u32 dst_ip_31_0;
-};
-
-struct mlx5dr_match_misc {
-       /* used with GRE, checksum exist when gre_c_present == 1 */
-       u32 gre_c_present:1;
-       u32 reserved_auto1:1;
-       /* used with GRE, key exist when gre_k_present == 1 */
-       u32 gre_k_present:1;
-       /* used with GRE, sequence number exist when gre_s_present == 1 */
-       u32 gre_s_present:1;
-       u32 source_vhca_port:4;
-       u32 source_sqn:24;              /* Source SQN */
-
-       u32 source_eswitch_owner_vhca_id:16;
-       /* Source port.;0xffff determines wire port */
-       u32 source_port:16;
-
-       /* Priority of second VLAN tag in the outer header of the incoming packet.
-        * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
-        */
-       u32 outer_second_prio:3;
-       /* CFI bit of first VLAN tag in the outer header of the incoming packet.
-        * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
-        */
-       u32 outer_second_cfi:1;
-       /* VLAN ID of first VLAN tag the outer header of the incoming packet.
-        * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
-        */
-       u32 outer_second_vid:12;
-       /* Priority of second VLAN tag in the inner header of the incoming packet.
-        * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
-        */
-       u32 inner_second_prio:3;
-       /* CFI bit of first VLAN tag in the inner header of the incoming packet.
-        * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
-        */
-       u32 inner_second_cfi:1;
-       /* VLAN ID of first VLAN tag the inner header of the incoming packet.
-        * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
-        */
-       u32 inner_second_vid:12;
-
-       u32 outer_second_cvlan_tag:1;
-       u32 inner_second_cvlan_tag:1;
-       /* The second vlan in the outer header of the packet is c-vlan (0x8100).
-        * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
-        */
-       u32 outer_second_svlan_tag:1;
-       /* The second vlan in the inner header of the packet is c-vlan (0x8100).
-        * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
-        */
-       u32 inner_second_svlan_tag:1;
-       /* The second vlan in the outer header of the packet is s-vlan (0x8a88).
-        * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
-        */
-       u32 reserved_auto2:12;
-       /* The second vlan in the inner header of the packet is s-vlan (0x8a88).
-        * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
-        */
-       u32 gre_protocol:16;            /* GRE Protocol (outer) */
-
-       u32 gre_key_h:24;               /* GRE Key[31:8] (outer) */
-       u32 gre_key_l:8;                /* GRE Key [7:0] (outer) */
-
-       u32 vxlan_vni:24;               /* VXLAN VNI (outer) */
-       u32 reserved_auto3:8;
-
-       u32 geneve_vni:24;              /* GENEVE VNI field (outer) */
-       u32 reserved_auto4:6;
-       u32 geneve_tlv_option_0_exist:1;
-       u32 geneve_oam:1;               /* GENEVE OAM field (outer) */
-
-       u32 reserved_auto5:12;
-       u32 outer_ipv6_flow_label:20;   /* Flow label of incoming IPv6 packet (outer) */
-
-       u32 reserved_auto6:12;
-       u32 inner_ipv6_flow_label:20;   /* Flow label of incoming IPv6 packet (inner) */
-
-       u32 reserved_auto7:10;
-       u32 geneve_opt_len:6;           /* GENEVE OptLen (outer) */
-       u32 geneve_protocol_type:16;    /* GENEVE protocol type (outer) */
-
-       u32 reserved_auto8:8;
-       u32 bth_dst_qp:24;              /* Destination QP in BTH header */
-
-       u32 reserved_auto9;
-       u32 outer_esp_spi;
-       u32 reserved_auto10[3];
-};
-
-struct mlx5dr_match_misc2 {
-       u32 outer_first_mpls_label:20;          /* First MPLS LABEL (outer) */
-       u32 outer_first_mpls_exp:3;             /* First MPLS EXP (outer) */
-       u32 outer_first_mpls_s_bos:1;           /* First MPLS S_BOS (outer) */
-       u32 outer_first_mpls_ttl:8;             /* First MPLS TTL (outer) */
-
-       u32 inner_first_mpls_label:20;          /* First MPLS LABEL (inner) */
-       u32 inner_first_mpls_exp:3;             /* First MPLS EXP (inner) */
-       u32 inner_first_mpls_s_bos:1;           /* First MPLS S_BOS (inner) */
-       u32 inner_first_mpls_ttl:8;             /* First MPLS TTL (inner) */
-
-       u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */
-       u32 outer_first_mpls_over_gre_exp:3;    /* last MPLS EXP (outer) */
-       u32 outer_first_mpls_over_gre_s_bos:1;  /* last MPLS S_BOS (outer) */
-       u32 outer_first_mpls_over_gre_ttl:8;    /* last MPLS TTL (outer) */
-
-       u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */
-       u32 outer_first_mpls_over_udp_exp:3;    /* last MPLS EXP (outer) */
-       u32 outer_first_mpls_over_udp_s_bos:1;  /* last MPLS S_BOS (outer) */
-       u32 outer_first_mpls_over_udp_ttl:8;    /* last MPLS TTL (outer) */
-
-       u32 metadata_reg_c_7;                   /* metadata_reg_c_7 */
-       u32 metadata_reg_c_6;                   /* metadata_reg_c_6 */
-       u32 metadata_reg_c_5;                   /* metadata_reg_c_5 */
-       u32 metadata_reg_c_4;                   /* metadata_reg_c_4 */
-       u32 metadata_reg_c_3;                   /* metadata_reg_c_3 */
-       u32 metadata_reg_c_2;                   /* metadata_reg_c_2 */
-       u32 metadata_reg_c_1;                   /* metadata_reg_c_1 */
-       u32 metadata_reg_c_0;                   /* metadata_reg_c_0 */
-       u32 metadata_reg_a;                     /* metadata_reg_a */
-       u32 reserved_auto1[3];
-};
-
-struct mlx5dr_match_misc3 {
-       u32 inner_tcp_seq_num;
-       u32 outer_tcp_seq_num;
-       u32 inner_tcp_ack_num;
-       u32 outer_tcp_ack_num;
-
-       u32 reserved_auto1:8;
-       u32 outer_vxlan_gpe_vni:24;
-
-       u32 outer_vxlan_gpe_next_protocol:8;
-       u32 outer_vxlan_gpe_flags:8;
-       u32 reserved_auto2:16;
-
-       u32 icmpv4_header_data;
-       u32 icmpv6_header_data;
-
-       u8 icmpv4_type;
-       u8 icmpv4_code;
-       u8 icmpv6_type;
-       u8 icmpv6_code;
-
-       u32 geneve_tlv_option_0_data;
-
-       u32 gtpu_teid;
-
-       u8 gtpu_msg_type;
-       u8 gtpu_msg_flags;
-       u32 reserved_auto3:16;
-
-       u32 gtpu_dw_2;
-       u32 gtpu_first_ext_dw_0;
-       u32 gtpu_dw_0;
-       u32 reserved_auto4;
-};
-
-struct mlx5dr_match_misc4 {
-       u32 prog_sample_field_value_0;
-       u32 prog_sample_field_id_0;
-       u32 prog_sample_field_value_1;
-       u32 prog_sample_field_id_1;
-       u32 prog_sample_field_value_2;
-       u32 prog_sample_field_id_2;
-       u32 prog_sample_field_value_3;
-       u32 prog_sample_field_id_3;
-       u32 reserved_auto1[8];
-};
-
-struct mlx5dr_match_misc5 {
-       u32 macsec_tag_0;
-       u32 macsec_tag_1;
-       u32 macsec_tag_2;
-       u32 macsec_tag_3;
-       u32 tunnel_header_0;
-       u32 tunnel_header_1;
-       u32 tunnel_header_2;
-       u32 tunnel_header_3;
-};
-
-struct mlx5dr_match_param {
-       struct mlx5dr_match_spec outer;
-       struct mlx5dr_match_misc misc;
-       struct mlx5dr_match_spec inner;
-       struct mlx5dr_match_misc2 misc2;
-       struct mlx5dr_match_misc3 misc3;
-       struct mlx5dr_match_misc4 misc4;
-       struct mlx5dr_match_misc5 misc5;
-};
-
-#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
-                                      (_misc3)->icmpv4_code || \
-                                      (_misc3)->icmpv4_header_data)
-
-#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \
-                                     (_spec)->src_ip_95_64  || \
-                                     (_spec)->src_ip_63_32  || \
-                                     (_spec)->src_ip_31_0)
-
-#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \
-                                     (_spec)->dst_ip_95_64  || \
-                                     (_spec)->dst_ip_63_32  || \
-                                     (_spec)->dst_ip_31_0)
-
-struct mlx5dr_esw_caps {
-       u64 drop_icm_address_rx;
-       u64 drop_icm_address_tx;
-       u64 uplink_icm_address_rx;
-       u64 uplink_icm_address_tx;
-       u8 sw_owner:1;
-       u8 sw_owner_v2:1;
-};
-
-struct mlx5dr_cmd_vport_cap {
-       u16 vport_gvmi;
-       u16 vhca_gvmi;
-       u16 num;
-       u64 icm_address_rx;
-       u64 icm_address_tx;
-};
-
-struct mlx5dr_roce_cap {
-       u8 roce_en:1;
-       u8 fl_rc_qp_when_roce_disabled:1;
-       u8 fl_rc_qp_when_roce_enabled:1;
-};
-
-struct mlx5dr_vports {
-       struct mlx5dr_cmd_vport_cap esw_manager_caps;
-       struct mlx5dr_cmd_vport_cap uplink_caps;
-       struct xarray vports_caps_xa;
-};
-
-struct mlx5dr_cmd_caps {
-       u16 gvmi;
-       u64 nic_rx_drop_address;
-       u64 nic_tx_drop_address;
-       u64 nic_tx_allow_address;
-       u64 esw_rx_drop_address;
-       u64 esw_tx_drop_address;
-       u32 log_icm_size;
-       u64 hdr_modify_icm_addr;
-       u32 log_modify_pattern_icm_size;
-       u64 hdr_modify_pattern_icm_addr;
-       u32 flex_protocols;
-       u8 flex_parser_id_icmp_dw0;
-       u8 flex_parser_id_icmp_dw1;
-       u8 flex_parser_id_icmpv6_dw0;
-       u8 flex_parser_id_icmpv6_dw1;
-       u8 flex_parser_id_geneve_tlv_option_0;
-       u8 flex_parser_id_mpls_over_gre;
-       u8 flex_parser_id_mpls_over_udp;
-       u8 flex_parser_id_gtpu_dw_0;
-       u8 flex_parser_id_gtpu_teid;
-       u8 flex_parser_id_gtpu_dw_2;
-       u8 flex_parser_id_gtpu_first_ext_dw_0;
-       u8 flex_parser_ok_bits_supp;
-       u8 max_ft_level;
-       u16 roce_min_src_udp;
-       u8 sw_format_ver;
-       bool eswitch_manager;
-       bool rx_sw_owner;
-       bool tx_sw_owner;
-       bool fdb_sw_owner;
-       u8 rx_sw_owner_v2:1;
-       u8 tx_sw_owner_v2:1;
-       u8 fdb_sw_owner_v2:1;
-       struct mlx5dr_esw_caps esw_caps;
-       struct mlx5dr_vports vports;
-       bool prio_tag_required;
-       struct mlx5dr_roce_cap roce_caps;
-       u16 log_header_modify_argument_granularity;
-       u16 log_header_modify_argument_max_alloc;
-       bool support_modify_argument;
-       u8 is_ecpf:1;
-       u8 isolate_vl_tc:1;
-};
-
-enum mlx5dr_domain_nic_type {
-       DR_DOMAIN_NIC_TYPE_RX,
-       DR_DOMAIN_NIC_TYPE_TX,
-};
-
-struct mlx5dr_domain_rx_tx {
-       u64 drop_icm_addr;
-       u64 default_icm_addr;
-       enum mlx5dr_domain_nic_type type;
-       struct mutex mutex; /* protect rx/tx domain */
-};
-
-struct mlx5dr_domain_info {
-       bool supp_sw_steering;
-       u32 max_inline_size;
-       u32 max_send_wr;
-       u32 max_log_sw_icm_sz;
-       u32 max_log_action_icm_sz;
-       u32 max_log_modify_hdr_pattern_icm_sz;
-       struct mlx5dr_domain_rx_tx rx;
-       struct mlx5dr_domain_rx_tx tx;
-       struct mlx5dr_cmd_caps caps;
-};
-
-struct mlx5dr_domain {
-       struct mlx5_core_dev *mdev;
-       u32 pdn;
-       struct mlx5_uars_page *uar;
-       enum mlx5dr_domain_type type;
-       refcount_t refcount;
-       struct mlx5dr_icm_pool *ste_icm_pool;
-       struct mlx5dr_icm_pool *action_icm_pool;
-       struct mlx5dr_send_info_pool *send_info_pool_rx;
-       struct mlx5dr_send_info_pool *send_info_pool_tx;
-       struct kmem_cache *chunks_kmem_cache;
-       struct kmem_cache *htbls_kmem_cache;
-       struct mlx5dr_ptrn_mgr *ptrn_mgr;
-       struct mlx5dr_arg_mgr *arg_mgr;
-       struct mlx5dr_send_ring *send_ring;
-       struct mlx5dr_domain_info info;
-       struct xarray csum_fts_xa;
-       struct mlx5dr_ste_ctx *ste_ctx;
-       struct list_head dbg_tbl_list;
-       struct mlx5dr_dbg_dump_info dump_info;
-       struct xarray definers_xa;
-       struct xarray peer_dmn_xa;
-       /* memory management statistics */
-       u32 num_buddies[DR_ICM_TYPE_MAX];
-};
-
-struct mlx5dr_table_rx_tx {
-       struct mlx5dr_ste_htbl *s_anchor;
-       struct mlx5dr_domain_rx_tx *nic_dmn;
-       u64 default_icm_addr;
-       struct list_head nic_matcher_list;
-};
-
-struct mlx5dr_table {
-       struct mlx5dr_domain *dmn;
-       struct mlx5dr_table_rx_tx rx;
-       struct mlx5dr_table_rx_tx tx;
-       u32 level;
-       u32 table_type;
-       u32 table_id;
-       u32 flags;
-       struct list_head matcher_list;
-       struct mlx5dr_action *miss_action;
-       refcount_t refcount;
-       struct list_head dbg_node;
-};
-
-struct mlx5dr_matcher_rx_tx {
-       struct mlx5dr_ste_htbl *s_htbl;
-       struct mlx5dr_ste_htbl *e_anchor;
-       struct mlx5dr_ste_build *ste_builder;
-       struct mlx5dr_ste_build ste_builder_arr[DR_RULE_IPV_MAX]
-                                              [DR_RULE_IPV_MAX]
-                                              [DR_RULE_MAX_STES];
-       u8 num_of_builders;
-       u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
-       u64 default_icm_addr;
-       struct mlx5dr_table_rx_tx *nic_tbl;
-       u32 prio;
-       struct list_head list_node;
-       u32 rules;
-};
-
-struct mlx5dr_matcher {
-       struct mlx5dr_table *tbl;
-       struct mlx5dr_matcher_rx_tx rx;
-       struct mlx5dr_matcher_rx_tx tx;
-       struct list_head list_node; /* Used for both matchers and dbg managing */
-       u32 prio;
-       struct mlx5dr_match_param mask;
-       u8 match_criteria;
-       refcount_t refcount;
-       struct list_head dbg_rule_list;
-};
-
-struct mlx5dr_ste_action_modify_field {
-       u16 hw_field;
-       u8 start;
-       u8 end;
-       u8 l3_type;
-       u8 l4_type;
-};
-
-struct mlx5dr_ptrn_obj {
-       struct mlx5dr_icm_chunk *chunk;
-       u8 *data;
-       u16 num_of_actions;
-       u32 index;
-       refcount_t refcount;
-       struct list_head list;
-};
-
-struct mlx5dr_arg_obj {
-       u32 obj_id;
-       u32 obj_offset;
-       struct list_head list_node;
-       u32 log_chunk_size;
-};
-
-struct mlx5dr_action_rewrite {
-       struct mlx5dr_domain *dmn;
-       struct mlx5dr_icm_chunk *chunk;
-       u8 *data;
-       u16 num_of_actions;
-       u32 index;
-       u8 single_action_opt:1;
-       u8 allow_rx:1;
-       u8 allow_tx:1;
-       u8 modify_ttl:1;
-       struct mlx5dr_ptrn_obj *ptrn;
-       struct mlx5dr_arg_obj *arg;
-};
-
-struct mlx5dr_action_reformat {
-       struct mlx5dr_domain *dmn;
-       u32 id;
-       u32 size;
-       u8 param_0;
-       u8 param_1;
-};
-
-struct mlx5dr_action_sampler {
-       struct mlx5dr_domain *dmn;
-       u64 rx_icm_addr;
-       u64 tx_icm_addr;
-       u32 sampler_id;
-};
-
-struct mlx5dr_action_dest_tbl {
-       u8 is_fw_tbl:1;
-       u8 is_wire_ft:1;
-       union {
-               struct mlx5dr_table *tbl;
-               struct {
-                       struct mlx5dr_domain *dmn;
-                       u32 id;
-                       u32 group_id;
-                       enum fs_flow_table_type type;
-                       u64 rx_icm_addr;
-                       u64 tx_icm_addr;
-                       struct mlx5dr_action **ref_actions;
-                       u32 num_of_ref_actions;
-               } fw_tbl;
-       };
-};
-
-struct mlx5dr_action_range {
-       struct mlx5dr_domain *dmn;
-       struct mlx5dr_action *hit_tbl_action;
-       struct mlx5dr_action *miss_tbl_action;
-       u32 definer_id;
-       u32 min;
-       u32 max;
-};
-
-struct mlx5dr_action_ctr {
-       u32 ctr_id;
-       u32 offset;
-};
-
-struct mlx5dr_action_vport {
-       struct mlx5dr_domain *dmn;
-       struct mlx5dr_cmd_vport_cap *caps;
-};
-
-struct mlx5dr_action_push_vlan {
-       u32 vlan_hdr; /* tpid_pcp_dei_vid */
-};
-
-struct mlx5dr_action_flow_tag {
-       u32 flow_tag;
-};
-
-struct mlx5dr_rule_action_member {
-       struct mlx5dr_action *action;
-       struct list_head list;
-};
-
-struct mlx5dr_action_aso_flow_meter {
-       struct mlx5dr_domain *dmn;
-       u32 obj_id;
-       u32 offset;
-       u8 dest_reg_id;
-       u8 init_color;
-};
-
-struct mlx5dr_action {
-       enum mlx5dr_action_type action_type;
-       refcount_t refcount;
-
-       union {
-               void *data;
-               struct mlx5dr_action_rewrite *rewrite;
-               struct mlx5dr_action_reformat *reformat;
-               struct mlx5dr_action_sampler *sampler;
-               struct mlx5dr_action_dest_tbl *dest_tbl;
-               struct mlx5dr_action_ctr *ctr;
-               struct mlx5dr_action_vport *vport;
-               struct mlx5dr_action_push_vlan *push_vlan;
-               struct mlx5dr_action_flow_tag *flow_tag;
-               struct mlx5dr_action_aso_flow_meter *aso;
-               struct mlx5dr_action_range *range;
-       };
-};
-
-enum mlx5dr_connect_type {
-       CONNECT_HIT     = 1,
-       CONNECT_MISS    = 2,
-};
-
-struct mlx5dr_htbl_connect_info {
-       enum mlx5dr_connect_type type;
-       union {
-               struct mlx5dr_ste_htbl *hit_next_htbl;
-               u64 miss_icm_addr;
-       };
-};
-
-struct mlx5dr_rule_rx_tx {
-       struct mlx5dr_matcher_rx_tx *nic_matcher;
-       struct mlx5dr_ste *last_rule_ste;
-};
-
-struct mlx5dr_rule {
-       struct mlx5dr_matcher *matcher;
-       struct mlx5dr_rule_rx_tx rx;
-       struct mlx5dr_rule_rx_tx tx;
-       struct list_head rule_actions_list;
-       struct list_head dbg_node;
-       u32 flow_source;
-};
-
-void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
-                                struct mlx5dr_ste *ste,
-                                bool force);
-int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
-                                        struct mlx5dr_ste *curr_ste,
-                                        int *num_of_stes);
-
-struct mlx5dr_icm_chunk {
-       struct mlx5dr_icm_buddy_mem *buddy_mem;
-
-       /* indicates the index of this chunk in the whole memory,
-        * used for deleting the chunk from the buddy
-        */
-       unsigned int seg;
-       enum mlx5dr_icm_chunk_size size;
-
-       /* Memory optimisation */
-       struct mlx5dr_ste *ste_arr;
-       u8 *hw_ste_arr;
-       struct list_head *miss_list;
-};
-
-static inline void mlx5dr_domain_nic_lock(struct mlx5dr_domain_rx_tx *nic_dmn)
-{
-       mutex_lock(&nic_dmn->mutex);
-}
-
-static inline void mlx5dr_domain_nic_unlock(struct mlx5dr_domain_rx_tx *nic_dmn)
-{
-       mutex_unlock(&nic_dmn->mutex);
-}
-
-static inline void mlx5dr_domain_lock(struct mlx5dr_domain *dmn)
-{
-       mlx5dr_domain_nic_lock(&dmn->info.rx);
-       mlx5dr_domain_nic_lock(&dmn->info.tx);
-}
-
-static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
-{
-       mlx5dr_domain_nic_unlock(&dmn->info.tx);
-       mlx5dr_domain_nic_unlock(&dmn->info.rx);
-}
-
-int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
-                                 struct mlx5dr_matcher_rx_tx *nic_matcher);
-int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
-                                      struct mlx5dr_matcher_rx_tx *nic_matcher);
-
-int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
-                                  struct mlx5dr_matcher_rx_tx *nic_matcher,
-                                  enum mlx5dr_ipv outer_ipv,
-                                  enum mlx5dr_ipv inner_ipv);
-
-u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk);
-u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk);
-u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk);
-u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk);
-u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk);
-u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste);
-
-struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool);
-void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl);
-
-static inline int
-mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
-{
-       if (icm_type == DR_ICM_TYPE_STE)
-               return DR_STE_SIZE;
-
-       return DR_MODIFY_ACTION_SIZE;
-}
-
-static inline u32
-mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
-{
-       return 1 << chunk_size;
-}
-
-static inline int
-mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
-                                  enum mlx5dr_icm_type icm_type)
-{
-       int num_of_entries;
-       int entry_size;
-
-       entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(icm_type);
-       num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
-
-       return entry_size * num_of_entries;
-}
-
-static inline int
-mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
-{
-       int num_of_entries =
-               mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk->size);
-
-       /* Threshold is 50%, one is added to table of size 1 */
-       return (num_of_entries + 1) / 2;
-}
-
-static inline bool
-mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
-{
-       if (htbl->chunk->size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
-               return false;
-
-       return true;
-}
-
-struct mlx5dr_cmd_vport_cap *
-mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);
-
-struct mlx5dr_cmd_query_flow_table_details {
-       u8 status;
-       u8 level;
-       u64 sw_owner_icm_root_1;
-       u64 sw_owner_icm_root_0;
-};
-
-struct mlx5dr_cmd_create_flow_table_attr {
-       u32 table_type;
-       u16 uid;
-       u64 icm_addr_rx;
-       u64 icm_addr_tx;
-       u8 level;
-       bool sw_owner;
-       bool term_tbl;
-       bool decap_en;
-       bool reformat_en;
-};
-
-/* internal API functions */
-int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
-                           struct mlx5dr_cmd_caps *caps);
-int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
-                                      bool other_vport, u16 vport_number,
-                                      u64 *icm_address_rx,
-                                      u64 *icm_address_tx);
-int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
-                         bool other_vport, u16 vport_number, u16 *gvmi);
-int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
-                             struct mlx5dr_esw_caps *caps);
-int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
-                                 u32 sampler_id,
-                                 u64 *rx_icm_addr,
-                                 u64 *tx_icm_addr);
-int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
-int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
-                                       u32 table_type,
-                                       u32 table_id,
-                                       u32 group_id,
-                                       u32 modify_header_id,
-                                       u16 vport_id);
-int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
-                                   u32 table_type,
-                                   u32 table_id);
-int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
-                                  u32 table_type,
-                                  u8 num_of_actions,
-                                  u64 *actions,
-                                  u32 *modify_header_id);
-int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
-                                    u32 modify_header_id);
-int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
-                                      u32 table_type,
-                                      u32 table_id,
-                                      u32 *group_id);
-int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
-                                 u32 table_type,
-                                 u32 table_id,
-                                 u32 group_id);
-int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
-                                struct mlx5dr_cmd_create_flow_table_attr *attr,
-                                u64 *fdb_rx_icm_addr,
-                                u32 *table_id);
-int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
-                                 u32 table_id,
-                                 u32 table_type);
-int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
-                               enum fs_flow_table_type type,
-                               u32 table_id,
-                               struct mlx5dr_cmd_query_flow_table_details *output);
-int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
-                                  enum mlx5_reformat_ctx_type rt,
-                                  u8 reformat_param_0,
-                                  u8 reformat_param_1,
-                                  size_t reformat_size,
-                                  void *reformat_data,
-                                  u32 *reformat_id);
-void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
-                                    u32 reformat_id);
-int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
-                             u16 format_id,
-                             u8 *dw_selectors,
-                             u8 *byte_selectors,
-                             u8 *match_mask,
-                             u32 *definer_id);
-void mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev,
-                               u32 definer_id);
-
-struct mlx5dr_cmd_gid_attr {
-       u8 gid[16];
-       u8 mac[6];
-       u32 roce_ver;
-};
-
-int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
-                        u16 index, struct mlx5dr_cmd_gid_attr *attr);
-
-int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
-                                       u16 log_obj_range, u32 pd,
-                                       u32 *obj_id);
-void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
-                                         u32 obj_id);
-
-int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
-                      u8 *dw_selectors, u8 *byte_selectors,
-                      u8 *match_mask, u32 *definer_id);
-void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
-
-struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
-                                              enum mlx5dr_icm_type icm_type);
-void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
-
-struct mlx5dr_icm_chunk *
-mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
-                      enum mlx5dr_icm_chunk_size chunk_size);
-void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
-
-void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
-                                    u8 *hw_ste_p, u32 ste_size);
-int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
-                                     struct mlx5dr_domain_rx_tx *nic_dmn,
-                                     struct mlx5dr_ste_htbl *htbl,
-                                     struct mlx5dr_htbl_connect_info *connect_info,
-                                     bool update_hw_ste);
-void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
-                                 u16 gvmi,
-                                 enum mlx5dr_domain_nic_type nic_type,
-                                 struct mlx5dr_ste_htbl *htbl,
-                                 u8 *formatted_ste,
-                                 struct mlx5dr_htbl_connect_info *connect_info);
-void mlx5dr_ste_copy_param(u8 match_criteria,
-                          struct mlx5dr_match_param *set_param,
-                          struct mlx5dr_match_parameters *mask,
-                          bool clear);
-
-struct mlx5dr_qp {
-       struct mlx5_core_dev *mdev;
-       struct mlx5_wq_qp wq;
-       struct mlx5_uars_page *uar;
-       struct mlx5_wq_ctrl wq_ctrl;
-       u32 qpn;
-       struct {
-               unsigned int head;
-               unsigned int pc;
-               unsigned int cc;
-               unsigned int size;
-               unsigned int *wqe_head;
-               unsigned int wqe_cnt;
-       } sq;
-       struct {
-               unsigned int pc;
-               unsigned int cc;
-               unsigned int size;
-               unsigned int wqe_cnt;
-       } rq;
-       int max_inline_data;
-};
-
-struct mlx5dr_cq {
-       struct mlx5_core_dev *mdev;
-       struct mlx5_cqwq wq;
-       struct mlx5_wq_ctrl wq_ctrl;
-       struct mlx5_core_cq mcq;
-       struct mlx5dr_qp *qp;
-};
-
-struct mlx5dr_mr {
-       struct mlx5_core_dev *mdev;
-       u32 mkey;
-       dma_addr_t dma_addr;
-       void *addr;
-       size_t size;
-};
-
-struct mlx5dr_send_ring {
-       struct mlx5dr_cq *cq;
-       struct mlx5dr_qp *qp;
-       struct mlx5dr_mr *mr;
-       /* How much wqes are waiting for completion */
-       u32 pending_wqe;
-       /* Signal request per this trash hold value */
-       u16 signal_th;
-       /* Each post_send_size less than max_post_send_size */
-       u32 max_post_send_size;
-       /* manage the send queue */
-       u32 tx_head;
-       void *buf;
-       u32 buf_size;
-       u8 *sync_buff;
-       struct mlx5dr_mr *sync_mr;
-       spinlock_t lock; /* Protect the data path of the send ring */
-       bool err_state; /* send_ring is not usable in err state */
-};
-
-int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
-void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
-                          struct mlx5dr_send_ring *send_ring);
-int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
-int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
-                            struct mlx5dr_ste *ste,
-                            u8 *data,
-                            u16 size,
-                            u16 offset);
-int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
-                             struct mlx5dr_ste_htbl *htbl,
-                             u8 *formatted_ste, u8 *mask);
-int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
-                                       struct mlx5dr_ste_htbl *htbl,
-                                       u8 *ste_init_data,
-                                       bool update_hw_ste);
-int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
-                               struct mlx5dr_action *action);
-int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
-                                struct mlx5dr_icm_chunk *chunk,
-                                u16 num_of_actions,
-                                u8 *data);
-int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id,
-                             u16 num_of_actions, u8 *actions_data);
-
-int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn);
-void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn);
-struct mlx5dr_ste_send_info *mlx5dr_send_info_alloc(struct mlx5dr_domain *dmn,
-                                                   enum mlx5dr_domain_nic_type nic_type);
-void mlx5dr_send_info_free(struct mlx5dr_ste_send_info *ste_send_info);
-
-struct mlx5dr_cmd_ft_info {
-       u32 id;
-       u16 vport;
-       enum fs_flow_table_type type;
-};
-
-struct mlx5dr_cmd_flow_destination_hw_info {
-       enum mlx5_flow_destination_type type;
-       union {
-               u32 tir_num;
-               u32 ft_num;
-               u32 ft_id;
-               u32 counter_id;
-               u32 sampler_id;
-               struct {
-                       u16 num;
-                       u16 vhca_id;
-                       u32 reformat_id;
-                       u8 flags;
-               } vport;
-       };
-};
-
-struct mlx5dr_cmd_fte_info {
-       u32 dests_size;
-       u32 index;
-       struct mlx5_flow_context flow_context;
-       u32 *val;
-       struct mlx5_flow_act action;
-       struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
-       bool ignore_flow_level;
-};
-
-int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
-                      int opmod, int modify_mask,
-                      struct mlx5dr_cmd_ft_info *ft,
-                      u32 group_id,
-                      struct mlx5dr_cmd_fte_info *fte);
-
-bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps);
-
-struct mlx5dr_fw_recalc_cs_ft {
-       u64 rx_icm_addr;
-       u32 table_id;
-       u32 group_id;
-       u32 modify_hdr_id;
-};
-
-struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num);
-void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
-                                   struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
-int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
-                                       u16 vport_num,
-                                       u64 *rx_icm_addr);
-int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
-                           struct mlx5dr_cmd_flow_destination_hw_info *dest,
-                           int num_dest,
-                           bool reformat_req,
-                           u32 *tbl_id,
-                           u32 *group_id,
-                           bool ignore_flow_level,
-                           u32 flow_source);
-void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
-                             u32 group_id);
-
-static inline bool mlx5dr_is_fw_table(struct mlx5_flow_table *ft)
-{
-       return !ft->fs_dr_table.dr_table;
-}
-
-static inline bool mlx5dr_supp_match_ranges(struct mlx5_core_dev *dev)
-{
-       return (MLX5_CAP_GEN(dev, steering_format_version) >=
-               MLX5_STEERING_FORMAT_CONNECTX_6DX) &&
-              (MLX5_CAP_GEN_64(dev, match_definer_format_supported) &
-                       (1ULL << MLX5_IFC_DEFINER_FORMAT_ID_SELECT));
-}
-
-bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn);
-struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn);
-void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr);
-struct mlx5dr_ptrn_obj *mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
-                                                     u16 num_of_actions, u8 *data);
-void mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
-                                  struct mlx5dr_ptrn_obj *pattern);
-struct mlx5dr_arg_mgr *mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn);
-void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr);
-struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr,
-                                         u16 num_of_actions,
-                                         u8 *data);
-void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr,
-                       struct mlx5dr_arg_obj *arg_obj);
-u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj);
-
-#endif  /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
deleted file mode 100644 (file)
index 4b349d4..0000000
+++ /dev/null
@@ -1,879 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
-/* Copyright (c) 2019 Mellanox Technologies */
-
-#include <linux/mlx5/vport.h>
-#include "mlx5_core.h"
-#include "fs_core.h"
-#include "fs_cmd.h"
-#include "mlx5dr.h"
-#include "fs_dr.h"
-#include "dr_types.h"
-
-static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
-                                     struct mlx5_flow_table *ft,
-                                     u32 underlay_qpn,
-                                     bool disconnect)
-{
-       return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
-                                                        disconnect);
-}
-
-static int set_miss_action(struct mlx5_flow_root_namespace *ns,
-                          struct mlx5_flow_table *ft,
-                          struct mlx5_flow_table *next_ft)
-{
-       struct mlx5dr_action *old_miss_action;
-       struct mlx5dr_action *action = NULL;
-       struct mlx5dr_table *next_tbl;
-       int err;
-
-       next_tbl = next_ft ? next_ft->fs_dr_table.dr_table : NULL;
-       if (next_tbl) {
-               action = mlx5dr_action_create_dest_table(next_tbl);
-               if (!action)
-                       return -EINVAL;
-       }
-       old_miss_action = ft->fs_dr_table.miss_action;
-       err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
-       if (err && action) {
-               err = mlx5dr_action_destroy(action);
-               if (err)
-                       mlx5_core_err(ns->dev,
-                                     "Failed to destroy action (%d)\n", err);
-               action = NULL;
-       }
-       ft->fs_dr_table.miss_action = action;
-       if (old_miss_action) {
-               err = mlx5dr_action_destroy(old_miss_action);
-               if (err)
-                       mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
-                                     err);
-       }
-
-       return err;
-}
-
-static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
-                                        struct mlx5_flow_table *ft,
-                                        struct mlx5_flow_table_attr *ft_attr,
-                                        struct mlx5_flow_table *next_ft)
-{
-       struct mlx5dr_table *tbl;
-       u32 flags;
-       int err;
-
-       if (mlx5_fs_cmd_is_fw_term_table(ft))
-               return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
-                                                                   ft_attr,
-                                                                   next_ft);
-       flags = ft->flags;
-       /* turn off encap/decap if not supported for sw-str by fw */
-       if (!MLX5_CAP_FLOWTABLE(ns->dev, sw_owner_reformat_supported))
-               flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
-                                     MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
-
-       tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags,
-                                 ft_attr->uid);
-       if (!tbl) {
-               mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
-               return -EINVAL;
-       }
-
-       ft->fs_dr_table.dr_table = tbl;
-       ft->id = mlx5dr_table_get_id(tbl);
-
-       if (next_ft) {
-               err = set_miss_action(ns, ft, next_ft);
-               if (err) {
-                       mlx5dr_table_destroy(tbl);
-                       ft->fs_dr_table.dr_table = NULL;
-                       return err;
-               }
-       }
-
-       ft->max_fte = INT_MAX;
-
-       return 0;
-}
-
-static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
-                                         struct mlx5_flow_table *ft)
-{
-       struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
-       int err;
-
-       if (mlx5_fs_cmd_is_fw_term_table(ft))
-               return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
-
-       err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
-       if (err) {
-               mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n",
-                             err);
-               return err;
-       }
-       if (action) {
-               err = mlx5dr_action_destroy(action);
-               if (err) {
-                       mlx5_core_err(ns->dev, "Failed to destroy action(%d)\n",
-                                     err);
-                       return err;
-               }
-       }
-
-       return err;
-}
-
-static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
-                                        struct mlx5_flow_table *ft,
-                                        struct mlx5_flow_table *next_ft)
-{
-       if (mlx5_fs_cmd_is_fw_term_table(ft))
-               return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
-
-       return set_miss_action(ns, ft, next_ft);
-}
-
-static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
-                                        struct mlx5_flow_table *ft,
-                                        u32 *in,
-                                        struct mlx5_flow_group *fg)
-{
-       struct mlx5dr_matcher *matcher;
-       u32 priority = MLX5_GET(create_flow_group_in, in,
-                               start_flow_index);
-       u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
-                                           in,
-                                           match_criteria_enable);
-       struct mlx5dr_match_parameters mask;
-
-       if (mlx5_fs_cmd_is_fw_term_table(ft))
-               return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
-                                                                   fg);
-
-       mask.match_buf = MLX5_ADDR_OF(create_flow_group_in,
-                                     in, match_criteria);
-       mask.match_sz = sizeof(fg->mask.match_criteria);
-
-       matcher = mlx5dr_matcher_create(ft->fs_dr_table.dr_table,
-                                       priority,
-                                       match_criteria_enable,
-                                       &mask);
-       if (!matcher) {
-               mlx5_core_err(ns->dev, "Failed creating matcher\n");
-               return -EINVAL;
-       }
-
-       fg->fs_dr_matcher.dr_matcher = matcher;
-       return 0;
-}
-
-static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
-                                         struct mlx5_flow_table *ft,
-                                         struct mlx5_flow_group *fg)
-{
-       if (mlx5_fs_cmd_is_fw_term_table(ft))
-               return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
-
-       return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
-}
-
-static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
-                                                struct mlx5_flow_rule *dst)
-{
-       struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
-
-       return mlx5dr_action_create_dest_vport(domain, dest_attr->vport.num,
-                                              dest_attr->vport.flags &
-                                              MLX5_FLOW_DEST_VPORT_VHCA_ID,
-                                              dest_attr->vport.vhca_id);
-}
-
-static struct mlx5dr_action *create_uplink_action(struct mlx5dr_domain *domain,
-                                                 struct mlx5_flow_rule *dst)
-{
-       struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
-
-       return mlx5dr_action_create_dest_vport(domain, MLX5_VPORT_UPLINK, 1,
-                                              dest_attr->vport.vhca_id);
-}
-
-static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
-                                             struct mlx5_flow_rule *dst)
-{
-       struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
-       struct mlx5dr_action *tbl_action;
-
-       if (mlx5dr_is_fw_table(dest_ft))
-               return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
-
-       tbl_action = mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
-       if (tbl_action)
-               tbl_action->dest_tbl->is_wire_ft =
-                       dest_ft->flags & MLX5_FLOW_TABLE_UPLINK_VPORT ? 1 : 0;
-
-       return tbl_action;
-}
-
-static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain,
-                                                struct mlx5_flow_rule *dst)
-{
-       return mlx5dr_action_create_dest_match_range(domain,
-                                                    dst->dest_attr.range.field,
-                                                    dst->dest_attr.range.hit_ft,
-                                                    dst->dest_attr.range.miss_ft,
-                                                    dst->dest_attr.range.min,
-                                                    dst->dest_attr.range.max);
-}
-
-static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
-                                                    struct mlx5_fs_vlan *vlan)
-{
-       u16 n_ethtype = vlan->ethtype;
-       u8  prio = vlan->prio;
-       u16 vid = vlan->vid;
-       u32 vlan_hdr;
-
-       vlan_hdr = (u32)n_ethtype << 16 | (u32)(prio) << 12 |  (u32)vid;
-       return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
-}
-
-static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
-{
-       return (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
-               dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
-               dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
-}
-
-/* We want to support a rule with 32 destinations, which means we need to
- * account for 32 destinations plus usually a counter plus one more action
- * for a multi-destination flow table.
- */
-#define MLX5_FLOW_CONTEXT_ACTION_MAX  34
-static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
-                                 struct mlx5_flow_table *ft,
-                                 struct mlx5_flow_group *group,
-                                 struct fs_fte *fte)
-{
-       struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
-       struct mlx5dr_action_dest *term_actions;
-       struct mlx5_pkt_reformat *pkt_reformat;
-       struct mlx5dr_match_parameters params;
-       struct mlx5_core_dev *dev = ns->dev;
-       struct mlx5dr_action **fs_dr_actions;
-       struct mlx5dr_action *tmp_action;
-       struct mlx5dr_action **actions;
-       bool delay_encap_set = false;
-       struct mlx5dr_rule *rule;
-       struct mlx5_flow_rule *dst;
-       int fs_dr_num_actions = 0;
-       int num_term_actions = 0;
-       int num_actions = 0;
-       size_t match_sz;
-       int err = 0;
-       int i;
-
-       if (mlx5_fs_cmd_is_fw_term_table(ft))
-               return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
-
-       actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
-                         GFP_KERNEL);
-       if (!actions) {
-               err = -ENOMEM;
-               goto out_err;
-       }
-
-       fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
-                               sizeof(*fs_dr_actions), GFP_KERNEL);
-       if (!fs_dr_actions) {
-               err = -ENOMEM;
-               goto free_actions_alloc;
-       }
-
-       term_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
-                              sizeof(*term_actions), GFP_KERNEL);
-       if (!term_actions) {
-               err = -ENOMEM;
-               goto free_fs_dr_actions_alloc;
-       }
-
-       match_sz = sizeof(fte->val);
-
-       /* Drop reformat action bit if destination vport set with reformat */
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-               list_for_each_entry(dst, &fte->node.children, node.list) {
-                       if (!contain_vport_reformat_action(dst))
-                               continue;
-
-                       fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
-                       break;
-               }
-       }
-
-       /* The order of the actions are must to be keep, only the following
-        * order is supported by SW steering:
-        * TX: modify header -> push vlan -> encap
-        * RX: decap -> pop vlan -> modify header
-        */
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
-               enum mlx5dr_action_reformat_type decap_type =
-                       DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
-
-               tmp_action = mlx5dr_action_create_packet_reformat(domain,
-                                                                 decap_type,
-                                                                 0, 0, 0,
-                                                                 NULL);
-               if (!tmp_action) {
-                       err = -ENOMEM;
-                       goto free_actions;
-               }
-               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-               actions[num_actions++] = tmp_action;
-       }
-
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
-               bool is_decap;
-
-               pkt_reformat = fte->act_dests.action.pkt_reformat;
-               if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
-                       err = -EINVAL;
-                       mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n");
-                       goto free_actions;
-               }
-
-               is_decap = pkt_reformat->reformat_type ==
-                          MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
-
-               if (is_decap)
-                       actions[num_actions++] =
-                               pkt_reformat->fs_dr_action.dr_action;
-               else
-                       delay_encap_set = true;
-       }
-
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
-               tmp_action =
-                       mlx5dr_action_create_pop_vlan();
-               if (!tmp_action) {
-                       err = -ENOMEM;
-                       goto free_actions;
-               }
-               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-               actions[num_actions++] = tmp_action;
-       }
-
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
-               tmp_action =
-                       mlx5dr_action_create_pop_vlan();
-               if (!tmp_action) {
-                       err = -ENOMEM;
-                       goto free_actions;
-               }
-               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-               actions[num_actions++] = tmp_action;
-       }
-
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
-               struct mlx5_modify_hdr *modify_hdr = fte->act_dests.action.modify_hdr;
-
-               actions[num_actions++] = modify_hdr->fs_dr_action.dr_action;
-       }
-
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
-               tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[0]);
-               if (!tmp_action) {
-                       err = -ENOMEM;
-                       goto free_actions;
-               }
-               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-               actions[num_actions++] = tmp_action;
-       }
-
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
-               tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[1]);
-               if (!tmp_action) {
-                       err = -ENOMEM;
-                       goto free_actions;
-               }
-               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-               actions[num_actions++] = tmp_action;
-       }
-
-       if (delay_encap_set)
-               actions[num_actions++] = pkt_reformat->fs_dr_action.dr_action;
-
-       /* The order of the actions below is not important */
-
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
-               tmp_action = mlx5dr_action_create_drop();
-               if (!tmp_action) {
-                       err = -ENOMEM;
-                       goto free_actions;
-               }
-               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-               term_actions[num_term_actions++].dest = tmp_action;
-       }
-
-       if (fte->act_dests.flow_context.flow_tag) {
-               tmp_action =
-                       mlx5dr_action_create_tag(fte->act_dests.flow_context.flow_tag);
-               if (!tmp_action) {
-                       err = -ENOMEM;
-                       goto free_actions;
-               }
-               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-               actions[num_actions++] = tmp_action;
-       }
-
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-               list_for_each_entry(dst, &fte->node.children, node.list) {
-                       enum mlx5_flow_destination_type type = dst->dest_attr.type;
-                       u32 id;
-
-                       if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
-                           num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
-                               err = -EOPNOTSUPP;
-                               goto free_actions;
-                       }
-
-                       if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
-                               continue;
-
-                       switch (type) {
-                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
-                               tmp_action = create_ft_action(domain, dst);
-                               if (!tmp_action) {
-                                       err = -ENOMEM;
-                                       goto free_actions;
-                               }
-                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                               term_actions[num_term_actions++].dest = tmp_action;
-                               break;
-                       case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
-                       case MLX5_FLOW_DESTINATION_TYPE_VPORT:
-                               tmp_action = type == MLX5_FLOW_DESTINATION_TYPE_VPORT ?
-                                            create_vport_action(domain, dst) :
-                                            create_uplink_action(domain, dst);
-                               if (!tmp_action) {
-                                       err = -ENOMEM;
-                                       goto free_actions;
-                               }
-                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                               term_actions[num_term_actions].dest = tmp_action;
-
-                               if (dst->dest_attr.vport.flags &
-                                   MLX5_FLOW_DEST_VPORT_REFORMAT_ID) {
-                                       pkt_reformat = dst->dest_attr.vport.pkt_reformat;
-                                       term_actions[num_term_actions].reformat =
-                                               pkt_reformat->fs_dr_action.dr_action;
-                               }
-
-                               num_term_actions++;
-                               break;
-                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
-                               id = dst->dest_attr.ft_num;
-                               tmp_action = mlx5dr_action_create_dest_table_num(domain,
-                                                                                id);
-                               if (!tmp_action) {
-                                       err = -ENOMEM;
-                                       goto free_actions;
-                               }
-                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                               term_actions[num_term_actions++].dest = tmp_action;
-                               break;
-                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
-                               id = dst->dest_attr.sampler_id;
-                               tmp_action = mlx5dr_action_create_flow_sampler(domain,
-                                                                              id);
-                               if (!tmp_action) {
-                                       err = -ENOMEM;
-                                       goto free_actions;
-                               }
-                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                               term_actions[num_term_actions++].dest = tmp_action;
-                               break;
-                       case MLX5_FLOW_DESTINATION_TYPE_RANGE:
-                               tmp_action = create_range_action(domain, dst);
-                               if (!tmp_action) {
-                                       err = -ENOMEM;
-                                       goto free_actions;
-                               }
-                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                               term_actions[num_term_actions++].dest = tmp_action;
-                               break;
-                       default:
-                               err = -EOPNOTSUPP;
-                               goto free_actions;
-                       }
-               }
-       }
-
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
-               list_for_each_entry(dst, &fte->node.children, node.list) {
-                       u32 id;
-
-                       if (dst->dest_attr.type !=
-                           MLX5_FLOW_DESTINATION_TYPE_COUNTER)
-                               continue;
-
-                       if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
-                           fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
-                               err = -EOPNOTSUPP;
-                               goto free_actions;
-                       }
-
-                       id = dst->dest_attr.counter_id;
-                       tmp_action =
-                               mlx5dr_action_create_flow_counter(id);
-                       if (!tmp_action) {
-                               err = -ENOMEM;
-                               goto free_actions;
-                       }
-
-                       fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-                       actions[num_actions++] = tmp_action;
-               }
-       }
-
-       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
-               struct mlx5_flow_act *action = &fte->act_dests.action;
-
-               if (fte->act_dests.action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
-                       err = -EOPNOTSUPP;
-                       goto free_actions;
-               }
-
-               tmp_action =
-                       mlx5dr_action_create_aso(domain,
-                                                action->exe_aso.object_id,
-                                                action->exe_aso.return_reg_id,
-                                                action->exe_aso.type,
-                                                action->exe_aso.flow_meter.init_color,
-                                                action->exe_aso.flow_meter.meter_idx);
-               if (!tmp_action) {
-                       err = -ENOMEM;
-                       goto free_actions;
-               }
-               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-               actions[num_actions++] = tmp_action;
-       }
-
-       params.match_sz = match_sz;
-       params.match_buf = (u64 *)fte->val;
-       if (num_term_actions == 1) {
-               if (term_actions->reformat) {
-                       if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
-                               err = -EOPNOTSUPP;
-                               goto free_actions;
-                       }
-                       actions[num_actions++] = term_actions->reformat;
-               }
-
-               if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
-                       err = -EOPNOTSUPP;
-                       goto free_actions;
-               }
-               actions[num_actions++] = term_actions->dest;
-       } else if (num_term_actions > 1) {
-               bool ignore_flow_level =
-                       !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
-               u32 flow_source = fte->act_dests.flow_context.flow_source;
-
-               if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
-                   fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
-                       err = -EOPNOTSUPP;
-                       goto free_actions;
-               }
-               tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
-                                                               term_actions,
-                                                               num_term_actions,
-                                                               ignore_flow_level,
-                                                               flow_source);
-               if (!tmp_action) {
-                       err = -EOPNOTSUPP;
-                       goto free_actions;
-               }
-               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
-               actions[num_actions++] = tmp_action;
-       }
-
-       rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
-                                 &params,
-                                 num_actions,
-                                 actions,
-                                 fte->act_dests.flow_context.flow_source);
-       if (!rule) {
-               err = -EINVAL;
-               goto free_actions;
-       }
-
-       kfree(term_actions);
-       kfree(actions);
-
-       fte->fs_dr_rule.dr_rule = rule;
-       fte->fs_dr_rule.num_actions = fs_dr_num_actions;
-       fte->fs_dr_rule.dr_actions = fs_dr_actions;
-
-       return 0;
-
-free_actions:
-       /* Free in reverse order to handle action dependencies */
-       for (i = fs_dr_num_actions - 1; i >= 0; i--)
-               if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
-                       mlx5dr_action_destroy(fs_dr_actions[i]);
-
-       kfree(term_actions);
-free_fs_dr_actions_alloc:
-       kfree(fs_dr_actions);
-free_actions_alloc:
-       kfree(actions);
-out_err:
-       mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
-       return err;
-}
-
-static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
-                                            struct mlx5_pkt_reformat_params *params,
-                                            enum mlx5_flow_namespace_type namespace,
-                                            struct mlx5_pkt_reformat *pkt_reformat)
-{
-       struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
-       struct mlx5dr_action *action;
-       int dr_reformat;
-
-       switch (params->type) {
-       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
-       case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
-       case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
-               dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2;
-               break;
-       case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
-               dr_reformat = DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2;
-               break;
-       case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
-               dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
-               break;
-       case MLX5_REFORMAT_TYPE_INSERT_HDR:
-               dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR;
-               break;
-       case MLX5_REFORMAT_TYPE_REMOVE_HDR:
-               dr_reformat = DR_ACTION_REFORMAT_TYP_REMOVE_HDR;
-               break;
-       default:
-               mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
-                             params->type);
-               return -EOPNOTSUPP;
-       }
-
-       action = mlx5dr_action_create_packet_reformat(dr_domain,
-                                                     dr_reformat,
-                                                     params->param_0,
-                                                     params->param_1,
-                                                     params->size,
-                                                     params->data);
-       if (!action) {
-               mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
-               return -EINVAL;
-       }
-
-       pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
-       pkt_reformat->fs_dr_action.dr_action = action;
-
-       return 0;
-}
-
-static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
-                                               struct mlx5_pkt_reformat *pkt_reformat)
-{
-       mlx5dr_action_destroy(pkt_reformat->fs_dr_action.dr_action);
-}
-
-static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
-                                          u8 namespace, u8 num_actions,
-                                          void *modify_actions,
-                                          struct mlx5_modify_hdr *modify_hdr)
-{
-       struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
-       struct mlx5dr_action *action;
-       size_t actions_sz;
-
-       actions_sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) *
-               num_actions;
-       action = mlx5dr_action_create_modify_header(dr_domain, 0,
-                                                   actions_sz,
-                                                   modify_actions);
-       if (!action) {
-               mlx5_core_err(ns->dev, "Failed allocating modify-header action\n");
-               return -EINVAL;
-       }
-
-       modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
-       modify_hdr->fs_dr_action.dr_action = action;
-
-       return 0;
-}
-
-static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
-                                             struct mlx5_modify_hdr *modify_hdr)
-{
-       mlx5dr_action_destroy(modify_hdr->fs_dr_action.dr_action);
-}
-
-static int
-mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
-                                 int definer_id)
-{
-       return -EOPNOTSUPP;
-}
-
-static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns,
-                                           u16 format_id, u32 *match_mask)
-{
-       return -EOPNOTSUPP;
-}
-
-static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
-                                 struct mlx5_flow_table *ft,
-                                 struct fs_fte *fte)
-{
-       struct mlx5_fs_dr_rule *rule = &fte->fs_dr_rule;
-       int err;
-       int i;
-
-       if (mlx5_fs_cmd_is_fw_term_table(ft))
-               return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
-
-       err = mlx5dr_rule_destroy(rule->dr_rule);
-       if (err)
-               return err;
-
-       /* Free in reverse order to handle action dependencies */
-       for (i = rule->num_actions - 1; i >= 0; i--)
-               if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
-                       mlx5dr_action_destroy(rule->dr_actions[i]);
-
-       kfree(rule->dr_actions);
-       return 0;
-}
-
-static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
-                                 struct mlx5_flow_table *ft,
-                                 struct mlx5_flow_group *group,
-                                 int modify_mask,
-                                 struct fs_fte *fte)
-{
-       struct fs_fte fte_tmp = {};
-       int ret;
-
-       if (mlx5_fs_cmd_is_fw_term_table(ft))
-               return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
-
-       /* Backup current dr rule details */
-       fte_tmp.fs_dr_rule = fte->fs_dr_rule;
-       memset(&fte->fs_dr_rule, 0, sizeof(struct mlx5_fs_dr_rule));
-
-       /* First add the new updated rule, then delete the old rule */
-       ret = mlx5_cmd_dr_create_fte(ns, ft, group, fte);
-       if (ret)
-               goto restore_fte;
-
-       ret = mlx5_cmd_dr_delete_fte(ns, ft, &fte_tmp);
-       WARN_ONCE(ret, "dr update fte duplicate rule deletion failed\n");
-       return ret;
-
-restore_fte:
-       fte->fs_dr_rule = fte_tmp.fs_dr_rule;
-       return ret;
-}
-
-static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
-                               struct mlx5_flow_root_namespace *peer_ns,
-                               u16 peer_vhca_id)
-{
-       struct mlx5dr_domain *peer_domain = NULL;
-
-       if (peer_ns)
-               peer_domain = peer_ns->fs_dr_domain.dr_domain;
-       mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
-                              peer_domain, peer_vhca_id);
-       return 0;
-}
-
-static int mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace *ns)
-{
-       ns->fs_dr_domain.dr_domain =
-               mlx5dr_domain_create(ns->dev,
-                                    MLX5DR_DOMAIN_TYPE_FDB);
-       if (!ns->fs_dr_domain.dr_domain) {
-               mlx5_core_err(ns->dev, "Failed to create dr flow namespace\n");
-               return -EOPNOTSUPP;
-       }
-       return 0;
-}
-
-static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
-{
-       return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
-}
-
-static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
-                                       enum fs_flow_table_type ft_type)
-{
-       u32 steering_caps = MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
-
-       if (ft_type != FS_FT_FDB ||
-           MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
-               return steering_caps;
-
-       steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
-       steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
-
-       if (mlx5dr_supp_match_ranges(ns->dev))
-               steering_caps |= MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
-
-       return steering_caps;
-}
-
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
-{
-       switch (pkt_reformat->reformat_type) {
-       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
-       case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
-       case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
-       case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
-       case MLX5_REFORMAT_TYPE_INSERT_HDR:
-               return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->fs_dr_action.dr_action);
-       }
-       return -EOPNOTSUPP;
-}
-
-bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
-{
-       return mlx5dr_is_supported(dev);
-}
-
-static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
-       .create_flow_table = mlx5_cmd_dr_create_flow_table,
-       .destroy_flow_table = mlx5_cmd_dr_destroy_flow_table,
-       .modify_flow_table = mlx5_cmd_dr_modify_flow_table,
-       .create_flow_group = mlx5_cmd_dr_create_flow_group,
-       .destroy_flow_group = mlx5_cmd_dr_destroy_flow_group,
-       .create_fte = mlx5_cmd_dr_create_fte,
-       .update_fte = mlx5_cmd_dr_update_fte,
-       .delete_fte = mlx5_cmd_dr_delete_fte,
-       .update_root_ft = mlx5_cmd_dr_update_root_ft,
-       .packet_reformat_alloc = mlx5_cmd_dr_packet_reformat_alloc,
-       .packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
-       .modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
-       .modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
-       .create_match_definer = mlx5_cmd_dr_create_match_definer,
-       .destroy_match_definer = mlx5_cmd_dr_destroy_match_definer,
-       .set_peer = mlx5_cmd_dr_set_peer,
-       .create_ns = mlx5_cmd_dr_create_ns,
-       .destroy_ns = mlx5_cmd_dr_destroy_ns,
-       .get_capabilities = mlx5_cmd_dr_get_capabilities,
-};
-
-const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
-{
-               return &mlx5_flow_cmds_dr;
-}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
deleted file mode 100644 (file)
index 99a3b2e..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
- * Copyright (c) 2019 Mellanox Technologies
- */
-
-#ifndef _MLX5_FS_DR_
-#define _MLX5_FS_DR_
-
-#include "mlx5dr.h"
-
-struct mlx5_flow_root_namespace;
-struct fs_fte;
-
-struct mlx5_fs_dr_action {
-       struct mlx5dr_action *dr_action;
-};
-
-struct mlx5_fs_dr_rule {
-       struct mlx5dr_rule    *dr_rule;
-       /* Only actions created by fs_dr */
-       struct mlx5dr_action  **dr_actions;
-       int                      num_actions;
-};
-
-struct mlx5_fs_dr_domain {
-       struct mlx5dr_domain    *dr_domain;
-};
-
-struct mlx5_fs_dr_matcher {
-       struct mlx5dr_matcher *dr_matcher;
-};
-
-struct mlx5_fs_dr_table {
-       struct mlx5dr_table  *dr_table;
-       struct mlx5dr_action *miss_action;
-};
-
-#ifdef CONFIG_MLX5_SW_STEERING
-
-bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
-
-int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat);
-
-const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
-
-#else
-
-static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
-{
-       return NULL;
-}
-
-static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
-{
-       return 0;
-}
-
-static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
-{
-       return false;
-}
-
-#endif /* CONFIG_MLX5_SW_STEERING */
-#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h
deleted file mode 100644 (file)
index fb078fa..0000000
+++ /dev/null
@@ -1,603 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2019, Mellanox Technologies */
-
-#ifndef MLX5_IFC_DR_H
-#define MLX5_IFC_DR_H
-
-enum {
-       MLX5DR_STE_LU_TYPE_DONT_CARE                    = 0x0f,
-};
-
-struct mlx5_ifc_ste_general_bits {
-       u8         entry_type[0x4];
-       u8         reserved_at_4[0x4];
-       u8         entry_sub_type[0x8];
-       u8         byte_mask[0x10];
-
-       u8         next_table_base_63_48[0x10];
-       u8         next_lu_type[0x8];
-       u8         next_table_base_39_32_size[0x8];
-
-       u8         next_table_base_31_5_size[0x1b];
-       u8         linear_hash_enable[0x1];
-       u8         reserved_at_5c[0x2];
-       u8         next_table_rank[0x2];
-
-       u8         reserved_at_60[0xa0];
-       u8         tag_value[0x60];
-       u8         bit_mask[0x60];
-};
-
-struct mlx5_ifc_ste_sx_transmit_bits {
-       u8         entry_type[0x4];
-       u8         reserved_at_4[0x4];
-       u8         entry_sub_type[0x8];
-       u8         byte_mask[0x10];
-
-       u8         next_table_base_63_48[0x10];
-       u8         next_lu_type[0x8];
-       u8         next_table_base_39_32_size[0x8];
-
-       u8         next_table_base_31_5_size[0x1b];
-       u8         linear_hash_enable[0x1];
-       u8         reserved_at_5c[0x2];
-       u8         next_table_rank[0x2];
-
-       u8         sx_wire[0x1];
-       u8         sx_func_lb[0x1];
-       u8         sx_sniffer[0x1];
-       u8         sx_wire_enable[0x1];
-       u8         sx_func_lb_enable[0x1];
-       u8         sx_sniffer_enable[0x1];
-       u8         action_type[0x3];
-       u8         reserved_at_69[0x1];
-       u8         action_description[0x6];
-       u8         gvmi[0x10];
-
-       u8         encap_pointer_vlan_data[0x20];
-
-       u8         loopback_syndome_en[0x8];
-       u8         loopback_syndome[0x8];
-       u8         counter_trigger[0x10];
-
-       u8         miss_address_63_48[0x10];
-       u8         counter_trigger_23_16[0x8];
-       u8         miss_address_39_32[0x8];
-
-       u8         miss_address_31_6[0x1a];
-       u8         learning_point[0x1];
-       u8         go_back[0x1];
-       u8         match_polarity[0x1];
-       u8         mask_mode[0x1];
-       u8         miss_rank[0x2];
-};
-
-struct mlx5_ifc_ste_rx_steering_mult_bits {
-       u8         entry_type[0x4];
-       u8         reserved_at_4[0x4];
-       u8         entry_sub_type[0x8];
-       u8         byte_mask[0x10];
-
-       u8         next_table_base_63_48[0x10];
-       u8         next_lu_type[0x8];
-       u8         next_table_base_39_32_size[0x8];
-
-       u8         next_table_base_31_5_size[0x1b];
-       u8         linear_hash_enable[0x1];
-       u8         reserved_at_[0x2];
-       u8         next_table_rank[0x2];
-
-       u8         member_count[0x10];
-       u8         gvmi[0x10];
-
-       u8         qp_list_pointer[0x20];
-
-       u8         reserved_at_a0[0x1];
-       u8         tunneling_action[0x3];
-       u8         action_description[0x4];
-       u8         reserved_at_a8[0x8];
-       u8         counter_trigger_15_0[0x10];
-
-       u8         miss_address_63_48[0x10];
-       u8         counter_trigger_23_16[0x08];
-       u8         miss_address_39_32[0x8];
-
-       u8         miss_address_31_6[0x1a];
-       u8         learning_point[0x1];
-       u8         fail_on_error[0x1];
-       u8         match_polarity[0x1];
-       u8         mask_mode[0x1];
-       u8         miss_rank[0x2];
-};
-
-struct mlx5_ifc_ste_modify_packet_bits {
-       u8         entry_type[0x4];
-       u8         reserved_at_4[0x4];
-       u8         entry_sub_type[0x8];
-       u8         byte_mask[0x10];
-
-       u8         next_table_base_63_48[0x10];
-       u8         next_lu_type[0x8];
-       u8         next_table_base_39_32_size[0x8];
-
-       u8         next_table_base_31_5_size[0x1b];
-       u8         linear_hash_enable[0x1];
-       u8         reserved_at_[0x2];
-       u8         next_table_rank[0x2];
-
-       u8         number_of_re_write_actions[0x10];
-       u8         gvmi[0x10];
-
-       u8         header_re_write_actions_pointer[0x20];
-
-       u8         reserved_at_a0[0x1];
-       u8         tunneling_action[0x3];
-       u8         action_description[0x4];
-       u8         reserved_at_a8[0x8];
-       u8         counter_trigger_15_0[0x10];
-
-       u8         miss_address_63_48[0x10];
-       u8         counter_trigger_23_16[0x08];
-       u8         miss_address_39_32[0x8];
-
-       u8         miss_address_31_6[0x1a];
-       u8         learning_point[0x1];
-       u8         fail_on_error[0x1];
-       u8         match_polarity[0x1];
-       u8         mask_mode[0x1];
-       u8         miss_rank[0x2];
-};
-
-struct mlx5_ifc_ste_eth_l2_src_bits {
-       u8         smac_47_16[0x20];
-
-       u8         smac_15_0[0x10];
-       u8         l3_ethertype[0x10];
-
-       u8         qp_type[0x2];
-       u8         ethertype_filter[0x1];
-       u8         reserved_at_43[0x1];
-       u8         sx_sniffer[0x1];
-       u8         force_lb[0x1];
-       u8         functional_lb[0x1];
-       u8         port[0x1];
-       u8         reserved_at_48[0x4];
-       u8         first_priority[0x3];
-       u8         first_cfi[0x1];
-       u8         first_vlan_qualifier[0x2];
-       u8         reserved_at_52[0x2];
-       u8         first_vlan_id[0xc];
-
-       u8         ip_fragmented[0x1];
-       u8         tcp_syn[0x1];
-       u8         encp_type[0x2];
-       u8         l3_type[0x2];
-       u8         l4_type[0x2];
-       u8         reserved_at_68[0x4];
-       u8         second_priority[0x3];
-       u8         second_cfi[0x1];
-       u8         second_vlan_qualifier[0x2];
-       u8         reserved_at_72[0x2];
-       u8         second_vlan_id[0xc];
-};
-
-struct mlx5_ifc_ste_eth_l2_dst_bits {
-       u8         dmac_47_16[0x20];
-
-       u8         dmac_15_0[0x10];
-       u8         l3_ethertype[0x10];
-
-       u8         qp_type[0x2];
-       u8         ethertype_filter[0x1];
-       u8         reserved_at_43[0x1];
-       u8         sx_sniffer[0x1];
-       u8         force_lb[0x1];
-       u8         functional_lb[0x1];
-       u8         port[0x1];
-       u8         reserved_at_48[0x4];
-       u8         first_priority[0x3];
-       u8         first_cfi[0x1];
-       u8         first_vlan_qualifier[0x2];
-       u8         reserved_at_52[0x2];
-       u8         first_vlan_id[0xc];
-
-       u8         ip_fragmented[0x1];
-       u8         tcp_syn[0x1];
-       u8         encp_type[0x2];
-       u8         l3_type[0x2];
-       u8         l4_type[0x2];
-       u8         reserved_at_68[0x4];
-       u8         second_priority[0x3];
-       u8         second_cfi[0x1];
-       u8         second_vlan_qualifier[0x2];
-       u8         reserved_at_72[0x2];
-       u8         second_vlan_id[0xc];
-};
-
-struct mlx5_ifc_ste_eth_l2_src_dst_bits {
-       u8         dmac_47_16[0x20];
-
-       u8         dmac_15_0[0x10];
-       u8         smac_47_32[0x10];
-
-       u8         smac_31_0[0x20];
-
-       u8         sx_sniffer[0x1];
-       u8         force_lb[0x1];
-       u8         functional_lb[0x1];
-       u8         port[0x1];
-       u8         l3_type[0x2];
-       u8         reserved_at_66[0x6];
-       u8         first_priority[0x3];
-       u8         first_cfi[0x1];
-       u8         first_vlan_qualifier[0x2];
-       u8         reserved_at_72[0x2];
-       u8         first_vlan_id[0xc];
-};
-
-struct mlx5_ifc_ste_eth_l3_ipv4_5_tuple_bits {
-       u8         destination_address[0x20];
-
-       u8         source_address[0x20];
-
-       u8         source_port[0x10];
-       u8         destination_port[0x10];
-
-       u8         fragmented[0x1];
-       u8         first_fragment[0x1];
-       u8         reserved_at_62[0x2];
-       u8         reserved_at_64[0x1];
-       u8         ecn[0x2];
-       u8         tcp_ns[0x1];
-       u8         tcp_cwr[0x1];
-       u8         tcp_ece[0x1];
-       u8         tcp_urg[0x1];
-       u8         tcp_ack[0x1];
-       u8         tcp_psh[0x1];
-       u8         tcp_rst[0x1];
-       u8         tcp_syn[0x1];
-       u8         tcp_fin[0x1];
-       u8         dscp[0x6];
-       u8         reserved_at_76[0x2];
-       u8         protocol[0x8];
-};
-
-struct mlx5_ifc_ste_eth_l3_ipv6_dst_bits {
-       u8         dst_ip_127_96[0x20];
-
-       u8         dst_ip_95_64[0x20];
-
-       u8         dst_ip_63_32[0x20];
-
-       u8         dst_ip_31_0[0x20];
-};
-
-struct mlx5_ifc_ste_eth_l2_tnl_bits {
-       u8         dmac_47_16[0x20];
-
-       u8         dmac_15_0[0x10];
-       u8         l3_ethertype[0x10];
-
-       u8         l2_tunneling_network_id[0x20];
-
-       u8         ip_fragmented[0x1];
-       u8         tcp_syn[0x1];
-       u8         encp_type[0x2];
-       u8         l3_type[0x2];
-       u8         l4_type[0x2];
-       u8         first_priority[0x3];
-       u8         first_cfi[0x1];
-       u8         reserved_at_6c[0x3];
-       u8         gre_key_flag[0x1];
-       u8         first_vlan_qualifier[0x2];
-       u8         reserved_at_72[0x2];
-       u8         first_vlan_id[0xc];
-};
-
-struct mlx5_ifc_ste_eth_l3_ipv6_src_bits {
-       u8         src_ip_127_96[0x20];
-
-       u8         src_ip_95_64[0x20];
-
-       u8         src_ip_63_32[0x20];
-
-       u8         src_ip_31_0[0x20];
-};
-
-struct mlx5_ifc_ste_eth_l3_ipv4_misc_bits {
-       u8         version[0x4];
-       u8         ihl[0x4];
-       u8         reserved_at_8[0x8];
-       u8         total_length[0x10];
-
-       u8         identification[0x10];
-       u8         flags[0x3];
-       u8         fragment_offset[0xd];
-
-       u8         time_to_live[0x8];
-       u8         reserved_at_48[0x8];
-       u8         checksum[0x10];
-
-       u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_ste_eth_l4_bits {
-       u8         fragmented[0x1];
-       u8         first_fragment[0x1];
-       u8         reserved_at_2[0x6];
-       u8         protocol[0x8];
-       u8         dst_port[0x10];
-
-       u8         ipv6_version[0x4];
-       u8         reserved_at_24[0x1];
-       u8         ecn[0x2];
-       u8         tcp_ns[0x1];
-       u8         tcp_cwr[0x1];
-       u8         tcp_ece[0x1];
-       u8         tcp_urg[0x1];
-       u8         tcp_ack[0x1];
-       u8         tcp_psh[0x1];
-       u8         tcp_rst[0x1];
-       u8         tcp_syn[0x1];
-       u8         tcp_fin[0x1];
-       u8         src_port[0x10];
-
-       u8         ipv6_payload_length[0x10];
-       u8         ipv6_hop_limit[0x8];
-       u8         dscp[0x6];
-       u8         reserved_at_5e[0x2];
-
-       u8         tcp_data_offset[0x4];
-       u8         reserved_at_64[0x8];
-       u8         flow_label[0x14];
-};
-
-struct mlx5_ifc_ste_eth_l4_misc_bits {
-       u8         checksum[0x10];
-       u8         length[0x10];
-
-       u8         seq_num[0x20];
-
-       u8         ack_num[0x20];
-
-       u8         urgent_pointer[0x10];
-       u8         window_size[0x10];
-};
-
-struct mlx5_ifc_ste_mpls_bits {
-       u8         mpls0_label[0x14];
-       u8         mpls0_exp[0x3];
-       u8         mpls0_s_bos[0x1];
-       u8         mpls0_ttl[0x8];
-
-       u8         mpls1_label[0x20];
-
-       u8         mpls2_label[0x20];
-
-       u8         reserved_at_60[0x16];
-       u8         mpls4_s_bit[0x1];
-       u8         mpls4_qualifier[0x1];
-       u8         mpls3_s_bit[0x1];
-       u8         mpls3_qualifier[0x1];
-       u8         mpls2_s_bit[0x1];
-       u8         mpls2_qualifier[0x1];
-       u8         mpls1_s_bit[0x1];
-       u8         mpls1_qualifier[0x1];
-       u8         mpls0_s_bit[0x1];
-       u8         mpls0_qualifier[0x1];
-};
-
-struct mlx5_ifc_ste_register_0_bits {
-       u8         register_0_h[0x20];
-
-       u8         register_0_l[0x20];
-
-       u8         register_1_h[0x20];
-
-       u8         register_1_l[0x20];
-};
-
-struct mlx5_ifc_ste_register_1_bits {
-       u8         register_2_h[0x20];
-
-       u8         register_2_l[0x20];
-
-       u8         register_3_h[0x20];
-
-       u8         register_3_l[0x20];
-};
-
-struct mlx5_ifc_ste_gre_bits {
-       u8         gre_c_present[0x1];
-       u8         reserved_at_30[0x1];
-       u8         gre_k_present[0x1];
-       u8         gre_s_present[0x1];
-       u8         strict_src_route[0x1];
-       u8         recur[0x3];
-       u8         flags[0x5];
-       u8         version[0x3];
-       u8         gre_protocol[0x10];
-
-       u8         checksum[0x10];
-       u8         offset[0x10];
-
-       u8         gre_key_h[0x18];
-       u8         gre_key_l[0x8];
-
-       u8         seq_num[0x20];
-};
-
-struct mlx5_ifc_ste_flex_parser_0_bits {
-       u8         flex_parser_3[0x20];
-
-       u8         flex_parser_2[0x20];
-
-       u8         flex_parser_1[0x20];
-
-       u8         flex_parser_0[0x20];
-};
-
-struct mlx5_ifc_ste_flex_parser_1_bits {
-       u8         flex_parser_7[0x20];
-
-       u8         flex_parser_6[0x20];
-
-       u8         flex_parser_5[0x20];
-
-       u8         flex_parser_4[0x20];
-};
-
-struct mlx5_ifc_ste_flex_parser_ok_bits {
-       u8         flex_parser_3[0x20];
-       u8         flex_parser_2[0x20];
-       u8         flex_parsers_ok[0x8];
-       u8         reserved_at_48[0x18];
-       u8         flex_parser_0[0x20];
-};
-
-struct mlx5_ifc_ste_flex_parser_tnl_bits {
-       u8         flex_parser_tunneling_header_63_32[0x20];
-
-       u8         flex_parser_tunneling_header_31_0[0x20];
-
-       u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_ste_flex_parser_tnl_vxlan_gpe_bits {
-       u8         outer_vxlan_gpe_flags[0x8];
-       u8         reserved_at_8[0x10];
-       u8         outer_vxlan_gpe_next_protocol[0x8];
-
-       u8         outer_vxlan_gpe_vni[0x18];
-       u8         reserved_at_38[0x8];
-
-       u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits {
-       u8         reserved_at_0[0x2];
-       u8         geneve_opt_len[0x6];
-       u8         geneve_oam[0x1];
-       u8         reserved_at_9[0x7];
-       u8         geneve_protocol_type[0x10];
-
-       u8         geneve_vni[0x18];
-       u8         reserved_at_38[0x8];
-
-       u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits {
-       u8         reserved_at_0[0x5];
-       u8         gtpu_msg_flags[0x3];
-       u8         gtpu_msg_type[0x8];
-       u8         reserved_at_10[0x10];
-
-       u8         gtpu_teid[0x20];
-
-       u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_ste_tunnel_header_bits {
-       u8         tunnel_header_0[0x20];
-
-       u8         tunnel_header_1[0x20];
-
-       u8         reserved_at_40[0x40];
-};
-
-struct mlx5_ifc_ste_general_purpose_bits {
-       u8         general_purpose_lookup_field[0x20];
-
-       u8         reserved_at_20[0x20];
-
-       u8         reserved_at_40[0x20];
-
-       u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_ste_src_gvmi_qp_bits {
-       u8         loopback_syndrome[0x8];
-       u8         reserved_at_8[0x8];
-       u8         source_gvmi[0x10];
-
-       u8         reserved_at_20[0x5];
-       u8         force_lb[0x1];
-       u8         functional_lb[0x1];
-       u8         source_is_requestor[0x1];
-       u8         source_qp[0x18];
-
-       u8         reserved_at_40[0x20];
-
-       u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_l2_hdr_bits {
-       u8         dmac_47_16[0x20];
-
-       u8         dmac_15_0[0x10];
-       u8         smac_47_32[0x10];
-
-       u8         smac_31_0[0x20];
-
-       u8         ethertype[0x10];
-       u8         vlan_type[0x10];
-
-       u8         vlan[0x10];
-       u8         reserved_at_90[0x10];
-};
-
-/* Both HW set and HW add share the same HW format with different opcodes */
-struct mlx5_ifc_dr_action_hw_set_bits {
-       u8         opcode[0x8];
-       u8         destination_field_code[0x8];
-       u8         reserved_at_10[0x2];
-       u8         destination_left_shifter[0x6];
-       u8         reserved_at_18[0x3];
-       u8         destination_length[0x5];
-
-       u8         inline_data[0x20];
-};
-
-struct mlx5_ifc_dr_action_hw_copy_bits {
-       u8         opcode[0x8];
-       u8         destination_field_code[0x8];
-       u8         reserved_at_10[0x2];
-       u8         destination_left_shifter[0x6];
-       u8         reserved_at_18[0x2];
-       u8         destination_length[0x6];
-
-       u8         reserved_at_20[0x8];
-       u8         source_field_code[0x8];
-       u8         reserved_at_30[0x2];
-       u8         source_left_shifter[0x6];
-       u8         reserved_at_38[0x8];
-};
-
-enum {
-       MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ = 2,
-};
-
-struct mlx5_ifc_ste_aso_flow_meter_action_bits {
-       u8         reserved_at_0[0xc];
-       u8         action[0x1];
-       u8         initial_color[0x2];
-       u8         line_id[0x1];
-};
-
-struct mlx5_ifc_ste_double_action_aso_v1_bits {
-       u8         action_id[0x8];
-       u8         aso_context_number[0x18];
-
-       u8         dest_reg_id[0x2];
-       u8         change_ordering_tag[0x1];
-       u8         aso_check_ordering[0x1];
-       u8         aso_context_type[0x4];
-       u8         reserved_at_28[0x8];
-       union {
-               u8 aso_fields[0x10];
-               struct mlx5_ifc_ste_aso_flow_meter_action_bits flow_meter;
-       };
-};
-
-#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr_ste_v1.h
deleted file mode 100644 (file)
index ca3b0f1..0000000
+++ /dev/null
@@ -1,469 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
-
-#ifndef MLX5_IFC_DR_STE_V1_H
-#define MLX5_IFC_DR_STE_V1_H
-
-enum mlx5_ifc_ste_v1_modify_hdr_offset {
-       MLX5_MODIFY_HEADER_V1_QW_OFFSET = 0x20,
-};
-
-struct mlx5_ifc_ste_single_action_flow_tag_v1_bits {
-       u8         action_id[0x8];
-       u8         flow_tag[0x18];
-};
-
-struct mlx5_ifc_ste_single_action_modify_list_v1_bits {
-       u8         action_id[0x8];
-       u8         num_of_modify_actions[0x8];
-       u8         modify_actions_ptr[0x10];
-};
-
-struct mlx5_ifc_ste_single_action_remove_header_v1_bits {
-       u8         action_id[0x8];
-       u8         reserved_at_8[0x2];
-       u8         start_anchor[0x6];
-       u8         reserved_at_10[0x2];
-       u8         end_anchor[0x6];
-       u8         reserved_at_18[0x4];
-       u8         decap[0x1];
-       u8         vni_to_cqe[0x1];
-       u8         qos_profile[0x2];
-};
-
-struct mlx5_ifc_ste_single_action_remove_header_size_v1_bits {
-       u8         action_id[0x8];
-       u8         reserved_at_8[0x2];
-       u8         start_anchor[0x6];
-       u8         outer_l4_remove[0x1];
-       u8         reserved_at_11[0x1];
-       u8         start_offset[0x7];
-       u8         reserved_at_18[0x1];
-       u8         remove_size[0x6];
-};
-
-struct mlx5_ifc_ste_double_action_copy_v1_bits {
-       u8         action_id[0x8];
-       u8         destination_dw_offset[0x8];
-       u8         reserved_at_10[0x2];
-       u8         destination_left_shifter[0x6];
-       u8         reserved_at_17[0x2];
-       u8         destination_length[0x6];
-
-       u8         reserved_at_20[0x8];
-       u8         source_dw_offset[0x8];
-       u8         reserved_at_30[0x2];
-       u8         source_right_shifter[0x6];
-       u8         reserved_at_38[0x8];
-};
-
-struct mlx5_ifc_ste_double_action_set_v1_bits {
-       u8         action_id[0x8];
-       u8         destination_dw_offset[0x8];
-       u8         reserved_at_10[0x2];
-       u8         destination_left_shifter[0x6];
-       u8         reserved_at_18[0x2];
-       u8         destination_length[0x6];
-
-       u8         inline_data[0x20];
-};
-
-struct mlx5_ifc_ste_double_action_add_v1_bits {
-       u8         action_id[0x8];
-       u8         destination_dw_offset[0x8];
-       u8         reserved_at_10[0x2];
-       u8         destination_left_shifter[0x6];
-       u8         reserved_at_18[0x2];
-       u8         destination_length[0x6];
-
-       u8         add_value[0x20];
-};
-
-struct mlx5_ifc_ste_double_action_insert_with_inline_v1_bits {
-       u8         action_id[0x8];
-       u8         reserved_at_8[0x2];
-       u8         start_anchor[0x6];
-       u8         start_offset[0x7];
-       u8         reserved_at_17[0x9];
-
-       u8         inline_data[0x20];
-};
-
-struct mlx5_ifc_ste_double_action_insert_with_ptr_v1_bits {
-       u8         action_id[0x8];
-       u8         reserved_at_8[0x2];
-       u8         start_anchor[0x6];
-       u8         start_offset[0x7];
-       u8         size[0x6];
-       u8         attributes[0x3];
-
-       u8         pointer[0x20];
-};
-
-struct mlx5_ifc_ste_double_action_accelerated_modify_action_list_v1_bits {
-       u8         action_id[0x8];
-       u8         modify_actions_pattern_pointer[0x18];
-
-       u8         number_of_modify_actions[0x8];
-       u8         modify_actions_argument_pointer[0x18];
-};
-
-struct mlx5_ifc_ste_match_bwc_v1_bits {
-       u8         entry_format[0x8];
-       u8         counter_id[0x18];
-
-       u8         miss_address_63_48[0x10];
-       u8         match_definer_ctx_idx[0x8];
-       u8         miss_address_39_32[0x8];
-
-       u8         miss_address_31_6[0x1a];
-       u8         reserved_at_5a[0x1];
-       u8         match_polarity[0x1];
-       u8         reparse[0x1];
-       u8         reserved_at_5d[0x3];
-
-       u8         next_table_base_63_48[0x10];
-       u8         hash_definer_ctx_idx[0x8];
-       u8         next_table_base_39_32_size[0x8];
-
-       u8         next_table_base_31_5_size[0x1b];
-       u8         hash_type[0x2];
-       u8         hash_after_actions[0x1];
-       u8         reserved_at_9e[0x2];
-
-       u8         byte_mask[0x10];
-       u8         next_entry_format[0x1];
-       u8         mask_mode[0x1];
-       u8         gvmi[0xe];
-
-       u8         action[0x40];
-};
-
-struct mlx5_ifc_ste_mask_and_match_v1_bits {
-       u8         entry_format[0x8];
-       u8         counter_id[0x18];
-
-       u8         miss_address_63_48[0x10];
-       u8         match_definer_ctx_idx[0x8];
-       u8         miss_address_39_32[0x8];
-
-       u8         miss_address_31_6[0x1a];
-       u8         reserved_at_5a[0x1];
-       u8         match_polarity[0x1];
-       u8         reparse[0x1];
-       u8         reserved_at_5d[0x3];
-
-       u8         next_table_base_63_48[0x10];
-       u8         hash_definer_ctx_idx[0x8];
-       u8         next_table_base_39_32_size[0x8];
-
-       u8         next_table_base_31_5_size[0x1b];
-       u8         hash_type[0x2];
-       u8         hash_after_actions[0x1];
-       u8         reserved_at_9e[0x2];
-
-       u8         action[0x60];
-};
-
-struct mlx5_ifc_ste_match_ranges_v1_bits {
-       u8         entry_format[0x8];
-       u8         counter_id[0x18];
-
-       u8         miss_address_63_48[0x10];
-       u8         match_definer_ctx_idx[0x8];
-       u8         miss_address_39_32[0x8];
-
-       u8         miss_address_31_6[0x1a];
-       u8         reserved_at_5a[0x1];
-       u8         match_polarity[0x1];
-       u8         reparse[0x1];
-       u8         reserved_at_5d[0x3];
-
-       u8         next_table_base_63_48[0x10];
-       u8         hash_definer_ctx_idx[0x8];
-       u8         next_table_base_39_32_size[0x8];
-
-       u8         next_table_base_31_5_size[0x1b];
-       u8         hash_type[0x2];
-       u8         hash_after_actions[0x1];
-       u8         reserved_at_9e[0x2];
-
-       u8         action[0x60];
-
-       u8         max_value_0[0x20];
-       u8         min_value_0[0x20];
-       u8         max_value_1[0x20];
-       u8         min_value_1[0x20];
-       u8         max_value_2[0x20];
-       u8         min_value_2[0x20];
-       u8         max_value_3[0x20];
-       u8         min_value_3[0x20];
-};
-
-struct mlx5_ifc_ste_eth_l2_src_v1_bits {
-       u8         reserved_at_0[0x1];
-       u8         sx_sniffer[0x1];
-       u8         functional_loopback[0x1];
-       u8         ip_fragmented[0x1];
-       u8         qp_type[0x2];
-       u8         encapsulation_type[0x2];
-       u8         port[0x2];
-       u8         l3_type[0x2];
-       u8         l4_type[0x2];
-       u8         first_vlan_qualifier[0x2];
-       u8         first_priority[0x3];
-       u8         first_cfi[0x1];
-       u8         first_vlan_id[0xc];
-
-       u8         smac_47_16[0x20];
-
-       u8         smac_15_0[0x10];
-       u8         l3_ethertype[0x10];
-
-       u8         reserved_at_60[0x6];
-       u8         tcp_syn[0x1];
-       u8         reserved_at_67[0x3];
-       u8         force_loopback[0x1];
-       u8         l2_ok[0x1];
-       u8         l3_ok[0x1];
-       u8         l4_ok[0x1];
-       u8         second_vlan_qualifier[0x2];
-
-       u8         second_priority[0x3];
-       u8         second_cfi[0x1];
-       u8         second_vlan_id[0xc];
-};
-
-struct mlx5_ifc_ste_eth_l2_dst_v1_bits {
-       u8         reserved_at_0[0x1];
-       u8         sx_sniffer[0x1];
-       u8         functional_lb[0x1];
-       u8         ip_fragmented[0x1];
-       u8         qp_type[0x2];
-       u8         encapsulation_type[0x2];
-       u8         port[0x2];
-       u8         l3_type[0x2];
-       u8         l4_type[0x2];
-       u8         first_vlan_qualifier[0x2];
-       u8         first_priority[0x3];
-       u8         first_cfi[0x1];
-       u8         first_vlan_id[0xc];
-
-       u8         dmac_47_16[0x20];
-
-       u8         dmac_15_0[0x10];
-       u8         l3_ethertype[0x10];
-
-       u8         reserved_at_60[0x6];
-       u8         tcp_syn[0x1];
-       u8         reserved_at_67[0x3];
-       u8         force_lb[0x1];
-       u8         l2_ok[0x1];
-       u8         l3_ok[0x1];
-       u8         l4_ok[0x1];
-       u8         second_vlan_qualifier[0x2];
-       u8         second_priority[0x3];
-       u8         second_cfi[0x1];
-       u8         second_vlan_id[0xc];
-};
-
-struct mlx5_ifc_ste_eth_l2_src_dst_v1_bits {
-       u8         dmac_47_16[0x20];
-
-       u8         smac_47_16[0x20];
-
-       u8         dmac_15_0[0x10];
-       u8         reserved_at_50[0x2];
-       u8         functional_lb[0x1];
-       u8         reserved_at_53[0x5];
-       u8         port[0x2];
-       u8         l3_type[0x2];
-       u8         reserved_at_5c[0x2];
-       u8         first_vlan_qualifier[0x2];
-
-       u8         first_priority[0x3];
-       u8         first_cfi[0x1];
-       u8         first_vlan_id[0xc];
-       u8         smac_15_0[0x10];
-};
-
-struct mlx5_ifc_ste_eth_l3_ipv4_5_tuple_v1_bits {
-       u8         source_address[0x20];
-
-       u8         destination_address[0x20];
-
-       u8         source_port[0x10];
-       u8         destination_port[0x10];
-
-       u8         reserved_at_60[0x4];
-       u8         l4_ok[0x1];
-       u8         l3_ok[0x1];
-       u8         fragmented[0x1];
-       u8         tcp_ns[0x1];
-       u8         tcp_cwr[0x1];
-       u8         tcp_ece[0x1];
-       u8         tcp_urg[0x1];
-       u8         tcp_ack[0x1];
-       u8         tcp_psh[0x1];
-       u8         tcp_rst[0x1];
-       u8         tcp_syn[0x1];
-       u8         tcp_fin[0x1];
-       u8         dscp[0x6];
-       u8         ecn[0x2];
-       u8         protocol[0x8];
-};
-
-struct mlx5_ifc_ste_eth_l2_tnl_v1_bits {
-       u8         l2_tunneling_network_id[0x20];
-
-       u8         dmac_47_16[0x20];
-
-       u8         dmac_15_0[0x10];
-       u8         l3_ethertype[0x10];
-
-       u8         reserved_at_60[0x3];
-       u8         ip_fragmented[0x1];
-       u8         reserved_at_64[0x2];
-       u8         encp_type[0x2];
-       u8         reserved_at_68[0x2];
-       u8         l3_type[0x2];
-       u8         l4_type[0x2];
-       u8         first_vlan_qualifier[0x2];
-       u8         first_priority[0x3];
-       u8         first_cfi[0x1];
-       u8         first_vlan_id[0xc];
-};
-
-struct mlx5_ifc_ste_eth_l3_ipv4_misc_v1_bits {
-       u8         identification[0x10];
-       u8         flags[0x3];
-       u8         fragment_offset[0xd];
-
-       u8         total_length[0x10];
-       u8         checksum[0x10];
-
-       u8         version[0x4];
-       u8         ihl[0x4];
-       u8         time_to_live[0x8];
-       u8         reserved_at_50[0x10];
-
-       u8         reserved_at_60[0x1c];
-       u8         voq_internal_prio[0x4];
-};
-
-struct mlx5_ifc_ste_eth_l4_v1_bits {
-       u8         ipv6_version[0x4];
-       u8         reserved_at_4[0x4];
-       u8         dscp[0x6];
-       u8         ecn[0x2];
-       u8         ipv6_hop_limit[0x8];
-       u8         protocol[0x8];
-
-       u8         src_port[0x10];
-       u8         dst_port[0x10];
-
-       u8         first_fragment[0x1];
-       u8         reserved_at_41[0xb];
-       u8         flow_label[0x14];
-
-       u8         tcp_data_offset[0x4];
-       u8         l4_ok[0x1];
-       u8         l3_ok[0x1];
-       u8         fragmented[0x1];
-       u8         tcp_ns[0x1];
-       u8         tcp_cwr[0x1];
-       u8         tcp_ece[0x1];
-       u8         tcp_urg[0x1];
-       u8         tcp_ack[0x1];
-       u8         tcp_psh[0x1];
-       u8         tcp_rst[0x1];
-       u8         tcp_syn[0x1];
-       u8         tcp_fin[0x1];
-       u8         ipv6_paylen[0x10];
-};
-
-struct mlx5_ifc_ste_eth_l4_misc_v1_bits {
-       u8         window_size[0x10];
-       u8         urgent_pointer[0x10];
-
-       u8         ack_num[0x20];
-
-       u8         seq_num[0x20];
-
-       u8         length[0x10];
-       u8         checksum[0x10];
-};
-
-struct mlx5_ifc_ste_mpls_v1_bits {
-       u8         reserved_at_0[0x15];
-       u8         mpls_ok[0x1];
-       u8         mpls4_s_bit[0x1];
-       u8         mpls4_qualifier[0x1];
-       u8         mpls3_s_bit[0x1];
-       u8         mpls3_qualifier[0x1];
-       u8         mpls2_s_bit[0x1];
-       u8         mpls2_qualifier[0x1];
-       u8         mpls1_s_bit[0x1];
-       u8         mpls1_qualifier[0x1];
-       u8         mpls0_s_bit[0x1];
-       u8         mpls0_qualifier[0x1];
-
-       u8         mpls0_label[0x14];
-       u8         mpls0_exp[0x3];
-       u8         mpls0_s_bos[0x1];
-       u8         mpls0_ttl[0x8];
-
-       u8         mpls1_label[0x20];
-
-       u8         mpls2_label[0x20];
-};
-
-struct mlx5_ifc_ste_gre_v1_bits {
-       u8         gre_c_present[0x1];
-       u8         reserved_at_1[0x1];
-       u8         gre_k_present[0x1];
-       u8         gre_s_present[0x1];
-       u8         strict_src_route[0x1];
-       u8         recur[0x3];
-       u8         flags[0x5];
-       u8         version[0x3];
-       u8         gre_protocol[0x10];
-
-       u8         reserved_at_20[0x20];
-
-       u8         gre_key_h[0x18];
-       u8         gre_key_l[0x8];
-
-       u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_ste_src_gvmi_qp_v1_bits {
-       u8         loopback_synd[0x8];
-       u8         reserved_at_8[0x7];
-       u8         functional_lb[0x1];
-       u8         source_gvmi[0x10];
-
-       u8         force_lb[0x1];
-       u8         reserved_at_21[0x1];
-       u8         source_is_requestor[0x1];
-       u8         reserved_at_23[0x5];
-       u8         source_qp[0x18];
-
-       u8         reserved_at_40[0x20];
-
-       u8         reserved_at_60[0x20];
-};
-
-struct mlx5_ifc_ste_icmp_v1_bits {
-       u8         icmp_payload_data[0x20];
-
-       u8         icmp_header_data[0x20];
-
-       u8         icmp_type[0x8];
-       u8         icmp_code[0x8];
-       u8         reserved_at_50[0x10];
-
-       u8         reserved_at_60[0x20];
-};
-
-#endif /* MLX5_IFC_DR_STE_V1_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
deleted file mode 100644 (file)
index 3ac7dc6..0000000
+++ /dev/null
@@ -1,196 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
-/* Copyright (c) 2019, Mellanox Technologies */
-
-#ifndef _MLX5DR_H_
-#define _MLX5DR_H_
-
-struct mlx5dr_domain;
-struct mlx5dr_table;
-struct mlx5dr_matcher;
-struct mlx5dr_rule;
-struct mlx5dr_action;
-
-enum mlx5dr_domain_type {
-       MLX5DR_DOMAIN_TYPE_NIC_RX,
-       MLX5DR_DOMAIN_TYPE_NIC_TX,
-       MLX5DR_DOMAIN_TYPE_FDB,
-};
-
-enum mlx5dr_domain_sync_flags {
-       MLX5DR_DOMAIN_SYNC_FLAGS_SW = 1 << 0,
-       MLX5DR_DOMAIN_SYNC_FLAGS_HW = 1 << 1,
-};
-
-enum mlx5dr_action_reformat_type {
-       DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2,
-       DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2,
-       DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2,
-       DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3,
-       DR_ACTION_REFORMAT_TYP_INSERT_HDR,
-       DR_ACTION_REFORMAT_TYP_REMOVE_HDR,
-};
-
-struct mlx5dr_match_parameters {
-       size_t match_sz;
-       u64 *match_buf; /* Device spec format */
-};
-
-struct mlx5dr_action_dest {
-       struct mlx5dr_action *dest;
-       struct mlx5dr_action *reformat;
-};
-
-struct mlx5dr_domain *
-mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
-
-int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
-
-int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
-
-void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
-                           struct mlx5dr_domain *peer_dmn,
-                           u16 peer_vhca_id);
-
-struct mlx5dr_table *
-mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,
-                   u16 uid);
-
-struct mlx5dr_table *
-mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft);
-
-int mlx5dr_table_destroy(struct mlx5dr_table *table);
-
-u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
-
-struct mlx5dr_matcher *
-mlx5dr_matcher_create(struct mlx5dr_table *table,
-                     u32 priority,
-                     u8 match_criteria_enable,
-                     struct mlx5dr_match_parameters *mask);
-
-int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher);
-
-struct mlx5dr_rule *
-mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
-                  struct mlx5dr_match_parameters *value,
-                  size_t num_actions,
-                  struct mlx5dr_action *actions[],
-                  u32 flow_source);
-
-int mlx5dr_rule_destroy(struct mlx5dr_rule *rule);
-
-int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
-                                struct mlx5dr_action *action);
-
-struct mlx5dr_action *
-mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num);
-
-struct mlx5dr_action *
-mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
-
-struct mlx5dr_action *
-mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
-                                       struct mlx5_flow_table *ft);
-
-struct mlx5dr_action *
-mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
-                               u16 vport, u8 vhca_id_valid,
-                               u16 vhca_id);
-
-struct mlx5dr_action *
-mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
-                                  struct mlx5dr_action_dest *dests,
-                                  u32 num_of_dests,
-                                  bool ignore_flow_level,
-                                  u32 flow_source);
-
-struct mlx5dr_action *mlx5dr_action_create_drop(void);
-
-struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value);
-
-struct mlx5dr_action *
-mlx5dr_action_create_flow_sampler(struct mlx5dr_domain *dmn, u32 sampler_id);
-
-struct mlx5dr_action *
-mlx5dr_action_create_flow_counter(u32 counter_id);
-
-struct mlx5dr_action *
-mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
-                                    enum mlx5dr_action_reformat_type reformat_type,
-                                    u8 reformat_param_0,
-                                    u8 reformat_param_1,
-                                    size_t data_sz,
-                                    void *data);
-
-struct mlx5dr_action *
-mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain,
-                                  u32 flags,
-                                  size_t actions_sz,
-                                  __be64 actions[]);
-
-struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void);
-
-struct mlx5dr_action *
-mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr);
-
-struct mlx5dr_action *
-mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
-                        u32 obj_id,
-                        u8 return_reg_id,
-                        u8 aso_type,
-                        u8 init_color,
-                        u8 meter_id);
-
-struct mlx5dr_action *
-mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn,
-                                     u32 field,
-                                     struct mlx5_flow_table *hit_ft,
-                                     struct mlx5_flow_table *miss_ft,
-                                     u32 min,
-                                     u32 max);
-
-int mlx5dr_action_destroy(struct mlx5dr_action *action);
-
-u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action);
-
-static inline bool
-mlx5dr_is_supported(struct mlx5_core_dev *dev)
-{
-       return MLX5_CAP_GEN(dev, roce) &&
-              (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
-               (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
-                (MLX5_CAP_GEN(dev, steering_format_version) <=
-                 MLX5_STEERING_FORMAT_CONNECTX_7)));
-}
-
-/* buddy functions & structure */
-
-struct mlx5dr_icm_mr;
-
-struct mlx5dr_icm_buddy_mem {
-       unsigned long           **bitmap;
-       unsigned int            *num_free;
-       u32                     max_order;
-       struct list_head        list_node;
-       struct mlx5dr_icm_mr    *icm_mr;
-       struct mlx5dr_icm_pool  *pool;
-
-       /* Amount of memory in used chunks - HW may be accessing this memory */
-       u64                     used_memory;
-
-       /* Memory optimisation */
-       struct mlx5dr_ste       *ste_arr;
-       struct list_head        *miss_list;
-       u8                      *hw_ste_arr;
-};
-
-int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
-                     unsigned int max_order);
-void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy);
-int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy,
-                          unsigned int order,
-                          unsigned int *segment);
-void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy,
-                          unsigned int seg, unsigned int order);
-
-#endif /* _MLX5DR_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_action.c
new file mode 100644 (file)
index 0000000..2ebb61e
--- /dev/null
@@ -0,0 +1,2245 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+#include "dr_ste.h"
+
+enum dr_action_domain {
+       DR_ACTION_DOMAIN_NIC_INGRESS,
+       DR_ACTION_DOMAIN_NIC_EGRESS,
+       DR_ACTION_DOMAIN_FDB_INGRESS,
+       DR_ACTION_DOMAIN_FDB_EGRESS,
+       DR_ACTION_DOMAIN_MAX,
+};
+
+enum dr_action_valid_state {
+       DR_ACTION_STATE_ERR,
+       DR_ACTION_STATE_NO_ACTION,
+       DR_ACTION_STATE_ENCAP,
+       DR_ACTION_STATE_DECAP,
+       DR_ACTION_STATE_MODIFY_HDR,
+       DR_ACTION_STATE_POP_VLAN,
+       DR_ACTION_STATE_PUSH_VLAN,
+       DR_ACTION_STATE_NON_TERM,
+       DR_ACTION_STATE_TERM,
+       DR_ACTION_STATE_ASO,
+       DR_ACTION_STATE_MAX,
+};
+
+static const char * const action_type_to_str[] = {
+       [DR_ACTION_TYP_TNL_L2_TO_L2] = "DR_ACTION_TYP_TNL_L2_TO_L2",
+       [DR_ACTION_TYP_L2_TO_TNL_L2] = "DR_ACTION_TYP_L2_TO_TNL_L2",
+       [DR_ACTION_TYP_TNL_L3_TO_L2] = "DR_ACTION_TYP_TNL_L3_TO_L2",
+       [DR_ACTION_TYP_L2_TO_TNL_L3] = "DR_ACTION_TYP_L2_TO_TNL_L3",
+       [DR_ACTION_TYP_DROP] = "DR_ACTION_TYP_DROP",
+       [DR_ACTION_TYP_QP] = "DR_ACTION_TYP_QP",
+       [DR_ACTION_TYP_FT] = "DR_ACTION_TYP_FT",
+       [DR_ACTION_TYP_CTR] = "DR_ACTION_TYP_CTR",
+       [DR_ACTION_TYP_TAG] = "DR_ACTION_TYP_TAG",
+       [DR_ACTION_TYP_MODIFY_HDR] = "DR_ACTION_TYP_MODIFY_HDR",
+       [DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT",
+       [DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN",
+       [DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN",
+       [DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
+       [DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
+       [DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
+       [DR_ACTION_TYP_ASO_FLOW_METER] = "DR_ACTION_TYP_ASO_FLOW_METER",
+       [DR_ACTION_TYP_RANGE] = "DR_ACTION_TYP_RANGE",
+       [DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
+};
+
+static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
+{
+       if (action_id > DR_ACTION_TYP_MAX)
+               action_id = DR_ACTION_TYP_MAX;
+       return action_type_to_str[action_id];
+}
+
+static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev)
+{
+       return (MLX5_CAP_GEN(dev, steering_format_version) < MLX5_STEERING_FORMAT_CONNECTX_6DX ||
+               MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
+               MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table));
+}
+
+static const enum dr_action_valid_state
+next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = {
+       [DR_ACTION_DOMAIN_NIC_INGRESS] = {
+               [DR_ACTION_STATE_NO_ACTION] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_NON_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
+                       [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_TNL_L3_TO_L2]    = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_DECAP] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_ENCAP] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_MODIFY_HDR] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_POP_VLAN] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_PUSH_VLAN] = {
+                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_NON_TERM] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_TAG]             = DR_ACTION_STATE_NON_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
+                       [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_TNL_L3_TO_L2]    = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_ASO] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_TERM] = {
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_TERM,
+               },
+       },
+       [DR_ACTION_DOMAIN_NIC_EGRESS] = {
+               [DR_ACTION_STATE_NO_ACTION] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_DECAP] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_ENCAP] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_MODIFY_HDR] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_POP_VLAN] = {
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_PUSH_VLAN] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_NON_TERM] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_ASO] = {
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ASO,
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+               },
+               [DR_ACTION_STATE_TERM] = {
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_TERM,
+               },
+       },
+       [DR_ACTION_DOMAIN_FDB_INGRESS] = {
+               [DR_ACTION_STATE_NO_ACTION] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
+                       [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_TNL_L3_TO_L2]    = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_DECAP] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_ENCAP] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_QP]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_MODIFY_HDR] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_POP_VLAN] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_PUSH_VLAN] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_NON_TERM] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
+                       [DR_ACTION_TYP_TNL_L2_TO_L2]    = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_TNL_L3_TO_L2]    = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_ASO] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_TERM] = {
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_TERM,
+               },
+       },
+       [DR_ACTION_DOMAIN_FDB_EGRESS] = {
+               [DR_ACTION_STATE_NO_ACTION] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_DECAP] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_ENCAP] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_MODIFY_HDR] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_POP_VLAN] = {
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_PUSH_VLAN] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_NON_TERM] = {
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_SAMPLER]         = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_NON_TERM,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_INSERT_HDR]      = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_REMOVE_HDR]      = DR_ACTION_STATE_DECAP,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_POP_VLAN]        = DR_ACTION_STATE_POP_VLAN,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_ASO_FLOW_METER]  = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_ASO] = {
+                       [DR_ACTION_TYP_L2_TO_TNL_L2]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_L2_TO_TNL_L3]    = DR_ACTION_STATE_ENCAP,
+                       [DR_ACTION_TYP_MODIFY_HDR]      = DR_ACTION_STATE_MODIFY_HDR,
+                       [DR_ACTION_TYP_PUSH_VLAN]       = DR_ACTION_STATE_PUSH_VLAN,
+                       [DR_ACTION_TYP_DROP]            = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_FT]              = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_RANGE]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_VPORT]           = DR_ACTION_STATE_TERM,
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_ASO,
+               },
+               [DR_ACTION_STATE_TERM] = {
+                       [DR_ACTION_TYP_CTR]             = DR_ACTION_STATE_TERM,
+               },
+       },
+};
+
+/* Translate a user-facing reformat type (mlx5dr_action_reformat_type)
+ * into the corresponding internal SW steering action type.
+ * Returns 0 on success, -EINVAL for an unknown reformat type.
+ */
+static int
+dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type,
+                                 enum mlx5dr_action_type *action_type)
+{
+       switch (reformat_type) {
+       case DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2:
+               *action_type = DR_ACTION_TYP_TNL_L2_TO_L2;
+               break;
+       case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2:
+               *action_type = DR_ACTION_TYP_L2_TO_TNL_L2;
+               break;
+       case DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2:
+               *action_type = DR_ACTION_TYP_TNL_L3_TO_L2;
+               break;
+       case DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3:
+               *action_type = DR_ACTION_TYP_L2_TO_TNL_L3;
+               break;
+       case DR_ACTION_REFORMAT_TYP_INSERT_HDR:
+               *action_type = DR_ACTION_TYP_INSERT_HDR;
+               break;
+       case DR_ACTION_REFORMAT_TYP_REMOVE_HDR:
+               *action_type = DR_ACTION_TYP_REMOVE_HDR;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Apply the actions on the rule STE array starting from the last_ste.
+ * Actions might require more than one STE, new_num_stes will return
+ * the new size of the STEs array, rule with actions.
+ */
+static void dr_actions_apply(struct mlx5dr_domain *dmn,
+                            enum mlx5dr_domain_nic_type nic_type,
+                            u8 *action_type_set,
+                            u8 *last_ste,
+                            struct mlx5dr_ste_actions_attr *attr,
+                            u32 *new_num_stes)
+{
+       struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
+       u32 added_stes = 0;
+
+       /* RX and TX rules are built by separate STE-context helpers */
+       if (nic_type == DR_DOMAIN_NIC_TYPE_RX)
+               mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set,
+                                         last_ste, attr, &added_stes);
+       else
+               mlx5dr_ste_set_actions_tx(ste_ctx, dmn, action_type_set,
+                                         last_ste, attr, &added_stes);
+
+       /* account for any extra STEs the actions required */
+       *new_num_stes += added_stes;
+}
+
+/* Map (domain type, nic type) to the action domain used as the first
+ * index into the next_action_state state-machine table.
+ * NIC RX/TX domains map directly; the FDB domain is split into
+ * ingress/egress according to the nic type.
+ * Unknown domain types trigger a WARN and return DR_ACTION_DOMAIN_MAX.
+ */
+static enum dr_action_domain
+dr_action_get_action_domain(enum mlx5dr_domain_type domain,
+                           enum mlx5dr_domain_nic_type nic_type)
+{
+       switch (domain) {
+       case MLX5DR_DOMAIN_TYPE_NIC_RX:
+               return DR_ACTION_DOMAIN_NIC_INGRESS;
+       case MLX5DR_DOMAIN_TYPE_NIC_TX:
+               return DR_ACTION_DOMAIN_NIC_EGRESS;
+       case MLX5DR_DOMAIN_TYPE_FDB:
+               if (nic_type == DR_DOMAIN_NIC_TYPE_RX)
+                       return DR_ACTION_DOMAIN_FDB_INGRESS;
+               return DR_ACTION_DOMAIN_FDB_EGRESS;
+       default:
+               WARN_ON(true);
+               return DR_ACTION_DOMAIN_MAX;
+       }
+}
+
+/* Advance the per-domain action state machine: given the current state
+ * in *state and the next action type, look up the resulting state in
+ * next_action_state. On success *state is updated and 0 is returned;
+ * an invalid transition (DR_ACTION_STATE_ERR, i.e. the zero-initialized
+ * table entry) returns -EOPNOTSUPP.
+ */
+static
+int dr_action_validate_and_get_next_state(enum dr_action_domain action_domain,
+                                         u32 action_type,
+                                         u32 *state)
+{
+       u32 cur_state = *state;
+
+       /* Check action state machine is valid */
+       *state = next_action_state[action_domain][cur_state][action_type];
+
+       if (*state == DR_ACTION_STATE_ERR)
+               return -EOPNOTSUPP;
+
+       return 0;
+}
+
+/* Resolve the final ICM address for a destination when checksum
+ * recalculation is required (used with modify-TTL on devices lacking
+ * HW checksum recalc - see dr_action_modify_ttl_adjust()).
+ * Only FT and VPORT destinations are handled; any other destination
+ * type leaves *final_icm_addr untouched and returns 0.
+ */
+static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
+                                     struct mlx5dr_action *dest_action,
+                                     u64 *final_icm_addr)
+{
+       int ret;
+
+       switch (dest_action->action_type) {
+       case DR_ACTION_TYP_FT:
+               /* Allow destination flow table only if table is a terminating
+                * table, since there is an *assumption* that in such case FW
+                * will recalculate the CS.
+                */
+               if (dest_action->dest_tbl->is_fw_tbl) {
+                       *final_icm_addr = dest_action->dest_tbl->fw_tbl.rx_icm_addr;
+               } else {
+                       mlx5dr_dbg(dmn,
+                                  "Destination FT should be terminating when modify TTL is used\n");
+                       return -EINVAL;
+               }
+               break;
+
+       case DR_ACTION_TYP_VPORT:
+               /* If destination is vport we will get the FW flow table
+                * that recalculates the CS and forwards to the vport.
+                */
+               ret = mlx5dr_domain_get_recalc_cs_ft_addr(dest_action->vport->dmn,
+                                                         dest_action->vport->caps->num,
+                                                         final_icm_addr);
+               if (ret) {
+                       mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
+                       return ret;
+               }
+               break;
+
+       default:
+               break;
+       }
+
+       return 0;
+}
+
+/* Adjust the modify-TTL action for devices that can't recalculate the
+ * checksum in HW. No adjustment is needed when the device supports
+ * checksum recalc or for TX rules. Otherwise, for RX rules: either the
+ * modify-TTL HW action is dropped (when fdb_ipv4_ttl_modify cap is not
+ * set), or - for FDB domains - *recalc_cs_required is set so that a FW
+ * table is used to recalculate the checksum.
+ */
+static void dr_action_modify_ttl_adjust(struct mlx5dr_domain *dmn,
+                                       struct mlx5dr_ste_actions_attr *attr,
+                                       bool rx_rule,
+                                       bool *recalc_cs_required)
+{
+       *recalc_cs_required = false;
+
+       /* if device supports csum recalculation - no adjustment needed */
+       if (mlx5dr_ste_supp_ttl_cs_recalc(&dmn->info.caps))
+               return;
+
+       /* no adjustment needed on TX rules */
+       if (!rx_rule)
+               return;
+
+       if (!MLX5_CAP_ESW_FLOWTABLE(dmn->mdev, fdb_ipv4_ttl_modify)) {
+               /* Ignore the modify TTL action.
+                * It is always kept as last HW action.
+                */
+               attr->modify_actions--;
+               return;
+       }
+
+       if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
+               /* Due to a HW bug on some devices, modifying TTL on RX flows
+                * will cause an incorrect checksum calculation. In such cases
+                * we will use a FW table to recalculate the checksum.
+                */
+               *recalc_cs_required = true;
+}
+
+/* Dump the action sequence (name and numeric type of each action up to
+ * and including last_idx) to the error log - used as a diagnostic when
+ * an action sequence is rejected.
+ */
+static void dr_action_print_sequence(struct mlx5dr_domain *dmn,
+                                    struct mlx5dr_action *actions[],
+                                    int last_idx)
+{
+       int i;
+
+       for (i = 0; i <= last_idx; i++)
+               mlx5dr_err(dmn, "< %s (%d) > ",
+                          dr_action_id_to_str(actions[i]->action_type),
+                          actions[i]->action_type);
+}
+
+/* Get the final ICM address of a FW-owned destination table.
+ * The table's RX/TX ICM roots are queried from FW on first use and
+ * cached in dest_tbl->fw_tbl (rx_icm_addr == 0 means not yet queried).
+ * The RX or TX root is selected according to is_rx_rule.
+ */
+static int dr_action_get_dest_fw_tbl_addr(struct mlx5dr_matcher *matcher,
+                                         struct mlx5dr_action_dest_tbl *dest_tbl,
+                                         bool is_rx_rule,
+                                         u64 *final_icm_addr)
+{
+       struct mlx5dr_cmd_query_flow_table_details output;
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       int ret;
+
+       if (!dest_tbl->fw_tbl.rx_icm_addr) {
+               ret = mlx5dr_cmd_query_flow_table(dmn->mdev,
+                                                 dest_tbl->fw_tbl.type,
+                                                 dest_tbl->fw_tbl.id,
+                                                 &output);
+               if (ret) {
+                       mlx5dr_err(dmn,
+                                  "Failed mlx5_cmd_query_flow_table ret: %d\n",
+                                  ret);
+                       return ret;
+               }
+
+               dest_tbl->fw_tbl.tx_icm_addr = output.sw_owner_icm_root_1;
+               dest_tbl->fw_tbl.rx_icm_addr = output.sw_owner_icm_root_0;
+       }
+
+       *final_icm_addr = is_rx_rule ? dest_tbl->fw_tbl.rx_icm_addr :
+                                      dest_tbl->fw_tbl.tx_icm_addr;
+       return 0;
+}
+
+/* Get the final ICM address of a SW-steering destination table:
+ * the ICM address of the destination table's RX or TX anchor chunk
+ * (selected by is_rx_rule). Fails with -EINVAL if the destination
+ * table belongs to a different domain; connecting to a table at a
+ * lower/same level is allowed but logged at debug level.
+ */
+static int dr_action_get_dest_sw_tbl_addr(struct mlx5dr_matcher *matcher,
+                                         struct mlx5dr_action_dest_tbl *dest_tbl,
+                                         bool is_rx_rule,
+                                         u64 *final_icm_addr)
+{
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       struct mlx5dr_icm_chunk *chunk;
+
+       if (dest_tbl->tbl->dmn != dmn) {
+               mlx5dr_err(dmn,
+                          "Destination table belongs to a different domain\n");
+               return -EINVAL;
+       }
+
+       if (dest_tbl->tbl->level <= matcher->tbl->level) {
+               mlx5_core_dbg_once(dmn->mdev,
+                                  "Connecting table to a lower/same level destination table\n");
+               mlx5dr_dbg(dmn,
+                          "Connecting table at level %d to a destination table at level %d\n",
+                          matcher->tbl->level,
+                          dest_tbl->tbl->level);
+       }
+
+       chunk = is_rx_rule ? dest_tbl->tbl->rx.s_anchor->chunk :
+                            dest_tbl->tbl->tx.s_anchor->chunk;
+
+       *final_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
+       return 0;
+}
+
+/* Dispatch destination-table address resolution to the FW- or SW-table
+ * flavor depending on the destination kind.
+ */
+static int dr_action_get_dest_tbl_addr(struct mlx5dr_matcher *matcher,
+                                      struct mlx5dr_action_dest_tbl *dest_tbl,
+                                      bool is_rx_rule,
+                                      u64 *final_icm_addr)
+{
+       if (!dest_tbl->is_fw_tbl)
+               return dr_action_get_dest_sw_tbl_addr(matcher, dest_tbl,
+                                                     is_rx_rule,
+                                                     final_icm_addr);
+
+       return dr_action_get_dest_fw_tbl_addr(matcher, dest_tbl,
+                                             is_rx_rule,
+                                             final_icm_addr);
+}
+
+#define WITH_VLAN_NUM_HW_ACTIONS 6
+
+/* Walk the rule's ordered action list, validate it against the per-domain
+ * action state machine, accumulate the resulting STE attributes, and apply
+ * them onto the last STE of the rule.
+ * On success *new_hw_ste_arr_sz holds the resulting number of STEs.
+ */
+int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+                                struct mlx5dr_matcher_rx_tx *nic_matcher,
+                                struct mlx5dr_action *actions[],
+                                u32 num_actions,
+                                u8 *ste_arr,
+                                u32 *new_hw_ste_arr_sz)
+{
+       struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+       bool rx_rule = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       u8 action_type_set[DR_ACTION_TYP_MAX] = {};
+       struct mlx5dr_ste_actions_attr attr = {};
+       struct mlx5dr_action *dest_action = NULL;
+       u32 state = DR_ACTION_STATE_NO_ACTION;
+       enum dr_action_domain action_domain;
+       bool recalc_cs_required = false;
+       u8 *last_ste;
+       int i, ret;
+
+       /* Start from the domain defaults; the actions below override these */
+       attr.gvmi = dmn->info.caps.gvmi;
+       attr.hit_gvmi = dmn->info.caps.gvmi;
+       attr.final_icm_addr = nic_dmn->default_icm_addr;
+       action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->type);
+
+       for (i = 0; i < num_actions; i++) {
+               struct mlx5dr_action *action;
+               int max_actions_type = 1;
+               u32 action_type;
+
+               action = actions[i];
+               action_type = action->action_type;
+
+               switch (action_type) {
+               case DR_ACTION_TYP_DROP:
+                       /* hit_gvmi is the top 16 bits of the drop ICM address */
+                       attr.final_icm_addr = nic_dmn->drop_icm_addr;
+                       attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+                       break;
+               case DR_ACTION_TYP_FT:
+                       dest_action = action;
+                       ret = dr_action_get_dest_tbl_addr(matcher, action->dest_tbl,
+                                                         rx_rule, &attr.final_icm_addr);
+                       if (ret)
+                               return ret;
+                       break;
+               case DR_ACTION_TYP_RANGE:
+                       /* Range match resolves both a hit and a miss table */
+                       ret = dr_action_get_dest_tbl_addr(matcher,
+                                                         action->range->hit_tbl_action->dest_tbl,
+                                                         rx_rule, &attr.final_icm_addr);
+                       if (ret)
+                               return ret;
+
+                       ret = dr_action_get_dest_tbl_addr(matcher,
+                                                         action->range->miss_tbl_action->dest_tbl,
+                                                         rx_rule, &attr.range.miss_icm_addr);
+                       if (ret)
+                               return ret;
+
+                       attr.range.definer_id = action->range->definer_id;
+                       attr.range.min = action->range->min;
+                       attr.range.max = action->range->max;
+                       break;
+               case DR_ACTION_TYP_QP:
+                       mlx5dr_info(dmn, "Domain doesn't support QP\n");
+                       return -EOPNOTSUPP;
+               case DR_ACTION_TYP_CTR:
+                       attr.ctr_id = action->ctr->ctr_id +
+                               action->ctr->offset;
+                       break;
+               case DR_ACTION_TYP_TAG:
+                       attr.flow_tag = action->flow_tag->flow_tag;
+                       break;
+               case DR_ACTION_TYP_TNL_L2_TO_L2:
+                       break;
+               case DR_ACTION_TYP_TNL_L3_TO_L2:
+                       /* Pattern/argument based rewrite if available, otherwise
+                        * fall back to the inline rewrite data.
+                        */
+                       if (action->rewrite->ptrn && action->rewrite->arg) {
+                               attr.decap_index = mlx5dr_arg_get_obj_id(action->rewrite->arg);
+                               attr.decap_actions = action->rewrite->ptrn->num_of_actions;
+                               attr.decap_pat_idx = action->rewrite->ptrn->index;
+                       } else {
+                               attr.decap_index = action->rewrite->index;
+                               attr.decap_actions = action->rewrite->num_of_actions;
+                               attr.decap_with_vlan =
+                                       attr.decap_actions == WITH_VLAN_NUM_HW_ACTIONS;
+                               attr.decap_pat_idx = MLX5DR_INVALID_PATTERN_INDEX;
+                       }
+                       break;
+               case DR_ACTION_TYP_MODIFY_HDR:
+                       if (action->rewrite->single_action_opt) {
+                               /* Optimized path: single modify action inlined in the STE */
+                               attr.modify_actions = action->rewrite->num_of_actions;
+                               attr.single_modify_action = action->rewrite->data;
+                       } else {
+                               if (action->rewrite->ptrn && action->rewrite->arg) {
+                                       attr.modify_index =
+                                               mlx5dr_arg_get_obj_id(action->rewrite->arg);
+                                       attr.modify_actions = action->rewrite->ptrn->num_of_actions;
+                                       attr.modify_pat_idx = action->rewrite->ptrn->index;
+                               } else {
+                                       attr.modify_index = action->rewrite->index;
+                                       attr.modify_actions = action->rewrite->num_of_actions;
+                                       attr.modify_pat_idx = MLX5DR_INVALID_PATTERN_INDEX;
+                               }
+                       }
+                       if (action->rewrite->modify_ttl)
+                               dr_action_modify_ttl_adjust(dmn, &attr, rx_rule,
+                                                           &recalc_cs_required);
+                       break;
+               case DR_ACTION_TYP_L2_TO_TNL_L2:
+               case DR_ACTION_TYP_L2_TO_TNL_L3:
+                       if (rx_rule &&
+                           !(dmn->ste_ctx->actions_caps & DR_STE_CTX_ACTION_CAP_RX_ENCAP)) {
+                               mlx5dr_info(dmn, "Device doesn't support Encap on RX\n");
+                               return -EOPNOTSUPP;
+                       }
+                       attr.reformat.size = action->reformat->size;
+                       attr.reformat.id = action->reformat->id;
+                       break;
+               case DR_ACTION_TYP_SAMPLER:
+                       attr.final_icm_addr = rx_rule ? action->sampler->rx_icm_addr :
+                                                       action->sampler->tx_icm_addr;
+                       break;
+               case DR_ACTION_TYP_VPORT:
+                       if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
+                               /* can't go to uplink on RX rule - dropping instead */
+                               attr.final_icm_addr = nic_dmn->drop_icm_addr;
+                               attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+                       } else {
+                               attr.hit_gvmi = action->vport->caps->vhca_gvmi;
+                               dest_action = action;
+                               attr.final_icm_addr = rx_rule ?
+                                                     action->vport->caps->icm_address_rx :
+                                                     action->vport->caps->icm_address_tx;
+                       }
+                       break;
+               case DR_ACTION_TYP_POP_VLAN:
+                       if (!rx_rule && !(dmn->ste_ctx->actions_caps &
+                                         DR_STE_CTX_ACTION_CAP_TX_POP)) {
+                               mlx5dr_dbg(dmn, "Device doesn't support POP VLAN action on TX\n");
+                               return -EOPNOTSUPP;
+                       }
+
+                       max_actions_type = MLX5DR_MAX_VLANS;
+                       attr.vlans.count++;
+                       break;
+               case DR_ACTION_TYP_PUSH_VLAN:
+                       if (rx_rule && !(dmn->ste_ctx->actions_caps &
+                                        DR_STE_CTX_ACTION_CAP_RX_PUSH)) {
+                               mlx5dr_dbg(dmn, "Device doesn't support PUSH VLAN action on RX\n");
+                               return -EOPNOTSUPP;
+                       }
+
+                       max_actions_type = MLX5DR_MAX_VLANS;
+                       if (attr.vlans.count == MLX5DR_MAX_VLANS) {
+                               mlx5dr_dbg(dmn, "Max VLAN push/pop count exceeded\n");
+                               return -EINVAL;
+                       }
+
+                       attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr;
+                       break;
+               case DR_ACTION_TYP_INSERT_HDR:
+               case DR_ACTION_TYP_REMOVE_HDR:
+                       attr.reformat.size = action->reformat->size;
+                       attr.reformat.id = action->reformat->id;
+                       attr.reformat.param_0 = action->reformat->param_0;
+                       attr.reformat.param_1 = action->reformat->param_1;
+                       break;
+               case DR_ACTION_TYP_ASO_FLOW_METER:
+                       attr.aso_flow_meter.obj_id = action->aso->obj_id;
+                       attr.aso_flow_meter.offset = action->aso->offset;
+                       attr.aso_flow_meter.dest_reg_id = action->aso->dest_reg_id;
+                       attr.aso_flow_meter.init_color = action->aso->init_color;
+                       break;
+               default:
+                       mlx5dr_err(dmn, "Unsupported action type %d\n", action_type);
+                       return -EINVAL;
+               }
+
+               /* Check action duplication */
+               if (++action_type_set[action_type] > max_actions_type) {
+                       mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n",
+                                  action_type, max_actions_type);
+                       return -EINVAL;
+               }
+
+               /* Check action state machine is valid */
+               if (dr_action_validate_and_get_next_state(action_domain,
+                                                         action_type,
+                                                         &state)) {
+                       mlx5dr_err(dmn, "Invalid action (gvmi: %d, is_rx: %d) sequence provided:",
+                                  attr.gvmi, rx_rule);
+                       dr_action_print_sequence(dmn, actions, i);
+                       return -EOPNOTSUPP;
+               }
+       }
+
+       /* Actions are applied onto the last STE of the rule's match STEs */
+       *new_hw_ste_arr_sz = nic_matcher->num_of_builders;
+       last_ste = ste_arr + DR_STE_SIZE * (nic_matcher->num_of_builders - 1);
+
+       /* If a TTL modify flagged it, recalc checksum via the destination */
+       if (recalc_cs_required && dest_action) {
+               ret = dr_action_handle_cs_recalc(dmn, dest_action, &attr.final_icm_addr);
+               if (ret) {
+                       mlx5dr_err(dmn,
+                                  "Failed to handle checksum recalculation err %d\n",
+                                  ret);
+                       return ret;
+               }
+       }
+
+       dr_actions_apply(dmn,
+                        nic_dmn->type,
+                        action_type_set,
+                        last_ste,
+                        &attr,
+                        new_hw_ste_arr_sz);
+
+       return 0;
+}
+
+/* Per-type size of the payload allocated right after struct mlx5dr_action
+ * by dr_action_create_generic(); types absent from the table get no extra
+ * payload (zero size).
+ */
+static unsigned int action_size[DR_ACTION_TYP_MAX] = {
+       [DR_ACTION_TYP_TNL_L2_TO_L2] = sizeof(struct mlx5dr_action_reformat),
+       [DR_ACTION_TYP_L2_TO_TNL_L2] = sizeof(struct mlx5dr_action_reformat),
+       [DR_ACTION_TYP_TNL_L3_TO_L2] = sizeof(struct mlx5dr_action_rewrite),
+       [DR_ACTION_TYP_L2_TO_TNL_L3] = sizeof(struct mlx5dr_action_reformat),
+       [DR_ACTION_TYP_FT]           = sizeof(struct mlx5dr_action_dest_tbl),
+       [DR_ACTION_TYP_CTR]          = sizeof(struct mlx5dr_action_ctr),
+       [DR_ACTION_TYP_TAG]          = sizeof(struct mlx5dr_action_flow_tag),
+       [DR_ACTION_TYP_MODIFY_HDR]   = sizeof(struct mlx5dr_action_rewrite),
+       [DR_ACTION_TYP_VPORT]        = sizeof(struct mlx5dr_action_vport),
+       [DR_ACTION_TYP_PUSH_VLAN]    = sizeof(struct mlx5dr_action_push_vlan),
+       [DR_ACTION_TYP_INSERT_HDR]   = sizeof(struct mlx5dr_action_reformat),
+       [DR_ACTION_TYP_REMOVE_HDR]   = sizeof(struct mlx5dr_action_reformat),
+       [DR_ACTION_TYP_SAMPLER]      = sizeof(struct mlx5dr_action_sampler),
+       [DR_ACTION_TYP_ASO_FLOW_METER] = sizeof(struct mlx5dr_action_aso_flow_meter),
+       [DR_ACTION_TYP_RANGE]        = sizeof(struct mlx5dr_action_range),
+};
+
+/* Allocate a refcounted action object with trailing storage for the
+ * type-specific payload (sized by the action_size table).
+ */
+static struct mlx5dr_action *
+dr_action_create_generic(enum mlx5dr_action_type action_type)
+{
+       struct mlx5dr_action *action;
+       int extra_size;
+
+       if (action_type >= DR_ACTION_TYP_MAX)
+               return NULL;
+
+       extra_size = action_size[action_type];
+       action = kzalloc(sizeof(*action) + extra_size, GFP_KERNEL);
+       if (!action)
+               return NULL;
+
+       action->action_type = action_type;
+       refcount_set(&action->refcount, 1);
+       /* type-specific payload lives right after the base struct */
+       action->data = action + 1;
+
+       return action;
+}
+
+/* Create an action that drops matching packets */
+struct mlx5dr_action *mlx5dr_action_create_drop(void)
+{
+       struct mlx5dr_action *action;
+
+       action = dr_action_create_generic(DR_ACTION_TYP_DROP);
+       return action;
+}
+
+/* Create a destination action that forwards to a FW flow table identified
+ * by its table number.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num)
+{
+       struct mlx5dr_action *action = dr_action_create_generic(DR_ACTION_TYP_FT);
+
+       if (!action)
+               return NULL;
+
+       action->dest_tbl->is_fw_tbl = true;
+       action->dest_tbl->fw_tbl.type = FS_FT_FDB;
+       action->dest_tbl->fw_tbl.id = table_num;
+       action->dest_tbl->fw_tbl.dmn = dmn;
+
+       /* the action holds a reference on the domain */
+       refcount_inc(&dmn->refcount);
+
+       return action;
+}
+
+/* Create a destination action that forwards to a SW-steering table; the
+ * action keeps a reference on the table for its lifetime.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_dest_table(struct mlx5dr_table *tbl)
+{
+       struct mlx5dr_action *action;
+
+       /* hold the table while the action exists */
+       refcount_inc(&tbl->refcount);
+
+       action = dr_action_create_generic(DR_ACTION_TYP_FT);
+       if (action) {
+               action->dest_tbl->tbl = tbl;
+               return action;
+       }
+
+       refcount_dec(&tbl->refcount);
+       return NULL;
+}
+
+/* Build a SELECT definer layout that matches only the outer packet-length
+ * field: one DW selector for the length, everything else unused.
+ */
+static void dr_action_range_definer_fill(u16 *format_id,
+                                        u8 *dw_selectors,
+                                        u8 *byte_selectors,
+                                        u8 *match_mask)
+{
+       int idx;
+
+       *format_id = MLX5_IFC_DEFINER_FORMAT_ID_SELECT;
+
+       /* DW selector 0 picks the outer packet length field */
+       dw_selectors[0] = MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN / 4;
+       for (idx = 1; idx < MLX5_IFC_DEFINER_DW_SELECTORS_NUM; idx++)
+               dw_selectors[idx] = MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED;
+
+       for (idx = 0; idx < MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM; idx++)
+               byte_selectors[idx] = MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED;
+
+       /* mask the 16 packet-length bits in the upper half of DW 0 */
+       MLX5_SET(match_definer_match_mask, match_mask,
+                match_dw_0, 0xffffUL << 16);
+}
+
+/* Acquire a packet-length SELECT definer for a range action and store its
+ * id in the action.
+ */
+static int dr_action_create_range_definer(struct mlx5dr_action *action)
+{
+       u8 match_mask[MLX5_FLD_SZ_BYTES(match_definer, match_mask)] = {};
+       u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM] = {};
+       u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM] = {};
+       struct mlx5dr_domain *dmn = action->range->dmn;
+       u32 definer_id;
+       u16 format_id;
+       int err;
+
+       dr_action_range_definer_fill(&format_id, dw_selectors,
+                                    byte_selectors, match_mask);
+
+       err = mlx5dr_definer_get(dmn, format_id,
+                                dw_selectors, byte_selectors,
+                                match_mask, &definer_id);
+       if (err)
+               return err;
+
+       action->range->definer_id = definer_id;
+       return 0;
+}
+
+/* Release the definer acquired by dr_action_create_range_definer() */
+static void dr_action_destroy_range_definer(struct mlx5dr_action *action)
+{
+       mlx5dr_definer_put(action->range->dmn, action->range->definer_id);
+}
+
+/* Create an action that matches a packet-length range: packets whose
+ * length falls within [min, max] go to hit_ft, others to miss_ft.
+ * Requires SELECT definer support; only the PKT_LEN field is supported
+ * and the bounds must fit in 16 bits.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn,
+                                     u32 field,
+                                     struct mlx5_flow_table *hit_ft,
+                                     struct mlx5_flow_table *miss_ft,
+                                     u32 min,
+                                     u32 max)
+{
+       struct mlx5dr_action *action;
+       int ret;
+
+       if (!mlx5dr_supp_match_ranges(dmn->mdev)) {
+               mlx5dr_dbg(dmn, "SELECT definer support is needed for match range\n");
+               return NULL;
+       }
+
+       if (field != MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN ||
+           min > 0xffff || max > 0xffff) {
+               mlx5dr_err(dmn, "Invalid match range parameters\n");
+               return NULL;
+       }
+
+       action = dr_action_create_generic(DR_ACTION_TYP_RANGE);
+       if (!action)
+               return NULL;
+
+       /* Wrap the hit/miss tables in destination actions, choosing the
+        * FW-table or SW-table flavor per each table's ownership.
+        */
+       action->range->hit_tbl_action =
+               mlx5dr_is_fw_table(hit_ft) ?
+                       mlx5dr_action_create_dest_flow_fw_table(dmn, hit_ft) :
+                       mlx5dr_action_create_dest_table(hit_ft->fs_dr_table.dr_table);
+
+       if (!action->range->hit_tbl_action)
+               goto free_action;
+
+       action->range->miss_tbl_action =
+               mlx5dr_is_fw_table(miss_ft) ?
+                       mlx5dr_action_create_dest_flow_fw_table(dmn, miss_ft) :
+                       mlx5dr_action_create_dest_table(miss_ft->fs_dr_table.dr_table);
+
+       if (!action->range->miss_tbl_action)
+               goto free_hit_tbl_action;
+
+       action->range->min = min;
+       action->range->max = max;
+       action->range->dmn = dmn;
+
+       ret = dr_action_create_range_definer(action);
+       if (ret)
+               goto free_miss_tbl_action;
+
+       /* No need to increase refcount on domain for this action,
+        * the hit/miss table actions will do it internally.
+        */
+
+       return action;
+
+free_miss_tbl_action:
+       mlx5dr_action_destroy(action->range->miss_tbl_action);
+free_hit_tbl_action:
+       mlx5dr_action_destroy(action->range->hit_tbl_action);
+free_action:
+       kfree(action);
+
+       return NULL;
+}
+
+/* Create a single destination action that forwards to several destinations
+ * (vports and/or flow tables) by instantiating a FW multi-destination
+ * flow table. FDB domains only.
+ * Every referenced dest/reformat action gets its refcount elevated for the
+ * lifetime of the returned action.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+                                  struct mlx5dr_action_dest *dests,
+                                  u32 num_of_dests,
+                                  bool ignore_flow_level,
+                                  u32 flow_source)
+{
+       struct mlx5dr_cmd_flow_destination_hw_info *hw_dests;
+       struct mlx5dr_action **ref_actions;
+       struct mlx5dr_action *action;
+       bool reformat_req = false;
+       bool is_ft_wire = false;
+       u16 num_dst_ft = 0;
+       u32 num_of_ref = 0;
+       u32 ref_act_cnt;
+       u16 last_dest;
+       int ret;
+       int i;
+
+       if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
+               mlx5dr_err(dmn, "Multiple destination support is for FDB only\n");
+               return NULL;
+       }
+
+       hw_dests = kcalloc(num_of_dests, sizeof(*hw_dests), GFP_KERNEL);
+       if (!hw_dests)
+               return NULL;
+
+       /* each dest may contribute a dest action and a reformat action */
+       if (unlikely(check_mul_overflow(num_of_dests, 2u, &ref_act_cnt)))
+               goto free_hw_dests;
+
+       ref_actions = kcalloc(ref_act_cnt, sizeof(*ref_actions), GFP_KERNEL);
+       if (!ref_actions)
+               goto free_hw_dests;
+
+       for (i = 0; i < num_of_dests; i++) {
+               struct mlx5dr_action *reformat_action = dests[i].reformat;
+               struct mlx5dr_action *dest_action = dests[i].dest;
+
+               ref_actions[num_of_ref++] = dest_action;
+
+               switch (dest_action->action_type) {
+               case DR_ACTION_TYP_VPORT:
+                       hw_dests[i].vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
+                       hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+                       hw_dests[i].vport.num = dest_action->vport->caps->num;
+                       hw_dests[i].vport.vhca_id = dest_action->vport->caps->vhca_gvmi;
+                       if (reformat_action) {
+                               reformat_req = true;
+                               hw_dests[i].vport.reformat_id =
+                                       reformat_action->reformat->id;
+                               ref_actions[num_of_ref++] = reformat_action;
+                               hw_dests[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+                       }
+                       break;
+
+               case DR_ACTION_TYP_FT:
+                       if (num_dst_ft &&
+                           !mlx5dr_action_supp_fwd_fdb_multi_ft(dmn->mdev)) {
+                               mlx5dr_dbg(dmn, "multiple FT destinations not supported\n");
+                               goto free_ref_actions;
+                       }
+                       num_dst_ft++;
+                       hw_dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+                       if (dest_action->dest_tbl->is_fw_tbl) {
+                               hw_dests[i].ft_id = dest_action->dest_tbl->fw_tbl.id;
+                       } else {
+                               hw_dests[i].ft_id = dest_action->dest_tbl->tbl->table_id;
+                               /* remember the wire FT so it can be moved last */
+                               if (dest_action->dest_tbl->is_wire_ft) {
+                                       is_ft_wire = true;
+                                       last_dest = i;
+                               }
+                       }
+                       break;
+
+               default:
+                       mlx5dr_dbg(dmn, "Invalid multiple destinations action\n");
+                       goto free_ref_actions;
+               }
+       }
+
+       /* In multidest, the FW does the iterator in the RX except of the last
+        * one that done in the TX.
+        * So, if one of the ft target is wire, put it at the end of the dest list.
+        */
+       if (is_ft_wire && num_dst_ft > 1)
+               swap(hw_dests[last_dest], hw_dests[num_of_dests - 1]);
+
+       action = dr_action_create_generic(DR_ACTION_TYP_FT);
+       if (!action)
+               goto free_ref_actions;
+
+       ret = mlx5dr_fw_create_md_tbl(dmn,
+                                     hw_dests,
+                                     num_of_dests,
+                                     reformat_req,
+                                     &action->dest_tbl->fw_tbl.id,
+                                     &action->dest_tbl->fw_tbl.group_id,
+                                     ignore_flow_level,
+                                     flow_source);
+       if (ret)
+               goto free_action;
+
+       refcount_inc(&dmn->refcount);
+
+       /* keep the referenced actions alive while this action exists */
+       for (i = 0; i < num_of_ref; i++)
+               refcount_inc(&ref_actions[i]->refcount);
+
+       action->dest_tbl->is_fw_tbl = true;
+       action->dest_tbl->fw_tbl.dmn = dmn;
+       action->dest_tbl->fw_tbl.type = FS_FT_FDB;
+       action->dest_tbl->fw_tbl.ref_actions = ref_actions;
+       action->dest_tbl->fw_tbl.num_of_ref_actions = num_of_ref;
+
+       kfree(hw_dests);
+
+       return action;
+
+free_action:
+       kfree(action);
+free_ref_actions:
+       kfree(ref_actions);
+free_hw_dests:
+       kfree(hw_dests);
+       return NULL;
+}
+
+/* Create a destination action that forwards to an existing FW-owned flow
+ * table; the action holds a reference on the domain.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *dmn,
+                                       struct mlx5_flow_table *ft)
+{
+       struct mlx5dr_action *action;
+
+       action = dr_action_create_generic(DR_ACTION_TYP_FT);
+       if (!action)
+               return NULL;
+
+       /* use 'true' for the bool member, consistent with the other
+        * FW-table action constructors in this file
+        */
+       action->dest_tbl->is_fw_tbl = true;
+       action->dest_tbl->fw_tbl.type = ft->type;
+       action->dest_tbl->fw_tbl.id = ft->id;
+       action->dest_tbl->fw_tbl.dmn = dmn;
+
+       refcount_inc(&dmn->refcount);
+
+       return action;
+}
+
+/* Create an action that attaches a flow counter by its FW counter id */
+struct mlx5dr_action *
+mlx5dr_action_create_flow_counter(u32 counter_id)
+{
+       struct mlx5dr_action *action =
+               dr_action_create_generic(DR_ACTION_TYP_CTR);
+
+       if (!action)
+               return NULL;
+
+       action->ctr->ctr_id = counter_id;
+       return action;
+}
+
+/* Create an action that tags matching flows; only the lower 24 bits of
+ * the given tag value are used.
+ */
+struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value)
+{
+       struct mlx5dr_action *action;
+
+       action = dr_action_create_generic(DR_ACTION_TYP_TAG);
+       if (!action)
+               return NULL;
+
+       /* flow tag is a 24-bit field */
+       action->flow_tag->flow_tag = tag_value & 0xffffff;
+       return action;
+}
+
+/* Create an action that forwards to a flow sampler object, resolving the
+ * sampler's RX/TX ICM addresses from FW first.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_flow_sampler(struct mlx5dr_domain *dmn, u32 sampler_id)
+{
+       struct mlx5dr_action *action;
+       u64 icm_rx, icm_tx;
+
+       if (mlx5dr_cmd_query_flow_sampler(dmn->mdev, sampler_id,
+                                         &icm_rx, &icm_tx))
+               return NULL;
+
+       action = dr_action_create_generic(DR_ACTION_TYP_SAMPLER);
+       if (!action)
+               return NULL;
+
+       action->sampler->dmn = dmn;
+       action->sampler->sampler_id = sampler_id;
+       action->sampler->rx_icm_addr = icm_rx;
+       action->sampler->tx_icm_addr = icm_tx;
+
+       /* the action holds a reference on the domain */
+       refcount_inc(&dmn->refcount);
+       return action;
+}
+
+/* Validate reformat parameters against device capabilities and the domain
+ * type. Returns 0 when valid, -EINVAL otherwise.
+ */
+static int
+dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type,
+                                struct mlx5dr_domain *dmn,
+                                u8 reformat_param_0,
+                                u8 reformat_param_1,
+                                size_t data_sz,
+                                void *data)
+{
+       if (reformat_type == DR_ACTION_TYP_INSERT_HDR) {
+               /* data/size must be consistent and within device limits */
+               if ((!data && data_sz) || (data && !data_sz) ||
+                   MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_size) < data_sz ||
+                   MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_offset) < reformat_param_1) {
+                       mlx5dr_dbg(dmn, "Invalid reformat parameters for INSERT_HDR\n");
+                       goto out_err;
+               }
+       } else if (reformat_type == DR_ACTION_TYP_REMOVE_HDR) {
+               /* removal carries no data; size/offset capped by device caps */
+               if (data ||
+                   MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_size) < data_sz ||
+                   MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_offset) < reformat_param_1) {
+                       mlx5dr_dbg(dmn, "Invalid reformat parameters for REMOVE_HDR\n");
+                       goto out_err;
+               }
+       } else if (reformat_param_0 || reformat_param_1 ||
+                  reformat_type > DR_ACTION_TYP_REMOVE_HDR) {
+               /* all other reformat types take no extra parameters */
+               mlx5dr_dbg(dmn, "Invalid reformat parameters\n");
+               goto out_err;
+       }
+
+       /* FDB domains allow all reformat directions */
+       if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB)
+               return 0;
+
+       /* NIC RX allows only decap; NIC TX allows only encap */
+       if (dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX) {
+               if (reformat_type != DR_ACTION_TYP_TNL_L2_TO_L2 &&
+                   reformat_type != DR_ACTION_TYP_TNL_L3_TO_L2) {
+                       mlx5dr_dbg(dmn, "Action reformat type not support on RX domain\n");
+                       goto out_err;
+               }
+       } else if (dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX) {
+               if (reformat_type != DR_ACTION_TYP_L2_TO_TNL_L2 &&
+                   reformat_type != DR_ACTION_TYP_L2_TO_TNL_L3) {
+                       mlx5dr_dbg(dmn, "Action reformat type not support on TX domain\n");
+                       goto out_err;
+               }
+       }
+
+       return 0;
+
+out_err:
+       return -EINVAL;
+}
+
+/* Perform the type-specific setup of a reformat action: create the FW
+ * reformat context for encap/insert types, or build the SW decap-L3
+ * action list for TNL_L3_TO_L2. Returns 0 on success, -errno otherwise.
+ */
+static int
+dr_action_create_reformat_action(struct mlx5dr_domain *dmn,
+                                u8 reformat_param_0, u8 reformat_param_1,
+                                size_t data_sz, void *data,
+                                struct mlx5dr_action *action)
+{
+       u32 reformat_id;
+       int ret;
+
+       switch (action->action_type) {
+       case DR_ACTION_TYP_L2_TO_TNL_L2:
+       case DR_ACTION_TYP_L2_TO_TNL_L3:
+       {
+               enum mlx5_reformat_ctx_type rt;
+
+               if (action->action_type == DR_ACTION_TYP_L2_TO_TNL_L2)
+                       rt = MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL;
+               else
+                       rt = MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL;
+
+               ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, rt, 0, 0,
+                                                    data_sz, data,
+                                                    &reformat_id);
+               if (ret)
+                       return ret;
+
+               action->reformat->id = reformat_id;
+               action->reformat->size = data_sz;
+               return 0;
+       }
+       case DR_ACTION_TYP_TNL_L2_TO_L2:
+       {
+               /* L2 decap needs no FW context or extra setup */
+               return 0;
+       }
+       case DR_ACTION_TYP_TNL_L3_TO_L2:
+       {
+               u8 *hw_actions;
+
+               hw_actions = kzalloc(DR_ACTION_CACHE_LINE_SIZE, GFP_KERNEL);
+               if (!hw_actions)
+                       return -ENOMEM;
+
+               /* translate the packet reformat data into a HW action list */
+               ret = mlx5dr_ste_set_action_decap_l3_list(dmn->ste_ctx,
+                                                         data, data_sz,
+                                                         hw_actions,
+                                                         DR_ACTION_CACHE_LINE_SIZE,
+                                                         &action->rewrite->num_of_actions);
+               if (ret) {
+                       mlx5dr_dbg(dmn, "Failed creating decap l3 action list\n");
+                       kfree(hw_actions);
+                       return ret;
+               }
+
+               /* action takes ownership of hw_actions from here on */
+               action->rewrite->data = hw_actions;
+               action->rewrite->dmn = dmn;
+
+               ret = mlx5dr_ste_alloc_modify_hdr(action);
+               if (ret) {
+                       mlx5dr_dbg(dmn, "Failed preparing reformat data\n");
+                       kfree(hw_actions);
+                       return ret;
+               }
+               return 0;
+       }
+       case DR_ACTION_TYP_INSERT_HDR:
+               ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev,
+                                                    MLX5_REFORMAT_TYPE_INSERT_HDR,
+                                                    reformat_param_0,
+                                                    reformat_param_1,
+                                                    data_sz, data,
+                                                    &reformat_id);
+               if (ret)
+                       return ret;
+
+               action->reformat->id = reformat_id;
+               action->reformat->size = data_sz;
+               action->reformat->param_0 = reformat_param_0;
+               action->reformat->param_1 = reformat_param_1;
+               return 0;
+       case DR_ACTION_TYP_REMOVE_HDR:
+               /* header removal needs no FW context - id 0 */
+               action->reformat->id = 0;
+               action->reformat->size = data_sz;
+               action->reformat->param_0 = reformat_param_0;
+               action->reformat->param_1 = reformat_param_1;
+               return 0;
+       default:
+               mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type);
+               return -EINVAL;
+       }
+}
+
+#define CVLAN_ETHERTYPE 0x8100
+#define SVLAN_ETHERTYPE 0x88a8
+
+/* Create a pop-VLAN action (removes the outer VLAN tag).
+ * Stateless: no domain reference is taken. Returns NULL on allocation failure.
+ */
+struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void)
+{
+       return dr_action_create_generic(DR_ACTION_TYP_POP_VLAN);
+}
+
+/* Create a push-VLAN action that prepends the given VLAN header.
+ * @dmn:      domain used for error reporting
+ * @vlan_hdr: complete VLAN header in network byte order; its upper 16 bits
+ *            must hold a C-VLAN (0x8100) or S-VLAN (0x88a8) ethertype
+ * Returns the new action, or NULL on bad ethertype / allocation failure.
+ */
+struct mlx5dr_action *mlx5dr_action_create_push_vlan(struct mlx5dr_domain *dmn,
+                                                    __be32 vlan_hdr)
+{
+       struct mlx5dr_action *action;
+       u32 host_vlan_hdr;
+       u16 eth_type;
+
+       host_vlan_hdr = ntohl(vlan_hdr);
+       eth_type = host_vlan_hdr >> 16;
+
+       /* Only standard C-VLAN/S-VLAN tags may be pushed */
+       if (!(eth_type == CVLAN_ETHERTYPE || eth_type == SVLAN_ETHERTYPE)) {
+               mlx5dr_dbg(dmn, "Invalid vlan ethertype\n");
+               return NULL;
+       }
+
+       action = dr_action_create_generic(DR_ACTION_TYP_PUSH_VLAN);
+       if (!action)
+               return NULL;
+
+       action->push_vlan->vlan_hdr = host_vlan_hdr;
+       return action;
+}
+
+/* Create a packet-reformat action (encap/decap/insert-header/remove-header).
+ *
+ * @dmn:              domain the action is created on
+ * @reformat_type:    requested reformat type (mapped to an internal action type)
+ * @reformat_param_0: opaque reformat-specific parameter passed to FW
+ * @reformat_param_1: opaque reformat-specific parameter passed to FW
+ * @data_sz:          size in bytes of @data
+ * @data:             reformat data (e.g. the header to encapsulate)
+ *
+ * Holds a domain reference for the lifetime of the action; the reference is
+ * dropped on every failure path. Returns the action, or NULL on error.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
+                                    enum mlx5dr_action_reformat_type reformat_type,
+                                    u8 reformat_param_0,
+                                    u8 reformat_param_1,
+                                    size_t data_sz,
+                                    void *data)
+{
+       enum mlx5dr_action_type action_type;
+       struct mlx5dr_action *action;
+       int ret;
+
+       /* Pin the domain while the action exists */
+       refcount_inc(&dmn->refcount);
+
+       /* General checks */
+       ret = dr_action_reformat_to_action_type(reformat_type, &action_type);
+       if (ret) {
+               mlx5dr_dbg(dmn, "Invalid reformat_type provided\n");
+               goto dec_ref;
+       }
+
+       ret = dr_action_verify_reformat_params(action_type, dmn,
+                                              reformat_param_0, reformat_param_1,
+                                              data_sz, data);
+       if (ret)
+               goto dec_ref;
+
+       action = dr_action_create_generic(action_type);
+       if (!action)
+               goto dec_ref;
+
+       action->reformat->dmn = dmn;
+
+       ret = dr_action_create_reformat_action(dmn,
+                                              reformat_param_0,
+                                              reformat_param_1,
+                                              data_sz,
+                                              data,
+                                              action);
+       if (ret) {
+               mlx5dr_dbg(dmn, "Failed creating reformat action %d\n", ret);
+               goto free_action;
+       }
+
+       return action;
+
+free_action:
+       kfree(action);
+dec_ref:
+       refcount_dec(&dmn->refcount);
+       return NULL;
+}
+
+/* Convert a SW-format MLX5_ACTION_TYPE_ADD modify action to HW format.
+ *
+ * @dmn:         domain providing the STE context used for the conversion
+ * @sw_action:   SW (PRM-format) action to convert
+ * @hw_action:   out: encoded HW action
+ * @ret_hw_info: out: descriptor of the HW field being modified
+ *
+ * Returns 0 on success, -EINVAL if the SW field has no HW mapping.
+ */
+static int
+dr_action_modify_sw_to_hw_add(struct mlx5dr_domain *dmn,
+                             __be64 *sw_action,
+                             __be64 *hw_action,
+                             const struct mlx5dr_ste_action_modify_field **ret_hw_info)
+{
+       const struct mlx5dr_ste_action_modify_field *hw_action_info;
+       u8 max_length;
+       u16 sw_field;
+       u32 data;
+
+       /* Get SW modify action data */
+       sw_field = MLX5_GET(set_action_in, sw_action, field);
+       data = MLX5_GET(set_action_in, sw_action, data);
+
+       /* Convert SW data to HW modify action format */
+       hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
+       if (!hw_action_info) {
+               mlx5dr_dbg(dmn, "Modify add action invalid field given\n");
+               return -EINVAL;
+       }
+
+       /* ADD is applied to the entire HW field */
+       max_length = hw_action_info->end - hw_action_info->start + 1;
+
+       mlx5dr_ste_set_action_add(dmn->ste_ctx,
+                                 hw_action,
+                                 hw_action_info->hw_field,
+                                 hw_action_info->start,
+                                 max_length,
+                                 data);
+
+       *ret_hw_info = hw_action_info;
+
+       return 0;
+}
+
+/* Convert a SW-format MLX5_ACTION_TYPE_SET modify action to HW format.
+ *
+ * @dmn:         domain providing the STE context used for the conversion
+ * @sw_action:   SW (PRM-format) action to convert
+ * @hw_action:   out: encoded HW action
+ * @ret_hw_info: out: descriptor of the HW field being modified
+ *
+ * Returns 0 on success, -EINVAL on an unknown field or when the requested
+ * length + offset exceed the HW field width.
+ */
+static int
+dr_action_modify_sw_to_hw_set(struct mlx5dr_domain *dmn,
+                             __be64 *sw_action,
+                             __be64 *hw_action,
+                             const struct mlx5dr_ste_action_modify_field **ret_hw_info)
+{
+       const struct mlx5dr_ste_action_modify_field *hw_action_info;
+       u8 offset, length, max_length;
+       u16 sw_field;
+       u32 data;
+
+       /* Get SW modify action data */
+       length = MLX5_GET(set_action_in, sw_action, length);
+       offset = MLX5_GET(set_action_in, sw_action, offset);
+       sw_field = MLX5_GET(set_action_in, sw_action, field);
+       data = MLX5_GET(set_action_in, sw_action, data);
+
+       /* Convert SW data to HW modify action format */
+       hw_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, sw_field);
+       if (!hw_action_info) {
+               mlx5dr_dbg(dmn, "Modify set action invalid field given\n");
+               return -EINVAL;
+       }
+
+       /* PRM defines that length zero specific length of 32bits */
+       length = length ? length : 32;
+
+       max_length = hw_action_info->end - hw_action_info->start + 1;
+
+       if (length + offset > max_length) {
+               mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
+               return -EINVAL;
+       }
+
+       mlx5dr_ste_set_action_set(dmn->ste_ctx,
+                                 hw_action,
+                                 hw_action_info->hw_field,
+                                 hw_action_info->start + offset,
+                                 length,
+                                 data);
+
+       *ret_hw_info = hw_action_info;
+
+       return 0;
+}
+
+/* Convert a SW-format MLX5_ACTION_TYPE_COPY modify action to HW format.
+ *
+ * @dmn:             domain providing the STE context used for the conversion
+ * @sw_action:       SW (PRM-format) action to convert
+ * @hw_action:       out: encoded HW action
+ * @ret_dst_hw_info: out: descriptor of the destination HW field
+ * @ret_src_hw_info: out: descriptor of the source HW field
+ *
+ * Returns 0 on success, -EINVAL on an unknown field or when length + offset
+ * exceed either field's HW width.
+ */
+static int
+dr_action_modify_sw_to_hw_copy(struct mlx5dr_domain *dmn,
+                              __be64 *sw_action,
+                              __be64 *hw_action,
+                              const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
+                              const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
+{
+       u8 src_offset, dst_offset, src_max_length, dst_max_length, length;
+       const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+       const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
+       u16 src_field, dst_field;
+
+       /* Get SW modify action data */
+       src_field = MLX5_GET(copy_action_in, sw_action, src_field);
+       dst_field = MLX5_GET(copy_action_in, sw_action, dst_field);
+       src_offset = MLX5_GET(copy_action_in, sw_action, src_offset);
+       dst_offset = MLX5_GET(copy_action_in, sw_action, dst_offset);
+       length = MLX5_GET(copy_action_in, sw_action, length);
+
+       /* Convert SW data to HW modify action format */
+       hw_src_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, src_field);
+       hw_dst_action_info = mlx5dr_ste_conv_modify_hdr_sw_field(dmn->ste_ctx, dst_field);
+       if (!hw_src_action_info || !hw_dst_action_info) {
+               mlx5dr_dbg(dmn, "Modify copy action invalid field given\n");
+               return -EINVAL;
+       }
+
+       /* PRM defines that length zero specific length of 32bits */
+       length = length ? length : 32;
+
+       src_max_length = hw_src_action_info->end -
+                        hw_src_action_info->start + 1;
+       dst_max_length = hw_dst_action_info->end -
+                        hw_dst_action_info->start + 1;
+
+       /* The copy span must fit inside both the source and destination fields */
+       if (length + src_offset > src_max_length ||
+           length + dst_offset > dst_max_length) {
+               mlx5dr_dbg(dmn, "Modify action length + offset exceeds limit\n");
+               return -EINVAL;
+       }
+
+       mlx5dr_ste_set_action_copy(dmn->ste_ctx,
+                                  hw_action,
+                                  hw_dst_action_info->hw_field,
+                                  hw_dst_action_info->start + dst_offset,
+                                  length,
+                                  hw_src_action_info->hw_field,
+                                  hw_src_action_info->start + src_offset);
+
+       *ret_dst_hw_info = hw_dst_action_info;
+       *ret_src_hw_info = hw_src_action_info;
+
+       return 0;
+}
+
+/* Dispatch a single SW modify action to the per-type SW-to-HW converter.
+ *
+ * @dmn:             domain providing the STE context
+ * @sw_action:       SW (PRM-format) action to convert
+ * @hw_action:       out: encoded HW action (zeroed first)
+ * @ret_dst_hw_info: out: destination HW field descriptor
+ * @ret_src_hw_info: out: source HW field descriptor (COPY only, else NULL)
+ *
+ * Returns 0 on success, -EOPNOTSUPP for unknown action types, or the
+ * converter's error code.
+ */
+static int
+dr_action_modify_sw_to_hw(struct mlx5dr_domain *dmn,
+                         __be64 *sw_action,
+                         __be64 *hw_action,
+                         const struct mlx5dr_ste_action_modify_field **ret_dst_hw_info,
+                         const struct mlx5dr_ste_action_modify_field **ret_src_hw_info)
+{
+       u8 action;
+       int ret;
+
+       *hw_action = 0;
+       /* Only the COPY converter provides a source field descriptor */
+       *ret_src_hw_info = NULL;
+
+       /* Get SW modify action type */
+       action = MLX5_GET(set_action_in, sw_action, action_type);
+
+       switch (action) {
+       case MLX5_ACTION_TYPE_SET:
+               ret = dr_action_modify_sw_to_hw_set(dmn, sw_action,
+                                                   hw_action,
+                                                   ret_dst_hw_info);
+               break;
+
+       case MLX5_ACTION_TYPE_ADD:
+               ret = dr_action_modify_sw_to_hw_add(dmn, sw_action,
+                                                   hw_action,
+                                                   ret_dst_hw_info);
+               break;
+
+       case MLX5_ACTION_TYPE_COPY:
+               ret = dr_action_modify_sw_to_hw_copy(dmn, sw_action,
+                                                    hw_action,
+                                                    ret_dst_hw_info,
+                                                    ret_src_hw_info);
+               break;
+
+       default:
+               mlx5dr_info(dmn, "Unsupported action_type for modify action\n");
+               ret = -EOPNOTSUPP;
+       }
+
+       return ret;
+}
+
+/* Validate a SET action's field against per-direction restrictions:
+ * REG_A may only be set on TX domains, REG_B only on RX domains. A match
+ * clears the corresponding allow_rx/allow_tx flag on the action; if both
+ * end up cleared the whole action list is unusable.
+ *
+ * Returns 0 if the field is allowed, -EINVAL otherwise.
+ */
+static int
+dr_action_modify_check_set_field_limitation(struct mlx5dr_action *action,
+                                           const __be64 *sw_action)
+{
+       u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
+
+       if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+               action->rewrite->allow_rx = 0;
+               if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
+                       mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
+                                  sw_field);
+                       return -EINVAL;
+               }
+       } else if (sw_field == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+               action->rewrite->allow_tx = 0;
+               if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
+                       mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
+                                  sw_field);
+                       return -EINVAL;
+               }
+       }
+
+       if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) {
+               mlx5dr_dbg(dmn, "Modify SET actions not supported on both RX and TX\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Validate an ADD action's field: HW only supports incrementing the
+ * IPv4 TTL, IPv6 hop limit, TCP sequence and TCP ack fields.
+ * Returns 0 if the field is supported, -EINVAL otherwise.
+ */
+static int
+dr_action_modify_check_add_field_limitation(struct mlx5dr_action *action,
+                                           const __be64 *sw_action)
+{
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
+       u16 sw_field = MLX5_GET(set_action_in, sw_action, field);
+
+       switch (sw_field) {
+       case MLX5_ACTION_IN_FIELD_OUT_IP_TTL:
+       case MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT:
+       case MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM:
+       case MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM:
+               return 0;
+       default:
+               mlx5dr_dbg(dmn, "Unsupported field %d for add action\n",
+                          sw_field);
+               return -EINVAL;
+       }
+}
+
+/* Validate a COPY action's fields against per-direction restrictions.
+ * Both the source and destination field are checked with the same rules as
+ * SET: REG_A restricts the action list to TX, REG_B restricts it to RX.
+ *
+ * Returns 0 if both fields are allowed, -EINVAL otherwise.
+ */
+static int
+dr_action_modify_check_copy_field_limitation(struct mlx5dr_action *action,
+                                            const __be64 *sw_action)
+{
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
+       u16 sw_fields[2];
+       int i;
+
+       /* Check src and dst fields with identical rules */
+       sw_fields[0] = MLX5_GET(copy_action_in, sw_action, src_field);
+       sw_fields[1] = MLX5_GET(copy_action_in, sw_action, dst_field);
+
+       for (i = 0; i < 2; i++) {
+               if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_A) {
+                       action->rewrite->allow_rx = 0;
+                       if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_TX) {
+                               mlx5dr_dbg(dmn, "Unsupported field %d for RX/FDB set action\n",
+                                          sw_fields[i]);
+                               return -EINVAL;
+                       }
+               } else if (sw_fields[i] == MLX5_ACTION_IN_FIELD_METADATA_REG_B) {
+                       action->rewrite->allow_tx = 0;
+                       if (dmn->type != MLX5DR_DOMAIN_TYPE_NIC_RX) {
+                               mlx5dr_dbg(dmn, "Unsupported field %d for TX/FDB set action\n",
+                                          sw_fields[i]);
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       if (!action->rewrite->allow_rx && !action->rewrite->allow_tx) {
+               mlx5dr_dbg(dmn, "Modify copy actions not supported on both RX and TX\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Dispatch field-limitation validation to the per-action-type checker.
+ * Returns 0 if the action's fields are allowed, -EINVAL on a rejected
+ * field, or -EOPNOTSUPP for an unknown action type.
+ */
+static int
+dr_action_modify_check_field_limitation(struct mlx5dr_action *action,
+                                       const __be64 *sw_action)
+{
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
+       u8 action_type;
+       int ret;
+
+       action_type = MLX5_GET(set_action_in, sw_action, action_type);
+
+       switch (action_type) {
+       case MLX5_ACTION_TYPE_SET:
+               ret = dr_action_modify_check_set_field_limitation(action,
+                                                                 sw_action);
+               break;
+
+       case MLX5_ACTION_TYPE_ADD:
+               ret = dr_action_modify_check_add_field_limitation(action,
+                                                                 sw_action);
+               break;
+
+       case MLX5_ACTION_TYPE_COPY:
+               ret = dr_action_modify_check_copy_field_limitation(action,
+                                                                  sw_action);
+               break;
+
+       default:
+               mlx5dr_info(dmn, "Unsupported action %d modify action\n",
+                           action_type);
+               ret = -EOPNOTSUPP;
+       }
+
+       return ret;
+}
+
+/* True if this SW modify action targets the IPv4 TTL field */
+static bool
+dr_action_modify_check_is_ttl_modify(const void *sw_action)
+{
+       return MLX5_GET(set_action_in, sw_action, field) ==
+              MLX5_ACTION_IN_FIELD_OUT_IP_TTL;
+}
+
+/* Convert an array of SW modify-header actions into HW actions.
+ *
+ * @action:         modify-header action being built (rewrite state)
+ * @max_hw_actions: capacity of @hw_actions
+ * @num_sw_actions: number of SW actions in @sw_actions
+ * @sw_actions:     SW (PRM-format) actions to convert
+ * @hw_actions:     out: converted HW actions
+ * @num_hw_actions: out: number of valid entries in @hw_actions
+ * @modify_ttl:     out: true if the list modifies the IPv4 TTL field
+ *
+ * A TTL modification is pulled out of its original position and emitted
+ * as the last HW action. HW executes actions in pairs, so a NOP gap is
+ * inserted whenever two consecutive actions touch the same HW field.
+ * Returns 0 on success or a negative errno.
+ */
+static int dr_actions_convert_modify_header(struct mlx5dr_action *action,
+                                           u32 max_hw_actions,
+                                           u32 num_sw_actions,
+                                           __be64 sw_actions[],
+                                           __be64 hw_actions[],
+                                           u32 *num_hw_actions,
+                                           bool *modify_ttl)
+{
+       const struct mlx5dr_ste_action_modify_field *hw_dst_action_info;
+       const struct mlx5dr_ste_action_modify_field *hw_src_action_info;
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
+       __be64 *modify_ttl_sw_action = NULL;
+       int ret, i, hw_idx = 0;
+       __be64 *sw_action;
+       __be64 hw_action;
+       u16 hw_field = 0;
+       u32 l3_type = 0;
+       u32 l4_type = 0;
+
+       *modify_ttl = false;
+
+       action->rewrite->allow_rx = 1;
+       action->rewrite->allow_tx = 1;
+
+       /* The loop runs one extra round when a TTL modify was deferred */
+       for (i = 0; i < num_sw_actions || modify_ttl_sw_action; i++) {
+               /* modify TTL is handled separately, as a last action */
+               if (i == num_sw_actions) {
+                       sw_action = modify_ttl_sw_action;
+                       modify_ttl_sw_action = NULL;
+               } else {
+                       sw_action = &sw_actions[i];
+               }
+
+               ret = dr_action_modify_check_field_limitation(action,
+                                                             sw_action);
+               if (ret)
+                       return ret;
+
+               /* Defer the first TTL modify until after all other actions */
+               if (!(*modify_ttl) &&
+                   dr_action_modify_check_is_ttl_modify(sw_action)) {
+                       modify_ttl_sw_action = sw_action;
+                       *modify_ttl = true;
+                       continue;
+               }
+
+               /* Convert SW action to HW action */
+               ret = dr_action_modify_sw_to_hw(dmn,
+                                               sw_action,
+                                               &hw_action,
+                                               &hw_dst_action_info,
+                                               &hw_src_action_info);
+               if (ret)
+                       return ret;
+
+               /* Due to a HW limitation we cannot modify 2 different L3 types */
+               if (l3_type && hw_dst_action_info->l3_type &&
+                   hw_dst_action_info->l3_type != l3_type) {
+                       mlx5dr_dbg(dmn, "Action list can't support two different L3 types\n");
+                       return -EINVAL;
+               }
+               if (hw_dst_action_info->l3_type)
+                       l3_type = hw_dst_action_info->l3_type;
+
+               /* Due to a HW limitation we cannot modify two different L4 types */
+               if (l4_type && hw_dst_action_info->l4_type &&
+                   hw_dst_action_info->l4_type != l4_type) {
+                       mlx5dr_dbg(dmn, "Action list can't support two different L4 types\n");
+                       return -EINVAL;
+               }
+               if (hw_dst_action_info->l4_type)
+                       l4_type = hw_dst_action_info->l4_type;
+
+               /* HW reads and executes two actions at once this means we
+                * need to create a gap if two actions access the same field
+                */
+               if ((hw_idx % 2) && (hw_field == hw_dst_action_info->hw_field ||
+                                    (hw_src_action_info &&
+                                     hw_field == hw_src_action_info->hw_field))) {
+                       /* Check if after gap insertion the total number of HW
+                        * modify actions doesn't exceeds the limit
+                        */
+                       hw_idx++;
+                       if (hw_idx >= max_hw_actions) {
+                               mlx5dr_dbg(dmn, "Modify header action number exceeds HW limit\n");
+                               return -EINVAL;
+                       }
+               }
+               hw_field = hw_dst_action_info->hw_field;
+
+               hw_actions[hw_idx] = hw_action;
+               hw_idx++;
+       }
+
+       /* if the resulting HW actions list is empty, add NOP action */
+       if (!hw_idx)
+               hw_idx++;
+
+       *num_hw_actions = hw_idx;
+
+       return 0;
+}
+
+/* Build the HW modify-header action list for @action.
+ *
+ * @dmn:        domain the action belongs to
+ * @actions_sz: size in bytes of @actions (multiple of DR_MODIFY_ACTION_SIZE)
+ * @actions:    SW (PRM-format) modify actions
+ * @action:     action object to populate (rewrite state)
+ *
+ * On success the converted HW actions buffer is owned by @action and is
+ * freed by mlx5dr_action_destroy(). For a single HW action on devices that
+ * support it, the single-action optimization skips the modify-hdr chunk
+ * allocation. Returns 0 or a negative errno.
+ */
+static int dr_action_create_modify_action(struct mlx5dr_domain *dmn,
+                                         size_t actions_sz,
+                                         __be64 actions[],
+                                         struct mlx5dr_action *action)
+{
+       u32 max_hw_actions;
+       u32 num_hw_actions;
+       u32 num_sw_actions;
+       __be64 *hw_actions;
+       bool modify_ttl;
+       int ret;
+
+       num_sw_actions = actions_sz / DR_MODIFY_ACTION_SIZE;
+       max_hw_actions = mlx5dr_icm_pool_chunk_size_to_entries(DR_CHUNK_SIZE_16);
+
+       if (num_sw_actions > max_hw_actions) {
+               mlx5dr_dbg(dmn, "Max number of actions %d exceeds limit %d\n",
+                          num_sw_actions, max_hw_actions);
+               return -EINVAL;
+       }
+
+       /* Use kcalloc's count/size form so the size multiplication is
+        * overflow-checked (rather than kcalloc(1, n * size, ...)).
+        */
+       hw_actions = kcalloc(max_hw_actions, DR_MODIFY_ACTION_SIZE, GFP_KERNEL);
+       if (!hw_actions)
+               return -ENOMEM;
+
+       ret = dr_actions_convert_modify_header(action,
+                                              max_hw_actions,
+                                              num_sw_actions,
+                                              actions,
+                                              hw_actions,
+                                              &num_hw_actions,
+                                              &modify_ttl);
+       if (ret)
+               goto free_hw_actions;
+
+       action->rewrite->modify_ttl = modify_ttl;
+       action->rewrite->data = (u8 *)hw_actions;
+       action->rewrite->num_of_actions = num_hw_actions;
+
+       if (num_hw_actions == 1 &&
+           dmn->info.caps.sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) {
+               action->rewrite->single_action_opt = true;
+       } else {
+               action->rewrite->single_action_opt = false;
+               ret = mlx5dr_ste_alloc_modify_hdr(action);
+               if (ret)
+                       goto free_hw_actions;
+       }
+
+       return 0;
+
+free_hw_actions:
+       kfree(hw_actions);
+       return ret;
+}
+
+/* Create a modify-header action from a list of SW (PRM-format) actions.
+ *
+ * @dmn:        domain the action is created on
+ * @flags:      not referenced by this implementation
+ * @actions_sz: size in bytes of @actions; must be a multiple of
+ *              DR_MODIFY_ACTION_SIZE
+ * @actions:    the SW modify actions
+ *
+ * Holds a domain reference for the lifetime of the action; dropped on all
+ * failure paths. Returns the action, or NULL on error.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_modify_header(struct mlx5dr_domain *dmn,
+                                  u32 flags,
+                                  size_t actions_sz,
+                                  __be64 actions[])
+{
+       struct mlx5dr_action *action;
+       int ret = 0;
+
+       refcount_inc(&dmn->refcount);
+
+       if (actions_sz % DR_MODIFY_ACTION_SIZE) {
+               mlx5dr_dbg(dmn, "Invalid modify actions size provided\n");
+               goto dec_ref;
+       }
+
+       action = dr_action_create_generic(DR_ACTION_TYP_MODIFY_HDR);
+       if (!action)
+               goto dec_ref;
+
+       action->rewrite->dmn = dmn;
+
+       ret = dr_action_create_modify_action(dmn,
+                                            actions_sz,
+                                            actions,
+                                            action);
+       if (ret) {
+               mlx5dr_dbg(dmn, "Failed creating modify header action %d\n", ret);
+               goto free_action;
+       }
+
+       return action;
+
+free_action:
+       kfree(action);
+dec_ref:
+       refcount_dec(&dmn->refcount);
+       return NULL;
+}
+
+/* Create a forward-to-vport action (FDB domains only).
+ *
+ * @dmn:           domain the action is created on
+ * @vport:         destination vport number
+ * @vhca_id_valid: whether @vhca_id holds a valid vhca id
+ * @vhca_id:       vhca id of the eswitch owning @vport
+ *
+ * Returns the action, or NULL on error.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
+                               u16 vport, u8 vhca_id_valid,
+                               u16 vhca_id)
+{
+       struct mlx5dr_cmd_vport_cap *vport_cap;
+       struct mlx5dr_domain *vport_dmn;
+       struct mlx5dr_action *action;
+       u8 peer_vport;
+
+       /* A valid vhca_id that differs from ours (on a PF) means the vport
+        * belongs to a peer eswitch; look up its domain in peer_dmn_xa.
+        */
+       peer_vport = vhca_id_valid && mlx5_core_is_pf(dmn->mdev) &&
+               (vhca_id != dmn->info.caps.gvmi);
+       vport_dmn = peer_vport ? xa_load(&dmn->peer_dmn_xa, vhca_id) : dmn;
+       if (!vport_dmn) {
+               mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n");
+               return NULL;
+       }
+
+       if (vport_dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
+               mlx5dr_dbg(dmn, "Domain doesn't support vport actions\n");
+               return NULL;
+       }
+
+       vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, vport);
+       if (!vport_cap) {
+               mlx5dr_err(dmn,
+                          "Failed to get vport 0x%x caps - vport is disabled or invalid\n",
+                          vport);
+               return NULL;
+       }
+
+       action = dr_action_create_generic(DR_ACTION_TYP_VPORT);
+       if (!action)
+               return NULL;
+
+       action->vport->dmn = vport_dmn;
+       action->vport->caps = vport_cap;
+
+       return action;
+}
+
+/* Create an ASO action. Only the flow-meter ASO type is supported.
+ *
+ * @dmn:         domain the action is created on (reference is taken)
+ * @obj_id:      ASO object id
+ * @dest_reg_id: register to receive the meter result
+ * @aso_type:    must be MLX5_EXE_ASO_FLOW_METER
+ * @init_color:  initial meter color; must not exceed
+ *               MLX5_FLOW_METER_COLOR_UNDEFINED
+ * @meter_id:    meter offset within the ASO object
+ *
+ * Returns the action, or NULL on invalid parameters / allocation failure.
+ */
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, u32 obj_id,
+                        u8 dest_reg_id, u8 aso_type,
+                        u8 init_color, u8 meter_id)
+{
+       struct mlx5dr_action *action;
+
+       if (aso_type != MLX5_EXE_ASO_FLOW_METER)
+               return NULL;
+
+       if (init_color > MLX5_FLOW_METER_COLOR_UNDEFINED)
+               return NULL;
+
+       action = dr_action_create_generic(DR_ACTION_TYP_ASO_FLOW_METER);
+       if (!action)
+               return NULL;
+
+       action->aso->obj_id = obj_id;
+       action->aso->offset = meter_id;
+       action->aso->dest_reg_id = dest_reg_id;
+       action->aso->init_color = init_color;
+       action->aso->dmn = dmn;
+
+       refcount_inc(&dmn->refcount);
+
+       return action;
+}
+
+/* Return the FW packet-reformat object id backing a reformat action */
+u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action)
+{
+       return action->reformat->id;
+}
+
+/* Destroy an action and release the resources it holds (domain/table
+ * references, rewrite buffers, FW reformat contexts, etc).
+ *
+ * Returns 0 on success, -EBUSY if the action is still referenced.
+ */
+int mlx5dr_action_destroy(struct mlx5dr_action *action)
+{
+       if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1))
+               return -EBUSY;
+
+       switch (action->action_type) {
+       case DR_ACTION_TYP_FT:
+               if (action->dest_tbl->is_fw_tbl)
+                       refcount_dec(&action->dest_tbl->fw_tbl.dmn->refcount);
+               else
+                       refcount_dec(&action->dest_tbl->tbl->refcount);
+
+               /* Multi-destination FW table: release the referenced actions
+                * and the FW table created for them.
+                */
+               if (action->dest_tbl->is_fw_tbl &&
+                   action->dest_tbl->fw_tbl.num_of_ref_actions) {
+                       struct mlx5dr_action **ref_actions;
+                       int i;
+
+                       ref_actions = action->dest_tbl->fw_tbl.ref_actions;
+                       for (i = 0; i < action->dest_tbl->fw_tbl.num_of_ref_actions; i++)
+                               refcount_dec(&ref_actions[i]->refcount);
+
+                       kfree(ref_actions);
+
+                       mlx5dr_fw_destroy_md_tbl(action->dest_tbl->fw_tbl.dmn,
+                                                action->dest_tbl->fw_tbl.id,
+                                                action->dest_tbl->fw_tbl.group_id);
+               }
+               break;
+       case DR_ACTION_TYP_TNL_L2_TO_L2:
+       case DR_ACTION_TYP_REMOVE_HDR:
+               /* No FW reformat context was created for these types */
+               refcount_dec(&action->reformat->dmn->refcount);
+               break;
+       case DR_ACTION_TYP_TNL_L3_TO_L2:
+               /* L3 decap is implemented via a rewrite action list */
+               mlx5dr_ste_free_modify_hdr(action);
+               kfree(action->rewrite->data);
+               refcount_dec(&action->rewrite->dmn->refcount);
+               break;
+       case DR_ACTION_TYP_L2_TO_TNL_L2:
+       case DR_ACTION_TYP_L2_TO_TNL_L3:
+       case DR_ACTION_TYP_INSERT_HDR:
+               mlx5dr_cmd_destroy_reformat_ctx((action->reformat->dmn)->mdev,
+                                               action->reformat->id);
+               refcount_dec(&action->reformat->dmn->refcount);
+               break;
+       case DR_ACTION_TYP_MODIFY_HDR:
+               /* single-action optimization has no modify-hdr chunk to free */
+               if (!action->rewrite->single_action_opt)
+                       mlx5dr_ste_free_modify_hdr(action);
+               kfree(action->rewrite->data);
+               refcount_dec(&action->rewrite->dmn->refcount);
+               break;
+       case DR_ACTION_TYP_SAMPLER:
+               refcount_dec(&action->sampler->dmn->refcount);
+               break;
+       case DR_ACTION_TYP_ASO_FLOW_METER:
+               refcount_dec(&action->aso->dmn->refcount);
+               break;
+       case DR_ACTION_TYP_RANGE:
+               dr_action_destroy_range_definer(action);
+               mlx5dr_action_destroy(action->range->miss_tbl_action);
+               mlx5dr_action_destroy(action->range->hit_tbl_action);
+               break;
+       default:
+               break;
+       }
+
+       kfree(action);
+       return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_arg.c
new file mode 100644 (file)
index 0000000..01ed644
--- /dev/null
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "dr_types.h"
+
+/* 2^12 = 4K: minimum log allocation granularity for modify-header args */
+#define DR_ICM_MODIFY_HDR_GRANULARITY_4K 12
+
+/* modify-header arg pool */
+enum dr_arg_chunk_size {
+       DR_ARG_CHUNK_SIZE_1,
+       DR_ARG_CHUNK_SIZE_MIN = DR_ARG_CHUNK_SIZE_1, /* keep updated when changing */
+       DR_ARG_CHUNK_SIZE_2,
+       DR_ARG_CHUNK_SIZE_3,
+       DR_ARG_CHUNK_SIZE_4,
+       DR_ARG_CHUNK_SIZE_MAX,
+};
+
+/* argument pool area */
+struct dr_arg_pool {
+       enum dr_arg_chunk_size log_chunk_size; /* chunk size served by this pool */
+       struct mlx5dr_domain *dmn;
+       struct list_head free_list; /* list of free mlx5dr_arg_obj */
+       struct mutex mutex; /* protect arg pool */
+};
+
+/* per-domain manager: one pool for each supported chunk size */
+struct mlx5dr_arg_mgr {
+       struct mlx5dr_domain *dmn;
+       struct dr_arg_pool *pools[DR_ARG_CHUNK_SIZE_MAX];
+};
+
+/* Grow the pool: allocate one devx argument object covering a whole
+ * range and slice it into chunk-sized sub-objects that are linked onto
+ * the pool's free list.
+ * Returns 0 on success, negative errno on failure.
+ */
+static int dr_arg_pool_alloc_objs(struct dr_arg_pool *pool)
+{
+       struct mlx5dr_arg_obj *arg_obj, *tmp_arg;
+       struct list_head cur_list;
+       u16 object_range;
+       int num_of_objects;
+       u32 obj_id = 0;
+       int i, ret;
+
+       INIT_LIST_HEAD(&cur_list);
+
+       /* Clamp the log range size between the device granularity (but at
+        * least 4K) and the device's maximum allocation size.
+        * (The previous unconditional pre-assignment was a dead store.)
+        */
+       object_range =
+               max_t(u32, pool->dmn->info.caps.log_header_modify_argument_granularity,
+                     DR_ICM_MODIFY_HDR_GRANULARITY_4K);
+       object_range =
+               min_t(u32, pool->dmn->info.caps.log_header_modify_argument_max_alloc,
+                     object_range);
+
+       if (pool->log_chunk_size > object_range) {
+               mlx5dr_err(pool->dmn, "Required chunk size (%d) is not supported\n",
+                          pool->log_chunk_size);
+               return -ENOMEM;
+       }
+
+       num_of_objects = (1 << (object_range - pool->log_chunk_size));
+       /* Only one devx object per range */
+       ret = mlx5dr_cmd_create_modify_header_arg(pool->dmn->mdev,
+                                                 object_range,
+                                                 pool->dmn->pdn,
+                                                 &obj_id);
+       if (ret) {
+               mlx5dr_err(pool->dmn, "failed allocating object with range: %d:\n",
+                          object_range);
+               return -EAGAIN;
+       }
+
+       for (i = 0; i < num_of_objects; i++) {
+               arg_obj = kzalloc(sizeof(*arg_obj), GFP_KERNEL);
+               if (!arg_obj) {
+                       ret = -ENOMEM;
+                       goto clean_arg_obj;
+               }
+
+               arg_obj->log_chunk_size = pool->log_chunk_size;
+
+               list_add_tail(&arg_obj->list_node, &cur_list);
+
+               /* all chunks share the devx object id; each is identified
+                * by its offset within the allocated range
+                */
+               arg_obj->obj_id = obj_id;
+               arg_obj->obj_offset = i * (1 << pool->log_chunk_size);
+       }
+       list_splice_tail_init(&cur_list, &pool->free_list);
+
+       return 0;
+
+clean_arg_obj:
+       mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, obj_id);
+       list_for_each_entry_safe(arg_obj, tmp_arg, &cur_list, list_node) {
+               list_del(&arg_obj->list_node);
+               kfree(arg_obj);
+       }
+       return ret;
+}
+
+/* Take one argument object off the pool's free list, replenishing the
+ * pool first if it ran dry. Returns NULL if replenishing failed.
+ */
+static struct mlx5dr_arg_obj *dr_arg_pool_get_arg_obj(struct dr_arg_pool *pool)
+{
+       struct mlx5dr_arg_obj *arg_obj = NULL;
+
+       mutex_lock(&pool->mutex);
+
+       /* refill only when empty; bail out if the refill itself fails */
+       if (list_empty(&pool->free_list) && dr_arg_pool_alloc_objs(pool))
+               goto unlock;
+
+       arg_obj = list_first_entry_or_null(&pool->free_list,
+                                          struct mlx5dr_arg_obj,
+                                          list_node);
+       WARN(!arg_obj, "couldn't get dr arg obj from pool");
+       if (arg_obj)
+               list_del_init(&arg_obj->list_node);
+
+unlock:
+       mutex_unlock(&pool->mutex);
+       return arg_obj;
+}
+
+/* Return an argument object to its pool's free list (LIFO reuse). */
+static void dr_arg_pool_put_arg_obj(struct dr_arg_pool *pool,
+                                   struct mlx5dr_arg_obj *arg_obj)
+{
+       mutex_lock(&pool->mutex);
+       list_add(&arg_obj->list_node, &pool->free_list);
+       mutex_unlock(&pool->mutex);
+}
+
+/* Create a pool of argument objects for the given chunk size and
+ * pre-populate its free list, so the first request can't fail on an
+ * empty pool. Returns the pool, or NULL on failure.
+ */
+static struct dr_arg_pool *dr_arg_pool_create(struct mlx5dr_domain *dmn,
+                                             enum dr_arg_chunk_size chunk_size)
+{
+       struct dr_arg_pool *pool;
+
+       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool)
+               return NULL;
+
+       pool->dmn = dmn;
+
+       INIT_LIST_HEAD(&pool->free_list);
+       mutex_init(&pool->mutex);
+
+       pool->log_chunk_size = chunk_size;
+       if (dr_arg_pool_alloc_objs(pool))
+               goto free_pool;
+
+       return pool;
+
+free_pool:
+       /* pair with mutex_init above on the error path */
+       mutex_destroy(&pool->mutex);
+       kfree(pool);
+
+       return NULL;
+}
+
+/* Free every object on the pool's free list and the pool itself.
+ * The backing devx object is destroyed exactly once, via the chunk at
+ * offset 0 of its range (all chunks of a range share one devx object).
+ */
+static void dr_arg_pool_destroy(struct dr_arg_pool *pool)
+{
+       struct mlx5dr_arg_obj *arg_obj, *tmp_arg;
+
+       list_for_each_entry_safe(arg_obj, tmp_arg, &pool->free_list, list_node) {
+               list_del(&arg_obj->list_node);
+               if (!arg_obj->obj_offset) /* the first in range */
+                       mlx5dr_cmd_destroy_modify_header_arg(pool->dmn->mdev, arg_obj->obj_id);
+               kfree(arg_obj);
+       }
+
+       mutex_destroy(&pool->mutex);
+       kfree(pool);
+}
+
+/* Map a number of modify-header actions to the smallest chunk size that
+ * can hold them (8/16/32/64 actions); DR_ARG_CHUNK_SIZE_MAX means the
+ * request is too large for any pool.
+ */
+static enum dr_arg_chunk_size dr_arg_get_chunk_size(u16 num_of_actions)
+{
+       u16 capacity = 8;
+       int size;
+
+       for (size = DR_ARG_CHUNK_SIZE_MIN; size < DR_ARG_CHUNK_SIZE_MAX; size++) {
+               if (num_of_actions <= capacity)
+                       return size;
+               capacity <<= 1;
+       }
+
+       return DR_ARG_CHUNK_SIZE_MAX;
+}
+
+/* The object's HW id is the devx object id plus the chunk's offset
+ * within the allocated range.
+ */
+u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj)
+{
+       return (arg_obj->obj_id + arg_obj->obj_offset);
+}
+
+/* Allocate an argument object large enough for num_of_actions and
+ * post the action data to HW. Returns NULL on failure (object is
+ * returned to its pool if the HW write failed).
+ */
+struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr,
+                                         u16 num_of_actions,
+                                         u8 *data)
+{
+       u32 size = dr_arg_get_chunk_size(num_of_actions);
+       struct mlx5dr_arg_obj *arg_obj;
+
+       if (size >= DR_ARG_CHUNK_SIZE_MAX)
+               return NULL;
+
+       arg_obj = dr_arg_pool_get_arg_obj(mgr->pools[size]);
+       if (!arg_obj) {
+               mlx5dr_err(mgr->dmn, "Failed allocating args object for modify header\n");
+               return NULL;
+       }
+
+       /* write it into the hw */
+       if (mlx5dr_send_postsend_args(mgr->dmn,
+                                     mlx5dr_arg_get_obj_id(arg_obj),
+                                     num_of_actions, data)) {
+               mlx5dr_err(mgr->dmn, "Failed writing args object\n");
+               mlx5dr_arg_put_obj(mgr, arg_obj);
+               return NULL;
+       }
+
+       return arg_obj;
+}
+
+/* Release an argument object back to the pool matching its chunk size. */
+void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr,
+                       struct mlx5dr_arg_obj *arg_obj)
+{
+       dr_arg_pool_put_arg_obj(mgr->pools[arg_obj->log_chunk_size], arg_obj);
+}
+
+/* Create the argument manager: one pre-populated pool per supported
+ * chunk size. Returns NULL if the domain does not support pattern/arg
+ * objects, or on allocation failure.
+ */
+struct mlx5dr_arg_mgr*
+mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn)
+{
+       struct mlx5dr_arg_mgr *pool_mgr;
+       int i;
+
+       if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
+               return NULL;
+
+       pool_mgr = kzalloc(sizeof(*pool_mgr), GFP_KERNEL);
+       if (!pool_mgr)
+               return NULL;
+
+       pool_mgr->dmn = dmn;
+
+       for (i = 0; i < DR_ARG_CHUNK_SIZE_MAX; i++) {
+               pool_mgr->pools[i] = dr_arg_pool_create(dmn, i);
+               if (!pool_mgr->pools[i])
+                       goto clean_pools;
+       }
+
+       return pool_mgr;
+
+clean_pools:
+       /* unwind only the pools that were successfully created */
+       for (i--; i >= 0; i--)
+               dr_arg_pool_destroy(pool_mgr->pools[i]);
+
+       kfree(pool_mgr);
+       return NULL;
+}
+
+/* Destroy every per-chunk-size pool and the manager itself (NULL-safe). */
+void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr)
+{
+       int idx;
+
+       if (!mgr)
+               return;
+
+       for (idx = 0; idx < DR_ARG_CHUNK_SIZE_MAX; idx++)
+               dr_arg_pool_destroy(mgr->pools[idx]);
+
+       kfree(mgr);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_buddy.c
new file mode 100644 (file)
index 0000000..fe228d9
--- /dev/null
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 - 2008 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2006 - 2007 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include "dr_types.h"
+
+/* Initialize a buddy allocator managing 2^max_order segments: one
+ * bitmap and one free-counter per order; initially only the single
+ * top-order block is marked free.
+ * Returns 0 on success, -ENOMEM on any allocation failure.
+ */
+int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
+                     unsigned int max_order)
+{
+       int i;
+
+       buddy->max_order = max_order;
+
+       INIT_LIST_HEAD(&buddy->list_node);
+
+       buddy->bitmap = kcalloc(buddy->max_order + 1,
+                               sizeof(*buddy->bitmap),
+                               GFP_KERNEL);
+       buddy->num_free = kcalloc(buddy->max_order + 1,
+                                 sizeof(*buddy->num_free),
+                                 GFP_KERNEL);
+
+       if (!buddy->bitmap || !buddy->num_free)
+               goto err_free_all;
+
+       /* Allocating max_order bitmaps, one for each order */
+
+       for (i = 0; i <= buddy->max_order; ++i) {
+               unsigned int size = 1 << (buddy->max_order - i);
+
+               buddy->bitmap[i] = bitmap_zalloc(size, GFP_KERNEL);
+               if (!buddy->bitmap[i])
+                       goto err_out_free_each_bit_per_order;
+       }
+
+       /* In the beginning, we have only one order that is available for
+        * use (the biggest one), so mark the first bit in both bitmaps.
+        */
+
+       bitmap_set(buddy->bitmap[buddy->max_order], 0, 1);
+
+       buddy->num_free[buddy->max_order] = 1;
+
+       return 0;
+
+err_out_free_each_bit_per_order:
+       /* bitmap_free(NULL) is a no-op, so freeing the whole array is safe */
+       for (i = 0; i <= buddy->max_order; ++i)
+               bitmap_free(buddy->bitmap[i]);
+
+err_free_all:
+       kfree(buddy->num_free);
+       kfree(buddy->bitmap);
+       return -ENOMEM;
+}
+
+/* Unlink the buddy from its owner's list and free all per-order
+ * bitmaps and counters. Counterpart of mlx5dr_buddy_init().
+ */
+void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy)
+{
+       int i;
+
+       list_del(&buddy->list_node);
+
+       for (i = 0; i <= buddy->max_order; ++i)
+               bitmap_free(buddy->bitmap[i]);
+
+       kfree(buddy->num_free);
+       kfree(buddy->bitmap);
+}
+
+/* Find the first free segment at the lowest order >= start_order.
+ * On success, *segment and *order are set to the found free bit and
+ * the order it was found at. Returns -ENOMEM if nothing is free.
+ */
+static int dr_buddy_find_free_seg(struct mlx5dr_icm_buddy_mem *buddy,
+                                 unsigned int start_order,
+                                 unsigned int *segment,
+                                 unsigned int *order)
+{
+       unsigned int seg, order_iter, m;
+
+       for (order_iter = start_order;
+            order_iter <= buddy->max_order; ++order_iter) {
+               if (!buddy->num_free[order_iter])
+                       continue;
+
+               /* m = number of segments that exist at this order */
+               m = 1 << (buddy->max_order - order_iter);
+               seg = find_first_bit(buddy->bitmap[order_iter], m);
+
+               /* num_free said there is a free bit - inconsistency if not */
+               if (WARN(seg >= m,
+                        "ICM Buddy: failed finding free mem for order %d\n",
+                        order_iter))
+                       return -ENOMEM;
+
+               break;
+       }
+
+       if (order_iter > buddy->max_order)
+               return -ENOMEM;
+
+       *segment = seg;
+       *order = order_iter;
+       return 0;
+}
+
+/**
+ * mlx5dr_buddy_alloc_mem() - Allocate a segment from the buddy.
+ * @buddy: Buddy to update.
+ * @order: Order of the buddy to update.
+ * @segment: Segment number.
+ *
+ * This function finds the first area of the ICM memory managed by this buddy.
+ * It uses the data structures of the buddy system in order to find the first
+ * area of free place, starting from the current order till the maximum order
+ * in the system.
+ *
+ * Return: 0 when segment is set, non-zero error status otherwise.
+ *
+ * The function returns the location (segment) in the whole buddy ICM memory
+ * area - the index of the memory segment that is available for use.
+ */
+int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy,
+                          unsigned int order,
+                          unsigned int *segment)
+{
+       unsigned int seg, order_iter;
+       int err;
+
+       err = dr_buddy_find_free_seg(buddy, order, &seg, &order_iter);
+       if (err)
+               return err;
+
+       /* claim the found block at the order it was found */
+       bitmap_clear(buddy->bitmap[order_iter], seg, 1);
+       --buddy->num_free[order_iter];
+
+       /* If we found free memory in some order that is bigger than the
+        * required order, we need to split every order between the required
+        * order and the order that we found into two parts, and mark accordingly.
+        */
+       while (order_iter > order) {
+               --order_iter;
+               seg <<= 1;
+               /* keep the sibling half (seg ^ 1) free at the lower order */
+               bitmap_set(buddy->bitmap[order_iter], seg ^ 1, 1);
+               ++buddy->num_free[order_iter];
+       }
+
+       /* convert from order-relative index to absolute segment index */
+       seg <<= order;
+       *segment = seg;
+
+       return 0;
+}
+
+/* Free a segment of the given order, merging with its free buddy
+ * (seg ^ 1) into higher orders for as long as the buddy is also free.
+ */
+void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy,
+                          unsigned int seg, unsigned int order)
+{
+       /* absolute segment index -> order-relative index */
+       seg >>= order;
+
+       /* Whenever a segment is free,
+        * the mem is added to the buddy that gave it.
+        */
+       while (test_bit(seg ^ 1, buddy->bitmap[order])) {
+               bitmap_clear(buddy->bitmap[order], seg ^ 1, 1);
+               --buddy->num_free[order];
+               seg >>= 1;
+               ++order;
+       }
+       bitmap_set(buddy->bitmap[order], seg, 1);
+
+       ++buddy->num_free[order];
+}
+
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_cmd.c
new file mode 100644 (file)
index 0000000..baefb9a
--- /dev/null
@@ -0,0 +1,970 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+/* Query the RX/TX SW-steering ICM root addresses of an eswitch vport
+ * context. Returns 0 on success, errno from the FW command otherwise.
+ */
+int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
+                                      bool other_vport,
+                                      u16 vport_number,
+                                      u64 *icm_address_rx,
+                                      u64 *icm_address_tx)
+{
+       u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
+       int err;
+
+       MLX5_SET(query_esw_vport_context_in, in, opcode,
+                MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
+       MLX5_SET(query_esw_vport_context_in, in, other_vport, other_vport);
+       MLX5_SET(query_esw_vport_context_in, in, vport_number, vport_number);
+
+       err = mlx5_cmd_exec_inout(mdev, query_esw_vport_context, in, out);
+       if (err)
+               return err;
+
+       *icm_address_rx =
+               MLX5_GET64(query_esw_vport_context_out, out,
+                          esw_vport_context.sw_steering_vport_icm_address_rx);
+       *icm_address_tx =
+               MLX5_GET64(query_esw_vport_context_out, out,
+                          esw_vport_context.sw_steering_vport_icm_address_tx);
+       return 0;
+}
+
+/* Query the vhca_id (gvmi) of a function/vport via QUERY_HCA_CAP.
+ * The output layout is large, so it is heap-allocated.
+ * Returns 0 on success, errno otherwise.
+ */
+int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
+                         u16 vport_number, u16 *gvmi)
+{
+       bool ec_vf_func = other_vport ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
+       u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
+       int out_size;
+       void *out;
+       int err;
+
+       out_size = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+       out = kzalloc(out_size, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
+       MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
+       MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
+       MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
+       MLX5_SET(query_hca_cap_in, in, op_mod,
+                MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
+                HCA_CAP_OPMOD_GET_CUR);
+
+       err = mlx5_cmd_exec_inout(mdev, query_hca_cap, in, out);
+       if (err)
+               goto free_out;
+
+       *gvmi = MLX5_GET(query_hca_cap_out, out, capability.cmd_hca_cap.vhca_id);
+
+free_out:
+       /* single exit point frees the output buffer on all paths */
+       kfree(out);
+       return err;
+}
+
+/* Read FDB SW-steering capabilities: drop/uplink ICM addresses and the
+ * sw_owner generation (v2 takes precedence over v1). Always returns 0.
+ */
+int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
+                             struct mlx5dr_esw_caps *caps)
+{
+       caps->drop_icm_address_rx =
+               MLX5_CAP64_ESW_FLOWTABLE(mdev,
+                                        sw_steering_fdb_action_drop_icm_address_rx);
+       caps->drop_icm_address_tx =
+               MLX5_CAP64_ESW_FLOWTABLE(mdev,
+                                        sw_steering_fdb_action_drop_icm_address_tx);
+       caps->uplink_icm_address_rx =
+               MLX5_CAP64_ESW_FLOWTABLE(mdev,
+                                        sw_steering_uplink_icm_address_rx);
+       caps->uplink_icm_address_tx =
+               MLX5_CAP64_ESW_FLOWTABLE(mdev,
+                                        sw_steering_uplink_icm_address_tx);
+       caps->sw_owner_v2 = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner_v2);
+       if (!caps->sw_owner_v2)
+               caps->sw_owner = MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, sw_owner);
+
+       return 0;
+}
+
+/* Read the roce_en bit of a NIC vport context (vport 0 = own vport;
+ * a non-zero vport implies an "other vport" query).
+ */
+static int dr_cmd_query_nic_vport_roce_en(struct mlx5_core_dev *mdev,
+                                         u16 vport, bool *roce_en)
+{
+       u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)] = {};
+       int err;
+
+       MLX5_SET(query_nic_vport_context_in, in, opcode,
+                MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+       MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
+       MLX5_SET(query_nic_vport_context_in, in, other_vport, !!vport);
+
+       err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (err)
+               return err;
+
+       *roce_en = MLX5_GET(query_nic_vport_context_out, out,
+                           nic_vport_context.roce_en);
+       return 0;
+}
+
+/* Populate the SW-steering capability struct from the device's general,
+ * ROCE, flow-table, eswitch and device-memory HCA capabilities.
+ * Returns 0 on success, or the errno of a failed sub-query.
+ */
+int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
+                           struct mlx5dr_cmd_caps *caps)
+{
+       bool roce_en;
+       int err;
+
+       caps->prio_tag_required = MLX5_CAP_GEN(mdev, prio_tag_required);
+       caps->eswitch_manager   = MLX5_CAP_GEN(mdev, eswitch_manager);
+       caps->gvmi              = MLX5_CAP_GEN(mdev, vhca_id);
+       caps->flex_protocols    = MLX5_CAP_GEN(mdev, flex_parser_protocols);
+       caps->sw_format_ver     = MLX5_CAP_GEN(mdev, steering_format_version);
+       caps->roce_caps.fl_rc_qp_when_roce_disabled =
+               MLX5_CAP_GEN(mdev, fl_rc_qp_when_roce_disabled);
+
+       if (MLX5_CAP_GEN(mdev, roce)) {
+               err = dr_cmd_query_nic_vport_roce_en(mdev, 0, &roce_en);
+               if (err)
+                       return err;
+
+               caps->roce_caps.roce_en = roce_en;
+               /* combine the general and the ROCE-specific capability bits */
+               caps->roce_caps.fl_rc_qp_when_roce_disabled |=
+                       MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_disabled);
+               caps->roce_caps.fl_rc_qp_when_roce_enabled =
+                       MLX5_CAP_ROCE(mdev, fl_rc_qp_when_roce_enabled);
+       }
+
+       caps->isolate_vl_tc = MLX5_CAP_GEN(mdev, isolate_vl_tc_new);
+
+       caps->support_modify_argument =
+               MLX5_CAP_GEN_64(mdev, general_obj_types) &
+               MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT;
+
+       if (caps->support_modify_argument) {
+               caps->log_header_modify_argument_granularity =
+                       MLX5_CAP_GEN(mdev, log_header_modify_argument_granularity);
+               caps->log_header_modify_argument_max_alloc =
+                       MLX5_CAP_GEN(mdev, log_header_modify_argument_max_alloc);
+       }
+
+       /* geneve_tlv_option_0_exist is the indication of
+        * STE support for lookup type flex_parser_ok
+        */
+       caps->flex_parser_ok_bits_supp =
+               MLX5_CAP_FLOWTABLE(mdev,
+                                  flow_table_properties_nic_receive.ft_field_support.geneve_tlv_option_0_exist);
+
+       /* flex parser ids are read only for protocols whose enable bit is
+        * set in flex_protocols
+        */
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED) {
+               caps->flex_parser_id_icmp_dw0 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw0);
+               caps->flex_parser_id_icmp_dw1 = MLX5_CAP_GEN(mdev, flex_parser_id_icmp_dw1);
+       }
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED) {
+               caps->flex_parser_id_icmpv6_dw0 =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw0);
+               caps->flex_parser_id_icmpv6_dw1 =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_icmpv6_dw1);
+       }
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_TLV_OPTION_0_ENABLED)
+               caps->flex_parser_id_geneve_tlv_option_0 =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_geneve_tlv_option_0);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED)
+               caps->flex_parser_id_mpls_over_gre =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_gre);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED)
+               caps->flex_parser_id_mpls_over_udp =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_outer_first_mpls_over_udp_label);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED)
+               caps->flex_parser_id_gtpu_dw_0 =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_0);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED)
+               caps->flex_parser_id_gtpu_teid =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_teid);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED)
+               caps->flex_parser_id_gtpu_dw_2 =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_dw_2);
+
+       if (caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED)
+               caps->flex_parser_id_gtpu_first_ext_dw_0 =
+                       MLX5_CAP_GEN(mdev, flex_parser_id_gtpu_first_ext_dw_0);
+
+       caps->nic_rx_drop_address =
+               MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_rx_action_drop_icm_address);
+       caps->nic_tx_drop_address =
+               MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_drop_icm_address);
+       caps->nic_tx_allow_address =
+               MLX5_CAP64_FLOWTABLE(mdev, sw_steering_nic_tx_action_allow_icm_address);
+
+       /* sw_owner v2 takes precedence; fall back to v1 only if v2 is absent */
+       caps->rx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner_v2);
+       caps->tx_sw_owner_v2 = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner_v2);
+
+       if (!caps->rx_sw_owner_v2)
+               caps->rx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, sw_owner);
+       if (!caps->tx_sw_owner_v2)
+               caps->tx_sw_owner = MLX5_CAP_FLOWTABLE_NIC_TX(mdev, sw_owner);
+
+       caps->max_ft_level = MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_ft_level);
+
+       caps->log_icm_size = MLX5_CAP_DEV_MEM(mdev, log_steering_sw_icm_size);
+       caps->hdr_modify_icm_addr =
+               MLX5_CAP64_DEV_MEM(mdev, header_modify_sw_icm_start_address);
+
+       caps->log_modify_pattern_icm_size =
+               MLX5_CAP_DEV_MEM(mdev, log_header_modify_pattern_sw_icm_size);
+
+       caps->hdr_modify_pattern_icm_addr =
+               MLX5_CAP64_DEV_MEM(mdev, header_modify_pattern_sw_icm_start_address);
+
+       caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
+
+       caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
+       return 0;
+}
+
+/* Query a flow table by type/id; fills status, level and the two
+ * SW-owner ICM root addresses into @output.
+ */
+int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
+                               enum fs_flow_table_type type,
+                               u32 table_id,
+                               struct mlx5dr_cmd_query_flow_table_details *output)
+{
+       u32 out[MLX5_ST_SZ_DW(query_flow_table_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(query_flow_table_in)] = {};
+       int err;
+
+       MLX5_SET(query_flow_table_in, in, opcode,
+                MLX5_CMD_OP_QUERY_FLOW_TABLE);
+
+       MLX5_SET(query_flow_table_in, in, table_type, type);
+       MLX5_SET(query_flow_table_in, in, table_id, table_id);
+
+       err = mlx5_cmd_exec_inout(dev, query_flow_table, in, out);
+       if (err)
+               return err;
+
+       output->status = MLX5_GET(query_flow_table_out, out, status);
+       output->level = MLX5_GET(query_flow_table_out, out, flow_table_context.level);
+
+       output->sw_owner_icm_root_1 = MLX5_GET64(query_flow_table_out, out,
+                                                flow_table_context.sws.sw_owner_icm_root_1);
+       output->sw_owner_icm_root_0 = MLX5_GET64(query_flow_table_out, out,
+                                                flow_table_context.sws.sw_owner_icm_root_0);
+
+       return 0;
+}
+
+/* Query a sampler general object and extract its RX/TX SW-steering
+ * ICM addresses.
+ */
+int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
+                                 u32 sampler_id,
+                                 u64 *rx_icm_addr,
+                                 u64 *tx_icm_addr)
+{
+       u32 out[MLX5_ST_SZ_DW(query_sampler_obj_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+       void *attr;
+       int ret;
+
+       MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+                MLX5_CMD_OP_QUERY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+                MLX5_GENERAL_OBJECT_TYPES_SAMPLER);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, sampler_id);
+
+       ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+       if (ret)
+               return ret;
+
+       attr = MLX5_ADDR_OF(query_sampler_obj_out, out, sampler_object);
+
+       *rx_icm_addr = MLX5_GET64(sampler_obj, attr,
+                                 sw_steering_icm_address_rx);
+       *tx_icm_addr = MLX5_GET64(sampler_obj, attr,
+                                 sw_steering_icm_address_tx);
+
+       return 0;
+}
+
+/* Execute the SYNC_STEERING FW command; returns 0 if skipped or on
+ * success, errno otherwise.
+ */
+int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev)
+{
+       u32 in[MLX5_ST_SZ_DW(sync_steering_in)] = {};
+
+       /* Skip SYNC in case the device is internal error state.
+        * Besides a device error, this also happens when we're
+        * in fast teardown
+        */
+       if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+               return 0;
+
+       MLX5_SET(sync_steering_in, in, opcode, MLX5_CMD_OP_SYNC_STEERING);
+
+       return mlx5_cmd_exec_in(mdev, sync_steering, in);
+}
+
+/* Set a flow table entry whose action is modify-header + forward to a
+ * single vport destination. flow_index stays 0 in the zeroed input.
+ */
+int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
+                                       u32 table_type,
+                                       u32 table_id,
+                                       u32 group_id,
+                                       u32 modify_header_id,
+                                       u16 vport)
+{
+       u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
+       void *in_flow_context;
+       unsigned int inlen;
+       void *in_dests;
+       u32 *in;
+       int err;
+
+       inlen = MLX5_ST_SZ_BYTES(set_fte_in) +
+               1 * MLX5_ST_SZ_BYTES(dest_format_struct); /* One destination only */
+
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+       MLX5_SET(set_fte_in, in, table_type, table_type);
+       MLX5_SET(set_fte_in, in, table_id, table_id);
+
+       in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+       MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+       MLX5_SET(flow_context, in_flow_context, modify_header_id, modify_header_id);
+       MLX5_SET(flow_context, in_flow_context, destination_list_size, 1);
+       MLX5_SET(flow_context, in_flow_context, action,
+                MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+                MLX5_FLOW_CONTEXT_ACTION_MOD_HDR);
+
+       in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+       MLX5_SET(dest_format_struct, in_dests, destination_type,
+                MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT);
+       MLX5_SET(dest_format_struct, in_dests, destination_id, vport);
+
+       err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+       kvfree(in);
+
+       return err;
+}
+
+/* Delete a flow table entry; flow_index is left 0 in the zeroed input. */
+int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
+                                   u32 table_type,
+                                   u32 table_id)
+{
+       u32 in[MLX5_ST_SZ_DW(delete_fte_in)] = {};
+
+       MLX5_SET(delete_fte_in, in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+       MLX5_SET(delete_fte_in, in, table_type, table_type);
+       MLX5_SET(delete_fte_in, in, table_id, table_id);
+
+       return mlx5_cmd_exec_in(mdev, delete_fte, in);
+}
+
+/* Allocate a FW modify-header context from an array of 8-byte actions;
+ * the resulting id is returned through @modify_header_id.
+ */
+int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
+                                  u32 table_type,
+                                  u8 num_of_actions,
+                                  u64 *actions,
+                                  u32 *modify_header_id)
+{
+       u32 out[MLX5_ST_SZ_DW(alloc_modify_header_context_out)] = {};
+       void *p_actions;
+       u32 inlen;
+       u32 *in;
+       int err;
+
+       /* input is variable-length: header plus the action array */
+       inlen = MLX5_ST_SZ_BYTES(alloc_modify_header_context_in) +
+                num_of_actions * sizeof(u64);
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(alloc_modify_header_context_in, in, opcode,
+                MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT);
+       MLX5_SET(alloc_modify_header_context_in, in, table_type, table_type);
+       MLX5_SET(alloc_modify_header_context_in, in, num_of_actions, num_of_actions);
+       p_actions = MLX5_ADDR_OF(alloc_modify_header_context_in, in, actions);
+       memcpy(p_actions, actions, num_of_actions * sizeof(u64));
+
+       err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+       if (err)
+               goto out;
+
+       *modify_header_id = MLX5_GET(alloc_modify_header_context_out, out,
+                                    modify_header_id);
+out:
+       kvfree(in);
+       return err;
+}
+
+/* Free a FW modify-header context previously allocated by
+ * mlx5dr_cmd_alloc_modify_header().
+ */
+int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
+                                    u32 modify_header_id)
+{
+       u32 in[MLX5_ST_SZ_DW(dealloc_modify_header_context_in)] = {};
+
+       MLX5_SET(dealloc_modify_header_context_in, in, opcode,
+                MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
+       MLX5_SET(dealloc_modify_header_context_in, in, modify_header_id,
+                modify_header_id);
+
+       return mlx5_cmd_exec_in(mdev, dealloc_modify_header_context, in);
+}
+
+/* Create a flow group with no match criteria (input left zeroed beyond
+ * opcode/table); the new group id is returned through @group_id.
+ */
+int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
+                                      u32 table_type,
+                                      u32 table_id,
+                                      u32 *group_id)
+{
+       u32 out[MLX5_ST_SZ_DW(create_flow_group_out)] = {};
+       int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+       u32 *in;
+       int err;
+
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(create_flow_group_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+       MLX5_SET(create_flow_group_in, in, table_type, table_type);
+       MLX5_SET(create_flow_group_in, in, table_id, table_id);
+
+       err = mlx5_cmd_exec_inout(mdev, create_flow_group, in, out);
+       if (err)
+               goto out;
+
+       *group_id = MLX5_GET(create_flow_group_out, out, group_id);
+
+out:
+       kvfree(in);
+       return err;
+}
+
+/* Destroy a flow group created by mlx5dr_cmd_create_empty_flow_group(). */
+int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
+                                 u32 table_type,
+                                 u32 table_id,
+                                 u32 group_id)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)] = {};
+
+       MLX5_SET(destroy_flow_group_in, in, opcode,
+                MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+       MLX5_SET(destroy_flow_group_in, in, table_type, table_type);
+       MLX5_SET(destroy_flow_group_in, in, table_id, table_id);
+       MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+
+       return mlx5_cmd_exec_in(mdev, destroy_flow_group, in);
+}
+
+/* Create a flow table per @attr. For SW-owned (SW steering) tables the
+ * ICM root addresses are programmed into the flow table context. For a
+ * FW-owned FDB table, the RX ICM address assigned by FW is returned via
+ * @fdb_rx_icm_addr (if non-NULL). @table_id receives the new table ID.
+ */
+int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
+                                struct mlx5dr_cmd_create_flow_table_attr *attr,
+                                u64 *fdb_rx_icm_addr,
+                                u32 *table_id)
+{
+       u32 out[MLX5_ST_SZ_DW(create_flow_table_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(create_flow_table_in)] = {};
+       void *ft_mdev;
+       int err;
+
+       MLX5_SET(create_flow_table_in, in, opcode, MLX5_CMD_OP_CREATE_FLOW_TABLE);
+       MLX5_SET(create_flow_table_in, in, table_type, attr->table_type);
+       MLX5_SET(create_flow_table_in, in, uid, attr->uid);
+
+       ft_mdev = MLX5_ADDR_OF(create_flow_table_in, in, flow_table_context);
+       MLX5_SET(flow_table_context, ft_mdev, termination_table, attr->term_tbl);
+       MLX5_SET(flow_table_context, ft_mdev, sw_owner, attr->sw_owner);
+       MLX5_SET(flow_table_context, ft_mdev, level, attr->level);
+
+       if (attr->sw_owner) {
+               /* icm_addr_0 used for FDB RX / NIC TX / NIC_RX
+                * icm_addr_1 used for FDB TX
+                */
+               if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_RX) {
+                       MLX5_SET64(flow_table_context, ft_mdev,
+                                  sws.sw_owner_icm_root_0, attr->icm_addr_rx);
+               } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_NIC_TX) {
+                       MLX5_SET64(flow_table_context, ft_mdev,
+                                  sws.sw_owner_icm_root_0, attr->icm_addr_tx);
+               } else if (attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB) {
+                       /* FDB is bidirectional - program both roots */
+                       MLX5_SET64(flow_table_context, ft_mdev,
+                                  sws.sw_owner_icm_root_0, attr->icm_addr_rx);
+                       MLX5_SET64(flow_table_context, ft_mdev,
+                                  sws.sw_owner_icm_root_1, attr->icm_addr_tx);
+               }
+       }
+
+       MLX5_SET(create_flow_table_in, in, flow_table_context.decap_en,
+                attr->decap_en);
+       MLX5_SET(create_flow_table_in, in, flow_table_context.reformat_en,
+                attr->reformat_en);
+
+       err = mlx5_cmd_exec_inout(mdev, create_flow_table, in, out);
+       if (err)
+               return err;
+
+       *table_id = MLX5_GET(create_flow_table_out, out, table_id);
+       /* FW-owned FDB: assemble the 64-bit RX ICM address from its
+        * three bit-fields in the command output.
+        */
+       if (!attr->sw_owner && attr->table_type == MLX5_FLOW_TABLE_TYPE_FDB &&
+           fdb_rx_icm_addr)
+               *fdb_rx_icm_addr =
+               (u64)MLX5_GET(create_flow_table_out, out, icm_address_31_0) |
+               (u64)MLX5_GET(create_flow_table_out, out, icm_address_39_32) << 32 |
+               (u64)MLX5_GET(create_flow_table_out, out, icm_address_63_40) << 40;
+
+       return 0;
+}
+
+/* Destroy a flow table created by mlx5dr_cmd_create_flow_table(). */
+int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
+                                 u32 table_id,
+                                 u32 table_type)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)] = {};
+
+       MLX5_SET(destroy_flow_table_in, in, opcode,
+                MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+       MLX5_SET(destroy_flow_table_in, in, table_type, table_type);
+       MLX5_SET(destroy_flow_table_in, in, table_id, table_id);
+
+       return mlx5_cmd_exec_in(mdev, destroy_flow_table, in);
+}
+
+/* Allocate a FW packet-reformat (encap/decap/insert) context carrying
+ * @reformat_size bytes of @reformat_data. The command input is sized
+ * dynamically: the fixed struct minus its placeholder data field, plus
+ * the actual payload, rounded up to a 4-byte boundary as the FW command
+ * interface requires. @reformat_id receives the new context ID.
+ */
+int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
+                                  enum mlx5_reformat_ctx_type rt,
+                                  u8 reformat_param_0,
+                                  u8 reformat_param_1,
+                                  size_t reformat_size,
+                                  void *reformat_data,
+                                  u32 *reformat_id)
+{
+       u32 out[MLX5_ST_SZ_DW(alloc_packet_reformat_context_out)] = {};
+       size_t inlen, cmd_data_sz, cmd_total_sz;
+       void *prctx;
+       void *pdata;
+       void *in;
+       int err;
+
+       cmd_total_sz = MLX5_ST_SZ_BYTES(alloc_packet_reformat_context_in);
+       cmd_data_sz = MLX5_FLD_SZ_BYTES(alloc_packet_reformat_context_in,
+                                       packet_reformat_context.reformat_data);
+       inlen = ALIGN(cmd_total_sz + reformat_size - cmd_data_sz, 4);
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(alloc_packet_reformat_context_in, in, opcode,
+                MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT);
+
+       prctx = MLX5_ADDR_OF(alloc_packet_reformat_context_in, in, packet_reformat_context);
+       pdata = MLX5_ADDR_OF(packet_reformat_context_in, prctx, reformat_data);
+
+       MLX5_SET(packet_reformat_context_in, prctx, reformat_type, rt);
+       MLX5_SET(packet_reformat_context_in, prctx, reformat_param_0, reformat_param_0);
+       MLX5_SET(packet_reformat_context_in, prctx, reformat_param_1, reformat_param_1);
+       MLX5_SET(packet_reformat_context_in, prctx, reformat_data_size, reformat_size);
+       /* Payload is optional (e.g. remove-header has none) */
+       if (reformat_data && reformat_size)
+               memcpy(pdata, reformat_data, reformat_size);
+
+       err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+       if (err)
+               goto err_free_in;
+
+       *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id);
+
+err_free_in:
+       /* Shared success/error exit: 'in' is freed on both paths */
+       kvfree(in);
+       return err;
+}
+
+/* Free a packet-reformat context; destroy path, so the FW command
+ * status is intentionally ignored (nothing the caller could do).
+ */
+void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
+                                    u32 reformat_id)
+{
+       u32 in[MLX5_ST_SZ_DW(dealloc_packet_reformat_context_in)] = {};
+
+       MLX5_SET(dealloc_packet_reformat_context_in, in, opcode,
+                MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
+       MLX5_SET(dealloc_packet_reformat_context_in, in, packet_reformat_id,
+                reformat_id);
+
+       mlx5_cmd_exec_in(mdev, dealloc_packet_reformat_context, in);
+}
+
+/* Program the 9 DW and 8 byte selector fields of a match definer.
+ * Only meaningful for the "select" format ID; for fixed formats the
+ * selectors are implied by the format and this is a no-op.
+ * Assumes dw_selectors has >= 9 entries and byte_selectors >= 8
+ * (caller-provided arrays - not checked here).
+ */
+static void dr_cmd_set_definer_format(void *ptr, u16 format_id,
+                                     u8 *dw_selectors,
+                                     u8 *byte_selectors)
+{
+       if (format_id != MLX5_IFC_DEFINER_FORMAT_ID_SELECT)
+               return;
+
+       MLX5_SET(match_definer, ptr, format_select_dw0, dw_selectors[0]);
+       MLX5_SET(match_definer, ptr, format_select_dw1, dw_selectors[1]);
+       MLX5_SET(match_definer, ptr, format_select_dw2, dw_selectors[2]);
+       MLX5_SET(match_definer, ptr, format_select_dw3, dw_selectors[3]);
+       MLX5_SET(match_definer, ptr, format_select_dw4, dw_selectors[4]);
+       MLX5_SET(match_definer, ptr, format_select_dw5, dw_selectors[5]);
+       MLX5_SET(match_definer, ptr, format_select_dw6, dw_selectors[6]);
+       MLX5_SET(match_definer, ptr, format_select_dw7, dw_selectors[7]);
+       MLX5_SET(match_definer, ptr, format_select_dw8, dw_selectors[8]);
+
+       MLX5_SET(match_definer, ptr, format_select_byte0, byte_selectors[0]);
+       MLX5_SET(match_definer, ptr, format_select_byte1, byte_selectors[1]);
+       MLX5_SET(match_definer, ptr, format_select_byte2, byte_selectors[2]);
+       MLX5_SET(match_definer, ptr, format_select_byte3, byte_selectors[3]);
+       MLX5_SET(match_definer, ptr, format_select_byte4, byte_selectors[4]);
+       MLX5_SET(match_definer, ptr, format_select_byte5, byte_selectors[5]);
+       MLX5_SET(match_definer, ptr, format_select_byte6, byte_selectors[6]);
+       MLX5_SET(match_definer, ptr, format_select_byte7, byte_selectors[7]);
+}
+
+/* Create a match definer general object with the given format,
+ * selectors and match mask. @definer_id receives the new object ID.
+ * Returns 0 or a negative errno.
+ */
+int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
+                             u16 format_id,
+                             u8 *dw_selectors,
+                             u8 *byte_selectors,
+                             u8 *match_mask,
+                             u32 *definer_id)
+{
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+       u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
+       void *ptr;
+       int err;
+
+       /* 'ptr' is reused below: first the general object header,
+        * then the definer object context, then its match_mask field.
+        */
+       ptr = MLX5_ADDR_OF(create_match_definer_in, in,
+                          general_obj_in_cmd_hdr);
+       MLX5_SET(general_obj_in_cmd_hdr, ptr, opcode,
+                MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, ptr, obj_type,
+                MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+       ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
+       MLX5_SET(match_definer, ptr, format_id, format_id);
+
+       dr_cmd_set_definer_format(ptr, format_id,
+                                 dw_selectors, byte_selectors);
+
+       ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
+       memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
+
+       err = mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+       if (err)
+               return err;
+
+       *definer_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+
+       return 0;
+}
+
+/* Destroy a match definer general object. Destroy path: the FW
+ * command status is intentionally ignored.
+ */
+void
+mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev, u32 definer_id)
+{
+       u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+       /* Zero-init for consistency with every other command buffer in
+        * this file (it is write-only to FW, but keeps the style uniform).
+        */
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+
+       MLX5_SET(general_obj_in_cmd_hdr, in, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type, MLX5_OBJ_TYPE_MATCH_DEFINER);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
+
+       mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out));
+}
+
+/* Query the RoCE address table entry @index on @vhca_port_num and fill
+ * @attr with its GID, source MAC and RoCE version. Returns 0 or a
+ * negative errno.
+ */
+int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
+                        u16 index, struct mlx5dr_cmd_gid_attr *attr)
+{
+       u32 out[MLX5_ST_SZ_DW(query_roce_address_out)] = {};
+       u32 in[MLX5_ST_SZ_DW(query_roce_address_in)] = {};
+       int err;
+
+       MLX5_SET(query_roce_address_in, in, opcode,
+                MLX5_CMD_OP_QUERY_ROCE_ADDRESS);
+
+       MLX5_SET(query_roce_address_in, in, roce_address_index, index);
+       MLX5_SET(query_roce_address_in, in, vhca_port_num, vhca_port_num);
+
+       err = mlx5_cmd_exec_inout(mdev, query_roce_address, in, out);
+       if (err)
+               return err;
+
+       memcpy(&attr->gid,
+              MLX5_ADDR_OF(query_roce_address_out,
+                           out, roce_address.source_l3_address),
+              sizeof(attr->gid));
+       memcpy(attr->mac,
+              MLX5_ADDR_OF(query_roce_address_out, out,
+                           roce_address.source_mac_47_32),
+              sizeof(attr->mac));
+
+       /* Collapse the FW version field to v1/v2 only */
+       if (MLX5_GET(query_roce_address_out, out,
+                    roce_address.roce_version) == MLX5_ROCE_VERSION_2)
+               attr->roce_ver = MLX5_ROCE_VERSION_2;
+       else
+               attr->roce_ver = MLX5_ROCE_VERSION_1;
+
+       return 0;
+}
+
+/* Create a range of 2^@log_obj_range modify-header argument objects
+ * accessed via protection domain @pd. @obj_id receives the ID of the
+ * first object in the range. Returns 0 or a negative errno.
+ */
+int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
+                                       u16 log_obj_range, u32 pd,
+                                       u32 *obj_id)
+{
+       u32 in[MLX5_ST_SZ_DW(create_modify_header_arg_in)] = {};
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+       void *attr;
+       int ret;
+
+       attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, hdr);
+       MLX5_SET(general_obj_in_cmd_hdr, attr, opcode,
+                MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, attr, obj_type,
+                MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+       MLX5_SET(general_obj_in_cmd_hdr, attr,
+                op_param.create.log_obj_range, log_obj_range);
+
+       attr = MLX5_ADDR_OF(create_modify_header_arg_in, in, arg);
+       MLX5_SET(modify_header_arg, attr, access_pd, pd);
+
+       ret = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+       if (ret)
+               return ret;
+
+       *obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+       return 0;
+}
+
+/* Destroy a modify-header argument object (range). Destroy path: the
+ * FW command status is intentionally ignored.
+ */
+void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
+                                         u32 obj_id)
+{
+       u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {};
+       u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+
+       MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+                MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+                MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT);
+       MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, obj_id);
+
+       mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+}
+
+/* Decide whether the FTE needs the "extended destination" format:
+ * required when there is more than one forward destination and at
+ * least one of them carries a per-destination packet reformat.
+ * Returns -EOPNOTSUPP when the FW cannot support the configuration,
+ * 0 otherwise (with *extended_dest set accordingly).
+ */
+static int mlx5dr_cmd_set_extended_dest(struct mlx5_core_dev *dev,
+                                       struct mlx5dr_cmd_fte_info *fte,
+                                       bool *extended_dest)
+{
+       int fw_log_max_fdb_encap_uplink = MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink);
+       int num_fwd_destinations = 0;
+       int num_encap = 0;
+       int i;
+
+       *extended_dest = false;
+       if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+               return 0;
+       for (i = 0; i < fte->dests_size; i++) {
+               /* Counters and NONE entries are not forward destinations */
+               if (fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ||
+                   fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_NONE)
+                       continue;
+               if ((fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+                    fte->dest_arr[i].type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
+                   fte->dest_arr[i].vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID)
+                       num_encap++;
+               num_fwd_destinations++;
+       }
+
+       if (num_fwd_destinations > 1 && num_encap > 0)
+               *extended_dest = true;
+
+       if (*extended_dest && !fw_log_max_fdb_encap_uplink) {
+               mlx5_core_warn(dev, "FW does not support extended destination");
+               return -EOPNOTSUPP;
+       }
+       if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) {
+               mlx5_core_warn(dev, "FW does not support more than %d encaps",
+                              1 << fw_log_max_fdb_encap_uplink);
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
+/* Build and execute a SET_FLOW_TABLE_ENTRY command from the SW-level
+ * FTE description: flow context, push-VLAN headers, match value, the
+ * forward destination list and the flow counter list. The input buffer
+ * is sized for dests_size destination entries, whose per-entry size
+ * depends on whether the extended destination format is needed.
+ * Returns 0 or a negative errno.
+ */
+int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
+                      int opmod, int modify_mask,
+                      struct mlx5dr_cmd_ft_info *ft,
+                      u32 group_id,
+                      struct mlx5dr_cmd_fte_info *fte)
+{
+       u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
+       void *in_flow_context, *vlan;
+       bool extended_dest = false;
+       void *in_match_value;
+       unsigned int inlen;
+       int dst_cnt_size;
+       void *in_dests;
+       u32 *in;
+       int err;
+       int i;
+
+       if (mlx5dr_cmd_set_extended_dest(dev, fte, &extended_dest))
+               return -EOPNOTSUPP;
+
+       /* Extended destinations carry extra per-destination fields
+        * (e.g. reformat id), so each list entry is larger.
+        */
+       if (!extended_dest)
+               dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct);
+       else
+               dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format);
+
+       inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size;
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               return -ENOMEM;
+
+       MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+       MLX5_SET(set_fte_in, in, op_mod, opmod);
+       MLX5_SET(set_fte_in, in, modify_enable_mask, modify_mask);
+       MLX5_SET(set_fte_in, in, table_type, ft->type);
+       MLX5_SET(set_fte_in, in, table_id, ft->id);
+       MLX5_SET(set_fte_in, in, flow_index, fte->index);
+       MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level);
+       /* Table owned by another vport (eswitch manager acting on it) */
+       if (ft->vport) {
+               MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+               MLX5_SET(set_fte_in, in, other_vport, 1);
+       }
+
+       in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+       MLX5_SET(flow_context, in_flow_context, group_id, group_id);
+
+       MLX5_SET(flow_context, in_flow_context, flow_tag,
+                fte->flow_context.flow_tag);
+       MLX5_SET(flow_context, in_flow_context, flow_source,
+                fte->flow_context.flow_source);
+
+       MLX5_SET(flow_context, in_flow_context, extended_destination,
+                extended_dest);
+       if (extended_dest) {
+               u32 action;
+
+               /* In extended-dest mode the reformat is expressed
+                * per-destination, not as a flow-context action.
+                */
+               action = fte->action.action &
+                       ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+               MLX5_SET(flow_context, in_flow_context, action, action);
+       } else {
+               MLX5_SET(flow_context, in_flow_context, action,
+                        fte->action.action);
+               if (fte->action.pkt_reformat)
+                       MLX5_SET(flow_context, in_flow_context, packet_reformat_id,
+                                fte->action.pkt_reformat->id);
+       }
+       if (fte->action.modify_hdr)
+               MLX5_SET(flow_context, in_flow_context, modify_header_id,
+                        fte->action.modify_hdr->id);
+
+       /* Up to two VLAN headers can be pushed */
+       vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan);
+
+       MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype);
+       MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid);
+       MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio);
+
+       vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2);
+
+       MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype);
+       MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid);
+       MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio);
+
+       in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
+                                     match_value);
+       memcpy(in_match_value, fte->val, sizeof(u32) * MLX5_ST_SZ_DW_MATCH_PARAM);
+
+       /* First pass over dest_arr: forward destinations (counters are
+        * appended by the second pass below, after the forward list).
+        */
+       in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+       if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+               int list_size = 0;
+
+               for (i = 0; i < fte->dests_size; i++) {
+                       enum mlx5_flow_destination_type type = fte->dest_arr[i].type;
+                       enum mlx5_ifc_flow_destination_type ifc_type;
+                       unsigned int id;
+
+                       if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
+
+                       switch (type) {
+                       case MLX5_FLOW_DESTINATION_TYPE_NONE:
+                               continue;
+                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+                               id = fte->dest_arr[i].ft_num;
+                               ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+                               break;
+                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+                               id = fte->dest_arr[i].ft_id;
+                               ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+
+                               break;
+                       case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+                       case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+                               if (type == MLX5_FLOW_DESTINATION_TYPE_VPORT) {
+                                       id = fte->dest_arr[i].vport.num;
+                                       MLX5_SET(dest_format_struct, in_dests,
+                                                destination_eswitch_owner_vhca_id_valid,
+                                                !!(fte->dest_arr[i].vport.flags &
+                                                   MLX5_FLOW_DEST_VPORT_VHCA_ID));
+                                       ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_VPORT;
+                               } else {
+                                       /* Uplink: fixed id 0, owner always valid */
+                                       id = 0;
+                                       ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_UPLINK;
+                                       MLX5_SET(dest_format_struct, in_dests,
+                                                destination_eswitch_owner_vhca_id_valid, 1);
+                               }
+                               MLX5_SET(dest_format_struct, in_dests,
+                                        destination_eswitch_owner_vhca_id,
+                                        fte->dest_arr[i].vport.vhca_id);
+                               /* Per-destination reformat only exists in
+                                * the extended destination format.
+                                */
+                               if (extended_dest && (fte->dest_arr[i].vport.flags &
+                                                   MLX5_FLOW_DEST_VPORT_REFORMAT_ID)) {
+                                       MLX5_SET(dest_format_struct, in_dests,
+                                                packet_reformat,
+                                                !!(fte->dest_arr[i].vport.flags &
+                                                   MLX5_FLOW_DEST_VPORT_REFORMAT_ID));
+                                       MLX5_SET(extended_dest_format, in_dests,
+                                                packet_reformat_id,
+                                                fte->dest_arr[i].vport.reformat_id);
+                               }
+                               break;
+                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+                               id = fte->dest_arr[i].sampler_id;
+                               ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
+                               break;
+                       default:
+                               id = fte->dest_arr[i].tir_num;
+                               ifc_type = MLX5_IFC_FLOW_DESTINATION_TYPE_TIR;
+                       }
+
+                       MLX5_SET(dest_format_struct, in_dests, destination_type,
+                                ifc_type);
+                       MLX5_SET(dest_format_struct, in_dests, destination_id, id);
+                       in_dests += dst_cnt_size;
+                       list_size++;
+               }
+
+               MLX5_SET(flow_context, in_flow_context, destination_list_size,
+                        list_size);
+       }
+
+       /* Second pass: flow counters, appended after the destinations
+        * ('in_dests' continues from where the first pass stopped).
+        */
+       if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+               int max_list_size = BIT(MLX5_CAP_FLOWTABLE_TYPE(dev,
+                                       log_max_flow_counter,
+                                       ft->type));
+               int list_size = 0;
+
+               for (i = 0; i < fte->dests_size; i++) {
+                       if (fte->dest_arr[i].type !=
+                           MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
+
+                       MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
+                                fte->dest_arr[i].counter_id);
+                       in_dests += dst_cnt_size;
+                       list_size++;
+               }
+               if (list_size > max_list_size) {
+                       err = -EINVAL;
+                       goto err_out;
+               }
+
+               MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
+                        list_size);
+       }
+
+       err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+err_out:
+       kvfree(in);
+       return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.c
new file mode 100644 (file)
index 0000000..030a577
--- /dev/null
@@ -0,0 +1,1186 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/version.h>
+#include "dr_types.h"
+
+/* Derive a dump-file object ID from a kernel pointer (low 32 bits) */
+#define DR_DBG_PTR_TO_ID(p) ((u64)(uintptr_t)(p) & 0xFFFFFFFFULL)
+
+/* Record-type tags written as the first CSV field of every dump line.
+ * NOTE(review): the numeric values appear to form an ABI with external
+ * steering-dump parsing tools - do not renumber; confirm before reuse
+ * of the gaps (e.g. 3414, 3416-3419).
+ */
+enum dr_dump_rec_type {
+       DR_DUMP_REC_TYPE_DOMAIN = 3000,
+       DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER = 3001,
+       DR_DUMP_REC_TYPE_DOMAIN_INFO_DEV_ATTR = 3002,
+       DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT = 3003,
+       DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS = 3004,
+       DR_DUMP_REC_TYPE_DOMAIN_SEND_RING = 3005,
+
+       DR_DUMP_REC_TYPE_TABLE = 3100,
+       DR_DUMP_REC_TYPE_TABLE_RX = 3101,
+       DR_DUMP_REC_TYPE_TABLE_TX = 3102,
+
+       DR_DUMP_REC_TYPE_MATCHER = 3200,
+       DR_DUMP_REC_TYPE_MATCHER_MASK_DEPRECATED = 3201,
+       DR_DUMP_REC_TYPE_MATCHER_RX = 3202,
+       DR_DUMP_REC_TYPE_MATCHER_TX = 3203,
+       DR_DUMP_REC_TYPE_MATCHER_BUILDER = 3204,
+       DR_DUMP_REC_TYPE_MATCHER_MASK = 3205,
+
+       DR_DUMP_REC_TYPE_RULE = 3300,
+       DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 = 3301,
+       DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0 = 3302,
+       DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 = 3303,
+       DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1 = 3304,
+
+       DR_DUMP_REC_TYPE_ACTION_ENCAP_L2 = 3400,
+       DR_DUMP_REC_TYPE_ACTION_ENCAP_L3 = 3401,
+       DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR = 3402,
+       DR_DUMP_REC_TYPE_ACTION_DROP = 3403,
+       DR_DUMP_REC_TYPE_ACTION_QP = 3404,
+       DR_DUMP_REC_TYPE_ACTION_FT = 3405,
+       DR_DUMP_REC_TYPE_ACTION_CTR = 3406,
+       DR_DUMP_REC_TYPE_ACTION_TAG = 3407,
+       DR_DUMP_REC_TYPE_ACTION_VPORT = 3408,
+       DR_DUMP_REC_TYPE_ACTION_DECAP_L2 = 3409,
+       DR_DUMP_REC_TYPE_ACTION_DECAP_L3 = 3410,
+       DR_DUMP_REC_TYPE_ACTION_DEVX_TIR = 3411,
+       DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN = 3412,
+       DR_DUMP_REC_TYPE_ACTION_POP_VLAN = 3413,
+       DR_DUMP_REC_TYPE_ACTION_SAMPLER = 3415,
+       DR_DUMP_REC_TYPE_ACTION_INSERT_HDR = 3420,
+       DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR = 3421,
+       DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE = 3425,
+};
+
+/* Allocate a fresh dump buffer and append it to dump_data->buff_list.
+ * Returns the new buffer, or NULL on allocation failure.
+ */
+static struct mlx5dr_dbg_dump_buff *
+mlx5dr_dbg_dump_data_init_new_buff(struct mlx5dr_dbg_dump_data *dump_data)
+{
+       struct mlx5dr_dbg_dump_buff *new_buff;
+
+       new_buff = kzalloc(sizeof(*new_buff), GFP_KERNEL);
+       if (!new_buff)
+               return NULL;
+
+       /* Buffer payload may be large - use kvzalloc */
+       new_buff->buff = kvzalloc(MLX5DR_DEBUG_DUMP_BUFF_SIZE, GFP_KERNEL);
+       if (!new_buff->buff) {
+               kfree(new_buff);
+               return NULL;
+       }
+
+       INIT_LIST_HEAD(&new_buff->node);
+       list_add_tail(&new_buff->node, &dump_data->buff_list);
+
+       return new_buff;
+}
+
+/* Allocate dump state with one initial buffer on its list.
+ * Returns NULL on allocation failure (the partially built state is
+ * freed). Free with mlx5dr_dbg_destroy_dump_data().
+ */
+static struct mlx5dr_dbg_dump_data *
+mlx5dr_dbg_create_dump_data(void)
+{
+       struct mlx5dr_dbg_dump_data *dump_data;
+
+       dump_data = kzalloc(sizeof(*dump_data), GFP_KERNEL);
+       if (!dump_data)
+               return NULL;
+
+       INIT_LIST_HEAD(&dump_data->buff_list);
+
+       if (!mlx5dr_dbg_dump_data_init_new_buff(dump_data)) {
+               kfree(dump_data);
+               return NULL;
+       }
+
+       return dump_data;
+}
+
+/* Free all dump buffers and the dump state itself. NULL-safe. */
+static void
+mlx5dr_dbg_destroy_dump_data(struct mlx5dr_dbg_dump_data *dump_data)
+{
+       struct mlx5dr_dbg_dump_buff *dump_buff, *tmp_buff;
+
+       if (!dump_data)
+               return;
+
+       /* _safe iteration: entries are deleted while walking the list */
+       list_for_each_entry_safe(dump_buff, tmp_buff, &dump_data->buff_list, node) {
+               kvfree(dump_buff->buff);
+               list_del(&dump_buff->node);
+               kfree(dump_buff);
+       }
+
+       kfree(dump_data);
+}
+
+/* Append the @size-byte string @str to the domain's dump buffers.
+ * Writes into the last buffer on the list; if @str does not fit, the
+ * remainder spills into a freshly allocated buffer. A single string
+ * longer than one buffer is rejected with -EINVAL. Returns 0 or a
+ * negative errno.
+ */
+static int
+mlx5dr_dbg_dump_data_print(struct seq_file *file, char *str, u32 size)
+{
+       struct mlx5dr_domain *dmn = file->private;
+       struct mlx5dr_dbg_dump_data *dump_data;
+       struct mlx5dr_dbg_dump_buff *buff;
+       u32 buff_capacity, write_size;
+       int remain_size, ret;
+
+       if (size >= MLX5DR_DEBUG_DUMP_BUFF_SIZE)
+               return -EINVAL;
+
+       dump_data = dmn->dump_info.dump_data;
+       buff = list_last_entry(&dump_data->buff_list,
+                              struct mlx5dr_dbg_dump_buff, node);
+
+       /* -1 reserves room for snprintf's NUL terminator */
+       buff_capacity = (MLX5DR_DEBUG_DUMP_BUFF_SIZE - 1) - buff->index;
+       remain_size = buff_capacity - size;
+       /* Negative remain_size means the tail spills to a new buffer */
+       write_size = (remain_size > 0) ? size : buff_capacity;
+
+       if (likely(write_size)) {
+               /* +1: snprintf's size includes the terminating NUL */
+               ret = snprintf(buff->buff + buff->index, write_size + 1, "%s", str);
+               if (ret < 0)
+                       return ret;
+
+               buff->index += write_size;
+       }
+
+       if (remain_size < 0) {
+               /* Write the leftover bytes into a new buffer */
+               remain_size *= -1;
+               buff = mlx5dr_dbg_dump_data_init_new_buff(dump_data);
+               if (!buff)
+                       return -ENOMEM;
+
+               ret = snprintf(buff->buff, remain_size + 1, "%s", str + write_size);
+               if (ret < 0)
+                       return ret;
+
+               buff->index += remain_size;
+       }
+
+       return 0;
+}
+
+/* Register a table on its domain's debug-dump list (under dbg_mutex) */
+void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl)
+{
+       mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
+       list_add_tail(&tbl->dbg_node, &tbl->dmn->dbg_tbl_list);
+       mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
+}
+
+/* Unregister a table from its domain's debug-dump list */
+void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl)
+{
+       mutex_lock(&tbl->dmn->dump_info.dbg_mutex);
+       list_del(&tbl->dbg_node);
+       mutex_unlock(&tbl->dmn->dump_info.dbg_mutex);
+}
+
+/* Register a rule on its matcher's debug-dump list (under the domain's
+ * dbg_mutex).
+ */
+void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule)
+{
+       struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+
+       mutex_lock(&dmn->dump_info.dbg_mutex);
+       list_add_tail(&rule->dbg_node, &rule->matcher->dbg_rule_list);
+       mutex_unlock(&dmn->dump_info.dbg_mutex);
+}
+
+/* Unregister a rule from its matcher's debug-dump list */
+void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule)
+{
+       struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+
+       mutex_lock(&dmn->dump_info.dbg_mutex);
+       list_del(&rule->dbg_node);
+       mutex_unlock(&dmn->dump_info.dbg_mutex);
+}
+
+/* Convert an ICM address to the 32-bit index printed in dump records
+ * (drops the low 6 bits - 64-byte granularity).
+ */
+static u64 dr_dump_icm_to_idx(u64 icm_addr)
+{
+       return (icm_addr >> 6) & 0xffffffff;
+}
+
+/* Size of the stack buffer used for hex-encoded dump fields */
+#define DR_HEX_SIZE 256
+
+/* Hex-encode @size bytes of @src into @hex and NUL-terminate.
+ * Oversized input is truncated (with a one-time warning) rather than
+ * overflowing the fixed-size output buffer.
+ */
+static void
+dr_dump_hex_print(char hex[DR_HEX_SIZE], char *src, u32 size)
+{
+       if (WARN_ON_ONCE(DR_HEX_SIZE < 2 * size + 1))
+               size = DR_HEX_SIZE / 2 - 1; /* truncate */
+
+       bin2hex(hex, src, size);
+       hex[2 * size] = 0; /* NULL-terminate */
+}
+
+static int
+dr_dump_rule_action_mem(struct seq_file *file, char *buff, const u64 rule_id,
+                       struct mlx5dr_rule_action_member *action_mem)
+{
+       struct mlx5dr_action *action = action_mem->action;
+       const u64 action_id = DR_DBG_PTR_TO_ID(action);
+       u64 hit_tbl_ptr, miss_tbl_ptr;
+       u32 hit_tbl_id, miss_tbl_id;
+       int ret;
+
+       switch (action->action_type) {
+       case DR_ACTION_TYP_DROP:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx\n",
+                              DR_DUMP_REC_TYPE_ACTION_DROP, action_id,
+                              rule_id);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_FT:
+               if (action->dest_tbl->is_fw_tbl)
+                       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                                      "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+                                      DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+                                      rule_id, action->dest_tbl->fw_tbl.id,
+                                      -1);
+               else
+                       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                                      "%d,0x%llx,0x%llx,0x%x,0x%llx\n",
+                                      DR_DUMP_REC_TYPE_ACTION_FT, action_id,
+                                      rule_id, action->dest_tbl->tbl->table_id,
+                                      DR_DBG_PTR_TO_ID(action->dest_tbl->tbl));
+
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_CTR:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x\n",
+                              DR_DUMP_REC_TYPE_ACTION_CTR, action_id, rule_id,
+                              action->ctr->ctr_id + action->ctr->offset);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_TAG:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x\n",
+                              DR_DUMP_REC_TYPE_ACTION_TAG, action_id, rule_id,
+                              action->flow_tag->flow_tag);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_MODIFY_HDR:
+       {
+               struct mlx5dr_ptrn_obj *ptrn = action->rewrite->ptrn;
+               struct mlx5dr_arg_obj *arg = action->rewrite->arg;
+               u8 *rewrite_data = action->rewrite->data;
+               bool ptrn_arg;
+               int i;
+
+               ptrn_arg = !action->rewrite->single_action_opt && ptrn && arg;
+
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x,%d,0x%x,0x%x,0x%x",
+                              DR_DUMP_REC_TYPE_ACTION_MODIFY_HDR, action_id,
+                              rule_id, action->rewrite->index,
+                              action->rewrite->single_action_opt,
+                              ptrn_arg ? action->rewrite->num_of_actions : 0,
+                              ptrn_arg ? ptrn->index : 0,
+                              ptrn_arg ? mlx5dr_arg_get_obj_id(arg) : 0);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+
+               if (ptrn_arg) {
+                       for (i = 0; i < action->rewrite->num_of_actions; i++) {
+                               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                                              ",0x%016llx",
+                                              be64_to_cpu(((__be64 *)rewrite_data)[i]));
+                               if (ret < 0)
+                                       return ret;
+
+                               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+                               if (ret)
+                                       return ret;
+                       }
+               }
+
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "\n");
+               if (ret < 0)
+                       return ret;
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       }
+       case DR_ACTION_TYP_VPORT:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x\n",
+                              DR_DUMP_REC_TYPE_ACTION_VPORT, action_id, rule_id,
+                              action->vport->caps->num);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_TNL_L2_TO_L2:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx\n",
+                              DR_DUMP_REC_TYPE_ACTION_DECAP_L2, action_id,
+                              rule_id);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_TNL_L3_TO_L2:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x\n",
+                              DR_DUMP_REC_TYPE_ACTION_DECAP_L3, action_id,
+                              rule_id,
+                              (action->rewrite->ptrn && action->rewrite->arg) ?
+                              mlx5dr_arg_get_obj_id(action->rewrite->arg) :
+                              action->rewrite->index);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_L2_TO_TNL_L2:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x\n",
+                              DR_DUMP_REC_TYPE_ACTION_ENCAP_L2, action_id,
+                              rule_id, action->reformat->id);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_L2_TO_TNL_L3:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x\n",
+                              DR_DUMP_REC_TYPE_ACTION_ENCAP_L3, action_id,
+                              rule_id, action->reformat->id);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_POP_VLAN:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx\n",
+                              DR_DUMP_REC_TYPE_ACTION_POP_VLAN, action_id,
+                              rule_id);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_PUSH_VLAN:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x\n",
+                              DR_DUMP_REC_TYPE_ACTION_PUSH_VLAN, action_id,
+                              rule_id, action->push_vlan->vlan_hdr);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_INSERT_HDR:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+                              DR_DUMP_REC_TYPE_ACTION_INSERT_HDR, action_id,
+                              rule_id, action->reformat->id,
+                              action->reformat->param_0,
+                              action->reformat->param_1);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_REMOVE_HDR:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x\n",
+                              DR_DUMP_REC_TYPE_ACTION_REMOVE_HDR, action_id,
+                              rule_id, action->reformat->id,
+                              action->reformat->param_0,
+                              action->reformat->param_1);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_SAMPLER:
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x,0x%x,0x%x,0x%llx,0x%llx\n",
+                              DR_DUMP_REC_TYPE_ACTION_SAMPLER, action_id,
+                              rule_id, 0, 0, action->sampler->sampler_id,
+                              action->sampler->rx_icm_addr,
+                              action->sampler->tx_icm_addr);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       case DR_ACTION_TYP_RANGE:
+               if (action->range->hit_tbl_action->dest_tbl->is_fw_tbl) {
+                       hit_tbl_id = action->range->hit_tbl_action->dest_tbl->fw_tbl.id;
+                       hit_tbl_ptr = 0;
+               } else {
+                       hit_tbl_id = action->range->hit_tbl_action->dest_tbl->tbl->table_id;
+                       hit_tbl_ptr =
+                               DR_DBG_PTR_TO_ID(action->range->hit_tbl_action->dest_tbl->tbl);
+               }
+
+               if (action->range->miss_tbl_action->dest_tbl->is_fw_tbl) {
+                       miss_tbl_id = action->range->miss_tbl_action->dest_tbl->fw_tbl.id;
+                       miss_tbl_ptr = 0;
+               } else {
+                       miss_tbl_id = action->range->miss_tbl_action->dest_tbl->tbl->table_id;
+                       miss_tbl_ptr =
+                               DR_DBG_PTR_TO_ID(action->range->miss_tbl_action->dest_tbl->tbl);
+               }
+
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,0x%llx,0x%x,0x%llx,0x%x,0x%llx,0x%x\n",
+                              DR_DUMP_REC_TYPE_ACTION_MATCH_RANGE, action_id,
+                              rule_id, hit_tbl_id, hit_tbl_ptr, miss_tbl_id,
+                              miss_tbl_ptr, action->range->definer_id);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+               break;
+       default:
+               return 0;
+       }
+
+       return 0;
+}
+
+/* Dump a single STE of a rule as one CSV record.
+ * The record type encodes the direction (RX/TX) and the device STE
+ * format: v0 for MLX5_STEERING_FORMAT_CONNECTX_5, v1 otherwise.
+ * Returns 0 on success or a negative error code.
+ */
+static int
+dr_dump_rule_mem(struct seq_file *file, char *buff, struct mlx5dr_ste *ste,
+                bool is_rx, const u64 rule_id, u8 format_ver)
+{
+       char hw_ste_dump[DR_HEX_SIZE];
+       u32 mem_rec_type;
+       int ret;
+
+       if (format_ver == MLX5_STEERING_FORMAT_CONNECTX_5) {
+               mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V0 :
+                                      DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V0;
+       } else {
+               mem_rec_type = is_rx ? DR_DUMP_REC_TYPE_RULE_RX_ENTRY_V1 :
+                                      DR_DUMP_REC_TYPE_RULE_TX_ENTRY_V1;
+       }
+
+       /* Hex-encode the reduced HW STE image into hw_ste_dump */
+       dr_dump_hex_print(hw_ste_dump, (char *)mlx5dr_ste_get_hw_ste(ste),
+                         DR_STE_SIZE_REDUCED);
+
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                      "%d,0x%llx,0x%llx,%s\n", mem_rec_type,
+                      dr_dump_icm_to_idx(mlx5dr_ste_get_icm_addr(ste)),
+                      rule_id, hw_ste_dump);
+       /* NOTE(review): only ret < 0 is checked here (and throughout this
+        * file); snprintf truncation (ret >= MLX5DR_DEBUG_DUMP_BUFF_LENGTH)
+        * would silently emit a clipped record.
+        */
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Dump all STEs of one rule direction (RX or TX).
+ * Members are collected backwards starting from last_rule_ste, then
+ * emitted in reverse collection order (i.e. original rule order).
+ * A failure to collect the members is treated as "nothing to dump".
+ */
+static int
+dr_dump_rule_rx_tx(struct seq_file *file, char *buff,
+                  struct mlx5dr_rule_rx_tx *rule_rx_tx,
+                  bool is_rx, const u64 rule_id, u8 format_ver)
+{
+       struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
+       struct mlx5dr_ste *curr_ste = rule_rx_tx->last_rule_ste;
+       int ret, i;
+
+       /* i is set by the callee to the number of collected STEs */
+       if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
+               return 0;
+
+       while (i--) {
+               ret = dr_dump_rule_mem(file, buff, ste_arr[i], is_rx, rule_id,
+                                      format_ver);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Dump one rule: the rule record itself, its RX and TX STE chains
+ * (when the corresponding nic_matcher exists), and every action member
+ * attached to the rule.
+ * noinline_for_stack: buff is a large on-stack buffer; keep it out of
+ * the caller's frame.
+ */
+static noinline_for_stack int
+dr_dump_rule(struct seq_file *file, struct mlx5dr_rule *rule)
+{
+       struct mlx5dr_rule_action_member *action_mem;
+       const u64 rule_id = DR_DBG_PTR_TO_ID(rule);
+       char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+       struct mlx5dr_rule_rx_tx *rx = &rule->rx;
+       struct mlx5dr_rule_rx_tx *tx = &rule->tx;
+       u8 format_ver;
+       int ret;
+
+       format_ver = rule->matcher->tbl->dmn->info.caps.sw_format_ver;
+
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                      "%d,0x%llx,0x%llx\n", DR_DUMP_REC_TYPE_RULE,
+                      rule_id, DR_DBG_PTR_TO_ID(rule->matcher));
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       if (rx->nic_matcher) {
+               ret = dr_dump_rule_rx_tx(file, buff, rx, true, rule_id, format_ver);
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (tx->nic_matcher) {
+               ret = dr_dump_rule_rx_tx(file, buff, tx, false, rule_id, format_ver);
+               if (ret < 0)
+                       return ret;
+       }
+
+       list_for_each_entry(action_mem, &rule->rule_actions_list, list) {
+               ret = dr_dump_rule_action_mem(file, buff, rule_id, action_mem);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Dump a matcher's match mask as a single CSV record:
+ *   <rec_type>,<matcher_id>,<outer>,<inner>,<misc>,<misc2>,<misc3>\n
+ * Each mask field is hex-encoded only when its criteria bit is set;
+ * otherwise an empty field is emitted so the column positions stay
+ * fixed for the dump parser. misc3 terminates the record line.
+ */
+static int
+dr_dump_matcher_mask(struct seq_file *file, char *buff,
+                    struct mlx5dr_match_param *mask,
+                    u8 criteria, const u64 matcher_id)
+{
+       char dump[DR_HEX_SIZE];
+       int ret;
+
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, "%d,0x%llx,",
+                      DR_DUMP_REC_TYPE_MATCHER_MASK, matcher_id);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       if (criteria & DR_MATCHER_CRITERIA_OUTER) {
+               dr_dump_hex_print(dump, (char *)&mask->outer, sizeof(mask->outer));
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%s,", dump);
+       } else {
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
+       }
+
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       if (criteria & DR_MATCHER_CRITERIA_INNER) {
+               dr_dump_hex_print(dump, (char *)&mask->inner, sizeof(mask->inner));
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%s,", dump);
+       } else {
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
+       }
+
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       if (criteria & DR_MATCHER_CRITERIA_MISC) {
+               dr_dump_hex_print(dump, (char *)&mask->misc, sizeof(mask->misc));
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%s,", dump);
+       } else {
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
+       }
+
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       if (criteria & DR_MATCHER_CRITERIA_MISC2) {
+               dr_dump_hex_print(dump, (char *)&mask->misc2, sizeof(mask->misc2));
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%s,", dump);
+       } else {
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",");
+       }
+
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       /* last field: terminate the record with '\n' instead of ',' */
+       if (criteria & DR_MATCHER_CRITERIA_MISC3) {
+               dr_dump_hex_print(dump, (char *)&mask->misc3, sizeof(mask->misc3));
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%s\n", dump);
+       } else {
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH, ",\n");
+       }
+
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Dump one STE builder of a matcher: its index within the builder
+ * array, direction and lookup type.
+ */
+static int
+dr_dump_matcher_builder(struct seq_file *file, char *buff,
+                       struct mlx5dr_ste_build *builder,
+                       u32 index, bool is_rx, const u64 matcher_id)
+{
+       int ret;
+
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                      "%d,0x%llx,%d,%d,0x%x\n",
+                      DR_DUMP_REC_TYPE_MATCHER_BUILDER, matcher_id, index,
+                      is_rx, builder->lu_type);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Dump one matcher direction (RX or TX): the nic matcher record with
+ * its start-hash-table and end-anchor ICM indices, followed by one
+ * record per STE builder.
+ */
+static int
+dr_dump_matcher_rx_tx(struct seq_file *file, char *buff, bool is_rx,
+                     struct mlx5dr_matcher_rx_tx *matcher_rx_tx,
+                     const u64 matcher_id)
+{
+       enum dr_dump_rec_type rec_type;
+       u64 s_icm_addr, e_icm_addr;
+       int i, ret;
+
+       rec_type = is_rx ? DR_DUMP_REC_TYPE_MATCHER_RX :
+                          DR_DUMP_REC_TYPE_MATCHER_TX;
+
+       s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->s_htbl->chunk);
+       e_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(matcher_rx_tx->e_anchor->chunk);
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                      "%d,0x%llx,0x%llx,%d,0x%llx,0x%llx\n",
+                      rec_type, DR_DBG_PTR_TO_ID(matcher_rx_tx),
+                      matcher_id, matcher_rx_tx->num_of_builders,
+                      dr_dump_icm_to_idx(s_icm_addr),
+                      dr_dump_icm_to_idx(e_icm_addr));
+
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       for (i = 0; i < matcher_rx_tx->num_of_builders; i++) {
+               ret = dr_dump_matcher_builder(file, buff,
+                                             &matcher_rx_tx->ste_builder[i],
+                                             i, is_rx, matcher_id);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Dump one matcher: the matcher record, its match mask, and the RX/TX
+ * nic matchers that exist.
+ * noinline_for_stack: keeps the large buff off the caller's frame.
+ */
+static noinline_for_stack int
+dr_dump_matcher(struct seq_file *file, struct mlx5dr_matcher *matcher)
+{
+       struct mlx5dr_matcher_rx_tx *rx = &matcher->rx;
+       struct mlx5dr_matcher_rx_tx *tx = &matcher->tx;
+       char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+       u64 matcher_id;
+       int ret;
+
+       matcher_id = DR_DBG_PTR_TO_ID(matcher);
+
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                      "%d,0x%llx,0x%llx,%d\n", DR_DUMP_REC_TYPE_MATCHER,
+                      matcher_id, DR_DBG_PTR_TO_ID(matcher->tbl),
+                      matcher->prio);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       ret = dr_dump_matcher_mask(file, buff, &matcher->mask,
+                                  matcher->match_criteria, matcher_id);
+       if (ret < 0)
+               return ret;
+
+       if (rx->nic_tbl) {
+               ret = dr_dump_matcher_rx_tx(file, buff, true, rx, matcher_id);
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (tx->nic_tbl) {
+               ret = dr_dump_matcher_rx_tx(file, buff, false, tx, matcher_id);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Dump a matcher followed by every rule on its debug rule list. */
+static int
+dr_dump_matcher_all(struct seq_file *file, struct mlx5dr_matcher *matcher)
+{
+       struct mlx5dr_rule *rule;
+       int ret;
+
+       ret = dr_dump_matcher(file, matcher);
+       if (ret < 0)
+               return ret;
+
+       list_for_each_entry(rule, &matcher->dbg_rule_list, dbg_node) {
+               ret = dr_dump_rule(file, rule);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Dump one table direction (RX or TX) with its start-anchor ICM index. */
+static int
+dr_dump_table_rx_tx(struct seq_file *file, char *buff, bool is_rx,
+                   struct mlx5dr_table_rx_tx *table_rx_tx,
+                   const u64 table_id)
+{
+       enum dr_dump_rec_type rec_type;
+       u64 s_icm_addr;
+       int ret;
+
+       rec_type = is_rx ? DR_DUMP_REC_TYPE_TABLE_RX :
+                          DR_DUMP_REC_TYPE_TABLE_TX;
+
+       s_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(table_rx_tx->s_anchor->chunk);
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                      "%d,0x%llx,0x%llx\n", rec_type, table_id,
+                      dr_dump_icm_to_idx(s_icm_addr));
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Dump one table: the table record plus the RX/TX nic tables that
+ * have a nic domain attached.
+ * noinline_for_stack: keeps the large buff off the caller's frame.
+ */
+static noinline_for_stack int
+dr_dump_table(struct seq_file *file, struct mlx5dr_table *table)
+{
+       struct mlx5dr_table_rx_tx *rx = &table->rx;
+       struct mlx5dr_table_rx_tx *tx = &table->tx;
+       char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+       int ret;
+
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                      "%d,0x%llx,0x%llx,%d,%d\n", DR_DUMP_REC_TYPE_TABLE,
+                      DR_DBG_PTR_TO_ID(table), DR_DBG_PTR_TO_ID(table->dmn),
+                      table->table_type, table->level);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       if (rx->nic_dmn) {
+               ret = dr_dump_table_rx_tx(file, buff, true, rx,
+                                         DR_DBG_PTR_TO_ID(table));
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (tx->nic_dmn) {
+               ret = dr_dump_table_rx_tx(file, buff, false, tx,
+                                         DR_DBG_PTR_TO_ID(table));
+               if (ret < 0)
+                       return ret;
+       }
+       return 0;
+}
+
+/* Dump a table followed by every matcher (and its rules) on it. */
+static int dr_dump_table_all(struct seq_file *file, struct mlx5dr_table *tbl)
+{
+       struct mlx5dr_matcher *matcher;
+       int ret;
+
+       ret = dr_dump_table(file, tbl);
+       if (ret < 0)
+               return ret;
+
+       list_for_each_entry(matcher, &tbl->matcher_list, list_node) {
+               ret = dr_dump_matcher_all(file, matcher);
+               if (ret < 0)
+                       return ret;
+       }
+       return 0;
+}
+
+/* Dump the domain's send ring: its CQ number and QP number. */
+static int
+dr_dump_send_ring(struct seq_file *file, char *buff,
+                 struct mlx5dr_send_ring *ring,
+                 const u64 domain_id)
+{
+       int ret;
+
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                      "%d,0x%llx,0x%llx,0x%x,0x%x\n",
+                      DR_DUMP_REC_TYPE_DOMAIN_SEND_RING,
+                      DR_DBG_PTR_TO_ID(ring), domain_id,
+                      ring->cq->mcq.cqn, ring->qp->qpn);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Dump one flex parser capability as a named id record. */
+static int
+dr_dump_domain_info_flex_parser(struct seq_file *file,
+                               char *buff,
+                               const char *flex_parser_name,
+                               const u8 flex_parser_value,
+                               const u64 domain_id)
+{
+       int ret;
+
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                      "%d,0x%llx,%s,0x%x\n",
+                      DR_DUMP_REC_TYPE_DOMAIN_INFO_FLEX_PARSER, domain_id,
+                      flex_parser_name, flex_parser_value);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Dump the domain capabilities record followed by one record per vport
+ * found in the vport caps xarray.
+ */
+static int
+dr_dump_domain_info_caps(struct seq_file *file, char *buff,
+                        struct mlx5dr_cmd_caps *caps,
+                        const u64 domain_id)
+{
+       struct mlx5dr_cmd_vport_cap *vport_caps;
+       unsigned long i, vports_num;
+       int ret;
+
+       /* empty-body loop: on exit vports_num holds the last xarray index
+        * visited, which is reported as the vport count below
+        */
+       xa_for_each(&caps->vports.vports_caps_xa, vports_num, vport_caps)
+               ; /* count the number of vports in xarray */
+
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                      "%d,0x%llx,0x%x,0x%llx,0x%llx,0x%x,%lu,%d\n",
+                      DR_DUMP_REC_TYPE_DOMAIN_INFO_CAPS, domain_id, caps->gvmi,
+                      caps->nic_rx_drop_address, caps->nic_tx_drop_address,
+                      caps->flex_protocols, vports_num, caps->eswitch_manager);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       xa_for_each(&caps->vports.vports_caps_xa, i, vport_caps) {
+               /* NOTE(review): xa_for_each already loads the entry into
+                * vport_caps; this xa_load looks redundant — confirm.
+                */
+               vport_caps = xa_load(&caps->vports.vports_caps_xa, i);
+
+               ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                              "%d,0x%llx,%lu,0x%x,0x%llx,0x%llx\n",
+                              DR_DUMP_REC_TYPE_DOMAIN_INFO_VPORT,
+                              domain_id, i, vport_caps->vport_gvmi,
+                              vport_caps->icm_address_rx,
+                              vport_caps->icm_address_tx);
+               if (ret < 0)
+                       return ret;
+
+               ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+               if (ret)
+                       return ret;
+       }
+       return 0;
+}
+
+/* Dump the domain info: caps record, vport records and the four ICMP
+ * flex parser ids.
+ */
+static int
+dr_dump_domain_info(struct seq_file *file, char *buff,
+                   struct mlx5dr_domain_info *info,
+                   const u64 domain_id)
+{
+       int ret;
+
+       ret = dr_dump_domain_info_caps(file, buff, &info->caps, domain_id);
+       if (ret < 0)
+               return ret;
+
+       ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw0",
+                                             info->caps.flex_parser_id_icmp_dw0,
+                                             domain_id);
+       if (ret < 0)
+               return ret;
+
+       ret = dr_dump_domain_info_flex_parser(file, buff, "icmp_dw1",
+                                             info->caps.flex_parser_id_icmp_dw1,
+                                             domain_id);
+       if (ret < 0)
+               return ret;
+
+       ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw0",
+                                             info->caps.flex_parser_id_icmpv6_dw0,
+                                             domain_id);
+       if (ret < 0)
+               return ret;
+
+       ret = dr_dump_domain_info_flex_parser(file, buff, "icmpv6_dw1",
+                                             info->caps.flex_parser_id_icmpv6_dw1,
+                                             domain_id);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+/* Dump the domain record (type, gvmi, SW steering support, kernel
+ * version as the package version, PCI device name, buddy counters),
+ * then the domain info and, when SW steering is supported, the send
+ * ring.
+ * noinline_for_stack: keeps the large buff off the caller's frame.
+ */
+static noinline_for_stack int
+dr_dump_domain(struct seq_file *file, struct mlx5dr_domain *dmn)
+{
+       char buff[MLX5DR_DEBUG_DUMP_BUFF_LENGTH];
+       u64 domain_id = DR_DBG_PTR_TO_ID(dmn);
+       int ret;
+
+       ret = snprintf(buff, MLX5DR_DEBUG_DUMP_BUFF_LENGTH,
+                      "%d,0x%llx,%d,0%x,%d,%u.%u.%u,%s,%d,%u,%u,%u\n",
+                      DR_DUMP_REC_TYPE_DOMAIN,
+                      domain_id, dmn->type, dmn->info.caps.gvmi,
+                      dmn->info.supp_sw_steering,
+                      /* package version */
+                      LINUX_VERSION_MAJOR, LINUX_VERSION_PATCHLEVEL,
+                      LINUX_VERSION_SUBLEVEL,
+                      pci_name(dmn->mdev->pdev),
+                      0, /* domain flags */
+                      dmn->num_buddies[DR_ICM_TYPE_STE],
+                      dmn->num_buddies[DR_ICM_TYPE_MODIFY_ACTION],
+                      dmn->num_buddies[DR_ICM_TYPE_MODIFY_HDR_PTRN]);
+       if (ret < 0)
+               return ret;
+
+       ret = mlx5dr_dbg_dump_data_print(file, buff, ret);
+       if (ret)
+               return ret;
+
+       ret = dr_dump_domain_info(file, buff, &dmn->info, domain_id);
+       if (ret < 0)
+               return ret;
+
+       if (dmn->info.supp_sw_steering) {
+               ret = dr_dump_send_ring(file, buff, dmn->send_ring, domain_id);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+/* Dump the whole domain hierarchy (domain, tables, matchers, rules)
+ * under dbg_mutex and the domain lock, in that order, so the steering
+ * objects cannot change mid-dump.
+ */
+static int dr_dump_domain_all(struct seq_file *file, struct mlx5dr_domain *dmn)
+{
+       struct mlx5dr_table *tbl;
+       int ret;
+
+       mutex_lock(&dmn->dump_info.dbg_mutex);
+       mlx5dr_domain_lock(dmn);
+
+       ret = dr_dump_domain(file, dmn);
+       if (ret < 0)
+               goto unlock_mutex;
+
+       list_for_each_entry(tbl, &dmn->dbg_tbl_list, dbg_node) {
+               ret = dr_dump_table_all(file, tbl);
+               if (ret < 0)
+                       break;
+       }
+
+unlock_mutex:
+       mlx5dr_domain_unlock(dmn);
+       mutex_unlock(&dmn->dump_info.dbg_mutex);
+       return ret;
+}
+
+/* seq_file .start: refuse concurrent dumps with -EBUSY; on the first
+ * call (*pos == 0) materialize the whole dump into dump_data, then
+ * hand out list positions from its buffer list. On failure the state
+ * is released and NULL ends the sequence.
+ */
+static void *
+dr_dump_start(struct seq_file *file, loff_t *pos)
+{
+       struct mlx5dr_domain *dmn = file->private;
+       struct mlx5dr_dbg_dump_data *dump_data;
+
+       /* NOTE(review): atomic_read followed by atomic_set is not an
+        * atomic test-and-set; two racing readers could both pass this
+        * check — confirm whether an atomic_cmpxchg is warranted here.
+        */
+       if (atomic_read(&dmn->dump_info.state) != MLX5DR_DEBUG_DUMP_STATE_FREE) {
+               mlx5_core_warn(dmn->mdev, "Dump already in progress\n");
+               return ERR_PTR(-EBUSY);
+       }
+
+       atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS);
+       dump_data = dmn->dump_info.dump_data;
+
+       if (dump_data) {
+               return seq_list_start(&dump_data->buff_list, *pos);
+       } else if (*pos == 0) {
+               dump_data = mlx5dr_dbg_create_dump_data();
+               if (!dump_data)
+                       goto exit;
+
+               dmn->dump_info.dump_data = dump_data;
+               if (dr_dump_domain_all(file, dmn)) {
+                       mlx5dr_dbg_destroy_dump_data(dump_data);
+                       dmn->dump_info.dump_data = NULL;
+                       goto exit;
+               }
+
+               return seq_list_start(&dump_data->buff_list, *pos);
+       }
+
+exit:
+       atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+       return NULL;
+}
+
+/* seq_file .next: advance to the next pre-built dump buffer. */
+static void *
+dr_dump_next(struct seq_file *file, void *v, loff_t *pos)
+{
+       struct mlx5dr_domain *dmn = file->private;
+       struct mlx5dr_dbg_dump_data *dump_data;
+
+       dump_data = dmn->dump_info.dump_data;
+
+       return seq_list_next(v, &dump_data->buff_list, pos);
+}
+
+/* seq_file .stop: when iteration finished (v == NULL) free the cached
+ * dump data; in all non-error cases release the in-progress state so
+ * a new dump may start. An ERR_PTR v (failed start) is left alone.
+ */
+static void
+dr_dump_stop(struct seq_file *file, void *v)
+{
+       struct mlx5dr_domain *dmn = file->private;
+       struct mlx5dr_dbg_dump_data *dump_data;
+
+       if (v && IS_ERR(v))
+               return;
+
+       if (!v) {
+               dump_data = dmn->dump_info.dump_data;
+               if (dump_data) {
+                       mlx5dr_dbg_destroy_dump_data(dump_data);
+                       dmn->dump_info.dump_data = NULL;
+               }
+       }
+
+       atomic_set(&dmn->dump_info.state, MLX5DR_DEBUG_DUMP_STATE_FREE);
+}
+
+/* seq_file .show: print one pre-formatted dump buffer to the seq file. */
+static int
+dr_dump_show(struct seq_file *file, void *v)
+{
+       struct mlx5dr_dbg_dump_buff *entry;
+
+       entry = list_entry(v, struct mlx5dr_dbg_dump_buff, node);
+       seq_printf(file, "%s", entry->buff);
+
+       return 0;
+}
+
+/* seq_file iterator over the pre-built dump buffer list; generates
+ * dr_dump_fops for the debugfs file below.
+ */
+static const struct seq_operations dr_dump_sops = {
+       .start  = dr_dump_start,
+       .next   = dr_dump_next,
+       .stop   = dr_dump_stop,
+       .show   = dr_dump_show,
+};
+DEFINE_SEQ_ATTRIBUTE(dr_dump);
+
+/* Create the debugfs dump entry for an FDB domain:
+ *   <dev debugfs root>/steering/fdb/dmn_<hashed ptr>
+ * Non-FDB (NIC RX/TX) domains are not supported and only warn.
+ * Also initializes the debug table list and the dump mutex.
+ */
+void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn)
+{
+       struct mlx5_core_dev *dev = dmn->mdev;
+       char file_name[128];
+
+       if (dmn->type != MLX5DR_DOMAIN_TYPE_FDB) {
+               mlx5_core_warn(dev,
+                              "Steering dump is not supported for NIC RX/TX domains\n");
+               return;
+       }
+
+       /* debugfs_create_* results are deliberately unchecked; the
+        * debugfs API tolerates error-pointer parents
+        */
+       dmn->dump_info.steering_debugfs =
+               debugfs_create_dir("steering", mlx5_debugfs_get_dev_root(dev));
+       dmn->dump_info.fdb_debugfs =
+               debugfs_create_dir("fdb", dmn->dump_info.steering_debugfs);
+
+       sprintf(file_name, "dmn_%p", dmn);
+       debugfs_create_file(file_name, 0444, dmn->dump_info.fdb_debugfs,
+                           dmn, &dr_dump_fops);
+
+       INIT_LIST_HEAD(&dmn->dbg_tbl_list);
+       mutex_init(&dmn->dump_info.dbg_mutex);
+}
+
+/* Tear down the debugfs tree and the dump mutex created at init. */
+void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn)
+{
+       debugfs_remove_recursive(dmn->dump_info.steering_debugfs);
+       mutex_destroy(&dmn->dump_info.dbg_mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_dbg.h
new file mode 100644 (file)
index 0000000..57c6b36
--- /dev/null
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+/* NOTE(review): this header has no include guard — presumably it is only
+ * included from dr_types.h/dr_dbg.c; confirm before adding new includers.
+ */
+
+/* Size of one dump buffer allocation (64 MB) — assumed from the name;
+ * TODO confirm against the allocation site in dr_dbg.c.
+ */
+#define MLX5DR_DEBUG_DUMP_BUFF_SIZE (64 * 1024 * 1024)
+#define MLX5DR_DEBUG_DUMP_BUFF_LENGTH 512
+
+/* Dump state machine, kept in mlx5dr_dbg_dump_info.state (atomic):
+ * FREE        - no dump in progress, dump_data may be (re)built
+ * IN_PROGRESS - a reader is iterating the dump buffers
+ */
+enum {
+       MLX5DR_DEBUG_DUMP_STATE_FREE,
+       MLX5DR_DEBUG_DUMP_STATE_IN_PROGRESS,
+};
+
+/* One chunk of formatted dump text */
+struct mlx5dr_dbg_dump_buff {
+       char *buff;             /* formatted dump text */
+       u32 index;              /* write offset into buff — TODO confirm */
+       struct list_head node;  /* entry in mlx5dr_dbg_dump_data.buff_list */
+};
+
+/* A complete dump: ordered list of dump buffers */
+struct mlx5dr_dbg_dump_data {
+       struct list_head buff_list;
+};
+
+/* Per-domain debug-dump context (embedded in struct mlx5dr_domain) */
+struct mlx5dr_dbg_dump_info {
+       struct mutex dbg_mutex; /* protect dbg lists */
+       struct dentry *steering_debugfs;        /* <dev>/steering */
+       struct dentry *fdb_debugfs;             /* <dev>/steering/fdb */
+       struct mlx5dr_dbg_dump_data *dump_data; /* current dump, or NULL */
+       atomic_t state;                         /* MLX5DR_DEBUG_DUMP_STATE_* */
+};
+
+void mlx5dr_dbg_init_dump(struct mlx5dr_domain *dmn);
+void mlx5dr_dbg_uninit_dump(struct mlx5dr_domain *dmn);
+void mlx5dr_dbg_tbl_add(struct mlx5dr_table *tbl);
+void mlx5dr_dbg_tbl_del(struct mlx5dr_table *tbl);
+void mlx5dr_dbg_rule_add(struct mlx5dr_rule *rule);
+void mlx5dr_dbg_rule_del(struct mlx5dr_rule *rule);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_definer.c
new file mode 100644 (file)
index 0000000..d5ea977
--- /dev/null
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "dr_types.h"
+#include "dr_ste.h"
+
+/* Refcounted cache entry for a FW "definer" object, keyed by its FW id
+ * in dmn->definers_xa. Identical (format_id, selectors, mask) requests
+ * share one entry instead of creating duplicate FW objects.
+ */
+struct dr_definer_object {
+       u32 id;         /* FW object id; must fit in 8 bits for STE use */
+       u16 format_id;
+       u8 dw_selectors[MLX5_IFC_DEFINER_DW_SELECTORS_NUM];
+       u8 byte_selectors[MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM];
+       u8 match_mask[DR_STE_SIZE_MATCH_TAG];
+       refcount_t refcount;
+};
+
+/* Return true iff @definer matches the requested format id, both
+ * selector arrays and the full match mask — i.e. an existing FW definer
+ * can be reused for this request.
+ */
+static bool dr_definer_compare(struct dr_definer_object *definer,
+                              u16 format_id, u8 *dw_selectors,
+                              u8 *byte_selectors, u8 *match_mask)
+{
+       int i;
+
+       if (definer->format_id != format_id)
+               return false;
+
+       for (i = 0; i < MLX5_IFC_DEFINER_DW_SELECTORS_NUM; i++)
+               if (definer->dw_selectors[i] != dw_selectors[i])
+                       return false;
+
+       for (i = 0; i < MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM; i++)
+               if (definer->byte_selectors[i] != byte_selectors[i])
+                       return false;
+
+       if (memcmp(definer->match_mask, match_mask, DR_STE_SIZE_MATCH_TAG))
+               return false;
+
+       return true;
+}
+
+/* Linear scan of all cached definers in the domain for one equivalent
+ * to the requested parameters; returns NULL when none exists.
+ */
+static struct dr_definer_object *
+dr_definer_find_obj(struct mlx5dr_domain *dmn, u16 format_id,
+                   u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
+{
+       struct dr_definer_object *definer_obj;
+       unsigned long id;
+
+       xa_for_each(&dmn->definers_xa, id, definer_obj) {
+               if (dr_definer_compare(definer_obj, format_id,
+                                      dw_selectors, byte_selectors,
+                                      match_mask))
+                       return definer_obj;
+       }
+
+       return NULL;
+}
+
+/* Create a FW definer object and cache it in dmn->definers_xa with an
+ * initial refcount of 1. Returns the new cache entry, or NULL on any
+ * failure (allocation, FW command, oversized id, xarray insert).
+ */
+static struct dr_definer_object *
+dr_definer_create_obj(struct mlx5dr_domain *dmn, u16 format_id,
+                     u8 *dw_selectors, u8 *byte_selectors, u8 *match_mask)
+{
+       struct dr_definer_object *definer_obj;
+       int ret = 0;
+
+       definer_obj = kzalloc(sizeof(*definer_obj), GFP_KERNEL);
+       if (!definer_obj)
+               return NULL;
+
+       ret = mlx5dr_cmd_create_definer(dmn->mdev,
+                                       format_id,
+                                       dw_selectors,
+                                       byte_selectors,
+                                       match_mask,
+                                       &definer_obj->id);
+       if (ret)
+               goto err_free_definer_obj;
+
+       /* Definer ID can have 32 bits, but STE format
+        * supports only definers with 8 bit IDs.
+        */
+       if (definer_obj->id > 0xff) {
+               mlx5dr_err(dmn, "Unsupported definer ID (%d)\n", definer_obj->id);
+               goto err_destroy_definer;
+       }
+
+       definer_obj->format_id = format_id;
+       memcpy(definer_obj->dw_selectors, dw_selectors, sizeof(definer_obj->dw_selectors));
+       memcpy(definer_obj->byte_selectors, byte_selectors, sizeof(definer_obj->byte_selectors));
+       memcpy(definer_obj->match_mask, match_mask, sizeof(definer_obj->match_mask));
+
+       refcount_set(&definer_obj->refcount, 1);
+
+       /* key the cache by the FW id so mlx5dr_definer_put() can do an
+        * O(1) xa_load() lookup
+        */
+       ret = xa_insert(&dmn->definers_xa, definer_obj->id, definer_obj, GFP_KERNEL);
+       if (ret) {
+               mlx5dr_dbg(dmn, "Couldn't insert new definer into xarray (%d)\n", ret);
+               goto err_destroy_definer;
+       }
+
+       return definer_obj;
+
+err_destroy_definer:
+       mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
+err_free_definer_obj:
+       kfree(definer_obj);
+
+       return NULL;
+}
+
+/* Destroy the FW definer object, drop it from the domain cache and
+ * free the entry. Called when the last reference is put.
+ */
+static void dr_definer_destroy_obj(struct mlx5dr_domain *dmn,
+                                  struct dr_definer_object *definer_obj)
+{
+       mlx5dr_cmd_destroy_definer(dmn->mdev, definer_obj->id);
+       xa_erase(&dmn->definers_xa, definer_obj->id);
+       kfree(definer_obj);
+}
+
+/* Get (or create) a definer matching the given parameters and return
+ * its FW id in @definer_id, taking a reference on the cache entry.
+ * Pair with mlx5dr_definer_put().
+ *
+ * Note: all creation failures are reported as -ENOMEM, since
+ * dr_definer_create_obj() only signals failure via NULL.
+ */
+int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
+                      u8 *dw_selectors, u8 *byte_selectors,
+                      u8 *match_mask, u32 *definer_id)
+{
+       struct dr_definer_object *definer_obj;
+       int ret = 0;
+
+       definer_obj = dr_definer_find_obj(dmn, format_id, dw_selectors,
+                                         byte_selectors, match_mask);
+       if (!definer_obj) {
+               definer_obj = dr_definer_create_obj(dmn, format_id,
+                                                   dw_selectors, byte_selectors,
+                                                   match_mask);
+               if (!definer_obj)
+                       return -ENOMEM;
+       } else {
+               refcount_inc(&definer_obj->refcount);
+       }
+
+       *definer_id = definer_obj->id;
+
+       return ret;
+}
+
+/* Drop a reference taken by mlx5dr_definer_get(); the FW object and
+ * cache entry are destroyed when the last reference goes away.
+ */
+void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id)
+{
+       struct dr_definer_object *definer_obj;
+
+       definer_obj = xa_load(&dmn->definers_xa, definer_id);
+       if (!definer_obj) {
+               mlx5dr_err(dmn, "Definer ID %d not found\n", definer_id);
+               return;
+       }
+
+       if (refcount_dec_and_test(&definer_obj->refcount))
+               dr_definer_destroy_obj(dmn, definer_obj);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_domain.c
new file mode 100644 (file)
index 0000000..3d74109
--- /dev/null
@@ -0,0 +1,579 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/mlx5/eswitch.h>
+#include <linux/err.h>
+#include "dr_types.h"
+
+/* SW steering is supported for a domain NIC type (rx/tx/fdb) when the
+ * device reports sw_owner, or sw_owner_v2 with a steering format no
+ * newer than ConnectX-7.
+ */
+#define DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, dmn_type) \
+       ((dmn)->info.caps.dmn_type##_sw_owner ||        \
+        ((dmn)->info.caps.dmn_type##_sw_owner_v2 &&    \
+         (dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_7))
+
+/* Modify-header pattern/argument objects need ConnectX-6DX+ steering
+ * format and the modify-argument capability.
+ */
+bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn)
+{
+       return dmn->info.caps.sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX &&
+              dmn->info.caps.support_modify_argument;
+}
+
+/* Create the modify-header pattern and argument managers. A no-op
+ * (returning success) on devices without pattern/argument support.
+ * Returns 0 or -ENOMEM.
+ */
+static int dr_domain_init_modify_header_resources(struct mlx5dr_domain *dmn)
+{
+       if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
+               return 0;
+
+       dmn->ptrn_mgr = mlx5dr_ptrn_mgr_create(dmn);
+       if (!dmn->ptrn_mgr) {
+               mlx5dr_err(dmn, "Couldn't create ptrn_mgr\n");
+               return -ENOMEM;
+       }
+
+       /* create argument pool */
+       dmn->arg_mgr = mlx5dr_arg_mgr_create(dmn);
+       if (!dmn->arg_mgr) {
+               mlx5dr_err(dmn, "Couldn't create arg_mgr\n");
+               goto free_modify_header_pattern;
+       }
+
+       return 0;
+
+free_modify_header_pattern:
+       mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
+       return -ENOMEM;
+}
+
+/* Mirror of dr_domain_init_modify_header_resources(): destroy managers
+ * in reverse creation order; no-op when pattern/argument is unsupported
+ * (in which case neither manager was created).
+ */
+static void dr_domain_destroy_modify_header_resources(struct mlx5dr_domain *dmn)
+{
+       if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
+               return;
+
+       mlx5dr_arg_mgr_destroy(dmn->arg_mgr);
+       mlx5dr_ptrn_mgr_destroy(dmn->ptrn_mgr);
+}
+
+static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
+{
+       /* Per vport cached FW FT for checksum recalculation, this
+        * recalculation is needed due to a HW bug in STEv0.
+        */
+       xa_init(&dmn->csum_fts_xa);
+}
+
+/* Destroy every cached per-vport recalc-CS flow table and release the
+ * xarray itself.
+ */
+static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
+{
+       struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+       unsigned long i;
+
+       xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
+               if (recalc_cs_ft)
+                       mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
+       }
+
+       xa_destroy(&dmn->csum_fts_xa);
+}
+
+/* Return (via @rx_icm_addr) the RX ICM address of the per-vport
+ * checksum-recalculation flow table, creating and caching it in
+ * dmn->csum_fts_xa on first use.
+ *
+ * Returns 0 on success, -EINVAL if the FW table can't be created, or
+ * an xarray error code if caching fails.
+ */
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+                                       u16 vport_num,
+                                       u64 *rx_icm_addr)
+{
+       struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+       int ret;
+
+       recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
+       if (!recalc_cs_ft) {
+               /* Table hasn't been created yet */
+               recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
+               if (!recalc_cs_ft)
+                       return -EINVAL;
+
+               ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
+                                     recalc_cs_ft, GFP_KERNEL));
+               if (ret) {
+                       /* Don't leak the FW table if it couldn't be
+                        * cached; nothing references it on failure.
+                        */
+                       mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
+                       return ret;
+               }
+       }
+
+       *rx_icm_addr = recalc_cs_ft->rx_icm_addr;
+
+       return 0;
+}
+
+/* Create the domain's memory resources: kmem caches for ICM chunks and
+ * STE hash tables, the STE and modify-action ICM pools, and the send
+ * info pool. Unwinds in reverse order on failure.
+ */
+static int dr_domain_init_mem_resources(struct mlx5dr_domain *dmn)
+{
+       int ret;
+
+       dmn->chunks_kmem_cache = kmem_cache_create("mlx5_dr_chunks",
+                                                  sizeof(struct mlx5dr_icm_chunk), 0,
+                                                  SLAB_HWCACHE_ALIGN, NULL);
+       if (!dmn->chunks_kmem_cache) {
+               mlx5dr_err(dmn, "Couldn't create chunks kmem_cache\n");
+               return -ENOMEM;
+       }
+
+       dmn->htbls_kmem_cache = kmem_cache_create("mlx5_dr_htbls",
+                                                 sizeof(struct mlx5dr_ste_htbl), 0,
+                                                 SLAB_HWCACHE_ALIGN, NULL);
+       if (!dmn->htbls_kmem_cache) {
+               mlx5dr_err(dmn, "Couldn't create hash tables kmem_cache\n");
+               ret = -ENOMEM;
+               goto free_chunks_kmem_cache;
+       }
+
+       dmn->ste_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
+       if (!dmn->ste_icm_pool) {
+               mlx5dr_err(dmn, "Couldn't get icm memory\n");
+               ret = -ENOMEM;
+               goto free_htbls_kmem_cache;
+       }
+
+       dmn->action_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_ACTION);
+       if (!dmn->action_icm_pool) {
+               mlx5dr_err(dmn, "Couldn't get action icm memory\n");
+               ret = -ENOMEM;
+               goto free_ste_icm_pool;
+       }
+
+       ret = mlx5dr_send_info_pool_create(dmn);
+       if (ret) {
+               mlx5dr_err(dmn, "Couldn't create send info pool\n");
+               goto free_action_icm_pool;
+       }
+
+       return 0;
+
+free_action_icm_pool:
+       mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
+free_ste_icm_pool:
+       mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+free_htbls_kmem_cache:
+       kmem_cache_destroy(dmn->htbls_kmem_cache);
+free_chunks_kmem_cache:
+       kmem_cache_destroy(dmn->chunks_kmem_cache);
+
+       return ret;
+}
+
+/* Destroy everything created by dr_domain_init_mem_resources(), in
+ * reverse creation order.
+ */
+static void dr_domain_uninit_mem_resources(struct mlx5dr_domain *dmn)
+{
+       mlx5dr_send_info_pool_destroy(dmn);
+       mlx5dr_icm_pool_destroy(dmn->action_icm_pool);
+       mlx5dr_icm_pool_destroy(dmn->ste_icm_pool);
+       kmem_cache_destroy(dmn->htbls_kmem_cache);
+       kmem_cache_destroy(dmn->chunks_kmem_cache);
+}
+
+/* Allocate all HW-facing domain resources: STE context (per steering
+ * format version), PD, UAR page, memory pools, modify-header managers
+ * and the send ring. Unwinds in reverse order on failure.
+ */
+static int dr_domain_init_resources(struct mlx5dr_domain *dmn)
+{
+       int ret;
+
+       /* pick the STE format implementation matching the device */
+       dmn->ste_ctx = mlx5dr_ste_get_ctx(dmn->info.caps.sw_format_ver);
+       if (!dmn->ste_ctx) {
+               mlx5dr_err(dmn, "SW Steering on this device is unsupported\n");
+               return -EOPNOTSUPP;
+       }
+
+       ret = mlx5_core_alloc_pd(dmn->mdev, &dmn->pdn);
+       if (ret) {
+               mlx5dr_err(dmn, "Couldn't allocate PD, ret: %d", ret);
+               return ret;
+       }
+
+       dmn->uar = mlx5_get_uars_page(dmn->mdev);
+       if (IS_ERR(dmn->uar)) {
+               mlx5dr_err(dmn, "Couldn't allocate UAR\n");
+               ret = PTR_ERR(dmn->uar);
+               goto clean_pd;
+       }
+
+       ret = dr_domain_init_mem_resources(dmn);
+       if (ret) {
+               mlx5dr_err(dmn, "Couldn't create domain memory resources\n");
+               goto clean_uar;
+       }
+
+       ret = dr_domain_init_modify_header_resources(dmn);
+       if (ret) {
+               mlx5dr_err(dmn, "Couldn't create modify-header-resources\n");
+               goto clean_mem_resources;
+       }
+
+       ret = mlx5dr_send_ring_alloc(dmn);
+       if (ret) {
+               mlx5dr_err(dmn, "Couldn't create send-ring\n");
+               goto clean_modify_hdr;
+       }
+
+       return 0;
+
+clean_modify_hdr:
+       dr_domain_destroy_modify_header_resources(dmn);
+clean_mem_resources:
+       dr_domain_uninit_mem_resources(dmn);
+clean_uar:
+       mlx5_put_uars_page(dmn->mdev, dmn->uar);
+clean_pd:
+       mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
+
+       return ret;
+}
+
+/* Release all resources allocated by dr_domain_init_resources(), in
+ * reverse creation order.
+ */
+static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
+{
+       mlx5dr_send_ring_free(dmn, dmn->send_ring);
+       dr_domain_destroy_modify_header_resources(dmn);
+       dr_domain_uninit_mem_resources(dmn);
+       mlx5_put_uars_page(dmn->mdev, dmn->uar);
+       mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
+}
+
+/* Fill the uplink vport capabilities from the already-queried eswitch
+ * caps; no FW command needed. The uplink has no vport GVMI of its own
+ * (set to 0) and uses the eswitch manager's VHCA GVMI.
+ */
+static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
+                                      struct mlx5dr_cmd_vport_cap *uplink_vport)
+{
+       struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
+
+       uplink_vport->num = MLX5_VPORT_UPLINK;
+       uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
+       uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
+       uplink_vport->vport_gvmi = 0;
+       uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
+}
+
+/* Query a vport's RX/TX ICM addresses and GVMI from FW and fill
+ * @vport_caps. @other_vport selects whether @vport_number refers to a
+ * vport other than the function's own.
+ */
+static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
+                                u16 vport_number,
+                                bool other_vport,
+                                struct mlx5dr_cmd_vport_cap *vport_caps)
+{
+       int ret;
+
+       ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
+                                                other_vport,
+                                                vport_number,
+                                                &vport_caps->icm_address_rx,
+                                                &vport_caps->icm_address_tx);
+       if (ret)
+               return ret;
+
+       ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
+                                   other_vport,
+                                   vport_number,
+                                   &vport_caps->vport_gvmi);
+       if (ret)
+               return ret;
+
+       vport_caps->num = vport_number;
+       vport_caps->vhca_gvmi = dmn->info.caps.gvmi;
+
+       return 0;
+}
+
+/* Query the eswitch manager's own vport caps (vport 0, other_vport
+ * false) into the dedicated esw_manager_caps slot.
+ */
+static int dr_domain_query_esw_mgr(struct mlx5dr_domain *dmn)
+{
+       return dr_domain_query_vport(dmn, 0, false,
+                                    &dmn->info.caps.vports.esw_manager_caps);
+}
+
+/* Uplink caps come from the eswitch caps already in hand — no FW query */
+static void dr_domain_query_uplink(struct mlx5dr_domain *dmn)
+{
+       dr_domain_fill_uplink_caps(dmn, &dmn->info.caps.vports.uplink_caps);
+}
+
+static struct mlx5dr_cmd_vport_cap *
+dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+       struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+       struct mlx5dr_cmd_vport_cap *vport_caps;
+       int ret;
+
+       vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
+       if (!vport_caps)
+               return NULL;
+
+       ret = dr_domain_query_vport(dmn, vport, true, vport_caps);
+       if (ret) {
+               kvfree(vport_caps);
+               return NULL;
+       }
+
+       ret = xa_insert(&caps->vports.vports_caps_xa, vport,
+                       vport_caps, GFP_KERNEL);
+       if (ret) {
+               mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
+               kvfree(vport_caps);
+               return ERR_PTR(ret);
+       }
+
+       return vport_caps;
+}
+
+/* The eswitch manager vport is ECPF on ECPF devices (e.g. BlueField),
+ * otherwise vport 0.
+ */
+static bool dr_domain_is_esw_mgr_vport(struct mlx5dr_domain *dmn, u16 vport)
+{
+       struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+
+       return (caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
+              (!caps->is_ecpf && vport == 0);
+}
+
+/* Look up a vport's caps. Eswitch manager and uplink are served from
+ * pre-filled slots; all other vports are queried lazily and cached in
+ * vports_caps_xa. On an -EBUSY insert race the lookup is retried, so a
+ * concurrent winner's entry is returned.
+ *
+ * May return NULL or an ERR_PTR from dr_domain_add_vport_cap() —
+ * callers must handle both.
+ */
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+       struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+       struct mlx5dr_cmd_vport_cap *vport_caps;
+
+       if (dr_domain_is_esw_mgr_vport(dmn, vport))
+               return &caps->vports.esw_manager_caps;
+
+       if (vport == MLX5_VPORT_UPLINK)
+               return &caps->vports.uplink_caps;
+
+vport_load:
+       vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
+       if (vport_caps)
+               return vport_caps;
+
+       vport_caps = dr_domain_add_vport_cap(dmn, vport);
+       if (PTR_ERR(vport_caps) == -EBUSY)
+               /* caps were already stored by another thread */
+               goto vport_load;
+
+       return vport_caps;
+}
+
+/* Erase and free every lazily-cached vport caps entry. The pre-filled
+ * esw-manager/uplink slots are embedded in the caps struct and are not
+ * in the xarray.
+ */
+static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
+{
+       struct mlx5dr_cmd_vport_cap *vport_caps;
+       unsigned long i;
+
+       xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
+               vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
+               kvfree(vport_caps);
+       }
+}
+
+/* Query eswitch/FDB capabilities and initialize the vport caps cache.
+ * Fails with -EOPNOTSUPP on functions that are not eswitch managers.
+ */
+static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
+                                   struct mlx5dr_domain *dmn)
+{
+       int ret;
+
+       if (!dmn->info.caps.eswitch_manager)
+               return -EOPNOTSUPP;
+
+       ret = mlx5dr_cmd_query_esw_caps(mdev, &dmn->info.caps.esw_caps);
+       if (ret)
+               return ret;
+
+       /* flatten the eswitch caps into the domain caps for easy access */
+       dmn->info.caps.fdb_sw_owner = dmn->info.caps.esw_caps.sw_owner;
+       dmn->info.caps.fdb_sw_owner_v2 = dmn->info.caps.esw_caps.sw_owner_v2;
+       dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
+       dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
+
+       xa_init(&dmn->info.caps.vports.vports_caps_xa);
+
+       /* Query eswitch manager and uplink vports only. Rest of the
+        * vports (vport 0, VFs and SFs) will be queried dynamically.
+        */
+
+       ret = dr_domain_query_esw_mgr(dmn);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
+               goto free_vports_caps_xa;
+       }
+
+       dr_domain_query_uplink(dmn);
+
+       return 0;
+
+free_vports_caps_xa:
+       xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
+
+       return ret;
+}
+
+/* Query device/eswitch caps and derive the per-domain-type steering
+ * info (default/drop ICM addresses, NIC type, SW-steering support).
+ * Only Ethernet link type is supported.
+ *
+ * NOTE(review): the domain-type checks return -ENOTSUPP while the rest
+ * of the file uses -EOPNOTSUPP; presumably kept for userspace/rdma-core
+ * compatibility — confirm before changing.
+ */
+static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
+                              struct mlx5dr_domain *dmn)
+{
+       struct mlx5dr_cmd_vport_cap *vport_cap;
+       int ret;
+
+       if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) {
+               mlx5dr_err(dmn, "Failed to allocate domain, bad link type\n");
+               return -EOPNOTSUPP;
+       }
+
+       ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
+       if (ret)
+               return ret;
+
+       ret = dr_domain_query_fdb_caps(mdev, dmn);
+       if (ret)
+               return ret;
+
+       switch (dmn->type) {
+       case MLX5DR_DOMAIN_TYPE_NIC_RX:
+               if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, rx))
+                       return -ENOTSUPP;
+
+               dmn->info.supp_sw_steering = true;
+               dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
+               dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address;
+               dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address;
+               break;
+       case MLX5DR_DOMAIN_TYPE_NIC_TX:
+               if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, tx))
+                       return -ENOTSUPP;
+
+               dmn->info.supp_sw_steering = true;
+               dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
+               dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address;
+               dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address;
+               break;
+       case MLX5DR_DOMAIN_TYPE_FDB:
+               if (!dmn->info.caps.eswitch_manager)
+                       return -ENOTSUPP;
+
+               if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb))
+                       return -ENOTSUPP;
+
+               dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
+               dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
+               /* FDB default ICM addresses come from the esw manager vport */
+               vport_cap = &dmn->info.caps.vports.esw_manager_caps;
+
+               dmn->info.supp_sw_steering = true;
+               dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
+               dmn->info.rx.default_icm_addr = vport_cap->icm_address_rx;
+               dmn->info.rx.drop_icm_addr = dmn->info.caps.esw_rx_drop_address;
+               dmn->info.tx.drop_icm_addr = dmn->info.caps.esw_tx_drop_address;
+               break;
+       default:
+               mlx5dr_err(dmn, "Invalid domain\n");
+               ret = -EINVAL;
+               break;
+       }
+
+       return ret;
+}
+
+/* Free the cached vport caps and destroy their xarray (initialized in
+ * dr_domain_query_fdb_caps()).
+ */
+static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
+{
+       dr_domain_clear_vports(dmn);
+       xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
+}
+
+/* Create a SW-steering domain of the given type (NIC RX/TX or FDB):
+ * init locks and xarrays, query caps, allocate HW resources and set up
+ * the debugfs dump entry. Returns NULL on any failure (no error code
+ * is propagated to the caller).
+ */
+struct mlx5dr_domain *
+mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
+{
+       struct mlx5dr_domain *dmn;
+       int ret;
+
+       if (type > MLX5DR_DOMAIN_TYPE_FDB)
+               return NULL;
+
+       dmn = kzalloc(sizeof(*dmn), GFP_KERNEL);
+       if (!dmn)
+               return NULL;
+
+       dmn->mdev = mdev;
+       dmn->type = type;
+       refcount_set(&dmn->refcount, 1);
+       mutex_init(&dmn->info.rx.mutex);
+       mutex_init(&dmn->info.tx.mutex);
+       xa_init(&dmn->definers_xa);
+       xa_init(&dmn->peer_dmn_xa);
+
+       if (dr_domain_caps_init(mdev, dmn)) {
+               mlx5dr_err(dmn, "Failed init domain, no caps\n");
+               goto def_xa_destroy;
+       }
+
+       /* clamp pool sizes to what the device's ICM caps allow */
+       dmn->info.max_log_action_icm_sz = DR_CHUNK_SIZE_4K;
+       dmn->info.max_log_sw_icm_sz = min_t(u32, DR_CHUNK_SIZE_1024K,
+                                           dmn->info.caps.log_icm_size);
+       dmn->info.max_log_modify_hdr_pattern_icm_sz =
+               min_t(u32, DR_CHUNK_SIZE_4K,
+                     dmn->info.caps.log_modify_pattern_icm_size);
+
+       if (!dmn->info.supp_sw_steering) {
+               mlx5dr_err(dmn, "SW steering is not supported\n");
+               goto uninit_caps;
+       }
+
+       /* Allocate resources */
+       ret = dr_domain_init_resources(dmn);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed init domain resources\n");
+               goto uninit_caps;
+       }
+
+       dr_domain_init_csum_recalc_fts(dmn);
+       mlx5dr_dbg_init_dump(dmn);
+       return dmn;
+
+uninit_caps:
+       dr_domain_caps_uninit(dmn);
+def_xa_destroy:
+       xa_destroy(&dmn->peer_dmn_xa);
+       xa_destroy(&dmn->definers_xa);
+       kfree(dmn);
+       return NULL;
+}
+
+/* Assure synchronization of the device steering tables with updates made by SW
+ * insertion.
+ *
+ * @flags: MLX5DR_DOMAIN_SYNC_FLAGS_SW drains the send ring (under the
+ *         domain lock); MLX5DR_DOMAIN_SYNC_FLAGS_HW issues the FW
+ *         sync_steering command. Both may be combined.
+ */
+int mlx5dr_domain_sync(struct mlx5dr_domain *dmn, u32 flags)
+{
+       int ret = 0;
+
+       if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_SW) {
+               mlx5dr_domain_lock(dmn);
+               ret = mlx5dr_send_ring_force_drain(dmn);
+               mlx5dr_domain_unlock(dmn);
+               if (ret) {
+                       mlx5dr_err(dmn, "Force drain failed flags: %d, ret: %d\n",
+                                  flags, ret);
+                       return ret;
+               }
+       }
+
+       if (flags & MLX5DR_DOMAIN_SYNC_FLAGS_HW)
+               ret = mlx5dr_cmd_sync_steering(dmn->mdev);
+
+       return ret;
+}
+
+/* Destroy a domain. Refuses with -EBUSY while other objects (tables,
+ * peers) still hold references. Syncs FW steering first so HW no
+ * longer touches the ICM memory being freed.
+ */
+int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
+{
+       if (WARN_ON_ONCE(refcount_read(&dmn->refcount) > 1))
+               return -EBUSY;
+
+       /* make sure resources are not used by the hardware */
+       mlx5dr_cmd_sync_steering(dmn->mdev);
+       mlx5dr_dbg_uninit_dump(dmn);
+       dr_domain_uninit_csum_recalc_fts(dmn);
+       dr_domain_uninit_resources(dmn);
+       dr_domain_caps_uninit(dmn);
+       xa_destroy(&dmn->peer_dmn_xa);
+       xa_destroy(&dmn->definers_xa);
+       mutex_destroy(&dmn->info.tx.mutex);
+       mutex_destroy(&dmn->info.rx.mutex);
+       kfree(dmn);
+       return 0;
+}
+
+/* Set (or clear, with @peer_dmn == NULL) the peer domain for a given
+ * peer VHCA id. Drops the reference held on the previous peer for this
+ * slot and takes one on the new peer, under the domain lock.
+ */
+void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
+                           struct mlx5dr_domain *peer_dmn,
+                           u16 peer_vhca_id)
+{
+       struct mlx5dr_domain *peer;
+
+       mlx5dr_domain_lock(dmn);
+
+       /* release the reference on the peer being replaced, if any */
+       peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
+       if (peer)
+               refcount_dec(&peer->refcount);
+
+       WARN_ON(xa_err(xa_store(&dmn->peer_dmn_xa, peer_vhca_id, peer_dmn, GFP_KERNEL)));
+
+       /* re-load rather than using peer_dmn directly: handles NULL
+        * (slot cleared) without a separate branch
+        */
+       peer = xa_load(&dmn->peer_dmn_xa, peer_vhca_id);
+       if (peer)
+               refcount_inc(&peer->refcount);
+
+       mlx5dr_domain_unlock(dmn);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_fw.c
new file mode 100644 (file)
index 0000000..f05ef0c
--- /dev/null
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/types.h>
+#include "dr_types.h"
+
+/* Build the per-vport checksum-recalculation workaround: a terminating
+ * FW flow table at the deepest FDB level whose single rule applies a
+ * "TTL += 0" modify-header (forcing HW checksum recalculation) and
+ * forwards to @vport_num. Returns the created context, or NULL on any
+ * failure (error path unwinds FW objects in reverse creation order).
+ */
+struct mlx5dr_fw_recalc_cs_ft *
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num)
+{
+       struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
+       struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+       u32 table_id, group_id, modify_hdr_id;
+       u64 rx_icm_addr, modify_ttl_action;
+       int ret;
+
+       recalc_cs_ft = kzalloc(sizeof(*recalc_cs_ft), GFP_KERNEL);
+       if (!recalc_cs_ft)
+               return NULL;
+
+       ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+       ft_attr.level = dmn->info.caps.max_ft_level - 1;
+       ft_attr.term_tbl = true;
+
+       ret = mlx5dr_cmd_create_flow_table(dmn->mdev,
+                                          &ft_attr,
+                                          &rx_icm_addr,
+                                          &table_id);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed creating TTL W/A FW flow table %d\n", ret);
+               goto free_ttl_tbl;
+       }
+
+       ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,
+                                                MLX5_FLOW_TABLE_TYPE_FDB,
+                                                table_id, &group_id);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed creating TTL W/A FW flow group %d\n", ret);
+               goto destroy_flow_table;
+       }
+
+       /* Modify TTL action by adding zero to trigger CS recalculation */
+       modify_ttl_action = 0;
+       MLX5_SET(set_action_in, &modify_ttl_action, action_type, MLX5_ACTION_TYPE_ADD);
+       MLX5_SET(set_action_in, &modify_ttl_action, field, MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
+
+       ret = mlx5dr_cmd_alloc_modify_header(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB, 1,
+                                            &modify_ttl_action,
+                                            &modify_hdr_id);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed modify header TTL %d\n", ret);
+               goto destroy_flow_group;
+       }
+
+       ret = mlx5dr_cmd_set_fte_modify_and_vport(dmn->mdev,
+                                                 MLX5_FLOW_TABLE_TYPE_FDB,
+                                                 table_id, group_id, modify_hdr_id,
+                                                 vport_num);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed setting TTL W/A flow table entry %d\n", ret);
+               goto dealloc_modify_header;
+       }
+
+       recalc_cs_ft->modify_hdr_id = modify_hdr_id;
+       recalc_cs_ft->rx_icm_addr = rx_icm_addr;
+       recalc_cs_ft->table_id = table_id;
+       recalc_cs_ft->group_id = group_id;
+
+       return recalc_cs_ft;
+
+dealloc_modify_header:
+       mlx5dr_cmd_dealloc_modify_header(dmn->mdev, modify_hdr_id);
+destroy_flow_group:
+       mlx5dr_cmd_destroy_flow_group(dmn->mdev,
+                                     MLX5_FLOW_TABLE_TYPE_FDB,
+                                     table_id, group_id);
+destroy_flow_table:
+       mlx5dr_cmd_destroy_flow_table(dmn->mdev, table_id, MLX5_FLOW_TABLE_TYPE_FDB);
+free_ttl_tbl:
+       kfree(recalc_cs_ft);
+       return NULL;
+}
+
+/* Tear down everything built by mlx5dr_fw_create_recalc_cs_ft():
+ * FTE, modify header, flow group, flow table, then the context itself.
+ */
+void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
+                                   struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft)
+{
+       mlx5dr_cmd_del_flow_table_entry(dmn->mdev,
+                                       MLX5_FLOW_TABLE_TYPE_FDB,
+                                       recalc_cs_ft->table_id);
+       mlx5dr_cmd_dealloc_modify_header(dmn->mdev, recalc_cs_ft->modify_hdr_id);
+       mlx5dr_cmd_destroy_flow_group(dmn->mdev,
+                                     MLX5_FLOW_TABLE_TYPE_FDB,
+                                     recalc_cs_ft->table_id,
+                                     recalc_cs_ft->group_id);
+       mlx5dr_cmd_destroy_flow_table(dmn->mdev,
+                                     recalc_cs_ft->table_id,
+                                     MLX5_FLOW_TABLE_TYPE_FDB);
+
+       kfree(recalc_cs_ft);
+}
+
+/* Create a FW multi-destination FDB table: one table, one empty group
+ * and one catch-all FTE (zeroed match value) forwarding to @dest[].
+ * Outputs the table and group ids via @tbl_id/@group_id; pair with
+ * mlx5dr_fw_destroy_md_tbl().
+ */
+int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+                           struct mlx5dr_cmd_flow_destination_hw_info *dest,
+                           int num_dest,
+                           bool reformat_req,
+                           u32 *tbl_id,
+                           u32 *group_id,
+                           bool ignore_flow_level,
+                           u32 flow_source)
+{
+       struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
+       struct mlx5dr_cmd_fte_info fte_info = {};
+       u32 val[MLX5_ST_SZ_DW_MATCH_PARAM] = {};
+       struct mlx5dr_cmd_ft_info ft_info = {};
+       int ret;
+
+       ft_attr.table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+       /* place near the bottom of the FDB, capped by the multipath limit */
+       ft_attr.level = min_t(int, dmn->info.caps.max_ft_level - 2,
+                             MLX5_FT_MAX_MULTIPATH_LEVEL);
+       ft_attr.reformat_en = reformat_req;
+       ft_attr.decap_en = reformat_req;
+
+       ret = mlx5dr_cmd_create_flow_table(dmn->mdev, &ft_attr, NULL, tbl_id);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed creating multi dest FW flow table %d\n", ret);
+               return ret;
+       }
+
+       ret = mlx5dr_cmd_create_empty_flow_group(dmn->mdev,
+                                                MLX5_FLOW_TABLE_TYPE_FDB,
+                                                *tbl_id, group_id);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed creating multi dest FW flow group %d\n", ret);
+               goto free_flow_table;
+       }
+
+       ft_info.id = *tbl_id;
+       ft_info.type = FS_FT_FDB;
+       fte_info.action.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       fte_info.dests_size = num_dest;
+       fte_info.val = val;
+       fte_info.dest_arr = dest;
+       fte_info.ignore_flow_level = ignore_flow_level;
+       fte_info.flow_context.flow_source = flow_source;
+
+       ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed setting fte into table %d\n", ret);
+               goto free_flow_group;
+       }
+
+       return 0;
+
+free_flow_group:
+       mlx5dr_cmd_destroy_flow_group(dmn->mdev, MLX5_FLOW_TABLE_TYPE_FDB,
+                                     *tbl_id, *group_id);
+free_flow_table:
+       mlx5dr_cmd_destroy_flow_table(dmn->mdev, *tbl_id,
+                                     MLX5_FLOW_TABLE_TYPE_FDB);
+       return ret;
+}
+
+/* Destroy a multi-destination table created by mlx5dr_fw_create_md_tbl():
+ * FTE, then group, then table.
+ *
+ * NOTE(review): del_flow_table_entry is passed FS_FT_FDB here while
+ * the other calls use MLX5_FLOW_TABLE_TYPE_FDB — presumably the two
+ * enums share the FDB value; confirm against their definitions.
+ */
+void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn,
+                             u32 tbl_id, u32 group_id)
+{
+       mlx5dr_cmd_del_flow_table_entry(dmn->mdev, FS_FT_FDB, tbl_id);
+       mlx5dr_cmd_destroy_flow_group(dmn->mdev,
+                                     MLX5_FLOW_TABLE_TYPE_FDB,
+                                     tbl_id, group_id);
+       mlx5dr_cmd_destroy_flow_table(dmn->mdev, tbl_id,
+                                     MLX5_FLOW_TABLE_TYPE_FDB);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_icm_pool.c
new file mode 100644 (file)
index 0000000..0b5af9f
--- /dev/null
@@ -0,0 +1,576 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+/* Modify-header ICM memory is allocated on a 64B-aligned base */
+#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
+/* Per-ICM-type percentage of the pool's max size that "hot" (freed but
+ * not yet HW-synced) memory may reach before a sync is triggered.
+ */
+#define DR_ICM_POOL_STE_HOT_MEM_PERCENT 25
+#define DR_ICM_POOL_MODIFY_HDR_PTRN_HOT_MEM_PERCENT 50
+#define DR_ICM_POOL_MODIFY_ACTION_HOT_MEM_PERCENT 90
+
+/* A freed chunk parked until the next sync-steering; identified by its
+ * owning buddy allocator, segment index and log size.
+ */
+struct mlx5dr_icm_hot_chunk {
+       struct mlx5dr_icm_buddy_mem *buddy_mem;
+       unsigned int seg;
+       enum mlx5dr_icm_chunk_size size;
+};
+
+struct mlx5dr_icm_pool {
+       enum mlx5dr_icm_type icm_type;
+       enum mlx5dr_icm_chunk_size max_log_chunk_sz;
+       struct mlx5dr_domain *dmn;
+       /* domain-level cache for struct mlx5dr_icm_chunk allocations */
+       struct kmem_cache *chunks_kmem_cache;
+
+       /* memory management */
+       struct mutex mutex; /* protect the ICM pool and ICM buddy */
+       struct list_head buddy_mem_list;
+
+       /* Hardware may be accessing this memory but at some future,
+        * undetermined time, it might cease to do so.
+        * sync_ste command sets them free.
+        */
+       struct mlx5dr_icm_hot_chunk *hot_chunks_arr;
+       u32 hot_chunks_num;
+       u64 hot_memory_size;
+       /* hot memory size threshold for triggering sync */
+       u64 th;
+};
+
+/* Handle of one SW ICM device-memory allocation */
+struct mlx5dr_icm_dm {
+       u32 obj_id;
+       enum mlx5_sw_icm_type type;
+       phys_addr_t addr;
+       size_t length;
+};
+
+/* An mkey registered over one SW ICM device-memory region */
+struct mlx5dr_icm_mr {
+       u32 mkey;
+       struct mlx5dr_icm_dm dm;
+       struct mlx5dr_domain *dmn;
+       size_t length;
+       u64 icm_start_addr;
+};
+
+/* Register an mkey over [start_addr, start_addr + length) in the given
+ * PD with local read/write enabled; for SW_ICM access mode remote
+ * read/write is enabled as well.
+ */
+static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
+                                u32 pd, u64 length, u64 start_addr, int mode,
+                                u32 *mkey)
+{
+       u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+       u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
+       void *mkc;
+
+       mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+       MLX5_SET(mkc, mkc, access_mode_1_0, mode);
+       MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
+       MLX5_SET(mkc, mkc, lw, 1);
+       MLX5_SET(mkc, mkc, lr, 1);
+       if (mode == MLX5_MKC_ACCESS_MODE_SW_ICM) {
+               MLX5_SET(mkc, mkc, rw, 1);
+               MLX5_SET(mkc, mkc, rr, 1);
+       }
+
+       MLX5_SET64(mkc, mkc, len, length);
+       MLX5_SET(mkc, mkc, pd, pd);
+       MLX5_SET(mkc, mkc, qpn, 0xffffff);
+       MLX5_SET64(mkc, mkc, start_addr, start_addr);
+
+       return mlx5_core_create_mkey(mdev, mkey, in, inlen);
+}
+
+/* Chunk accessors: a chunk is described by (buddy_mem, seg, size);
+ * these translate that into offsets/addresses within the buddy's MR.
+ */
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
+{
+       u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+       return (u64)offset * chunk->seg;
+}
+
+/* rkey of the mkey registered over the chunk's buddy ICM memory */
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
+{
+       return chunk->buddy_mem->icm_mr->mkey;
+}
+
+/* Device-visible ICM address of the chunk's first entry */
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
+{
+       u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);
+
+       return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
+}
+
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
+{
+       return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
+                       chunk->buddy_mem->pool->icm_type);
+}
+
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
+{
+       return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
+}
+
+/* Allocate one max-log-chunk-sized SW ICM device-memory region and
+ * register an mkey over it; one such MR backs each buddy allocator.
+ * Returns the new mlx5dr_icm_mr, or NULL on any failure.
+ */
+static struct mlx5dr_icm_mr *
+dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
+{
+       struct mlx5_core_dev *mdev = pool->dmn->mdev;
+       enum mlx5_sw_icm_type dm_type = 0;
+       struct mlx5dr_icm_mr *icm_mr;
+       size_t log_align_base = 0;
+       int err;
+
+       icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
+       if (!icm_mr)
+               return NULL;
+
+       icm_mr->dmn = pool->dmn;
+
+       icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+                                                              pool->icm_type);
+
+       /* Map the ICM type to the SW ICM type and required alignment */
+       switch (pool->icm_type) {
+       case DR_ICM_TYPE_STE:
+               dm_type = MLX5_SW_ICM_TYPE_STEERING;
+               /* STE memory must be naturally aligned to its full size */
+               log_align_base = ilog2(icm_mr->dm.length);
+               break;
+       case DR_ICM_TYPE_MODIFY_ACTION:
+               dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
+               /* Align base is 64B */
+               log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
+               break;
+       case DR_ICM_TYPE_MODIFY_HDR_PTRN:
+               dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
+               /* Align base is 64B */
+               log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
+               break;
+       default:
+               /* unknown ICM type: dm_type/log_align_base keep their 0 init */
+               WARN_ON(pool->icm_type);
+       }
+
+       icm_mr->dm.type = dm_type;
+
+       err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
+                                  log_align_base, 0, &icm_mr->dm.addr,
+                                  &icm_mr->dm.obj_id);
+       if (err) {
+               mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
+               goto free_icm_mr;
+       }
+
+       /* Register device memory */
+       err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
+                                   icm_mr->dm.length,
+                                   icm_mr->dm.addr,
+                                   MLX5_MKC_ACCESS_MODE_SW_ICM,
+                                   &icm_mr->mkey);
+       if (err) {
+               mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
+               goto free_dm;
+       }
+
+       icm_mr->icm_start_addr = icm_mr->dm.addr;
+
+       /* Sanity check: the returned base address must honor the
+        * requested alignment (the message prints its log2 value).
+        */
+       if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) {
+               mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n",
+                          log_align_base);
+               goto free_mkey;
+       }
+
+       return icm_mr;
+
+free_mkey:
+       mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
+free_dm:
+       mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
+                              icm_mr->dm.addr, icm_mr->dm.obj_id);
+free_icm_mr:
+       kvfree(icm_mr);
+       return NULL;
+}
+
+/* Release an icm_mr: destroy its mkey, free the underlying SW ICM
+ * device memory, and free the descriptor itself.
+ */
+static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
+{
+       struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;
+       struct mlx5dr_icm_dm *dm = &icm_mr->dm;
+
+       mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
+       mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
+                              dm->addr, dm->obj_id);
+       kvfree(icm_mr);
+}
+
+/* Size of one cached HW STE copy used for the buddy's STE cache */
+static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
+{
+       /* We support only one type of STE size, both for ConnectX-5 and later
+        * devices. Once the support for match STE which has a larger tag is
+        * added (32B instead of 16B), the STE size for devices later than
+        * ConnectX-5 needs to account for that.
+        */
+       return DR_STE_SIZE_REDUCED;
+}
+
+/* Point the chunk's STE management arrays into the buddy's preallocated
+ * caches at this chunk's offset, and zero them for reuse.
+ */
+static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
+{
+       int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
+       struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+       int ste_size = dr_icm_buddy_get_ste_size(buddy);
+       int index = offset / DR_STE_SIZE;
+
+       chunk->ste_arr = &buddy->ste_arr[index];
+       chunk->miss_list = &buddy->miss_list[index];
+       chunk->hw_ste_arr = buddy->hw_ste_arr + index * ste_size;
+
+       memset(chunk->hw_ste_arr, 0, num_of_entries * ste_size);
+       memset(chunk->ste_arr, 0,
+              num_of_entries * sizeof(chunk->ste_arr[0]));
+}
+
+/* Preallocate the per-buddy STE management structures (SW STE array,
+ * reduced HW STE copies, miss lists) so chunk alloc/free doesn't need
+ * to allocate; dr_icm_chunk_ste_init() only points into these arrays.
+ * Returns 0 on success, -ENOMEM on any allocation failure.
+ */
+static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+       int num_of_entries =
+               mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);
+
+       buddy->ste_arr = kvcalloc(num_of_entries,
+                                 sizeof(struct mlx5dr_ste), GFP_KERNEL);
+       if (!buddy->ste_arr)
+               return -ENOMEM;
+
+       /* Preallocate full STE size on non-ConnectX-5 devices since
+        * we need to support both full and reduced with the same cache.
+        */
+       buddy->hw_ste_arr = kvcalloc(num_of_entries,
+                                    dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
+       if (!buddy->hw_ste_arr)
+               goto free_ste_arr;
+
+       /* overflow-safe array allocation instead of open-coded multiply,
+        * consistent with the kvcalloc() calls above
+        */
+       buddy->miss_list = kvmalloc_array(num_of_entries,
+                                         sizeof(struct list_head), GFP_KERNEL);
+       if (!buddy->miss_list)
+               goto free_hw_ste_arr;
+
+       return 0;
+
+free_hw_ste_arr:
+       kvfree(buddy->hw_ste_arr);
+free_ste_arr:
+       kvfree(buddy->ste_arr);
+       return -ENOMEM;
+}
+
+/* Release the STE management structures allocated by
+ * dr_icm_buddy_init_ste_cache(); kvfree() is a no-op on NULL.
+ */
+static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
+{
+       /* free in reverse order of allocation */
+       kvfree(buddy->miss_list);
+       kvfree(buddy->hw_ste_arr);
+       kvfree(buddy->ste_arr);
+}
+
+/* Create a new buddy allocator backed by a freshly allocated ICM MR
+ * and add it to the head of the pool's buddy list so it is searched
+ * first. Returns 0 on success, -ENOMEM on failure.
+ */
+static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
+{
+       struct mlx5dr_icm_buddy_mem *buddy;
+       struct mlx5dr_icm_mr *icm_mr;
+
+       icm_mr = dr_icm_pool_mr_create(pool);
+       if (!icm_mr)
+               return -ENOMEM;
+
+       buddy = kvzalloc(sizeof(*buddy), GFP_KERNEL);
+       if (!buddy)
+               goto free_mr;
+
+       if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz))
+               goto err_free_buddy;
+
+       buddy->icm_mr = icm_mr;
+       buddy->pool = pool;
+
+       if (pool->icm_type == DR_ICM_TYPE_STE) {
+               /* Reduce allocations by preallocating and reusing the STE structures */
+               if (dr_icm_buddy_init_ste_cache(buddy))
+                       goto err_cleanup_buddy;
+       }
+
+       /* add it to the -start- of the list in order to search in it first */
+       list_add(&buddy->list_node, &pool->buddy_mem_list);
+
+       pool->dmn->num_buddies[pool->icm_type]++;
+
+       return 0;
+
+err_cleanup_buddy:
+       mlx5dr_buddy_cleanup(buddy);
+err_free_buddy:
+       kvfree(buddy);
+free_mr:
+       dr_icm_pool_mr_destroy(icm_mr);
+       return -ENOMEM;
+}
+
+/* Free a buddy allocator: its backing ICM MR, the buddy metadata and,
+ * for STE pools, the preallocated STE cache; also updates the domain's
+ * per-type buddy counter.
+ */
+static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
+{
+       /* cache icm_type before buddy state is torn down */
+       enum mlx5dr_icm_type icm_type = buddy->pool->icm_type;
+
+       dr_icm_pool_mr_destroy(buddy->icm_mr);
+
+       mlx5dr_buddy_cleanup(buddy);
+
+       if (icm_type == DR_ICM_TYPE_STE)
+               dr_icm_buddy_cleanup_ste_cache(buddy);
+
+       buddy->pool->dmn->num_buddies[icm_type]--;
+
+       kvfree(buddy);
+}
+
+/* Initialize a newly allocated chunk descriptor for the given buddy
+ * segment and account its size in the buddy's used_memory.
+ */
+static void
+dr_icm_chunk_init(struct mlx5dr_icm_chunk *chunk,
+                 struct mlx5dr_icm_pool *pool,
+                 enum mlx5dr_icm_chunk_size chunk_size,
+                 struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
+                 unsigned int seg)
+{
+       int offset;
+
+       chunk->seg = seg;
+       chunk->size = chunk_size;
+       chunk->buddy_mem = buddy_mem_pool;
+
+       /* only STE chunks carry the SW/HW STE management arrays */
+       if (pool->icm_type == DR_ICM_TYPE_STE) {
+               offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
+               dr_icm_chunk_ste_init(chunk, offset);
+       }
+
+       buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
+}
+
+/* Hot memory crossed the pool threshold - time to sync and reclaim */
+static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
+{
+       return pool->hot_memory_size > pool->th;
+}
+
+/* Return all hot (freed-but-unsynced) chunks to their buddy allocators
+ * and reset the hot-chunk accounting. Called after the HW sync (or on
+ * pool destroy, when HW can no longer reference this memory).
+ */
+static void dr_icm_pool_clear_hot_chunks_arr(struct mlx5dr_icm_pool *pool)
+{
+       struct mlx5dr_icm_hot_chunk *hot_chunk;
+       u32 i, num_entries;
+
+       for (i = 0; i < pool->hot_chunks_num; i++) {
+               hot_chunk = &pool->hot_chunks_arr[i];
+               num_entries = mlx5dr_icm_pool_chunk_size_to_entries(hot_chunk->size);
+               mlx5dr_buddy_free_mem(hot_chunk->buddy_mem,
+                                     hot_chunk->seg, ilog2(num_entries));
+               hot_chunk->buddy_mem->used_memory -=
+                       mlx5dr_icm_pool_chunk_size_to_byte(hot_chunk->size,
+                                                          pool->icm_type);
+       }
+
+       pool->hot_chunks_num = 0;
+       pool->hot_memory_size = 0;
+}
+
+/* Flush the HW steering caches, then reclaim all hot chunks and
+ * destroy STE buddies that ended up fully unused.
+ */
+static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
+{
+       struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
+       int err;
+
+       err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
+       if (err) {
+               mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err);
+               return err;
+       }
+
+       dr_icm_pool_clear_hot_chunks_arr(pool);
+
+       list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
+               if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE)
+                       dr_icm_buddy_destroy(buddy);
+       }
+
+       return 0;
+}
+
+/* Find a buddy with free space for chunk_size and allocate a segment
+ * from it; when all existing buddies are full, create a new one (added
+ * at the head of the list) and retry. On success returns 0 and sets
+ * *buddy and *seg.
+ */
+static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool,
+                                        enum mlx5dr_icm_chunk_size chunk_size,
+                                        struct mlx5dr_icm_buddy_mem **buddy,
+                                        unsigned int *seg)
+{
+       struct mlx5dr_icm_buddy_mem *buddy_mem_pool;
+       bool new_mem = false;
+       int err;
+
+alloc_buddy_mem:
+       /* find the next free place from the buddy list */
+       list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) {
+               err = mlx5dr_buddy_alloc_mem(buddy_mem_pool,
+                                            chunk_size, seg);
+               if (!err)
+                       goto found;
+
+               /* a brand-new (max-size) buddy failing means real OOM */
+               if (WARN_ON(new_mem)) {
+                       /* We have new memory pool, first in the list */
+                       mlx5dr_err(pool->dmn,
+                                  "No memory for order: %d\n",
+                                  chunk_size);
+                       goto out;
+               }
+       }
+
+       /* no more available allocators in that pool, create new */
+       err = dr_icm_buddy_create(pool);
+       if (err) {
+               mlx5dr_err(pool->dmn,
+                          "Failed creating buddy for order %d\n",
+                          chunk_size);
+               goto out;
+       }
+
+       /* mark we have new memory, first in list */
+       new_mem = true;
+       goto alloc_buddy_mem;
+
+found:
+       *buddy = buddy_mem_pool;
+out:
+       return err;
+}
+
+/* Allocate an ICM chunk, each chunk holds a piece of ICM memory and
+ * also memory used for HW STE management for optimizations.
+ * Returns NULL if chunk_size exceeds the pool's max log chunk size or
+ * if allocation fails; on the kmem_cache failure path the buddy
+ * segment is returned to the buddy allocator before unlocking.
+ */
+struct mlx5dr_icm_chunk *
+mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
+                      enum mlx5dr_icm_chunk_size chunk_size)
+{
+       struct mlx5dr_icm_chunk *chunk = NULL;
+       struct mlx5dr_icm_buddy_mem *buddy;
+       unsigned int seg;
+       int ret;
+
+       if (chunk_size > pool->max_log_chunk_sz)
+               return NULL;
+
+       mutex_lock(&pool->mutex);
+       /* find mem, get back the relevant buddy pool and seg in that mem */
+       ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg);
+       if (ret)
+               goto out;
+
+       chunk = kmem_cache_alloc(pool->chunks_kmem_cache, GFP_KERNEL);
+       if (!chunk)
+               goto out_err;
+
+       dr_icm_chunk_init(chunk, pool, chunk_size, buddy, seg);
+
+       goto out;
+
+out_err:
+       mlx5dr_buddy_free_mem(buddy, seg, chunk_size);
+out:
+       mutex_unlock(&pool->mutex);
+       return chunk;
+}
+
+/* Free an ICM chunk. The ICM memory is not returned to the buddy
+ * immediately - HW may still reference it - so it is parked in the
+ * pool's hot-chunks array until the next sync-steering reclaims it.
+ */
+void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
+{
+       struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
+       struct mlx5dr_icm_pool *pool = buddy->pool;
+       struct mlx5dr_icm_hot_chunk *hot_chunk;
+       struct kmem_cache *chunks_cache;
+
+       chunks_cache = pool->chunks_kmem_cache;
+
+       /* move the chunk to the waiting chunks array, AKA "hot" memory */
+       mutex_lock(&pool->mutex);
+
+       pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
+
+       hot_chunk = &pool->hot_chunks_arr[pool->hot_chunks_num++];
+       hot_chunk->buddy_mem = chunk->buddy_mem;
+       hot_chunk->seg = chunk->seg;
+       hot_chunk->size = chunk->size;
+
+       /* the descriptor itself can be recycled right away */
+       kmem_cache_free(chunks_cache, chunk);
+
+       /* Check if we have chunks that are waiting for sync-ste */
+       if (dr_icm_pool_is_sync_required(pool))
+               dr_icm_pool_sync_all_buddy_pools(pool);
+
+       mutex_unlock(&pool->mutex);
+}
+
+/* Hash-table descriptors come from a domain-level kmem cache */
+struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool)
+{
+       return kmem_cache_alloc(pool->dmn->htbls_kmem_cache, GFP_KERNEL);
+}
+
+void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl)
+{
+       kmem_cache_free(pool->dmn->htbls_kmem_cache, htbl);
+}
+
+/* Create an ICM pool of the given type for the domain. The max chunk
+ * size and the hot-memory sync threshold (pool->th) are per ICM type;
+ * the hot-chunks array is sized to hold the worst-case number of freed
+ * chunks before the threshold is crossed and a sync reclaims them.
+ * Returns the new pool, or NULL on failure.
+ */
+struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
+                                              enum mlx5dr_icm_type icm_type)
+{
+       u32 num_of_chunks, entry_size;
+       struct mlx5dr_icm_pool *pool;
+       u32 max_hot_size = 0;
+
+       pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool)
+               return NULL;
+
+       pool->dmn = dmn;
+       pool->icm_type = icm_type;
+       pool->chunks_kmem_cache = dmn->chunks_kmem_cache;
+
+       INIT_LIST_HEAD(&pool->buddy_mem_list);
+       mutex_init(&pool->mutex);
+
+       switch (icm_type) {
+       case DR_ICM_TYPE_STE:
+               pool->max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
+               max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+                                                                 pool->icm_type) *
+                              DR_ICM_POOL_STE_HOT_MEM_PERCENT / 100;
+               break;
+       case DR_ICM_TYPE_MODIFY_ACTION:
+               pool->max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
+               max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+                                                                 pool->icm_type) *
+                              DR_ICM_POOL_MODIFY_ACTION_HOT_MEM_PERCENT / 100;
+               break;
+       case DR_ICM_TYPE_MODIFY_HDR_PTRN:
+               pool->max_log_chunk_sz = dmn->info.max_log_modify_hdr_pattern_icm_sz;
+               max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
+                                                                 pool->icm_type) *
+                              DR_ICM_POOL_MODIFY_HDR_PTRN_HOT_MEM_PERCENT / 100;
+               break;
+       default:
+               WARN_ON(icm_type);
+       }
+
+       entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type);
+
+       /* each hot chunk holds at least one entry, so this many slots
+        * suffice before hot_memory_size exceeds the threshold
+        */
+       num_of_chunks = DIV_ROUND_UP(max_hot_size, entry_size) + 1;
+       pool->th = max_hot_size;
+
+       pool->hot_chunks_arr = kvcalloc(num_of_chunks,
+                                       sizeof(struct mlx5dr_icm_hot_chunk),
+                                       GFP_KERNEL);
+       if (!pool->hot_chunks_arr)
+               goto free_pool;
+
+       return pool;
+
+free_pool:
+       kvfree(pool);
+       return NULL;
+}
+
+/* Destroy the pool: reclaim all hot chunks back to their buddies, then
+ * destroy every buddy (and its ICM MR) and free the pool itself.
+ */
+void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
+{
+       struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
+
+       dr_icm_pool_clear_hot_chunks_arr(pool);
+
+       list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node)
+               dr_icm_buddy_destroy(buddy);
+
+       kvfree(pool->hot_chunks_arr);
+       mutex_destroy(&pool->mutex);
+       kvfree(pool);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_matcher.c
new file mode 100644 (file)
index 0000000..0726848
--- /dev/null
@@ -0,0 +1,1108 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+/* Mask "is set" helpers: each returns true when the matcher mask
+ * requires matching on the corresponding group of header fields.
+ */
+static bool dr_mask_is_smac_set(struct mlx5dr_match_spec *spec)
+{
+       return (spec->smac_47_16 || spec->smac_15_0);
+}
+
+static bool dr_mask_is_dmac_set(struct mlx5dr_match_spec *spec)
+{
+       return (spec->dmac_47_16 || spec->dmac_15_0);
+}
+
+static bool dr_mask_is_l3_base_set(struct mlx5dr_match_spec *spec)
+{
+       return (spec->ip_protocol || spec->frag || spec->tcp_flags ||
+               spec->ip_ecn || spec->ip_dscp);
+}
+
+static bool dr_mask_is_tcp_udp_base_set(struct mlx5dr_match_spec *spec)
+{
+       return (spec->tcp_sport || spec->tcp_dport ||
+               spec->udp_sport || spec->udp_dport);
+}
+
+static bool dr_mask_is_ipv4_set(struct mlx5dr_match_spec *spec)
+{
+       return (spec->dst_ip_31_0 || spec->src_ip_31_0);
+}
+
+static bool dr_mask_is_ipv4_5_tuple_set(struct mlx5dr_match_spec *spec)
+{
+       return (dr_mask_is_l3_base_set(spec) ||
+               dr_mask_is_tcp_udp_base_set(spec) ||
+               dr_mask_is_ipv4_set(spec));
+}
+
+static bool dr_mask_is_eth_l2_tnl_set(struct mlx5dr_match_misc *misc)
+{
+       return misc->vxlan_vni;
+}
+
+static bool dr_mask_is_ttl_set(struct mlx5dr_match_spec *spec)
+{
+       return spec->ttl_hoplimit;
+}
+
+static bool dr_mask_is_ipv4_ihl_set(struct mlx5dr_match_spec *spec)
+{
+       return spec->ipv4_ihl;
+}
+
+#define DR_MASK_IS_L2_DST(_spec, _misc, _inner_outer) (_spec.first_vid || \
+       (_spec).first_cfi || (_spec).first_prio || (_spec).cvlan_tag || \
+       (_spec).svlan_tag || (_spec).dmac_47_16 || (_spec).dmac_15_0 || \
+       (_spec).ethertype || (_spec).ip_version || \
+       (_misc)._inner_outer##_second_vid || \
+       (_misc)._inner_outer##_second_cfi || \
+       (_misc)._inner_outer##_second_prio || \
+       (_misc)._inner_outer##_second_cvlan_tag || \
+       (_misc)._inner_outer##_second_svlan_tag)
+
+/* Same "is set" checks as macros, where the inner/outer field name is
+ * selected via token pasting on the _inner_outer argument.
+ */
+#define DR_MASK_IS_ETH_L4_SET(_spec, _misc, _inner_outer) ( \
+       dr_mask_is_l3_base_set(&(_spec)) || \
+       dr_mask_is_tcp_udp_base_set(&(_spec)) || \
+       dr_mask_is_ttl_set(&(_spec)) || \
+       (_misc)._inner_outer##_ipv6_flow_label)
+
+#define DR_MASK_IS_ETH_L4_MISC_SET(_misc3, _inner_outer) ( \
+       (_misc3)._inner_outer##_tcp_seq_num || \
+       (_misc3)._inner_outer##_tcp_ack_num)
+
+#define DR_MASK_IS_FIRST_MPLS_SET(_misc2, _inner_outer) ( \
+       (_misc2)._inner_outer##_first_mpls_label || \
+       (_misc2)._inner_outer##_first_mpls_exp || \
+       (_misc2)._inner_outer##_first_mpls_s_bos || \
+       (_misc2)._inner_outer##_first_mpls_ttl)
+
+/* True if the mask matches on any GRE header field */
+static bool dr_mask_is_tnl_gre_set(struct mlx5dr_match_misc *misc)
+{
+       return (misc->gre_key_h || misc->gre_key_l ||
+               misc->gre_protocol || misc->gre_c_present ||
+               misc->gre_k_present || misc->gre_s_present);
+}
+
+/* MPLS-over-GRE / MPLS-over-UDP first-label match fields */
+#define DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
+       (_misc)->outer_first_mpls_over_gre_label || \
+       (_misc)->outer_first_mpls_over_gre_exp || \
+       (_misc)->outer_first_mpls_over_gre_s_bos || \
+       (_misc)->outer_first_mpls_over_gre_ttl)
+
+#define DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
+       (_misc)->outer_first_mpls_over_udp_label || \
+       (_misc)->outer_first_mpls_over_udp_exp || \
+       (_misc)->outer_first_mpls_over_udp_s_bos || \
+       (_misc)->outer_first_mpls_over_udp_ttl)
+
+/* Tunnel-type helpers: a tunnel match is usable only if the mask sets
+ * its fields AND the device supports parsing that tunnel - either
+ * natively (steering format >= ConnectX-6DX) or via an enabled flex
+ * parser capability bit.
+ */
+static bool
+dr_mask_is_vxlan_gpe_set(struct mlx5dr_match_misc3 *misc3)
+{
+       return (misc3->outer_vxlan_gpe_vni ||
+               misc3->outer_vxlan_gpe_next_protocol ||
+               misc3->outer_vxlan_gpe_flags);
+}
+
+static bool
+dr_matcher_supp_vxlan_gpe(struct mlx5dr_cmd_caps *caps)
+{
+       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+              (caps->flex_protocols & MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED);
+}
+
+static bool
+dr_mask_is_tnl_vxlan_gpe(struct mlx5dr_match_param *mask,
+                        struct mlx5dr_domain *dmn)
+{
+       return dr_mask_is_vxlan_gpe_set(&mask->misc3) &&
+              dr_matcher_supp_vxlan_gpe(&dmn->info.caps);
+}
+
+static bool dr_mask_is_tnl_geneve_set(struct mlx5dr_match_misc *misc)
+{
+       return misc->geneve_vni ||
+              misc->geneve_oam ||
+              misc->geneve_protocol_type ||
+              misc->geneve_opt_len;
+}
+
+static bool dr_mask_is_tnl_geneve_tlv_opt(struct mlx5dr_match_misc3 *misc3)
+{
+       return misc3->geneve_tlv_option_0_data;
+}
+
+static bool
+dr_matcher_supp_flex_parser_ok(struct mlx5dr_cmd_caps *caps)
+{
+       return caps->flex_parser_ok_bits_supp;
+}
+
+static bool dr_mask_is_tnl_geneve_tlv_opt_exist_set(struct mlx5dr_match_misc *misc,
+                                                   struct mlx5dr_domain *dmn)
+{
+       return dr_matcher_supp_flex_parser_ok(&dmn->info.caps) &&
+              misc->geneve_tlv_option_0_exist;
+}
+
+static bool
+dr_matcher_supp_tnl_geneve(struct mlx5dr_cmd_caps *caps)
+{
+       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+              (caps->flex_protocols & MLX5_FLEX_PARSER_GENEVE_ENABLED);
+}
+
+static bool
+dr_mask_is_tnl_geneve(struct mlx5dr_match_param *mask,
+                     struct mlx5dr_domain *dmn)
+{
+       return dr_mask_is_tnl_geneve_set(&mask->misc) &&
+              dr_matcher_supp_tnl_geneve(&dmn->info.caps);
+}
+
+/* GTP-U matching: each GTP-U field may be carried by flex parser 0 or
+ * flex parser 1, depending on which flex parser id FW assigned to it;
+ * the _flex_parser_0/_flex_parser_1 helpers below group fields by the
+ * assigned parser.
+ */
+static bool dr_mask_is_tnl_gtpu_set(struct mlx5dr_match_misc3 *misc3)
+{
+       return misc3->gtpu_msg_flags || misc3->gtpu_msg_type || misc3->gtpu_teid;
+}
+
+static bool dr_matcher_supp_tnl_gtpu(struct mlx5dr_cmd_caps *caps)
+{
+       return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu(struct mlx5dr_match_param *mask,
+                               struct mlx5dr_domain *dmn)
+{
+       return dr_mask_is_tnl_gtpu_set(&mask->misc3) &&
+              dr_matcher_supp_tnl_gtpu(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_dw_0(struct mlx5dr_cmd_caps *caps)
+{
+       return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_0_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_dw_0(struct mlx5dr_match_param *mask,
+                                    struct mlx5dr_domain *dmn)
+{
+       return mask->misc3.gtpu_dw_0 &&
+              dr_matcher_supp_tnl_gtpu_dw_0(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_teid(struct mlx5dr_cmd_caps *caps)
+{
+       return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_TEID_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_teid(struct mlx5dr_match_param *mask,
+                                    struct mlx5dr_domain *dmn)
+{
+       return mask->misc3.gtpu_teid &&
+              dr_matcher_supp_tnl_gtpu_teid(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_dw_2(struct mlx5dr_cmd_caps *caps)
+{
+       return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_DW_2_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_dw_2(struct mlx5dr_match_param *mask,
+                                    struct mlx5dr_domain *dmn)
+{
+       return mask->misc3.gtpu_dw_2 &&
+              dr_matcher_supp_tnl_gtpu_dw_2(&dmn->info.caps);
+}
+
+static int dr_matcher_supp_tnl_gtpu_first_ext(struct mlx5dr_cmd_caps *caps)
+{
+       return caps->flex_protocols & MLX5_FLEX_PARSER_GTPU_FIRST_EXT_DW_0_ENABLED;
+}
+
+static bool dr_mask_is_tnl_gtpu_first_ext(struct mlx5dr_match_param *mask,
+                                         struct mlx5dr_domain *dmn)
+{
+       return mask->misc3.gtpu_first_ext_dw_0 &&
+              dr_matcher_supp_tnl_gtpu_first_ext(&dmn->info.caps);
+}
+
+/* Any GTP-U field whose FW-assigned parser id is flex parser 0 */
+static bool dr_mask_is_tnl_gtpu_flex_parser_0(struct mlx5dr_match_param *mask,
+                                             struct mlx5dr_domain *dmn)
+{
+       struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+
+       return (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_0) &&
+               dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
+              (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_teid) &&
+               dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
+              (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_dw_2) &&
+               dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
+              (dr_is_flex_parser_0_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
+               dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
+}
+
+/* Any GTP-U field whose FW-assigned parser id is flex parser 1 */
+static bool dr_mask_is_tnl_gtpu_flex_parser_1(struct mlx5dr_match_param *mask,
+                                             struct mlx5dr_domain *dmn)
+{
+       struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+
+       return (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_0) &&
+               dr_mask_is_tnl_gtpu_dw_0(mask, dmn)) ||
+              (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_teid) &&
+               dr_mask_is_tnl_gtpu_teid(mask, dmn)) ||
+              (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_dw_2) &&
+               dr_mask_is_tnl_gtpu_dw_2(mask, dmn)) ||
+              (dr_is_flex_parser_1_id(caps->flex_parser_id_gtpu_first_ext_dw_0) &&
+               dr_mask_is_tnl_gtpu_first_ext(mask, dmn));
+}
+
+static bool dr_mask_is_tnl_gtpu_any(struct mlx5dr_match_param *mask,
+                                   struct mlx5dr_domain *dmn)
+{
+       return dr_mask_is_tnl_gtpu_flex_parser_0(mask, dmn) ||
+              dr_mask_is_tnl_gtpu_flex_parser_1(mask, dmn) ||
+              dr_mask_is_tnl_gtpu(mask, dmn);
+}
+
+/* ICMP, WQE metadata (reg_a), register-C and source gvmi/qpn helpers */
+static int dr_matcher_supp_icmp_v4(struct mlx5dr_cmd_caps *caps)
+{
+       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+              (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V4_ENABLED);
+}
+
+static int dr_matcher_supp_icmp_v6(struct mlx5dr_cmd_caps *caps)
+{
+       return (caps->sw_format_ver >= MLX5_STEERING_FORMAT_CONNECTX_6DX) ||
+              (caps->flex_protocols & MLX5_FLEX_PARSER_ICMP_V6_ENABLED);
+}
+
+static bool dr_mask_is_icmpv6_set(struct mlx5dr_match_misc3 *misc3)
+{
+       return (misc3->icmpv6_type || misc3->icmpv6_code ||
+               misc3->icmpv6_header_data);
+}
+
+/* ICMPv4 and ICMPv6 have separate device capabilities; check the one
+ * matching whichever family the mask selects.
+ */
+static bool dr_mask_is_icmp(struct mlx5dr_match_param *mask,
+                           struct mlx5dr_domain *dmn)
+{
+       if (DR_MASK_IS_ICMPV4_SET(&mask->misc3))
+               return dr_matcher_supp_icmp_v4(&dmn->info.caps);
+       else if (dr_mask_is_icmpv6_set(&mask->misc3))
+               return dr_matcher_supp_icmp_v6(&dmn->info.caps);
+
+       return false;
+}
+
+static bool dr_mask_is_wqe_metadata_set(struct mlx5dr_match_misc2 *misc2)
+{
+       return misc2->metadata_reg_a;
+}
+
+static bool dr_mask_is_reg_c_0_3_set(struct mlx5dr_match_misc2 *misc2)
+{
+       return (misc2->metadata_reg_c_0 || misc2->metadata_reg_c_1 ||
+               misc2->metadata_reg_c_2 || misc2->metadata_reg_c_3);
+}
+
+static bool dr_mask_is_reg_c_4_7_set(struct mlx5dr_match_misc2 *misc2)
+{
+       return (misc2->metadata_reg_c_4 || misc2->metadata_reg_c_5 ||
+               misc2->metadata_reg_c_6 || misc2->metadata_reg_c_7);
+}
+
+static bool dr_mask_is_gvmi_or_qpn_set(struct mlx5dr_match_misc *misc)
+{
+       return (misc->source_sqn || misc->source_port);
+}
+}
+
+/* A programmable-sample field belongs to the flex_parser_0 STE when its
+ * parser id is in the 0..DR_STE_MAX_FLEX_0_ID range. Id 0 is ambiguous
+ * (the field is zero whether used or not), so for id 0 the field is
+ * considered in use only when its value mask is set.
+ */
+static bool dr_mask_is_flex_parser_id_0_3_set(u32 flex_parser_id,
+                                             u32 flex_parser_value)
+{
+       if (flex_parser_id)
+               return flex_parser_id <= DR_STE_MAX_FLEX_0_ID;
+
+       /* Using flex_parser 0 means that id is zero, thus value must be set. */
+       return flex_parser_value;
+}
+
+/* True if any of the four programmable sample fields uses parser ids 0..3 */
+static bool dr_mask_is_flex_parser_0_3_set(struct mlx5dr_match_misc4 *misc4)
+{
+       return (dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_0,
+                                                 misc4->prog_sample_field_value_0) ||
+               dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_1,
+                                                 misc4->prog_sample_field_value_1) ||
+               dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_2,
+                                                 misc4->prog_sample_field_value_2) ||
+               dr_mask_is_flex_parser_id_0_3_set(misc4->prog_sample_field_id_3,
+                                                 misc4->prog_sample_field_value_3));
+}
+
+/* Parser ids in (DR_STE_MAX_FLEX_0_ID, DR_STE_MAX_FLEX_1_ID] belong to
+ * the flex_parser_1 STE.
+ */
+static bool dr_mask_is_flex_parser_id_4_7_set(u32 flex_parser_id)
+{
+       return flex_parser_id > DR_STE_MAX_FLEX_0_ID &&
+              flex_parser_id <= DR_STE_MAX_FLEX_1_ID;
+}
+
+/* True if any of the four programmable sample fields uses parser ids 4..7 */
+static bool dr_mask_is_flex_parser_4_7_set(struct mlx5dr_match_misc4 *misc4)
+{
+       return (dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_0) ||
+               dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_1) ||
+               dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_2) ||
+               dr_mask_is_flex_parser_id_4_7_set(misc4->prog_sample_field_id_3));
+}
+
+/* Device capability: flex parser configured for MPLS over GRE */
+static int dr_matcher_supp_tnl_mpls_over_gre(struct mlx5dr_cmd_caps *caps)
+{
+       return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED;
+}
+
+/* Mask uses MPLS-over-GRE fields and the device can parse them */
+static bool dr_mask_is_tnl_mpls_over_gre(struct mlx5dr_match_param *mask,
+                                        struct mlx5dr_domain *dmn)
+{
+       return DR_MASK_IS_OUTER_MPLS_OVER_GRE_SET(&mask->misc2) &&
+              dr_matcher_supp_tnl_mpls_over_gre(&dmn->info.caps);
+}
+
+/* Device capability: flex parser configured for MPLS over UDP */
+static int dr_matcher_supp_tnl_mpls_over_udp(struct mlx5dr_cmd_caps *caps)
+{
+       return caps->flex_protocols & MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED;
+}
+
+/* Mask uses MPLS-over-UDP fields and the device can parse them */
+static bool dr_mask_is_tnl_mpls_over_udp(struct mlx5dr_match_param *mask,
+                                        struct mlx5dr_domain *dmn)
+{
+       return DR_MASK_IS_OUTER_MPLS_OVER_UDP_SET(&mask->misc2) &&
+              dr_matcher_supp_tnl_mpls_over_udp(&dmn->info.caps);
+}
+
+/* True if the mask matches on tunnel header words 0/1 */
+static bool dr_mask_is_tnl_header_0_1_set(struct mlx5dr_match_misc5 *misc5)
+{
+       return misc5->tunnel_header_0 || misc5->tunnel_header_1;
+}
+
+/* Select the pre-computed STE builder array matching a rule's outer/inner
+ * IP versions. Fails with -EINVAL if no builders were generated for this
+ * (outer_ipv, inner_ipv) combination (i.e. the matcher's mask cannot
+ * express such a rule).
+ */
+int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
+                                  struct mlx5dr_matcher_rx_tx *nic_matcher,
+                                  enum mlx5dr_ipv outer_ipv,
+                                  enum mlx5dr_ipv inner_ipv)
+{
+       nic_matcher->ste_builder =
+               nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
+       nic_matcher->num_of_builders =
+               nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv];
+
+       if (!nic_matcher->num_of_builders) {
+               mlx5dr_dbg(matcher->tbl->dmn,
+                          "Rule not supported on this matcher due to IP related fields\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Build the STE builder chain for one (outer_ipv, inner_ipv) combination.
+ * Works on a local copy of the matcher's mask; each selected builder is
+ * expected to clear the mask fields it consumes, so any bits left at the
+ * end indicate unsupported match parameters. The order of the builder
+ * calls below defines the order of the STE lookup chain.
+ */
+static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
+                                      struct mlx5dr_matcher_rx_tx *nic_matcher,
+                                      enum mlx5dr_ipv outer_ipv,
+                                      enum mlx5dr_ipv inner_ipv)
+{
+       struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
+       struct mlx5dr_match_param mask = {};
+       bool allow_empty_match = false;
+       struct mlx5dr_ste_build *sb;
+       bool inner, rx;
+       int idx = 0;  /* number of builders appended so far */
+       int ret, i;
+
+       sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv];
+       rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
+
+       /* Create a temporary mask to track and clear used mask fields */
+       if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER)
+               mask.outer = matcher->mask.outer;
+
+       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC)
+               mask.misc = matcher->mask.misc;
+
+       if (matcher->match_criteria & DR_MATCHER_CRITERIA_INNER)
+               mask.inner = matcher->mask.inner;
+
+       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC2)
+               mask.misc2 = matcher->mask.misc2;
+
+       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC3)
+               mask.misc3 = matcher->mask.misc3;
+
+       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4)
+               mask.misc4 = matcher->mask.misc4;
+
+       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC5)
+               mask.misc5 = matcher->mask.misc5;
+
+       ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
+                                        &matcher->mask, NULL);
+       if (ret)
+               return ret;
+
+       /* Optimize RX pipe by reducing source port match, since
+        * the FDB RX part is connected only to the wire.
+        */
+       if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB &&
+           rx && mask.misc.source_port) {
+               mask.misc.source_port = 0;
+               mask.misc.source_eswitch_owner_vhca_id = 0;
+               allow_empty_match = true;
+       }
+
+       /* Outer */
+       if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER |
+                                      DR_MATCHER_CRITERIA_MISC |
+                                      DR_MATCHER_CRITERIA_MISC2 |
+                                      DR_MATCHER_CRITERIA_MISC3 |
+                                      DR_MATCHER_CRITERIA_MISC5)) {
+               inner = false;
+
+               if (dr_mask_is_wqe_metadata_set(&mask.misc2))
+                       mlx5dr_ste_build_general_purpose(ste_ctx, &sb[idx++],
+                                                        &mask, inner, rx);
+
+               if (dr_mask_is_reg_c_0_3_set(&mask.misc2))
+                       mlx5dr_ste_build_register_0(ste_ctx, &sb[idx++],
+                                                   &mask, inner, rx);
+
+               if (dr_mask_is_reg_c_4_7_set(&mask.misc2))
+                       mlx5dr_ste_build_register_1(ste_ctx, &sb[idx++],
+                                                   &mask, inner, rx);
+
+               /* Source gvmi/qpn STE is only valid for FDB and NIC RX domains */
+               if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
+                   (dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
+                    dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
+                       mlx5dr_ste_build_src_gvmi_qpn(ste_ctx, &sb[idx++],
+                                                     &mask, dmn, inner, rx);
+               }
+
+               if (dr_mask_is_smac_set(&mask.outer) &&
+                   dr_mask_is_dmac_set(&mask.outer)) {
+                       mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
+                                                       &mask, inner, rx);
+               }
+
+               if (dr_mask_is_smac_set(&mask.outer))
+                       mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
+                                                   &mask, inner, rx);
+
+               if (DR_MASK_IS_L2_DST(mask.outer, mask.misc, outer))
+                       mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+                                                   &mask, inner, rx);
+
+               if (outer_ipv == DR_RULE_IPV6) {
+                       if (DR_MASK_IS_DST_IP_SET(&mask.outer))
+                               mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+                                                                &mask, inner, rx);
+
+                       if (DR_MASK_IS_SRC_IP_SET(&mask.outer))
+                               mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+                                                                &mask, inner, rx);
+
+                       if (DR_MASK_IS_ETH_L4_SET(mask.outer, mask.misc, outer))
+                               mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+                                                               &mask, inner, rx);
+               } else {
+                       if (dr_mask_is_ipv4_5_tuple_set(&mask.outer))
+                               mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+                                                                    &mask, inner, rx);
+
+                       if (dr_mask_is_ttl_set(&mask.outer) ||
+                           dr_mask_is_ipv4_ihl_set(&mask.outer))
+                               mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+                                                                 &mask, inner, rx);
+               }
+
+               /* At most one tunnel type can be matched per rule */
+               if (dr_mask_is_tnl_vxlan_gpe(&mask, dmn))
+                       mlx5dr_ste_build_tnl_vxlan_gpe(ste_ctx, &sb[idx++],
+                                                      &mask, inner, rx);
+               else if (dr_mask_is_tnl_geneve(&mask, dmn)) {
+                       mlx5dr_ste_build_tnl_geneve(ste_ctx, &sb[idx++],
+                                                   &mask, inner, rx);
+                       if (dr_mask_is_tnl_geneve_tlv_opt(&mask.misc3))
+                               mlx5dr_ste_build_tnl_geneve_tlv_opt(ste_ctx, &sb[idx++],
+                                                                   &mask, &dmn->info.caps,
+                                                                   inner, rx);
+                       if (dr_mask_is_tnl_geneve_tlv_opt_exist_set(&mask.misc, dmn))
+                               mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(ste_ctx, &sb[idx++],
+                                                                         &mask, &dmn->info.caps,
+                                                                         inner, rx);
+               } else if (dr_mask_is_tnl_gtpu_any(&mask, dmn)) {
+                       if (dr_mask_is_tnl_gtpu_flex_parser_0(&mask, dmn))
+                               mlx5dr_ste_build_tnl_gtpu_flex_parser_0(ste_ctx, &sb[idx++],
+                                                                       &mask, &dmn->info.caps,
+                                                                       inner, rx);
+
+                       if (dr_mask_is_tnl_gtpu_flex_parser_1(&mask, dmn))
+                               mlx5dr_ste_build_tnl_gtpu_flex_parser_1(ste_ctx, &sb[idx++],
+                                                                       &mask, &dmn->info.caps,
+                                                                       inner, rx);
+
+                       if (dr_mask_is_tnl_gtpu(&mask, dmn))
+                               mlx5dr_ste_build_tnl_gtpu(ste_ctx, &sb[idx++],
+                                                         &mask, inner, rx);
+               } else if (dr_mask_is_tnl_header_0_1_set(&mask.misc5)) {
+                       mlx5dr_ste_build_tnl_header_0_1(ste_ctx, &sb[idx++],
+                                                       &mask, inner, rx);
+               }
+
+               if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, outer))
+                       mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+                                                    &mask, inner, rx);
+
+               if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, outer))
+                       mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+                                             &mask, inner, rx);
+
+               if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
+                       mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
+                                                          &mask, &dmn->info.caps,
+                                                          inner, rx);
+               else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
+                       mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
+                                                          &mask, &dmn->info.caps,
+                                                          inner, rx);
+
+               if (dr_mask_is_icmp(&mask, dmn))
+                       mlx5dr_ste_build_icmp(ste_ctx, &sb[idx++],
+                                             &mask, &dmn->info.caps,
+                                             inner, rx);
+
+               if (dr_mask_is_tnl_gre_set(&mask.misc))
+                       mlx5dr_ste_build_tnl_gre(ste_ctx, &sb[idx++],
+                                                &mask, inner, rx);
+       }
+
+       /* Inner */
+       if (matcher->match_criteria & (DR_MATCHER_CRITERIA_INNER |
+                                      DR_MATCHER_CRITERIA_MISC |
+                                      DR_MATCHER_CRITERIA_MISC2 |
+                                      DR_MATCHER_CRITERIA_MISC3)) {
+               inner = true;
+
+               if (dr_mask_is_eth_l2_tnl_set(&mask.misc))
+                       mlx5dr_ste_build_eth_l2_tnl(ste_ctx, &sb[idx++],
+                                                   &mask, inner, rx);
+
+               if (dr_mask_is_smac_set(&mask.inner) &&
+                   dr_mask_is_dmac_set(&mask.inner)) {
+                       mlx5dr_ste_build_eth_l2_src_dst(ste_ctx, &sb[idx++],
+                                                       &mask, inner, rx);
+               }
+
+               if (dr_mask_is_smac_set(&mask.inner))
+                       mlx5dr_ste_build_eth_l2_src(ste_ctx, &sb[idx++],
+                                                   &mask, inner, rx);
+
+               if (DR_MASK_IS_L2_DST(mask.inner, mask.misc, inner))
+                       mlx5dr_ste_build_eth_l2_dst(ste_ctx, &sb[idx++],
+                                                   &mask, inner, rx);
+
+               if (inner_ipv == DR_RULE_IPV6) {
+                       if (DR_MASK_IS_DST_IP_SET(&mask.inner))
+                               mlx5dr_ste_build_eth_l3_ipv6_dst(ste_ctx, &sb[idx++],
+                                                                &mask, inner, rx);
+
+                       if (DR_MASK_IS_SRC_IP_SET(&mask.inner))
+                               mlx5dr_ste_build_eth_l3_ipv6_src(ste_ctx, &sb[idx++],
+                                                                &mask, inner, rx);
+
+                       if (DR_MASK_IS_ETH_L4_SET(mask.inner, mask.misc, inner))
+                               mlx5dr_ste_build_eth_ipv6_l3_l4(ste_ctx, &sb[idx++],
+                                                               &mask, inner, rx);
+               } else {
+                       if (dr_mask_is_ipv4_5_tuple_set(&mask.inner))
+                               mlx5dr_ste_build_eth_l3_ipv4_5_tuple(ste_ctx, &sb[idx++],
+                                                                    &mask, inner, rx);
+
+                       if (dr_mask_is_ttl_set(&mask.inner) ||
+                           dr_mask_is_ipv4_ihl_set(&mask.inner))
+                               mlx5dr_ste_build_eth_l3_ipv4_misc(ste_ctx, &sb[idx++],
+                                                                 &mask, inner, rx);
+               }
+
+               if (DR_MASK_IS_ETH_L4_MISC_SET(mask.misc3, inner))
+                       mlx5dr_ste_build_eth_l4_misc(ste_ctx, &sb[idx++],
+                                                    &mask, inner, rx);
+
+               if (DR_MASK_IS_FIRST_MPLS_SET(mask.misc2, inner))
+                       mlx5dr_ste_build_mpls(ste_ctx, &sb[idx++],
+                                             &mask, inner, rx);
+
+               if (dr_mask_is_tnl_mpls_over_gre(&mask, dmn))
+                       mlx5dr_ste_build_tnl_mpls_over_gre(ste_ctx, &sb[idx++],
+                                                          &mask, &dmn->info.caps,
+                                                          inner, rx);
+               else if (dr_mask_is_tnl_mpls_over_udp(&mask, dmn))
+                       mlx5dr_ste_build_tnl_mpls_over_udp(ste_ctx, &sb[idx++],
+                                                          &mask, &dmn->info.caps,
+                                                          inner, rx);
+       }
+
+       /* Flex parser builders are always built as outer (inner = false) */
+       if (matcher->match_criteria & DR_MATCHER_CRITERIA_MISC4) {
+               if (dr_mask_is_flex_parser_0_3_set(&mask.misc4))
+                       mlx5dr_ste_build_flex_parser_0(ste_ctx, &sb[idx++],
+                                                      &mask, false, rx);
+
+               if (dr_mask_is_flex_parser_4_7_set(&mask.misc4))
+                       mlx5dr_ste_build_flex_parser_1(ste_ctx, &sb[idx++],
+                                                      &mask, false, rx);
+       }
+
+       /* Empty matcher, takes all */
+       if ((!idx && allow_empty_match) ||
+           matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY)
+               mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx);
+
+       if (idx == 0) {
+               mlx5dr_err(dmn, "Cannot generate any valid rules from mask\n");
+               return -EINVAL;
+       }
+
+       /* Check that all mask fields were consumed */
+       for (i = 0; i < sizeof(struct mlx5dr_match_param); i++) {
+               if (((u8 *)&mask)[i] != 0) {
+                       mlx5dr_dbg(dmn, "Mask contains unsupported parameters\n");
+                       return -EOPNOTSUPP;
+               }
+       }
+
+       nic_matcher->ste_builder = sb;
+       nic_matcher->num_of_builders_arr[outer_ipv][inner_ipv] = idx;
+
+       return 0;
+}
+
+/* Splice curr_nic_matcher into the hash-table chain between
+ * prev_nic_matcher (or the table's start anchor when NULL) and
+ * next_nic_matcher (or the table's default miss address when NULL).
+ * Tables are (re)posted back-to-front so a hit never reaches an
+ * unconnected table.
+ */
+static int dr_nic_matcher_connect(struct mlx5dr_domain *dmn,
+                                 struct mlx5dr_matcher_rx_tx *curr_nic_matcher,
+                                 struct mlx5dr_matcher_rx_tx *next_nic_matcher,
+                                 struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
+{
+       struct mlx5dr_table_rx_tx *nic_tbl = curr_nic_matcher->nic_tbl;
+       struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
+       struct mlx5dr_htbl_connect_info info;
+       struct mlx5dr_ste_htbl *prev_htbl;
+       int ret;
+
+       /* Connect end anchor hash table to next_htbl or to the default address */
+       if (next_nic_matcher) {
+               info.type = CONNECT_HIT;
+               info.hit_next_htbl = next_nic_matcher->s_htbl;
+       } else {
+               info.type = CONNECT_MISS;
+               info.miss_icm_addr = nic_tbl->default_icm_addr;
+       }
+       ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
+                                               curr_nic_matcher->e_anchor,
+                                               &info, info.type == CONNECT_HIT);
+       if (ret)
+               return ret;
+
+       /* Connect start hash table to end anchor */
+       info.type = CONNECT_MISS;
+       info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(curr_nic_matcher->e_anchor->chunk);
+       ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
+                                               curr_nic_matcher->s_htbl,
+                                               &info, false);
+       if (ret)
+               return ret;
+
+       /* Connect previous hash table to matcher start hash table */
+       if (prev_nic_matcher)
+               prev_htbl = prev_nic_matcher->e_anchor;
+       else
+               prev_htbl = nic_tbl->s_anchor;
+
+       info.type = CONNECT_HIT;
+       info.hit_next_htbl = curr_nic_matcher->s_htbl;
+       ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_htbl,
+                                               &info, true);
+       if (ret)
+               return ret;
+
+       /* Update the pointing ste and next hash table */
+       curr_nic_matcher->s_htbl->pointing_ste = prev_htbl->chunk->ste_arr;
+       prev_htbl->chunk->ste_arr[0].next_htbl = curr_nic_matcher->s_htbl;
+
+       if (next_nic_matcher) {
+               next_nic_matcher->s_htbl->pointing_ste =
+                       curr_nic_matcher->e_anchor->chunk->ste_arr;
+               curr_nic_matcher->e_anchor->chunk->ste_arr[0].next_htbl =
+                       next_nic_matcher->s_htbl;
+       }
+
+       return 0;
+}
+
+/* Insert a nic matcher into its table's priority-ordered matcher chain
+ * (ascending prio; an equal-prio neighbor becomes the next matcher),
+ * connect the hash tables accordingly, and link it into the list.
+ * A matcher already on the list is considered connected - no-op.
+ */
+int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
+                                 struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+       struct mlx5dr_matcher_rx_tx *next_nic_matcher, *prev_nic_matcher, *tmp_nic_matcher;
+       struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
+       bool first = true;
+       int ret;
+
+       /* If the nic matcher is already on its parent nic table list,
+        * then it is already connected to the chain of nic matchers.
+        */
+       if (!list_empty(&nic_matcher->list_node))
+               return 0;
+
+       /* Find the first existing matcher with prio >= ours; it becomes
+        * our successor. "first" tracks whether it heads the list.
+        */
+       next_nic_matcher = NULL;
+       list_for_each_entry(tmp_nic_matcher, &nic_tbl->nic_matcher_list, list_node) {
+               if (tmp_nic_matcher->prio >= nic_matcher->prio) {
+                       next_nic_matcher = tmp_nic_matcher;
+                       break;
+               }
+               first = false;
+       }
+
+       prev_nic_matcher = NULL;
+       if (next_nic_matcher && !first)
+               prev_nic_matcher = list_prev_entry(next_nic_matcher, list_node);
+       else if (!first)
+               prev_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
+                                                  struct mlx5dr_matcher_rx_tx,
+                                                  list_node);
+
+       ret = dr_nic_matcher_connect(dmn, nic_matcher,
+                                    next_nic_matcher, prev_nic_matcher);
+       if (ret)
+               return ret;
+
+       /* Link into the list at the position matching the HW chain */
+       if (prev_nic_matcher)
+               list_add(&nic_matcher->list_node, &prev_nic_matcher->list_node);
+       else if (next_nic_matcher)
+               list_add_tail(&nic_matcher->list_node, &next_nic_matcher->list_node);
+       else
+               list_add(&nic_matcher->list_node, &nic_matcher->nic_tbl->nic_matcher_list);
+
+       return ret;
+}
+
+/* Drop the references taken in dr_matcher_init_nic on the start htbl
+ * and end anchor.
+ */
+static void dr_matcher_uninit_nic(struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+       mlx5dr_htbl_put(nic_matcher->s_htbl);
+       mlx5dr_htbl_put(nic_matcher->e_anchor);
+}
+
+/* FDB matchers have both RX and TX sides - uninit both */
+static void dr_matcher_uninit_fdb(struct mlx5dr_matcher *matcher)
+{
+       dr_matcher_uninit_nic(&matcher->rx);
+       dr_matcher_uninit_nic(&matcher->tx);
+}
+
+/* Tear down the per-domain-type nic matcher state (reverse of
+ * dr_matcher_init).
+ */
+static void dr_matcher_uninit(struct mlx5dr_matcher *matcher)
+{
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+
+       switch (dmn->type) {
+       case MLX5DR_DOMAIN_TYPE_NIC_RX:
+               dr_matcher_uninit_nic(&matcher->rx);
+               break;
+       case MLX5DR_DOMAIN_TYPE_NIC_TX:
+               dr_matcher_uninit_nic(&matcher->tx);
+               break;
+       case MLX5DR_DOMAIN_TYPE_FDB:
+               dr_matcher_uninit_fdb(matcher);
+               break;
+       default:
+               WARN_ON(true);
+               break;
+       }
+}
+
+/* Try to generate STE builders for every outer/inner IP version
+ * combination. Per-combination failures are deliberately ignored -
+ * it is enough that at least one combination succeeded (indicated by
+ * nic_matcher->ste_builder being set).
+ */
+static int dr_matcher_set_all_ste_builders(struct mlx5dr_matcher *matcher,
+                                          struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+
+       dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV4);
+       dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV4, DR_RULE_IPV6);
+       dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV4);
+       dr_matcher_set_ste_builders(matcher, nic_matcher, DR_RULE_IPV6, DR_RULE_IPV6);
+
+       if (!nic_matcher->ste_builder) {
+               mlx5dr_err(dmn, "Cannot generate IPv4 or IPv6 rules with given mask\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Initialize one RX/TX side of a matcher: compute the STE builders and
+ * allocate the start hash table and end anchor. On success both tables
+ * hold an extra reference so they persist while the matcher is empty.
+ */
+static int dr_matcher_init_nic(struct mlx5dr_matcher *matcher,
+                              struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       int ret;
+
+       nic_matcher->prio = matcher->prio;
+       INIT_LIST_HEAD(&nic_matcher->list_node);
+
+       ret = dr_matcher_set_all_ste_builders(matcher, nic_matcher);
+       if (ret)
+               return ret;
+
+       /* End anchor: single don't-care entry, terminates the chain */
+       nic_matcher->e_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+                                                     DR_CHUNK_SIZE_1,
+                                                     MLX5DR_STE_LU_TYPE_DONT_CARE,
+                                                     0);
+       if (!nic_matcher->e_anchor)
+               return -ENOMEM;
+
+       /* Start table uses the first builder's lookup type and byte mask */
+       nic_matcher->s_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+                                                   DR_CHUNK_SIZE_1,
+                                                   nic_matcher->ste_builder[0].lu_type,
+                                                   nic_matcher->ste_builder[0].byte_mask);
+       if (!nic_matcher->s_htbl) {
+               ret = -ENOMEM;
+               goto free_e_htbl;
+       }
+
+       /* make sure the tables exist while empty */
+       mlx5dr_htbl_get(nic_matcher->s_htbl);
+       mlx5dr_htbl_get(nic_matcher->e_anchor);
+
+       return 0;
+
+free_e_htbl:
+       mlx5dr_ste_htbl_free(nic_matcher->e_anchor);
+       return ret;
+}
+
+/* FDB matchers need both RX and TX sides; unwind RX if TX init fails */
+static int dr_matcher_init_fdb(struct mlx5dr_matcher *matcher)
+{
+       int ret;
+
+       ret = dr_matcher_init_nic(matcher, &matcher->rx);
+       if (ret)
+               return ret;
+
+       ret = dr_matcher_init_nic(matcher, &matcher->tx);
+       if (ret)
+               goto uninit_nic_rx;
+
+       return 0;
+
+uninit_nic_rx:
+       dr_matcher_uninit_nic(&matcher->rx);
+       return ret;
+}
+
+/* Validate the user-supplied match criteria/mask and copy the mask into
+ * matcher->mask. The copy is done on a scratch buffer that the STE copy
+ * routine clears field-by-field; any byte still set afterwards means the
+ * mask contains parameters SW steering does not support (-EOPNOTSUPP).
+ */
+static int dr_matcher_copy_param(struct mlx5dr_matcher *matcher,
+                                struct mlx5dr_match_parameters *mask)
+{
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       struct mlx5dr_match_parameters consumed_mask;
+       int i, ret = 0;
+
+       if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
+               mlx5dr_err(dmn, "Invalid match criteria attribute\n");
+               return -EINVAL;
+       }
+
+       if (mask) {
+               if (mask->match_sz > DR_SZ_MATCH_PARAM) {
+                       mlx5dr_err(dmn, "Invalid match size attribute\n");
+                       return -EINVAL;
+               }
+
+               consumed_mask.match_buf = kzalloc(mask->match_sz, GFP_KERNEL);
+               if (!consumed_mask.match_buf)
+                       return -ENOMEM;
+
+               consumed_mask.match_sz = mask->match_sz;
+               memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz);
+               mlx5dr_ste_copy_param(matcher->match_criteria,
+                                     &matcher->mask, &consumed_mask, true);
+
+               /* Check that all mask data was consumed */
+               for (i = 0; i < consumed_mask.match_sz; i++) {
+                       if (!((u8 *)consumed_mask.match_buf)[i])
+                               continue;
+
+                       mlx5dr_dbg(dmn,
+                                  "Match param mask contains unsupported parameters\n");
+                       ret = -EOPNOTSUPP;
+                       break;
+               }
+
+               kfree(consumed_mask.match_buf);
+       }
+
+       return ret;
+}
+
+/* Copy/validate the mask, then initialize the matcher's nic side(s)
+ * according to the domain type (RX, TX, or both for FDB).
+ */
+static int dr_matcher_init(struct mlx5dr_matcher *matcher,
+                          struct mlx5dr_match_parameters *mask)
+{
+       struct mlx5dr_table *tbl = matcher->tbl;
+       struct mlx5dr_domain *dmn = tbl->dmn;
+       int ret;
+
+       ret = dr_matcher_copy_param(matcher, mask);
+       if (ret)
+               return ret;
+
+       switch (dmn->type) {
+       case MLX5DR_DOMAIN_TYPE_NIC_RX:
+               matcher->rx.nic_tbl = &tbl->rx;
+               ret = dr_matcher_init_nic(matcher, &matcher->rx);
+               break;
+       case MLX5DR_DOMAIN_TYPE_NIC_TX:
+               matcher->tx.nic_tbl = &tbl->tx;
+               ret = dr_matcher_init_nic(matcher, &matcher->tx);
+               break;
+       case MLX5DR_DOMAIN_TYPE_FDB:
+               matcher->rx.nic_tbl = &tbl->rx;
+               matcher->tx.nic_tbl = &tbl->tx;
+               ret = dr_matcher_init_fdb(matcher);
+               break;
+       default:
+               WARN_ON(true);
+               ret = -EINVAL;
+       }
+
+       return ret;
+}
+
+/* Add the matcher to its table's matcher list (used by the debug dump),
+ * under the domain's dbg mutex.
+ */
+static void dr_matcher_add_to_dbg_list(struct mlx5dr_matcher *matcher)
+{
+       mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
+       list_add(&matcher->list_node, &matcher->tbl->matcher_list);
+       mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
+}
+
+/* Remove the matcher from the debug-dump list, under the dbg mutex */
+static void dr_matcher_remove_from_dbg_list(struct mlx5dr_matcher *matcher)
+{
+       mutex_lock(&matcher->tbl->dmn->dump_info.dbg_mutex);
+       list_del(&matcher->list_node);
+       mutex_unlock(&matcher->tbl->dmn->dump_info.dbg_mutex);
+}
+
+/* Create a matcher on the given table with the given priority and match
+ * criteria/mask. Holds a reference on the table for the matcher's
+ * lifetime. Returns the new matcher, or NULL on any failure (allocation
+ * or init) - not an ERR_PTR.
+ */
+struct mlx5dr_matcher *
+mlx5dr_matcher_create(struct mlx5dr_table *tbl,
+                     u32 priority,
+                     u8 match_criteria_enable,
+                     struct mlx5dr_match_parameters *mask)
+{
+       struct mlx5dr_matcher *matcher;
+       int ret;
+
+       refcount_inc(&tbl->refcount);
+
+       matcher = kzalloc(sizeof(*matcher), GFP_KERNEL);
+       if (!matcher)
+               goto dec_ref;
+
+       matcher->tbl = tbl;
+       matcher->prio = priority;
+       matcher->match_criteria = match_criteria_enable;
+       refcount_set(&matcher->refcount, 1);
+       INIT_LIST_HEAD(&matcher->list_node);
+       INIT_LIST_HEAD(&matcher->dbg_rule_list);
+
+       mlx5dr_domain_lock(tbl->dmn);
+
+       ret = dr_matcher_init(matcher, mask);
+       if (ret)
+               goto free_matcher;
+
+       dr_matcher_add_to_dbg_list(matcher);
+
+       mlx5dr_domain_unlock(tbl->dmn);
+
+       return matcher;
+
+free_matcher:
+       mlx5dr_domain_unlock(tbl->dmn);
+       kfree(matcher);
+dec_ref:
+       refcount_dec(&tbl->refcount);
+       return NULL;
+}
+
+/* Unlink a matcher from the HW chain by re-connecting its predecessor's
+ * anchor (or the table's start anchor) straight to the next matcher, or
+ * to the table's default miss address when there is no next matcher.
+ */
+static int dr_matcher_disconnect_nic(struct mlx5dr_domain *dmn,
+                                    struct mlx5dr_table_rx_tx *nic_tbl,
+                                    struct mlx5dr_matcher_rx_tx *next_nic_matcher,
+                                    struct mlx5dr_matcher_rx_tx *prev_nic_matcher)
+{
+       struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
+       struct mlx5dr_htbl_connect_info info;
+       struct mlx5dr_ste_htbl *prev_anchor;
+
+       if (prev_nic_matcher)
+               prev_anchor = prev_nic_matcher->e_anchor;
+       else
+               prev_anchor = nic_tbl->s_anchor;
+
+       /* Connect previous anchor hash table to next matcher or to the default address */
+       if (next_nic_matcher) {
+               info.type = CONNECT_HIT;
+               info.hit_next_htbl = next_nic_matcher->s_htbl;
+               next_nic_matcher->s_htbl->pointing_ste = prev_anchor->chunk->ste_arr;
+               prev_anchor->chunk->ste_arr[0].next_htbl = next_nic_matcher->s_htbl;
+       } else {
+               info.type = CONNECT_MISS;
+               info.miss_icm_addr = nic_tbl->default_icm_addr;
+               prev_anchor->chunk->ste_arr[0].next_htbl = NULL;
+       }
+
+       return mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, prev_anchor,
+                                                &info, true);
+}
+
+/* Remove a nic matcher from its table's chain: find its list neighbors,
+ * disconnect it in HW, then delete it from the list (re-initializing its
+ * node so a later add sees it as detached). Detached matchers are a no-op.
+ */
+int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
+                                      struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+       struct mlx5dr_matcher_rx_tx *prev_nic_matcher, *next_nic_matcher;
+       struct mlx5dr_table_rx_tx *nic_tbl = nic_matcher->nic_tbl;
+       int ret;
+
+       /* If the nic matcher is not on its parent nic table list,
+        * then it is detached - no need to disconnect it.
+        */
+       if (list_empty(&nic_matcher->list_node))
+               return 0;
+
+       if (list_is_last(&nic_matcher->list_node, &nic_tbl->nic_matcher_list))
+               next_nic_matcher = NULL;
+       else
+               next_nic_matcher = list_next_entry(nic_matcher, list_node);
+
+       if (nic_matcher->list_node.prev == &nic_tbl->nic_matcher_list)
+               prev_nic_matcher = NULL;
+       else
+               prev_nic_matcher = list_prev_entry(nic_matcher, list_node);
+
+       ret = dr_matcher_disconnect_nic(dmn, nic_tbl, next_nic_matcher, prev_nic_matcher);
+       if (ret)
+               return ret;
+
+       list_del_init(&nic_matcher->list_node);
+       return 0;
+}
+
+/* Destroy a matcher. Refuses (-EBUSY) if anything beyond the creator
+ * still holds a reference (e.g. rules). Releases the table reference
+ * taken at create time.
+ */
+int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher)
+{
+       struct mlx5dr_table *tbl = matcher->tbl;
+
+       if (WARN_ON_ONCE(refcount_read(&matcher->refcount) > 1))
+               return -EBUSY;
+
+       mlx5dr_domain_lock(tbl->dmn);
+
+       dr_matcher_remove_from_dbg_list(matcher);
+       dr_matcher_uninit(matcher);
+       refcount_dec(&matcher->tbl->refcount);
+
+       mlx5dr_domain_unlock(tbl->dmn);
+       kfree(matcher);
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ptrn.c
new file mode 100644 (file)
index 0000000..8ca534e
--- /dev/null
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "dr_types.h"
+#include "mlx5_ifc_dr_ste_v1.h"
+
/* Modify-header action type IDs as encoded in the STEv1 double-action
 * format (see mlx5_ifc_dr_ste_v1.h for the action layouts).
 */
enum dr_ptrn_modify_hdr_action_id {
	DR_PTRN_MODIFY_HDR_ACTION_ID_NOP = 0x00,
	DR_PTRN_MODIFY_HDR_ACTION_ID_COPY = 0x05,
	DR_PTRN_MODIFY_HDR_ACTION_ID_SET = 0x06,
	DR_PTRN_MODIFY_HDR_ACTION_ID_ADD = 0x07,
	DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE = 0x0a,
};
+
/* Modify-header pattern manager: owns the pattern ICM pool and a
 * most-recently-used-first cache of patterns already written to ICM.
 */
struct mlx5dr_ptrn_mgr {
	struct mlx5dr_domain *dmn;
	struct mlx5dr_icm_pool *ptrn_icm_pool;
	/* cache for modify_header ptrn */
	struct list_head ptrn_list;
	struct mutex modify_hdr_mutex; /* protect the pattern cache */
};
+
+/* Cache structure and functions */
+static bool dr_ptrn_compare_modify_hdr(size_t cur_num_of_actions,
+                                      __be64 cur_hw_actions[],
+                                      size_t num_of_actions,
+                                      __be64 hw_actions[])
+{
+       int i;
+
+       if (cur_num_of_actions != num_of_actions)
+               return false;
+
+       for (i = 0; i < num_of_actions; i++) {
+               u8 action_id =
+                       MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);
+
+               if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_COPY) {
+                       if (hw_actions[i] != cur_hw_actions[i])
+                               return false;
+               } else {
+                       if ((__force __be32)hw_actions[i] !=
+                           (__force __be32)cur_hw_actions[i])
+                               return false;
+               }
+       }
+
+       return true;
+}
+
+static struct mlx5dr_ptrn_obj *
+dr_ptrn_find_cached_pattern(struct mlx5dr_ptrn_mgr *mgr,
+                           size_t num_of_actions,
+                           __be64 hw_actions[])
+{
+       struct mlx5dr_ptrn_obj *cached_pattern;
+       struct mlx5dr_ptrn_obj *tmp;
+
+       list_for_each_entry_safe(cached_pattern, tmp, &mgr->ptrn_list, list) {
+               if (dr_ptrn_compare_modify_hdr(cached_pattern->num_of_actions,
+                                              (__be64 *)cached_pattern->data,
+                                              num_of_actions,
+                                              hw_actions)) {
+                       /* Put this pattern in the head of the list,
+                        * as we will probably use it more.
+                        */
+                       list_del_init(&cached_pattern->list);
+                       list_add(&cached_pattern->list, &mgr->ptrn_list);
+                       return cached_pattern;
+               }
+       }
+
+       return NULL;
+}
+
+static struct mlx5dr_ptrn_obj *
+dr_ptrn_alloc_pattern(struct mlx5dr_ptrn_mgr *mgr,
+                     u16 num_of_actions, u8 *data)
+{
+       struct mlx5dr_ptrn_obj *pattern;
+       struct mlx5dr_icm_chunk *chunk;
+       u32 chunk_size;
+       u32 index;
+
+       chunk_size = ilog2(roundup_pow_of_two(num_of_actions));
+       /* HW modify action index granularity is at least 64B */
+       chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);
+
+       chunk = mlx5dr_icm_alloc_chunk(mgr->ptrn_icm_pool, chunk_size);
+       if (!chunk)
+               return NULL;
+
+       index = (mlx5dr_icm_pool_get_chunk_icm_addr(chunk) -
+                mgr->dmn->info.caps.hdr_modify_pattern_icm_addr) /
+               DR_ACTION_CACHE_LINE_SIZE;
+
+       pattern = kzalloc(sizeof(*pattern), GFP_KERNEL);
+       if (!pattern)
+               goto free_chunk;
+
+       pattern->data = kzalloc(num_of_actions * DR_MODIFY_ACTION_SIZE *
+                               sizeof(*pattern->data), GFP_KERNEL);
+       if (!pattern->data)
+               goto free_pattern;
+
+       memcpy(pattern->data, data, num_of_actions * DR_MODIFY_ACTION_SIZE);
+       pattern->chunk = chunk;
+       pattern->index = index;
+       pattern->num_of_actions = num_of_actions;
+
+       list_add(&pattern->list, &mgr->ptrn_list);
+       refcount_set(&pattern->refcount, 1);
+
+       return pattern;
+
+free_pattern:
+       kfree(pattern);
+free_chunk:
+       mlx5dr_icm_free_chunk(chunk);
+       return NULL;
+}
+
/* Remove a pattern from the cache and release its ICM chunk and
 * memory. Caller must hold mgr->modify_hdr_mutex.
 */
static void
dr_ptrn_free_pattern(struct mlx5dr_ptrn_obj *pattern)
{
	list_del(&pattern->list);
	mlx5dr_icm_free_chunk(pattern->chunk);
	kfree(pattern->data);
	kfree(pattern);
}
+
/* Get a modify-header pattern matching @data: either take a reference
 * on a cached one, or allocate a new pattern, mask out its inline data
 * and post it to ICM.
 * Takes mgr->modify_hdr_mutex. Returns NULL on failure.
 */
struct mlx5dr_ptrn_obj *
mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
			      u16 num_of_actions,
			      u8 *data)
{
	struct mlx5dr_ptrn_obj *pattern;
	u64 *hw_actions;
	u8 action_id;
	int i;

	mutex_lock(&mgr->modify_hdr_mutex);
	pattern = dr_ptrn_find_cached_pattern(mgr,
					      num_of_actions,
					      (__be64 *)data);
	if (!pattern) {
		/* Alloc and add new pattern to cache */
		pattern = dr_ptrn_alloc_pattern(mgr, num_of_actions, data);
		if (!pattern)
			goto out_unlock;

		hw_actions = (u64 *)pattern->data;
		/* Here we mask the pattern data to create a valid pattern
		 * since we do an OR operation between the arg and pattern
		 */
		for (i = 0; i < num_of_actions; i++) {
			action_id = MLX5_GET(ste_double_action_set_v1, &hw_actions[i], action_id);

			if (action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_SET ||
			    action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_ADD ||
			    action_id == DR_PTRN_MODIFY_HDR_ACTION_ID_INSERT_INLINE)
				MLX5_SET(ste_double_action_set_v1, &hw_actions[i], inline_data, 0);
		}

		if (mlx5dr_send_postsend_pattern(mgr->dmn, pattern->chunk,
						 num_of_actions, pattern->data)) {
			/* Drop the initial reference taken in
			 * dr_ptrn_alloc_pattern() before freeing
			 */
			refcount_dec(&pattern->refcount);
			goto free_pattern;
		}
	} else {
		refcount_inc(&pattern->refcount);
	}

	mutex_unlock(&mgr->modify_hdr_mutex);

	return pattern;

free_pattern:
	dr_ptrn_free_pattern(pattern);
out_unlock:
	mutex_unlock(&mgr->modify_hdr_mutex);
	return NULL;
}
+
/* Release one reference on a cached pattern; the pattern (and its ICM
 * chunk) is freed when the last reference is dropped.
 */
void
mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
			      struct mlx5dr_ptrn_obj *pattern)
{
	mutex_lock(&mgr->modify_hdr_mutex);

	if (refcount_dec_and_test(&pattern->refcount))
		dr_ptrn_free_pattern(pattern);

	mutex_unlock(&mgr->modify_hdr_mutex);
}
+
/* Create the modify-header pattern manager for a domain.
 * Returns NULL if the domain doesn't support pattern/argument based
 * modify-header, or on allocation failure.
 */
struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn)
{
	struct mlx5dr_ptrn_mgr *mgr;

	if (!mlx5dr_domain_is_support_ptrn_arg(dmn))
		return NULL;

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return NULL;

	mgr->dmn = dmn;
	mgr->ptrn_icm_pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_MODIFY_HDR_PTRN);
	if (!mgr->ptrn_icm_pool) {
		mlx5dr_err(dmn, "Couldn't get modify-header-pattern memory\n");
		goto free_mgr;
	}

	INIT_LIST_HEAD(&mgr->ptrn_list);
	mutex_init(&mgr->modify_hdr_mutex);

	return mgr;

free_mgr:
	kfree(mgr);
	return NULL;
}
+
/* Destroy the pattern manager. The cache is expected to be empty here
 * (every get balanced by a put); leftovers trigger the WARN_ON but are
 * still freed to avoid leaking memory — their ICM chunks go away with
 * the pool destruction below.
 */
void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr)
{
	struct mlx5dr_ptrn_obj *pattern;
	struct mlx5dr_ptrn_obj *tmp;

	if (!mgr)
		return;

	WARN_ON(!list_empty(&mgr->ptrn_list));

	list_for_each_entry_safe(pattern, tmp, &mgr->ptrn_list, list) {
		list_del(&pattern->list);
		kfree(pattern->data);
		kfree(pattern);
	}

	mlx5dr_icm_pool_destroy(mgr->ptrn_icm_pool);
	mutex_destroy(&mgr->modify_hdr_mutex);
	kfree(mgr);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_rule.c
new file mode 100644 (file)
index 0000000..d1db04b
--- /dev/null
@@ -0,0 +1,1377 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN < 2048)
/* don't try to optimize STE allocation if the stack is too constraining */
#define DR_RULE_MAX_STES_OPTIMIZED 0
#else
#define DR_RULE_MAX_STES_OPTIMIZED 2
#endif
/* longest match+action STE chain eligible for the on-stack optimization */
#define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES)
+
/* Append @new_last_ste to the end of a miss (collision) list:
 * point the current last STE's miss address at the new entry and queue
 * the updated control section on @send_list for posting to HW.
 */
static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn,
				       enum mlx5dr_domain_nic_type nic_type,
				       struct mlx5dr_ste *new_last_ste,
				       struct list_head *miss_list,
				       struct list_head *send_list)
{
	struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
	struct mlx5dr_ste_send_info *ste_info_last;
	struct mlx5dr_ste *last_ste;

	/* The new entry will be inserted after the last */
	last_ste = list_last_entry(miss_list, struct mlx5dr_ste, miss_list_node);
	WARN_ON(!last_ste);

	ste_info_last = mlx5dr_send_info_alloc(dmn, nic_type);
	if (!ste_info_last)
		return -ENOMEM;

	mlx5dr_ste_set_miss_addr(ste_ctx, mlx5dr_ste_get_hw_ste(last_ste),
				 mlx5dr_ste_get_icm_addr(new_last_ste));
	list_add_tail(&new_last_ste->miss_list_node, miss_list);

	mlx5dr_send_fill_and_append_ste_send_info(last_ste, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(last_ste),
						  ste_info_last, send_list, true);

	return 0;
}
+
/* If @hw_ste's miss address was not already set, point it at the
 * matcher's end-anchor table so misses continue down the chain.
 */
static void dr_rule_set_last_ste_miss_addr(struct mlx5dr_matcher *matcher,
					   struct mlx5dr_matcher_rx_tx *nic_matcher,
					   u8 *hw_ste)
{
	struct mlx5dr_ste_ctx *ste_ctx = matcher->tbl->dmn->ste_ctx;
	u64 icm_addr;

	if (mlx5dr_ste_is_miss_addr_set(ste_ctx, hw_ste))
		return;

	icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_miss_addr(ste_ctx, hw_ste, icm_addr);
}
+
/* Allocate a single-entry hash table for a collision entry and return
 * its only STE, with a reference taken on the new table.
 * Returns NULL on allocation failure.
 */
static struct mlx5dr_ste *
dr_rule_create_collision_htbl(struct mlx5dr_matcher *matcher,
			      struct mlx5dr_matcher_rx_tx *nic_matcher,
			      u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_htbl *new_htbl;
	struct mlx5dr_ste *ste;

	/* Create new table for miss entry */
	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 DR_CHUNK_SIZE_1,
					 MLX5DR_STE_LU_TYPE_DONT_CARE,
					 0);
	if (!new_htbl) {
		mlx5dr_dbg(dmn, "Failed allocating collision table\n");
		return NULL;
	}

	/* One and only entry, never grows */
	ste = new_htbl->chunk->ste_arr;
	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
	mlx5dr_htbl_get(new_htbl);

	return ste;
}
+
/* Create a collision entry chained behind @orig_ste: a one-entry table
 * that shares orig_ste's miss list and chain location, with its own
 * next-hop table allocated.
 * Returns the new STE or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher,
			       struct mlx5dr_matcher_rx_tx *nic_matcher,
			       u8 *hw_ste,
			       struct mlx5dr_ste *orig_ste)
{
	struct mlx5dr_ste *ste;

	ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!ste) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed creating collision entry\n");
		return NULL;
	}

	ste->ste_chain_location = orig_ste->ste_chain_location;
	ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list_head */
	ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(orig_ste);

	/* Next table */
	if (mlx5dr_ste_create_next_htbl(matcher, nic_matcher, ste, hw_ste,
					DR_CHUNK_SIZE_1)) {
		mlx5dr_dbg(matcher->tbl->dmn, "Failed allocating table\n");
		goto free_tbl;
	}

	return ste;

free_tbl:
	mlx5dr_ste_free(ste, matcher, nic_matcher);
	return NULL;
}
+
+static int
+dr_rule_handle_one_ste_in_update_list(struct mlx5dr_ste_send_info *ste_info,
+                                     struct mlx5dr_domain *dmn)
+{
+       int ret;
+
+       list_del(&ste_info->send_list);
+
+       /* Copy data to ste, only reduced size or control, the last 16B (mask)
+        * is already written to the hw.
+        */
+       if (ste_info->size == DR_STE_SIZE_CTRL)
+               memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+                      ste_info->data, DR_STE_SIZE_CTRL);
+       else
+               memcpy(mlx5dr_ste_get_hw_ste(ste_info->ste),
+                      ste_info->data, DR_STE_SIZE_REDUCED);
+
+       ret = mlx5dr_send_postsend_ste(dmn, ste_info->ste, ste_info->data,
+                                      ste_info->size, ste_info->offset);
+       if (ret)
+               goto out;
+
+out:
+       mlx5dr_send_info_free(ste_info);
+       return ret;
+}
+
/* Flush every pending STE write on @send_ste_list to HW, stopping on
 * the first failure.
 * @is_reverse: iterate tail-to-head, for callers that queued updates
 * in the opposite of the order they must reach HW.
 */
static int dr_rule_send_update_list(struct list_head *send_ste_list,
				    struct mlx5dr_domain *dmn,
				    bool is_reverse)
{
	struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
	int ret;

	if (is_reverse) {
		list_for_each_entry_safe_reverse(ste_info, tmp_ste_info,
						 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	} else {
		list_for_each_entry_safe(ste_info, tmp_ste_info,
					 send_ste_list, send_list) {
			ret = dr_rule_handle_one_ste_in_update_list(ste_info,
								    dmn);
			if (ret)
				return ret;
		}
	}

	return 0;
}
+
/* Search a miss list for an STE whose tag equals @hw_ste's tag.
 * Returns the matching STE or NULL if none is found.
 */
static struct mlx5dr_ste *
dr_rule_find_ste_in_miss_list(struct list_head *miss_list, u8 *hw_ste)
{
	struct mlx5dr_ste *ste;

	if (list_empty(miss_list))
		return NULL;

	/* Check if hw_ste is present in the list */
	list_for_each_entry(ste, miss_list, miss_list_node) {
		if (mlx5dr_ste_equal_tag(mlx5dr_ste_get_hw_ste(ste), hw_ste))
			return ste;
	}

	return NULL;
}
+
/* During rehash, handle a destination slot that is already occupied:
 * create a collision entry sharing @col_ste's miss list and append it
 * to that list, queueing the required HW updates on @update_list.
 * Returns the new STE or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher,
				struct mlx5dr_matcher_rx_tx *nic_matcher,
				struct list_head *update_list,
				struct mlx5dr_ste *col_ste,
				u8 *hw_ste)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste *new_ste;
	int ret;

	new_ste = dr_rule_create_collision_htbl(matcher, nic_matcher, hw_ste);
	if (!new_ste)
		return NULL;

	/* Update collision pointing STE */
	new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste;

	/* In collision entry, all members share the same miss_list_head */
	new_ste->htbl->chunk->miss_list = mlx5dr_ste_get_miss_list(col_ste);

	/* Update the previous from the list */
	ret = dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
					  new_ste, mlx5dr_ste_get_miss_list(col_ste),
					  update_list);
	if (ret) {
		mlx5dr_dbg(dmn, "Failed update dup entry\n");
		goto err_exit;
	}

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}
+
/* Transfer the control state of @cur_ste to its rehashed copy
 * @new_ste: next-table pointer, chain location, refcount and the
 * back-reference from the rule to its last STE.
 */
static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct mlx5dr_ste *cur_ste,
					 struct mlx5dr_ste *new_ste)
{
	new_ste->next_htbl = cur_ste->next_htbl;
	new_ste->ste_chain_location = cur_ste->ste_chain_location;

	if (new_ste->next_htbl)
		new_ste->next_htbl->pointing_ste = new_ste;

	/* We need to copy the refcount since this ste
	 * may have been traversed several times
	 */
	new_ste->refcount = cur_ste->refcount;

	/* Link old STEs rule to the new ste */
	mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false);
}
+
/* Copy a single STE from the old hash table into @new_htbl during
 * rehash: rebuild the full STE (mask + control + tag), hash it into
 * its new slot, fall back to a collision entry if the slot is taken,
 * and queue any needed HW writes on @update_list.
 * Returns the new STE or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
			struct mlx5dr_matcher_rx_tx *nic_matcher,
			struct mlx5dr_ste *cur_ste,
			struct mlx5dr_ste_htbl *new_htbl,
			struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	bool use_update_list = false;
	u8 hw_ste[DR_STE_SIZE] = {};
	struct mlx5dr_ste *new_ste;
	int new_idx;
	u8 sb_idx;

	/* Copy STE mask from the matcher */
	sb_idx = cur_ste->ste_chain_location - 1;
	mlx5dr_ste_set_bit_mask(hw_ste, nic_matcher->ste_builder[sb_idx].bit_mask);

	/* Copy STE control and tag */
	memcpy(hw_ste, mlx5dr_ste_get_hw_ste(cur_ste), DR_STE_SIZE_REDUCED);
	dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);

	new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
	new_ste = &new_htbl->chunk->ste_arr[new_idx];

	if (mlx5dr_ste_is_not_used(new_ste)) {
		/* Free slot: the STE starts a fresh miss list */
		mlx5dr_htbl_get(new_htbl);
		list_add_tail(&new_ste->miss_list_node,
			      mlx5dr_ste_get_miss_list(new_ste));
	} else {
		/* Slot occupied: chain a collision entry; its HW write
		 * must go through the update list
		 */
		new_ste = dr_rule_rehash_handle_collision(matcher,
							  nic_matcher,
							  update_list,
							  new_ste,
							  hw_ste);
		if (!new_ste) {
			mlx5dr_dbg(dmn, "Failed adding collision entry, index: %d\n",
				   new_idx);
			return NULL;
		}
		new_htbl->ctrl.num_of_collisions++;
		use_update_list = true;
	}

	memcpy(mlx5dr_ste_get_hw_ste(new_ste), hw_ste, DR_STE_SIZE_REDUCED);

	new_htbl->ctrl.num_of_valid_entries++;

	if (use_update_list) {
		ste_info = mlx5dr_send_info_alloc(dmn,
						  nic_matcher->nic_tbl->nic_dmn->type);
		if (!ste_info)
			goto err_exit;

		mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0,
							  hw_ste, ste_info,
							  update_list, true);
	}

	dr_rule_rehash_copy_ste_ctrl(matcher, nic_matcher, cur_ste, new_ste);

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
	return NULL;
}
+
/* Copy every STE on one miss list into the new hash table, unlinking
 * each source STE and dropping its table reference as it is moved.
 * A failure mid-copy is fatal for the rehash (WARN + -EINVAL).
 */
static int dr_rule_rehash_copy_miss_list(struct mlx5dr_matcher *matcher,
					 struct mlx5dr_matcher_rx_tx *nic_matcher,
					 struct list_head *cur_miss_list,
					 struct mlx5dr_ste_htbl *new_htbl,
					 struct list_head *update_list)
{
	struct mlx5dr_ste *tmp_ste, *cur_ste, *new_ste;

	if (list_empty(cur_miss_list))
		return 0;

	list_for_each_entry_safe(cur_ste, tmp_ste, cur_miss_list, miss_list_node) {
		new_ste = dr_rule_rehash_copy_ste(matcher,
						  nic_matcher,
						  cur_ste,
						  new_htbl,
						  update_list);
		if (!new_ste)
			goto err_insert;

		list_del(&cur_ste->miss_list_node);
		mlx5dr_htbl_put(cur_ste->htbl);
	}
	return 0;

err_insert:
	mlx5dr_err(matcher->tbl->dmn, "Fatal error during resize\n");
	WARN_ON(true);
	return -EINVAL;
}
+
/* Copy all used entries of @cur_htbl (and their miss lists) into
 * @new_htbl, flushing the pending HW writes after each table row.
 * Returns 0 on success, negative errno on failure.
 */
static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
				    struct mlx5dr_matcher_rx_tx *nic_matcher,
				    struct mlx5dr_ste_htbl *cur_htbl,
				    struct mlx5dr_ste_htbl *new_htbl,
				    struct list_head *update_list)
{
	struct mlx5dr_ste *cur_ste;
	int cur_entries;
	int err = 0;
	int i;

	cur_entries = mlx5dr_icm_pool_chunk_size_to_entries(cur_htbl->chunk->size);

	if (cur_entries < 1) {
		mlx5dr_dbg(matcher->tbl->dmn, "Invalid number of entries\n");
		return -EINVAL;
	}

	for (i = 0; i < cur_entries; i++) {
		cur_ste = &cur_htbl->chunk->ste_arr[i];
		if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
			continue;

		err = dr_rule_rehash_copy_miss_list(matcher,
						    nic_matcher,
						    mlx5dr_ste_get_miss_list(cur_ste),
						    new_htbl,
						    update_list);
		if (err)
			goto clean_copy;

		/* In order to decrease the number of allocated ste_send_info
		 * structs, send the current table row now.
		 */
		err = dr_rule_send_update_list(update_list, matcher->tbl->dmn, false);
		if (err) {
			mlx5dr_dbg(matcher->tbl->dmn, "Failed updating table to HW\n");
			goto clean_copy;
		}
	}

clean_copy:
	return err;
}
+
/* Grow a hash table to @new_size: allocate the new table, copy every
 * entry into it, write it to HW, and re-point the previous STE in the
 * chain (or the matcher's anchor, for location 1) at the new table.
 * The final re-connect write is queued on @update_list rather than
 * sent here. Returns the new table, or NULL on failure (the old table
 * is left intact).
 */
static struct mlx5dr_ste_htbl *
dr_rule_rehash_htbl(struct mlx5dr_rule *rule,
		    struct mlx5dr_rule_rx_tx *nic_rule,
		    struct mlx5dr_ste_htbl *cur_htbl,
		    u8 ste_location,
		    struct list_head *update_list,
		    enum mlx5dr_icm_chunk_size new_size)
{
	struct mlx5dr_ste_send_info *del_ste_info, *tmp_ste_info;
	struct mlx5dr_matcher *matcher = rule->matcher;
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_matcher_rx_tx *nic_matcher;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_htbl_connect_info info;
	struct mlx5dr_domain_rx_tx *nic_dmn;
	u8 formatted_ste[DR_STE_SIZE] = {};
	LIST_HEAD(rehash_table_send_list);
	struct mlx5dr_ste *ste_to_update;
	struct mlx5dr_ste_htbl *new_htbl;
	int err;

	nic_matcher = nic_rule->nic_matcher;
	nic_dmn = nic_matcher->nic_tbl->nic_dmn;

	/* Pre-allocate the send info for the final connect write */
	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		return NULL;

	new_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
					 new_size,
					 cur_htbl->lu_type,
					 cur_htbl->byte_mask);
	if (!new_htbl) {
		mlx5dr_err(dmn, "Failed to allocate new hash table\n");
		goto free_ste_info;
	}

	/* Write new table to HW */
	info.type = CONNECT_MISS;
	info.miss_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
	mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
				     dmn->info.caps.gvmi,
				     nic_dmn->type,
				     new_htbl,
				     formatted_ste,
				     &info);

	new_htbl->pointing_ste = cur_htbl->pointing_ste;
	new_htbl->pointing_ste->next_htbl = new_htbl;
	err = dr_rule_rehash_copy_htbl(matcher,
				       nic_matcher,
				       cur_htbl,
				       new_htbl,
				       &rehash_table_send_list);
	if (err)
		goto free_new_htbl;

	if (mlx5dr_send_postsend_htbl(dmn, new_htbl, formatted_ste,
				      nic_matcher->ste_builder[ste_location - 1].bit_mask)) {
		mlx5dr_err(dmn, "Failed writing table to HW\n");
		goto free_new_htbl;
	}

	/* Writing to the hw is done in regular order of rehash_table_send_list,
	 * in order to have the origin data written before the miss address of
	 * collision entries, if exists.
	 */
	if (dr_rule_send_update_list(&rehash_table_send_list, dmn, false)) {
		mlx5dr_err(dmn, "Failed updating table to HW\n");
		goto free_ste_list;
	}

	/* Connect previous hash table to current */
	if (ste_location == 1) {
		/* The previous table is an anchor, anchors size is always one STE */
		struct mlx5dr_ste_htbl *prev_htbl = cur_htbl->pointing_ste->htbl;

		/* On matcher s_anchor we keep an extra refcount */
		mlx5dr_htbl_get(new_htbl);
		mlx5dr_htbl_put(cur_htbl);

		nic_matcher->s_htbl = new_htbl;

		/* It is safe to operate dr_ste_set_hit_addr on the hw_ste here
		 * (48B len) which works only on first 32B
		 */
		mlx5dr_ste_set_hit_addr(dmn->ste_ctx,
					prev_htbl->chunk->hw_ste_arr,
					mlx5dr_icm_pool_get_chunk_icm_addr(new_htbl->chunk),
					mlx5dr_icm_pool_get_chunk_num_of_entries(new_htbl->chunk));

		ste_to_update = &prev_htbl->chunk->ste_arr[0];
	} else {
		mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
						     mlx5dr_ste_get_hw_ste(cur_htbl->pointing_ste),
						     new_htbl);
		ste_to_update = cur_htbl->pointing_ste;
	}

	/* Queue (not send) the connect write; caller flushes update_list */
	mlx5dr_send_fill_and_append_ste_send_info(ste_to_update, DR_STE_SIZE_CTRL,
						  0, mlx5dr_ste_get_hw_ste(ste_to_update),
						  ste_info, update_list, false);

	return new_htbl;

free_ste_list:
	/* Clean all ste_info's from the new table */
	list_for_each_entry_safe(del_ste_info, tmp_ste_info,
				 &rehash_table_send_list, send_list) {
		list_del(&del_ste_info->send_list);
		mlx5dr_send_info_free(del_ste_info);
	}

free_new_htbl:
	mlx5dr_ste_htbl_free(new_htbl);
free_ste_info:
	mlx5dr_send_info_free(ste_info);
	mlx5dr_info(dmn, "Failed creating rehash table\n");
	return NULL;
}
+
/* Grow @cur_htbl to the next chunk size, capped by the domain's max
 * SW ICM size. Returns the new table, or NULL if already at the max
 * size or on failure.
 */
static struct mlx5dr_ste_htbl *dr_rule_rehash(struct mlx5dr_rule *rule,
					      struct mlx5dr_rule_rx_tx *nic_rule,
					      struct mlx5dr_ste_htbl *cur_htbl,
					      u8 ste_location,
					      struct list_head *update_list)
{
	struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
	enum mlx5dr_icm_chunk_size new_size;

	new_size = mlx5dr_icm_next_higher_chunk(cur_htbl->chunk->size);
	new_size = min_t(u32, new_size, dmn->info.max_log_sw_icm_sz);

	if (new_size == cur_htbl->chunk->size)
		return NULL; /* Skip rehash, we already at the max size */

	return dr_rule_rehash_htbl(rule, nic_rule, cur_htbl, ste_location,
				   update_list, new_size);
}
+
/* Handle a hash collision during rule insertion: create a collision
 * entry for @hw_ste, append it to @ste's miss list and queue the HW
 * writes on @send_list. Updates the table's collision/valid counters.
 * Returns the new STE or NULL on failure.
 */
static struct mlx5dr_ste *
dr_rule_handle_collision(struct mlx5dr_matcher *matcher,
			 struct mlx5dr_matcher_rx_tx *nic_matcher,
			 struct mlx5dr_ste *ste,
			 u8 *hw_ste,
			 struct list_head *miss_list,
			 struct list_head *send_list)
{
	struct mlx5dr_domain *dmn = matcher->tbl->dmn;
	struct mlx5dr_ste_send_info *ste_info;
	struct mlx5dr_ste *new_ste;

	ste_info = mlx5dr_send_info_alloc(dmn,
					  nic_matcher->nic_tbl->nic_dmn->type);
	if (!ste_info)
		return NULL;

	new_ste = dr_rule_create_collision_entry(matcher, nic_matcher, hw_ste, ste);
	if (!new_ste)
		goto free_send_info;

	if (dr_rule_append_to_miss_list(dmn, nic_matcher->nic_tbl->nic_dmn->type,
					new_ste, miss_list, send_list)) {
		mlx5dr_dbg(dmn, "Failed to update prev miss_list\n");
		goto err_exit;
	}

	mlx5dr_send_fill_and_append_ste_send_info(new_ste, DR_STE_SIZE, 0, hw_ste,
						  ste_info, send_list, false);

	ste->htbl->ctrl.num_of_collisions++;
	ste->htbl->ctrl.num_of_valid_entries++;

	return new_ste;

err_exit:
	mlx5dr_ste_free(new_ste, matcher, nic_matcher);
free_send_info:
	mlx5dr_send_info_free(ste_info);
	return NULL;
}
+
/* Release all action members of a rule, dropping the reference each
 * member holds on its action.
 */
static void dr_rule_remove_action_members(struct mlx5dr_rule *rule)
{
	struct mlx5dr_rule_action_member *action_mem;
	struct mlx5dr_rule_action_member *tmp;

	list_for_each_entry_safe(action_mem, tmp, &rule->rule_actions_list, list) {
		list_del(&action_mem->list);
		refcount_dec(&action_mem->action->refcount);
		kvfree(action_mem);
	}
}
+
/* Record the rule's actions as members on its actions list, taking a
 * reference on each action. On allocation failure all members added so
 * far are rolled back. Returns 0 or -ENOMEM.
 */
static int dr_rule_add_action_members(struct mlx5dr_rule *rule,
				      size_t num_actions,
				      struct mlx5dr_action *actions[])
{
	struct mlx5dr_rule_action_member *action_mem;
	int i;

	for (i = 0; i < num_actions; i++) {
		action_mem = kvzalloc(sizeof(*action_mem), GFP_KERNEL);
		if (!action_mem)
			goto free_action_members;

		action_mem->action = actions[i];
		INIT_LIST_HEAD(&action_mem->list);
		list_add_tail(&action_mem->list, &rule->rule_actions_list);
		refcount_inc(&action_mem->action->refcount);
	}

	return 0;

free_action_members:
	dr_rule_remove_action_members(rule);
	return -ENOMEM;
}
+
+void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
+                                struct mlx5dr_ste *ste,
+                                bool force)
+{
+       /* Update rule member is usually done for the last STE or during rule
+        * creation to recover from mid-creation failure (for this purpose the
+        * force flag is used)
+        */
+       if (ste->next_htbl && !force)
+               return;
+
+       /* Update is required since each rule keeps track of its last STE */
+       ste->rule_rx_tx = nic_rule;
+       nic_rule->last_rule_ste = ste;
+}
+
+/* Return the STE that points to the hash table holding @curr_ste's
+ * miss list, i.e. the previous STE in the rule chain.
+ */
+static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste)
+{
+       struct list_head *miss_list = mlx5dr_ste_get_miss_list(curr_ste);
+       struct mlx5dr_ste *head;
+
+       head = list_first_entry(miss_list, struct mlx5dr_ste, miss_list_node);
+       return head->htbl->pointing_ste;
+}
+
+/* Collect the rule's STEs into @ste_arr in reverse order, walking
+ * back from @curr_ste (the rule's last STE) until the STE at chain
+ * location 1 is reached. @num_of_stes is set to the number stored.
+ *
+ * Return: 0 on success, -ENOENT when there is no last STE to start
+ * from.
+ */
+int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
+                                        struct mlx5dr_ste *curr_ste,
+                                        int *num_of_stes)
+{
+       bool reached_first;
+
+       *num_of_stes = 0;
+
+       if (!curr_ste)
+               return -ENOENT;
+
+       /* Iterate from last to first */
+       do {
+               reached_first = curr_ste->ste_chain_location == 1;
+               ste_arr[(*num_of_stes)++] = curr_ste;
+               curr_ste = dr_rule_get_pointed_ste(curr_ste);
+       } while (!reached_first);
+
+       return 0;
+}
+
+/* Drop a reference on every STE used by this nic rule, walking the
+ * chain backwards from the last STE via the reverse-members helper.
+ */
+static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
+                                      struct mlx5dr_rule_rx_tx *nic_rule)
+{
+       struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES];
+       struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste;
+       int i;
+
+       /* Nothing to release if the reverse walk fails (no last STE) */
+       if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i))
+               return;
+
+       while (i--)
+               mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher);
+}
+
+/* Count the number of bits set in @byte_mask.
+ *
+ * Uses the kernel's population-count helper instead of the previous
+ * hand-rolled Kernighan loop; behavior is identical.
+ */
+static u16 dr_get_bits_per_mask(u16 byte_mask)
+{
+       return hweight16(byte_mask);
+}
+
+/* Decide whether @htbl is worth rehashing into a larger table.
+ *
+ * Grow only when all of the following hold:
+ *  - the SW ICM can still accommodate a larger chunk,
+ *  - the table itself is allowed to grow,
+ *  - the byte mask supplies more hash bits than the current table
+ *    size already consumes (otherwise growing cannot spread entries),
+ *  - both the collision count and the count of non-colliding valid
+ *    entries have crossed the increase threshold.
+ *
+ * NOTE(review): @nic_dmn is unused in this function; it appears to be
+ * kept for interface consistency with the caller.
+ */
+static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
+                                     struct mlx5dr_domain *dmn,
+                                     struct mlx5dr_domain_rx_tx *nic_dmn)
+{
+       struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl;
+       int threshold;
+
+       if (dmn->info.max_log_sw_icm_sz <= htbl->chunk->size)
+               return false;
+
+       if (!mlx5dr_ste_htbl_may_grow(htbl))
+               return false;
+
+       if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk->size)
+               return false;
+
+       threshold = mlx5dr_ste_htbl_increase_threshold(htbl);
+       if (ctrl->num_of_collisions >= threshold &&
+           (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold)
+               return true;
+
+       return false;
+}
+
+/* Allocate and chain the extra STEs that carry actions which did not
+ * fit into the match STEs, and queue each one for writing to HW.
+ *
+ * @last_ste:           last match STE of the rule; new action STEs
+ *                      are linked after it.
+ * @hw_ste_arr:         hw STE contents - matcher-builder entries
+ *                      first, then one entry per extra action STE.
+ * @new_hw_ste_arr_sz:  total number of hw STEs in @hw_ste_arr.
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure. On failure
+ * only the most recent action STE is released here; STEs chained in
+ * earlier iterations are released by the caller (free_rule path in
+ * dr_rule_create_rule_nic()).
+ */
+static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule,
+                                     struct mlx5dr_rule_rx_tx *nic_rule,
+                                     struct list_head *send_ste_list,
+                                     struct mlx5dr_ste *last_ste,
+                                     u8 *hw_ste_arr,
+                                     u32 new_hw_ste_arr_sz)
+{
+       struct mlx5dr_matcher_rx_tx *nic_matcher = nic_rule->nic_matcher;
+       struct mlx5dr_ste_send_info *ste_info_arr[DR_ACTION_MAX_STES];
+       u8 num_of_builders = nic_matcher->num_of_builders;
+       struct mlx5dr_matcher *matcher = rule->matcher;
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       u8 *curr_hw_ste, *prev_hw_ste;
+       struct mlx5dr_ste *action_ste;
+       int i, k;
+
+       /* Two cases:
+        * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste
+        * 2. num_of_builders is less than new_hw_ste_arr_sz, new ste was added
+        *    to support the action.
+        */
+
+       for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) {
+               curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE;
+               prev_hw_ste = (i == 0) ? curr_hw_ste : hw_ste_arr + ((i - 1) * DR_STE_SIZE);
+               action_ste = dr_rule_create_collision_htbl(matcher,
+                                                          nic_matcher,
+                                                          curr_hw_ste);
+               if (!action_ste)
+                       return -ENOMEM;
+
+               mlx5dr_ste_get(action_ste);
+
+               /* Link the new action STE after the current chain tail */
+               action_ste->htbl->pointing_ste = last_ste;
+               last_ste->next_htbl = action_ste->htbl;
+               last_ste = action_ste;
+
+               /* While free ste we go over the miss list, so add this ste to the list */
+               list_add_tail(&action_ste->miss_list_node,
+                             mlx5dr_ste_get_miss_list(action_ste));
+
+               ste_info_arr[k] = mlx5dr_send_info_alloc(dmn,
+                                                        nic_matcher->nic_tbl->nic_dmn->type);
+               if (!ste_info_arr[k])
+                       goto err_exit;
+
+               /* Point current ste to the new action */
+               mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx,
+                                                    prev_hw_ste,
+                                                    action_ste->htbl);
+
+               /* force=true: mid-creation, the chain tail moves every loop */
+               mlx5dr_rule_set_last_member(nic_rule, action_ste, true);
+
+               mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0,
+                                                         curr_hw_ste,
+                                                         ste_info_arr[k],
+                                                         send_ste_list, false);
+       }
+
+       /* Terminate the chain at the final action STE */
+       last_ste->next_htbl = NULL;
+
+       return 0;
+
+err_exit:
+       mlx5dr_ste_put(action_ste, matcher, nic_matcher);
+       return -ENOMEM;
+}
+
+/* Initialize an unused hash table entry for this rule: take a
+ * reference on @cur_htbl, start a new miss list headed by @ste, set
+ * the STE's miss address, create the next-hop table and queue the STE
+ * for writing to HW.
+ *
+ * Return: 0 on success, -ENOMEM on failure; the list insertion and
+ * table reference are rolled back on the error path.
+ */
+static int dr_rule_handle_empty_entry(struct mlx5dr_matcher *matcher,
+                                     struct mlx5dr_matcher_rx_tx *nic_matcher,
+                                     struct mlx5dr_ste_htbl *cur_htbl,
+                                     struct mlx5dr_ste *ste,
+                                     u8 ste_location,
+                                     u8 *hw_ste,
+                                     struct list_head *miss_list,
+                                     struct list_head *send_list)
+{
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       struct mlx5dr_ste_send_info *ste_info;
+
+       /* Take ref on table, only on first time this ste is used */
+       mlx5dr_htbl_get(cur_htbl);
+
+       /* new entry -> new branch */
+       list_add_tail(&ste->miss_list_node, miss_list);
+
+       dr_rule_set_last_ste_miss_addr(matcher, nic_matcher, hw_ste);
+
+       ste->ste_chain_location = ste_location;
+
+       ste_info = mlx5dr_send_info_alloc(dmn,
+                                         nic_matcher->nic_tbl->nic_dmn->type);
+       if (!ste_info)
+               goto clean_ste_setting;
+
+       if (mlx5dr_ste_create_next_htbl(matcher,
+                                       nic_matcher,
+                                       ste,
+                                       hw_ste,
+                                       DR_CHUNK_SIZE_1)) {
+               mlx5dr_dbg(dmn, "Failed allocating table\n");
+               goto clean_ste_info;
+       }
+
+       cur_htbl->ctrl.num_of_valid_entries++;
+
+       mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE, 0, hw_ste,
+                                                 ste_info, send_list, false);
+
+       return 0;
+
+clean_ste_info:
+       mlx5dr_send_info_free(ste_info);
+clean_ste_setting:
+       list_del_init(&ste->miss_list_node);
+       mlx5dr_htbl_put(cur_htbl);
+
+       return -ENOMEM;
+}
+
+/* Place the hw STE at @ste_location of the rule chain into @cur_htbl.
+ *
+ * Three cases for the hashed index:
+ *  - the entry is unused: start a new branch there,
+ *  - an STE with the same tag already exists in the miss list: reuse
+ *    it (a full duplicate is only reported for the last STE),
+ *  - the index collides: either rehash into a larger table once and
+ *    retry, or append a collision entry to the miss list.
+ *
+ * @put_htbl: on rehash, set to the old table whose reference the
+ *            caller must release (see dr_rule_create_rule_nic()).
+ *
+ * Return: the STE used for this chain location, or NULL on failure.
+ */
+static struct mlx5dr_ste *
+dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
+                         struct mlx5dr_rule_rx_tx *nic_rule,
+                         struct list_head *send_ste_list,
+                         struct mlx5dr_ste_htbl *cur_htbl,
+                         u8 *hw_ste,
+                         u8 ste_location,
+                         struct mlx5dr_ste_htbl **put_htbl)
+{
+       struct mlx5dr_matcher *matcher = rule->matcher;
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       struct mlx5dr_matcher_rx_tx *nic_matcher;
+       struct mlx5dr_domain_rx_tx *nic_dmn;
+       struct mlx5dr_ste_htbl *new_htbl;
+       struct mlx5dr_ste *matched_ste;
+       struct list_head *miss_list;
+       bool skip_rehash = false;
+       struct mlx5dr_ste *ste;
+       int index;
+
+       nic_matcher = nic_rule->nic_matcher;
+       nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+
+again:
+       index = mlx5dr_ste_calc_hash_index(hw_ste, cur_htbl);
+       miss_list = &cur_htbl->chunk->miss_list[index];
+       ste = &cur_htbl->chunk->ste_arr[index];
+
+       if (mlx5dr_ste_is_not_used(ste)) {
+               if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
+                                              ste, ste_location,
+                                              hw_ste, miss_list,
+                                              send_ste_list))
+                       return NULL;
+       } else {
+               /* Hash table index in use, check if this ste is in the miss list */
+               matched_ste = dr_rule_find_ste_in_miss_list(miss_list, hw_ste);
+               if (matched_ste) {
+                       /* If it is last STE in the chain, and has the same tag
+                        * it means that all the previous stes are the same,
+                        * if so, this rule is duplicated.
+                        */
+                       if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste_location))
+                               return matched_ste;
+
+                       mlx5dr_dbg(dmn, "Duplicate rule inserted\n");
+               }
+
+               if (!skip_rehash && dr_rule_need_enlarge_hash(cur_htbl, dmn, nic_dmn)) {
+                       /* Hash table index in use, try to resize of the hash */
+                       skip_rehash = true;
+
+                       /* Hold the table till we update.
+                        * Release in dr_rule_create_rule()
+                        */
+                       *put_htbl = cur_htbl;
+                       mlx5dr_htbl_get(cur_htbl);
+
+                       new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl,
+                                                 ste_location, send_ste_list);
+                       if (!new_htbl) {
+                               /* Rehash failed - fall back to the old table
+                                * and insert as a collision entry instead.
+                                */
+                               mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n",
+                                          cur_htbl->chunk->size);
+                               mlx5dr_htbl_put(cur_htbl);
+                       } else {
+                               cur_htbl = new_htbl;
+                       }
+                       goto again;
+               } else {
+                       /* Hash table index in use, add another collision (miss) */
+                       ste = dr_rule_handle_collision(matcher,
+                                                      nic_matcher,
+                                                      ste,
+                                                      hw_ste,
+                                                      miss_list,
+                                                      send_ste_list);
+                       if (!ste) {
+                               mlx5dr_dbg(dmn, "failed adding collision entry, index: %d\n",
+                                          index);
+                               return NULL;
+                       }
+               }
+       }
+       return ste;
+}
+
+/* Check that @value sets no bit outside of @mask within the byte
+ * range [s_idx, e_idx). Return true when the value is fully covered
+ * by the mask.
+ */
+static bool dr_rule_cmp_value_to_mask(u8 *mask, u8 *value,
+                                     u32 s_idx, u32 e_idx)
+{
+       u32 idx;
+
+       for (idx = s_idx; idx < e_idx; idx++) {
+               if (value[idx] & ~mask[idx]) {
+                       pr_info("Rule parameters contains a value not specified by mask\n");
+                       return false;
+               }
+       }
+
+       return true;
+}
+
+static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
+                          struct mlx5dr_match_parameters *value,
+                          struct mlx5dr_match_param *param)
+{
+       u8 match_criteria = matcher->match_criteria;
+       size_t value_size = value->match_sz;
+       u8 *mask_p = (u8 *)&matcher->mask;
+       u8 *param_p = (u8 *)param;
+       u32 s_idx, e_idx;
+
+       if (!value_size ||
+           (value_size > DR_SZ_MATCH_PARAM || (value_size % sizeof(u32)))) {
+               mlx5dr_err(matcher->tbl->dmn, "Rule parameters length is incorrect\n");
+               return false;
+       }
+
+       mlx5dr_ste_copy_param(matcher->match_criteria, param, value, false);
+
+       if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
+               s_idx = offsetof(struct mlx5dr_match_param, outer);
+               e_idx = min(s_idx + sizeof(param->outer), value_size);
+
+               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+                       mlx5dr_err(matcher->tbl->dmn, "Rule outer parameters contains a value not specified by mask\n");
+                       return false;
+               }
+       }
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
+               s_idx = offsetof(struct mlx5dr_match_param, misc);
+               e_idx = min(s_idx + sizeof(param->misc), value_size);
+
+               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+                       mlx5dr_err(matcher->tbl->dmn, "Rule misc parameters contains a value not specified by mask\n");
+                       return false;
+               }
+       }
+
+       if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
+               s_idx = offsetof(struct mlx5dr_match_param, inner);
+               e_idx = min(s_idx + sizeof(param->inner), value_size);
+
+               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+                       mlx5dr_err(matcher->tbl->dmn, "Rule inner parameters contains a value not specified by mask\n");
+                       return false;
+               }
+       }
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
+               s_idx = offsetof(struct mlx5dr_match_param, misc2);
+               e_idx = min(s_idx + sizeof(param->misc2), value_size);
+
+               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+                       mlx5dr_err(matcher->tbl->dmn, "Rule misc2 parameters contains a value not specified by mask\n");
+                       return false;
+               }
+       }
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
+               s_idx = offsetof(struct mlx5dr_match_param, misc3);
+               e_idx = min(s_idx + sizeof(param->misc3), value_size);
+
+               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+                       mlx5dr_err(matcher->tbl->dmn, "Rule misc3 parameters contains a value not specified by mask\n");
+                       return false;
+               }
+       }
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
+               s_idx = offsetof(struct mlx5dr_match_param, misc4);
+               e_idx = min(s_idx + sizeof(param->misc4), value_size);
+
+               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+                       mlx5dr_err(matcher->tbl->dmn,
+                                  "Rule misc4 parameters contains a value not specified by mask\n");
+                       return false;
+               }
+       }
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
+               s_idx = offsetof(struct mlx5dr_match_param, misc5);
+               e_idx = min(s_idx + sizeof(param->misc5), value_size);
+
+               if (!dr_rule_cmp_value_to_mask(mask_p, param_p, s_idx, e_idx)) {
+                       mlx5dr_err(matcher->tbl->dmn, "Rule misc5 parameters contains a value not specified by mask\n");
+                       return false;
+               }
+       }
+       return true;
+}
+
+/* Destroy one direction (RX or TX) of a rule: release its STE chain
+ * under the nic domain lock and, when this was the matcher's last
+ * rule, detach the nic matcher from the table.
+ *
+ * Return: always 0.
+ */
+static int dr_rule_destroy_rule_nic(struct mlx5dr_rule *rule,
+                                   struct mlx5dr_rule_rx_tx *nic_rule)
+{
+       /* Check if this nic rule was actually created, or was it skipped
+        * and only the other type of the RX/TX nic rule was created.
+        */
+       if (!nic_rule->last_rule_ste)
+               return 0;
+
+       mlx5dr_domain_nic_lock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
+       dr_rule_clean_rule_members(rule, nic_rule);
+
+       nic_rule->nic_matcher->rules--;
+       if (!nic_rule->nic_matcher->rules)
+               mlx5dr_matcher_remove_from_tbl_nic(rule->matcher->tbl->dmn,
+                                                  nic_rule->nic_matcher);
+
+       mlx5dr_domain_nic_unlock(nic_rule->nic_matcher->nic_tbl->nic_dmn);
+
+       return 0;
+}
+
+/* Destroy both the RX and TX sides of an FDB rule. */
+static int dr_rule_destroy_rule_fdb(struct mlx5dr_rule *rule)
+{
+       struct mlx5dr_rule_rx_tx *nic_rules[] = { &rule->rx, &rule->tx };
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(nic_rules); i++)
+               dr_rule_destroy_rule_nic(rule, nic_rules[i]);
+
+       return 0;
+}
+
+/* Tear down @rule: remove it from debug tracking, destroy the
+ * per-direction nic rule(s) according to the domain type, release the
+ * action references and free the rule object.
+ *
+ * Return: 0 on success, -EINVAL for an unknown domain type (in which
+ * case the rule object is intentionally not freed).
+ */
+static int dr_rule_destroy_rule(struct mlx5dr_rule *rule)
+{
+       struct mlx5dr_domain *dmn = rule->matcher->tbl->dmn;
+
+       mlx5dr_dbg_rule_del(rule);
+
+       switch (dmn->type) {
+       case MLX5DR_DOMAIN_TYPE_NIC_RX:
+               dr_rule_destroy_rule_nic(rule, &rule->rx);
+               break;
+       case MLX5DR_DOMAIN_TYPE_NIC_TX:
+               dr_rule_destroy_rule_nic(rule, &rule->tx);
+               break;
+       case MLX5DR_DOMAIN_TYPE_FDB:
+               dr_rule_destroy_rule_fdb(rule);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       dr_rule_remove_action_members(rule);
+       kfree(rule);
+       return 0;
+}
+
+/* Classify the match spec as IPv4 or IPv6 from the explicit
+ * ip_version field or the IPv6 ethertype.
+ */
+static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
+{
+       bool is_ipv6 = spec->ip_version == 6 || spec->ethertype == ETH_P_IPV6;
+
+       return is_ipv6 ? DR_RULE_IPV6 : DR_RULE_IPV4;
+}
+
+/* Return true when the rule should not be installed for this FDB
+ * direction: RX handles only traffic sourced from the uplink port,
+ * TX handles everything else, and an explicit flow_source hint can
+ * exclude a direction outright. Non-FDB domains are never skipped.
+ */
+static bool dr_rule_skip(enum mlx5dr_domain_type domain,
+                        enum mlx5dr_domain_nic_type nic_type,
+                        struct mlx5dr_match_param *mask,
+                        struct mlx5dr_match_param *value,
+                        u32 flow_source)
+{
+       bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
+       bool from_uplink;
+
+       if (domain != MLX5DR_DOMAIN_TYPE_FDB)
+               return false;
+
+       if (mask->misc.source_port) {
+               from_uplink = value->misc.source_port == MLX5_VPORT_UPLINK;
+               /* Skip when the direction doesn't match the source port */
+               if (rx != from_uplink)
+                       return true;
+       }
+
+       if (rx)
+               return flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
+
+       return flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+}
+
+/* Create one direction (RX or TX) of a rule.
+ *
+ * Builds the hw STE array for the match values and actions, walks the
+ * matcher hash tables to materialize each STE in the chain, appends
+ * extra action STEs, and finally writes everything to HW in a single
+ * update batch. A stack array is used for the common short STE chain,
+ * with a heap fallback for longer chains.
+ *
+ * Return: 0 on success (including when the rule is skipped for this
+ * direction), negative errno on failure.
+ */
+static int
+dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
+                       struct mlx5dr_rule_rx_tx *nic_rule,
+                       struct mlx5dr_match_param *param,
+                       size_t num_actions,
+                       struct mlx5dr_action *actions[])
+{
+       u8 hw_ste_arr_optimized[DR_RULE_MAX_STE_CHAIN_OPTIMIZED * DR_STE_SIZE] = {};
+       struct mlx5dr_ste_send_info *ste_info, *tmp_ste_info;
+       struct mlx5dr_matcher *matcher = rule->matcher;
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       struct mlx5dr_matcher_rx_tx *nic_matcher;
+       struct mlx5dr_domain_rx_tx *nic_dmn;
+       struct mlx5dr_ste_htbl *htbl = NULL;
+       struct mlx5dr_ste_htbl *cur_htbl;
+       struct mlx5dr_ste *ste = NULL;
+       LIST_HEAD(send_ste_list);
+       bool hw_ste_arr_is_opt;
+       u8 *hw_ste_arr = NULL;
+       u32 new_hw_ste_arr_sz;
+       int ret, i;
+
+       nic_matcher = nic_rule->nic_matcher;
+       nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+
+       if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param,
+                        rule->flow_source))
+               return 0;
+
+       mlx5dr_domain_nic_lock(nic_dmn);
+
+       ret = mlx5dr_matcher_select_builders(matcher,
+                                            nic_matcher,
+                                            dr_rule_get_ipv(&param->outer),
+                                            dr_rule_get_ipv(&param->inner));
+       if (ret)
+               goto err_unlock;
+
+       /* Short chains use the stack array; longer ones need the heap */
+       hw_ste_arr_is_opt = nic_matcher->num_of_builders <= DR_RULE_MAX_STES_OPTIMIZED;
+       if (likely(hw_ste_arr_is_opt)) {
+               hw_ste_arr = hw_ste_arr_optimized;
+       } else {
+               hw_ste_arr = kzalloc((nic_matcher->num_of_builders + DR_ACTION_MAX_STES) *
+                                    DR_STE_SIZE, GFP_KERNEL);
+
+               if (!hw_ste_arr) {
+                       ret = -ENOMEM;
+                       goto err_unlock;
+               }
+       }
+
+       ret = mlx5dr_matcher_add_to_tbl_nic(dmn, nic_matcher);
+       if (ret)
+               goto free_hw_ste;
+
+       /* Set the tag values inside the ste array */
+       ret = mlx5dr_ste_build_ste_arr(matcher, nic_matcher, param, hw_ste_arr);
+       if (ret)
+               goto remove_from_nic_tbl;
+
+       /* Set the actions values/addresses inside the ste array */
+       ret = mlx5dr_actions_build_ste_arr(matcher, nic_matcher, actions,
+                                          num_actions, hw_ste_arr,
+                                          &new_hw_ste_arr_sz);
+       if (ret)
+               goto remove_from_nic_tbl;
+
+       cur_htbl = nic_matcher->s_htbl;
+
+       /* Go over the array of STEs, and build dr_ste accordingly.
+        * The loop is over only the builders which are equal or less to the
+        * number of stes, in case we have actions that lives in other stes.
+        */
+       for (i = 0; i < nic_matcher->num_of_builders; i++) {
+               /* Calculate CRC and keep new ste entry */
+               u8 *cur_hw_ste_ent = hw_ste_arr + (i * DR_STE_SIZE);
+
+               ste = dr_rule_handle_ste_branch(rule,
+                                               nic_rule,
+                                               &send_ste_list,
+                                               cur_htbl,
+                                               cur_hw_ste_ent,
+                                               i + 1,
+                                               &htbl);
+               if (!ste) {
+                       mlx5dr_err(dmn, "Failed creating next branch\n");
+                       ret = -ENOENT;
+                       goto free_rule;
+               }
+
+               cur_htbl = ste->next_htbl;
+
+               mlx5dr_ste_get(ste);
+               mlx5dr_rule_set_last_member(nic_rule, ste, true);
+       }
+
+       /* Connect actions */
+       ret = dr_rule_handle_action_stes(rule, nic_rule, &send_ste_list,
+                                        ste, hw_ste_arr, new_hw_ste_arr_sz);
+       if (ret) {
+               mlx5dr_dbg(dmn, "Failed apply actions\n");
+               goto free_rule;
+       }
+       ret = dr_rule_send_update_list(&send_ste_list, dmn, true);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed sending ste!\n");
+               goto free_rule;
+       }
+
+       /* Release the table reference taken on rehash in
+        * dr_rule_handle_ste_branch()
+        */
+       if (htbl)
+               mlx5dr_htbl_put(htbl);
+
+       nic_matcher->rules++;
+
+       mlx5dr_domain_nic_unlock(nic_dmn);
+
+       if (unlikely(!hw_ste_arr_is_opt))
+               kfree(hw_ste_arr);
+
+       return 0;
+
+free_rule:
+       dr_rule_clean_rule_members(rule, nic_rule);
+       /* Clean all ste_info's */
+       list_for_each_entry_safe(ste_info, tmp_ste_info, &send_ste_list, send_list) {
+               list_del(&ste_info->send_list);
+               mlx5dr_send_info_free(ste_info);
+       }
+
+remove_from_nic_tbl:
+       if (!nic_matcher->rules)
+               mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher);
+
+free_hw_ste:
+       if (!hw_ste_arr_is_opt)
+               kfree(hw_ste_arr);
+
+err_unlock:
+       mlx5dr_domain_nic_unlock(nic_dmn);
+
+       return ret;
+}
+
+/* Create both RX and TX nic rules for an FDB domain rule.
+ *
+ * The match param is consumed by the first nic_rule insertion, so the
+ * TX side works on a private copy. On TX failure the already-created
+ * RX side is destroyed.
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int
+dr_rule_create_rule_fdb(struct mlx5dr_rule *rule,
+                       struct mlx5dr_match_param *param,
+                       size_t num_actions,
+                       struct mlx5dr_action *actions[])
+{
+       struct mlx5dr_match_param tx_param;
+       int ret;
+
+       /* Keep an untouched copy for TX - RX insertion consumes @param */
+       tx_param = *param;
+
+       ret = dr_rule_create_rule_nic(rule, &rule->rx, param,
+                                     num_actions, actions);
+       if (ret)
+               return ret;
+
+       ret = dr_rule_create_rule_nic(rule, &rule->tx, &tx_param,
+                                     num_actions, actions);
+       if (!ret)
+               return 0;
+
+       dr_rule_destroy_rule_nic(rule, &rule->rx);
+       return ret;
+}
+
+/* Allocate a rule object, verify the match value against the matcher
+ * mask, take references on the supplied actions and create the
+ * per-direction nic rule(s) according to the domain type.
+ *
+ * Return: the new rule, or NULL on any failure.
+ */
+static struct mlx5dr_rule *
+dr_rule_create_rule(struct mlx5dr_matcher *matcher,
+                   struct mlx5dr_match_parameters *value,
+                   size_t num_actions,
+                   struct mlx5dr_action *actions[],
+                   u32 flow_source)
+{
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       struct mlx5dr_match_param param = {};
+       struct mlx5dr_rule *rule;
+       int ret;
+
+       if (!dr_rule_verify(matcher, value, &param))
+               return NULL;
+
+       rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+       if (!rule)
+               return NULL;
+
+       rule->matcher = matcher;
+       rule->flow_source = flow_source;
+       INIT_LIST_HEAD(&rule->rule_actions_list);
+
+       ret = dr_rule_add_action_members(rule, num_actions, actions);
+       if (ret)
+               goto free_rule;
+
+       switch (dmn->type) {
+       case MLX5DR_DOMAIN_TYPE_NIC_RX:
+               rule->rx.nic_matcher = &matcher->rx;
+               ret = dr_rule_create_rule_nic(rule, &rule->rx, &param,
+                                             num_actions, actions);
+               break;
+       case MLX5DR_DOMAIN_TYPE_NIC_TX:
+               rule->tx.nic_matcher = &matcher->tx;
+               ret = dr_rule_create_rule_nic(rule, &rule->tx, &param,
+                                             num_actions, actions);
+               break;
+       case MLX5DR_DOMAIN_TYPE_FDB:
+               rule->rx.nic_matcher = &matcher->rx;
+               rule->tx.nic_matcher = &matcher->tx;
+               ret = dr_rule_create_rule_fdb(rule, &param,
+                                             num_actions, actions);
+               break;
+       default:
+               ret = -EINVAL;
+               break;
+       }
+
+       if (ret)
+               goto remove_action_members;
+
+       INIT_LIST_HEAD(&rule->dbg_node);
+       mlx5dr_dbg_rule_add(rule);
+       return rule;
+
+remove_action_members:
+       dr_rule_remove_action_members(rule);
+free_rule:
+       kfree(rule);
+       mlx5dr_err(dmn, "Failed creating rule\n");
+       return NULL;
+}
+
+/* Public entry point: create a steering rule on @matcher. A matcher
+ * reference is held for the lifetime of the rule and dropped if
+ * creation fails.
+ */
+struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
+                                      struct mlx5dr_match_parameters *value,
+                                      size_t num_actions,
+                                      struct mlx5dr_action *actions[],
+                                      u32 flow_source)
+{
+       struct mlx5dr_rule *rule;
+
+       refcount_inc(&matcher->refcount);
+
+       rule = dr_rule_create_rule(matcher, value, num_actions, actions,
+                                  flow_source);
+       if (rule)
+               return rule;
+
+       /* Creation failed - drop the reference taken above */
+       refcount_dec(&matcher->refcount);
+       return NULL;
+}
+
+/* Public entry point: destroy a steering rule and, on success, drop
+ * the matcher reference taken at creation time.
+ */
+int mlx5dr_rule_destroy(struct mlx5dr_rule *rule)
+{
+       struct mlx5dr_matcher *matcher = rule->matcher;
+       int ret = dr_rule_destroy_rule(rule);
+
+       if (!ret)
+               refcount_dec(&matcher->refcount);
+
+       return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_send.c
new file mode 100644 (file)
index 0000000..6fa06ba
--- /dev/null
@@ -0,0 +1,1368 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/smp.h>
+#include "dr_types.h"
+
+/* Send ring tuning: SQ depth, signaled-completion interval, drain
+ * threshold (in multiples of signal_th, see dr_handle_pending_wc()),
+ * and the batch size of the ste_send_info object pool.
+ */
+#define QUEUE_SIZE 128
+#define SIGNAL_PER_DIV_QUEUE 16
+#define TH_NUMS_TO_DRAIN 2
+#define DR_SEND_INFO_POOL_SIZE 1000
+
+/* Return codes of the CQ polling helpers below */
+enum { CQ_OK = 0, CQ_EMPTY = -1, CQ_POLL_ERR = -2 };
+
+/* One local data segment of a posted WQE */
+struct dr_data_seg {
+       u64 addr;       /* local buffer address */
+       u32 length;     /* length in bytes */
+       u32 lkey;       /* local memory key */
+       unsigned int send_flags;        /* e.g. IB_SEND_SIGNALED */
+};
+
+/* Kind of work described by a postsend_info */
+enum send_info_type {
+       WRITE_ICM = 0,  /* RDMA write (followed by a read-back) to ICM */
+       GTA_ARG   = 1,  /* FLOW_TBL_ACCESS WQE updating a modify-hdr argument */
+};
+
+struct postsend_info {
+       enum send_info_type type;
+       struct dr_data_seg write;
+       struct dr_data_seg read;        /* used only for WRITE_ICM */
+       u64 remote_addr;
+       u32 rkey;
+};
+
+/* Attributes for the INIT->RTR QP transition */
+struct dr_qp_rtr_attr {
+       struct mlx5dr_cmd_gid_attr dgid_attr;
+       enum ib_mtu mtu;
+       u32 qp_num;
+       u16 port_num;
+       u8 min_rnr_timer;
+       u8 sgid_index;
+       u16 udp_src_port;
+       u8 fl:1;        /* copied into qpc primary_address_path.fl */
+};
+
+/* Attributes for the RTR->RTS QP transition */
+struct dr_qp_rts_attr {
+       u8 timeout;
+       u8 retry_cnt;
+       u8 rnr_retry;
+};
+
+/* Parameters for creating the send ring's RC QP */
+struct dr_qp_init_attr {
+       u32 cqn;
+       u32 pdn;
+       u32 max_send_wr;
+       struct mlx5_uars_page *uar;
+       u8 isolate_vl_tc:1;
+};
+
+/* Pool object wrapping a ste_send_info so it can sit on a free list */
+struct mlx5dr_send_info_pool_obj {
+       struct mlx5dr_ste_send_info ste_send_info;
+       struct mlx5dr_send_info_pool *pool;     /* owning pool */
+       struct list_head list_node;
+};
+
+struct mlx5dr_send_info_pool {
+       struct list_head free_list;
+};
+
+/* Allocate DR_SEND_INFO_POOL_SIZE objects and append them to the pool's
+ * free list. On allocation failure, frees everything currently on the
+ * free list (callers only invoke this when the list is empty, so no
+ * pre-existing objects are lost) and returns -ENOMEM.
+ */
+static int dr_send_info_pool_fill(struct mlx5dr_send_info_pool *pool)
+{
+       struct mlx5dr_send_info_pool_obj *pool_obj, *tmp_pool_obj;
+       int i;
+
+       for (i = 0; i < DR_SEND_INFO_POOL_SIZE; i++) {
+               pool_obj = kzalloc(sizeof(*pool_obj), GFP_KERNEL);
+               if (!pool_obj)
+                       goto clean_pool;
+
+               pool_obj->pool = pool;
+               list_add_tail(&pool_obj->list_node, &pool->free_list);
+       }
+
+       return 0;
+
+clean_pool:
+       list_for_each_entry_safe(pool_obj, tmp_pool_obj, &pool->free_list, list_node) {
+               list_del(&pool_obj->list_node);
+               kfree(pool_obj);
+       }
+
+       return -ENOMEM;
+}
+
+/* Free every object on the pool's free list, then the pool itself.
+ * Objects currently handed out (not on the free list) are not freed here.
+ */
+static void dr_send_info_pool_destroy(struct mlx5dr_send_info_pool *pool)
+{
+       struct mlx5dr_send_info_pool_obj *pool_obj, *tmp_pool_obj;
+
+       list_for_each_entry_safe(pool_obj, tmp_pool_obj, &pool->free_list, list_node) {
+               list_del(&pool_obj->list_node);
+               kfree(pool_obj);
+       }
+
+       kfree(pool);
+}
+
+/* Destroy both per-domain (TX and RX) ste_send_info pools */
+void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn)
+{
+       dr_send_info_pool_destroy(dmn->send_info_pool_tx);
+       dr_send_info_pool_destroy(dmn->send_info_pool_rx);
+}
+
+/* Allocate a pool and pre-fill its free list; NULL on failure */
+static struct mlx5dr_send_info_pool *dr_send_info_pool_create(void)
+{
+       struct mlx5dr_send_info_pool *pool;
+       int ret;
+
+       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool)
+               return NULL;
+
+       INIT_LIST_HEAD(&pool->free_list);
+
+       ret = dr_send_info_pool_fill(pool);
+       if (ret) {
+               kfree(pool);
+               return NULL;
+       }
+
+       return pool;
+}
+
+/* Create the per-domain RX and TX ste_send_info pools.
+ * Returns 0 on success, -ENOMEM on failure (no partial state left).
+ */
+int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn)
+{
+       dmn->send_info_pool_rx = dr_send_info_pool_create();
+       if (!dmn->send_info_pool_rx)
+               return -ENOMEM;
+
+       dmn->send_info_pool_tx = dr_send_info_pool_create();
+       if (!dmn->send_info_pool_tx) {
+               dr_send_info_pool_destroy(dmn->send_info_pool_rx);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+/* Take a ste_send_info object from the domain's RX or TX pool
+ * (selected by @nic_type), refilling the pool if it is empty.
+ * Returns NULL on allocation failure. Pair with mlx5dr_send_info_free().
+ */
+struct mlx5dr_ste_send_info
+*mlx5dr_send_info_alloc(struct mlx5dr_domain *dmn,
+                       enum mlx5dr_domain_nic_type nic_type)
+{
+       struct mlx5dr_send_info_pool_obj *pool_obj;
+       struct mlx5dr_send_info_pool *pool;
+       int ret;
+
+       pool = nic_type == DR_DOMAIN_NIC_TYPE_RX ? dmn->send_info_pool_rx :
+                                                  dmn->send_info_pool_tx;
+
+       if (unlikely(list_empty(&pool->free_list))) {
+               ret = dr_send_info_pool_fill(pool);
+               if (ret)
+                       return NULL;
+       }
+
+       pool_obj = list_first_entry_or_null(&pool->free_list,
+                                           struct mlx5dr_send_info_pool_obj,
+                                           list_node);
+
+       if (likely(pool_obj)) {
+               list_del_init(&pool_obj->list_node);
+       } else {
+               /* should be unreachable: the list was just (re)filled */
+               WARN_ONCE(!pool_obj, "Failed getting ste send info obj from pool");
+               return NULL;
+       }
+
+       return &pool_obj->ste_send_info;
+}
+
+/* Return a ste_send_info object to its owning pool's free list */
+void mlx5dr_send_info_free(struct mlx5dr_ste_send_info *ste_send_info)
+{
+       struct mlx5dr_send_info_pool_obj *pool_obj;
+
+       pool_obj = container_of(ste_send_info,
+                               struct mlx5dr_send_info_pool_obj,
+                               ste_send_info);
+
+       list_add(&pool_obj->list_node, &pool_obj->pool->free_list);
+}
+
+/* Consume one CQE: advance the SQ consumer counter past the completed
+ * WQE. Returns CQ_OK for a successful completion, CQ_POLL_ERR for
+ * REQ/RESP errors (the consumer counter is still advanced so the
+ * queue can drain).
+ */
+static int dr_parse_cqe(struct mlx5dr_cq *dr_cq, struct mlx5_cqe64 *cqe64)
+{
+       unsigned int idx;
+       u8 opcode;
+
+       opcode = get_cqe_opcode(cqe64);
+       if (opcode == MLX5_CQE_REQ_ERR) {
+               idx = be16_to_cpu(cqe64->wqe_counter) &
+                       (dr_cq->qp->sq.wqe_cnt - 1);
+               dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
+       } else if (opcode == MLX5_CQE_RESP_ERR) {
+               ++dr_cq->qp->sq.cc;
+       } else {
+               idx = be16_to_cpu(cqe64->wqe_counter) &
+                       (dr_cq->qp->sq.wqe_cnt - 1);
+               dr_cq->qp->sq.cc = dr_cq->qp->sq.wqe_head[idx] + 1;
+
+               return CQ_OK;
+       }
+
+       return CQ_POLL_ERR;
+}
+
+/* Poll a single CQE. Returns CQ_OK/CQ_POLL_ERR from dr_parse_cqe(),
+ * CQ_EMPTY when there is nothing to poll, or CQ_POLL_ERR if the
+ * device is in internal-error state.
+ */
+static int dr_cq_poll_one(struct mlx5dr_cq *dr_cq)
+{
+       struct mlx5_cqe64 *cqe64;
+       int err;
+
+       cqe64 = mlx5_cqwq_get_cqe(&dr_cq->wq);
+       if (!cqe64) {
+               if (unlikely(dr_cq->mdev->state ==
+                            MLX5_DEVICE_STATE_INTERNAL_ERROR)) {
+                       mlx5_core_dbg_once(dr_cq->mdev,
+                                          "Polling CQ while device is shutting down\n");
+                       return CQ_POLL_ERR;
+               }
+               return CQ_EMPTY;
+       }
+
+       mlx5_cqwq_pop(&dr_cq->wq);
+       err = dr_parse_cqe(dr_cq, cqe64);
+       mlx5_cqwq_update_db_record(&dr_cq->wq);
+
+       return err;
+}
+
+/* Poll up to @ne completions. Returns the number of completions
+ * polled (may be 0 when the CQ is empty), or CQ_POLL_ERR on error.
+ */
+static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
+{
+       int npolled;
+       int err = 0;
+
+       for (npolled = 0; npolled < ne; ++npolled) {
+               err = dr_cq_poll_one(dr_cq);
+               if (err != CQ_OK)
+                       break;
+       }
+
+       return err == CQ_POLL_ERR ? err : npolled;
+}
+
+/* Create the RC QP used by the send ring for posting ICM writes/reads.
+ * The RQ is fixed at 256 entries; the SQ is sized from attr->max_send_wr
+ * (rounded up to a power of two). Returns the new QP, or NULL on any
+ * failure (errors are reported via NULL, not ERR_PTR).
+ */
+static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
+                                        struct dr_qp_init_attr *attr)
+{
+       u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+       u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
+       struct mlx5_wq_param wqp;
+       struct mlx5dr_qp *dr_qp;
+       int inlen;
+       void *qpc;
+       void *in;
+       int err;
+
+       dr_qp = kzalloc(sizeof(*dr_qp), GFP_KERNEL);
+       if (!dr_qp)
+               return NULL;
+
+       wqp.buf_numa_node = mdev->priv.numa_node;
+       wqp.db_numa_node = mdev->priv.numa_node;
+
+       dr_qp->rq.pc = 0;
+       dr_qp->rq.cc = 0;
+       dr_qp->rq.wqe_cnt = 256;
+       dr_qp->sq.pc = 0;
+       dr_qp->sq.cc = 0;
+       dr_qp->sq.head = 0;
+       dr_qp->sq.wqe_cnt = roundup_pow_of_two(attr->max_send_wr);
+
+       /* temp_qpc only conveys the queue sizes to mlx5_wq_qp_create() */
+       MLX5_SET(qpc, temp_qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+       MLX5_SET(qpc, temp_qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
+       MLX5_SET(qpc, temp_qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+       err = mlx5_wq_qp_create(mdev, &wqp, temp_qpc, &dr_qp->wq,
+                               &dr_qp->wq_ctrl);
+       if (err) {
+               mlx5_core_warn(mdev, "Can't create QP WQ\n");
+               goto err_wq;
+       }
+
+       dr_qp->sq.wqe_head = kcalloc(dr_qp->sq.wqe_cnt,
+                                    sizeof(dr_qp->sq.wqe_head[0]),
+                                    GFP_KERNEL);
+
+       if (!dr_qp->sq.wqe_head) {
+               mlx5_core_warn(mdev, "Can't allocate wqe head\n");
+               goto err_wqe_head;
+       }
+
+       /* Build the CREATE_QP command: qpc + physical address array */
+       inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
+               MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) *
+               dr_qp->wq_ctrl.buf.npages;
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               err = -ENOMEM;
+               goto err_in;
+       }
+
+       qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
+       MLX5_SET(qpc, qpc, st, MLX5_QP_ST_RC);
+       MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
+       MLX5_SET(qpc, qpc, isolate_vl_tc, attr->isolate_vl_tc);
+       MLX5_SET(qpc, qpc, pd, attr->pdn);
+       MLX5_SET(qpc, qpc, uar_page, attr->uar->index);
+       MLX5_SET(qpc, qpc, log_page_size,
+                dr_qp->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+       MLX5_SET(qpc, qpc, fre, 1);
+       MLX5_SET(qpc, qpc, rlky, 1);
+       MLX5_SET(qpc, qpc, cqn_snd, attr->cqn);
+       MLX5_SET(qpc, qpc, cqn_rcv, attr->cqn);
+       MLX5_SET(qpc, qpc, log_rq_stride, ilog2(MLX5_SEND_WQE_DS) - 4);
+       MLX5_SET(qpc, qpc, log_rq_size, ilog2(dr_qp->rq.wqe_cnt));
+       MLX5_SET(qpc, qpc, rq_type, MLX5_NON_ZERO_RQ);
+       MLX5_SET(qpc, qpc, log_sq_size, ilog2(dr_qp->sq.wqe_cnt));
+       MLX5_SET(qpc, qpc, ts_format, mlx5_get_qp_default_ts(mdev));
+       MLX5_SET64(qpc, qpc, dbr_addr, dr_qp->wq_ctrl.db.dma);
+       if (MLX5_CAP_GEN(mdev, cqe_version) == 1)
+               MLX5_SET(qpc, qpc, user_index, 0xFFFFFF);
+       mlx5_fill_page_frag_array(&dr_qp->wq_ctrl.buf,
+                                 (__be64 *)MLX5_ADDR_OF(create_qp_in,
+                                                        in, pas));
+
+       MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
+       err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
+       dr_qp->qpn = MLX5_GET(create_qp_out, out, qpn);
+       kvfree(in);
+       if (err)
+               goto err_in;
+       dr_qp->uar = attr->uar;
+
+       return dr_qp;
+
+err_in:
+       kfree(dr_qp->sq.wqe_head);
+err_wqe_head:
+       mlx5_wq_destroy(&dr_qp->wq_ctrl);
+err_wq:
+       kfree(dr_qp);
+       return NULL;
+}
+
+/* Destroy the QP object in FW and free all SW resources */
+static void dr_destroy_qp(struct mlx5_core_dev *mdev,
+                         struct mlx5dr_qp *dr_qp)
+{
+       u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
+
+       MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
+       MLX5_SET(destroy_qp_in, in, qpn, dr_qp->qpn);
+       mlx5_cmd_exec_in(mdev, destroy_qp, in);
+
+       kfree(dr_qp->sq.wqe_head);
+       mlx5_wq_destroy(&dr_qp->wq_ctrl);
+       kfree(dr_qp);
+}
+
+/* Ring the doorbell: publish the new SQ producer counter to the
+ * doorbell record, then write the WQE ctrl segment to the UAR.
+ */
+static void dr_cmd_notify_hw(struct mlx5dr_qp *dr_qp, void *ctrl)
+{
+       /* ensure WQE contents are visible before the doorbell record update */
+       dma_wmb();
+       *dr_qp->wq.sq.db = cpu_to_be32(dr_qp->sq.pc & 0xffff);
+
+       /* After wmb() the hw aware of new work */
+       wmb();
+
+       mlx5_write64(ctrl, dr_qp->uar->map + MLX5_BF_OFFSET);
+}
+
+/* Build the segments of a FLOW_TBL_ACCESS WQE that updates a
+ * header-modify argument: ctrl seg (general_id = argument id), a
+ * fully-reserved flow-update ctrl seg, and the inline argument data.
+ * *size is returned in MLX5_SEND_WQE_DS units.
+ */
+static void
+dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+                                       u32 remote_addr,
+                                       struct dr_data_seg *data_seg,
+                                       int *size)
+{
+       struct mlx5_wqe_header_modify_argument_update_seg *wq_arg_seg;
+       struct mlx5_wqe_flow_update_ctrl_seg *wq_flow_seg;
+
+       wq_ctrl->general_id = cpu_to_be32(remote_addr);
+       wq_flow_seg = (void *)(wq_ctrl + 1);
+
+       /* mlx5_wqe_flow_update_ctrl_seg - all reserved */
+       memset(wq_flow_seg, 0, sizeof(*wq_flow_seg));
+       wq_arg_seg = (void *)(wq_flow_seg + 1);
+
+       /* argument data is sent inline in the WQE */
+       memcpy(wq_arg_seg->argument_list,
+              (void *)(uintptr_t)data_seg->addr,
+              data_seg->length);
+
+       *size = (sizeof(*wq_ctrl) +      /* WQE ctrl segment */
+                sizeof(*wq_flow_seg) +  /* WQE flow update ctrl seg - reserved */
+                sizeof(*wq_arg_seg)) /  /* WQE hdr modify arg seg - data */
+               MLX5_SEND_WQE_DS;
+}
+
+/* Build remote-address + data segments for an RDMA read/write WQE.
+ * *size is returned in MLX5_SEND_WQE_DS units.
+ */
+static void
+dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+                                 u64 remote_addr,
+                                 u32 rkey,
+                                 struct dr_data_seg *data_seg,
+                                 unsigned int *size)
+{
+       struct mlx5_wqe_raddr_seg *wq_raddr;
+       struct mlx5_wqe_data_seg *wq_dseg;
+
+       wq_raddr = (void *)(wq_ctrl + 1);
+
+       wq_raddr->raddr = cpu_to_be64(remote_addr);
+       wq_raddr->rkey = cpu_to_be32(rkey);
+       wq_raddr->reserved = 0;
+
+       wq_dseg = (void *)(wq_raddr + 1);
+
+       wq_dseg->byte_count = cpu_to_be32(data_seg->length);
+       wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
+       wq_dseg->addr = cpu_to_be64(data_seg->addr);
+
+       *size = (sizeof(*wq_ctrl) +    /* WQE ctrl segment */
+                sizeof(*wq_dseg) +    /* WQE data segment */
+                sizeof(*wq_raddr)) /  /* WQE remote addr segment */
+               MLX5_SEND_WQE_DS;
+}
+
+/* Initialize the common WQE ctrl segment fields; a CQE is requested
+ * only for WQEs flagged IB_SEND_SIGNALED.
+ */
+static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
+                           struct dr_data_seg *data_seg)
+{
+       wq_ctrl->signature = 0;
+       wq_ctrl->rsvd[0] = 0;
+       wq_ctrl->rsvd[1] = 0;
+       wq_ctrl->fm_ce_se = data_seg->send_flags & IB_SEND_SIGNALED ?
+                               MLX5_WQE_CTRL_CQ_UPDATE : 0;
+       wq_ctrl->imm = 0;
+}
+
+/* Build and post one WQE at the current SQ producer index.
+ * Supports RDMA read/write (ICM) and FLOW_TBL_ACCESS (GTA arg) opcodes.
+ * The doorbell is rung only when @notify_hw is set, which lets the
+ * caller post several WQEs with a single doorbell.
+ */
+static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
+                            u32 rkey, struct dr_data_seg *data_seg,
+                            u32 opcode, bool notify_hw)
+{
+       struct mlx5_wqe_ctrl_seg *wq_ctrl;
+       int opcode_mod = 0;
+       unsigned int size;
+       unsigned int idx;
+
+       idx = dr_qp->sq.pc & (dr_qp->sq.wqe_cnt - 1);
+
+       wq_ctrl = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
+       dr_set_ctrl_seg(wq_ctrl, data_seg);
+
+       switch (opcode) {
+       case MLX5_OPCODE_RDMA_READ:
+       case MLX5_OPCODE_RDMA_WRITE:
+               dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
+                                                 rkey, data_seg, &size);
+               break;
+       case MLX5_OPCODE_FLOW_TBL_ACCESS:
+               opcode_mod = MLX5_CMD_OP_MOD_UPDATE_HEADER_MODIFY_ARGUMENT;
+               dr_rdma_handle_flow_access_arg_segments(wq_ctrl, remote_addr,
+                                                       data_seg, &size);
+               break;
+       default:
+               WARN(true, "illegal opcode %d", opcode);
+               return;
+       }
+
+       /* ---------------------------------------------------------
+        * |opcode_mod (8 bit)|wqe_index (16 bits)| opcode (8 bits)|
+        * ---------------------------------------------------------
+        */
+       wq_ctrl->opmod_idx_opcode =
+               cpu_to_be32((opcode_mod << 24) |
+                           ((dr_qp->sq.pc & 0xffff) << 8) |
+                           opcode);
+       wq_ctrl->qpn_ds = cpu_to_be32(size | dr_qp->qpn << 8);
+
+       dr_qp->sq.pc += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
+       dr_qp->sq.wqe_head[idx] = dr_qp->sq.head++;
+
+       if (notify_hw)
+               dr_cmd_notify_hw(dr_qp, wq_ctrl);
+}
+
+/* Post the WQE(s) for one postsend_info: an ICM write is immediately
+ * followed by an RDMA read from the same remote address; a GTA arg
+ * update is a single WQE. In both cases the doorbell is rung once,
+ * after the last WQE.
+ */
+static void dr_post_send(struct mlx5dr_qp *dr_qp, struct postsend_info *send_info)
+{
+       if (send_info->type == WRITE_ICM) {
+               dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+                                &send_info->write, MLX5_OPCODE_RDMA_WRITE, false);
+               dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+                                &send_info->read, MLX5_OPCODE_RDMA_READ, true);
+       } else { /* GTA_ARG */
+               dr_rdma_segments(dr_qp, send_info->remote_addr, send_info->rkey,
+                                &send_info->write, MLX5_OPCODE_FLOW_TBL_ACCESS, true);
+       }
+
+}
+
+/**
+ * mlx5dr_send_fill_and_append_ste_send_info: Add data to be sent
+ * with send_list parameters:
+ *
+ *     @ste:       The data that is attached to this specific ste
+ *     @size:      of data to write
+ *     @offset:    of the data from start of the hw_ste entry
+ *     @data:      data
+ *     @ste_info:  ste to be sent with send_list
+ *     @send_list: to append into it
+ *     @copy_data: if true indicates that the data should be kept because
+ *                 it's not backed up anywhere (like in re-hash).
+ *                 if false, it lets the data be updated after
+ *                 it was added to the list.
+ */
+void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
+                                              u16 offset, u8 *data,
+                                              struct mlx5dr_ste_send_info *ste_info,
+                                              struct list_head *send_list,
+                                              bool copy_data)
+{
+       ste_info->size = size;
+       ste_info->ste = ste;
+       ste_info->offset = offset;
+
+       if (copy_data) {
+               memcpy(ste_info->data_cont, data, size);
+               ste_info->data = ste_info->data_cont;
+       } else {
+               ste_info->data = data;
+       }
+
+       list_add_tail(&ste_info->send_list, send_list);
+}
+
+/* The function tries to consume one wc each time, unless the queue is full, in
+ * that case, which means that the hw is behind the sw in a full queue len
+ * the function will drain the cq till it empty.
+ */
+static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
+                               struct mlx5dr_send_ring *send_ring)
+{
+       bool is_drain = false;
+       int ne;
+
+       if (send_ring->pending_wqe < send_ring->signal_th)
+               return 0;
+
+       /* Queue is full start drain it */
+       if (send_ring->pending_wqe >=
+           dmn->send_ring->signal_th * TH_NUMS_TO_DRAIN)
+               is_drain = true;
+
+       do {
+               ne = dr_poll_cq(send_ring->cq, 1);
+               if (unlikely(ne < 0)) {
+                       mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited",
+                                           send_ring->qp->qpn);
+                       /* latch the error: dr_postsend_icm_data() will skip
+                        * all further posts on this ring
+                        */
+                       send_ring->err_state = true;
+                       return ne;
+               } else if (ne == 1) {
+                       /* one CQE completes a whole signal_th batch of WQEs */
+                       send_ring->pending_wqe -= send_ring->signal_th;
+               }
+       } while (ne == 1 ||
+                (is_drain && send_ring->pending_wqe  >= send_ring->signal_th));
+
+       return 0;
+}
+
+/* Account the GTA WQE and request a signaled completion once every
+ * signal_th posted WQEs.
+ */
+static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
+                                   struct postsend_info *send_info)
+{
+       send_ring->pending_wqe++;
+
+       if (send_ring->pending_wqe % send_ring->signal_th == 0)
+               send_info->write.send_flags |= IB_SEND_SIGNALED;
+       else
+               send_info->write.send_flags = 0;
+}
+
+/* Prepare the write and read segments of a WRITE_ICM post.
+ * Payloads larger than max_inline_size are first copied into a slot
+ * of the ring buffer MR; the read-back lands in a dedicated sync
+ * buffer. Signaled completions are requested once every signal_th
+ * posted WQEs (write and read each count as one WQE).
+ */
+static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
+                                  struct mlx5dr_send_ring *send_ring,
+                                  struct postsend_info *send_info)
+{
+       u32 buff_offset;
+
+       if (send_info->write.length > dmn->info.max_inline_size) {
+               buff_offset = (send_ring->tx_head &
+                              (dmn->send_ring->signal_th - 1)) *
+                             send_ring->max_post_send_size;
+               /* Copy to ring mr */
+               memcpy(send_ring->buf + buff_offset,
+                      (void *)(uintptr_t)send_info->write.addr,
+                      send_info->write.length);
+               send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset;
+               send_info->write.lkey = send_ring->mr->mkey;
+
+               send_ring->tx_head++;
+       }
+
+       send_ring->pending_wqe++;
+
+       if (send_ring->pending_wqe % send_ring->signal_th == 0)
+               send_info->write.send_flags |= IB_SEND_SIGNALED;
+
+       send_ring->pending_wqe++;
+       send_info->read.length = send_info->write.length;
+
+       /* Read into dedicated sync buffer */
+       send_info->read.addr = (uintptr_t)send_ring->sync_mr->dma_addr;
+       send_info->read.lkey = send_ring->sync_mr->mkey;
+
+       if (send_ring->pending_wqe % send_ring->signal_th == 0)
+               send_info->read.send_flags = IB_SEND_SIGNALED;
+       else
+               send_info->read.send_flags = 0;
+}
+
+/* Dispatch segment preparation by send_info->type */
+static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
+                             struct mlx5dr_send_ring *send_ring,
+                             struct postsend_info *send_info)
+{
+       if (send_info->type == WRITE_ICM)
+               dr_fill_write_icm_segs(dmn, send_ring, send_info);
+       else /* args */
+               dr_fill_write_args_segs(send_ring, send_info);
+}
+
+/* Post one send_info to the domain's send ring, under the ring lock.
+ * Silently succeeds (returns 0) when the device or the QP is in error
+ * state, so teardown paths keep making progress.
+ */
+static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
+                               struct postsend_info *send_info)
+{
+       struct mlx5dr_send_ring *send_ring = dmn->send_ring;
+       int ret;
+
+       if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+                    send_ring->err_state)) {
+               mlx5_core_dbg_once(dmn->mdev,
+                                  "Skipping post send: QP err state: %d, device state: %d\n",
+                                  send_ring->err_state, dmn->mdev->state);
+               return 0;
+       }
+
+       spin_lock(&send_ring->lock);
+
+       ret = dr_handle_pending_wc(dmn, send_ring);
+       if (ret)
+               goto out_unlock;
+
+       dr_fill_data_segs(dmn, send_ring, send_info);
+       dr_post_send(send_ring->qp, send_info);
+
+out_unlock:
+       spin_unlock(&send_ring->lock);
+       return ret;
+}
+
+/* Work out how to split an htbl copy into posts: number of iterations,
+ * bytes (*byte_size) and STEs (*num_stes) per iteration, and allocate
+ * the zeroed staging buffer (*data; caller frees with kvfree()).
+ */
+static int dr_get_tbl_copy_details(struct mlx5dr_domain *dmn,
+                                  struct mlx5dr_ste_htbl *htbl,
+                                  u8 **data,
+                                  u32 *byte_size,
+                                  int *iterations,
+                                  int *num_stes)
+{
+       u32 chunk_byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
+       int alloc_size;
+
+       if (chunk_byte_size > dmn->send_ring->max_post_send_size) {
+               *iterations = chunk_byte_size / dmn->send_ring->max_post_send_size;
+               *byte_size = dmn->send_ring->max_post_send_size;
+               alloc_size = *byte_size;
+               *num_stes = *byte_size / DR_STE_SIZE;
+       } else {
+               *iterations = 1;
+               *num_stes = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
+               alloc_size = *num_stes * DR_STE_SIZE;
+       }
+
+       *data = kvzalloc(alloc_size, GFP_KERNEL);
+       if (!*data)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/**
+ * mlx5dr_send_postsend_ste: write size bytes into offset from the hw icm.
+ *
+ *     @dmn:    Domain
+ *     @ste:    The ste struct that contains the data (at
+ *              least part of it)
+ *     @data:   The real data to send size data
+ *     @size:   for writing.
+ *     @offset: The offset from the icm mapped data to
+ *              start write to this for write only part of the
+ *              buffer.
+ *
+ * Return: 0 on success.
+ */
+int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn, struct mlx5dr_ste *ste,
+                            u8 *data, u16 size, u16 offset)
+{
+       struct postsend_info send_info = {};
+
+       mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, data, size);
+
+       send_info.write.addr = (uintptr_t)data;
+       send_info.write.length = size;
+       send_info.write.lkey = 0;
+       send_info.remote_addr = mlx5dr_ste_get_mr_addr(ste) + offset;
+       send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(ste->htbl->chunk);
+
+       return dr_postsend_icm_data(dmn, &send_info);
+}
+
+/* Write an entire hash table to ICM. Used STEs are rebuilt from the
+ * chunk's reduced copy plus the bit @mask; unused STEs are written as
+ * @formatted_ste. The copy is staged through a temp buffer and posted
+ * in one or more iterations (see dr_get_tbl_copy_details()).
+ */
+int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
+                             struct mlx5dr_ste_htbl *htbl,
+                             u8 *formatted_ste, u8 *mask)
+{
+       u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
+       int num_stes_per_iter;
+       int iterations;
+       u8 *data;
+       int ret;
+       int i;
+       int j;
+
+       ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
+                                     &iterations, &num_stes_per_iter);
+       if (ret)
+               return ret;
+
+       mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, formatted_ste, DR_STE_SIZE);
+
+       /* Send the data iteration times */
+       for (i = 0; i < iterations; i++) {
+               u32 ste_index = i * (byte_size / DR_STE_SIZE);
+               struct postsend_info send_info = {};
+
+               /* Copy all ste's on the data buffer
+                * need to add the bit_mask
+                */
+               for (j = 0; j < num_stes_per_iter; j++) {
+                       struct mlx5dr_ste *ste = &htbl->chunk->ste_arr[ste_index + j];
+                       u32 ste_off = j * DR_STE_SIZE;
+
+                       if (mlx5dr_ste_is_not_used(ste)) {
+                               memcpy(data + ste_off,
+                                      formatted_ste, DR_STE_SIZE);
+                       } else {
+                               /* Copy data */
+                               memcpy(data + ste_off,
+                                      htbl->chunk->hw_ste_arr +
+                                      DR_STE_SIZE_REDUCED * (ste_index + j),
+                                      DR_STE_SIZE_REDUCED);
+                               /* Copy bit_mask */
+                               memcpy(data + ste_off + DR_STE_SIZE_REDUCED,
+                                      mask, DR_STE_SIZE_MASK);
+                               /* Only when we have mask we need to re-arrange the STE */
+                               mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx,
+                                                               data + (j * DR_STE_SIZE),
+                                                               DR_STE_SIZE);
+                       }
+               }
+
+               send_info.write.addr = (uintptr_t)data;
+               send_info.write.length = byte_size;
+               send_info.write.lkey = 0;
+               send_info.remote_addr =
+                       mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+               send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
+
+               ret = dr_postsend_icm_data(dmn, &send_info);
+               if (ret)
+                       goto out_free;
+       }
+
+out_free:
+       kvfree(data);
+       return ret;
+}
+
+/* Initialize htbl with default STEs: every entry is written as
+ * @ste_init_data, optionally mirroring the reduced STE into the
+ * chunk's hw_ste_arr shadow when @update_hw_ste is set.
+ */
+int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
+                                       struct mlx5dr_ste_htbl *htbl,
+                                       u8 *ste_init_data,
+                                       bool update_hw_ste)
+{
+       u32 byte_size = mlx5dr_icm_pool_get_chunk_byte_size(htbl->chunk);
+       int iterations;
+       int num_stes;
+       u8 *copy_dst;
+       u8 *data;
+       int ret;
+       int i;
+
+       ret = dr_get_tbl_copy_details(dmn, htbl, &data, &byte_size,
+                                     &iterations, &num_stes);
+       if (ret)
+               return ret;
+
+       if (update_hw_ste) {
+               /* Copy the reduced STE to hash table ste_arr */
+               for (i = 0; i < num_stes; i++) {
+                       copy_dst = htbl->chunk->hw_ste_arr + i * DR_STE_SIZE_REDUCED;
+                       memcpy(copy_dst, ste_init_data, DR_STE_SIZE_REDUCED);
+               }
+       }
+
+       mlx5dr_ste_prepare_for_postsend(dmn->ste_ctx, ste_init_data, DR_STE_SIZE);
+
+       /* Copy the same STE on the data buffer */
+       for (i = 0; i < num_stes; i++) {
+               copy_dst = data + i * DR_STE_SIZE;
+               memcpy(copy_dst, ste_init_data, DR_STE_SIZE);
+       }
+
+       /* Send the data iteration times */
+       for (i = 0; i < iterations; i++) {
+               /* NOTE(review): ste_index is u8 here but u32 in
+                * mlx5dr_send_postsend_htbl(); looks like it could
+                * truncate for large chunks — confirm against the
+                * maximum chunk / max_post_send_size combinations.
+                */
+               u8 ste_index = i * (byte_size / DR_STE_SIZE);
+               struct postsend_info send_info = {};
+
+               send_info.write.addr = (uintptr_t)data;
+               send_info.write.length = byte_size;
+               send_info.write.lkey = 0;
+               send_info.remote_addr =
+                       mlx5dr_ste_get_mr_addr(htbl->chunk->ste_arr + ste_index);
+               send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(htbl->chunk);
+
+               ret = dr_postsend_icm_data(dmn, &send_info);
+               if (ret)
+                       goto out_free;
+       }
+
+out_free:
+       kvfree(data);
+       return ret;
+}
+
+/* Write a modify-header action's rewrite data to its ICM chunk */
+int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
+                               struct mlx5dr_action *action)
+{
+       struct postsend_info send_info = {};
+
+       send_info.write.addr = (uintptr_t)action->rewrite->data;
+       send_info.write.length = action->rewrite->num_of_actions *
+                                DR_MODIFY_ACTION_SIZE;
+       send_info.write.lkey = 0;
+       send_info.remote_addr =
+               mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk);
+       send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk);
+
+       return dr_postsend_icm_data(dmn, &send_info);
+}
+
+/* Write a modify-header pattern (@num_of_actions actions) to @chunk */
+int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
+                                struct mlx5dr_icm_chunk *chunk,
+                                u16 num_of_actions,
+                                u8 *data)
+{
+       struct postsend_info send_info = {};
+       int ret;
+
+       send_info.write.addr = (uintptr_t)data;
+       send_info.write.length = num_of_actions * DR_MODIFY_ACTION_SIZE;
+       send_info.remote_addr = mlx5dr_icm_pool_get_chunk_mr_addr(chunk);
+       send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(chunk);
+
+       ret = dr_postsend_icm_data(dmn, &send_info);
+       if (ret)
+               return ret;
+
+       return 0;
+}
+
+/* Write modify-header argument data to FW-managed argument objects,
+ * one DR_ACTION_CACHE_LINE_SIZE chunk per post. Note that remote_addr
+ * advances by one per chunk while the local address advances by the
+ * number of bytes sent — argument objects appear to be index-addressed
+ * rather than byte-addressed.
+ */
+int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id,
+                             u16 num_of_actions, u8 *actions_data)
+{
+       int data_len, iter = 0, cur_sent;
+       u64 addr;
+       int ret;
+
+       addr = (uintptr_t)actions_data;
+       data_len = num_of_actions * DR_MODIFY_ACTION_SIZE;
+
+       do {
+               struct postsend_info send_info = {};
+
+               send_info.type = GTA_ARG;
+               send_info.write.addr = addr;
+               cur_sent = min_t(u32, data_len, DR_ACTION_CACHE_LINE_SIZE);
+               send_info.write.length = cur_sent;
+               send_info.write.lkey = 0;
+               send_info.remote_addr = arg_id + iter;
+
+               ret = dr_postsend_icm_data(dmn, &send_info);
+               if (ret)
+                       goto out;
+
+               iter++;
+               addr += cur_sent;
+               data_len -= cur_sent;
+       } while (data_len > 0);
+
+out:
+       return ret;
+}
+
+/* RST->INIT transition: set the port and enable remote read/write access */
+static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
+                                struct mlx5dr_qp *dr_qp,
+                                int port)
+{
+       u32 in[MLX5_ST_SZ_DW(rst2init_qp_in)] = {};
+       void *qpc;
+
+       qpc = MLX5_ADDR_OF(rst2init_qp_in, in, qpc);
+
+       MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, port);
+       MLX5_SET(qpc, qpc, pm_state, MLX5_QPC_PM_STATE_MIGRATED);
+       MLX5_SET(qpc, qpc, rre, 1);
+       MLX5_SET(qpc, qpc, rwe, 1);
+
+       MLX5_SET(rst2init_qp_in, in, opcode, MLX5_CMD_OP_RST2INIT_QP);
+       MLX5_SET(rst2init_qp_in, in, qpn, dr_qp->qpn);
+
+       return mlx5_cmd_exec_in(mdev, rst2init_qp, in);
+}
+
+/* RTR->RTS transition: set retry counts and the ack timeout.
+ * (qpn is set twice below; redundant but harmless.)
+ */
+static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev,
+                                   struct mlx5dr_qp *dr_qp,
+                                   struct dr_qp_rts_attr *attr)
+{
+       u32 in[MLX5_ST_SZ_DW(rtr2rts_qp_in)] = {};
+       void *qpc;
+
+       qpc  = MLX5_ADDR_OF(rtr2rts_qp_in, in, qpc);
+
+       MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
+
+       MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt);
+       MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry);
+       MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */
+
+       MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP);
+       MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn);
+
+       return mlx5_cmd_exec_in(mdev, rtr2rts_qp, in);
+}
+
+/* Move the QP from INIT to RTR: program MTU, remote QPN and the primary
+ * address path (remote MAC/GID, source GID index, port and force-loopback
+ * flag) taken from @attr.  The QP is loopback-connected to itself by the
+ * caller (attr->qp_num is this QP's own number in dr_prepare_qp_to_rts).
+ */
+static int dr_cmd_modify_qp_init2rtr(struct mlx5_core_dev *mdev,
+                                    struct mlx5dr_qp *dr_qp,
+                                    struct dr_qp_rtr_attr *attr)
+{
+       u32 in[MLX5_ST_SZ_DW(init2rtr_qp_in)] = {};
+       void *qpc;
+
+       qpc = MLX5_ADDR_OF(init2rtr_qp_in, in, qpc);
+
+       MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);
+
+       MLX5_SET(qpc, qpc, mtu, attr->mtu);
+       MLX5_SET(qpc, qpc, log_msg_max, DR_CHUNK_SIZE_MAX - 1);
+       MLX5_SET(qpc, qpc, remote_qpn, attr->qp_num);
+       memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rmac_47_32),
+              attr->dgid_attr.mac, sizeof(attr->dgid_attr.mac));
+       memcpy(MLX5_ADDR_OF(qpc, qpc, primary_address_path.rgid_rip),
+              attr->dgid_attr.gid, sizeof(attr->dgid_attr.gid));
+       MLX5_SET(qpc, qpc, primary_address_path.src_addr_index,
+                attr->sgid_index);
+
+       /* UDP source port is only meaningful for RoCE v2 */
+       if (attr->dgid_attr.roce_ver == MLX5_ROCE_VERSION_2)
+               MLX5_SET(qpc, qpc, primary_address_path.udp_sport,
+                        attr->udp_src_port);
+
+       MLX5_SET(qpc, qpc, primary_address_path.vhca_port_num, attr->port_num);
+       MLX5_SET(qpc, qpc, primary_address_path.fl, attr->fl);
+       MLX5_SET(qpc, qpc, min_rnr_nak, 1);
+
+       MLX5_SET(init2rtr_qp_in, in, opcode, MLX5_CMD_OP_INIT2RTR_QP);
+       MLX5_SET(init2rtr_qp_in, in, qpn, dr_qp->qpn);
+
+       return mlx5_cmd_exec_in(mdev, init2rtr_qp, in);
+}
+
+/* Return true if a force-loopback RC QP may be created with the current
+ * RoCE state.
+ */
+static bool dr_send_allow_fl(struct mlx5dr_cmd_caps *caps)
+{
+       /* Check whether RC RoCE QP creation with force loopback is allowed.
+        * There are two separate capability bits for this:
+        *  - force loopback when RoCE is enabled
+        *  - force loopback when RoCE is disabled
+        */
+       return ((caps->roce_caps.roce_en &&
+                caps->roce_caps.fl_rc_qp_when_roce_enabled) ||
+               (!caps->roce_caps.roce_en &&
+                caps->roce_caps.fl_rc_qp_when_roce_disabled));
+}
+
+/* Drive the send-ring QP through the full RESET -> INIT -> RTR -> RTS
+ * state machine.  The QP is connected to itself (rtr_attr.qp_num is its
+ * own QPN); GID attributes are only queried when force loopback is not
+ * available.  Returns 0 on success or the first transition's error.
+ */
+static int dr_prepare_qp_to_rts(struct mlx5dr_domain *dmn)
+{
+       struct mlx5dr_qp *dr_qp = dmn->send_ring->qp;
+       struct dr_qp_rts_attr rts_attr = {};
+       struct dr_qp_rtr_attr rtr_attr = {};
+       enum ib_mtu mtu = IB_MTU_1024;
+       u16 gid_index = 0;
+       int port = 1;
+       int ret;
+
+       /* Init */
+       ret = dr_modify_qp_rst2init(dmn->mdev, dr_qp, port);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed modify QP rst2init\n");
+               return ret;
+       }
+
+       /* RTR */
+       rtr_attr.mtu            = mtu;
+       rtr_attr.qp_num         = dr_qp->qpn;
+       rtr_attr.min_rnr_timer  = 12;
+       rtr_attr.port_num       = port;
+       rtr_attr.udp_src_port   = dmn->info.caps.roce_min_src_udp;
+
+       /* If QP creation with force loopback is allowed, then there
+        * is no need for GID index when creating the QP.
+        * Otherwise we query GID attributes and use GID index.
+        */
+       rtr_attr.fl = dr_send_allow_fl(&dmn->info.caps);
+       if (!rtr_attr.fl) {
+               ret = mlx5dr_cmd_query_gid(dmn->mdev, port, gid_index,
+                                          &rtr_attr.dgid_attr);
+               if (ret)
+                       return ret;
+
+               rtr_attr.sgid_index = gid_index;
+       }
+
+       ret = dr_cmd_modify_qp_init2rtr(dmn->mdev, dr_qp, &rtr_attr);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed modify QP init2rtr\n");
+               return ret;
+       }
+
+       /* RTS */
+       rts_attr.timeout        = 14;
+       rts_attr.retry_cnt      = 7;
+       rts_attr.rnr_retry      = 7;
+
+       ret = dr_cmd_modify_qp_rtr2rts(dmn->mdev, dr_qp, &rts_attr);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed modify QP rtr2rts\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+/* CQ completion callback.  The send-ring CQ is polled synchronously, so an
+ * asynchronous completion event is unexpected and only logged.
+ */
+static void dr_cq_complete(struct mlx5_core_cq *mcq,
+                          struct mlx5_eqe *eqe)
+{
+       pr_err("CQ completion CQ: #%u\n", mcq->cqn);
+}
+
+/* Allocate and create a completion queue of at least @ncqe entries
+ * (rounded up to a power of two) for the send ring.
+ * Returns the new CQ, or NULL on any failure (no error code is reported).
+ */
+static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
+                                     struct mlx5_uars_page *uar,
+                                     size_t ncqe)
+{
+       u32 temp_cqc[MLX5_ST_SZ_DW(cqc)] = {};
+       u32 out[MLX5_ST_SZ_DW(create_cq_out)];
+       struct mlx5_wq_param wqp;
+       struct mlx5_cqe64 *cqe;
+       struct mlx5dr_cq *cq;
+       int inlen, err, eqn;
+       void *cqc, *in;
+       __be64 *pas;
+       int vector;
+       u32 i;
+
+       cq = kzalloc(sizeof(*cq), GFP_KERNEL);
+       if (!cq)
+               return NULL;
+
+       ncqe = roundup_pow_of_two(ncqe);
+       MLX5_SET(cqc, temp_cqc, log_cq_size, ilog2(ncqe));
+
+       wqp.buf_numa_node = mdev->priv.numa_node;
+       wqp.db_numa_node = mdev->priv.numa_node;
+
+       err = mlx5_cqwq_create(mdev, &wqp, temp_cqc, &cq->wq,
+                              &cq->wq_ctrl);
+       if (err)
+               goto out;
+
+       /* Mark all CQEs invalid and HW-owned so the poller skips them
+        * until the device writes a real completion.
+        */
+       for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+               cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+               cqe->op_own = MLX5_CQE_INVALID << 4 | MLX5_CQE_OWNER_MASK;
+       }
+
+       inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+               sizeof(u64) * cq->wq_ctrl.buf.npages;
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in)
+               goto err_cqwq;
+
+       vector = raw_smp_processor_id() % mlx5_comp_vectors_max(mdev);
+       err = mlx5_comp_eqn_get(mdev, vector, &eqn);
+       if (err) {
+               kvfree(in);
+               goto err_cqwq;
+       }
+
+       cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+       MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe));
+       MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn);
+       MLX5_SET(cqc, cqc, uar_page, uar->index);
+       MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+                MLX5_ADAPTER_PAGE_SHIFT);
+       MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+       pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas);
+       mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, pas);
+
+       cq->mcq.comp  = dr_cq_complete;
+
+       err = mlx5_core_create_cq(mdev, &cq->mcq, in, inlen, out, sizeof(out));
+       kvfree(in);
+
+       if (err)
+               goto err_cqwq;
+
+       cq->mcq.cqe_sz = 64;
+       cq->mcq.set_ci_db = cq->wq_ctrl.db.db;
+       cq->mcq.arm_db = cq->wq_ctrl.db.db + 1;
+       *cq->mcq.set_ci_db = 0;
+
+       /* set no-zero value, in order to avoid the HW to run db-recovery on
+        * CQ that used in polling mode.
+        */
+       *cq->mcq.arm_db = cpu_to_be32(2 << 28);
+
+       cq->mcq.vector = 0;
+       cq->mcq.uar = uar;
+       cq->mdev = mdev;
+
+       return cq;
+
+err_cqwq:
+       mlx5_wq_destroy(&cq->wq_ctrl);
+out:
+       kfree(cq);
+       return NULL;
+}
+
+/* Tear down a CQ created by dr_create_cq: destroy the HW object, the work
+ * queue, and free the wrapper.
+ */
+static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq)
+{
+       mlx5_core_destroy_cq(mdev, &cq->mcq);
+       mlx5_wq_destroy(&cq->wq_ctrl);
+       kfree(cq);
+}
+
+/* Create a physical-address (PA mode) memory key bound to @pdn covering the
+ * whole address space (length64), with atomic and local/remote read-write
+ * access, not tied to any specific QP (qpn 0xffffff).
+ */
+static int dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
+{
+       u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
+       void *mkc;
+
+       mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+       MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
+       MLX5_SET(mkc, mkc, a, 1);
+       MLX5_SET(mkc, mkc, rw, 1);
+       MLX5_SET(mkc, mkc, rr, 1);
+       MLX5_SET(mkc, mkc, lw, 1);
+       MLX5_SET(mkc, mkc, lr, 1);
+
+       MLX5_SET(mkc, mkc, pd, pdn);
+       MLX5_SET(mkc, mkc, length64, 1);
+       MLX5_SET(mkc, mkc, qpn, 0xffffff);
+
+       return mlx5_core_create_mkey(mdev, mkey, in, sizeof(in));
+}
+
+/* Register @buf as a memory region: DMA-map it bidirectionally and create
+ * an mkey under @pdn.  Returns the MR wrapper, or NULL on failure (the
+ * mapping is undone on mkey-creation failure).  Caller unmaps/frees via
+ * dr_dereg_mr().
+ */
+static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
+                                  u32 pdn, void *buf, size_t size)
+{
+       struct mlx5dr_mr *mr = kzalloc(sizeof(*mr), GFP_KERNEL);
+       struct device *dma_device;
+       dma_addr_t dma_addr;
+       int err;
+
+       if (!mr)
+               return NULL;
+
+       dma_device = mlx5_core_dma_dev(mdev);
+       dma_addr = dma_map_single(dma_device, buf, size,
+                                 DMA_BIDIRECTIONAL);
+       err = dma_mapping_error(dma_device, dma_addr);
+       if (err) {
+               mlx5_core_warn(mdev, "Can't dma buf\n");
+               kfree(mr);
+               return NULL;
+       }
+
+       err = dr_create_mkey(mdev, pdn, &mr->mkey);
+       if (err) {
+               mlx5_core_warn(mdev, "Can't create mkey\n");
+               dma_unmap_single(dma_device, dma_addr, size,
+                                DMA_BIDIRECTIONAL);
+               kfree(mr);
+               return NULL;
+       }
+
+       mr->dma_addr = dma_addr;
+       mr->size = size;
+       mr->addr = buf;
+
+       return mr;
+}
+
+/* Undo dr_reg_mr(): destroy the mkey, unmap the buffer and free the MR
+ * wrapper.  The data buffer itself belongs to the caller.
+ */
+static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
+{
+       mlx5_core_destroy_mkey(mdev, mr->mkey);
+       dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size,
+                        DMA_BIDIRECTIONAL);
+       kfree(mr);
+}
+
+/* Allocate and initialize the domain's send ring: CQ, RC QP (moved to RTS),
+ * a main write buffer with its MR, and a separate sync buffer/MR used for
+ * drain operations.  Uses goto-based cleanup: every failure path unwinds
+ * exactly the resources acquired so far.  Returns 0 or -ENOMEM / the QP
+ * transition error.
+ */
+int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
+{
+       struct dr_qp_init_attr init_attr = {};
+       int cq_size;
+       int size;
+       int ret;
+
+       dmn->send_ring = kzalloc(sizeof(*dmn->send_ring), GFP_KERNEL);
+       if (!dmn->send_ring)
+               return -ENOMEM;
+
+       cq_size = QUEUE_SIZE + 1;
+       dmn->send_ring->cq = dr_create_cq(dmn->mdev, dmn->uar, cq_size);
+       if (!dmn->send_ring->cq) {
+               mlx5dr_err(dmn, "Failed creating CQ\n");
+               ret = -ENOMEM;
+               goto free_send_ring;
+       }
+
+       init_attr.cqn = dmn->send_ring->cq->mcq.cqn;
+       init_attr.pdn = dmn->pdn;
+       init_attr.uar = dmn->uar;
+       init_attr.max_send_wr = QUEUE_SIZE;
+
+       /* Isolated VL is applicable only if force loopback is supported */
+       if (dr_send_allow_fl(&dmn->info.caps))
+               init_attr.isolate_vl_tc = dmn->info.caps.isolate_vl_tc;
+
+       spin_lock_init(&dmn->send_ring->lock);
+
+       dmn->send_ring->qp = dr_create_rc_qp(dmn->mdev, &init_attr);
+       if (!dmn->send_ring->qp)  {
+               mlx5dr_err(dmn, "Failed creating QP\n");
+               ret = -ENOMEM;
+               goto clean_cq;
+       }
+
+       dmn->send_ring->cq->qp = dmn->send_ring->qp;
+
+       dmn->info.max_send_wr = QUEUE_SIZE;
+       dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
+                                       DR_STE_SIZE);
+
+       /* Signal one of every (max_send_wr / SIGNAL_PER_DIV_QUEUE) posts */
+       dmn->send_ring->signal_th = dmn->info.max_send_wr /
+               SIGNAL_PER_DIV_QUEUE;
+
+       /* Prepare qp to be used */
+       ret = dr_prepare_qp_to_rts(dmn);
+       if (ret)
+               goto clean_qp;
+
+       dmn->send_ring->max_post_send_size =
+               mlx5dr_icm_pool_chunk_size_to_byte(DR_CHUNK_SIZE_1K,
+                                                  DR_ICM_TYPE_STE);
+
+       /* Allocating the max size as a buffer for writing */
+       size = dmn->send_ring->signal_th * dmn->send_ring->max_post_send_size;
+       dmn->send_ring->buf = kzalloc(size, GFP_KERNEL);
+       if (!dmn->send_ring->buf) {
+               ret = -ENOMEM;
+               goto clean_qp;
+       }
+
+       dmn->send_ring->buf_size = size;
+
+       dmn->send_ring->mr = dr_reg_mr(dmn->mdev,
+                                      dmn->pdn, dmn->send_ring->buf, size);
+       if (!dmn->send_ring->mr) {
+               ret = -ENOMEM;
+               goto free_mem;
+       }
+
+       dmn->send_ring->sync_buff = kzalloc(dmn->send_ring->max_post_send_size,
+                                           GFP_KERNEL);
+       if (!dmn->send_ring->sync_buff) {
+               ret = -ENOMEM;
+               goto clean_mr;
+       }
+
+       dmn->send_ring->sync_mr = dr_reg_mr(dmn->mdev,
+                                           dmn->pdn, dmn->send_ring->sync_buff,
+                                           dmn->send_ring->max_post_send_size);
+       if (!dmn->send_ring->sync_mr) {
+               ret = -ENOMEM;
+               goto free_sync_mem;
+       }
+
+       return 0;
+
+free_sync_mem:
+       kfree(dmn->send_ring->sync_buff);
+clean_mr:
+       dr_dereg_mr(dmn->mdev, dmn->send_ring->mr);
+free_mem:
+       kfree(dmn->send_ring->buf);
+clean_qp:
+       dr_destroy_qp(dmn->mdev, dmn->send_ring->qp);
+clean_cq:
+       dr_destroy_cq(dmn->mdev, dmn->send_ring->cq);
+free_send_ring:
+       kfree(dmn->send_ring);
+
+       return ret;
+}
+
+/* Release everything allocated by mlx5dr_send_ring_alloc(): QP, CQ, both
+ * MRs, both buffers and the ring structure itself.
+ */
+void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
+                          struct mlx5dr_send_ring *send_ring)
+{
+       dr_destroy_qp(dmn->mdev, send_ring->qp);
+       dr_destroy_cq(dmn->mdev, send_ring->cq);
+       dr_dereg_mr(dmn->mdev, send_ring->sync_mr);
+       dr_dereg_mr(dmn->mdev, send_ring->mr);
+       kfree(send_ring->buf);
+       kfree(send_ring->sync_buff);
+       kfree(send_ring);
+}
+
+/* Force the send ring to drain: post enough dummy STE-sized writes to the
+ * sync MR to guarantee a signaled completion, then poll all pending
+ * completions under the ring lock.  Returns 0 on success or the first
+ * post/poll error.
+ */
+int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
+{
+       struct mlx5dr_send_ring *send_ring = dmn->send_ring;
+       struct postsend_info send_info = {};
+       u8 data[DR_STE_SIZE];
+       int num_of_sends_req;
+       int ret;
+       int i;
+
+       /* Sending this amount of requests makes sure we will get drain */
+       num_of_sends_req = send_ring->signal_th * TH_NUMS_TO_DRAIN / 2;
+
+       /* Send fake requests forcing the last to be signaled */
+       send_info.write.addr = (uintptr_t)data;
+       send_info.write.length = DR_STE_SIZE;
+       send_info.write.lkey = 0;
+       /* Using the sync_mr in order to write/read */
+       send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
+       send_info.rkey = send_ring->sync_mr->mkey;
+
+       for (i = 0; i < num_of_sends_req; i++) {
+               ret = dr_postsend_icm_data(dmn, &send_info);
+               if (ret)
+                       return ret;
+       }
+
+       spin_lock(&send_ring->lock);
+       ret = dr_handle_pending_wc(dmn, send_ring);
+       spin_unlock(&send_ring->lock);
+
+       return ret;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.c
new file mode 100644 (file)
index 0000000..e94fbb0
--- /dev/null
@@ -0,0 +1,1463 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include <linux/types.h>
+#include <linux/crc32.h>
+#include "dr_ste.h"
+
+/* SW-steering view of a full hardware STE: control words followed by the
+ * match tag and its bit mask.
+ */
+struct dr_hw_ste_format {
+       u8 ctrl[DR_STE_SIZE_CTRL];
+       u8 tag[DR_STE_SIZE_TAG];
+       u8 mask[DR_STE_SIZE_MASK];
+};
+
+/* CRC32 of @input_data with the result's byte order swapped — the device
+ * expects the CRC in the opposite endianness from the kernel's crc32().
+ */
+static u32 dr_ste_crc32_calc(const void *input_data, size_t length)
+{
+       u32 crc = crc32(0, input_data, length);
+
+       return (__force u32)((crc >> 24) & 0xff) | ((crc << 8) & 0xff0000) |
+                           ((crc >> 8) & 0xff00) | ((crc << 24) & 0xff000000);
+}
+
+/* TTL checksum recalculation is supported on steering formats newer than
+ * ConnectX-5.
+ */
+bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps)
+{
+       return caps->sw_format_ver > MLX5_STEERING_FORMAT_CONNECTX_5;
+}
+
+/* Hash an STE into its hash table: mask the tag byte-wise with the table's
+ * byte mask, CRC32 the result and fold it into the (power-of-two) table
+ * size.  Index 0 is returned directly for single-entry or fully-unmasked
+ * tables.
+ */
+u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl)
+{
+       u32 num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(htbl->chunk);
+       struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+       u8 masked[DR_STE_SIZE_TAG] = {};
+       u32 crc32, index;
+       u16 bit;
+       int i;
+
+       /* Don't calculate CRC if the result is predicted */
+       if (num_entries == 1 || htbl->byte_mask == 0)
+               return 0;
+
+       /* Mask tag using byte mask, bit per byte */
+       bit = 1 << (DR_STE_SIZE_TAG - 1);
+       for (i = 0; i < DR_STE_SIZE_TAG; i++) {
+               if (htbl->byte_mask & bit)
+                       masked[i] = hw_ste->tag[i];
+
+               bit = bit >> 1;
+       }
+
+       crc32 = dr_ste_crc32_calc(masked, DR_STE_SIZE_TAG);
+       index = crc32 & (num_entries - 1);
+
+       return index;
+}
+
+/* Collapse a per-bit mask into one bit per byte: a bit is set in the result
+ * only for bytes that are fully masked (0xff).  MSB of the result maps to
+ * bit_mask[0].
+ */
+u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
+{
+       u16 byte_mask = 0;
+       int i;
+
+       for (i = 0; i < DR_STE_SIZE_MASK; i++) {
+               byte_mask = byte_mask << 1;
+               if (bit_mask[i] == 0xff)
+                       byte_mask |= 1;
+       }
+       return byte_mask;
+}
+
+/* Return a pointer to the tag region inside a raw hw_ste buffer. */
+static u8 *dr_ste_get_tag(u8 *hw_ste_p)
+{
+       struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+
+       return hw_ste->tag;
+}
+
+/* Copy @bit_mask into the mask region of a raw hw_ste buffer. */
+void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
+{
+       struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
+
+       memcpy(hw_ste->mask, bit_mask, DR_STE_SIZE_MASK);
+}
+
+/* Zero tag and mask so the STE matches every packet. */
+static void dr_ste_set_always_hit(struct dr_hw_ste_format *hw_ste)
+{
+       memset(&hw_ste->tag, 0, sizeof(hw_ste->tag));
+       memset(&hw_ste->mask, 0, sizeof(hw_ste->mask));
+}
+
+/* Set a non-zero tag byte with a zero mask so the STE never matches and
+ * always takes the miss path.
+ */
+static void dr_ste_set_always_miss(struct dr_hw_ste_format *hw_ste)
+{
+       hw_ste->tag[0] = 0xdc;
+       hw_ste->mask[0] = 0;
+}
+
+/* Query the STE context whether a miss address is already programmed in
+ * @hw_ste_p; returns false when the context has no such check.
+ */
+bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx,
+                                u8 *hw_ste_p)
+{
+       if (!ste_ctx->is_miss_addr_set)
+               return false;
+
+       /* check if miss address is already set for this type of STE */
+       return ste_ctx->is_miss_addr_set(hw_ste_p);
+}
+
+/* Program the miss address of an STE via the format-specific callback. */
+void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+                             u8 *hw_ste_p, u64 miss_addr)
+{
+       ste_ctx->set_miss_addr(hw_ste_p, miss_addr);
+}
+
+/* Turn @hw_ste into an always-miss entry pointing at @miss_addr:
+ * don't-care next lookup type, programmed miss address, never-matching tag.
+ */
+static void dr_ste_always_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+                                   u8 *hw_ste, u64 miss_addr)
+{
+       ste_ctx->set_next_lu_type(hw_ste, MLX5DR_STE_LU_TYPE_DONT_CARE);
+       ste_ctx->set_miss_addr(hw_ste, miss_addr);
+       dr_ste_set_always_miss((struct dr_hw_ste_format *)hw_ste);
+}
+
+/* Program the hit address and hash-table size via the format callback. */
+void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
+                            u8 *hw_ste, u64 icm_addr, u32 ht_size)
+{
+       ste_ctx->set_hit_addr(hw_ste, icm_addr, ht_size);
+}
+
+/* Device ICM address of @ste: chunk base plus the STE's index (derived from
+ * pointer arithmetic within the chunk's ste_arr) times the STE size.
+ */
+u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste)
+{
+       u64 base_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(ste->htbl->chunk);
+       u32 index = ste - ste->htbl->chunk->ste_arr;
+
+       return base_icm_addr + DR_STE_SIZE * index;
+}
+
+/* MR (host-side) address of @ste within its chunk, by index. */
+u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste)
+{
+       u32 index = ste - ste->htbl->chunk->ste_arr;
+
+       return mlx5dr_icm_pool_get_chunk_mr_addr(ste->htbl->chunk) + DR_STE_SIZE * index;
+}
+
+/* Shadow copy of @ste in the chunk's hw_ste array; entries there are
+ * DR_STE_SIZE_REDUCED bytes each (no bit-mask region).
+ */
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste)
+{
+       u64 index = ste - ste->htbl->chunk->ste_arr;
+
+       return ste->htbl->chunk->hw_ste_arr + DR_STE_SIZE_REDUCED * index;
+}
+
+/* Miss (collision) list head for @ste's slot in its chunk. */
+struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste)
+{
+       u32 index = ste - ste->htbl->chunk->ste_arr;
+
+       return &ste->htbl->chunk->miss_list[index];
+}
+
+/* Turn @hw_ste into an always-hit entry that jumps into @next_htbl:
+ * copy the next table's byte mask and lookup type, program its ICM address
+ * and size as the hit target, then make the tag match everything.
+ */
+static void dr_ste_always_hit_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+                                  u8 *hw_ste,
+                                  struct mlx5dr_ste_htbl *next_htbl)
+{
+       struct mlx5dr_icm_chunk *chunk = next_htbl->chunk;
+
+       ste_ctx->set_byte_mask(hw_ste, next_htbl->byte_mask);
+       ste_ctx->set_next_lu_type(hw_ste, next_htbl->lu_type);
+       ste_ctx->set_hit_addr(hw_ste, mlx5dr_icm_pool_get_chunk_icm_addr(chunk),
+                             mlx5dr_icm_pool_get_chunk_num_of_entries(chunk));
+
+       dr_ste_set_always_hit((struct dr_hw_ste_format *)hw_ste);
+}
+
+/* True when @ste_location is the matcher's final STE builder position. */
+bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
+                               u8 ste_location)
+{
+       return ste_location == nic_matcher->num_of_builders;
+}
+
+/* Replace relevant fields, except of:
+ * htbl - keep the origin htbl
+ * miss_list + list - already took the src from the list.
+ * icm_addr/mr_addr - depends on the hosting table.
+ *
+ * Before:
+ * | a | -> | b | -> | c | ->
+ *
+ * After:
+ * | a | -> | c | ->
+ * While the data that was in b copied to a.
+ */
+static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src)
+{
+       memcpy(mlx5dr_ste_get_hw_ste(dst), mlx5dr_ste_get_hw_ste(src),
+              DR_STE_SIZE_REDUCED);
+       /* dst takes over src's next table; repoint its back-reference */
+       dst->next_htbl = src->next_htbl;
+       if (dst->next_htbl)
+               dst->next_htbl->pointing_ste = dst;
+
+       dst->refcount = src->refcount;
+}
+
+/* Free ste which is the head and the only one in miss_list.
+ * The slot is converted to an always-miss entry pointing at the matcher's
+ * end anchor, queued on @send_ste_list for a full-size HW write, and the
+ * stats table's valid-entry count is decremented.
+ */
+static void
+dr_ste_remove_head_ste(struct mlx5dr_ste_ctx *ste_ctx,
+                      struct mlx5dr_ste *ste,
+                      struct mlx5dr_matcher_rx_tx *nic_matcher,
+                      struct mlx5dr_ste_send_info *ste_info_head,
+                      struct list_head *send_ste_list,
+                      struct mlx5dr_ste_htbl *stats_tbl)
+{
+       u8 tmp_data_ste[DR_STE_SIZE] = {};
+       u64 miss_addr;
+
+       miss_addr = mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+
+       /* Use temp ste because dr_ste_always_miss_addr
+        * touches bit_mask area which doesn't exist at ste->hw_ste.
+        * Need to use a full-sized (DR_STE_SIZE) hw_ste.
+        */
+       memcpy(tmp_data_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
+       dr_ste_always_miss_addr(ste_ctx, tmp_data_ste, miss_addr);
+       memcpy(mlx5dr_ste_get_hw_ste(ste), tmp_data_ste, DR_STE_SIZE_REDUCED);
+
+       list_del_init(&ste->miss_list_node);
+
+       /* Write full STE size in order to have "always_miss" */
+       mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
+                                                 0, tmp_data_ste,
+                                                 ste_info_head,
+                                                 send_ste_list,
+                                                 true /* Copy data */);
+
+       stats_tbl->ctrl.num_of_valid_entries--;
+}
+
+/* Free ste which is the head but NOT the only one in miss_list:
+ * |_ste_| --> |_next_ste_| -->|__| -->|__| -->/0
+ * The next entry's data is copied into the head slot, the rule's last-member
+ * pointer is redirected, the next entry's hosting table is released, and the
+ * rewritten head is queued for a full-size HW write.
+ */
+static void
+dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher,
+                       struct mlx5dr_ste *ste,
+                       struct mlx5dr_ste *next_ste,
+                       struct mlx5dr_ste_send_info *ste_info_head,
+                       struct list_head *send_ste_list,
+                       struct mlx5dr_ste_htbl *stats_tbl)
+
+{
+       struct mlx5dr_ste_htbl *next_miss_htbl;
+       u8 hw_ste[DR_STE_SIZE] = {};
+       int sb_idx;
+
+       next_miss_htbl = next_ste->htbl;
+
+       /* Remove from the miss_list the next_ste before copy */
+       list_del_init(&next_ste->miss_list_node);
+
+       /* Move data from next into ste */
+       dr_ste_replace(ste, next_ste);
+
+       /* Update the rule on STE change */
+       mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false);
+
+       /* Copy all 64 hw_ste bytes */
+       memcpy(hw_ste, mlx5dr_ste_get_hw_ste(ste), DR_STE_SIZE_REDUCED);
+       sb_idx = ste->ste_chain_location - 1;
+       mlx5dr_ste_set_bit_mask(hw_ste,
+                               nic_matcher->ste_builder[sb_idx].bit_mask);
+
+       /* Del the htbl that contains the next_ste.
+        * The origin htbl stay with the same number of entries.
+        */
+       mlx5dr_htbl_put(next_miss_htbl);
+
+       mlx5dr_send_fill_and_append_ste_send_info(ste, DR_STE_SIZE,
+                                                 0, hw_ste,
+                                                 ste_info_head,
+                                                 send_ste_list,
+                                                 true /* Copy data */);
+
+       stats_tbl->ctrl.num_of_collisions--;
+       stats_tbl->ctrl.num_of_valid_entries--;
+}
+
+/* Free ste that is located in the middle of the miss list:
+ * |__| -->|_prev_ste_|->|_ste_|-->|_next_ste_|
+ * Relink the chain by copying this entry's miss address into its
+ * predecessor, queue the predecessor's control words for a HW update, and
+ * unlink the entry.
+ */
+static void dr_ste_remove_middle_ste(struct mlx5dr_ste_ctx *ste_ctx,
+                                    struct mlx5dr_ste *ste,
+                                    struct mlx5dr_ste_send_info *ste_info,
+                                    struct list_head *send_ste_list,
+                                    struct mlx5dr_ste_htbl *stats_tbl)
+{
+       struct mlx5dr_ste *prev_ste;
+       u64 miss_addr;
+
+       prev_ste = list_prev_entry(ste, miss_list_node);
+       if (WARN_ON(!prev_ste))
+               return;
+
+       miss_addr = ste_ctx->get_miss_addr(mlx5dr_ste_get_hw_ste(ste));
+       ste_ctx->set_miss_addr(mlx5dr_ste_get_hw_ste(prev_ste), miss_addr);
+
+       mlx5dr_send_fill_and_append_ste_send_info(prev_ste, DR_STE_SIZE_CTRL, 0,
+                                                 mlx5dr_ste_get_hw_ste(prev_ste),
+                                                 ste_info, send_ste_list,
+                                                 true /* Copy data*/);
+
+       list_del_init(&ste->miss_list_node);
+
+       stats_tbl->ctrl.num_of_valid_entries--;
+       stats_tbl->ctrl.num_of_collisions--;
+}
+
+/* Free an STE from its miss list, dispatching to the head-only / head-with-
+ * successors / middle cases, then flush all queued STE writes to HW and,
+ * unless the head was replaced in place, drop the hosting table reference.
+ */
+void mlx5dr_ste_free(struct mlx5dr_ste *ste,
+                    struct mlx5dr_matcher *matcher,
+                    struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+       struct mlx5dr_ste_send_info *cur_ste_info, *tmp_ste_info;
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
+       struct mlx5dr_ste_send_info ste_info_head;
+       struct mlx5dr_ste *next_ste, *first_ste;
+       bool put_on_origin_table = true;
+       struct mlx5dr_ste_htbl *stats_tbl;
+       LIST_HEAD(send_ste_list);
+
+       first_ste = list_first_entry(mlx5dr_ste_get_miss_list(ste),
+                                    struct mlx5dr_ste, miss_list_node);
+       stats_tbl = first_ste->htbl;
+
+       /* Two options:
+        * 1. ste is head:
+        *      a. head ste is the only ste in the miss list
+        *      b. head ste is not the only ste in the miss-list
+        * 2. ste is not head
+        */
+       if (first_ste == ste) { /* Ste is the head */
+               struct mlx5dr_ste *last_ste;
+
+               last_ste = list_last_entry(mlx5dr_ste_get_miss_list(ste),
+                                          struct mlx5dr_ste, miss_list_node);
+               if (last_ste == first_ste)
+                       next_ste = NULL;
+               else
+                       next_ste = list_next_entry(ste, miss_list_node);
+
+               if (!next_ste) {
+                       /* One and only entry in the list */
+                       dr_ste_remove_head_ste(ste_ctx, ste,
+                                              nic_matcher,
+                                              &ste_info_head,
+                                              &send_ste_list,
+                                              stats_tbl);
+               } else {
+                       /* First but not only entry in the list */
+                       dr_ste_replace_head_ste(nic_matcher, ste,
+                                               next_ste, &ste_info_head,
+                                               &send_ste_list, stats_tbl);
+                       put_on_origin_table = false;
+               }
+       } else { /* Ste in the middle of the list */
+               dr_ste_remove_middle_ste(ste_ctx, ste,
+                                        &ste_info_head, &send_ste_list,
+                                        stats_tbl);
+       }
+
+       /* Update HW */
+       list_for_each_entry_safe(cur_ste_info, tmp_ste_info,
+                                &send_ste_list, send_list) {
+               list_del(&cur_ste_info->send_list);
+               mlx5dr_send_postsend_ste(dmn, cur_ste_info->ste,
+                                        cur_ste_info->data, cur_ste_info->size,
+                                        cur_ste_info->offset);
+       }
+
+       if (put_on_origin_table)
+               mlx5dr_htbl_put(ste->htbl);
+}
+
+/* Compare only the tag regions of two raw hw_ste buffers. */
+bool mlx5dr_ste_equal_tag(void *src, void *dst)
+{
+       struct dr_hw_ste_format *s_hw_ste = (struct dr_hw_ste_format *)src;
+       struct dr_hw_ste_format *d_hw_ste = (struct dr_hw_ste_format *)dst;
+
+       return !memcmp(s_hw_ste->tag, d_hw_ste->tag, DR_STE_SIZE_TAG);
+}
+
+/* Point @hw_ste's hit address at @next_htbl's chunk (ICM address and
+ * entry count).
+ */
+void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+                                         u8 *hw_ste,
+                                         struct mlx5dr_ste_htbl *next_htbl)
+{
+       u64 icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(next_htbl->chunk);
+       u32 num_entries =
+               mlx5dr_icm_pool_get_chunk_num_of_entries(next_htbl->chunk);
+
+       ste_ctx->set_hit_addr(hw_ste, icm_addr, num_entries);
+}
+
+/* Give the STE format a chance to fix up @hw_ste_p before it is written to
+ * the device; a no-op for formats without the hook.
+ */
+void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
+                                    u8 *hw_ste_p, u32 ste_size)
+{
+       if (ste_ctx->prepare_for_postsend)
+               ste_ctx->prepare_for_postsend(hw_ste_p, ste_size);
+}
+
+/* Init one ste as a pattern for ste data array.
+ * The STE is initialized for the table's lookup type and then made either
+ * always-hit into connect_info->hit_next_htbl or always-miss to
+ * connect_info->miss_icm_addr, depending on the connect type.
+ */
+void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
+                                 u16 gvmi,
+                                 enum mlx5dr_domain_nic_type nic_type,
+                                 struct mlx5dr_ste_htbl *htbl,
+                                 u8 *formatted_ste,
+                                 struct mlx5dr_htbl_connect_info *connect_info)
+{
+       bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX;
+       u8 tmp_hw_ste[DR_STE_SIZE] = {0};
+
+       ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi);
+
+       /* Use temp ste because dr_ste_always_miss_addr/hit_htbl
+        * touches bit_mask area which doesn't exist at ste->hw_ste.
+        * Need to use a full-sized (DR_STE_SIZE) hw_ste.
+        */
+       memcpy(tmp_hw_ste, formatted_ste, DR_STE_SIZE_REDUCED);
+       if (connect_info->type == CONNECT_HIT)
+               dr_ste_always_hit_htbl(ste_ctx, tmp_hw_ste,
+                                      connect_info->hit_next_htbl);
+       else
+               dr_ste_always_miss_addr(ste_ctx, tmp_hw_ste,
+                                       connect_info->miss_icm_addr);
+       memcpy(formatted_ste, tmp_hw_ste, DR_STE_SIZE_REDUCED);
+}
+
+/* Build the formatted pattern STE for @htbl's connect info and write the
+ * whole table to the device.
+ */
+int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
+                                     struct mlx5dr_domain_rx_tx *nic_dmn,
+                                     struct mlx5dr_ste_htbl *htbl,
+                                     struct mlx5dr_htbl_connect_info *connect_info,
+                                     bool update_hw_ste)
+{
+       u8 formatted_ste[DR_STE_SIZE] = {};
+
+       mlx5dr_ste_set_formatted_ste(dmn->ste_ctx,
+                                    dmn->info.caps.gvmi,
+                                    nic_dmn->type,
+                                    htbl,
+                                    formatted_ste,
+                                    connect_info);
+
+       return mlx5dr_send_postsend_formatted_htbl(dmn, htbl, formatted_ste, update_hw_ste);
+}
+
+/* Allocate and connect the next hash table for @ste unless it is already
+ * the rule's last STE.  The new table is initialized to miss into the
+ * matcher's end anchor, written to HW, and linked via the current STE's
+ * hit address.  Returns 0, -ENOMEM on allocation failure, or -ENOENT if
+ * writing the new table to HW fails.
+ * NOTE(review): the HW-write failure path returns -ENOENT rather than
+ * propagating the postsend error — confirm callers rely only on non-zero.
+ */
+int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
+                               struct mlx5dr_matcher_rx_tx *nic_matcher,
+                               struct mlx5dr_ste *ste,
+                               u8 *cur_hw_ste,
+                               enum mlx5dr_icm_chunk_size log_table_size)
+{
+       struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
+       struct mlx5dr_htbl_connect_info info;
+       struct mlx5dr_ste_htbl *next_htbl;
+
+       if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
+               u16 next_lu_type;
+               u16 byte_mask;
+
+               next_lu_type = ste_ctx->get_next_lu_type(cur_hw_ste);
+               byte_mask = ste_ctx->get_byte_mask(cur_hw_ste);
+
+               next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+                                                 log_table_size,
+                                                 next_lu_type,
+                                                 byte_mask);
+               if (!next_htbl) {
+                       mlx5dr_dbg(dmn, "Failed allocating table\n");
+                       return -ENOMEM;
+               }
+
+               /* Write new table to HW */
+               info.type = CONNECT_MISS;
+               info.miss_icm_addr =
+                       mlx5dr_icm_pool_get_chunk_icm_addr(nic_matcher->e_anchor->chunk);
+               if (mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn, next_htbl,
+                                                     &info, false)) {
+                       mlx5dr_info(dmn, "Failed writing table to HW\n");
+                       goto free_table;
+               }
+
+               mlx5dr_ste_set_hit_addr_by_next_htbl(ste_ctx,
+                                                    cur_hw_ste, next_htbl);
+               ste->next_htbl = next_htbl;
+               next_htbl->pointing_ste = ste;
+       }
+
+       return 0;
+
+free_table:
+       mlx5dr_ste_htbl_free(next_htbl);
+       return -ENOENT;
+}
+
+/* Allocate an STE hash table backed by an ICM chunk of @chunk_size and
+ * initialize every entry and its per-entry miss list.
+ * Returns the new table, or NULL on allocation failure.
+ */
+struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
+                                             enum mlx5dr_icm_chunk_size chunk_size,
+                                             u16 lu_type, u16 byte_mask)
+{
+       struct mlx5dr_icm_chunk *chunk;
+       struct mlx5dr_ste_htbl *htbl;
+       u32 num_entries;
+       int i;
+
+       htbl = mlx5dr_icm_pool_alloc_htbl(pool);
+       if (!htbl)
+               return NULL;
+
+       chunk = mlx5dr_icm_alloc_chunk(pool, chunk_size);
+       if (!chunk)
+               goto out_free_htbl;
+
+       htbl->chunk = chunk;
+       htbl->lu_type = lu_type;
+       htbl->byte_mask = byte_mask;
+       htbl->refcount = 0;
+       htbl->pointing_ste = NULL;
+       htbl->ctrl.num_of_valid_entries = 0;
+       htbl->ctrl.num_of_collisions = 0;
+       num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
+
+       /* Link each STE back to its table and reset its miss-list state */
+       for (i = 0; i < num_entries; i++) {
+               struct mlx5dr_ste *ste = &chunk->ste_arr[i];
+
+               ste->htbl = htbl;
+               ste->refcount = 0;
+               INIT_LIST_HEAD(&ste->miss_list_node);
+               INIT_LIST_HEAD(&chunk->miss_list[i]);
+       }
+
+       return htbl;
+
+out_free_htbl:
+       mlx5dr_icm_pool_free_htbl(pool, htbl);
+       return NULL;
+}
+
+/* Free an STE hash table and its backing ICM chunk.
+ * Returns -EBUSY while the table is still referenced, 0 otherwise.
+ */
+int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl)
+{
+       /* Grab the pool before the chunk is freed */
+       struct mlx5dr_icm_pool *pool = htbl->chunk->buddy_mem->pool;
+
+       if (htbl->refcount)
+               return -EBUSY;
+
+       mlx5dr_icm_free_chunk(htbl->chunk);
+       mlx5dr_icm_pool_free_htbl(pool, htbl);
+
+       return 0;
+}
+
+/* Build the TX action STEs via the device-specific STE context callback. */
+void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+                              struct mlx5dr_domain *dmn,
+                              u8 *action_type_set,
+                              u8 *hw_ste_arr,
+                              struct mlx5dr_ste_actions_attr *attr,
+                              u32 *added_stes)
+{
+       ste_ctx->set_actions_tx(dmn, action_type_set, ste_ctx->actions_caps,
+                               hw_ste_arr, attr, added_stes);
+}
+
+/* Build the RX action STEs via the device-specific STE context callback. */
+void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+                              struct mlx5dr_domain *dmn,
+                              u8 *action_type_set,
+                              u8 *hw_ste_arr,
+                              struct mlx5dr_ste_actions_attr *attr,
+                              u32 *added_stes)
+{
+       ste_ctx->set_actions_rx(dmn, action_type_set, ste_ctx->actions_caps,
+                               hw_ste_arr, attr, added_stes);
+}
+
+/* Translate a SW modify-header field id into the device's HW field
+ * description.
+ * Returns NULL for ids beyond the context's translation table, or for
+ * entries whose start and end are both zero (an empty mapping).
+ */
+const struct mlx5dr_ste_action_modify_field *
+mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field)
+{
+       const struct mlx5dr_ste_action_modify_field *hw_field;
+
+       if (sw_field >= ste_ctx->modify_field_arr_sz)
+               return NULL;
+
+       hw_field = &ste_ctx->modify_field_arr[sw_field];
+       if (!hw_field->end && !hw_field->start)
+               return NULL;
+
+       return hw_field;
+}
+
+/* Encode a modify-header SET action into @hw_action via the STE context. */
+void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
+                              __be64 *hw_action,
+                              u8 hw_field,
+                              u8 shifter,
+                              u8 length,
+                              u32 data)
+{
+       ste_ctx->set_action_set((u8 *)hw_action,
+                               hw_field, shifter, length, data);
+}
+
+/* Encode a modify-header ADD action into @hw_action via the STE context. */
+void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
+                              __be64 *hw_action,
+                              u8 hw_field,
+                              u8 shifter,
+                              u8 length,
+                              u32 data)
+{
+       ste_ctx->set_action_add((u8 *)hw_action,
+                               hw_field, shifter, length, data);
+}
+
+/* Encode a modify-header COPY action (src field -> dst field) into
+ * @hw_action via the STE context.
+ */
+void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
+                               __be64 *hw_action,
+                               u8 dst_hw_field,
+                               u8 dst_shifter,
+                               u8 dst_len,
+                               u8 src_hw_field,
+                               u8 src_shifter)
+{
+       ste_ctx->set_action_copy((u8 *)hw_action,
+                                dst_hw_field, dst_shifter, dst_len,
+                                src_hw_field, src_shifter);
+}
+
+/* Build the HW action list for L3 decap re-creating the L2 header in
+ * @data. Only plain Ethernet headers are accepted: @data_sz must be 14
+ * (no VLAN) or 18 (single VLAN) bytes; otherwise -EINVAL is returned.
+ * On success *used_hw_action_num is set by the context callback.
+ */
+int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
+                                       void *data, u32 data_sz,
+                                       u8 *hw_action, u32 hw_action_sz,
+                                       u16 *used_hw_action_num)
+{
+       /* Only Ethernet frame is supported, with VLAN (18) or without (14) */
+       if (data_sz != HDR_LEN_L2 && data_sz != HDR_LEN_L2_W_VLAN)
+               return -EINVAL;
+
+       return ste_ctx->set_action_decap_l3_list(data, data_sz,
+                                                hw_action, hw_action_sz,
+                                                used_hw_action_num);
+}
+
+/* Legacy modify-header allocation: carve an action ICM chunk sized for
+ * the rewrite's action count, derive the HW action index from the chunk's
+ * ICM address, and post the rewrite data to HW.
+ * Returns 0 on success, -ENOMEM on failure.
+ * NOTE(review): a postsend failure is reported as -ENOMEM regardless of
+ * the underlying error code — confirm this is intentional.
+ */
+static int
+dr_ste_alloc_modify_hdr_chunk(struct mlx5dr_action *action)
+{
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
+       u32 chunk_size;
+       int ret;
+
+       chunk_size = ilog2(roundup_pow_of_two(action->rewrite->num_of_actions));
+
+       /* HW modify action index granularity is at least 64B */
+       chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);
+
+       action->rewrite->chunk = mlx5dr_icm_alloc_chunk(dmn->action_icm_pool,
+                                                       chunk_size);
+       if (!action->rewrite->chunk)
+               return -ENOMEM;
+
+       /* Index is the chunk's offset from the ICM base, in cache lines */
+       action->rewrite->index = (mlx5dr_icm_pool_get_chunk_icm_addr(action->rewrite->chunk) -
+                                 dmn->info.caps.hdr_modify_icm_addr) /
+                                DR_ACTION_CACHE_LINE_SIZE;
+
+       ret = mlx5dr_send_postsend_action(action->rewrite->dmn, action);
+       if (ret)
+               goto free_chunk;
+
+       return 0;
+
+free_chunk:
+       mlx5dr_icm_free_chunk(action->rewrite->chunk);
+       return -ENOMEM;
+}
+
+/* Release the action ICM chunk allocated by dr_ste_alloc_modify_hdr_chunk() */
+static void dr_ste_free_modify_hdr_chunk(struct mlx5dr_action *action)
+{
+       mlx5dr_icm_free_chunk(action->rewrite->chunk);
+}
+
+/* Allocate modify-header resources: use the STE context's pattern/argument
+ * aware path when the domain supports it, else the legacy ICM chunk path.
+ */
+int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action)
+{
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
+
+       if (mlx5dr_domain_is_support_ptrn_arg(dmn))
+               return dmn->ste_ctx->alloc_modify_hdr_chunk(action);
+
+       return dr_ste_alloc_modify_hdr_chunk(action);
+}
+
+/* Free modify-header resources, mirroring mlx5dr_ste_alloc_modify_hdr():
+ * pattern/argument aware path when supported, else the legacy chunk path.
+ */
+void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action)
+{
+       struct mlx5dr_domain *dmn = action->rewrite->dmn;
+
+       if (mlx5dr_domain_is_support_ptrn_arg(dmn))
+               return dmn->ste_ctx->dealloc_modify_hdr_chunk(action);
+
+       return dr_ste_free_modify_hdr_chunk(action);
+}
+
+/* Validate one match spec (outer or inner): when src/dst IP is matched,
+ * the ip_version mask must be full (0xf), or failing that the ethertype
+ * mask must be full (0xffff) — partial masks are rejected with -EINVAL.
+ */
+static int dr_ste_build_pre_check_spec(struct mlx5dr_domain *dmn,
+                                      struct mlx5dr_match_spec *spec)
+{
+       if (spec->ip_version) {
+               if (spec->ip_version != 0xf) {
+                       mlx5dr_err(dmn,
+                                  "Partial ip_version mask with src/dst IP is not supported\n");
+                       return -EINVAL;
+               }
+       } else if (spec->ethertype != 0xffff &&
+                  (DR_MASK_IS_SRC_IP_SET(spec) || DR_MASK_IS_DST_IP_SET(spec))) {
+               mlx5dr_err(dmn,
+                          "Partial/no ethertype mask with src/dst IP is not supported\n");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Validate a matcher mask before building STEs. Only the mask itself is
+ * checked (@value non-NULL means a rule value, which is always accepted).
+ * Rejects partial source_port / source_eswitch_owner_vhca_id masks and
+ * delegates outer/inner spec checks to dr_ste_build_pre_check_spec().
+ * Returns 0 if the mask is supported, -EINVAL otherwise.
+ */
+int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
+                              u8 match_criteria,
+                              struct mlx5dr_match_param *mask,
+                              struct mlx5dr_match_param *value)
+{
+       if (value)
+               return 0;
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
+               if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
+                       mlx5dr_err(dmn,
+                                  "Partial mask source_port is not supported\n");
+                       return -EINVAL;
+               }
+               if (mask->misc.source_eswitch_owner_vhca_id &&
+                   mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
+                       mlx5dr_err(dmn,
+                                  "Partial mask source_eswitch_owner_vhca_id is not supported\n");
+                       return -EINVAL;
+               }
+       }
+
+       if ((match_criteria & DR_MATCHER_CRITERIA_OUTER) &&
+           dr_ste_build_pre_check_spec(dmn, &mask->outer))
+               return -EINVAL;
+
+       if ((match_criteria & DR_MATCHER_CRITERIA_INNER) &&
+           dr_ste_build_pre_check_spec(dmn, &mask->inner))
+               return -EINVAL;
+
+       return 0;
+}
+
+/* Build the chain of STEs for a rule @value into @ste_arr, one DR_STE_SIZE
+ * entry per configured builder: init the STE, copy the builder's bit mask,
+ * build the match tag, and link each STE to the next builder's lookup
+ * type / byte mask (except the last).
+ * Returns 0 on success or a negative error from pre-check / tag building.
+ */
+int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
+                            struct mlx5dr_matcher_rx_tx *nic_matcher,
+                            struct mlx5dr_match_param *value,
+                            u8 *ste_arr)
+{
+       struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
+       bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX;
+       struct mlx5dr_domain *dmn = matcher->tbl->dmn;
+       struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx;
+       struct mlx5dr_ste_build *sb;
+       int ret, i;
+
+       ret = mlx5dr_ste_build_pre_check(dmn, matcher->match_criteria,
+                                        &matcher->mask, value);
+       if (ret)
+               return ret;
+
+       sb = nic_matcher->ste_builder;
+       for (i = 0; i < nic_matcher->num_of_builders; i++) {
+               ste_ctx->ste_init(ste_arr,
+                                 sb->lu_type,
+                                 is_rx,
+                                 dmn->info.caps.gvmi);
+
+               mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
+
+               ret = sb->ste_build_tag_func(value, sb, dr_ste_get_tag(ste_arr));
+               if (ret)
+                       return ret;
+
+               /* Connect the STEs */
+               if (i < (nic_matcher->num_of_builders - 1)) {
+                       /* Need the next builder for these fields,
+                        * not relevant for the last ste in the chain.
+                        */
+                       sb++;
+                       ste_ctx->set_next_lu_type(ste_arr, sb->lu_type);
+                       ste_ctx->set_byte_mask(ste_arr, sb->byte_mask);
+               }
+               ste_arr += DR_STE_SIZE;
+       }
+       return 0;
+}
+
+/* Read field @fld of struct @typ from buffer @p; when @clear is set, zero
+ * the field in place after reading. Evaluates to the field value.
+ */
+#define IFC_GET_CLR(typ, p, fld, clear) ({ \
+       void *__p = (p); \
+       u32 __t = MLX5_GET(typ, __p, fld); \
+       if (clear) \
+               MLX5_SET(typ, __p, fld, 0); \
+       __t; \
+})
+
+/* memcpy() @len bytes from @from to @to; when @clear is set, zero the
+ * source afterwards.
+ */
+#define memcpy_and_clear(to, from, len, clear) ({ \
+       void *__to = (to), *__from = (from); \
+       size_t __len = (len); \
+       memcpy(__to, __from, __len); \
+       if (clear) \
+               memset(__from, 0, __len); \
+})
+
+/* Copy the fte_match_set_misc fields from the device-format @mask buffer
+ * into the SW steering @spec; @clr additionally zeroes consumed fields in
+ * the source buffer.
+ */
+static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
+{
+       spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
+       spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
+       spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
+       spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
+       spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);
+
+       spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
+       spec->source_eswitch_owner_vhca_id =
+               IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);
+
+       spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
+       spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
+       spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
+       spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
+       spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
+       spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);
+
+       spec->outer_second_cvlan_tag =
+               IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
+       spec->inner_second_cvlan_tag =
+               IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
+       spec->outer_second_svlan_tag =
+               IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
+       spec->inner_second_svlan_tag =
+               IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
+       spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);
+
+       spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
+       spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);
+
+       spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);
+
+       spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
+       spec->geneve_tlv_option_0_exist =
+               IFC_GET_CLR(fte_match_set_misc, mask, geneve_tlv_option_0_exist, clr);
+       spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);
+
+       spec->outer_ipv6_flow_label =
+               IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);
+
+       spec->inner_ipv6_flow_label =
+               IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);
+
+       spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
+       spec->geneve_protocol_type =
+               IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);
+
+       spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
+}
+
+/* Copy the fte_match_set_lyr_2_4 (L2-L4) fields from the device-format
+ * @mask buffer into the SW steering @spec; @clr additionally zeroes
+ * consumed fields in the source. IPv6 addresses are staged through a
+ * local array to convert from big-endian to four host-order dwords.
+ */
+static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
+{
+       __be32 raw_ip[4];
+
+       spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);
+
+       spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
+       spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);
+
+       spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);
+
+       spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
+       spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
+       spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
+       spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);
+
+       spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
+       spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
+       spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
+       spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
+       spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
+       spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
+       spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
+       spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
+       spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
+       spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);
+
+       spec->ipv4_ihl = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ipv4_ihl, clr);
+       spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);
+
+       spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
+       spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);
+
+       memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+                                             src_ipv4_src_ipv6.ipv6_layout.ipv6),
+                        sizeof(raw_ip), clr);
+
+       spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
+       spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
+       spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
+       spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);
+
+       memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+                                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+                        sizeof(raw_ip), clr);
+
+       spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
+       spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
+       spec->dst_ip_63_32 = be32_to_cpu(raw_ip[2]);
+       spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
+}
+
+/* Copy the fte_match_set_misc2 (MPLS, metadata registers) fields from the
+ * device-format @mask buffer into the SW steering @spec; @clr additionally
+ * zeroes consumed fields in the source.
+ */
+static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
+{
+       spec->outer_first_mpls_label =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
+       spec->outer_first_mpls_exp =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
+       spec->outer_first_mpls_s_bos =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
+       spec->outer_first_mpls_ttl =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
+       spec->inner_first_mpls_label =
+               IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
+       spec->inner_first_mpls_exp =
+               IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
+       spec->inner_first_mpls_s_bos =
+               IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
+       spec->inner_first_mpls_ttl =
+               IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
+       spec->outer_first_mpls_over_gre_label =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
+       spec->outer_first_mpls_over_gre_exp =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
+       spec->outer_first_mpls_over_gre_s_bos =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
+       spec->outer_first_mpls_over_gre_ttl =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
+       spec->outer_first_mpls_over_udp_label =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
+       spec->outer_first_mpls_over_udp_exp =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
+       spec->outer_first_mpls_over_udp_s_bos =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
+       spec->outer_first_mpls_over_udp_ttl =
+               IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
+       spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
+       spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
+       spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
+       spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
+       spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
+       spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
+       spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
+       spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
+       spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
+}
+
+/* Copy the fte_match_set_misc3 (TCP seq/ack, VXLAN-GPE, ICMP, Geneve TLV,
+ * GTP-U) fields from the device-format @mask buffer into the SW steering
+ * @spec; @clr additionally zeroes consumed fields in the source.
+ */
+static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
+{
+       spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
+       spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
+       spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
+       spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
+       spec->outer_vxlan_gpe_vni =
+               IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
+       spec->outer_vxlan_gpe_next_protocol =
+               IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
+       spec->outer_vxlan_gpe_flags =
+               IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
+       spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
+       spec->icmpv6_header_data =
+               IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
+       spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
+       spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
+       spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
+       spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
+       spec->geneve_tlv_option_0_data =
+               IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
+       spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
+       spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
+       spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
+       spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
+       spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
+       spec->gtpu_first_ext_dw_0 =
+               IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
+}
+
+/* Copy the fte_match_set_misc4 (programmable sample fields) from the
+ * device-format @mask buffer into the SW steering @spec; @clr additionally
+ * zeroes consumed fields in the source.
+ */
+static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
+{
+       spec->prog_sample_field_id_0 =
+               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
+       spec->prog_sample_field_value_0 =
+               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
+       spec->prog_sample_field_id_1 =
+               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
+       spec->prog_sample_field_value_1 =
+               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
+       spec->prog_sample_field_id_2 =
+               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
+       spec->prog_sample_field_value_2 =
+               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
+       spec->prog_sample_field_id_3 =
+               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
+       spec->prog_sample_field_value_3 =
+               IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
+}
+
+/* Copy the fte_match_set_misc5 (MACsec tags, tunnel headers) fields from
+ * the device-format @mask buffer into the SW steering @spec; @clr
+ * additionally zeroes consumed fields in the source.
+ */
+static void dr_ste_copy_mask_misc5(char *mask, struct mlx5dr_match_misc5 *spec, bool clr)
+{
+       spec->macsec_tag_0 =
+               IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_0, clr);
+       spec->macsec_tag_1 =
+               IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_1, clr);
+       spec->macsec_tag_2 =
+               IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_2, clr);
+       spec->macsec_tag_3 =
+               IFC_GET_CLR(fte_match_set_misc5, mask, macsec_tag_3, clr);
+       spec->tunnel_header_0 =
+               IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_0, clr);
+       spec->tunnel_header_1 =
+               IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_1, clr);
+       spec->tunnel_header_2 =
+               IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_2, clr);
+       spec->tunnel_header_3 =
+               IFC_GET_CLR(fte_match_set_misc5, mask, tunnel_header_3, clr);
+}
+
+/* Copy a device-format match mask into @set_param, section by section as
+ * selected by @match_criteria (outer, misc, inner, misc2..misc5, laid out
+ * consecutively in the caller's buffer). A section that the caller's
+ * buffer only partially covers is staged through a zero-initialized
+ * bounce buffer (tail_param) so the per-section copy helpers never read
+ * past the supplied mask. @clr is forwarded to the helpers to zero
+ * consumed source fields.
+ */
+void mlx5dr_ste_copy_param(u8 match_criteria,
+                          struct mlx5dr_match_param *set_param,
+                          struct mlx5dr_match_parameters *mask,
+                          bool clr)
+{
+       u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
+       u8 *data = (u8 *)mask->match_buf;
+       size_t param_location;
+       void *buff;
+
+       if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
+               if (mask->match_sz < sizeof(struct mlx5dr_match_spec)) {
+                       memcpy(tail_param, data, mask->match_sz);
+                       buff = tail_param;
+               } else {
+                       buff = mask->match_buf;
+               }
+               dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
+       }
+       param_location = sizeof(struct mlx5dr_match_spec);
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC) {
+               if (mask->match_sz < param_location +
+                   sizeof(struct mlx5dr_match_misc)) {
+                       memcpy(tail_param, data + param_location,
+                              mask->match_sz - param_location);
+                       buff = tail_param;
+               } else {
+                       buff = data + param_location;
+               }
+               dr_ste_copy_mask_misc(buff, &set_param->misc, clr);
+       }
+       param_location += sizeof(struct mlx5dr_match_misc);
+
+       if (match_criteria & DR_MATCHER_CRITERIA_INNER) {
+               if (mask->match_sz < param_location +
+                   sizeof(struct mlx5dr_match_spec)) {
+                       memcpy(tail_param, data + param_location,
+                              mask->match_sz - param_location);
+                       buff = tail_param;
+               } else {
+                       buff = data + param_location;
+               }
+               dr_ste_copy_mask_spec(buff, &set_param->inner, clr);
+       }
+       param_location += sizeof(struct mlx5dr_match_spec);
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC2) {
+               if (mask->match_sz < param_location +
+                   sizeof(struct mlx5dr_match_misc2)) {
+                       memcpy(tail_param, data + param_location,
+                              mask->match_sz - param_location);
+                       buff = tail_param;
+               } else {
+                       buff = data + param_location;
+               }
+               dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr);
+       }
+
+       param_location += sizeof(struct mlx5dr_match_misc2);
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC3) {
+               if (mask->match_sz < param_location +
+                   sizeof(struct mlx5dr_match_misc3)) {
+                       memcpy(tail_param, data + param_location,
+                              mask->match_sz - param_location);
+                       buff = tail_param;
+               } else {
+                       buff = data + param_location;
+               }
+               dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr);
+       }
+
+       param_location += sizeof(struct mlx5dr_match_misc3);
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC4) {
+               if (mask->match_sz < param_location +
+                   sizeof(struct mlx5dr_match_misc4)) {
+                       memcpy(tail_param, data + param_location,
+                              mask->match_sz - param_location);
+                       buff = tail_param;
+               } else {
+                       buff = data + param_location;
+               }
+               dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
+       }
+
+       param_location += sizeof(struct mlx5dr_match_misc4);
+
+       if (match_criteria & DR_MATCHER_CRITERIA_MISC5) {
+               if (mask->match_sz < param_location +
+                   sizeof(struct mlx5dr_match_misc5)) {
+                       memcpy(tail_param, data + param_location,
+                              mask->match_sz - param_location);
+                       buff = tail_param;
+               } else {
+                       buff = data + param_location;
+               }
+               dr_ste_copy_mask_misc5(buff, &set_param->misc5, clr);
+       }
+}
+
+/* Init @sb to build the Ethernet L2 src+dst match STE via the STE context. */
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
+                                    struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask,
+                                    bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_eth_l2_src_dst_init(sb, mask);
+}
+
+/* Init @sb to build the IPv6 destination address match STE. */
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
+                                     struct mlx5dr_ste_build *sb,
+                                     struct mlx5dr_match_param *mask,
+                                     bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_eth_l3_ipv6_dst_init(sb, mask);
+}
+
+/* Init @sb to build the IPv6 source address match STE. */
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
+                                     struct mlx5dr_ste_build *sb,
+                                     struct mlx5dr_match_param *mask,
+                                     bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_eth_l3_ipv6_src_init(sb, mask);
+}
+
+/* Init @sb to build the IPv4 5-tuple match STE. */
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
+                                         struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask,
+                                         bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_eth_l3_ipv4_5_tuple_init(sb, mask);
+}
+
+/* Init @sb to build the Ethernet L2 source match STE. */
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask,
+                                bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_eth_l2_src_init(sb, mask);
+}
+
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask,
+                                bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_eth_l2_dst_init(sb, mask);
+}
+
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask, bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_eth_l2_tnl_init(sb, mask);
+}
+
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+                                      struct mlx5dr_ste_build *sb,
+                                      struct mlx5dr_match_param *mask,
+                                      bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_eth_l3_ipv4_misc_init(sb, mask);
+}
+
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
+                                    struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask,
+                                    bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_eth_ipv6_l3_l4_init(sb, mask);
+}
+
+/* Tag builder that writes nothing: with a zero byte_mask every packet
+ * matches, so this STE always "hits".
+ */
+static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
+                                            struct mlx5dr_ste_build *sb,
+                                            u8 *tag)
+{
+       return 0;
+}
+
+/* Build a don't-care STE that matches any packet (empty mask, empty tag). */
+void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx)
+{
+       sb->rx = rx;
+       sb->lu_type = MLX5DR_STE_LU_TYPE_DONT_CARE;
+       sb->byte_mask = 0;
+       sb->ste_build_tag_func = &dr_ste_build_empty_always_hit_tag;
+}
+
+/* Builder wrappers for MPLS and GRE-tunnel matching; same delegation
+ * pattern as the L2/L3 builders above.
+ */
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+                          struct mlx5dr_ste_build *sb,
+                          struct mlx5dr_match_param *mask,
+                          bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_mpls_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
+                             struct mlx5dr_ste_build *sb,
+                             struct mlx5dr_match_param *mask,
+                             bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_tnl_gre_init(sb, mask);
+}
+
+/* Build STE matcher for MPLS-over-GRE tunnel header fields.
+ * Saves the device flex-parser capabilities in the builder before
+ * delegating to the HW-version-specific init callback.
+ */
+void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
+                                       struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask,
+                                       struct mlx5dr_cmd_caps *caps,
+                                       bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       sb->caps = caps;
+       /* Plain call: the init callback returns void, so the previous
+        * 'return <void expr>;' was an ISO C constraint violation
+        * (C11 6.8.6.4) in a void function.
+        */
+       ste_ctx->build_tnl_mpls_over_gre_init(sb, mask);
+}
+
+/* Build STE matcher for MPLS-over-UDP tunnel header fields.
+ * Saves the device flex-parser capabilities in the builder before
+ * delegating to the HW-version-specific init callback.
+ */
+void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
+                                       struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask,
+                                       struct mlx5dr_cmd_caps *caps,
+                                       bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       sb->caps = caps;
+       /* Plain call: the init callback returns void, so the previous
+        * 'return <void expr>;' was an ISO C constraint violation
+        * (C11 6.8.6.4) in a void function.
+        */
+       ste_ctx->build_tnl_mpls_over_udp_init(sb, mask);
+}
+
+/* More delegation wrappers; icmp additionally records caps since ICMP
+ * matching uses flex-parser capabilities.
+ */
+void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+                          struct mlx5dr_ste_build *sb,
+                          struct mlx5dr_match_param *mask,
+                          struct mlx5dr_cmd_caps *caps,
+                          bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       sb->caps = caps;
+       ste_ctx->build_icmp_init(sb, mask);
+}
+
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
+                                     struct mlx5dr_ste_build *sb,
+                                     struct mlx5dr_match_param *mask,
+                                     bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_general_purpose_init(sb, mask);
+}
+
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+                                 struct mlx5dr_ste_build *sb,
+                                 struct mlx5dr_match_param *mask,
+                                 bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_eth_l4_misc_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
+                                   struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask,
+                                   bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_tnl_vxlan_gpe_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask,
+                                bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_tnl_geneve_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
+                                        struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask,
+                                        struct mlx5dr_cmd_caps *caps,
+                                        bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->caps = caps;
+       sb->inner = inner;
+       ste_ctx->build_tnl_geneve_tlv_opt_init(sb, mask);
+}
+
+/* GENEVE TLV option existence matching: silently a no-op when the STE
+ * version does not provide this callback (it is optional, unlike the
+ * other builders which are assumed present).
+ */
+void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
+                                              struct mlx5dr_ste_build *sb,
+                                              struct mlx5dr_match_param *mask,
+                                              struct mlx5dr_cmd_caps *caps,
+                                              bool inner, bool rx)
+{
+       if (!ste_ctx->build_tnl_geneve_tlv_opt_exist_init)
+               return;
+
+       sb->rx = rx;
+       sb->caps = caps;
+       sb->inner = inner;
+       ste_ctx->build_tnl_geneve_tlv_opt_exist_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
+                              struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask,
+                              bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_tnl_gtpu_init(sb, mask);
+}
+
+/* GTP-U matching via device flex parsers 0/1; caps carry the parser ids. */
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+                                            struct mlx5dr_ste_build *sb,
+                                            struct mlx5dr_match_param *mask,
+                                            struct mlx5dr_cmd_caps *caps,
+                                            bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->caps = caps;
+       sb->inner = inner;
+       ste_ctx->build_tnl_gtpu_flex_parser_0_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                            struct mlx5dr_ste_build *sb,
+                                            struct mlx5dr_match_param *mask,
+                                            struct mlx5dr_cmd_caps *caps,
+                                            bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->caps = caps;
+       sb->inner = inner;
+       ste_ctx->build_tnl_gtpu_flex_parser_1_init(sb, mask);
+}
+
+/* Steering-register (metadata register) matching builders. */
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask,
+                                bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_register_0_init(sb, mask);
+}
+
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask,
+                                bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_register_1_init(sb, mask);
+}
+
+/* Source GVMI/QPN matching; also records the domain so the init callback
+ * can resolve vport information.
+ */
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
+                                  struct mlx5dr_ste_build *sb,
+                                  struct mlx5dr_match_param *mask,
+                                  struct mlx5dr_domain *dmn,
+                                  bool inner, bool rx)
+{
+       /* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
+       sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
+
+       sb->rx = rx;
+       sb->dmn = dmn;
+       sb->inner = inner;
+       ste_ctx->build_src_gvmi_qpn_init(sb, mask);
+}
+
+/* Generic flex-parser and tunnel-header-word builders. */
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+                                   struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask,
+                                   bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_flex_parser_0_init(sb, mask);
+}
+
+void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                   struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask,
+                                   bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_flex_parser_1_init(sb, mask);
+}
+
+void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                    struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask,
+                                    bool inner, bool rx)
+{
+       sb->rx = rx;
+       sb->inner = inner;
+       ste_ctx->build_tnl_header_0_1_init(sb, mask);
+}
+
+/* Return the STE context implementation matching the device steering
+ * format version (CX-5 -> v0, CX-6DX -> v1, CX-7 -> v2), or NULL when
+ * the version is not supported by SW steering.
+ */
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version)
+{
+       if (version == MLX5_STEERING_FORMAT_CONNECTX_5)
+               return mlx5dr_ste_get_ctx_v0();
+       else if (version == MLX5_STEERING_FORMAT_CONNECTX_6DX)
+               return mlx5dr_ste_get_ctx_v1();
+       else if (version == MLX5_STEERING_FORMAT_CONNECTX_7)
+               return mlx5dr_ste_get_ctx_v2();
+
+       return NULL;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste.h
new file mode 100644 (file)
index 0000000..54a6619
--- /dev/null
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef        _DR_STE_
+#define        _DR_STE_
+
+#include "dr_types.h"
+
+/* Protocol/version encodings used inside STE tag fields. */
+#define STE_IPV4 0x1
+#define STE_IPV6 0x2
+#define STE_TCP 0x1
+#define STE_UDP 0x2
+#define STE_SPI 0x3
+#define IP_VERSION_IPV4 0x4
+#define IP_VERSION_IPV6 0x6
+#define STE_SVLAN 0x1
+#define STE_CVLAN 0x2
+/* L2 header length components, in bytes (MACs, VLAN tag, ethertype). */
+#define HDR_LEN_L2_MACS   0xC
+#define HDR_LEN_L2_VLAN   0x4
+#define HDR_LEN_L2_ETHER  0x2
+#define HDR_LEN_L2        (HDR_LEN_L2_MACS + HDR_LEN_L2_ETHER)
+#define HDR_LEN_L2_W_VLAN (HDR_LEN_L2 + HDR_LEN_L2_VLAN)
+
+/* Set to STE a specific value using DR_STE_SET */
+#define DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, value) do { \
+       if ((spec)->s_fname) { \
+               MLX5_SET(ste_##lookup_type, tag, t_fname, value); \
+               (spec)->s_fname = 0; \
+       } \
+} while (0)
+
+/* Set to STE spec->s_fname to tag->t_fname set spec->s_fname as used */
+#define DR_STE_SET_TAG(lookup_type, tag, t_fname, spec, s_fname) \
+       DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, spec->s_fname)
+
+/* Set to STE -1 to tag->t_fname and set spec->s_fname as used */
+#define DR_STE_SET_ONES(lookup_type, tag, t_fname, spec, s_fname) \
+       DR_STE_SET_VAL(lookup_type, tag, t_fname, spec, s_fname, -1)
+
+/* Copy each TCP flag bit of spec->tcp_flags into its own STE tag field. */
+#define DR_STE_SET_TCP_FLAGS(lookup_type, tag, spec) do { \
+       MLX5_SET(ste_##lookup_type, tag, tcp_ns, !!((spec)->tcp_flags & (1 << 8))); \
+       MLX5_SET(ste_##lookup_type, tag, tcp_cwr, !!((spec)->tcp_flags & (1 << 7))); \
+       MLX5_SET(ste_##lookup_type, tag, tcp_ece, !!((spec)->tcp_flags & (1 << 6))); \
+       MLX5_SET(ste_##lookup_type, tag, tcp_urg, !!((spec)->tcp_flags & (1 << 5))); \
+       MLX5_SET(ste_##lookup_type, tag, tcp_ack, !!((spec)->tcp_flags & (1 << 4))); \
+       MLX5_SET(ste_##lookup_type, tag, tcp_psh, !!((spec)->tcp_flags & (1 << 3))); \
+       MLX5_SET(ste_##lookup_type, tag, tcp_rst, !!((spec)->tcp_flags & (1 << 2))); \
+       MLX5_SET(ste_##lookup_type, tag, tcp_syn, !!((spec)->tcp_flags & (1 << 1))); \
+       MLX5_SET(ste_##lookup_type, tag, tcp_fin, !!((spec)->tcp_flags & (1 << 0))); \
+} while (0)
+
+/* Copy the first-MPLS-label subfields; in_out selects the inner_/outer_
+ * prefixed members of the misc2 match struct.
+ */
+#define DR_STE_SET_MPLS(lookup_type, mask, in_out, tag) do { \
+       struct mlx5dr_match_misc2 *_mask = mask; \
+       u8 *_tag = tag; \
+       DR_STE_SET_TAG(lookup_type, _tag, mpls0_label, _mask, \
+                      in_out##_first_mpls_label);\
+       DR_STE_SET_TAG(lookup_type, _tag, mpls0_s_bos, _mask, \
+                      in_out##_first_mpls_s_bos); \
+       DR_STE_SET_TAG(lookup_type, _tag, mpls0_exp, _mask, \
+                      in_out##_first_mpls_exp); \
+       DR_STE_SET_TAG(lookup_type, _tag, mpls0_ttl, _mask, \
+                      in_out##_first_mpls_ttl); \
+} while (0)
+
+/* Write a 32-bit flex-parser value at its per-parser-id tag offset and
+ * mark the spec field as consumed.
+ */
+#define DR_STE_SET_FLEX_PARSER_FIELD(tag, fname, caps, spec) do { \
+       u8 parser_id = (caps)->flex_parser_id_##fname; \
+       u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id); \
+       *(__be32 *)parser_ptr = cpu_to_be32((spec)->fname);\
+       (spec)->fname = 0;\
+} while (0)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(_misc) (\
+       (_misc)->outer_first_mpls_over_gre_label || \
+       (_misc)->outer_first_mpls_over_gre_exp || \
+       (_misc)->outer_first_mpls_over_gre_s_bos || \
+       (_misc)->outer_first_mpls_over_gre_ttl)
+
+#define DR_STE_IS_OUTER_MPLS_OVER_UDP_SET(_misc) (\
+       (_misc)->outer_first_mpls_over_udp_label || \
+       (_misc)->outer_first_mpls_over_udp_exp || \
+       (_misc)->outer_first_mpls_over_udp_s_bos || \
+       (_misc)->outer_first_mpls_over_udp_ttl)
+
+/* L3/L4 header type selectors for modify-header actions. */
+enum dr_ste_action_modify_type_l3 {
+       DR_STE_ACTION_MDFY_TYPE_L3_NONE = 0x0,
+       DR_STE_ACTION_MDFY_TYPE_L3_IPV4 = 0x1,
+       DR_STE_ACTION_MDFY_TYPE_L3_IPV6 = 0x2,
+};
+
+enum dr_ste_action_modify_type_l4 {
+       DR_STE_ACTION_MDFY_TYPE_L4_NONE = 0x0,
+       DR_STE_ACTION_MDFY_TYPE_L4_TCP  = 0x1,
+       DR_STE_ACTION_MDFY_TYPE_L4_UDP  = 0x2,
+};
+
+/* Bit offsets of the subfields within a 32-bit MPLS header word. */
+enum {
+       HDR_MPLS_OFFSET_LABEL   = 12,
+       HDR_MPLS_OFFSET_EXP     = 9,
+       HDR_MPLS_OFFSET_S_BOS   = 8,
+       HDR_MPLS_OFFSET_TTL     = 0,
+};
+
+u16 mlx5dr_ste_conv_bit_to_byte_mask(u8 *bit_mask);
+
+static inline u8 *
+dr_ste_calc_flex_parser_offset(u8 *tag, u8 parser_id)
+{
+       /* Calculate tag byte offset based on flex parser id */
+       return tag + 4 * (3 - (parser_id % 4));
+}
+
+/* Declares a function-pointer member named build_<fname>_init; each
+ * HW-steering version (v0/v1/v2) supplies its own implementation.
+ */
+#define DR_STE_CTX_BUILDER(fname) \
+       ((*build_##fname##_init)(struct mlx5dr_ste_build *sb, \
+                                struct mlx5dr_match_param *mask))
+
+/* Per-HW-version vtable for all STE operations: matcher builders, raw
+ * STE field accessors, action writers and send preparation.
+ */
+struct mlx5dr_ste_ctx {
+       /* Builders */
+       void DR_STE_CTX_BUILDER(eth_l2_src_dst);
+       void DR_STE_CTX_BUILDER(eth_l3_ipv6_src);
+       void DR_STE_CTX_BUILDER(eth_l3_ipv6_dst);
+       void DR_STE_CTX_BUILDER(eth_l3_ipv4_5_tuple);
+       void DR_STE_CTX_BUILDER(eth_l2_src);
+       void DR_STE_CTX_BUILDER(eth_l2_dst);
+       void DR_STE_CTX_BUILDER(eth_l2_tnl);
+       void DR_STE_CTX_BUILDER(eth_l3_ipv4_misc);
+       void DR_STE_CTX_BUILDER(eth_ipv6_l3_l4);
+       void DR_STE_CTX_BUILDER(mpls);
+       void DR_STE_CTX_BUILDER(tnl_gre);
+       void DR_STE_CTX_BUILDER(tnl_mpls);
+       void DR_STE_CTX_BUILDER(tnl_mpls_over_gre);
+       void DR_STE_CTX_BUILDER(tnl_mpls_over_udp);
+       void DR_STE_CTX_BUILDER(icmp);
+       void DR_STE_CTX_BUILDER(general_purpose);
+       void DR_STE_CTX_BUILDER(eth_l4_misc);
+       void DR_STE_CTX_BUILDER(tnl_vxlan_gpe);
+       void DR_STE_CTX_BUILDER(tnl_geneve);
+       void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt);
+       /* Optional: may be NULL on versions without TLV-option-exist support */
+       void DR_STE_CTX_BUILDER(tnl_geneve_tlv_opt_exist);
+       void DR_STE_CTX_BUILDER(register_0);
+       void DR_STE_CTX_BUILDER(register_1);
+       void DR_STE_CTX_BUILDER(src_gvmi_qpn);
+       void DR_STE_CTX_BUILDER(flex_parser_0);
+       void DR_STE_CTX_BUILDER(flex_parser_1);
+       void DR_STE_CTX_BUILDER(tnl_gtpu);
+       void DR_STE_CTX_BUILDER(tnl_header_0_1);
+       void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_0);
+       void DR_STE_CTX_BUILDER(tnl_gtpu_flex_parser_1);
+
+       /* Getters and Setters */
+       void (*ste_init)(u8 *hw_ste_p, u16 lu_type,
+                        bool is_rx, u16 gvmi);
+       void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type);
+       u16  (*get_next_lu_type)(u8 *hw_ste_p);
+       bool (*is_miss_addr_set)(u8 *hw_ste_p);
+       void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr);
+       u64  (*get_miss_addr)(u8 *hw_ste_p);
+       void (*set_hit_addr)(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+       void (*set_byte_mask)(u8 *hw_ste_p, u16 byte_mask);
+       u16  (*get_byte_mask)(u8 *hw_ste_p);
+
+       /* Actions */
+       u32 actions_caps;
+       void (*set_actions_rx)(struct mlx5dr_domain *dmn,
+                              u8 *action_type_set,
+                              u32 actions_caps,
+                              u8 *hw_ste_arr,
+                              struct mlx5dr_ste_actions_attr *attr,
+                              u32 *added_stes);
+       void (*set_actions_tx)(struct mlx5dr_domain *dmn,
+                              u8 *action_type_set,
+                              u32 actions_caps,
+                              u8 *hw_ste_arr,
+                              struct mlx5dr_ste_actions_attr *attr,
+                              u32 *added_stes);
+       u32 modify_field_arr_sz;
+       const struct mlx5dr_ste_action_modify_field *modify_field_arr;
+       void (*set_action_set)(u8 *hw_action,
+                              u8 hw_field,
+                              u8 shifter,
+                              u8 length,
+                              u32 data);
+       void (*set_action_add)(u8 *hw_action,
+                              u8 hw_field,
+                              u8 shifter,
+                              u8 length,
+                              u32 data);
+       void (*set_action_copy)(u8 *hw_action,
+                               u8 dst_hw_field,
+                               u8 dst_shifter,
+                               u8 dst_len,
+                               u8 src_hw_field,
+                               u8 src_shifter);
+       int (*set_action_decap_l3_list)(void *data,
+                                       u32 data_sz,
+                                       u8 *hw_action,
+                                       u32 hw_action_sz,
+                                       u16 *used_hw_action_num);
+       int (*alloc_modify_hdr_chunk)(struct mlx5dr_action *action);
+       void (*dealloc_modify_hdr_chunk)(struct mlx5dr_action *action);
+
+       /* Send */
+       void (*prepare_for_postsend)(u8 *hw_ste_p, u32 ste_size);
+};
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void);
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void);
+
+#endif  /* _DR_STE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v0.c
new file mode 100644 (file)
index 0000000..e9f6c7e
--- /dev/null
@@ -0,0 +1,1962 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#include <linux/types.h>
+#include <linux/crc32.h>
+#include "dr_ste.h"
+
+/* 802.1ad service-VLAN ethertype */
+#define SVLAN_ETHERTYPE                0x88a8
+#define DR_STE_ENABLE_FLOW_TAG BIT(31)
+
+/* v0 hardware STE entry types. */
+enum dr_ste_v0_entry_type {
+       DR_STE_TYPE_TX          = 1,
+       DR_STE_TYPE_RX          = 2,
+       DR_STE_TYPE_MODIFY_PKT  = 6,
+};
+
+/* v0 tunnel-action encodings. */
+enum dr_ste_v0_action_tunl {
+       DR_STE_TUNL_ACTION_NONE         = 0,
+       DR_STE_TUNL_ACTION_ENABLE       = 1,
+       DR_STE_TUNL_ACTION_DECAP        = 2,
+       DR_STE_TUNL_ACTION_L3_DECAP     = 3,
+       DR_STE_TUNL_ACTION_POP_VLAN     = 4,
+};
+
+enum dr_ste_v0_action_type {
+       DR_STE_ACTION_TYPE_PUSH_VLAN    = 1,
+       DR_STE_ACTION_TYPE_ENCAP_L3     = 3,
+       DR_STE_ACTION_TYPE_ENCAP        = 4,
+};
+
+enum dr_ste_v0_action_mdfy_op {
+       DR_STE_ACTION_MDFY_OP_COPY      = 0x1,
+       DR_STE_ACTION_MDFY_OP_SET       = 0x2,
+       DR_STE_ACTION_MDFY_OP_ADD       = 0x3,
+};
+
+/* Pick the inner (_I), RX (_D) or outer (_O) variant of a v0 lookup type. */
+#define DR_STE_CALC_LU_TYPE(lookup_type, rx, inner) \
+       ((inner) ? DR_STE_V0_LU_TYPE_##lookup_type##_I : \
+                  (rx) ? DR_STE_V0_LU_TYPE_##lookup_type##_D : \
+                         DR_STE_V0_LU_TYPE_##lookup_type##_O)
+
+/* v0 hardware lookup-type codes; suffixes follow DR_STE_CALC_LU_TYPE. */
+enum {
+       DR_STE_V0_LU_TYPE_NOP                           = 0x00,
+       DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP               = 0x05,
+       DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I             = 0x0a,
+       DR_STE_V0_LU_TYPE_ETHL2_DST_O                   = 0x06,
+       DR_STE_V0_LU_TYPE_ETHL2_DST_I                   = 0x07,
+       DR_STE_V0_LU_TYPE_ETHL2_DST_D                   = 0x1b,
+       DR_STE_V0_LU_TYPE_ETHL2_SRC_O                   = 0x08,
+       DR_STE_V0_LU_TYPE_ETHL2_SRC_I                   = 0x09,
+       DR_STE_V0_LU_TYPE_ETHL2_SRC_D                   = 0x1c,
+       DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_O               = 0x36,
+       DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_I               = 0x37,
+       DR_STE_V0_LU_TYPE_ETHL2_SRC_DST_D               = 0x38,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_O              = 0x0d,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_I              = 0x0e,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV6_DST_D              = 0x1e,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_O              = 0x0f,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_I              = 0x10,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV6_SRC_D              = 0x1f,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_O          = 0x11,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_I          = 0x12,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV4_5_TUPLE_D          = 0x20,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_O             = 0x29,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_I             = 0x2a,
+       DR_STE_V0_LU_TYPE_ETHL3_IPV4_MISC_D             = 0x2b,
+       DR_STE_V0_LU_TYPE_ETHL4_O                       = 0x13,
+       DR_STE_V0_LU_TYPE_ETHL4_I                       = 0x14,
+       DR_STE_V0_LU_TYPE_ETHL4_D                       = 0x21,
+       DR_STE_V0_LU_TYPE_ETHL4_MISC_O                  = 0x2c,
+       DR_STE_V0_LU_TYPE_ETHL4_MISC_I                  = 0x2d,
+       DR_STE_V0_LU_TYPE_ETHL4_MISC_D                  = 0x2e,
+       DR_STE_V0_LU_TYPE_MPLS_FIRST_O                  = 0x15,
+       DR_STE_V0_LU_TYPE_MPLS_FIRST_I                  = 0x24,
+       DR_STE_V0_LU_TYPE_MPLS_FIRST_D                  = 0x25,
+       DR_STE_V0_LU_TYPE_GRE                           = 0x16,
+       DR_STE_V0_LU_TYPE_FLEX_PARSER_0                 = 0x22,
+       DR_STE_V0_LU_TYPE_FLEX_PARSER_1                 = 0x23,
+       DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER        = 0x19,
+       DR_STE_V0_LU_TYPE_GENERAL_PURPOSE               = 0x18,
+       DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0          = 0x2f,
+       DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1          = 0x30,
+       DR_STE_V0_LU_TYPE_TUNNEL_HEADER                 = 0x34,
+       DR_STE_V0_LU_TYPE_DONT_CARE                     = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+/* v0 modify-header hardware field indices (hw_field values for the
+ * modify-field array below).
+ */
+enum {
+       DR_STE_V0_ACTION_MDFY_FLD_L2_0          = 0,
+       DR_STE_V0_ACTION_MDFY_FLD_L2_1          = 1,
+       DR_STE_V0_ACTION_MDFY_FLD_L2_2          = 2,
+       DR_STE_V0_ACTION_MDFY_FLD_L3_0          = 3,
+       DR_STE_V0_ACTION_MDFY_FLD_L3_1          = 4,
+       DR_STE_V0_ACTION_MDFY_FLD_L3_2          = 5,
+       DR_STE_V0_ACTION_MDFY_FLD_L3_3          = 6,
+       DR_STE_V0_ACTION_MDFY_FLD_L3_4          = 7,
+       DR_STE_V0_ACTION_MDFY_FLD_L4_0          = 8,
+       DR_STE_V0_ACTION_MDFY_FLD_L4_1          = 9,
+       DR_STE_V0_ACTION_MDFY_FLD_MPLS          = 10,
+       DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_0      = 11,
+       DR_STE_V0_ACTION_MDFY_FLD_REG_0         = 12,
+       DR_STE_V0_ACTION_MDFY_FLD_REG_1         = 13,
+       DR_STE_V0_ACTION_MDFY_FLD_REG_2         = 14,
+       DR_STE_V0_ACTION_MDFY_FLD_REG_3         = 15,
+       DR_STE_V0_ACTION_MDFY_FLD_L4_2          = 16,
+       DR_STE_V0_ACTION_MDFY_FLD_FLEX_0        = 17,
+       DR_STE_V0_ACTION_MDFY_FLD_FLEX_1        = 18,
+       DR_STE_V0_ACTION_MDFY_FLD_FLEX_2        = 19,
+       DR_STE_V0_ACTION_MDFY_FLD_FLEX_3        = 20,
+       DR_STE_V0_ACTION_MDFY_FLD_L2_TNL_1      = 21,
+       DR_STE_V0_ACTION_MDFY_FLD_METADATA      = 22,
+       DR_STE_V0_ACTION_MDFY_FLD_RESERVED      = 23,
+};
+
+/* Map each MLX5_ACTION_IN_FIELD_* modify-header field to its v0 hardware
+ * field, the bit range [start, end] within that field, and (where relevant)
+ * the L3/L4 header type the packet must have for the modification to apply.
+ */
+static const struct mlx5dr_ste_action_modify_field dr_ste_v0_action_modify_field_arr[] = {
+       [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 16, .end = 47,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_1, .start = 0, .end = 15,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 32, .end = 47,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 16, .end = 47,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_0, .start = 0, .end = 15,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 0, .end = 5,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 48, .end = 56,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_1, .start = 8, .end = 15,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 0, .end = 15,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_0, .start = 16, .end = 31,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 32, .end = 63,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_3, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 32, .end = 63,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_4, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 32, .end = 63,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_2, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L3_0, .start = 32, .end = 63,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_METADATA, .start = 32, .end = 63,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 32, .end = 63,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 32, .end = 63,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 32, .end = 63,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_REG_2, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 32, .end = 63,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L4_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+               .hw_field = DR_STE_V0_ACTION_MDFY_FLD_L2_2, .start = 0, .end = 15,
+       },
+};
+
+static void dr_ste_v0_set_entry_type(u8 *hw_ste_p, u8 entry_type)
+{
+       MLX5_SET(ste_general, hw_ste_p, entry_type, entry_type);
+}
+
+static u8 dr_ste_v0_get_entry_type(u8 *hw_ste_p)
+{
+       return MLX5_GET(ste_general, hw_ste_p, entry_type);
+}
+
+/* Program the STE miss address. The address is 64-byte aligned, so it is
+ * stored as an index (addr >> 6) split across two hardware fields.
+ */
+static void dr_ste_v0_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+{
+       u64 index = miss_addr >> 6;
+
+       /* Miss address for TX and RX STEs located in the same offsets */
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32, index >> 26);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6, index);
+}
+
+/* Reassemble the 64-byte-aligned miss address from its two split fields
+ * (inverse of dr_ste_v0_set_miss_addr).
+ */
+static u64 dr_ste_v0_get_miss_addr(u8 *hw_ste_p)
+{
+       u64 lo = MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_31_6);
+       u64 hi = MLX5_GET(ste_rx_steering_mult, hw_ste_p, miss_address_39_32);
+
+       return ((hi << 26) | lo) << 6;
+}
+
+/* Write the byte_mask field of the STE general header */
+static void dr_ste_v0_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+{
+       MLX5_SET(ste_general, hw_ste_p, byte_mask, byte_mask);
+}
+
+/* Read back the byte_mask field of the STE general header */
+static u16 dr_ste_v0_get_byte_mask(u8 *hw_ste_p)
+{
+       return MLX5_GET(ste_general, hw_ste_p, byte_mask);
+}
+
+/* Set this STE's own lookup type; on STEv0 it lives in entry_sub_type */
+static void dr_ste_v0_set_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+       MLX5_SET(ste_general, hw_ste_p, entry_sub_type, lu_type);
+}
+
+/* Set the lookup type of the next table in the steering chain */
+static void dr_ste_v0_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+       MLX5_SET(ste_general, hw_ste_p, next_lu_type, lu_type);
+}
+
+/* Read back the lookup type of the next table in the steering chain */
+static u16 dr_ste_v0_get_next_lu_type(u8 *hw_ste_p)
+{
+       return MLX5_GET(ste_general, hw_ste_p, next_lu_type);
+}
+
+/* On hit, the destination GVMI is encoded in bits 63_48 of the next table base */
+static void dr_ste_v0_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
+{
+       MLX5_SET(ste_general, hw_ste_p, next_table_base_63_48, gvmi);
+}
+
+/* Program the hit (next table) address. The ICM address is encoded in
+ * 32-byte units (icm_addr >> 5) OR-ed with the hash table size, then split
+ * across two hardware fields.
+ */
+static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+{
+       u64 index = (icm_addr >> 5) | ht_size;
+
+       MLX5_SET(ste_general, hw_ste_p, next_table_base_39_32_size, index >> 27);
+       MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index);
+}
+
+/* Fully initialize an STE: entry type, own lookup type, a don't-care next
+ * lookup type, and the GVMI fields.
+ */
+static void dr_ste_v0_init_full(u8 *hw_ste_p, u16 lu_type,
+                               enum dr_ste_v0_entry_type entry_type, u16 gvmi)
+{
+       dr_ste_v0_set_entry_type(hw_ste_p, entry_type);
+       dr_ste_v0_set_lu_type(hw_ste_p, lu_type);
+       dr_ste_v0_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+
+       /* Set GVMI once, this is the same for RX/TX
+        * bits 63_48 of next table base / miss address encode the next GVMI
+        */
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, gvmi, gvmi);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, next_table_base_63_48, gvmi);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi);
+}
+
+/* Initialize an STE as either an RX or a TX entry */
+static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type,
+                          bool is_rx, u16 gvmi)
+{
+       if (is_rx)
+               dr_ste_v0_init_full(hw_ste_p, lu_type, DR_STE_TYPE_RX, gvmi);
+       else
+               dr_ste_v0_init_full(hw_ste_p, lu_type, DR_STE_TYPE_TX, gvmi);
+}
+
+/* RX only: set the flow tag, OR-ed with its enable bit, in qp_list_pointer */
+static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag)
+{
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer,
+                DR_STE_ENABLE_FLOW_TAG | flow_tag);
+}
+
+/* Attach a 24-bit flow counter id, split across the two counter_trigger fields */
+static void dr_ste_v0_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
+{
+       /* This can be used for both rx_steering_mult and for sx_transmit */
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_15_0, ctr_id);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, counter_trigger_23_16, ctr_id >> 16);
+}
+
+/* TX only: set the go_back bit (needed by HW when combining reformat with
+ * push VLAN - see callers)
+ */
+static void dr_ste_v0_set_go_back_bit(u8 *hw_ste_p)
+{
+       MLX5_SET(ste_sx_transmit, hw_ste_p, go_back, 1);
+}
+
+/* TX: program a push-VLAN action carrying the raw VLAN header dword */
+static void dr_ste_v0_set_tx_push_vlan(u8 *hw_ste_p, u32 vlan_hdr,
+                                      bool go_back)
+{
+       MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+                DR_STE_ACTION_TYPE_PUSH_VLAN);
+       MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, vlan_hdr);
+       /* Due to HW limitation we need to set this bit, otherwise reformat +
+        * push vlan will not work.
+        */
+       if (go_back)
+               dr_ste_v0_set_go_back_bit(hw_ste_p);
+}
+
+/* TX: program an encap (L2 or L2-to-L3) action referencing a reformat id.
+ * @size is in bytes and is converted to the 2-byte words HW expects.
+ */
+static void dr_ste_v0_set_tx_encap(void *hw_ste_p, u32 reformat_id,
+                                  int size, bool encap_l3)
+{
+       MLX5_SET(ste_sx_transmit, hw_ste_p, action_type,
+                encap_l3 ? DR_STE_ACTION_TYPE_ENCAP_L3 : DR_STE_ACTION_TYPE_ENCAP);
+       /* The hardware expects here size in words (2 byte) */
+       MLX5_SET(ste_sx_transmit, hw_ste_p, action_description, size / 2);
+       MLX5_SET(ste_sx_transmit, hw_ste_p, encap_pointer_vlan_data, reformat_id);
+}
+
+/* RX: program an L2 tunnel decap action; fail_on_error drops malformed packets */
+static void dr_ste_v0_set_rx_decap(u8 *hw_ste_p)
+{
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+                DR_STE_TUNL_ACTION_DECAP);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
+}
+
+/* RX: program a pop-VLAN action (one VLAN header per STE - see callers) */
+static void dr_ste_v0_set_rx_pop_vlan(u8 *hw_ste_p)
+{
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+                DR_STE_TUNL_ACTION_POP_VLAN);
+}
+
+/* RX: program an L3 tunnel decap; action_description flags whether the inner
+ * header carries a VLAN
+ */
+static void dr_ste_v0_set_rx_decap_l3(u8 *hw_ste_p, bool vlan)
+{
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, tunneling_action,
+                DR_STE_TUNL_ACTION_L3_DECAP);
+       MLX5_SET(ste_modify_packet, hw_ste_p, action_description, vlan ? 1 : 0);
+       MLX5_SET(ste_rx_steering_mult, hw_ste_p, fail_on_error, 1);
+}
+
+/* Point a modify-packet STE at a pre-written rewrite (modify-header) action
+ * list: number of actions plus the index of the list in action memory.
+ */
+static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions,
+                                         u32 re_write_index)
+{
+       MLX5_SET(ste_modify_packet, hw_ste_p, number_of_re_write_actions,
+                num_of_actions);
+       MLX5_SET(ste_modify_packet, hw_ste_p, header_re_write_actions_pointer,
+                re_write_index);
+}
+
+/* Advance to the next STE in the array, count it, and initialize it with a
+ * don't-care lookup type so it only carries actions.
+ */
+static void dr_ste_v0_arr_init_next(u8 **last_ste,
+                                   u32 *added_stes,
+                                   enum dr_ste_v0_entry_type entry_type,
+                                   u16 gvmi)
+{
+       *last_ste += DR_STE_SIZE;
+       *added_stes += 1;
+       dr_ste_v0_init_full(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE,
+                           entry_type, gvmi);
+}
+
+/* Build the TX action part of the STE array. Order is HW-mandated:
+ * modify header -> push VLAN -> encap -> counter, then the hit address.
+ * Extra STEs are appended (via dr_ste_v0_arr_init_next) whenever two actions
+ * cannot share one STE; *added_stes is incremented accordingly.
+ */
+static void
+dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn,
+                        u8 *action_type_set,
+                        u32 actions_caps,
+                        u8 *last_ste,
+                        struct mlx5dr_ste_actions_attr *attr,
+                        u32 *added_stes)
+{
+       bool encap = action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2] ||
+               action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3];
+
+       /* We want to make sure the modify header comes before L2
+        * encapsulation. The reason for that is that we support
+        * modify headers for outer headers only
+        */
+       if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
+               dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
+               dr_ste_v0_set_rewrite_actions(last_ste,
+                                             attr->modify_actions,
+                                             attr->modify_index);
+       }
+
+       if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+               int i;
+
+               /* Each pushed VLAN needs its own STE; the first one may share
+                * the current STE unless a modify header already occupies it.
+                */
+               for (i = 0; i < attr->vlans.count; i++) {
+                       if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR])
+                               dr_ste_v0_arr_init_next(&last_ste,
+                                                       added_stes,
+                                                       DR_STE_TYPE_TX,
+                                                       attr->gvmi);
+
+                       dr_ste_v0_set_tx_push_vlan(last_ste,
+                                                  attr->vlans.headers[i],
+                                                  encap);
+               }
+       }
+
+       if (encap) {
+               /* Modify header and encapsulation require a different STEs.
+                * Since modify header STE format doesn't support encapsulation
+                * tunneling_action.
+                */
+               if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] ||
+                   action_type_set[DR_ACTION_TYP_PUSH_VLAN])
+                       dr_ste_v0_arr_init_next(&last_ste,
+                                               added_stes,
+                                               DR_STE_TYPE_TX,
+                                               attr->gvmi);
+
+               dr_ste_v0_set_tx_encap(last_ste,
+                                      attr->reformat.id,
+                                      attr->reformat.size,
+                                      action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]);
+               /* Whenever prio_tag_required enabled, we can be sure that the
+                * previous table (ACL) already push vlan to our packet,
+                * And due to HW limitation we need to set this bit, otherwise
+                * push vlan + reformat will not work.
+                */
+               if (MLX5_CAP_GEN(dmn->mdev, prio_tag_required))
+                       dr_ste_v0_set_go_back_bit(last_ste);
+       }
+
+       if (action_type_set[DR_ACTION_TYP_CTR])
+               dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
+
+       /* Finalize the last STE: destination GVMI and final ICM address */
+       dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
+       dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+/* Build the RX action part of the STE array. Order is HW-mandated:
+ * counter -> decap (L3 or L2) -> pop VLAN -> modify header -> flow tag,
+ * then the hit address. Extra STEs are appended when actions conflict on the
+ * same STE (e.g. decap-L3 and modify header both need a modify-packet STE).
+ */
+static void
+dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn,
+                        u8 *action_type_set,
+                        u32 actions_caps,
+                        u8 *last_ste,
+                        struct mlx5dr_ste_actions_attr *attr,
+                        u32 *added_stes)
+{
+       if (action_type_set[DR_ACTION_TYP_CTR])
+               dr_ste_v0_set_counter_id(last_ste, attr->ctr_id);
+
+       if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
+               dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
+               dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan);
+               dr_ste_v0_set_rewrite_actions(last_ste,
+                                             attr->decap_actions,
+                                             attr->decap_index);
+       }
+
+       if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2])
+               dr_ste_v0_set_rx_decap(last_ste);
+
+       if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+               int i;
+
+               /* Each popped VLAN needs its own STE; the first one may share
+                * the current STE unless a decap action already occupies it.
+                */
+               for (i = 0; i < attr->vlans.count; i++) {
+                       if (i ||
+                           action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2] ||
+                           action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2])
+                               dr_ste_v0_arr_init_next(&last_ste,
+                                                       added_stes,
+                                                       DR_STE_TYPE_RX,
+                                                       attr->gvmi);
+
+                       dr_ste_v0_set_rx_pop_vlan(last_ste);
+               }
+       }
+
+       if (action_type_set[DR_ACTION_TYP_MODIFY_HDR] && attr->modify_actions) {
+               /* Current STE already used as modify-packet (by decap L3)?
+                * Then the modify header needs a fresh modify-packet STE.
+                */
+               if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
+                       dr_ste_v0_arr_init_next(&last_ste,
+                                               added_stes,
+                                               DR_STE_TYPE_MODIFY_PKT,
+                                               attr->gvmi);
+               else
+                       dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT);
+
+               dr_ste_v0_set_rewrite_actions(last_ste,
+                                             attr->modify_actions,
+                                             attr->modify_index);
+       }
+
+       if (action_type_set[DR_ACTION_TYP_TAG]) {
+               /* Flow tag lives in the RX steering format, so it cannot share
+                * a modify-packet STE - append a plain RX STE if needed.
+                */
+               if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT)
+                       dr_ste_v0_arr_init_next(&last_ste,
+                                               added_stes,
+                                               DR_STE_TYPE_RX,
+                                               attr->gvmi);
+
+               dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag);
+       }
+
+       /* Finalize the last STE: destination GVMI and final ICM address */
+       dr_ste_v0_set_hit_gvmi(last_ste, attr->hit_gvmi);
+       dr_ste_v0_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+/* Encode one modify-header SET action: write @length bits of @data into the
+ * destination field at bit offset @shifter. A full 32-bit write is encoded
+ * as length 0 per the HW format.
+ */
+static void dr_ste_v0_set_action_set(u8 *hw_action,
+                                    u8 hw_field,
+                                    u8 shifter,
+                                    u8 length,
+                                    u32 data)
+{
+       length = (length == 32) ? 0 : length;
+       MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_SET);
+       MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
+       MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
+       MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
+       MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+}
+
+/* Encode one modify-header ADD action: same layout as SET, but the ADD
+ * opcode makes HW add @data to the destination field instead of writing it.
+ */
+static void dr_ste_v0_set_action_add(u8 *hw_action,
+                                    u8 hw_field,
+                                    u8 shifter,
+                                    u8 length,
+                                    u32 data)
+{
+       length = (length == 32) ? 0 : length;
+       MLX5_SET(dr_action_hw_set, hw_action, opcode, DR_STE_ACTION_MDFY_OP_ADD);
+       MLX5_SET(dr_action_hw_set, hw_action, destination_field_code, hw_field);
+       MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, shifter);
+       MLX5_SET(dr_action_hw_set, hw_action, destination_length, length);
+       MLX5_SET(dr_action_hw_set, hw_action, inline_data, data);
+}
+
+/* Encode one modify-header COPY action: copy @dst_len bits from the source
+ * field/offset into the destination field/offset.
+ */
+static void dr_ste_v0_set_action_copy(u8 *hw_action,
+                                     u8 dst_hw_field,
+                                     u8 dst_shifter,
+                                     u8 dst_len,
+                                     u8 src_hw_field,
+                                     u8 src_shifter)
+{
+       MLX5_SET(dr_action_hw_copy, hw_action, opcode, DR_STE_ACTION_MDFY_OP_COPY);
+       MLX5_SET(dr_action_hw_copy, hw_action, destination_field_code, dst_hw_field);
+       MLX5_SET(dr_action_hw_copy, hw_action, destination_left_shifter, dst_shifter);
+       MLX5_SET(dr_action_hw_copy, hw_action, destination_length, dst_len);
+       MLX5_SET(dr_action_hw_copy, hw_action, source_field_code, src_hw_field);
+       MLX5_SET(dr_action_hw_copy, hw_action, source_left_shifter, src_shifter);
+}
+
+/* Minimum number of SET actions needed to rebuild an L2 header after
+ * L3 decap (one more is needed when the header carries a VLAN).
+ */
+#define DR_STE_DECAP_L3_MIN_ACTION_NUM 5
+
+/* Build the modify-header SET action list that re-writes the inner L2
+ * header after an L3 tunnel decap.
+ *
+ * @data: the captured L2 header (struct mlx5_ifc_l2_hdr_bits), possibly
+ *        including a VLAN (detected by data_sz != HDR_LEN_L2)
+ * @hw_action / @hw_action_sz: output buffer for the encoded actions
+ * @used_hw_action_num: out - number of actions actually written
+ *
+ * Returns 0 on success, -ENOMEM if the buffer cannot hold the required
+ * 5 (or 6, with VLAN) actions.
+ */
+static int
+dr_ste_v0_set_action_decap_l3_list(void *data, u32 data_sz,
+                                  u8 *hw_action, u32 hw_action_sz,
+                                  u16 *used_hw_action_num)
+{
+       struct mlx5_ifc_l2_hdr_bits *l2_hdr = data;
+       u32 hw_action_num;
+       int required_actions;
+       u32 hdr_fld_4b;
+       u16 hdr_fld_2b;
+       u16 vlan_type;
+       bool vlan;
+
+       vlan = (data_sz != HDR_LEN_L2);
+       hw_action_num = hw_action_sz / MLX5_ST_SZ_BYTES(dr_action_hw_set);
+       required_actions = DR_STE_DECAP_L3_MIN_ACTION_NUM + !!vlan;
+
+       if (hw_action_num < required_actions)
+               return -ENOMEM;
+
+       /* dmac_47_16 */
+       MLX5_SET(dr_action_hw_set, hw_action,
+                opcode, DR_STE_ACTION_MDFY_OP_SET);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_length, 0);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_left_shifter, 16);
+       hdr_fld_4b = MLX5_GET(l2_hdr, l2_hdr, dmac_47_16);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                inline_data, hdr_fld_4b);
+       hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+       /* smac_47_16 */
+       MLX5_SET(dr_action_hw_set, hw_action,
+                opcode, DR_STE_ACTION_MDFY_OP_SET);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_length, 0);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
+       MLX5_SET(dr_action_hw_set, hw_action, destination_left_shifter, 16);
+       /* Recombine smac[47:16] from the two header fields */
+       hdr_fld_4b = (MLX5_GET(l2_hdr, l2_hdr, smac_31_0) >> 16 |
+                     MLX5_GET(l2_hdr, l2_hdr, smac_47_32) << 16);
+       MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
+       hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+       /* dmac_15_0 */
+       MLX5_SET(dr_action_hw_set, hw_action,
+                opcode, DR_STE_ACTION_MDFY_OP_SET);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_length, 16);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_0);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_left_shifter, 0);
+       hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, dmac_15_0);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                inline_data, hdr_fld_2b);
+       hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+       /* ethertype + (optional) vlan */
+       MLX5_SET(dr_action_hw_set, hw_action,
+                opcode, DR_STE_ACTION_MDFY_OP_SET);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_left_shifter, 32);
+       if (!vlan) {
+               hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+               MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
+               MLX5_SET(dr_action_hw_set, hw_action, destination_length, 16);
+       } else {
+               /* With a VLAN, write qualifier (2b) + first VLAN header (16b)
+                * together: 18 bits of inline data.
+                */
+               hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, ethertype);
+               vlan_type = hdr_fld_2b == SVLAN_ETHERTYPE ? DR_STE_SVLAN : DR_STE_CVLAN;
+               hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan);
+               hdr_fld_4b = (vlan_type << 16) | hdr_fld_2b;
+               MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_4b);
+               MLX5_SET(dr_action_hw_set, hw_action, destination_length, 18);
+       }
+       hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+       /* smac_15_0 */
+       MLX5_SET(dr_action_hw_set, hw_action,
+                opcode, DR_STE_ACTION_MDFY_OP_SET);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_length, 16);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_1);
+       MLX5_SET(dr_action_hw_set, hw_action,
+                destination_left_shifter, 0);
+       hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, smac_31_0);
+       MLX5_SET(dr_action_hw_set, hw_action, inline_data, hdr_fld_2b);
+       hw_action += MLX5_ST_SZ_BYTES(dr_action_hw_set);
+
+       /* Sixth action, only with VLAN: restore the inner ethertype
+        * (vlan_type field of the captured header)
+        */
+       if (vlan) {
+               MLX5_SET(dr_action_hw_set, hw_action,
+                        opcode, DR_STE_ACTION_MDFY_OP_SET);
+               hdr_fld_2b = MLX5_GET(l2_hdr, l2_hdr, vlan_type);
+               MLX5_SET(dr_action_hw_set, hw_action,
+                        inline_data, hdr_fld_2b);
+               MLX5_SET(dr_action_hw_set, hw_action,
+                        destination_length, 16);
+               MLX5_SET(dr_action_hw_set, hw_action,
+                        destination_field_code, DR_STE_V0_ACTION_MDFY_FLD_L2_2);
+               MLX5_SET(dr_action_hw_set, hw_action,
+                        destination_left_shifter, 0);
+       }
+
+       *used_hw_action_num = required_actions;
+
+       return 0;
+}
+
+/* Build the STE bit mask for the ETHL2_SRC_DST lookup. Mask fields are
+ * zeroed once consumed, so the caller can detect any unsupported leftovers.
+ */
+static void
+dr_ste_v0_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
+                                       bool inner, u8 *bit_mask)
+{
+       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+       /* The STE splits the smac at bit 32, not at bit 16 like the match
+        * spec, so recombine before writing.
+        */
+       if (mask->smac_47_16 || mask->smac_15_0) {
+               MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_47_32,
+                        mask->smac_47_16 >> 16);
+               MLX5_SET(ste_eth_l2_src_dst, bit_mask, smac_31_0,
+                        mask->smac_47_16 << 16 | mask->smac_15_0);
+               mask->smac_47_16 = 0;
+               mask->smac_15_0 = 0;
+       }
+
+       DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_vlan_id, mask, first_vid);
+       DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_cfi, mask, first_cfi);
+       DR_STE_SET_TAG(eth_l2_src_dst, bit_mask, first_priority, mask, first_prio);
+       DR_STE_SET_ONES(eth_l2_src_dst, bit_mask, l3_type, mask, ip_version);
+
+       /* Either VLAN qualifier fully masks the 2-bit qualifier field */
+       if (mask->cvlan_tag) {
+               MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+               mask->cvlan_tag = 0;
+       } else if (mask->svlan_tag) {
+               MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
+               mask->svlan_tag = 0;
+       }
+}
+
+/* Build the STE tag for the ETHL2_SRC_DST lookup from the match value.
+ * Consumed spec fields are zeroed. Returns -EINVAL for an ip_version other
+ * than IPv4/IPv6.
+ */
+static int
+dr_ste_v0_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
+                                  struct mlx5dr_ste_build *sb,
+                                  u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+       /* Same smac re-split as in the bit-mask builder */
+       if (spec->smac_47_16 || spec->smac_15_0) {
+               MLX5_SET(ste_eth_l2_src_dst, tag, smac_47_32,
+                        spec->smac_47_16 >> 16);
+               MLX5_SET(ste_eth_l2_src_dst, tag, smac_31_0,
+                        spec->smac_47_16 << 16 | spec->smac_15_0);
+               spec->smac_47_16 = 0;
+               spec->smac_15_0 = 0;
+       }
+
+       if (spec->ip_version) {
+               if (spec->ip_version == IP_VERSION_IPV4) {
+                       MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV4);
+                       spec->ip_version = 0;
+               } else if (spec->ip_version == IP_VERSION_IPV6) {
+                       MLX5_SET(ste_eth_l2_src_dst, tag, l3_type, STE_IPV6);
+                       spec->ip_version = 0;
+               } else {
+                       return -EINVAL;
+               }
+       }
+
+       DR_STE_SET_TAG(eth_l2_src_dst, tag, first_vlan_id, spec, first_vid);
+       DR_STE_SET_TAG(eth_l2_src_dst, tag, first_cfi, spec, first_cfi);
+       DR_STE_SET_TAG(eth_l2_src_dst, tag, first_priority, spec, first_prio);
+
+       if (spec->cvlan_tag) {
+               MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_CVLAN);
+               spec->cvlan_tag = 0;
+       } else if (spec->svlan_tag) {
+               MLX5_SET(ste_eth_l2_src_dst, tag, first_vlan_qualifier, DR_STE_SVLAN);
+               spec->svlan_tag = 0;
+       }
+       return 0;
+}
+
+/* Initialize an ETHL2_SRC_DST ste_build: bit mask, lookup type, byte mask
+ * and the tag-building callback.
+ */
+static void
+dr_ste_v0_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, sb->rx, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_dst_tag;
+}
+
+/* Build the STE tag for the IPv6 destination address lookup (four 32-bit
+ * chunks of the 128-bit address).
+ */
+static int
+dr_ste_v0_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
+                                   struct mlx5dr_ste_build *sb,
+                                   u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
+       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
+       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
+       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
+
+       return 0;
+}
+
+/* Initialize an ETHL3_IPV6_DST ste_build (mask, lookup type, tag callback) */
+static void
+dr_ste_v0_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_DST, sb->rx, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_dst_tag;
+}
+
+/* Build the STE tag for the IPv6 source address lookup (four 32-bit chunks
+ * of the 128-bit address).
+ */
+static int
+dr_ste_v0_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
+                                   struct mlx5dr_ste_build *sb,
+                                   u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
+       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
+       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
+       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
+
+       return 0;
+}
+
+/* Initialize an ETHL3_IPV6_SRC ste_build (mask, lookup type, tag callback) */
+static void
+dr_ste_v0_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV6_SRC, sb->rx, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv6_src_tag;
+}
+
+/* Build the STE tag for the IPv4 5-tuple lookup: addresses, L4 ports,
+ * protocol, fragment bit, DSCP/ECN and optionally TCP flags.
+ */
+static int
+dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
+                                       struct mlx5dr_ste_build *sb,
+                                       u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
+       /* TCP and UDP ports map onto the same STE port fields */
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, tcp_dport);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_port, spec, udp_dport);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, tcp_sport);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_port, spec, udp_sport);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, protocol, spec, ip_protocol);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, fragmented, spec, frag);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, dscp, spec, ip_dscp);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, ecn, spec, ip_ecn);
+
+       if (spec->tcp_flags) {
+               DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple, tag, spec);
+               spec->tcp_flags = 0;
+       }
+
+       return 0;
+}
+
+/* Initialize an ETHL3_IPV4_5_TUPLE ste_build (mask, lookup type, tag callback) */
+static void
+dr_ste_v0_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_5_TUPLE, sb->rx, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_tag;
+}
+
+/* Common bit-mask builder shared by the ETHL2 SRC and DST lookups:
+ * first/second VLAN, fragment bit, ethertype and L3 type. Second-VLAN fields
+ * come from the misc parameters and are selected by inner/outer.
+ */
+static void
+dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
+                                          bool inner, u8 *bit_mask)
+{
+       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+       struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+       DR_STE_SET_TAG(eth_l2_src, bit_mask, first_vlan_id, mask, first_vid);
+       DR_STE_SET_TAG(eth_l2_src, bit_mask, first_cfi, mask, first_cfi);
+       DR_STE_SET_TAG(eth_l2_src, bit_mask, first_priority, mask, first_prio);
+       DR_STE_SET_TAG(eth_l2_src, bit_mask, ip_fragmented, mask, frag);
+       DR_STE_SET_TAG(eth_l2_src, bit_mask, l3_ethertype, mask, ethertype);
+       DR_STE_SET_ONES(eth_l2_src, bit_mask, l3_type, mask, ip_version);
+
+       /* Either VLAN qualifier fully masks the 2-bit qualifier field */
+       if (mask->svlan_tag || mask->cvlan_tag) {
+               MLX5_SET(ste_eth_l2_src, bit_mask, first_vlan_qualifier, -1);
+               mask->cvlan_tag = 0;
+               mask->svlan_tag = 0;
+       }
+
+       if (inner) {
+               if (misc_mask->inner_second_cvlan_tag ||
+                   misc_mask->inner_second_svlan_tag) {
+                       MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+                       misc_mask->inner_second_cvlan_tag = 0;
+                       misc_mask->inner_second_svlan_tag = 0;
+               }
+
+               DR_STE_SET_TAG(eth_l2_src, bit_mask,
+                              second_vlan_id, misc_mask, inner_second_vid);
+               DR_STE_SET_TAG(eth_l2_src, bit_mask,
+                              second_cfi, misc_mask, inner_second_cfi);
+               DR_STE_SET_TAG(eth_l2_src, bit_mask,
+                              second_priority, misc_mask, inner_second_prio);
+       } else {
+               if (misc_mask->outer_second_cvlan_tag ||
+                   misc_mask->outer_second_svlan_tag) {
+                       MLX5_SET(ste_eth_l2_src, bit_mask, second_vlan_qualifier, -1);
+                       misc_mask->outer_second_cvlan_tag = 0;
+                       misc_mask->outer_second_svlan_tag = 0;
+               }
+
+               DR_STE_SET_TAG(eth_l2_src, bit_mask,
+                              second_vlan_id, misc_mask, outer_second_vid);
+               DR_STE_SET_TAG(eth_l2_src, bit_mask,
+                              second_cfi, misc_mask, outer_second_cfi);
+               DR_STE_SET_TAG(eth_l2_src, bit_mask,
+                              second_priority, misc_mask, outer_second_prio);
+       }
+}
+
+/* Common tag builder shared by the ETHL2 SRC and DST lookups; mirrors the
+ * bit-mask builder above. Returns -EINVAL for an unknown ip_version.
+ */
+static int
+dr_ste_v0_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
+                                     bool inner, u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
+       struct mlx5dr_match_misc *misc_spec = &value->misc;
+
+       DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
+       DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
+       DR_STE_SET_TAG(eth_l2_src, tag, first_priority, spec, first_prio);
+       DR_STE_SET_TAG(eth_l2_src, tag, ip_fragmented, spec, frag);
+       DR_STE_SET_TAG(eth_l2_src, tag, l3_ethertype, spec, ethertype);
+
+       if (spec->ip_version) {
+               if (spec->ip_version == IP_VERSION_IPV4) {
+                       MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV4);
+                       spec->ip_version = 0;
+               } else if (spec->ip_version == IP_VERSION_IPV6) {
+                       MLX5_SET(ste_eth_l2_src, tag, l3_type, STE_IPV6);
+                       spec->ip_version = 0;
+               } else {
+                       return -EINVAL;
+               }
+       }
+
+       if (spec->cvlan_tag) {
+               MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_CVLAN);
+               spec->cvlan_tag = 0;
+       } else if (spec->svlan_tag) {
+               MLX5_SET(ste_eth_l2_src, tag, first_vlan_qualifier, DR_STE_SVLAN);
+               spec->svlan_tag = 0;
+       }
+
+       if (inner) {
+               if (misc_spec->inner_second_cvlan_tag) {
+                       MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+                       misc_spec->inner_second_cvlan_tag = 0;
+               } else if (misc_spec->inner_second_svlan_tag) {
+                       MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+                       misc_spec->inner_second_svlan_tag = 0;
+               }
+
+               DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, inner_second_vid);
+               DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, inner_second_cfi);
+               DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, inner_second_prio);
+       } else {
+               if (misc_spec->outer_second_cvlan_tag) {
+                       MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_CVLAN);
+                       misc_spec->outer_second_cvlan_tag = 0;
+               } else if (misc_spec->outer_second_svlan_tag) {
+                       MLX5_SET(ste_eth_l2_src, tag, second_vlan_qualifier, DR_STE_SVLAN);
+                       misc_spec->outer_second_svlan_tag = 0;
+               }
+               DR_STE_SET_TAG(eth_l2_src, tag, second_vlan_id, misc_spec, outer_second_vid);
+               DR_STE_SET_TAG(eth_l2_src, tag, second_cfi, misc_spec, outer_second_cfi);
+               DR_STE_SET_TAG(eth_l2_src, tag, second_priority, misc_spec, outer_second_prio);
+       }
+
+       return 0;
+}
+
+/* Build the bit mask for the ETHL2_SRC lookup: source MAC (high/low
+ * halves) plus the common L2 src/dst fields set by the shared helper.
+ */
+static void
+dr_ste_v0_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
+                                   bool inner, u8 *bit_mask)
+{
+       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_47_16, mask, smac_47_16);
+       DR_STE_SET_TAG(eth_l2_src, bit_mask, smac_15_0, mask, smac_15_0);
+
+       dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+/* Build the ETHL2_SRC tag: source MAC, then the shared L2 fields.
+ * Returns 0 on success or a negative errno from the shared helper.
+ */
+static int
+dr_ste_v0_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
+                              struct mlx5dr_ste_build *sb,
+                              u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
+       DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
+
+       return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+/* Initialize an ETHL2_SRC ste_build: compute bit/byte masks, pick the
+ * lookup type by rx/inner, and install the tag-builder callback.
+ */
+static void
+dr_ste_v0_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+                               struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
+       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC, sb->rx, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_src_tag;
+}
+
+/* Build the bit mask for the ETHL2_DST lookup: destination MAC plus the
+ * common L2 src/dst fields set by the shared helper.
+ */
+static void
+dr_ste_v0_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
+                                   struct mlx5dr_ste_build *sb,
+                                   u8 *bit_mask)
+{
+       struct mlx5dr_match_spec *mask = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_47_16, mask, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_dst, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+       dr_ste_v0_build_eth_l2_src_or_dst_bit_mask(value, sb->inner, bit_mask);
+}
+
+/* Build the ETHL2_DST tag: destination MAC, then the shared L2 fields.
+ * Returns 0 on success or a negative errno from the shared helper.
+ */
+static int
+dr_ste_v0_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
+                              struct mlx5dr_ste_build *sb,
+                              u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
+
+       return dr_ste_v0_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+/* Initialize an ETHL2_DST ste_build: compute bit/byte masks, pick the
+ * lookup type by rx/inner, and install the tag-builder callback.
+ */
+static void
+dr_ste_v0_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+                               struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_eth_l2_dst_bit_mask(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_DST, sb->rx, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_dst_tag;
+}
+
+/* Build the bit mask for the L2 tunneling lookup: MAC/VLAN/frag/ethertype
+ * fields plus the tunnel network id (VXLAN VNI shifted into position).
+ * Consumed mask fields (vxlan_vni, cvlan/svlan tags) are zeroed so the
+ * caller can detect unhandled match criteria.
+ */
+static void
+dr_ste_v0_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
+                                   bool inner, u8 *bit_mask)
+{
+       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+       struct mlx5dr_match_misc *misc = &value->misc;
+
+       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_47_16, mask, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, dmac_15_0, mask, dmac_15_0);
+       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_vlan_id, mask, first_vid);
+       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_cfi, mask, first_cfi);
+       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, first_priority, mask, first_prio);
+       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, ip_fragmented, mask, frag);
+       DR_STE_SET_TAG(eth_l2_tnl, bit_mask, l3_ethertype, mask, ethertype);
+       DR_STE_SET_ONES(eth_l2_tnl, bit_mask, l3_type, mask, ip_version);
+
+       if (misc->vxlan_vni) {
+               /* VNI occupies the upper 24 bits of the tunneling network id */
+               MLX5_SET(ste_eth_l2_tnl, bit_mask,
+                        l2_tunneling_network_id, (misc->vxlan_vni << 8));
+               misc->vxlan_vni = 0;
+       }
+
+       if (mask->svlan_tag || mask->cvlan_tag) {
+               /* Either VLAN presence bit masks the whole qualifier field */
+               MLX5_SET(ste_eth_l2_tnl, bit_mask, first_vlan_qualifier, -1);
+               mask->cvlan_tag = 0;
+               mask->svlan_tag = 0;
+       }
+}
+
+/* Build the L2 tunneling tag. Consumed spec/misc fields are zeroed.
+ * Returns -EINVAL when ip_version is set to something other than
+ * IPv4/IPv6; 0 otherwise.
+ */
+static int
+dr_ste_v0_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
+                              struct mlx5dr_ste_build *sb,
+                              u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+       struct mlx5dr_match_misc *misc = &value->misc;
+
+       DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
+       DR_STE_SET_TAG(eth_l2_tnl, tag, first_vlan_id, spec, first_vid);
+       DR_STE_SET_TAG(eth_l2_tnl, tag, first_cfi, spec, first_cfi);
+       DR_STE_SET_TAG(eth_l2_tnl, tag, ip_fragmented, spec, frag);
+       DR_STE_SET_TAG(eth_l2_tnl, tag, first_priority, spec, first_prio);
+       DR_STE_SET_TAG(eth_l2_tnl, tag, l3_ethertype, spec, ethertype);
+
+       if (misc->vxlan_vni) {
+               /* VNI occupies the upper 24 bits of the tunneling network id */
+               MLX5_SET(ste_eth_l2_tnl, tag, l2_tunneling_network_id,
+                        (misc->vxlan_vni << 8));
+               misc->vxlan_vni = 0;
+       }
+
+       if (spec->cvlan_tag) {
+               MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_CVLAN);
+               spec->cvlan_tag = 0;
+       } else if (spec->svlan_tag) {
+               MLX5_SET(ste_eth_l2_tnl, tag, first_vlan_qualifier, DR_STE_SVLAN);
+               spec->svlan_tag = 0;
+       }
+
+       if (spec->ip_version) {
+               if (spec->ip_version == IP_VERSION_IPV4) {
+                       MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV4);
+                       spec->ip_version = 0;
+               } else if (spec->ip_version == IP_VERSION_IPV6) {
+                       MLX5_SET(ste_eth_l2_tnl, tag, l3_type, STE_IPV6);
+                       spec->ip_version = 0;
+               } else {
+                       /* Unsupported IP version - cannot be encoded in the STE */
+                       return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+/* Initialize an L2 tunneling ste_build; uses the fixed ingress
+ * ETHL2_TUNNELING_I lookup type rather than a calculated one.
+ */
+static void
+dr_ste_v0_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+                               struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V0_LU_TYPE_ETHL2_TUNNELING_I;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l2_tnl_tag;
+}
+
+/* Build the IPv4 misc tag: TTL and IHL only. Always returns 0. */
+static int
+dr_ste_v0_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
+                                    struct mlx5dr_ste_build *sb,
+                                    u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
+       DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, ihl, spec, ipv4_ihl);
+
+       return 0;
+}
+
+/* Initialize an ETHL3_IPV4_MISC ste_build; the tag builder doubles as
+ * the bit-mask builder (same field layout, mask written into bit_mask).
+ */
+static void
+dr_ste_v0_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+                                     struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL3_IPV4_MISC, sb->rx, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l3_ipv4_misc_tag;
+}
+
+/* Build the ETHL4 tag: L4 ports (TCP and UDP share the same STE fields),
+ * protocol, fragmentation, DSCP/ECN, hop limit, IPv6 flow label and
+ * optional TCP flags. Consumed tcp_flags are zeroed. Always returns 0.
+ */
+static int
+dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
+                                  struct mlx5dr_ste_build *sb,
+                                  u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+       struct mlx5dr_match_misc *misc = &value->misc;
+
+       DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
+       DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
+       DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, udp_dport);
+       DR_STE_SET_TAG(eth_l4, tag, src_port, spec, udp_sport);
+       DR_STE_SET_TAG(eth_l4, tag, protocol, spec, ip_protocol);
+       DR_STE_SET_TAG(eth_l4, tag, fragmented, spec, frag);
+       DR_STE_SET_TAG(eth_l4, tag, dscp, spec, ip_dscp);
+       DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn);
+       DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+
+       if (sb->inner)
+               DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, inner_ipv6_flow_label);
+       else
+               DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, outer_ipv6_flow_label);
+
+       if (spec->tcp_flags) {
+               DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec);
+               spec->tcp_flags = 0;
+       }
+
+       return 0;
+}
+
+/* Initialize an ETHL4 ste_build; the tag builder doubles as the
+ * bit-mask builder.
+ */
+static void
+dr_ste_v0_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4, sb->rx, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_eth_ipv6_l3_l4_tag;
+}
+
+/* Build the first-MPLS-label tag from either the inner or outer misc2
+ * fields, chosen by sb->inner. Always returns 0.
+ */
+static int
+dr_ste_v0_build_mpls_tag(struct mlx5dr_match_param *value,
+                        struct mlx5dr_ste_build *sb,
+                        u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+       if (sb->inner)
+               DR_STE_SET_MPLS(mpls, misc2, inner, tag);
+       else
+               DR_STE_SET_MPLS(mpls, misc2, outer, tag);
+
+       return 0;
+}
+
+/* Initialize an MPLS_FIRST ste_build; the tag builder doubles as the
+ * bit-mask builder.
+ */
+static void
+dr_ste_v0_build_mpls_init(struct mlx5dr_ste_build *sb,
+                         struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_mpls_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_LU_TYPE(MPLS_FIRST, sb->rx, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_mpls_tag;
+}
+
+/* Build the GRE tunnel tag: protocol, key (presence bit + high/low
+ * halves), checksum and sequence presence bits. Always returns 0.
+ */
+static int
+dr_ste_v0_build_tnl_gre_tag(struct mlx5dr_match_param *value,
+                           struct mlx5dr_ste_build *sb,
+                           u8 *tag)
+{
+       struct  mlx5dr_match_misc *misc = &value->misc;
+
+       DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
+
+       DR_STE_SET_TAG(gre, tag, gre_k_present, misc, gre_k_present);
+       DR_STE_SET_TAG(gre, tag, gre_key_h, misc, gre_key_h);
+       DR_STE_SET_TAG(gre, tag, gre_key_l, misc, gre_key_l);
+
+       DR_STE_SET_TAG(gre, tag, gre_c_present, misc, gre_c_present);
+
+       DR_STE_SET_TAG(gre, tag, gre_s_present, misc, gre_s_present);
+
+       return 0;
+}
+
+/* Initialize a GRE ste_build; the tag builder doubles as the bit-mask
+ * builder.
+ */
+static void
+dr_ste_v0_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+                            struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_tnl_gre_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V0_LU_TYPE_GRE;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gre_tag;
+}
+
+/* Build an MPLS-over-tunnel tag into flex parser 3: pack label/exp/
+ * s-bos/ttl into one 32-bit MPLS header word, taking the over-GRE
+ * fields when any of them is masked, otherwise the over-UDP fields.
+ * Consumed misc2 fields are zeroed. Always returns 0.
+ */
+static int
+dr_ste_v0_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
+                            struct mlx5dr_ste_build *sb,
+                            u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+       u32 mpls_hdr;
+
+       if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2)) {
+               mpls_hdr = misc_2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
+               misc_2->outer_first_mpls_over_gre_label = 0;
+               mpls_hdr |= misc_2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
+               misc_2->outer_first_mpls_over_gre_exp = 0;
+               mpls_hdr |= misc_2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
+               misc_2->outer_first_mpls_over_gre_s_bos = 0;
+               mpls_hdr |= misc_2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
+               misc_2->outer_first_mpls_over_gre_ttl = 0;
+       } else {
+               mpls_hdr = misc_2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
+               misc_2->outer_first_mpls_over_udp_label = 0;
+               mpls_hdr |= misc_2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
+               misc_2->outer_first_mpls_over_udp_exp = 0;
+               mpls_hdr |= misc_2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
+               misc_2->outer_first_mpls_over_udp_s_bos = 0;
+               mpls_hdr |= misc_2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
+               misc_2->outer_first_mpls_over_udp_ttl = 0;
+       }
+
+       MLX5_SET(ste_flex_parser_0, tag, flex_parser_3, mpls_hdr);
+       return 0;
+}
+
+/* Initialize an MPLS-over-tunnel ste_build using the fixed
+ * FLEX_PARSER_0 lookup type; the tag builder doubles as the bit-mask
+ * builder.
+ */
+static void
+dr_ste_v0_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+                             struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_tag;
+}
+
+/* Build the MPLS-over-UDP tag: pack the MPLS header word and write it,
+ * big-endian, at the flex parser slot chosen by the device caps.
+ * Consumed misc2 fields are zeroed. Always returns 0.
+ */
+static int
+dr_ste_v0_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
+                                     struct mlx5dr_ste_build *sb,
+                                     u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+       u8 *parser_ptr;
+       u8 parser_id;
+       u32 mpls_hdr;
+
+       mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
+       misc2->outer_first_mpls_over_udp_label = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
+       misc2->outer_first_mpls_over_udp_exp = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
+       misc2->outer_first_mpls_over_udp_s_bos = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
+       misc2->outer_first_mpls_over_udp_ttl = 0;
+
+       parser_id = sb->caps->flex_parser_id_mpls_over_udp;
+       parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+       *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+       return 0;
+}
+
+/* Initialize an MPLS-over-UDP ste_build; the lookup type depends on
+ * which flex parser the firmware assigned for MPLS-over-UDP.
+ */
+static void
+dr_ste_v0_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+                                      struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
+       /* STEs with lookup type FLEX_PARSER_{0/1} includes
+        * flex parsers_{0-3}/{4-7} respectively.
+        */
+       sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
+                     DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+                     DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_udp_tag;
+}
+
+/* Build the MPLS-over-GRE tag: pack the MPLS header word and write it,
+ * big-endian, at the flex parser slot chosen by the device caps.
+ * Consumed misc2 fields are zeroed. Always returns 0.
+ */
+static int
+dr_ste_v0_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
+                                     struct mlx5dr_ste_build *sb,
+                                     u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+       u8 *parser_ptr;
+       u8 parser_id;
+       u32 mpls_hdr;
+
+       mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
+       misc2->outer_first_mpls_over_gre_label = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
+       misc2->outer_first_mpls_over_gre_exp = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
+       misc2->outer_first_mpls_over_gre_s_bos = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
+       misc2->outer_first_mpls_over_gre_ttl = 0;
+
+       parser_id = sb->caps->flex_parser_id_mpls_over_gre;
+       parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+       *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+       return 0;
+}
+
+/* Initialize an MPLS-over-GRE ste_build; the lookup type depends on
+ * which flex parser the firmware assigned for MPLS-over-GRE.
+ */
+static void
+dr_ste_v0_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+                                      struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
+
+       /* STEs with lookup type FLEX_PARSER_{0/1} includes
+        * flex parsers_{0-3}/{4-7} respectively.
+        */
+       sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
+                     DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+                     DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_mpls_over_gre_tag;
+}
+
+#define ICMP_TYPE_OFFSET_FIRST_DW      24
+#define ICMP_CODE_OFFSET_FIRST_DW      16
+
+/* Build the ICMP tag: type/code are packed into the first flex parser
+ * dword (offsets defined above), header data goes into the second.
+ * The v4/v6 field sources and parser locations are chosen by whether
+ * an ICMPv4 match is set. Consumed misc3 fields are zeroed.
+ * Always returns 0.
+ */
+static int
+dr_ste_v0_build_icmp_tag(struct mlx5dr_match_param *value,
+                        struct mlx5dr_ste_build *sb,
+                        u8 *tag)
+{
+       struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
+       u32 *icmp_header_data;
+       int dw0_location;
+       int dw1_location;
+       u8 *parser_ptr;
+       u8 *icmp_type;
+       u8 *icmp_code;
+       bool is_ipv4;
+       u32 icmp_hdr;
+
+       is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc_3);
+       if (is_ipv4) {
+               icmp_header_data        = &misc_3->icmpv4_header_data;
+               icmp_type               = &misc_3->icmpv4_type;
+               icmp_code               = &misc_3->icmpv4_code;
+               dw0_location            = sb->caps->flex_parser_id_icmp_dw0;
+               dw1_location            = sb->caps->flex_parser_id_icmp_dw1;
+       } else {
+               icmp_header_data        = &misc_3->icmpv6_header_data;
+               icmp_type               = &misc_3->icmpv6_type;
+               icmp_code               = &misc_3->icmpv6_code;
+               dw0_location            = sb->caps->flex_parser_id_icmpv6_dw0;
+               dw1_location            = sb->caps->flex_parser_id_icmpv6_dw1;
+       }
+
+       parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw0_location);
+       icmp_hdr = (*icmp_type << ICMP_TYPE_OFFSET_FIRST_DW) |
+                  (*icmp_code << ICMP_CODE_OFFSET_FIRST_DW);
+       *(__be32 *)parser_ptr = cpu_to_be32(icmp_hdr);
+       *icmp_code = 0;
+       *icmp_type = 0;
+
+       parser_ptr = dr_ste_calc_flex_parser_offset(tag, dw1_location);
+       *(__be32 *)parser_ptr = cpu_to_be32(*icmp_header_data);
+       *icmp_header_data = 0;
+
+       return 0;
+}
+
+/* Initialize an ICMP ste_build; the lookup type depends on which flex
+ * parser holds the ICMP(v4/v6) dw0 per the device caps.
+ */
+static void
+dr_ste_v0_build_icmp_init(struct mlx5dr_ste_build *sb,
+                         struct mlx5dr_match_param *mask)
+{
+       u8 parser_id;
+       bool is_ipv4;
+
+       dr_ste_v0_build_icmp_tag(mask, sb, sb->bit_mask);
+
+       /* STEs with lookup type FLEX_PARSER_{0/1} includes
+        * flex parsers_{0-3}/{4-7} respectively.
+        */
+       is_ipv4 = DR_MASK_IS_ICMPV4_SET(&mask->misc3);
+       parser_id = is_ipv4 ? sb->caps->flex_parser_id_icmp_dw0 :
+                   sb->caps->flex_parser_id_icmpv6_dw0;
+       sb->lu_type = parser_id > DR_STE_MAX_FLEX_0_ID ?
+                     DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+                     DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_icmp_tag;
+}
+
+/* Build the general-purpose tag from metadata register A. Always
+ * returns 0.
+ */
+static int
+dr_ste_v0_build_general_purpose_tag(struct mlx5dr_match_param *value,
+                                   struct mlx5dr_ste_build *sb,
+                                   u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc_2 = &value->misc2;
+
+       DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
+                      misc_2, metadata_reg_a);
+
+       return 0;
+}
+
+/* Initialize a GENERAL_PURPOSE ste_build; the tag builder doubles as
+ * the bit-mask builder.
+ */
+static void
+dr_ste_v0_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_general_purpose_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V0_LU_TYPE_GENERAL_PURPOSE;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_general_purpose_tag;
+}
+
+/* Build the ETHL4_MISC tag: TCP sequence/ack numbers from the inner or
+ * outer misc3 fields, chosen by sb->inner. Always returns 0.
+ */
+static int
+dr_ste_v0_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
+                               struct mlx5dr_ste_build *sb,
+                               u8 *tag)
+{
+       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+       if (sb->inner) {
+               DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
+               DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, inner_tcp_ack_num);
+       } else {
+               DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, outer_tcp_seq_num);
+               DR_STE_SET_TAG(eth_l4_misc, tag, ack_num, misc3, outer_tcp_ack_num);
+       }
+
+       return 0;
+}
+
+/* Initialize an ETHL4_MISC ste_build; the tag builder doubles as the
+ * bit-mask builder.
+ */
+static void
+dr_ste_v0_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL4_MISC, sb->rx, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_eth_l4_misc_tag;
+}
+
+/* Build the VXLAN-GPE tunnel tag: flags, next protocol and VNI from
+ * misc3. Always returns 0.
+ */
+static int
+dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+                                             struct mlx5dr_ste_build *sb,
+                                             u8 *tag)
+{
+       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+                      outer_vxlan_gpe_flags, misc3,
+                      outer_vxlan_gpe_flags);
+       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+                      outer_vxlan_gpe_next_protocol, misc3,
+                      outer_vxlan_gpe_next_protocol);
+       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+                      outer_vxlan_gpe_vni, misc3,
+                      outer_vxlan_gpe_vni);
+
+       return 0;
+}
+
+/* Initialize a VXLAN-GPE ste_build using the tunnel-header flex parser
+ * lookup type; the tag builder doubles as the bit-mask builder.
+ */
+static void
+dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+                                              struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
+       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+/* Build the GENEVE tunnel tag: protocol type, OAM bit, option length
+ * and VNI from misc. Always returns 0.
+ */
+static int
+dr_ste_v0_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+                                          struct mlx5dr_ste_build *sb,
+                                          u8 *tag)
+{
+       struct mlx5dr_match_misc *misc = &value->misc;
+
+       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+                      geneve_protocol_type, misc, geneve_protocol_type);
+       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+                      geneve_oam, misc, geneve_oam);
+       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+                      geneve_opt_len, misc, geneve_opt_len);
+       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+                      geneve_vni, misc, geneve_vni);
+
+       return 0;
+}
+
+/* Initialize a GENEVE ste_build using the tunnel-header flex parser
+ * lookup type; the tag builder doubles as the bit-mask builder.
+ */
+static void
+dr_ste_v0_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
+       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tag;
+}
+
+/* Build the STEERING_REGISTERS_0 tag from metadata registers C0-C3.
+ * Always returns 0.
+ */
+static int
+dr_ste_v0_build_register_0_tag(struct mlx5dr_match_param *value,
+                              struct mlx5dr_ste_build *sb,
+                              u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+       DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
+       DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
+       DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
+       DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
+
+       return 0;
+}
+
+/* Initialize a STEERING_REGISTERS_0 ste_build; the tag builder doubles
+ * as the bit-mask builder.
+ */
+static void
+dr_ste_v0_build_register_0_init(struct mlx5dr_ste_build *sb,
+                               struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_register_0_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_0;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_register_0_tag;
+}
+
+/* Build the STEERING_REGISTERS_1 tag from metadata registers C4-C7.
+ * Always returns 0.
+ */
+static int
+dr_ste_v0_build_register_1_tag(struct mlx5dr_match_param *value,
+                              struct mlx5dr_ste_build *sb,
+                              u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+       DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
+       DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
+       DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
+       DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
+
+       return 0;
+}
+
+/* Initialize a STEERING_REGISTERS_1 ste_build; the tag builder doubles
+ * as the bit-mask builder.
+ */
+static void
+dr_ste_v0_build_register_1_init(struct mlx5dr_ste_build *sb,
+                               struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_register_1_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V0_LU_TYPE_STEERING_REGISTERS_1;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_register_1_tag;
+}
+
+/* Build the SRC_GVMI_AND_QP bit mask: source port and SQN are masked as
+ * all-ones when requested; the eswitch-owner vhca id is consumed here
+ * (resolved to a GVMI in the tag builder) and therefore zeroed.
+ */
+static void
+dr_ste_v0_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+                                     u8 *bit_mask)
+{
+       struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+       DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
+       DR_STE_SET_ONES(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
+       misc_mask->source_eswitch_owner_vhca_id = 0;
+}
+
+/* Build the SRC_GVMI_AND_QP tag. When the eswitch-owner vhca id is
+ * valid, resolve which domain (local or peer, looked up in the
+ * peer_dmn_xa) owns the vport, then translate source_port to that
+ * domain's vport GVMI. Returns -EINVAL when the vhca id matches no
+ * known domain or the vport is disabled/invalid; 0 otherwise.
+ * Consumed misc fields are zeroed.
+ */
+static int
+dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+                                struct mlx5dr_ste_build *sb,
+                                u8 *tag)
+{
+       struct mlx5dr_match_misc *misc = &value->misc;
+       int id = misc->source_eswitch_owner_vhca_id;
+       struct mlx5dr_cmd_vport_cap *vport_cap;
+       struct mlx5dr_domain *dmn = sb->dmn;
+       struct mlx5dr_domain *vport_dmn;
+       u8 *bit_mask = sb->bit_mask;
+       struct mlx5dr_domain *peer;
+       bool source_gvmi_set;
+
+       DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
+
+       if (sb->vhca_id_valid) {
+               peer = xa_load(&dmn->peer_dmn_xa, id);
+               /* Find port GVMI based on the eswitch_owner_vhca_id */
+               if (id == dmn->info.caps.gvmi)
+                       vport_dmn = dmn;
+               else if (peer && (id == peer->info.caps.gvmi))
+                       vport_dmn = peer;
+               else
+                       return -EINVAL;
+
+               misc->source_eswitch_owner_vhca_id = 0;
+       } else {
+               vport_dmn = dmn;
+       }
+
+       /* Only translate the port when the mask actually matches on it */
+       source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
+       if (source_gvmi_set) {
+               vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn,
+                                                       misc->source_port);
+               if (!vport_cap) {
+                       mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
+                                  misc->source_port);
+                       return -EINVAL;
+               }
+
+               if (vport_cap->vport_gvmi)
+                       MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
+
+               misc->source_port = 0;
+       }
+
+       return 0;
+}
+
+/* Initialize a SRC_GVMI_AND_QP ste_build: compute bit/byte masks and
+ * install the tag-builder callback.
+ */
+static void
+dr_ste_v0_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+                                 struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V0_LU_TYPE_SRC_GVMI_AND_QP;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_src_gvmi_qpn_tag;
+}
+
+/* Write one misc4 prog-sample value, big-endian, into the flex parser
+ * slot named by *misc4_field_id. Skips out-of-range ids and slots
+ * already claimed via parser_is_used; consumed id/value are zeroed.
+ */
+static void dr_ste_v0_set_flex_parser(u32 *misc4_field_id,
+                                     u32 *misc4_field_value,
+                                     bool *parser_is_used,
+                                     u8 *tag)
+{
+       u32 id = *misc4_field_id;
+       u8 *parser_ptr;
+
+       if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
+               return;
+
+       parser_is_used[id] = true;
+       parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
+
+       *(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
+       *misc4_field_id = 0;
+       *misc4_field_value = 0;
+}
+
+/* Build a flex parser tag from the four misc4 prog-sample id/value
+ * pairs, tracking used slots so each parser is written at most once.
+ * Always returns 0.
+ */
+static int dr_ste_v0_build_flex_parser_tag(struct mlx5dr_match_param *value,
+                                          struct mlx5dr_ste_build *sb,
+                                          u8 *tag)
+{
+       struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
+       bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
+
+       dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
+                                 &misc_4_mask->prog_sample_field_value_0,
+                                 parser_is_used, tag);
+
+       dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
+                                 &misc_4_mask->prog_sample_field_value_1,
+                                 parser_is_used, tag);
+
+       dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
+                                 &misc_4_mask->prog_sample_field_value_2,
+                                 parser_is_used, tag);
+
+       dr_ste_v0_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
+                                 &misc_4_mask->prog_sample_field_value_3,
+                                 parser_is_used, tag);
+
+       return 0;
+}
+
+/* Initialize a FLEX_PARSER_0 ste_build (flex parsers 0-3); the tag
+ * builder doubles as the bit-mask builder.
+ */
+static void dr_ste_v0_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+                                              struct mlx5dr_match_param *mask)
+{
+       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+       dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
+}
+
+/* Initialize a FLEX_PARSER_1 ste_build (flex parsers 4-7); the tag
+ * builder doubles as the bit-mask builder.
+ */
+static void dr_ste_v0_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+                                              struct mlx5dr_match_param *mask)
+{
+       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+       dr_ste_v0_build_flex_parser_tag(mask, sb, sb->bit_mask);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tag;
+}
+
+/* Build the GENEVE TLV option tag: write option-0 data into the flex
+ * parser slot assigned by the device caps. Consumed misc3 data is
+ * zeroed. Always returns 0.
+ */
+static int
+dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
+                                                  struct mlx5dr_ste_build *sb,
+                                                  u8 *tag)
+{
+       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+       u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
+       u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+
+       MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
+                misc3->geneve_tlv_option_0_data);
+       misc3->geneve_tlv_option_0_data = 0;
+
+       return 0;
+}
+
+/* Initialize a GENEVE TLV option ste_build; the lookup type depends on
+ * which flex parser the firmware assigned for TLV option 0.
+ */
+static void
+dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+                                                   struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
+
+       /* STEs with lookup type FLEX_PARSER_{0/1} includes
+        * flex parsers_{0-3}/{4-7} respectively.
+        */
+       sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
+               DR_STE_V0_LU_TYPE_FLEX_PARSER_1 :
+               DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_tag;
+}
+
+/* Build the tag for matching on GTP-U header fields
+ * (msg_flags, msg_type and TEID) taken from misc3.
+ */
+static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
+                                                   struct mlx5dr_ste_build *sb,
+                                                   u8 *tag)
+{
+       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
+                      gtpu_msg_flags, misc3,
+                      gtpu_msg_flags);
+       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
+                      gtpu_msg_type, misc3,
+                      gtpu_msg_type);
+       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag,
+                      gtpu_teid, misc3,
+                      gtpu_teid);
+
+       return 0;
+}
+
+/* Init the STE builder for GTP-U tunnel header matching. */
+static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+                                                     struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_flex_parser_tnl_gtpu_tag;
+}
+
+/* Build the tag for GTP-U fields whose assigned flex parser id falls in
+ * the first parser set (ids 0-3); each field is written only if its
+ * parser id belongs to this set.
+ */
+static int
+dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
+                                          struct mlx5dr_ste_build *sb,
+                                          u8 *tag)
+{
+       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+       return 0;
+}
+
+/* Init the STE builder for GTP-U matching via flex parsers 0-3. */
+static void
+dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_0;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag;
+}
+
+/* Same as the _0 variant, but for GTP-U fields assigned to the second
+ * flex parser set (ids 4-7).
+ */
+static int
+dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
+                                          struct mlx5dr_ste_build *sb,
+                                          u8 *tag)
+{
+       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+       return 0;
+}
+
+/* Init the STE builder for GTP-U matching via flex parsers 4-7. */
+static void
+dr_ste_v0_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask)
+{
+       dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V0_LU_TYPE_FLEX_PARSER_1;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag;
+}
+
+/* Build the tag for matching on the first two dwords of the tunnel
+ * header (misc5 tunnel_header_0/1).
+ */
+static int dr_ste_v0_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
+                                             struct mlx5dr_ste_build *sb,
+                                             u8 *tag)
+{
+       struct mlx5dr_match_misc5 *misc5 = &value->misc5;
+
+       DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
+       DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
+
+       return 0;
+}
+
+/* Init the STE builder for tunnel header dword 0/1 matching. */
+static void dr_ste_v0_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+                                               struct mlx5dr_match_param *mask)
+{
+       sb->lu_type = DR_STE_V0_LU_TYPE_TUNNEL_HEADER;
+       dr_ste_v0_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v0_build_tnl_header_0_1_tag;
+}
+
+/* SW steering operations table for STE format v0: matcher builders,
+ * STE field accessors and action programming callbacks.
+ */
+static struct mlx5dr_ste_ctx ste_ctx_v0 = {
+       /* Builders */
+       .build_eth_l2_src_dst_init      = &dr_ste_v0_build_eth_l2_src_dst_init,
+       .build_eth_l3_ipv6_src_init     = &dr_ste_v0_build_eth_l3_ipv6_src_init,
+       .build_eth_l3_ipv6_dst_init     = &dr_ste_v0_build_eth_l3_ipv6_dst_init,
+       .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v0_build_eth_l3_ipv4_5_tuple_init,
+       .build_eth_l2_src_init          = &dr_ste_v0_build_eth_l2_src_init,
+       .build_eth_l2_dst_init          = &dr_ste_v0_build_eth_l2_dst_init,
+       .build_eth_l2_tnl_init          = &dr_ste_v0_build_eth_l2_tnl_init,
+       .build_eth_l3_ipv4_misc_init    = &dr_ste_v0_build_eth_l3_ipv4_misc_init,
+       .build_eth_ipv6_l3_l4_init      = &dr_ste_v0_build_eth_ipv6_l3_l4_init,
+       .build_mpls_init                = &dr_ste_v0_build_mpls_init,
+       .build_tnl_gre_init             = &dr_ste_v0_build_tnl_gre_init,
+       .build_tnl_mpls_init            = &dr_ste_v0_build_tnl_mpls_init,
+       .build_tnl_mpls_over_udp_init   = &dr_ste_v0_build_tnl_mpls_over_udp_init,
+       .build_tnl_mpls_over_gre_init   = &dr_ste_v0_build_tnl_mpls_over_gre_init,
+       .build_icmp_init                = &dr_ste_v0_build_icmp_init,
+       .build_general_purpose_init     = &dr_ste_v0_build_general_purpose_init,
+       .build_eth_l4_misc_init         = &dr_ste_v0_build_eth_l4_misc_init,
+       .build_tnl_vxlan_gpe_init       = &dr_ste_v0_build_flex_parser_tnl_vxlan_gpe_init,
+       .build_tnl_geneve_init          = &dr_ste_v0_build_flex_parser_tnl_geneve_init,
+       .build_tnl_geneve_tlv_opt_init  = &dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init,
+       .build_register_0_init          = &dr_ste_v0_build_register_0_init,
+       .build_register_1_init          = &dr_ste_v0_build_register_1_init,
+       .build_src_gvmi_qpn_init        = &dr_ste_v0_build_src_gvmi_qpn_init,
+       .build_flex_parser_0_init       = &dr_ste_v0_build_flex_parser_0_init,
+       .build_flex_parser_1_init       = &dr_ste_v0_build_flex_parser_1_init,
+       .build_tnl_gtpu_init            = &dr_ste_v0_build_flex_parser_tnl_gtpu_init,
+       .build_tnl_header_0_1_init      = &dr_ste_v0_build_tnl_header_0_1_init,
+       .build_tnl_gtpu_flex_parser_0_init   = &dr_ste_v0_build_tnl_gtpu_flex_parser_0_init,
+       .build_tnl_gtpu_flex_parser_1_init   = &dr_ste_v0_build_tnl_gtpu_flex_parser_1_init,
+
+       /* Getters and Setters */
+       .ste_init                       = &dr_ste_v0_init,
+       .set_next_lu_type               = &dr_ste_v0_set_next_lu_type,
+       .get_next_lu_type               = &dr_ste_v0_get_next_lu_type,
+       .set_miss_addr                  = &dr_ste_v0_set_miss_addr,
+       .get_miss_addr                  = &dr_ste_v0_get_miss_addr,
+       .set_hit_addr                   = &dr_ste_v0_set_hit_addr,
+       .set_byte_mask                  = &dr_ste_v0_set_byte_mask,
+       .get_byte_mask                  = &dr_ste_v0_get_byte_mask,
+
+       /* Actions */
+       .actions_caps                   = DR_STE_CTX_ACTION_CAP_NONE,
+       .set_actions_rx                 = &dr_ste_v0_set_actions_rx,
+       .set_actions_tx                 = &dr_ste_v0_set_actions_tx,
+       .modify_field_arr_sz            = ARRAY_SIZE(dr_ste_v0_action_modify_field_arr),
+       .modify_field_arr               = dr_ste_v0_action_modify_field_arr,
+       .set_action_set                 = &dr_ste_v0_set_action_set,
+       .set_action_add                 = &dr_ste_v0_set_action_add,
+       .set_action_copy                = &dr_ste_v0_set_action_copy,
+       .set_action_decap_l3_list       = &dr_ste_v0_set_action_decap_l3_list,
+};
+
+/* Return the shared, statically-allocated v0 STE context. */
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v0(void)
+{
+       return &ste_ctx_v0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.c
new file mode 100644 (file)
index 0000000..1d49704
--- /dev/null
@@ -0,0 +1,2341 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#include <linux/types.h>
+#include "mlx5_ifc_dr_ste_v1.h"
+#include "dr_ste_v1.h"
+
+/* Resolve the inner (_I) or outer (_O) variant of a v1 lookup type */
+#define DR_STE_CALC_DFNR_TYPE(lookup_type, inner) \
+       ((inner) ? DR_STE_V1_LU_TYPE_##lookup_type##_I : \
+                  DR_STE_V1_LU_TYPE_##lookup_type##_O)
+
+enum dr_ste_v1_entry_format {
+       DR_STE_V1_TYPE_BWC_BYTE = 0x0,
+       DR_STE_V1_TYPE_BWC_DW   = 0x1,
+       DR_STE_V1_TYPE_MATCH    = 0x2,
+       DR_STE_V1_TYPE_MATCH_RANGES = 0x7,
+};
+
+/* Lookup type is built from 2B: [ Definer mode 1B ][ Definer index 1B ] */
+enum {
+       DR_STE_V1_LU_TYPE_NOP                           = 0x0000,
+       DR_STE_V1_LU_TYPE_ETHL2_TNL                     = 0x0002,
+       DR_STE_V1_LU_TYPE_IBL3_EXT                      = 0x0102,
+       DR_STE_V1_LU_TYPE_ETHL2_O                       = 0x0003,
+       DR_STE_V1_LU_TYPE_IBL4                          = 0x0103,
+       DR_STE_V1_LU_TYPE_ETHL2_I                       = 0x0004,
+       DR_STE_V1_LU_TYPE_SRC_QP_GVMI                   = 0x0104,
+       DR_STE_V1_LU_TYPE_ETHL2_SRC_O                   = 0x0005,
+       DR_STE_V1_LU_TYPE_ETHL2_HEADERS_O               = 0x0105,
+       DR_STE_V1_LU_TYPE_ETHL2_SRC_I                   = 0x0006,
+       DR_STE_V1_LU_TYPE_ETHL2_HEADERS_I               = 0x0106,
+       DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_O          = 0x0007,
+       DR_STE_V1_LU_TYPE_IPV6_DES_O                    = 0x0107,
+       DR_STE_V1_LU_TYPE_ETHL3_IPV4_5_TUPLE_I          = 0x0008,
+       DR_STE_V1_LU_TYPE_IPV6_DES_I                    = 0x0108,
+       DR_STE_V1_LU_TYPE_ETHL4_O                       = 0x0009,
+       DR_STE_V1_LU_TYPE_IPV6_SRC_O                    = 0x0109,
+       DR_STE_V1_LU_TYPE_ETHL4_I                       = 0x000a,
+       DR_STE_V1_LU_TYPE_IPV6_SRC_I                    = 0x010a,
+       DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_O               = 0x000b,
+       DR_STE_V1_LU_TYPE_MPLS_O                        = 0x010b,
+       DR_STE_V1_LU_TYPE_ETHL2_SRC_DST_I               = 0x000c,
+       DR_STE_V1_LU_TYPE_MPLS_I                        = 0x010c,
+       DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_O             = 0x000d,
+       DR_STE_V1_LU_TYPE_GRE                           = 0x010d,
+       DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER        = 0x000e,
+       DR_STE_V1_LU_TYPE_GENERAL_PURPOSE               = 0x010e,
+       DR_STE_V1_LU_TYPE_ETHL3_IPV4_MISC_I             = 0x000f,
+       DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0          = 0x010f,
+       DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1          = 0x0110,
+       DR_STE_V1_LU_TYPE_FLEX_PARSER_OK                = 0x0011,
+       DR_STE_V1_LU_TYPE_FLEX_PARSER_0                 = 0x0111,
+       DR_STE_V1_LU_TYPE_FLEX_PARSER_1                 = 0x0112,
+       DR_STE_V1_LU_TYPE_ETHL4_MISC_O                  = 0x0113,
+       DR_STE_V1_LU_TYPE_ETHL4_MISC_I                  = 0x0114,
+       DR_STE_V1_LU_TYPE_INVALID                       = 0x00ff,
+       DR_STE_V1_LU_TYPE_DONT_CARE                     = MLX5DR_STE_LU_TYPE_DONT_CARE,
+};
+
+/* Packet header anchors used as start/end references by the
+ * insert/remove header actions below.
+ */
+enum dr_ste_v1_header_anchors {
+       DR_STE_HEADER_ANCHOR_START_OUTER                = 0x00,
+       DR_STE_HEADER_ANCHOR_1ST_VLAN                   = 0x02,
+       DR_STE_HEADER_ANCHOR_IPV6_IPV4                  = 0x07,
+       DR_STE_HEADER_ANCHOR_INNER_MAC                  = 0x13,
+       DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4            = 0x19,
+};
+
+/* Size in bytes occupied by each action slot kind within an STE */
+enum dr_ste_v1_action_size {
+       DR_STE_ACTION_SINGLE_SZ = 4,
+       DR_STE_ACTION_DOUBLE_SZ = 8,
+       DR_STE_ACTION_TRIPLE_SZ = 12,
+};
+
+enum dr_ste_v1_action_insert_ptr_attr {
+       DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE = 0,  /* Regular push header (e.g. push vlan) */
+       DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP = 1, /* Encapsulation / Tunneling */
+       DR_STE_V1_ACTION_INSERT_PTR_ATTR_ESP = 2,   /* IPsec */
+};
+
+enum dr_ste_v1_action_id {
+       DR_STE_V1_ACTION_ID_NOP                         = 0x00,
+       DR_STE_V1_ACTION_ID_COPY                        = 0x05,
+       DR_STE_V1_ACTION_ID_SET                         = 0x06,
+       DR_STE_V1_ACTION_ID_ADD                         = 0x07,
+       DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE              = 0x08,
+       DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER     = 0x09,
+       DR_STE_V1_ACTION_ID_INSERT_INLINE               = 0x0a,
+       DR_STE_V1_ACTION_ID_INSERT_POINTER              = 0x0b,
+       DR_STE_V1_ACTION_ID_FLOW_TAG                    = 0x0c,
+       DR_STE_V1_ACTION_ID_QUEUE_ID_SEL                = 0x0d,
+       DR_STE_V1_ACTION_ID_ACCELERATED_LIST            = 0x0e,
+       DR_STE_V1_ACTION_ID_MODIFY_LIST                 = 0x0f,
+       DR_STE_V1_ACTION_ID_ASO                         = 0x12,
+       DR_STE_V1_ACTION_ID_TRAILER                     = 0x13,
+       DR_STE_V1_ACTION_ID_COUNTER_ID                  = 0x14,
+       DR_STE_V1_ACTION_ID_MAX                         = 0x21,
+       /* use for special cases */
+       DR_STE_V1_ACTION_ID_SPECIAL_ENCAP_L3            = 0x22,
+};
+
+/* HW field ids used by the modify-header (set/add/copy) actions */
+enum {
+       DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0              = 0x00,
+       DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1              = 0x01,
+       DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2              = 0x02,
+       DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0          = 0x08,
+       DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1          = 0x09,
+       DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0              = 0x0e,
+       DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0              = 0x18,
+       DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1              = 0x19,
+       DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0            = 0x40,
+       DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1            = 0x41,
+       DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0        = 0x44,
+       DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1        = 0x45,
+       DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2        = 0x46,
+       DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3        = 0x47,
+       DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0        = 0x4c,
+       DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1        = 0x4d,
+       DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2        = 0x4e,
+       DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3        = 0x4f,
+       DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0            = 0x5e,
+       DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1            = 0x5f,
+       DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0           = 0x6f,
+       DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1           = 0x70,
+       DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE        = 0x7b,
+       DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE          = 0x7c,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0          = 0x8c,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1          = 0x8d,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0          = 0x8e,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1          = 0x8f,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0          = 0x90,
+       DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1          = 0x91,
+};
+
+enum dr_ste_v1_aso_ctx_type {
+       DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
+};
+
+/* Mapping from the SW (MLX5_ACTION_IN_FIELD_*) modify-header fields to
+ * the STEv1 HW field id, the bit range within it, and - where relevant -
+ * the L3/L4 header type the field applies to.
+ */
+static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
+       [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+               .hw_field = DR_STE_V1_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+       },
+};
+
+/* Set the STE's entry format (one of dr_ste_v1_entry_format) */
+static void dr_ste_v1_set_entry_type(u8 *hw_ste_p, u8 entry_type)
+{
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, entry_type);
+}
+
+/* Report whether this STE carries its miss address as part of its
+ * actions rather than in the regular miss-address fields.
+ */
+bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p)
+{
+       u8 entry_type = MLX5_GET(ste_match_bwc_v1, hw_ste_p, entry_format);
+
+       /* unlike MATCH STE, for MATCH_RANGES STE both hit and miss addresses
+        * are part of the action, so they both set as part of STE init
+        */
+       return entry_type == DR_STE_V1_TYPE_MATCH_RANGES;
+}
+
+/* Program the miss address: the 64B-aligned index is split between the
+ * miss_address_31_6 (26 bits) and miss_address_39_32 fields.
+ */
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr)
+{
+       u64 index = miss_addr >> 6;
+
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32, index >> 26);
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6, index);
+}
+
+/* Reassemble the miss address from the two split fields (inverse of
+ * dr_ste_v1_set_miss_addr).
+ */
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p)
+{
+       u64 index =
+               ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_31_6) |
+                ((u64)MLX5_GET(ste_match_bwc_v1, hw_ste_p, miss_address_39_32)) << 26);
+
+       return index << 6;
+}
+
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask)
+{
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, byte_mask, byte_mask);
+}
+
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p)
+{
+       return MLX5_GET(ste_match_bwc_v1, hw_ste_p, byte_mask);
+}
+
+/* Set this STE's own lookup type: high byte is the definer mode
+ * (entry format), low byte is the definer context index.
+ */
+static void dr_ste_v1_set_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, entry_format, lu_type >> 8);
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, match_definer_ctx_idx, lu_type & 0xFF);
+}
+
+/* Set the lookup type of the next STE in the chain (same 2B encoding
+ * as dr_ste_v1_set_lu_type, written to the "next" fields).
+ */
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type)
+{
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_entry_format, lu_type >> 8);
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx, lu_type & 0xFF);
+}
+
+/* Read back the next STE's lookup type (mode byte | index byte) */
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p)
+{
+       u8 mode = MLX5_GET(ste_match_bwc_v1, hw_ste_p, next_entry_format);
+       u8 index = MLX5_GET(ste_match_bwc_v1, hw_ste_p, hash_definer_ctx_idx);
+
+       return (mode << 8 | index);
+}
+
+/* Set the GVMI (vport identifier) for the hit (next table) address */
+static void dr_ste_v1_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi)
+{
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
+}
+
+/* Program the hit address: the 32B-aligned ICM address index is OR'd
+ * with the hash table size and split across the two next_table_base
+ * fields.
+ */
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size)
+{
+       u64 index = (icm_addr >> 5) | ht_size;
+
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_39_32_size, index >> 27);
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_31_5_size, index);
+}
+
+/* Initialize a fresh STE: set its lookup type, mark the next lookup as
+ * don't-care, and stamp the owner GVMI on the entry itself and on both
+ * the hit and miss addresses.
+ */
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi)
+{
+       dr_ste_v1_set_lu_type(hw_ste_p, lu_type);
+       dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE);
+
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, gvmi, gvmi);
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, next_table_base_63_48, gvmi);
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, miss_address_63_48, gvmi);
+}
+
+/* Prepare an STE for posting to HW by swapping its tag and mask
+ * regions in place. A control-only write (DR_STE_SIZE_CTRL) has no
+ * tag/mask to swap; any other size must be a full STE.
+ */
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size)
+{
+       u8 *tag = hw_ste_p + DR_STE_SIZE_CTRL;
+       u8 *mask = tag + DR_STE_SIZE_TAG;
+       u8 tmp_tag[DR_STE_SIZE_TAG] = {};
+
+       if (ste_size == DR_STE_SIZE_CTRL)
+               return;
+
+       WARN_ON(ste_size != DR_STE_SIZE);
+
+       /* Backup tag */
+       memcpy(tmp_tag, tag, DR_STE_SIZE_TAG);
+
+       /* Swap mask and tag  both are the same size */
+       memcpy(tag, mask, DR_STE_SIZE_MASK);
+       memcpy(mask, tmp_tag, DR_STE_SIZE_TAG);
+}
+
+/* Program a single FLOW_TAG action carrying the given flow tag value */
+static void dr_ste_v1_set_rx_flow_tag(u8 *s_action, u32 flow_tag)
+{
+       MLX5_SET(ste_single_action_flow_tag_v1, s_action, action_id,
+                DR_STE_V1_ACTION_ID_FLOW_TAG);
+       MLX5_SET(ste_single_action_flow_tag_v1, s_action, flow_tag, flow_tag);
+}
+
+/* Attach a flow counter to the STE by its counter id */
+static void dr_ste_v1_set_counter_id(u8 *hw_ste_p, u32 ctr_id)
+{
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, counter_id, ctr_id);
+}
+
+/* Request re-parsing of the packet after this STE's actions have
+ * modified it; set by every action helper that changes the packet.
+ */
+static void dr_ste_v1_set_reparse(u8 *hw_ste_p)
+{
+       MLX5_SET(ste_match_bwc_v1, hw_ste_p, reparse, 1);
+}
+
+/* Program an encap action: insert-by-pointer of a reformat buffer,
+ * marked with the ENCAP attribute.
+ */
+static void dr_ste_v1_set_encap(u8 *hw_ste_p, u8 *d_action,
+                               u32 reformat_id, int size)
+{
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, action_id,
+                DR_STE_V1_ACTION_ID_INSERT_POINTER);
+       /* The hardware expects here size in words (2 byte) */
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
+                DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+       dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+/* Program a generic insert-header action: insert the reformat buffer
+ * at 'offset' (bytes) from the given header anchor.
+ */
+static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action,
+                                    u32 reformat_id,
+                                    u8 anchor, u8 offset,
+                                    int size)
+{
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action,
+                action_id, DR_STE_V1_ACTION_ID_INSERT_POINTER);
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_anchor, anchor);
+
+       /* The hardware expects here size and offset in words (2 byte) */
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, size, size / 2);
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, start_offset, offset / 2);
+
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, pointer, reformat_id);
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, d_action, attributes,
+                DR_STE_V1_ACTION_INSERT_PTR_ATTR_NONE);
+
+       dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+/* Program a remove-header-by-size action: strip 'size' bytes starting
+ * at 'offset' from the given header anchor.
+ */
+static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action,
+                                    u8 anchor, u8 offset,
+                                    int size)
+{
+       MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
+                action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+       MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor);
+
+       /* The hardware expects here size and offset in words (2 byte) */
+       MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2);
+       MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2);
+
+       dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+/* Push a VLAN header: insert the inline vlan_hdr dword right after the
+ * L2 MAC addresses.
+ */
+static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action,
+                                   u32 vlan_hdr)
+{
+       MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
+                action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE);
+       /* The hardware expects offset to vlan header in words (2 byte) */
+       MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
+                start_offset, HDR_LEN_L2_MACS >> 1);
+       MLX5_SET(ste_double_action_insert_with_inline_v1, d_action,
+                inline_data, vlan_hdr);
+
+       dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+/* Pop 'vlans_num' VLAN headers by removing the corresponding number of
+ * bytes starting from the first-VLAN anchor.
+ */
+static void dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num)
+{
+       MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
+                action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+       MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
+                start_anchor, DR_STE_HEADER_ANCHOR_1ST_VLAN);
+       /* The hardware expects here size in words (2 byte) */
+       MLX5_SET(ste_single_action_remove_header_size_v1, s_action,
+                remove_size, (HDR_LEN_L2_VLAN >> 1) * vlans_num);
+
+       dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+/* Program L3 encap as two actions: first strip the L2 headers (remove
+ * up to the IP anchor), then insert the reformat buffer by pointer
+ * with the ENCAP attribute.
+ */
+static void dr_ste_v1_set_encap_l3(u8 *hw_ste_p,
+                                  u8 *frst_s_action,
+                                  u8 *scnd_d_action,
+                                  u32 reformat_id,
+                                  int size)
+{
+       /* Remove L2 headers */
+       MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, action_id,
+                DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+       MLX5_SET(ste_single_action_remove_header_v1, frst_s_action, end_anchor,
+                DR_STE_HEADER_ANCHOR_IPV6_IPV4);
+
+       /* Encapsulate with given reformat ID */
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, action_id,
+                DR_STE_V1_ACTION_ID_INSERT_POINTER);
+       /* The hardware expects here size in words (2 byte) */
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, size, size / 2);
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, pointer, reformat_id);
+       MLX5_SET(ste_double_action_insert_with_ptr_v1, scnd_d_action, attributes,
+                DR_STE_V1_ACTION_INSERT_PTR_ATTR_ENCAP);
+
+       dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+/* Program RX decap: remove the outer headers up to the inner MAC
+ * anchor, with VNI-to-CQE reporting enabled.
+ */
+static void dr_ste_v1_set_rx_decap(u8 *hw_ste_p, u8 *s_action)
+{
+       MLX5_SET(ste_single_action_remove_header_v1, s_action, action_id,
+                DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+       MLX5_SET(ste_single_action_remove_header_v1, s_action, decap, 1);
+       MLX5_SET(ste_single_action_remove_header_v1, s_action, vni_to_cqe, 1);
+       MLX5_SET(ste_single_action_remove_header_v1, s_action, end_anchor,
+                DR_STE_HEADER_ANCHOR_INNER_MAC);
+
+       dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+/* Build an accelerated modify-header action. When @action_data is provided,
+ * the caller already holds a single precomputed modify action and it is
+ * copied in verbatim; otherwise a double action referencing the pre-created
+ * pattern (@rewrite_pattern) and argument (@rewrite_args) objects is built.
+ */
+static void dr_ste_v1_set_accelerated_rewrite_actions(u8 *hw_ste_p,
+                                                     u8 *d_action,
+                                                     u16 num_of_actions,
+                                                     u32 rewrite_pattern,
+                                                     u32 rewrite_args,
+                                                     u8 *action_data)
+{
+       if (action_data) {
+               /* Single inline modify action - copied as-is */
+               memcpy(d_action, action_data, DR_MODIFY_ACTION_SIZE);
+       } else {
+               MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+                        action_id, DR_STE_V1_ACTION_ID_ACCELERATED_LIST);
+               MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+                        modify_actions_pattern_pointer, rewrite_pattern);
+               MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+                        number_of_modify_actions, num_of_actions);
+               MLX5_SET(ste_double_action_accelerated_modify_action_list_v1, d_action,
+                        modify_actions_argument_pointer, rewrite_args);
+       }
+
+       dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+/* Build a basic (non-accelerated) modify-header list action that points to
+ * @num_of_actions modify actions stored in ICM at @rewrite_index.
+ */
+static void dr_ste_v1_set_basic_rewrite_actions(u8 *hw_ste_p,
+                                               u8 *s_action,
+                                               u16 num_of_actions,
+                                               u32 rewrite_index)
+{
+       MLX5_SET(ste_single_action_modify_list_v1, s_action, action_id,
+                DR_STE_V1_ACTION_ID_MODIFY_LIST);
+       MLX5_SET(ste_single_action_modify_list_v1, s_action, num_of_modify_actions,
+                num_of_actions);
+       MLX5_SET(ste_single_action_modify_list_v1, s_action, modify_actions_ptr,
+                rewrite_index);
+
+       dr_ste_v1_set_reparse(hw_ste_p);
+}
+
+/* Dispatch modify-header building: use the accelerated flavor when a valid
+ * pattern index is supplied, otherwise the basic modify-list flavor.
+ */
+static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
+                                         u8 *action,
+                                         u16 num_of_actions,
+                                         u32 rewrite_pattern,
+                                         u32 rewrite_args,
+                                         u8 *action_data)
+{
+       if (rewrite_pattern != MLX5DR_INVALID_PATTERN_INDEX)
+               return dr_ste_v1_set_accelerated_rewrite_actions(hw_ste_p,
+                                                                action,
+                                                                num_of_actions,
+                                                                rewrite_pattern,
+                                                                rewrite_args,
+                                                                action_data);
+
+       /* fall back to the code that doesn't support accelerated modify header */
+       return dr_ste_v1_set_basic_rewrite_actions(hw_ste_p,
+                                                  action,
+                                                  num_of_actions,
+                                                  rewrite_args);
+}
+
+/* Build an ASO flow-meter double action. @offset selects a meter within the
+ * ASO object range: it contributes to both the context number (object) and
+ * the line id within that object. The meter result is written to the reg_c
+ * pair identified by @dest_reg_id.
+ */
+static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
+                                        u32 object_id,
+                                        u32 offset,
+                                        u8 dest_reg_id,
+                                        u8 init_color)
+{
+       MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
+                DR_STE_V1_ACTION_ID_ASO);
+       MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
+                object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
+       /* Convert reg_c index to HW 64bit index */
+       MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
+                (dest_reg_id - 1) / 2);
+       MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
+                DR_STE_V1_ASO_CTX_TYPE_POLICERS);
+       MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
+                offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
+       MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
+                init_color);
+}
+
+/* Program a match-ranges STE to range-match on packet length [min, max]
+ * using the definer identified by @definer_id.
+ */
+static void dr_ste_v1_set_match_range_pkt_len(u8 *hw_ste_p, u32 definer_id,
+                                             u32 min, u32 max)
+{
+       MLX5_SET(ste_match_ranges_v1, hw_ste_p, match_definer_ctx_idx, definer_id);
+
+       /* When the STE will be sent, its mask and tags will be swapped in
+        * dr_ste_v1_prepare_for_postsend(). This, however, is match range STE
+        * which doesn't have mask, and shouldn't have mask/tag swapped.
+        * We're using the common utilities functions to send this STE, so need
+        * to allow for this swapping - place the values in the corresponding
+        * locations to allow flipping them when writing to ICM.
+        *
+        * min/max_value_2 corresponds to match_dw_0 in its definer.
+        * To allow mask/tag swapping, writing the min/max_2 to min/max_0.
+        *
+        * Pkt len is 2 bytes that are stored in the higher section of the DW.
+        */
+       MLX5_SET(ste_match_ranges_v1, hw_ste_p, min_value_0, min << 16);
+       MLX5_SET(ste_match_ranges_v1, hw_ste_p, max_value_0, max << 16);
+}
+
+/* Append a fresh don't-care match STE to the STE array: advance *last_ste
+ * to the next STE slot, initialize it as a MATCH-type STE with cleared
+ * action fields, and bump *added_stes so the caller can account for it.
+ */
+static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
+                                         u32 *added_stes,
+                                         u16 gvmi)
+{
+       u8 *action;
+
+       (*added_stes)++;
+       *last_ste += DR_STE_SIZE;
+       dr_ste_v1_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, 0, gvmi);
+       dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH);
+
+       action = MLX5_ADDR_OF(ste_mask_and_match_v1, *last_ste, action);
+       memset(action, 0, MLX5_FLD_SZ_BYTES(ste_mask_and_match_v1, action));
+}
+
+/* Same as dr_ste_v1_arr_init_next_match(), but retype the new STE as a
+ * MATCH_RANGES STE for range matching.
+ */
+static void dr_ste_v1_arr_init_next_match_range(u8 **last_ste,
+                                               u32 *added_stes,
+                                               u16 gvmi)
+{
+       dr_ste_v1_arr_init_next_match(last_ste, added_stes, gvmi);
+       dr_ste_v1_set_entry_type(*last_ste, DR_STE_V1_TYPE_MATCH_RANGES);
+}
+
+/* Fill the action fields of a TX STE array according to the requested
+ * actions in @action_type_set. Actions are emitted in a fixed order; when
+ * an action does not fit in the remaining action slots of the current STE,
+ * or may not share an STE with a previously emitted action, a new match-all
+ * STE is appended and *added_stes is incremented. On return the last STE
+ * carries the hit gvmi/address of the rule's final destination.
+ */
+void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
+                             u8 *action_type_set,
+                             u32 actions_caps,
+                             u8 *last_ste,
+                             struct mlx5dr_ste_actions_attr *attr,
+                             u32 *added_stes)
+{
+       u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
+       u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
+       bool allow_modify_hdr = true;
+       bool allow_encap = true;
+
+       if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+               if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
+                                                     attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1,
+                                             last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+               }
+               dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+               action_sz -= DR_STE_ACTION_SINGLE_SZ;
+               action += DR_STE_ACTION_SINGLE_SZ;
+
+               /* Check if vlan_pop and modify_hdr on same STE is supported */
+               if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
+                       allow_modify_hdr = false;
+       }
+
+       if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+               if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes,
+                                                     attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1,
+                                             last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+               }
+               dr_ste_v1_set_rewrite_actions(last_ste, action,
+                                             attr->modify_actions,
+                                             attr->modify_pat_idx,
+                                             attr->modify_index,
+                                             attr->single_modify_action);
+               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+               action += DR_STE_ACTION_DOUBLE_SZ;
+               /* Encap may not share an STE with modify header - force a
+                * new STE for any subsequent push-vlan/encap action.
+                */
+               allow_encap = false;
+       }
+
+       if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+               int i;
+
+               for (i = 0; i < attr->vlans.count; i++) {
+                       if (action_sz < DR_STE_ACTION_DOUBLE_SZ || !allow_encap) {
+                               dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                               action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                               action_sz = DR_STE_ACTION_TRIPLE_SZ;
+                               allow_encap = true;
+                       }
+                       dr_ste_v1_set_push_vlan(last_ste, action,
+                                               attr->vlans.headers[i]);
+                       action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+                       action += DR_STE_ACTION_DOUBLE_SZ;
+               }
+       }
+
+       /* The reformat actions below are mutually exclusive per rule */
+       if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
+               if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+                       allow_encap = true;
+               }
+               dr_ste_v1_set_encap(last_ste, action,
+                                   attr->reformat.id,
+                                   attr->reformat.size);
+               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+               action += DR_STE_ACTION_DOUBLE_SZ;
+       } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
+               u8 *d_action;
+
+               /* L2-to-L3 encap needs a single + a double action slot */
+               if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+               }
+               d_action = action + DR_STE_ACTION_SINGLE_SZ;
+
+               dr_ste_v1_set_encap_l3(last_ste,
+                                      action, d_action,
+                                      attr->reformat.id,
+                                      attr->reformat.size);
+               action_sz -= DR_STE_ACTION_TRIPLE_SZ;
+               action += DR_STE_ACTION_TRIPLE_SZ;
+       } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
+               if (!allow_encap || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+               }
+               dr_ste_v1_set_insert_hdr(last_ste, action,
+                                        attr->reformat.id,
+                                        attr->reformat.param_0,
+                                        attr->reformat.param_1,
+                                        attr->reformat.size);
+               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+               action += DR_STE_ACTION_DOUBLE_SZ;
+       } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
+               if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+               }
+               dr_ste_v1_set_remove_hdr(last_ste, action,
+                                        attr->reformat.param_0,
+                                        attr->reformat.param_1,
+                                        attr->reformat.size);
+               action_sz -= DR_STE_ACTION_SINGLE_SZ;
+               action += DR_STE_ACTION_SINGLE_SZ;
+       }
+
+       if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+               if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+               }
+               dr_ste_v1_set_aso_flow_meter(action,
+                                            attr->aso_flow_meter.obj_id,
+                                            attr->aso_flow_meter.offset,
+                                            attr->aso_flow_meter.dest_reg_id,
+                                            attr->aso_flow_meter.init_color);
+               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+               action += DR_STE_ACTION_DOUBLE_SZ;
+       }
+
+       if (action_type_set[DR_ACTION_TYP_RANGE]) {
+               /* match ranges requires a new STE of its own type */
+               dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
+               dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
+
+               /* we do not support setting any action on the match ranges STE */
+               action_sz = 0;
+
+               dr_ste_v1_set_match_range_pkt_len(last_ste,
+                                                 attr->range.definer_id,
+                                                 attr->range.min,
+                                                 attr->range.max);
+       }
+
+       /* set counter ID on the last STE to adhere to DMFS behavior */
+       if (action_type_set[DR_ACTION_TYP_CTR])
+               dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
+
+       dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
+       dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+/* Fill the action fields of an RX STE array according to the requested
+ * actions in @action_type_set. RX ordering differs from TX: decap comes
+ * first, and the counter is placed after decap / before insert_hdr so that
+ * counted bytes exclude the removed / inserted headers. When an action does
+ * not fit in the current STE or may not share it with a previous action, a
+ * new match-all STE is appended and *added_stes is incremented.
+ */
+void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
+                             u8 *action_type_set,
+                             u32 actions_caps,
+                             u8 *last_ste,
+                             struct mlx5dr_ste_actions_attr *attr,
+                             u32 *added_stes)
+{
+       u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action);
+       u8 action_sz = DR_STE_ACTION_DOUBLE_SZ;
+       bool allow_modify_hdr = true;
+       bool allow_ctr = true;
+
+       if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) {
+               dr_ste_v1_set_rewrite_actions(last_ste, action,
+                                             attr->decap_actions,
+                                             attr->decap_pat_idx,
+                                             attr->decap_index,
+                                             NULL);
+               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+               action += DR_STE_ACTION_DOUBLE_SZ;
+               allow_modify_hdr = false;
+               allow_ctr = false;
+       } else if (action_type_set[DR_ACTION_TYP_TNL_L2_TO_L2]) {
+               dr_ste_v1_set_rx_decap(last_ste, action);
+               action_sz -= DR_STE_ACTION_SINGLE_SZ;
+               action += DR_STE_ACTION_SINGLE_SZ;
+               allow_modify_hdr = false;
+               allow_ctr = false;
+       }
+
+       if (action_type_set[DR_ACTION_TYP_TAG]) {
+               if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+                       allow_modify_hdr = true;
+                       allow_ctr = true;
+               }
+               dr_ste_v1_set_rx_flow_tag(action, attr->flow_tag);
+               action_sz -= DR_STE_ACTION_SINGLE_SZ;
+               action += DR_STE_ACTION_SINGLE_SZ;
+       }
+
+       if (action_type_set[DR_ACTION_TYP_POP_VLAN]) {
+               if (action_sz < DR_STE_ACTION_SINGLE_SZ ||
+                   !allow_modify_hdr) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+               }
+
+               dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count);
+               action_sz -= DR_STE_ACTION_SINGLE_SZ;
+               action += DR_STE_ACTION_SINGLE_SZ;
+               allow_ctr = false;
+
+               /* Check if vlan_pop and modify_hdr on same STE is supported */
+               if (!(actions_caps & DR_STE_CTX_ACTION_CAP_POP_MDFY))
+                       allow_modify_hdr = false;
+       }
+
+       if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) {
+               /* Modify header and decapsulation must use different STEs */
+               if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+                       allow_modify_hdr = true;
+                       allow_ctr = true;
+               }
+               dr_ste_v1_set_rewrite_actions(last_ste, action,
+                                             attr->modify_actions,
+                                             attr->modify_pat_idx,
+                                             attr->modify_index,
+                                             attr->single_modify_action);
+               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+               action += DR_STE_ACTION_DOUBLE_SZ;
+       }
+
+       if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) {
+               int i;
+
+               for (i = 0; i < attr->vlans.count; i++) {
+                       if (action_sz < DR_STE_ACTION_DOUBLE_SZ ||
+                           !allow_modify_hdr) {
+                               dr_ste_v1_arr_init_next_match(&last_ste,
+                                                             added_stes,
+                                                             attr->gvmi);
+                               action = MLX5_ADDR_OF(ste_mask_and_match_v1,
+                                                     last_ste, action);
+                               action_sz = DR_STE_ACTION_TRIPLE_SZ;
+                       }
+                       dr_ste_v1_set_push_vlan(last_ste, action,
+                                               attr->vlans.headers[i]);
+                       action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+                       action += DR_STE_ACTION_DOUBLE_SZ;
+               }
+       }
+
+       if (action_type_set[DR_ACTION_TYP_CTR]) {
+               /* Counter action set after decap and before insert_hdr
+                * to exclude decaped / encaped header respectively.
+                */
+               if (!allow_ctr) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+                       allow_modify_hdr = true;
+               }
+               dr_ste_v1_set_counter_id(last_ste, attr->ctr_id);
+               allow_ctr = false;
+       }
+
+       /* The reformat actions below are mutually exclusive per rule */
+       if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L2]) {
+               if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+               }
+               dr_ste_v1_set_encap(last_ste, action,
+                                   attr->reformat.id,
+                                   attr->reformat.size);
+               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+               action += DR_STE_ACTION_DOUBLE_SZ;
+               allow_modify_hdr = false;
+       } else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
+               u8 *d_action;
+
+               /* L2-to-L3 encap needs a single + a double action slot */
+               if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+               }
+
+               d_action = action + DR_STE_ACTION_SINGLE_SZ;
+
+               dr_ste_v1_set_encap_l3(last_ste,
+                                      action, d_action,
+                                      attr->reformat.id,
+                                      attr->reformat.size);
+               action_sz -= DR_STE_ACTION_TRIPLE_SZ;
+               allow_modify_hdr = false;
+       } else if (action_type_set[DR_ACTION_TYP_INSERT_HDR]) {
+               /* Modify header, decap, and encap must use different STEs */
+               if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+               }
+               dr_ste_v1_set_insert_hdr(last_ste, action,
+                                        attr->reformat.id,
+                                        attr->reformat.param_0,
+                                        attr->reformat.param_1,
+                                        attr->reformat.size);
+               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+               action += DR_STE_ACTION_DOUBLE_SZ;
+               allow_modify_hdr = false;
+       } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) {
+               if (action_sz < DR_STE_ACTION_SINGLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+                       allow_modify_hdr = true;
+                       allow_ctr = true;
+               }
+               dr_ste_v1_set_remove_hdr(last_ste, action,
+                                        attr->reformat.param_0,
+                                        attr->reformat.param_1,
+                                        attr->reformat.size);
+               action_sz -= DR_STE_ACTION_SINGLE_SZ;
+               action += DR_STE_ACTION_SINGLE_SZ;
+       }
+
+       if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
+               if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
+                       dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+                       action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+                       action_sz = DR_STE_ACTION_TRIPLE_SZ;
+               }
+               dr_ste_v1_set_aso_flow_meter(action,
+                                            attr->aso_flow_meter.obj_id,
+                                            attr->aso_flow_meter.offset,
+                                            attr->aso_flow_meter.dest_reg_id,
+                                            attr->aso_flow_meter.init_color);
+               action_sz -= DR_STE_ACTION_DOUBLE_SZ;
+               action += DR_STE_ACTION_DOUBLE_SZ;
+       }
+
+       if (action_type_set[DR_ACTION_TYP_RANGE]) {
+               /* match ranges requires a new STE of its own type */
+               dr_ste_v1_arr_init_next_match_range(&last_ste, added_stes, attr->gvmi);
+               dr_ste_v1_set_miss_addr(last_ste, attr->range.miss_icm_addr);
+
+               /* we do not support setting any action on the match ranges STE */
+               action_sz = 0;
+
+               dr_ste_v1_set_match_range_pkt_len(last_ste,
+                                                 attr->range.definer_id,
+                                                 attr->range.min,
+                                                 attr->range.max);
+       }
+
+       dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
+       dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
+}
+
+/* Build a modify-header 'set' double action: write @length bits of @data
+ * into HW field @hw_field at bit offset @shifter. The shifter is biased by
+ * MLX5_MODIFY_HEADER_V1_QW_OFFSET — presumably because V1 HW counts the
+ * shift within a 64-bit word; confirm against the STE v1 format.
+ */
+void dr_ste_v1_set_action_set(u8 *d_action,
+                             u8 hw_field,
+                             u8 shifter,
+                             u8 length,
+                             u32 data)
+{
+       shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+       MLX5_SET(ste_double_action_set_v1, d_action, action_id, DR_STE_V1_ACTION_ID_SET);
+       MLX5_SET(ste_double_action_set_v1, d_action, destination_dw_offset, hw_field);
+       MLX5_SET(ste_double_action_set_v1, d_action, destination_left_shifter, shifter);
+       MLX5_SET(ste_double_action_set_v1, d_action, destination_length, length);
+       MLX5_SET(ste_double_action_set_v1, d_action, inline_data, data);
+}
+
+/* Build a modify-header 'add' double action: add @data to @length bits of
+ * HW field @hw_field at bit offset @shifter (QW-offset biased, as in
+ * dr_ste_v1_set_action_set()).
+ */
+void dr_ste_v1_set_action_add(u8 *d_action,
+                             u8 hw_field,
+                             u8 shifter,
+                             u8 length,
+                             u32 data)
+{
+       shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+       MLX5_SET(ste_double_action_add_v1, d_action, action_id, DR_STE_V1_ACTION_ID_ADD);
+       MLX5_SET(ste_double_action_add_v1, d_action, destination_dw_offset, hw_field);
+       MLX5_SET(ste_double_action_add_v1, d_action, destination_left_shifter, shifter);
+       MLX5_SET(ste_double_action_add_v1, d_action, destination_length, length);
+       MLX5_SET(ste_double_action_add_v1, d_action, add_value, data);
+}
+
+/* Build a modify-header 'copy' double action: copy @dst_len bits from HW
+ * field @src_hw_field (at @src_shifter) into @dst_hw_field (at
+ * @dst_shifter). Both shifters are QW-offset biased, as in
+ * dr_ste_v1_set_action_set().
+ */
+void dr_ste_v1_set_action_copy(u8 *d_action,
+                              u8 dst_hw_field,
+                              u8 dst_shifter,
+                              u8 dst_len,
+                              u8 src_hw_field,
+                              u8 src_shifter)
+{
+       dst_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+       src_shifter += MLX5_MODIFY_HEADER_V1_QW_OFFSET;
+       MLX5_SET(ste_double_action_copy_v1, d_action, action_id, DR_STE_V1_ACTION_ID_COPY);
+       MLX5_SET(ste_double_action_copy_v1, d_action, destination_dw_offset, dst_hw_field);
+       MLX5_SET(ste_double_action_copy_v1, d_action, destination_left_shifter, dst_shifter);
+       MLX5_SET(ste_double_action_copy_v1, d_action, destination_length, dst_len);
+       MLX5_SET(ste_double_action_copy_v1, d_action, source_dw_offset, src_hw_field);
+       MLX5_SET(ste_double_action_copy_v1, d_action, source_right_shifter, src_shifter);
+}
+
+#define DR_STE_DECAP_L3_ACTION_NUM     8
+#define DR_STE_L2_HDR_MAX_SZ           20
+
+/* Build the HW action list for L3 tunnel decap with L2 header rewrite:
+ * remove the outer L2/L3 headers, then re-insert the new L2 header
+ * (@data, @data_sz bytes - 14B or 18B) via inline insert actions, and
+ * finally trim the 2 alignment bytes added by the padding.
+ *
+ * @hw_action/@hw_action_sz: output buffer for the built actions.
+ * @used_hw_action_num: set to the number of actions written.
+ * Returns 0 on success, -EINVAL if the buffer cannot hold the worst-case
+ * number of actions.
+ */
+int dr_ste_v1_set_action_decap_l3_list(void *data,
+                                      u32 data_sz,
+                                      u8 *hw_action,
+                                      u32 hw_action_sz,
+                                      u16 *used_hw_action_num)
+{
+       u8 padded_data[DR_STE_L2_HDR_MAX_SZ] = {};
+       void *data_ptr = padded_data;
+       u16 used_actions = 0;
+       u32 inline_data_sz;
+       u32 i;
+
+       if (hw_action_sz / DR_STE_ACTION_DOUBLE_SZ < DR_STE_DECAP_L3_ACTION_NUM)
+               return -EINVAL;
+
+       inline_data_sz =
+               MLX5_FLD_SZ_BYTES(ste_double_action_insert_with_inline_v1, inline_data);
+
+       /* Add an alignment padding */
+       memcpy(padded_data + data_sz % inline_data_sz, data, data_sz);
+
+       /* Remove L2L3 outer headers */
+       MLX5_SET(ste_single_action_remove_header_v1, hw_action, action_id,
+                DR_STE_V1_ACTION_ID_REMOVE_HEADER_TO_HEADER);
+       MLX5_SET(ste_single_action_remove_header_v1, hw_action, decap, 1);
+       MLX5_SET(ste_single_action_remove_header_v1, hw_action, vni_to_cqe, 1);
+       MLX5_SET(ste_single_action_remove_header_v1, hw_action, end_anchor,
+                DR_STE_HEADER_ANCHOR_INNER_IPV6_IPV4);
+       hw_action += DR_STE_ACTION_DOUBLE_SZ;
+       used_actions++; /* Remove and NOP are a single double action */
+
+       /* Point to the last dword of the header */
+       data_ptr += (data_sz / inline_data_sz) * inline_data_sz;
+
+       /* Add the new header using inline action 4Byte at a time, the header
+        * is added in reversed order to the beginning of the packet to avoid
+        * incorrect parsing by the HW. Since header is 14B or 18B an extra
+        * two bytes are padded and later removed.
+        */
+       for (i = 0; i < data_sz / inline_data_sz + 1; i++) {
+               void *addr_inline;
+
+               MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, action_id,
+                        DR_STE_V1_ACTION_ID_INSERT_INLINE);
+               /* The hardware expects here offset to words (2 bytes) */
+               MLX5_SET(ste_double_action_insert_with_inline_v1, hw_action, start_offset, 0);
+
+               /* memcpy keeps the header bytes in packet order within the
+                * inline field (no endianness swapping)
+                */
+               addr_inline = MLX5_ADDR_OF(ste_double_action_insert_with_inline_v1,
+                                          hw_action, inline_data);
+               memcpy(addr_inline, data_ptr - i * inline_data_sz, inline_data_sz);
+               hw_action += DR_STE_ACTION_DOUBLE_SZ;
+               used_actions++;
+       }
+
+       /* Remove first 2 extra bytes */
+       MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, action_id,
+                DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE);
+       MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, start_offset, 0);
+       /* The hardware expects here size in words (2 bytes) */
+       MLX5_SET(ste_single_action_remove_header_size_v1, hw_action, remove_size, 1);
+       used_actions++;
+
+       *used_hw_action_num = used_actions;
+
+       return 0;
+}
+
+/* Build the bit mask of the ETHL2_SRC_DST lookup: MACs, first VLAN fields,
+ * L3 type, and VLAN qualifier. Consumed mask fields (cvlan_tag/svlan_tag)
+ * are cleared in @value so later builders do not match on them again.
+ */
+static void dr_ste_v1_build_eth_l2_src_dst_bit_mask(struct mlx5dr_match_param *value,
+                                                   bool inner, u8 *bit_mask)
+{
+       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_47_16, mask, smac_47_16);
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, smac_15_0, mask, smac_15_0);
+
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_vlan_id, mask, first_vid);
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_cfi, mask, first_cfi);
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, bit_mask, first_priority, mask, first_prio);
+       DR_STE_SET_ONES(eth_l2_src_dst_v1, bit_mask, l3_type, mask, ip_version);
+
+       if (mask->cvlan_tag) {
+               MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
+               mask->cvlan_tag = 0;
+       } else if (mask->svlan_tag) {
+               MLX5_SET(ste_eth_l2_src_dst_v1, bit_mask, first_vlan_qualifier, -1);
+               mask->svlan_tag = 0;
+       }
+}
+
+/* Build the match tag of the ETHL2_SRC_DST lookup from @value. Consumed
+ * spec fields (ip_version, cvlan_tag/svlan_tag) are zeroed as they are
+ * translated into the tag. Returns -EINVAL for an unsupported ip_version.
+ */
+static int dr_ste_v1_build_eth_l2_src_dst_tag(struct mlx5dr_match_param *value,
+                                             struct mlx5dr_ste_build *sb,
+                                             u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
+
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_47_16, spec, smac_47_16);
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, smac_15_0, spec, smac_15_0);
+
+       if (spec->ip_version == IP_VERSION_IPV4) {
+               MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV4);
+               spec->ip_version = 0;
+       } else if (spec->ip_version == IP_VERSION_IPV6) {
+               MLX5_SET(ste_eth_l2_src_dst_v1, tag, l3_type, STE_IPV6);
+               spec->ip_version = 0;
+       } else if (spec->ip_version) {
+               /* Only IPv4/IPv6 (or unset) are representable in l3_type */
+               return -EINVAL;
+       }
+
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_vlan_id, spec, first_vid);
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_cfi, spec, first_cfi);
+       DR_STE_SET_TAG(eth_l2_src_dst_v1, tag, first_priority, spec, first_prio);
+
+       if (spec->cvlan_tag) {
+               MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
+               spec->cvlan_tag = 0;
+       } else if (spec->svlan_tag) {
+               MLX5_SET(ste_eth_l2_src_dst_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
+               spec->svlan_tag = 0;
+       }
+       return 0;
+}
+
+/* Initialize an ETHL2_SRC_DST ste_build: derive the bit mask from the
+ * match mask, set the lookup type and byte mask, and register the
+ * tag-builder callback used at rule-insertion time.
+ */
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_eth_l2_src_dst_bit_mask(mask, sb->inner, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC_DST, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_dst_tag;
+}
+
+/* Copy the four 32-bit words of the IPv6 destination address from the
+ * inner/outer spec into the STE tag. Always succeeds.
+ */
+static int dr_ste_v1_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
+                                              struct mlx5dr_ste_build *sb,
+                                              u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
+       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
+       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_63_32, spec, dst_ip_63_32);
+       DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_31_0, spec, dst_ip_31_0);
+
+       return 0;
+}
+
+/* Initialize an IPV6_DES ste_build: the tag builder doubles as the
+ * bit-mask builder (run here on the mask into sb->bit_mask).
+ */
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_eth_l3_ipv6_dst_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_DES, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_dst_tag;
+}
+
+/* Copy the four 32-bit words of the IPv6 source address from the
+ * inner/outer spec into the STE tag. Always succeeds.
+ */
+static int dr_ste_v1_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
+                                              struct mlx5dr_ste_build *sb,
+                                              u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
+       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
+       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_63_32, spec, src_ip_63_32);
+       DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_31_0, spec, src_ip_31_0);
+
+       return 0;
+}
+
+/* Initialize an IPV6_SRC ste_build; see the _dst variant above. */
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_eth_l3_ipv6_src_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_DFNR_TYPE(IPV6_SRC, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv6_src_tag;
+}
+
+/* Build the IPv4 5-tuple tag: addresses, L4 ports (TCP and UDP map to
+ * the same HW port fields), protocol, fragment bit, DSCP/ECN, and
+ * optionally the TCP flags (cleared from the spec once consumed).
+ */
+static int dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
+                                                  struct mlx5dr_ste_build *sb,
+                                                  u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_address, spec, dst_ip_31_0);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_address, spec, src_ip_31_0);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, tcp_dport);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, destination_port, spec, udp_dport);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, tcp_sport);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, source_port, spec, udp_sport);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, protocol, spec, ip_protocol);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, fragmented, spec, frag);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, dscp, spec, ip_dscp);
+       DR_STE_SET_TAG(eth_l3_ipv4_5_tuple_v1, tag, ecn, spec, ip_ecn);
+
+       if (spec->tcp_flags) {
+               DR_STE_SET_TCP_FLAGS(eth_l3_ipv4_5_tuple_v1, tag, spec);
+               spec->tcp_flags = 0;
+       }
+
+       return 0;
+}
+
+/* Initialize an ETHL3_IPV4_5_TUPLE ste_build. */
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+                                             struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_5_TUPLE, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_tag;
+}
+
+/* Shared bit-mask builder for the ETHL2 src/dst lookups: first-VLAN,
+ * fragment, ethertype and l3_type masks plus the second-VLAN fields,
+ * which come from the misc params and differ between inner and outer.
+ * Consumed VLAN-qualifier mask fields are cleared.
+ */
+static void dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
+                                                      bool inner, u8 *bit_mask)
+{
+       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+       struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_vlan_id, mask, first_vid);
+       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_cfi, mask, first_cfi);
+       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, first_priority, mask, first_prio);
+       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, ip_fragmented, mask, frag);
+       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, l3_ethertype, mask, ethertype);
+       DR_STE_SET_ONES(eth_l2_src_v1, bit_mask, l3_type, mask, ip_version);
+
+       if (mask->svlan_tag || mask->cvlan_tag) {
+               /* -1 sets all qualifier mask bits: match on VLAN presence */
+               MLX5_SET(ste_eth_l2_src_v1, bit_mask, first_vlan_qualifier, -1);
+               mask->cvlan_tag = 0;
+               mask->svlan_tag = 0;
+       }
+
+       if (inner) {
+               if (misc_mask->inner_second_cvlan_tag ||
+                   misc_mask->inner_second_svlan_tag) {
+                       MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
+                       misc_mask->inner_second_cvlan_tag = 0;
+                       misc_mask->inner_second_svlan_tag = 0;
+               }
+
+               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+                              second_vlan_id, misc_mask, inner_second_vid);
+               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+                              second_cfi, misc_mask, inner_second_cfi);
+               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+                              second_priority, misc_mask, inner_second_prio);
+       } else {
+               if (misc_mask->outer_second_cvlan_tag ||
+                   misc_mask->outer_second_svlan_tag) {
+                       MLX5_SET(ste_eth_l2_src_v1, bit_mask, second_vlan_qualifier, -1);
+                       misc_mask->outer_second_cvlan_tag = 0;
+                       misc_mask->outer_second_svlan_tag = 0;
+               }
+
+               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+                              second_vlan_id, misc_mask, outer_second_vid);
+               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+                              second_cfi, misc_mask, outer_second_cfi);
+               DR_STE_SET_TAG(eth_l2_src_v1, bit_mask,
+                              second_priority, misc_mask, outer_second_prio);
+       }
+}
+
+/* Shared tag builder for the ETHL2 src/dst lookups. Encodes ip_version
+ * into l3_type and the first/second VLAN flags into their qualifier
+ * fields (CVLAN/SVLAN), clearing each consumed spec/misc field.
+ * Returns -EINVAL on an unsupported ip_version.
+ */
+static int dr_ste_v1_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
+                                                bool inner, u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
+       struct mlx5dr_match_misc *misc_spec = &value->misc;
+
+       DR_STE_SET_TAG(eth_l2_src_v1, tag, first_vlan_id, spec, first_vid);
+       DR_STE_SET_TAG(eth_l2_src_v1, tag, first_cfi, spec, first_cfi);
+       DR_STE_SET_TAG(eth_l2_src_v1, tag, first_priority, spec, first_prio);
+       DR_STE_SET_TAG(eth_l2_src_v1, tag, ip_fragmented, spec, frag);
+       DR_STE_SET_TAG(eth_l2_src_v1, tag, l3_ethertype, spec, ethertype);
+
+       if (spec->ip_version == IP_VERSION_IPV4) {
+               MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV4);
+               spec->ip_version = 0;
+       } else if (spec->ip_version == IP_VERSION_IPV6) {
+               MLX5_SET(ste_eth_l2_src_v1, tag, l3_type, STE_IPV6);
+               spec->ip_version = 0;
+       } else if (spec->ip_version) {
+               return -EINVAL;
+       }
+
+       if (spec->cvlan_tag) {
+               MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
+               spec->cvlan_tag = 0;
+       } else if (spec->svlan_tag) {
+               MLX5_SET(ste_eth_l2_src_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
+               spec->svlan_tag = 0;
+       }
+
+       if (inner) {
+               if (misc_spec->inner_second_cvlan_tag) {
+                       MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
+                       misc_spec->inner_second_cvlan_tag = 0;
+               } else if (misc_spec->inner_second_svlan_tag) {
+                       MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
+                       misc_spec->inner_second_svlan_tag = 0;
+               }
+
+               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, inner_second_vid);
+               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, inner_second_cfi);
+               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, inner_second_prio);
+       } else {
+               if (misc_spec->outer_second_cvlan_tag) {
+                       MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_CVLAN);
+                       misc_spec->outer_second_cvlan_tag = 0;
+               } else if (misc_spec->outer_second_svlan_tag) {
+                       MLX5_SET(ste_eth_l2_src_v1, tag, second_vlan_qualifier, DR_STE_SVLAN);
+                       misc_spec->outer_second_svlan_tag = 0;
+               }
+               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_vlan_id, misc_spec, outer_second_vid);
+               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_cfi, misc_spec, outer_second_cfi);
+               DR_STE_SET_TAG(eth_l2_src_v1, tag, second_priority, misc_spec, outer_second_prio);
+       }
+
+       return 0;
+}
+
+/* ETHL2_SRC bit mask: SMAC plus the shared src/dst L2 fields. */
+static void dr_ste_v1_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
+                                               bool inner, u8 *bit_mask)
+{
+       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_47_16, mask, smac_47_16);
+       DR_STE_SET_TAG(eth_l2_src_v1, bit_mask, smac_15_0, mask, smac_15_0);
+
+       dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+/* ETHL2_SRC tag: SMAC plus the shared src/dst L2 fields. */
+static int dr_ste_v1_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
+                                         struct mlx5dr_ste_build *sb,
+                                         u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_47_16, spec, smac_47_16);
+       DR_STE_SET_TAG(eth_l2_src_v1, tag, smac_15_0, spec, smac_15_0);
+
+       return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+/* Initialize an ETHL2_SRC ste_build. */
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_eth_l2_src_bit_mask(mask, sb->inner, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2_SRC, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_src_tag;
+}
+
+/* ETHL2 (dst) bit mask: DMAC plus the shared src/dst L2 fields. */
+static void dr_ste_v1_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
+                                               bool inner, u8 *bit_mask)
+{
+       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_dst_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
+
+       dr_ste_v1_build_eth_l2_src_or_dst_bit_mask(value, inner, bit_mask);
+}
+
+/* ETHL2 (dst) tag: DMAC plus the shared src/dst L2 fields. */
+static int dr_ste_v1_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
+                                         struct mlx5dr_ste_build *sb,
+                                         u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_47_16, spec, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_dst_v1, tag, dmac_15_0, spec, dmac_15_0);
+
+       return dr_ste_v1_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
+}
+
+/* Initialize an ETHL2 (dst) ste_build. */
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_eth_l2_dst_bit_mask(mask, sb->inner, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL2, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_dst_tag;
+}
+
+/* ETHL2_TNL bit mask: DMAC/VLAN/L3 fields plus the VXLAN VNI from the
+ * misc params. Consumed mask fields are cleared.
+ */
+static void dr_ste_v1_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
+                                               bool inner, u8 *bit_mask)
+{
+       struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
+       struct mlx5dr_match_misc *misc = &value->misc;
+
+       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_47_16, mask, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, dmac_15_0, mask, dmac_15_0);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_vlan_id, mask, first_vid);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_cfi, mask, first_cfi);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, first_priority, mask, first_prio);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, ip_fragmented, mask, frag);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, bit_mask, l3_ethertype, mask, ethertype);
+       DR_STE_SET_ONES(eth_l2_tnl_v1, bit_mask, l3_type, mask, ip_version);
+
+       if (misc->vxlan_vni) {
+               /* VNI shifted by 8 - presumably to position the 24-bit VNI
+                * within the 32-bit network-id field; confirm vs. HW layout.
+                */
+               MLX5_SET(ste_eth_l2_tnl_v1, bit_mask,
+                        l2_tunneling_network_id, (misc->vxlan_vni << 8));
+               misc->vxlan_vni = 0;
+       }
+
+       if (mask->svlan_tag || mask->cvlan_tag) {
+               MLX5_SET(ste_eth_l2_tnl_v1, bit_mask, first_vlan_qualifier, -1);
+               mask->cvlan_tag = 0;
+               mask->svlan_tag = 0;
+       }
+}
+
+/* ETHL2_TNL tag builder; mirrors the bit-mask builder above and encodes
+ * the VLAN qualifier and l3_type. Returns -EINVAL on an unsupported
+ * ip_version.
+ */
+static int dr_ste_v1_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
+                                         struct mlx5dr_ste_build *sb,
+                                         u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+       struct mlx5dr_match_misc *misc = &value->misc;
+
+       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_47_16, spec, dmac_47_16);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, dmac_15_0, spec, dmac_15_0);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_vlan_id, spec, first_vid);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_cfi, spec, first_cfi);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, ip_fragmented, spec, frag);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, first_priority, spec, first_prio);
+       DR_STE_SET_TAG(eth_l2_tnl_v1, tag, l3_ethertype, spec, ethertype);
+
+       if (misc->vxlan_vni) {
+               MLX5_SET(ste_eth_l2_tnl_v1, tag, l2_tunneling_network_id,
+                        (misc->vxlan_vni << 8));
+               misc->vxlan_vni = 0;
+       }
+
+       if (spec->cvlan_tag) {
+               MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_CVLAN);
+               spec->cvlan_tag = 0;
+       } else if (spec->svlan_tag) {
+               MLX5_SET(ste_eth_l2_tnl_v1, tag, first_vlan_qualifier, DR_STE_SVLAN);
+               spec->svlan_tag = 0;
+       }
+
+       if (spec->ip_version == IP_VERSION_IPV4) {
+               MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV4);
+               spec->ip_version = 0;
+       } else if (spec->ip_version == IP_VERSION_IPV6) {
+               MLX5_SET(ste_eth_l2_tnl_v1, tag, l3_type, STE_IPV6);
+               spec->ip_version = 0;
+       } else if (spec->ip_version) {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Initialize an ETHL2_TNL ste_build; uses a fixed v1 lookup type. */
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_eth_l2_tnl_bit_mask(mask, sb->inner, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_ETHL2_TNL;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l2_tnl_tag;
+}
+
+/* IPv4 misc tag: TTL and IHL only. Always succeeds. */
+static int dr_ste_v1_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
+                                               struct mlx5dr_ste_build *sb,
+                                               u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+
+       DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, time_to_live, spec, ttl_hoplimit);
+       DR_STE_SET_TAG(eth_l3_ipv4_misc_v1, tag, ihl, spec, ipv4_ihl);
+
+       return 0;
+}
+
+/* Initialize an ETHL3_IPV4_MISC ste_build. */
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+                                          struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_eth_l3_ipv4_misc_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL3_IPV4_MISC, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l3_ipv4_misc_tag;
+}
+
+/* ETHL4 tag: L4 ports (TCP and UDP share the HW port fields), protocol,
+ * fragment bit, DSCP/ECN, hop limit, the IPv6 flow label taken from the
+ * inner or outer misc field, and optionally the TCP flags (cleared from
+ * the spec once consumed).
+ */
+static int dr_ste_v1_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
+                                             struct mlx5dr_ste_build *sb,
+                                             u8 *tag)
+{
+       struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
+       struct mlx5dr_match_misc *misc = &value->misc;
+
+       DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, tcp_dport);
+       DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, tcp_sport);
+       DR_STE_SET_TAG(eth_l4_v1, tag, dst_port, spec, udp_dport);
+       DR_STE_SET_TAG(eth_l4_v1, tag, src_port, spec, udp_sport);
+       DR_STE_SET_TAG(eth_l4_v1, tag, protocol, spec, ip_protocol);
+       DR_STE_SET_TAG(eth_l4_v1, tag, fragmented, spec, frag);
+       DR_STE_SET_TAG(eth_l4_v1, tag, dscp, spec, ip_dscp);
+       DR_STE_SET_TAG(eth_l4_v1, tag, ecn, spec, ip_ecn);
+       DR_STE_SET_TAG(eth_l4_v1, tag, ipv6_hop_limit, spec, ttl_hoplimit);
+
+       if (sb->inner)
+               DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, inner_ipv6_flow_label);
+       else
+               DR_STE_SET_TAG(eth_l4_v1, tag, flow_label, misc, outer_ipv6_flow_label);
+
+       if (spec->tcp_flags) {
+               DR_STE_SET_TCP_FLAGS(eth_l4_v1, tag, spec);
+               spec->tcp_flags = 0;
+       }
+
+       return 0;
+}
+
+/* Initialize an ETHL4 ste_build. */
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_eth_ipv6_l3_l4_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_DFNR_TYPE(ETHL4, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_eth_ipv6_l3_l4_tag;
+}
+
+/* MPLS tag: copy the inner or outer MPLS fields from misc2 via the
+ * DR_STE_SET_MPLS helper. Always succeeds.
+ */
+static int dr_ste_v1_build_mpls_tag(struct mlx5dr_match_param *value,
+                                   struct mlx5dr_ste_build *sb,
+                                   u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+       if (sb->inner)
+               DR_STE_SET_MPLS(mpls_v1, misc2, inner, tag);
+       else
+               DR_STE_SET_MPLS(mpls_v1, misc2, outer, tag);
+
+       return 0;
+}
+
+/* Initialize an MPLS ste_build. */
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_mpls_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_CALC_DFNR_TYPE(MPLS, sb->inner);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_mpls_tag;
+}
+
+/* GRE tunnel tag: protocol, key (high/low halves) and the C/K/S
+ * presence bits from the misc params. Always succeeds.
+ */
+static int dr_ste_v1_build_tnl_gre_tag(struct mlx5dr_match_param *value,
+                                      struct mlx5dr_ste_build *sb,
+                                      u8 *tag)
+{
+       struct  mlx5dr_match_misc *misc = &value->misc;
+
+       DR_STE_SET_TAG(gre_v1, tag, gre_protocol, misc, gre_protocol);
+       DR_STE_SET_TAG(gre_v1, tag, gre_k_present, misc, gre_k_present);
+       DR_STE_SET_TAG(gre_v1, tag, gre_key_h, misc, gre_key_h);
+       DR_STE_SET_TAG(gre_v1, tag, gre_key_l, misc, gre_key_l);
+
+       DR_STE_SET_TAG(gre_v1, tag, gre_c_present, misc, gre_c_present);
+       DR_STE_SET_TAG(gre_v1, tag, gre_s_present, misc, gre_s_present);
+
+       return 0;
+}
+
+/* Initialize a GRE ste_build; uses a fixed v1 lookup type. */
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+                                 struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_tnl_gre_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_GRE;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gre_tag;
+}
+
+/* Tunnel MPLS tag: pick the MPLS-over-GRE fields if any of them are
+ * set in misc2, otherwise fall back to the MPLS-over-UDP fields; both
+ * map onto the same mpls0_* tag fields. Always succeeds.
+ */
+static int dr_ste_v1_build_tnl_mpls_tag(struct mlx5dr_match_param *value,
+                                       struct mlx5dr_ste_build *sb,
+                                       u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+       if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc2)) {
+               DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
+                              misc2, outer_first_mpls_over_gre_label);
+
+               DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
+                              misc2, outer_first_mpls_over_gre_exp);
+
+               DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
+                              misc2, outer_first_mpls_over_gre_s_bos);
+
+               DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
+                              misc2, outer_first_mpls_over_gre_ttl);
+       } else {
+               DR_STE_SET_TAG(mpls_v1, tag, mpls0_label,
+                              misc2, outer_first_mpls_over_udp_label);
+
+               DR_STE_SET_TAG(mpls_v1, tag, mpls0_exp,
+                              misc2, outer_first_mpls_over_udp_exp);
+
+               DR_STE_SET_TAG(mpls_v1, tag, mpls0_s_bos,
+                              misc2, outer_first_mpls_over_udp_s_bos);
+
+               DR_STE_SET_TAG(mpls_v1, tag, mpls0_ttl,
+                              misc2, outer_first_mpls_over_udp_ttl);
+       }
+
+       return 0;
+}
+
+/* Initialize a tunnel-MPLS ste_build; uses a fixed v1 lookup type. */
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+                                  struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_tnl_mpls_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_MPLS_I;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_tag;
+}
+
+/* MPLS-over-UDP tag: assemble a 32-bit MPLS label-stack entry
+ * (label/exp/s/ttl at their HDR_MPLS_OFFSET_* positions) and write it
+ * big-endian at the flex parser's offset inside the tag. Consumed
+ * misc2 fields are cleared. Always succeeds.
+ */
+static int dr_ste_v1_build_tnl_mpls_over_udp_tag(struct mlx5dr_match_param *value,
+                                                struct mlx5dr_ste_build *sb,
+                                                u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+       u8 *parser_ptr;
+       u8 parser_id;
+       u32 mpls_hdr;
+
+       mpls_hdr = misc2->outer_first_mpls_over_udp_label << HDR_MPLS_OFFSET_LABEL;
+       misc2->outer_first_mpls_over_udp_label = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_udp_exp << HDR_MPLS_OFFSET_EXP;
+       misc2->outer_first_mpls_over_udp_exp = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_udp_s_bos << HDR_MPLS_OFFSET_S_BOS;
+       misc2->outer_first_mpls_over_udp_s_bos = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_udp_ttl << HDR_MPLS_OFFSET_TTL;
+       misc2->outer_first_mpls_over_udp_ttl = 0;
+
+       parser_id = sb->caps->flex_parser_id_mpls_over_udp;
+       parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+       *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+       return 0;
+}
+
+/* Initialize an MPLS-over-UDP ste_build; the lookup type depends on
+ * which flex parser bank the device assigned to MPLS-over-UDP.
+ */
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_tnl_mpls_over_udp_tag(mask, sb, sb->bit_mask);
+
+       /* STEs with lookup type FLEX_PARSER_{0/1} includes
+        * flex parsers_{0-3}/{4-7} respectively.
+        */
+       sb->lu_type = sb->caps->flex_parser_id_mpls_over_udp > DR_STE_MAX_FLEX_0_ID ?
+                     DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
+                     DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_udp_tag;
+}
+
+/* MPLS-over-GRE tag: same scheme as the MPLS-over-UDP builder above,
+ * but sourced from the *_over_gre misc2 fields and the over-GRE flex
+ * parser id. Consumed misc2 fields are cleared. Always succeeds.
+ */
+static int dr_ste_v1_build_tnl_mpls_over_gre_tag(struct mlx5dr_match_param *value,
+                                                struct mlx5dr_ste_build *sb,
+                                                u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+       u8 *parser_ptr;
+       u8 parser_id;
+       u32 mpls_hdr;
+
+       mpls_hdr = misc2->outer_first_mpls_over_gre_label << HDR_MPLS_OFFSET_LABEL;
+       misc2->outer_first_mpls_over_gre_label = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_gre_exp << HDR_MPLS_OFFSET_EXP;
+       misc2->outer_first_mpls_over_gre_exp = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_gre_s_bos << HDR_MPLS_OFFSET_S_BOS;
+       misc2->outer_first_mpls_over_gre_s_bos = 0;
+       mpls_hdr |= misc2->outer_first_mpls_over_gre_ttl << HDR_MPLS_OFFSET_TTL;
+       misc2->outer_first_mpls_over_gre_ttl = 0;
+
+       parser_id = sb->caps->flex_parser_id_mpls_over_gre;
+       parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+       *(__be32 *)parser_ptr = cpu_to_be32(mpls_hdr);
+
+       return 0;
+}
+
+/* Initialize an MPLS-over-GRE ste_build; the lookup type depends on
+ * which flex parser bank the device assigned to MPLS-over-GRE.
+ */
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_tnl_mpls_over_gre_tag(mask, sb, sb->bit_mask);
+
+       /* STEs with lookup type FLEX_PARSER_{0/1} includes
+        * flex parsers_{0-3}/{4-7} respectively.
+        */
+       sb->lu_type = sb->caps->flex_parser_id_mpls_over_gre > DR_STE_MAX_FLEX_0_ID ?
+                     DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
+                     DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_mpls_over_gre_tag;
+}
+
+/* ICMP tag: select the v4 or v6 header-data/type/code fields from
+ * misc3 (v4 when the ICMPv4 mask bits are set), write them into the
+ * shared icmp_* tag fields, then clear the consumed misc3 fields.
+ * Always succeeds.
+ */
+static int dr_ste_v1_build_icmp_tag(struct mlx5dr_match_param *value,
+                                   struct mlx5dr_ste_build *sb,
+                                   u8 *tag)
+{
+       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+       bool is_ipv4 = DR_MASK_IS_ICMPV4_SET(misc3);
+       u32 *icmp_header_data;
+       u8 *icmp_type;
+       u8 *icmp_code;
+
+       if (is_ipv4) {
+               icmp_header_data        = &misc3->icmpv4_header_data;
+               icmp_type               = &misc3->icmpv4_type;
+               icmp_code               = &misc3->icmpv4_code;
+       } else {
+               icmp_header_data        = &misc3->icmpv6_header_data;
+               icmp_type               = &misc3->icmpv6_type;
+               icmp_code               = &misc3->icmpv6_code;
+       }
+
+       MLX5_SET(ste_icmp_v1, tag, icmp_header_data, *icmp_header_data);
+       MLX5_SET(ste_icmp_v1, tag, icmp_type, *icmp_type);
+       MLX5_SET(ste_icmp_v1, tag, icmp_code, *icmp_code);
+
+       /* Mark the fields as consumed */
+       *icmp_header_data = 0;
+       *icmp_type = 0;
+       *icmp_code = 0;
+
+       return 0;
+}
+
+/* Initialize an ICMP ste_build; uses the outer ETHL4_MISC lookup. */
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_icmp_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_icmp_tag;
+}
+
+/* General-purpose tag: metadata register A from misc2. Always succeeds. */
+static int dr_ste_v1_build_general_purpose_tag(struct mlx5dr_match_param *value,
+                                              struct mlx5dr_ste_build *sb,
+                                              u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+       DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
+                      misc2, metadata_reg_a);
+
+       return 0;
+}
+
+/* Initialize a GENERAL_PURPOSE ste_build. */
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_general_purpose_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_GENERAL_PURPOSE;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_general_purpose_tag;
+}
+
+/* ETHL4_MISC tag: TCP sequence/ack numbers, inner or outer depending
+ * on sb->inner. Always succeeds.
+ */
+static int dr_ste_v1_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
+                                          struct mlx5dr_ste_build *sb,
+                                          u8 *tag)
+{
+       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+       if (sb->inner) {
+               DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, inner_tcp_seq_num);
+               DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, inner_tcp_ack_num);
+       } else {
+               DR_STE_SET_TAG(eth_l4_misc_v1, tag, seq_num, misc3, outer_tcp_seq_num);
+               DR_STE_SET_TAG(eth_l4_misc_v1, tag, ack_num, misc3, outer_tcp_ack_num);
+       }
+
+       return 0;
+}
+
+/* Initialize an ETHL4_MISC ste_build.
+ * NOTE(review): lu_type is the outer (_O) variant even when sb->inner
+ * is set - confirm this matches the HW definition for inner lookups.
+ */
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+                                     struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_eth_l4_misc_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_ETHL4_MISC_O;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_eth_l4_misc_tag;
+}
+
+/* VXLAN-GPE tunnel tag: flags, next-protocol and VNI from misc3.
+ * Always succeeds.
+ */
+static int
+dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
+                                             struct mlx5dr_ste_build *sb,
+                                             u8 *tag)
+{
+       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+                      outer_vxlan_gpe_flags, misc3,
+                      outer_vxlan_gpe_flags);
+       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+                      outer_vxlan_gpe_next_protocol, misc3,
+                      outer_vxlan_gpe_next_protocol);
+       DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
+                      outer_vxlan_gpe_vni, misc3,
+                      outer_vxlan_gpe_vni);
+
+       return 0;
+}
+
+/* Initialize a VXLAN-GPE ste_build on the flex-parser tunnel-header
+ * lookup type.
+ */
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+                                                   struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_tag;
+}
+
+/* Copy GENEVE tunnel header fields (protocol/OAM/opt-len/VNI) from the
+ * misc match params into the STE tag.
+ */
+static int
+dr_ste_v1_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
+                                          struct mlx5dr_ste_build *sb,
+                                          u8 *tag)
+{
+       struct mlx5dr_match_misc *misc = &value->misc;
+
+       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+                      geneve_protocol_type, misc, geneve_protocol_type);
+       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+                      geneve_oam, misc, geneve_oam);
+       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+                      geneve_opt_len, misc, geneve_opt_len);
+       DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
+                      geneve_vni, misc, geneve_vni);
+
+       return 0;
+}
+
+/* Init STE builder for GENEVE tunnel matching (flex parser tunnel LU). */
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+                                                struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_flex_parser_tnl_geneve_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tag;
+}
+
+/* Copy generic tunnel header dwords 0 and 1 from misc5 into the STE tag. */
+static int dr_ste_v1_build_tnl_header_0_1_tag(struct mlx5dr_match_param *value,
+                                             struct mlx5dr_ste_build *sb,
+                                             u8 *tag)
+{
+       struct mlx5dr_match_misc5 *misc5 = &value->misc5;
+
+       DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_0, misc5, tunnel_header_0);
+       DR_STE_SET_TAG(tunnel_header, tag, tunnel_header_1, misc5, tunnel_header_1);
+
+       return 0;
+}
+
+/* Init STE builder for raw tunnel header dword 0/1 matching. */
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask)
+{
+       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+       dr_ste_v1_build_tnl_header_0_1_tag(mask, sb, sb->bit_mask);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_header_0_1_tag;
+}
+
+/* Map metadata registers C0-C3 into steering registers 0/1 (hi/lo halves). */
+static int dr_ste_v1_build_register_0_tag(struct mlx5dr_match_param *value,
+                                         struct mlx5dr_ste_build *sb,
+                                         u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+       DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
+       DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
+       DR_STE_SET_TAG(register_0, tag, register_1_h, misc2, metadata_reg_c_2);
+       DR_STE_SET_TAG(register_0, tag, register_1_l, misc2, metadata_reg_c_3);
+
+       return 0;
+}
+
+/* Init STE builder for metadata registers C0-C3 matching. */
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_register_0_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_0;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_register_0_tag;
+}
+
+/* Map metadata registers C4-C7 into steering registers 2/3 (hi/lo halves). */
+static int dr_ste_v1_build_register_1_tag(struct mlx5dr_match_param *value,
+                                         struct mlx5dr_ste_build *sb,
+                                         u8 *tag)
+{
+       struct mlx5dr_match_misc2 *misc2 = &value->misc2;
+
+       DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
+       DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
+       DR_STE_SET_TAG(register_1, tag, register_3_h, misc2, metadata_reg_c_6);
+       DR_STE_SET_TAG(register_1, tag, register_3_l, misc2, metadata_reg_c_7);
+
+       return 0;
+}
+
+/* Init STE builder for metadata registers C4-C7 matching. */
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_register_1_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_STEERING_REGISTERS_1;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_register_1_tag;
+}
+
+/* Build the bit mask for source GVMI/QP matching: any non-zero source_port
+ * or source_sqn mask enables the whole field (DR_STE_SET_ONES), and the
+ * eswitch-owner vhca_id mask is consumed here (zeroed) - it is handled in
+ * the tag builder via sb->vhca_id_valid.
+ */
+static void dr_ste_v1_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
+                                                 u8 *bit_mask)
+{
+       struct mlx5dr_match_misc *misc_mask = &value->misc;
+
+       DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_gvmi, misc_mask, source_port);
+       DR_STE_SET_ONES(src_gvmi_qp_v1, bit_mask, source_qp, misc_mask, source_sqn);
+       misc_mask->source_eswitch_owner_vhca_id = 0;
+}
+
+/* Build the source GVMI/QP tag. When matching on the eswitch owner vhca_id,
+ * the vport's GVMI is looked up either in this domain or in the peer domain
+ * (via dmn->peer_dmn_xa); returns -EINVAL if the vhca_id matches neither,
+ * or if the vport is disabled/invalid. Consumed match fields are zeroed.
+ */
+static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
+                                           struct mlx5dr_ste_build *sb,
+                                           u8 *tag)
+{
+       struct mlx5dr_match_misc *misc = &value->misc;
+       int id = misc->source_eswitch_owner_vhca_id;
+       struct mlx5dr_cmd_vport_cap *vport_cap;
+       struct mlx5dr_domain *dmn = sb->dmn;
+       struct mlx5dr_domain *vport_dmn;
+       u8 *bit_mask = sb->bit_mask;
+       struct mlx5dr_domain *peer;
+
+       DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
+
+       if (sb->vhca_id_valid) {
+               peer = xa_load(&dmn->peer_dmn_xa, id);
+               /* Find port GVMI based on the eswitch_owner_vhca_id */
+               if (id == dmn->info.caps.gvmi)
+                       vport_dmn = dmn;
+               else if (peer && (id == peer->info.caps.gvmi))
+                       vport_dmn = peer;
+               else
+                       return -EINVAL;
+
+               misc->source_eswitch_owner_vhca_id = 0;
+       } else {
+               vport_dmn = dmn;
+       }
+
+       /* Nothing to do if source_gvmi is not part of the mask */
+       if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
+               return 0;
+
+       vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
+       if (!vport_cap) {
+               mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
+                          misc->source_port);
+               return -EINVAL;
+       }
+
+       if (vport_cap->vport_gvmi)
+               MLX5_SET(ste_src_gvmi_qp_v1, tag, source_gvmi, vport_cap->vport_gvmi);
+
+       misc->source_port = 0;
+       return 0;
+}
+
+/* Init STE builder for source GVMI/QP matching. */
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+                                      struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_SRC_QP_GVMI;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_src_gvmi_qpn_tag;
+}
+
+/* Place one misc4 programmable-sample value into the flex-parser slot named
+ * by its field id. Out-of-range ids and already-used parser slots are
+ * silently skipped; on success the field id/value are zeroed (consumed).
+ */
+static void dr_ste_v1_set_flex_parser(u32 *misc4_field_id,
+                                     u32 *misc4_field_value,
+                                     bool *parser_is_used,
+                                     u8 *tag)
+{
+       u32 id = *misc4_field_id;
+       u8 *parser_ptr;
+
+       if (id >= DR_NUM_OF_FLEX_PARSERS || parser_is_used[id])
+               return;
+
+       parser_is_used[id] = true;
+       parser_ptr = dr_ste_calc_flex_parser_offset(tag, id);
+
+       /* Flex parser values are stored big-endian in the STE */
+       *(__be32 *)parser_ptr = cpu_to_be32(*misc4_field_value);
+       *misc4_field_id = 0;
+       *misc4_field_value = 0;
+}
+
+/* Build tag from the four misc4 programmable sample field id/value pairs.
+ * NOTE(review): "felx" is a typo for "flex" in this function name; it is
+ * kept as-is because the flex_parser_{0,1} init functions reference it.
+ */
+static int dr_ste_v1_build_felx_parser_tag(struct mlx5dr_match_param *value,
+                                          struct mlx5dr_ste_build *sb,
+                                          u8 *tag)
+{
+       struct mlx5dr_match_misc4 *misc_4_mask = &value->misc4;
+       bool parser_is_used[DR_NUM_OF_FLEX_PARSERS] = {};
+
+       dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_0,
+                                 &misc_4_mask->prog_sample_field_value_0,
+                                 parser_is_used, tag);
+
+       dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_1,
+                                 &misc_4_mask->prog_sample_field_value_1,
+                                 parser_is_used, tag);
+
+       dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_2,
+                                 &misc_4_mask->prog_sample_field_value_2,
+                                 parser_is_used, tag);
+
+       dr_ste_v1_set_flex_parser(&misc_4_mask->prog_sample_field_id_3,
+                                 &misc_4_mask->prog_sample_field_value_3,
+                                 parser_is_used, tag);
+
+       return 0;
+}
+
+/* Init STE builder for flex parsers 0-3 (FLEX_PARSER_0 lookup type). */
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask)
+{
+       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+       dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
+}
+
+/* Init STE builder for flex parsers 4-7 (FLEX_PARSER_1 lookup type). */
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask)
+{
+       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
+       dr_ste_v1_build_felx_parser_tag(mask, sb, sb->bit_mask);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_felx_parser_tag;
+}
+
+/* Write GENEVE TLV option 0 data into the flex-parser slot assigned to it
+ * by FW caps (flex_parser_id_geneve_tlv_option_0); field consumed (zeroed).
+ */
+static int
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(struct mlx5dr_match_param *value,
+                                                  struct mlx5dr_ste_build *sb,
+                                                  u8 *tag)
+{
+       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+       u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
+       u8 *parser_ptr = dr_ste_calc_flex_parser_offset(tag, parser_id);
+
+       MLX5_SET(ste_flex_parser_0, parser_ptr, flex_parser_3,
+                misc3->geneve_tlv_option_0_data);
+       misc3->geneve_tlv_option_0_data = 0;
+
+       return 0;
+}
+
+/* Init STE builder for GENEVE TLV option 0 data matching; lookup type is
+ * chosen by which flex-parser bank (0-3 vs 4-7) holds the option.
+ */
+void
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+                                                   struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag(mask, sb, sb->bit_mask);
+
+       /* STEs with lookup type FLEX_PARSER_{0/1} includes
+        * flex parsers_{0-3}/{4-7} respectively.
+        */
+       sb->lu_type = sb->caps->flex_parser_id_geneve_tlv_option_0 > 3 ?
+                     DR_STE_V1_LU_TYPE_FLEX_PARSER_1 :
+                     DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_tag;
+}
+
+/* Set the "parser OK" bit for GENEVE TLV option 0 existence matching;
+ * the bit position is the option's flex-parser id. Field consumed (zeroed).
+ */
+static int
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(struct mlx5dr_match_param *value,
+                                                        struct mlx5dr_ste_build *sb,
+                                                        u8 *tag)
+{
+       u8 parser_id = sb->caps->flex_parser_id_geneve_tlv_option_0;
+       struct mlx5dr_match_misc *misc = &value->misc;
+
+       if (misc->geneve_tlv_option_0_exist) {
+               MLX5_SET(ste_flex_parser_ok, tag, flex_parsers_ok, 1 << parser_id);
+               misc->geneve_tlv_option_0_exist = 0;
+       }
+
+       return 0;
+}
+
+/* Init STE builder for GENEVE TLV option 0 existence matching. */
+void
+dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
+                                                         struct mlx5dr_match_param *mask)
+{
+       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_OK;
+       dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag(mask, sb, sb->bit_mask);
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_tag;
+}
+
+/* Copy GTP-U header fields (msg flags/type, TEID) from misc3 into the tag. */
+static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value,
+                                                   struct mlx5dr_ste_build *sb,
+                                                   u8 *tag)
+{
+       struct mlx5dr_match_misc3 *misc3 = &value->misc3;
+
+       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_flags, misc3, gtpu_msg_flags);
+       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_msg_type, misc3, gtpu_msg_type);
+       DR_STE_SET_TAG(flex_parser_tnl_gtpu, tag, gtpu_teid, misc3, gtpu_teid);
+
+       return 0;
+}
+
+/* Init STE builder for GTP-U tunnel matching (flex parser tunnel LU). */
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+                                              struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_flex_parser_tnl_gtpu_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_TNL_HEADER;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_flex_parser_tnl_gtpu_tag;
+}
+
+/* Set the GTP-U flex-parser fields whose FW-assigned parser ids fall in
+ * bank 0 (parsers 0-3); fields in the other bank are left for the
+ * flex_parser_1 builder.
+ */
+static int
+dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value,
+                                          struct mlx5dr_ste_build *sb,
+                                          u8 *tag)
+{
+       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_teid))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_2))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+       return 0;
+}
+
+/* Init STE builder for GTP-U fields mapped to flex-parser bank 0. */
+void
+dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_0;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag;
+}
+
+/* Set the GTP-U flex-parser fields whose FW-assigned parser ids fall in
+ * bank 1 (parsers 4-7); mirror of the flex_parser_0 variant above.
+ */
+static int
+dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value,
+                                          struct mlx5dr_ste_build *sb,
+                                          u8 *tag)
+{
+       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_teid))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_teid, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_2))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_2, sb->caps, &value->misc3);
+       if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_first_ext_dw_0))
+               DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_first_ext_dw_0, sb->caps, &value->misc3);
+       return 0;
+}
+
+/* Init STE builder for GTP-U fields mapped to flex-parser bank 1. */
+void
+dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask)
+{
+       dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(mask, sb, sb->bit_mask);
+
+       sb->lu_type = DR_STE_V1_LU_TYPE_FLEX_PARSER_1;
+       sb->byte_mask = mlx5dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
+       sb->ste_build_tag_func = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag;
+}
+
+/* Allocate the argument object and cached pattern for a modify-header
+ * action. Returns 0 on success, -EOPNOTSUPP when the domain has no pattern
+ * manager, or -EAGAIN on allocation failure (presumably letting the caller
+ * fall back to another modify-header mechanism - confirm at call sites).
+ * On pattern failure the already-taken arg object is released.
+ */
+int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
+{
+       struct mlx5dr_ptrn_mgr *ptrn_mgr;
+       int ret;
+
+       ptrn_mgr = action->rewrite->dmn->ptrn_mgr;
+       if (!ptrn_mgr)
+               return -EOPNOTSUPP;
+
+       action->rewrite->arg = mlx5dr_arg_get_obj(action->rewrite->dmn->arg_mgr,
+                                                 action->rewrite->num_of_actions,
+                                                 action->rewrite->data);
+       if (!action->rewrite->arg) {
+               mlx5dr_err(action->rewrite->dmn, "Failed allocating args for modify header\n");
+               return -EAGAIN;
+       }
+
+       action->rewrite->ptrn =
+               mlx5dr_ptrn_cache_get_pattern(ptrn_mgr,
+                                             action->rewrite->num_of_actions,
+                                             action->rewrite->data);
+       if (!action->rewrite->ptrn) {
+               mlx5dr_err(action->rewrite->dmn, "Failed to get pattern\n");
+               ret = -EAGAIN;
+               goto put_arg;
+       }
+
+       return 0;
+
+put_arg:
+       mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
+                          action->rewrite->arg);
+       return ret;
+}
+
+/* Release the pattern and argument object taken by
+ * dr_ste_v1_alloc_modify_hdr_ptrn_arg().
+ */
+void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action)
+{
+       mlx5dr_ptrn_cache_put_pattern(action->rewrite->dmn->ptrn_mgr,
+                                     action->rewrite->ptrn);
+       mlx5dr_arg_put_obj(action->rewrite->dmn->arg_mgr,
+                          action->rewrite->arg);
+}
+
+/* STEv1 context: the per-STE-version dispatch table of matcher builders,
+ * STE field accessors, action setters and send hooks, exposed through
+ * mlx5dr_ste_get_ctx_v1().
+ */
+static struct mlx5dr_ste_ctx ste_ctx_v1 = {
+       /* Builders */
+       .build_eth_l2_src_dst_init      = &dr_ste_v1_build_eth_l2_src_dst_init,
+       .build_eth_l3_ipv6_src_init     = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+       .build_eth_l3_ipv6_dst_init     = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+       .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+       .build_eth_l2_src_init          = &dr_ste_v1_build_eth_l2_src_init,
+       .build_eth_l2_dst_init          = &dr_ste_v1_build_eth_l2_dst_init,
+       .build_eth_l2_tnl_init          = &dr_ste_v1_build_eth_l2_tnl_init,
+       .build_eth_l3_ipv4_misc_init    = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+       .build_eth_ipv6_l3_l4_init      = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+       .build_mpls_init                = &dr_ste_v1_build_mpls_init,
+       .build_tnl_gre_init             = &dr_ste_v1_build_tnl_gre_init,
+       .build_tnl_mpls_init            = &dr_ste_v1_build_tnl_mpls_init,
+       .build_tnl_mpls_over_udp_init   = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+       .build_tnl_mpls_over_gre_init   = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+       .build_icmp_init                = &dr_ste_v1_build_icmp_init,
+       .build_general_purpose_init     = &dr_ste_v1_build_general_purpose_init,
+       .build_eth_l4_misc_init         = &dr_ste_v1_build_eth_l4_misc_init,
+       .build_tnl_vxlan_gpe_init       = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+       .build_tnl_geneve_init          = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+       .build_tnl_geneve_tlv_opt_init  = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+       .build_tnl_geneve_tlv_opt_exist_init = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+       .build_register_0_init          = &dr_ste_v1_build_register_0_init,
+       .build_register_1_init          = &dr_ste_v1_build_register_1_init,
+       .build_src_gvmi_qpn_init        = &dr_ste_v1_build_src_gvmi_qpn_init,
+       .build_flex_parser_0_init       = &dr_ste_v1_build_flex_parser_0_init,
+       .build_flex_parser_1_init       = &dr_ste_v1_build_flex_parser_1_init,
+       .build_tnl_gtpu_init            = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+       .build_tnl_header_0_1_init      = &dr_ste_v1_build_tnl_header_0_1_init,
+       .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+       .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+       /* Getters and Setters */
+       .ste_init                       = &dr_ste_v1_init,
+       .set_next_lu_type               = &dr_ste_v1_set_next_lu_type,
+       .get_next_lu_type               = &dr_ste_v1_get_next_lu_type,
+       .is_miss_addr_set               = &dr_ste_v1_is_miss_addr_set,
+       .set_miss_addr                  = &dr_ste_v1_set_miss_addr,
+       .get_miss_addr                  = &dr_ste_v1_get_miss_addr,
+       .set_hit_addr                   = &dr_ste_v1_set_hit_addr,
+       .set_byte_mask                  = &dr_ste_v1_set_byte_mask,
+       .get_byte_mask                  = &dr_ste_v1_get_byte_mask,
+       /* Actions */
+       .actions_caps                   = DR_STE_CTX_ACTION_CAP_TX_POP |
+                                         DR_STE_CTX_ACTION_CAP_RX_PUSH |
+                                         DR_STE_CTX_ACTION_CAP_RX_ENCAP |
+                                         DR_STE_CTX_ACTION_CAP_POP_MDFY,
+       .set_actions_rx                 = &dr_ste_v1_set_actions_rx,
+       .set_actions_tx                 = &dr_ste_v1_set_actions_tx,
+       .modify_field_arr_sz            = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr),
+       .modify_field_arr               = dr_ste_v1_action_modify_field_arr,
+       .set_action_set                 = &dr_ste_v1_set_action_set,
+       .set_action_add                 = &dr_ste_v1_set_action_add,
+       .set_action_copy                = &dr_ste_v1_set_action_copy,
+       .set_action_decap_l3_list       = &dr_ste_v1_set_action_decap_l3_list,
+       .alloc_modify_hdr_chunk         = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+       .dealloc_modify_hdr_chunk       = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+
+       /* Send */
+       .prepare_for_postsend           = &dr_ste_v1_prepare_for_postsend,
+};
+
+/* Return the shared, immutable STEv1 context (never NULL). */
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v1(void)
+{
+       return &ste_ctx_v1;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v1.h
new file mode 100644 (file)
index 0000000..e2fc698
--- /dev/null
@@ -0,0 +1,97 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+/* STEv1 entry points shared with the STEv2 context (dr_ste_v2.c reuses
+ * most of the v1 implementation).
+ * NOTE(review): the guard macro _DR_STE_V1_ uses a reserved identifier
+ * (leading underscore + uppercase); kept as-is to match the existing
+ * convention in this driver.
+ */
+#ifndef        _DR_STE_V1_
+#define        _DR_STE_V1_
+
+#include "dr_types.h"
+#include "dr_ste.h"
+
+/* STE field getters/setters */
+bool dr_ste_v1_is_miss_addr_set(u8 *hw_ste_p);
+void dr_ste_v1_set_miss_addr(u8 *hw_ste_p, u64 miss_addr);
+u64 dr_ste_v1_get_miss_addr(u8 *hw_ste_p);
+void dr_ste_v1_set_byte_mask(u8 *hw_ste_p, u16 byte_mask);
+u16 dr_ste_v1_get_byte_mask(u8 *hw_ste_p);
+void dr_ste_v1_set_next_lu_type(u8 *hw_ste_p, u16 lu_type);
+u16 dr_ste_v1_get_next_lu_type(u8 *hw_ste_p);
+void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size);
+void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, bool is_rx, u16 gvmi);
+void dr_ste_v1_prepare_for_postsend(u8 *hw_ste_p, u32 ste_size);
+/* Action setters */
+void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, u8 *action_type_set,
+                             u32 actions_caps, u8 *last_ste,
+                             struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, u8 *action_type_set,
+                             u32 actions_caps, u8 *last_ste,
+                             struct mlx5dr_ste_actions_attr *attr, u32 *added_stes);
+void dr_ste_v1_set_action_set(u8 *d_action, u8 hw_field, u8 shifter,
+                             u8 length, u32 data);
+void dr_ste_v1_set_action_add(u8 *d_action, u8 hw_field, u8 shifter,
+                             u8 length, u32 data);
+void dr_ste_v1_set_action_copy(u8 *d_action, u8 dst_hw_field, u8 dst_shifter,
+                              u8 dst_len, u8 src_hw_field, u8 src_shifter);
+int dr_ste_v1_set_action_decap_l3_list(void *data, u32 data_sz, u8 *hw_action,
+                                      u32 hw_action_sz, u16 *used_hw_action_num);
+/* Modify-header pattern/argument management */
+int dr_ste_v1_alloc_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
+void dr_ste_v1_free_modify_hdr_ptrn_arg(struct mlx5dr_action *action);
+/* Matcher builder init functions, one per lookup type */
+void dr_ste_v1_build_eth_l2_src_dst_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_dst_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv6_src_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_5_tuple_init(struct mlx5dr_ste_build *sb,
+                                             struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_src_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_dst_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l2_tnl_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l3_ipv4_misc_init(struct mlx5dr_ste_build *sb,
+                                          struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_ipv6_l3_l4_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_mpls_init(struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gre_init(struct mlx5dr_ste_build *sb,
+                                 struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_init(struct mlx5dr_ste_build *sb,
+                                  struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_udp_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_mpls_over_gre_init(struct mlx5dr_ste_build *sb,
+                                           struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_icmp_init(struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_general_purpose_init(struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_eth_l4_misc_init(struct mlx5dr_ste_build *sb,
+                                     struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init(struct mlx5dr_ste_build *sb,
+                                                   struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_init(struct mlx5dr_ste_build *sb,
+                                                struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_header_0_1_init(struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_0_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_register_1_init(struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_src_gvmi_qpn_init(struct mlx5dr_ste_build *sb,
+                                      struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb,
+                                                        struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init(struct mlx5dr_ste_build *sb,
+                                                              struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *sb,
+                                              struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb,
+                                                struct mlx5dr_match_param *mask);
+void dr_ste_v1_build_tnl_gtpu_flex_parser_1_init(struct mlx5dr_ste_build *sb,
+                                                struct mlx5dr_match_param *mask);
+
+#endif  /* _DR_STE_V1_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_ste_v2.c
new file mode 100644 (file)
index 0000000..808b013
--- /dev/null
@@ -0,0 +1,234 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include "dr_ste_v1.h"
+
+/* STEv2 hardware field IDs referenced (as .hw_field) by
+ * dr_ste_v2_action_modify_field_arr below for modify-header actions.
+ */
+enum {
+       DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0              = 0x00,
+       DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1              = 0x01,
+       DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2              = 0x02,
+       DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0          = 0x08,
+       DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1          = 0x09,
+       DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0              = 0x0e,
+       DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0              = 0x18,
+       DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1              = 0x19,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0            = 0x40,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1            = 0x41,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0        = 0x44,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1        = 0x45,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2        = 0x46,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3        = 0x47,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0        = 0x4c,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1        = 0x4d,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2        = 0x4e,
+       DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3        = 0x4f,
+       DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0            = 0x5e,
+       DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1            = 0x5f,
+       DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0           = 0x6f,
+       DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1           = 0x70,
+       DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE        = 0x7b,
+       DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE          = 0x7c,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0          = 0x90,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1          = 0x91,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0          = 0x92,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1          = 0x93,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0          = 0x94,
+       DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1          = 0x95,
+};
+
+/* Map each MLX5_ACTION_IN_FIELD_* modify-header field to its STEv2
+ * hardware field ID and the bit range [start, end] within that field.
+ * Entries with .l3_type/.l4_type only apply to the matching L3/L4
+ * header type.
+ */
+static const struct mlx5dr_ste_action_modify_field dr_ste_v2_action_modify_field_arr[] = {
+       [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_SRC_L2_OUT_1, .start = 16, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 0, .end = 15,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_1, .start = 16, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IP_DSCP] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 18, .end = 23,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_1, .start = 16, .end = 24,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_TCP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IP_TTL] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_IPV6_HOPLIMIT] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L3_OUT_0, .start = 8, .end = 15,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 16, .end = 31,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L4_OUT_0, .start = 0, .end = 15,
+               .l4_type = DR_STE_ACTION_MDFY_TYPE_L4_UDP,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_0, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_1, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_2, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_SRC_OUT_3, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_0, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_1, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_2, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV6_DST_OUT_3, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV6,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_SIPV4] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_0, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_DIPV4] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_IPV4_OUT_1, .start = 0, .end = 31,
+               .l3_type = DR_STE_ACTION_MDFY_TYPE_L3_IPV4,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_A] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_GNRL_PURPOSE, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_B] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_METADATA_2_CQE, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_1] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_0_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_2] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_3] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_1_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_4] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_METADATA_REG_C_5] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_REGISTER_2_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_0, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_TCP_MISC_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_FIRST_VID] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_L2_OUT_2, .start = 0, .end = 15,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_EMD_31_0] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_1, .start = 0, .end = 31,
+       },
+       [MLX5_ACTION_IN_FIELD_OUT_EMD_47_32] = {
+               .hw_field = DR_STE_V2_ACTION_MDFY_FLD_CFG_HDR_0_0, .start = 0, .end = 15,
+       },
+};
+
+/* STEv2 context. Every callback here is shared with the STEv1
+ * implementation (all are dr_ste_v1_* functions); the only v2-specific
+ * pieces are the modify-header field mapping table and its size.
+ */
+static struct mlx5dr_ste_ctx ste_ctx_v2 = {
+       /* Builders */
+       .build_eth_l2_src_dst_init      = &dr_ste_v1_build_eth_l2_src_dst_init,
+       .build_eth_l3_ipv6_src_init     = &dr_ste_v1_build_eth_l3_ipv6_src_init,
+       .build_eth_l3_ipv6_dst_init     = &dr_ste_v1_build_eth_l3_ipv6_dst_init,
+       .build_eth_l3_ipv4_5_tuple_init = &dr_ste_v1_build_eth_l3_ipv4_5_tuple_init,
+       .build_eth_l2_src_init          = &dr_ste_v1_build_eth_l2_src_init,
+       .build_eth_l2_dst_init          = &dr_ste_v1_build_eth_l2_dst_init,
+       .build_eth_l2_tnl_init          = &dr_ste_v1_build_eth_l2_tnl_init,
+       .build_eth_l3_ipv4_misc_init    = &dr_ste_v1_build_eth_l3_ipv4_misc_init,
+       .build_eth_ipv6_l3_l4_init      = &dr_ste_v1_build_eth_ipv6_l3_l4_init,
+       .build_mpls_init                = &dr_ste_v1_build_mpls_init,
+       .build_tnl_gre_init             = &dr_ste_v1_build_tnl_gre_init,
+       .build_tnl_mpls_init            = &dr_ste_v1_build_tnl_mpls_init,
+       .build_tnl_mpls_over_udp_init   = &dr_ste_v1_build_tnl_mpls_over_udp_init,
+       .build_tnl_mpls_over_gre_init   = &dr_ste_v1_build_tnl_mpls_over_gre_init,
+       .build_icmp_init                = &dr_ste_v1_build_icmp_init,
+       .build_general_purpose_init     = &dr_ste_v1_build_general_purpose_init,
+       .build_eth_l4_misc_init         = &dr_ste_v1_build_eth_l4_misc_init,
+       .build_tnl_vxlan_gpe_init       = &dr_ste_v1_build_flex_parser_tnl_vxlan_gpe_init,
+       .build_tnl_geneve_init          = &dr_ste_v1_build_flex_parser_tnl_geneve_init,
+       .build_tnl_geneve_tlv_opt_init  = &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init,
+       .build_tnl_geneve_tlv_opt_exist_init =
+                                 &dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_exist_init,
+       .build_register_0_init          = &dr_ste_v1_build_register_0_init,
+       .build_register_1_init          = &dr_ste_v1_build_register_1_init,
+       .build_src_gvmi_qpn_init        = &dr_ste_v1_build_src_gvmi_qpn_init,
+       .build_flex_parser_0_init       = &dr_ste_v1_build_flex_parser_0_init,
+       .build_flex_parser_1_init       = &dr_ste_v1_build_flex_parser_1_init,
+       .build_tnl_gtpu_init            = &dr_ste_v1_build_flex_parser_tnl_gtpu_init,
+       .build_tnl_header_0_1_init      = &dr_ste_v1_build_tnl_header_0_1_init,
+       .build_tnl_gtpu_flex_parser_0_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_0_init,
+       .build_tnl_gtpu_flex_parser_1_init = &dr_ste_v1_build_tnl_gtpu_flex_parser_1_init,
+
+       /* Getters and Setters */
+       .ste_init                       = &dr_ste_v1_init,
+       .set_next_lu_type               = &dr_ste_v1_set_next_lu_type,
+       .get_next_lu_type               = &dr_ste_v1_get_next_lu_type,
+       .is_miss_addr_set               = &dr_ste_v1_is_miss_addr_set,
+       .set_miss_addr                  = &dr_ste_v1_set_miss_addr,
+       .get_miss_addr                  = &dr_ste_v1_get_miss_addr,
+       .set_hit_addr                   = &dr_ste_v1_set_hit_addr,
+       .set_byte_mask                  = &dr_ste_v1_set_byte_mask,
+       .get_byte_mask                  = &dr_ste_v1_get_byte_mask,
+
+       /* Actions */
+       .actions_caps                   = DR_STE_CTX_ACTION_CAP_TX_POP |
+                                         DR_STE_CTX_ACTION_CAP_RX_PUSH |
+                                         DR_STE_CTX_ACTION_CAP_RX_ENCAP,
+       .set_actions_rx                 = &dr_ste_v1_set_actions_rx,
+       .set_actions_tx                 = &dr_ste_v1_set_actions_tx,
+       /* v2-specific modify-header field mapping */
+       .modify_field_arr_sz            = ARRAY_SIZE(dr_ste_v2_action_modify_field_arr),
+       .modify_field_arr               = dr_ste_v2_action_modify_field_arr,
+       .set_action_set                 = &dr_ste_v1_set_action_set,
+       .set_action_add                 = &dr_ste_v1_set_action_add,
+       .set_action_copy                = &dr_ste_v1_set_action_copy,
+       .set_action_decap_l3_list       = &dr_ste_v1_set_action_decap_l3_list,
+       .alloc_modify_hdr_chunk         = &dr_ste_v1_alloc_modify_hdr_ptrn_arg,
+       .dealloc_modify_hdr_chunk       = &dr_ste_v1_free_modify_hdr_ptrn_arg,
+
+       /* Send */
+       .prepare_for_postsend           = &dr_ste_v1_prepare_for_postsend,
+};
+
+/* Return the singleton STEv2 context. */
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx_v2(void)
+{
+       return &ste_ctx_v2;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_table.c
new file mode 100644 (file)
index 0000000..69294a6
--- /dev/null
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies. */
+
+#include "dr_types.h"
+
+/* Re-point the miss address of a NIC table's last hash table.
+ *
+ * When @action is set, the miss address is taken from the action's
+ * destination table anchor (RX or TX, per the NIC domain type);
+ * when @action is NULL, the domain's default miss address is restored.
+ * Returns 0 on success or a negative errno from the postsend.
+ */
+static int dr_table_set_miss_action_nic(struct mlx5dr_domain *dmn,
+                                       struct mlx5dr_table_rx_tx *nic_tbl,
+                                       struct mlx5dr_action *action)
+{
+       struct mlx5dr_matcher_rx_tx *last_nic_matcher = NULL;
+       struct mlx5dr_htbl_connect_info info;
+       struct mlx5dr_ste_htbl *last_htbl;
+       struct mlx5dr_icm_chunk *chunk;
+       int ret;
+
+       if (!list_empty(&nic_tbl->nic_matcher_list))
+               last_nic_matcher = list_last_entry(&nic_tbl->nic_matcher_list,
+                                                  struct mlx5dr_matcher_rx_tx,
+                                                  list_node);
+
+       /* The miss destination lives on the table's last htbl: the end
+        * anchor of the last matcher, or the table's start anchor when
+        * the table has no matchers yet.
+        */
+       if (last_nic_matcher)
+               last_htbl = last_nic_matcher->e_anchor;
+       else
+               last_htbl = nic_tbl->s_anchor;
+
+       if (action) {
+               chunk = nic_tbl->nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX ?
+                       action->dest_tbl->tbl->rx.s_anchor->chunk :
+                       action->dest_tbl->tbl->tx.s_anchor->chunk;
+               nic_tbl->default_icm_addr = mlx5dr_icm_pool_get_chunk_icm_addr(chunk);
+       } else {
+               nic_tbl->default_icm_addr = nic_tbl->nic_dmn->default_icm_addr;
+       }
+
+       info.type = CONNECT_MISS;
+       info.miss_icm_addr = nic_tbl->default_icm_addr;
+
+       ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_tbl->nic_dmn,
+                                               last_htbl, &info, true);
+       if (ret)
+               mlx5dr_dbg(dmn, "Failed to set NIC RX/TX miss action, ret %d\n", ret);
+
+       return ret;
+}
+
+/* Set (or clear, with @action == NULL) the miss action of @tbl.
+ *
+ * Only DR_ACTION_TYP_FT actions are accepted. The table takes a
+ * reference on the new action and drops the reference on the old one.
+ * Returns 0 on success, -EOPNOTSUPP for bad action/domain type, or an
+ * error from the per-NIC update.
+ */
+int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
+                                struct mlx5dr_action *action)
+{
+       int ret = -EOPNOTSUPP;
+
+       if (action && action->action_type != DR_ACTION_TYP_FT)
+               return -EOPNOTSUPP;
+
+       mlx5dr_domain_lock(tbl->dmn);
+
+       if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX ||
+           tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
+               ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->rx, action);
+               if (ret)
+                       goto out;
+       }
+
+       if (tbl->dmn->type == MLX5DR_DOMAIN_TYPE_NIC_TX ||
+           tbl->dmn->type == MLX5DR_DOMAIN_TYPE_FDB) {
+               ret = dr_table_set_miss_action_nic(tbl->dmn, &tbl->tx, action);
+               if (ret)
+                       goto out;
+       }
+
+       /* Not redundant: ret is still -EOPNOTSUPP here if the domain
+        * type matched neither branch above.
+        */
+       if (ret)
+               goto out;
+
+       /* Release old action */
+       if (tbl->miss_action)
+               refcount_dec(&tbl->miss_action->refcount);
+
+       /* Set new miss action */
+       tbl->miss_action = action;
+       if (tbl->miss_action)
+               refcount_inc(&action->refcount);
+
+out:
+       mlx5dr_domain_unlock(tbl->dmn);
+       return ret;
+}
+
+/* Drop the table's reference on the NIC table start anchor
+ * (frees it when this was the last reference).
+ */
+static void dr_table_uninit_nic(struct mlx5dr_table_rx_tx *nic_tbl)
+{
+       mlx5dr_htbl_put(nic_tbl->s_anchor);
+}
+
+/* FDB tables have both RX and TX sides - uninit both. */
+static void dr_table_uninit_fdb(struct mlx5dr_table *tbl)
+{
+       dr_table_uninit_nic(&tbl->rx);
+       dr_table_uninit_nic(&tbl->tx);
+}
+
+/* Tear down the table's per-domain-type state under the domain lock. */
+static void dr_table_uninit(struct mlx5dr_table *tbl)
+{
+       mlx5dr_domain_lock(tbl->dmn);
+
+       switch (tbl->dmn->type) {
+       case MLX5DR_DOMAIN_TYPE_NIC_RX:
+               dr_table_uninit_nic(&tbl->rx);
+               break;
+       case MLX5DR_DOMAIN_TYPE_NIC_TX:
+               dr_table_uninit_nic(&tbl->tx);
+               break;
+       case MLX5DR_DOMAIN_TYPE_FDB:
+               dr_table_uninit_fdb(tbl);
+               break;
+       default:
+               /* Unknown domain type - nothing was initialized for it */
+               WARN_ON(true);
+               break;
+       }
+
+       mlx5dr_domain_unlock(tbl->dmn);
+}
+
+/* Allocate and program the start anchor hash table of a NIC table.
+ *
+ * The anchor is a single-entry "don't care" htbl whose miss address is
+ * the NIC domain's default. An extra reference is taken on success so
+ * the anchor stays alive for the table's lifetime.
+ * Returns 0 on success, -ENOMEM on allocation failure, or an error
+ * from the postsend.
+ */
+static int dr_table_init_nic(struct mlx5dr_domain *dmn,
+                            struct mlx5dr_table_rx_tx *nic_tbl)
+{
+       struct mlx5dr_domain_rx_tx *nic_dmn = nic_tbl->nic_dmn;
+       struct mlx5dr_htbl_connect_info info;
+       int ret;
+
+       INIT_LIST_HEAD(&nic_tbl->nic_matcher_list);
+
+       nic_tbl->default_icm_addr = nic_dmn->default_icm_addr;
+
+       nic_tbl->s_anchor = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
+                                                 DR_CHUNK_SIZE_1,
+                                                 MLX5DR_STE_LU_TYPE_DONT_CARE,
+                                                 0);
+       if (!nic_tbl->s_anchor) {
+               mlx5dr_err(dmn, "Failed allocating htbl\n");
+               return -ENOMEM;
+       }
+
+       info.type = CONNECT_MISS;
+       info.miss_icm_addr = nic_dmn->default_icm_addr;
+       ret = mlx5dr_ste_htbl_init_and_postsend(dmn, nic_dmn,
+                                               nic_tbl->s_anchor,
+                                               &info, true);
+       if (ret) {
+               mlx5dr_err(dmn, "Failed to init and send htbl\n");
+               goto free_s_anchor;
+       }
+
+       /* Hold a reference on behalf of the table itself */
+       mlx5dr_htbl_get(nic_tbl->s_anchor);
+
+       return 0;
+
+free_s_anchor:
+       mlx5dr_ste_htbl_free(nic_tbl->s_anchor);
+       return ret;
+}
+
+/* Init both RX and TX sides of an FDB table; on TX failure the
+ * already-initialized RX side is rolled back.
+ */
+static int dr_table_init_fdb(struct mlx5dr_table *tbl)
+{
+       int ret;
+
+       ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
+       if (ret)
+               return ret;
+
+       ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
+       if (ret)
+               goto destroy_rx;
+
+       return 0;
+
+destroy_rx:
+       dr_table_uninit_nic(&tbl->rx);
+       return ret;
+}
+
+/* Set the table's FW table type and init the relevant NIC side(s)
+ * according to the domain type, under the domain lock.
+ * Returns 0 on success or a negative errno; an unknown domain type
+ * only triggers a WARN and returns 0.
+ */
+static int dr_table_init(struct mlx5dr_table *tbl)
+{
+       int ret = 0;
+
+       INIT_LIST_HEAD(&tbl->matcher_list);
+
+       mlx5dr_domain_lock(tbl->dmn);
+
+       switch (tbl->dmn->type) {
+       case MLX5DR_DOMAIN_TYPE_NIC_RX:
+               tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_RX;
+               tbl->rx.nic_dmn = &tbl->dmn->info.rx;
+               ret = dr_table_init_nic(tbl->dmn, &tbl->rx);
+               break;
+       case MLX5DR_DOMAIN_TYPE_NIC_TX:
+               tbl->table_type = MLX5_FLOW_TABLE_TYPE_NIC_TX;
+               tbl->tx.nic_dmn = &tbl->dmn->info.tx;
+               ret = dr_table_init_nic(tbl->dmn, &tbl->tx);
+               break;
+       case MLX5DR_DOMAIN_TYPE_FDB:
+               tbl->table_type = MLX5_FLOW_TABLE_TYPE_FDB;
+               tbl->rx.nic_dmn = &tbl->dmn->info.rx;
+               tbl->tx.nic_dmn = &tbl->dmn->info.tx;
+               ret = dr_table_init_fdb(tbl);
+               break;
+       default:
+               WARN_ON(true);
+               break;
+       }
+
+       mlx5dr_domain_unlock(tbl->dmn);
+
+       return ret;
+}
+
+/* Ask FW to destroy the SW-owned flow table object. */
+static int dr_table_destroy_sw_owned_tbl(struct mlx5dr_table *tbl)
+{
+       return mlx5dr_cmd_destroy_flow_table(tbl->dmn->mdev,
+                                            tbl->table_id,
+                                            tbl->table_type);
+}
+
+/* Create the FW flow table object backing this SW-owned table.
+ *
+ * The RX/TX ICM anchor addresses (0 when the side is unused) and the
+ * tunnel decap/reformat flags are passed to FW; the resulting table id
+ * is stored in tbl->table_id.
+ */
+static int dr_table_create_sw_owned_tbl(struct mlx5dr_table *tbl, u16 uid)
+{
+       bool en_encap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT);
+       bool en_decap = !!(tbl->flags & MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+       struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
+       u64 icm_addr_rx = 0;
+       u64 icm_addr_tx = 0;
+       int ret;
+
+       if (tbl->rx.s_anchor)
+               icm_addr_rx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->rx.s_anchor->chunk);
+
+       if (tbl->tx.s_anchor)
+               icm_addr_tx = mlx5dr_icm_pool_get_chunk_icm_addr(tbl->tx.s_anchor->chunk);
+
+       ft_attr.table_type = tbl->table_type;
+       ft_attr.icm_addr_rx = icm_addr_rx;
+       ft_attr.icm_addr_tx = icm_addr_tx;
+       ft_attr.level = tbl->dmn->info.caps.max_ft_level - 1;
+       ft_attr.sw_owner = true;
+       ft_attr.decap_en = en_decap;
+       ft_attr.reformat_en = en_encap;
+       ft_attr.uid = uid;
+
+       ret = mlx5dr_cmd_create_flow_table(tbl->dmn->mdev, &ft_attr,
+                                          NULL, &tbl->table_id);
+
+       return ret;
+}
+
+/* Create a SW steering table in @dmn.
+ *
+ * Takes a reference on the domain for the table's lifetime.
+ * Returns the new table, or NULL on any failure (errors are not
+ * propagated via ERR_PTR).
+ */
+struct mlx5dr_table *mlx5dr_table_create(struct mlx5dr_domain *dmn, u32 level,
+                                        u32 flags, u16 uid)
+{
+       struct mlx5dr_table *tbl;
+       int ret;
+
+       refcount_inc(&dmn->refcount);
+
+       tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
+       if (!tbl)
+               goto dec_ref;
+
+       tbl->dmn = dmn;
+       tbl->level = level;
+       tbl->flags = flags;
+       refcount_set(&tbl->refcount, 1);
+
+       ret = dr_table_init(tbl);
+       if (ret)
+               goto free_tbl;
+
+       ret = dr_table_create_sw_owned_tbl(tbl, uid);
+       if (ret)
+               goto uninit_tbl;
+
+       INIT_LIST_HEAD(&tbl->dbg_node);
+       mlx5dr_dbg_tbl_add(tbl);
+       return tbl;
+
+uninit_tbl:
+       dr_table_uninit(tbl);
+free_tbl:
+       kfree(tbl);
+dec_ref:
+       refcount_dec(&dmn->refcount);
+       return NULL;
+}
+
+/* Destroy a table created by mlx5dr_table_create().
+ *
+ * Fails with -EBUSY if anything else still holds a reference.
+ * Releases the miss action reference (if set) and the domain
+ * reference taken at creation.
+ */
+int mlx5dr_table_destroy(struct mlx5dr_table *tbl)
+{
+       int ret;
+
+       if (WARN_ON_ONCE(refcount_read(&tbl->refcount) > 1))
+               return -EBUSY;
+
+       mlx5dr_dbg_tbl_del(tbl);
+       ret = dr_table_destroy_sw_owned_tbl(tbl);
+       if (ret)
+               mlx5dr_err(tbl->dmn, "Failed to destroy sw owned table\n");
+
+       /* Continue tearing down even if the FW destroy failed */
+       dr_table_uninit(tbl);
+
+       if (tbl->miss_action)
+               refcount_dec(&tbl->miss_action->refcount);
+
+       refcount_dec(&tbl->dmn->refcount);
+       kfree(tbl);
+
+       return ret;
+}
+
+/* Return the FW flow table id backing this SW steering table. */
+u32 mlx5dr_table_get_id(struct mlx5dr_table *tbl)
+{
+       return tbl->table_id;
+}
+
+/* Map a flow-steering core table back to its SW steering table. */
+struct mlx5dr_table *mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft)
+{
+       return ft->fs_dr_table.dr_table;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/dr_types.h
new file mode 100644 (file)
index 0000000..7618c61
--- /dev/null
@@ -0,0 +1,1599 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019, Mellanox Technologies */
+
+#ifndef        _DR_TYPES_
+#define        _DR_TYPES_
+
+#include <linux/mlx5/vport.h>
+#include <linux/refcount.h>
+#include "fs_core.h"
+#include "wq.h"
+#include "lib/mlx5.h"
+#include "mlx5_ifc_dr.h"
+#include "mlx5dr.h"
+#include "dr_dbg.h"
+
+#define DR_RULE_MAX_STES 18
+#define DR_ACTION_MAX_STES 5
+#define DR_STE_SVLAN 0x1
+#define DR_STE_CVLAN 0x2
+#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
+#define DR_NUM_OF_FLEX_PARSERS 8
+#define DR_STE_MAX_FLEX_0_ID 3
+#define DR_STE_MAX_FLEX_1_ID 7
+#define DR_ACTION_CACHE_LINE_SIZE 64
+
+#define mlx5dr_err(dmn, arg...) mlx5_core_err((dmn)->mdev, ##arg)
+#define mlx5dr_info(dmn, arg...) mlx5_core_info((dmn)->mdev, ##arg)
+#define mlx5dr_dbg(dmn, arg...) mlx5_core_dbg((dmn)->mdev, ##arg)
+
+struct mlx5dr_ptrn_mgr;
+struct mlx5dr_arg_mgr;
+struct mlx5dr_arg_obj;
+
+/* Flex parser ids 0..DR_STE_MAX_FLEX_0_ID belong to parser set 0. */
+static inline bool dr_is_flex_parser_0_id(u8 parser_id)
+{
+       return parser_id <= DR_STE_MAX_FLEX_0_ID;
+}
+
+/* Flex parser ids above DR_STE_MAX_FLEX_0_ID belong to parser set 1. */
+static inline bool dr_is_flex_parser_1_id(u8 parser_id)
+{
+       return parser_id > DR_STE_MAX_FLEX_0_ID;
+}
+
+/* ICM chunk sizes; each successive value doubles the number of
+ * entries, from 1 up to 2M.
+ */
+enum mlx5dr_icm_chunk_size {
+       DR_CHUNK_SIZE_1,
+       DR_CHUNK_SIZE_MIN = DR_CHUNK_SIZE_1, /* keep updated when changing */
+       DR_CHUNK_SIZE_2,
+       DR_CHUNK_SIZE_4,
+       DR_CHUNK_SIZE_8,
+       DR_CHUNK_SIZE_16,
+       DR_CHUNK_SIZE_32,
+       DR_CHUNK_SIZE_64,
+       DR_CHUNK_SIZE_128,
+       DR_CHUNK_SIZE_256,
+       DR_CHUNK_SIZE_512,
+       DR_CHUNK_SIZE_1K,
+       DR_CHUNK_SIZE_2K,
+       DR_CHUNK_SIZE_4K,
+       DR_CHUNK_SIZE_8K,
+       DR_CHUNK_SIZE_16K,
+       DR_CHUNK_SIZE_32K,
+       DR_CHUNK_SIZE_64K,
+       DR_CHUNK_SIZE_128K,
+       DR_CHUNK_SIZE_256K,
+       DR_CHUNK_SIZE_512K,
+       DR_CHUNK_SIZE_1024K,
+       DR_CHUNK_SIZE_2048K,
+       DR_CHUNK_SIZE_MAX,
+};
+
+/* The kind of object stored in an ICM area. */
+enum mlx5dr_icm_type {
+       DR_ICM_TYPE_STE,
+       DR_ICM_TYPE_MODIFY_ACTION,
+       DR_ICM_TYPE_MODIFY_HDR_PTRN,
+       DR_ICM_TYPE_MAX,
+};
+
+/* Return the next chunk size to grow to, capped at DR_CHUNK_SIZE_MAX.
+ * NOTE(review): advances two enum steps, i.e. quadruples the chunk
+ * size rather than doubling it - presumably a deliberate growth
+ * factor; confirm against the ICM pool users.
+ */
+static inline enum mlx5dr_icm_chunk_size
+mlx5dr_icm_next_higher_chunk(enum mlx5dr_icm_chunk_size chunk)
+{
+       chunk += 2;
+       if (chunk < DR_CHUNK_SIZE_MAX)
+               return chunk;
+
+       return DR_CHUNK_SIZE_MAX;
+}
+
+/* STE layout sizes in bytes; the "reduced" STE omits the mask part. */
+enum {
+       DR_STE_SIZE = 64,
+       DR_STE_SIZE_CTRL = 32,
+       DR_STE_SIZE_MATCH_TAG = 32,
+       DR_STE_SIZE_TAG = 16,
+       DR_STE_SIZE_MASK = 16,
+       DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK,
+};
+
+/* Per-STE-context capability flags (see ste_ctx->actions_caps). */
+enum mlx5dr_ste_ctx_action_cap {
+       DR_STE_CTX_ACTION_CAP_NONE = 0,
+       DR_STE_CTX_ACTION_CAP_TX_POP   = 1 << 0,
+       DR_STE_CTX_ACTION_CAP_RX_PUSH  = 1 << 1,
+       DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2,
+       DR_STE_CTX_ACTION_CAP_POP_MDFY = 1 << 3,
+};
+
+enum {
+       DR_MODIFY_ACTION_SIZE = 8,
+};
+
+/* Bitmask of which match_param sections a matcher uses. */
+enum mlx5dr_matcher_criteria {
+       DR_MATCHER_CRITERIA_EMPTY = 0,
+       DR_MATCHER_CRITERIA_OUTER = 1 << 0,
+       DR_MATCHER_CRITERIA_MISC = 1 << 1,
+       DR_MATCHER_CRITERIA_INNER = 1 << 2,
+       DR_MATCHER_CRITERIA_MISC2 = 1 << 3,
+       DR_MATCHER_CRITERIA_MISC3 = 1 << 4,
+       DR_MATCHER_CRITERIA_MISC4 = 1 << 5,
+       DR_MATCHER_CRITERIA_MISC5 = 1 << 6,
+       DR_MATCHER_CRITERIA_MAX = 1 << 7,
+};
+
+/* All steering action kinds supported by SW steering. */
+enum mlx5dr_action_type {
+       DR_ACTION_TYP_TNL_L2_TO_L2,
+       DR_ACTION_TYP_L2_TO_TNL_L2,
+       DR_ACTION_TYP_TNL_L3_TO_L2,
+       DR_ACTION_TYP_L2_TO_TNL_L3,
+       DR_ACTION_TYP_DROP,
+       DR_ACTION_TYP_QP,
+       DR_ACTION_TYP_FT,
+       DR_ACTION_TYP_CTR,
+       DR_ACTION_TYP_TAG,
+       DR_ACTION_TYP_MODIFY_HDR,
+       DR_ACTION_TYP_VPORT,
+       DR_ACTION_TYP_POP_VLAN,
+       DR_ACTION_TYP_PUSH_VLAN,
+       DR_ACTION_TYP_INSERT_HDR,
+       DR_ACTION_TYP_REMOVE_HDR,
+       DR_ACTION_TYP_SAMPLER,
+       DR_ACTION_TYP_ASO_FLOW_METER,
+       DR_ACTION_TYP_RANGE,
+       DR_ACTION_TYP_MAX,
+};
+
+/* IP version index used by per-IP-version rule tables. */
+enum mlx5dr_ipv {
+       DR_RULE_IPV4,
+       DR_RULE_IPV6,
+       DR_RULE_IPV_MAX,
+};
+
+struct mlx5dr_icm_pool;
+struct mlx5dr_icm_chunk;
+struct mlx5dr_icm_buddy_mem;
+struct mlx5dr_ste_htbl;
+struct mlx5dr_match_param;
+struct mlx5dr_cmd_caps;
+struct mlx5dr_rule_rx_tx;
+struct mlx5dr_matcher_rx_tx;
+struct mlx5dr_ste_ctx;
+struct mlx5dr_send_info_pool;
+struct mlx5dr_icm_hot_chunk;
+
+/* A single steering table entry (STE) as tracked by SW steering. */
+struct mlx5dr_ste {
+       /* refcount: indicates the num of rules that using this ste */
+       u32 refcount;
+
+       /* this ste is part of a rule, located in ste's chain */
+       u8 ste_chain_location;
+
+       /* attached to the miss_list head at each htbl entry */
+       struct list_head miss_list_node;
+
+       /* this ste is member of htbl */
+       struct mlx5dr_ste_htbl *htbl;
+
+       /* next hash table this STE points to, if any */
+       struct mlx5dr_ste_htbl *next_htbl;
+
+       /* The rule this STE belongs to */
+       struct mlx5dr_rule_rx_tx *rule_rx_tx;
+};
+
+struct mlx5dr_ste_htbl_ctrl {
+       /* total number of valid entries belonging to this hash table. This
+        * includes the non collision and collision entries
+        */
+       unsigned int num_of_valid_entries;
+
+       /* total number of collisions entries attached to this table */
+       unsigned int num_of_collisions;
+};
+
+/* A hash table of STEs backed by an ICM chunk. */
+struct mlx5dr_ste_htbl {
+       u16 lu_type;
+       u16 byte_mask;
+       /* plain (non-atomic) refcount - see mlx5dr_htbl_get/put */
+       u32 refcount;
+       struct mlx5dr_icm_chunk *chunk;
+       /* STE that points at this table */
+       struct mlx5dr_ste *pointing_ste;
+       struct mlx5dr_ste_htbl_ctrl ctrl;
+};
+
+/* A pending STE write queued on a send list. */
+struct mlx5dr_ste_send_info {
+       struct mlx5dr_ste *ste;
+       struct list_head send_list;
+       u16 size;
+       u16 offset;
+       /* local copy of the data when the caller asks to copy it */
+       u8 data_cont[DR_STE_SIZE];
+       u8 *data;
+};
+
+void mlx5dr_send_fill_and_append_ste_send_info(struct mlx5dr_ste *ste, u16 size,
+                                              u16 offset, u8 *data,
+                                              struct mlx5dr_ste_send_info *ste_info,
+                                              struct list_head *send_list,
+                                              bool copy_data);
+
+/* One STE builder: the lookup type, its bit mask, and the callback
+ * that fills the STE tag from a match_param.
+ */
+struct mlx5dr_ste_build {
+       u8 inner:1;
+       u8 rx:1;
+       u8 vhca_id_valid:1;
+       struct mlx5dr_domain *dmn;
+       struct mlx5dr_cmd_caps *caps;
+       u16 lu_type;
+       u16 byte_mask;
+       u8 bit_mask[DR_STE_SIZE_MASK];
+       int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
+                                 struct mlx5dr_ste_build *sb,
+                                 u8 *tag);
+};
+
+struct mlx5dr_ste_htbl *
+mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool,
+                     enum mlx5dr_icm_chunk_size chunk_size,
+                     u16 lu_type, u16 byte_mask);
+
+int mlx5dr_ste_htbl_free(struct mlx5dr_ste_htbl *htbl);
+
+/* Drop a reference; frees the htbl when the count hits zero.
+ * NOTE(review): refcount is a plain u32, not atomic - presumably
+ * callers serialize via the domain lock; confirm before calling
+ * from a new context.
+ */
+static inline void mlx5dr_htbl_put(struct mlx5dr_ste_htbl *htbl)
+{
+       htbl->refcount--;
+       if (!htbl->refcount)
+               mlx5dr_ste_htbl_free(htbl);
+}
+
+/* Take a reference on the htbl (same locking caveat as the put). */
+static inline void mlx5dr_htbl_get(struct mlx5dr_ste_htbl *htbl)
+{
+       htbl->refcount++;
+}
+
+/* STE utils */
+u32 mlx5dr_ste_calc_hash_index(u8 *hw_ste_p, struct mlx5dr_ste_htbl *htbl);
+bool mlx5dr_ste_is_miss_addr_set(struct mlx5dr_ste_ctx *ste_ctx, u8 *hw_ste_p);
+void mlx5dr_ste_set_miss_addr(struct mlx5dr_ste_ctx *ste_ctx,
+                             u8 *hw_ste, u64 miss_addr);
+void mlx5dr_ste_set_hit_addr(struct mlx5dr_ste_ctx *ste_ctx,
+                            u8 *hw_ste, u64 icm_addr, u32 ht_size);
+void mlx5dr_ste_set_hit_addr_by_next_htbl(struct mlx5dr_ste_ctx *ste_ctx,
+                                         u8 *hw_ste,
+                                         struct mlx5dr_ste_htbl *next_htbl);
+void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
+bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
+                               u8 ste_location);
+u64 mlx5dr_ste_get_icm_addr(struct mlx5dr_ste *ste);
+u64 mlx5dr_ste_get_mr_addr(struct mlx5dr_ste *ste);
+struct list_head *mlx5dr_ste_get_miss_list(struct mlx5dr_ste *ste);
+
+#define MLX5DR_MAX_VLANS 2
+#define MLX5DR_INVALID_PATTERN_INDEX 0xffffffff
+
+/* Parameters consumed by mlx5dr_ste_set_actions_rx/tx() when filling in
+ * the action fields of a rule's STEs.
+ */
+struct mlx5dr_ste_actions_attr {
+       /* modify-header action data */
+       u32     modify_index;
+       u32     modify_pat_idx;
+       u16     modify_actions;
+       u8      *single_modify_action;
+       /* decap action data */
+       u32     decap_index;
+       u32     decap_pat_idx;
+       u16     decap_actions;
+       u8      decap_with_vlan:1;
+       u64     final_icm_addr;
+       u32     flow_tag;
+       u32     ctr_id;
+       u16     gvmi;
+       u16     hit_gvmi;
+       /* packet reformat action data */
+       struct {
+               u32     id;
+               u32     size;
+               u8      param_0;
+               u8      param_1;
+       } reformat;
+       /* VLAN headers to push (at most MLX5DR_MAX_VLANS) */
+       struct {
+               int     count;
+               u32     headers[MLX5DR_MAX_VLANS];
+       } vlans;
+
+       struct {
+               u32 obj_id;
+               u32 offset;
+               u8 dest_reg_id;
+               u8 init_color;
+       } aso_flow_meter;
+
+       /* match-range action data (see mlx5dr_action_range) */
+       struct {
+               u64     miss_icm_addr;
+               u32     definer_id;
+               u32     min;
+               u32     max;
+       } range;
+};
+
+void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
+                              struct mlx5dr_domain *dmn,
+                              u8 *action_type_set,
+                              u8 *last_ste,
+                              struct mlx5dr_ste_actions_attr *attr,
+                              u32 *added_stes);
+void mlx5dr_ste_set_actions_tx(struct mlx5dr_ste_ctx *ste_ctx,
+                              struct mlx5dr_domain *dmn,
+                              u8 *action_type_set,
+                              u8 *last_ste,
+                              struct mlx5dr_ste_actions_attr *attr,
+                              u32 *added_stes);
+
+void mlx5dr_ste_set_action_set(struct mlx5dr_ste_ctx *ste_ctx,
+                              __be64 *hw_action,
+                              u8 hw_field,
+                              u8 shifter,
+                              u8 length,
+                              u32 data);
+void mlx5dr_ste_set_action_add(struct mlx5dr_ste_ctx *ste_ctx,
+                              __be64 *hw_action,
+                              u8 hw_field,
+                              u8 shifter,
+                              u8 length,
+                              u32 data);
+void mlx5dr_ste_set_action_copy(struct mlx5dr_ste_ctx *ste_ctx,
+                               __be64 *hw_action,
+                               u8 dst_hw_field,
+                               u8 dst_shifter,
+                               u8 dst_len,
+                               u8 src_hw_field,
+                               u8 src_shifter);
+int mlx5dr_ste_set_action_decap_l3_list(struct mlx5dr_ste_ctx *ste_ctx,
+                                       void *data,
+                                       u32 data_sz,
+                                       u8 *hw_action,
+                                       u32 hw_action_sz,
+                                       u16 *used_hw_action_num);
+int mlx5dr_ste_alloc_modify_hdr(struct mlx5dr_action *action);
+void mlx5dr_ste_free_modify_hdr(struct mlx5dr_action *action);
+
+const struct mlx5dr_ste_action_modify_field *
+mlx5dr_ste_conv_modify_hdr_sw_field(struct mlx5dr_ste_ctx *ste_ctx, u16 sw_field);
+
+struct mlx5dr_ste_ctx *mlx5dr_ste_get_ctx(u8 version);
+void mlx5dr_ste_free(struct mlx5dr_ste *ste,
+                    struct mlx5dr_matcher *matcher,
+                    struct mlx5dr_matcher_rx_tx *nic_matcher);
+/* Drop a reference on @ste and free it once no rule uses it anymore */
+static inline void mlx5dr_ste_put(struct mlx5dr_ste *ste,
+                                 struct mlx5dr_matcher *matcher,
+                                 struct mlx5dr_matcher_rx_tx *nic_matcher)
+{
+       if (--ste->refcount == 0)
+               mlx5dr_ste_free(ste, matcher, nic_matcher);
+}
+
+/* Take a reference on @ste. The count starts at 0 and is incremented
+ * only when the STE appears in a new rule.
+ */
+static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
+{
+       ste->refcount += 1;
+}
+
+/* Return true when no rule currently references this STE */
+static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
+{
+       return ste->refcount == 0;
+}
+
+bool mlx5dr_ste_equal_tag(void *src, void *dst);
+int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
+                               struct mlx5dr_matcher_rx_tx *nic_matcher,
+                               struct mlx5dr_ste *ste,
+                               u8 *cur_hw_ste,
+                               enum mlx5dr_icm_chunk_size log_table_size);
+
+/* STE build functions */
+int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
+                              u8 match_criteria,
+                              struct mlx5dr_match_param *mask,
+                              struct mlx5dr_match_param *value);
+int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
+                            struct mlx5dr_matcher_rx_tx *nic_matcher,
+                            struct mlx5dr_match_param *value,
+                            u8 *ste_arr);
+void mlx5dr_ste_build_eth_l2_src_dst(struct mlx5dr_ste_ctx *ste_ctx,
+                                    struct mlx5dr_ste_build *builder,
+                                    struct mlx5dr_match_param *mask,
+                                    bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_ctx *ste_ctx,
+                                         struct mlx5dr_ste_build *sb,
+                                         struct mlx5dr_match_param *mask,
+                                         bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+                                      struct mlx5dr_ste_build *sb,
+                                      struct mlx5dr_match_param *mask,
+                                      bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv6_dst(struct mlx5dr_ste_ctx *ste_ctx,
+                                     struct mlx5dr_ste_build *sb,
+                                     struct mlx5dr_match_param *mask,
+                                     bool inner, bool rx);
+void mlx5dr_ste_build_eth_l3_ipv6_src(struct mlx5dr_ste_ctx *ste_ctx,
+                                     struct mlx5dr_ste_build *sb,
+                                     struct mlx5dr_match_param *mask,
+                                     bool inner, bool rx);
+void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask,
+                                bool inner, bool rx);
+void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask,
+                                bool inner, bool rx);
+void mlx5dr_ste_build_eth_l2_tnl(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask,
+                                bool inner, bool rx);
+void mlx5dr_ste_build_eth_ipv6_l3_l4(struct mlx5dr_ste_ctx *ste_ctx,
+                                    struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask,
+                                    bool inner, bool rx);
+void mlx5dr_ste_build_eth_l4_misc(struct mlx5dr_ste_ctx *ste_ctx,
+                                 struct mlx5dr_ste_build *sb,
+                                 struct mlx5dr_match_param *mask,
+                                 bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gre(struct mlx5dr_ste_ctx *ste_ctx,
+                             struct mlx5dr_ste_build *sb,
+                             struct mlx5dr_match_param *mask,
+                             bool inner, bool rx);
+void mlx5dr_ste_build_mpls(struct mlx5dr_ste_ctx *ste_ctx,
+                          struct mlx5dr_ste_build *sb,
+                          struct mlx5dr_match_param *mask,
+                          bool inner, bool rx);
+void mlx5dr_ste_build_tnl_mpls_over_gre(struct mlx5dr_ste_ctx *ste_ctx,
+                                       struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask,
+                                       struct mlx5dr_cmd_caps *caps,
+                                       bool inner, bool rx);
+void mlx5dr_ste_build_tnl_mpls_over_udp(struct mlx5dr_ste_ctx *ste_ctx,
+                                       struct mlx5dr_ste_build *sb,
+                                       struct mlx5dr_match_param *mask,
+                                       struct mlx5dr_cmd_caps *caps,
+                                       bool inner, bool rx);
+void mlx5dr_ste_build_icmp(struct mlx5dr_ste_ctx *ste_ctx,
+                          struct mlx5dr_ste_build *sb,
+                          struct mlx5dr_match_param *mask,
+                          struct mlx5dr_cmd_caps *caps,
+                          bool inner, bool rx);
+void mlx5dr_ste_build_tnl_vxlan_gpe(struct mlx5dr_ste_ctx *ste_ctx,
+                                   struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask,
+                                   bool inner, bool rx);
+void mlx5dr_ste_build_tnl_geneve(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask,
+                                bool inner, bool rx);
+void mlx5dr_ste_build_tnl_geneve_tlv_opt(struct mlx5dr_ste_ctx *ste_ctx,
+                                        struct mlx5dr_ste_build *sb,
+                                        struct mlx5dr_match_param *mask,
+                                        struct mlx5dr_cmd_caps *caps,
+                                        bool inner, bool rx);
+void mlx5dr_ste_build_tnl_geneve_tlv_opt_exist(struct mlx5dr_ste_ctx *ste_ctx,
+                                              struct mlx5dr_ste_build *sb,
+                                              struct mlx5dr_match_param *mask,
+                                              struct mlx5dr_cmd_caps *caps,
+                                              bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu(struct mlx5dr_ste_ctx *ste_ctx,
+                              struct mlx5dr_ste_build *sb,
+                              struct mlx5dr_match_param *mask,
+                              bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+                                            struct mlx5dr_ste_build *sb,
+                                            struct mlx5dr_match_param *mask,
+                                            struct mlx5dr_cmd_caps *caps,
+                                            bool inner, bool rx);
+void mlx5dr_ste_build_tnl_gtpu_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                            struct mlx5dr_ste_build *sb,
+                                            struct mlx5dr_match_param *mask,
+                                            struct mlx5dr_cmd_caps *caps,
+                                            bool inner, bool rx);
+void mlx5dr_ste_build_tnl_header_0_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                    struct mlx5dr_ste_build *sb,
+                                    struct mlx5dr_match_param *mask,
+                                    bool inner, bool rx);
+void mlx5dr_ste_build_general_purpose(struct mlx5dr_ste_ctx *ste_ctx,
+                                     struct mlx5dr_ste_build *sb,
+                                     struct mlx5dr_match_param *mask,
+                                     bool inner, bool rx);
+void mlx5dr_ste_build_register_0(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask,
+                                bool inner, bool rx);
+void mlx5dr_ste_build_register_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                struct mlx5dr_ste_build *sb,
+                                struct mlx5dr_match_param *mask,
+                                bool inner, bool rx);
+void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_ctx *ste_ctx,
+                                  struct mlx5dr_ste_build *sb,
+                                  struct mlx5dr_match_param *mask,
+                                  struct mlx5dr_domain *dmn,
+                                  bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_0(struct mlx5dr_ste_ctx *ste_ctx,
+                                   struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask,
+                                   bool inner, bool rx);
+void mlx5dr_ste_build_flex_parser_1(struct mlx5dr_ste_ctx *ste_ctx,
+                                   struct mlx5dr_ste_build *sb,
+                                   struct mlx5dr_match_param *mask,
+                                   bool inner, bool rx);
+void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
+
+/* Actions utils */
+int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
+                                struct mlx5dr_matcher_rx_tx *nic_matcher,
+                                struct mlx5dr_action *actions[],
+                                u32 num_actions,
+                                u8 *ste_arr,
+                                u32 *new_hw_ste_arr_sz);
+
+/* L2..L4 packet header fields a rule can match on.
+ * The same layout is used for both the outer and the inner header
+ * (see struct mlx5dr_match_param).
+ */
+struct mlx5dr_match_spec {
+       u32 smac_47_16;         /* Source MAC address of incoming packet */
+       u32 smac_15_0:16;       /* Source MAC address of incoming packet */
+       /* Incoming packet Ethertype - this is the Ethertype
+        * following the last VLAN tag of the packet
+        */
+       u32 ethertype:16;
+
+       u32 dmac_47_16;         /* Destination MAC address of incoming packet */
+
+       u32 dmac_15_0:16;       /* Destination MAC address of incoming packet */
+       /* Priority of first VLAN tag in the incoming packet.
+        * Valid only when cvlan_tag==1 or svlan_tag==1
+        */
+       u32 first_prio:3;
+       /* CFI bit of first VLAN tag in the incoming packet.
+        * Valid only when cvlan_tag==1 or svlan_tag==1
+        */
+       u32 first_cfi:1;
+       /* VLAN ID of first VLAN tag in the incoming packet.
+        * Valid only when cvlan_tag==1 or svlan_tag==1
+        */
+       u32 first_vid:12;
+
+       u32 ip_protocol:8;      /* IP protocol */
+       /* Differentiated Services Code Point derived from
+        * Traffic Class/TOS field of IPv6/v4
+        */
+       u32 ip_dscp:6;
+       /* Explicit Congestion Notification derived from
+        * Traffic Class/TOS field of IPv6/v4
+        */
+       u32 ip_ecn:2;
+       /* The first vlan in the packet is c-vlan (0x8100).
+        * cvlan_tag and svlan_tag cannot be set together
+        */
+       u32 cvlan_tag:1;
+       /* The first vlan in the packet is s-vlan (0x88a8).
+        * cvlan_tag and svlan_tag cannot be set together
+        */
+       u32 svlan_tag:1;
+       u32 frag:1;             /* Packet is an IP fragment */
+       u32 ip_version:4;       /* IP version */
+       /* TCP flags. ;Bit 0: FIN;Bit 1: SYN;Bit 2: RST;Bit 3: PSH;Bit 4: ACK;
+        *             Bit 5: URG;Bit 6: ECE;Bit 7: CWR;Bit 8: NS
+        */
+       u32 tcp_flags:9;
+
+       /* TCP source port.;tcp and udp sport/dport are mutually exclusive */
+       u32 tcp_sport:16;
+       /* TCP destination port.
+        * tcp and udp sport/dport are mutually exclusive
+        */
+       u32 tcp_dport:16;
+
+       u32 reserved_auto1:16;
+       u32 ipv4_ihl:4;
+       u32 reserved_auto2:4;
+       u32 ttl_hoplimit:8;
+
+       /* UDP source port.;tcp and udp sport/dport are mutually exclusive */
+       u32 udp_sport:16;
+       /* UDP destination port.;tcp and udp sport/dport are mutually exclusive */
+       u32 udp_dport:16;
+
+       /* IPv6 source address of incoming packets
+        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+        * This field should be qualified by an appropriate ethertype
+        */
+       u32 src_ip_127_96;
+       /* IPv6 source address of incoming packets
+        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+        * This field should be qualified by an appropriate ethertype
+        */
+       u32 src_ip_95_64;
+       /* IPv6 source address of incoming packets
+        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+        * This field should be qualified by an appropriate ethertype
+        */
+       u32 src_ip_63_32;
+       /* IPv6 source address of incoming packets
+        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+        * This field should be qualified by an appropriate ethertype
+        */
+       u32 src_ip_31_0;
+       /* IPv6 destination address of incoming packets
+        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+        * This field should be qualified by an appropriate ethertype
+        */
+       u32 dst_ip_127_96;
+       /* IPv6 destination address of incoming packets
+        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+        * This field should be qualified by an appropriate ethertype
+        */
+       u32 dst_ip_95_64;
+       /* IPv6 destination address of incoming packets
+        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+        * This field should be qualified by an appropriate ethertype
+        */
+       u32 dst_ip_63_32;
+       /* IPv6 destination address of incoming packets
+        * For IPv4 address use bits 31:0 (rest of the bits are reserved)
+        * This field should be qualified by an appropriate ethertype
+        */
+       u32 dst_ip_31_0;
+};
+
+/* Miscellaneous match parameters: source port/SQN, second VLAN tags,
+ * GRE, VXLAN, GENEVE, IPv6 flow labels, BTH and ESP fields.
+ */
+struct mlx5dr_match_misc {
+       /* used with GRE, checksum exist when gre_c_present == 1 */
+       u32 gre_c_present:1;
+       u32 reserved_auto1:1;
+       /* used with GRE, key exist when gre_k_present == 1 */
+       u32 gre_k_present:1;
+       /* used with GRE, sequence number exist when gre_s_present == 1 */
+       u32 gre_s_present:1;
+       u32 source_vhca_port:4;
+       u32 source_sqn:24;              /* Source SQN */
+
+       u32 source_eswitch_owner_vhca_id:16;
+       /* Source port.;0xffff determines wire port */
+       u32 source_port:16;
+
+       /* Priority of second VLAN tag in the outer header of the incoming packet.
+        * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
+        */
+       u32 outer_second_prio:3;
+       /* CFI bit of second VLAN tag in the outer header of the incoming packet.
+        * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
+        */
+       u32 outer_second_cfi:1;
+       /* VLAN ID of second VLAN tag in the outer header of the incoming packet.
+        * Valid only when outer_second_cvlan_tag ==1 or outer_second_svlan_tag ==1
+        */
+       u32 outer_second_vid:12;
+       /* Priority of second VLAN tag in the inner header of the incoming packet.
+        * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
+        */
+       u32 inner_second_prio:3;
+       /* CFI bit of second VLAN tag in the inner header of the incoming packet.
+        * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
+        */
+       u32 inner_second_cfi:1;
+       /* VLAN ID of second VLAN tag in the inner header of the incoming packet.
+        * Valid only when inner_second_cvlan_tag ==1 or inner_second_svlan_tag ==1
+        */
+       u32 inner_second_vid:12;
+
+       /* The second vlan in the outer header of the packet is c-vlan (0x8100).
+        * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
+        */
+       u32 outer_second_cvlan_tag:1;
+       /* The second vlan in the inner header of the packet is c-vlan (0x8100).
+        * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
+        */
+       u32 inner_second_cvlan_tag:1;
+       /* The second vlan in the outer header of the packet is s-vlan (0x88a8).
+        * outer_second_cvlan_tag and outer_second_svlan_tag cannot be set together
+        */
+       u32 outer_second_svlan_tag:1;
+       /* The second vlan in the inner header of the packet is s-vlan (0x88a8).
+        * inner_second_cvlan_tag and inner_second_svlan_tag cannot be set together
+        */
+       u32 inner_second_svlan_tag:1;
+       u32 reserved_auto2:12;
+       u32 gre_protocol:16;            /* GRE Protocol (outer) */
+
+       u32 gre_key_h:24;               /* GRE Key[31:8] (outer) */
+       u32 gre_key_l:8;                /* GRE Key [7:0] (outer) */
+
+       u32 vxlan_vni:24;               /* VXLAN VNI (outer) */
+       u32 reserved_auto3:8;
+
+       u32 geneve_vni:24;              /* GENEVE VNI field (outer) */
+       u32 reserved_auto4:6;
+       u32 geneve_tlv_option_0_exist:1;
+       u32 geneve_oam:1;               /* GENEVE OAM field (outer) */
+
+       u32 reserved_auto5:12;
+       u32 outer_ipv6_flow_label:20;   /* Flow label of incoming IPv6 packet (outer) */
+
+       u32 reserved_auto6:12;
+       u32 inner_ipv6_flow_label:20;   /* Flow label of incoming IPv6 packet (inner) */
+
+       u32 reserved_auto7:10;
+       u32 geneve_opt_len:6;           /* GENEVE OptLen (outer) */
+       u32 geneve_protocol_type:16;    /* GENEVE protocol type (outer) */
+
+       u32 reserved_auto8:8;
+       u32 bth_dst_qp:24;              /* Destination QP in BTH header */
+
+       u32 reserved_auto9;
+       u32 outer_esp_spi;
+       u32 reserved_auto10[3];
+};
+
+/* Misc2 match parameters: MPLS headers and metadata registers */
+struct mlx5dr_match_misc2 {
+       u32 outer_first_mpls_label:20;          /* First MPLS LABEL (outer) */
+       u32 outer_first_mpls_exp:3;             /* First MPLS EXP (outer) */
+       u32 outer_first_mpls_s_bos:1;           /* First MPLS S_BOS (outer) */
+       u32 outer_first_mpls_ttl:8;             /* First MPLS TTL (outer) */
+
+       u32 inner_first_mpls_label:20;          /* First MPLS LABEL (inner) */
+       u32 inner_first_mpls_exp:3;             /* First MPLS EXP (inner) */
+       u32 inner_first_mpls_s_bos:1;           /* First MPLS S_BOS (inner) */
+       u32 inner_first_mpls_ttl:8;             /* First MPLS TTL (inner) */
+
+       u32 outer_first_mpls_over_gre_label:20; /* last MPLS LABEL (outer) */
+       u32 outer_first_mpls_over_gre_exp:3;    /* last MPLS EXP (outer) */
+       u32 outer_first_mpls_over_gre_s_bos:1;  /* last MPLS S_BOS (outer) */
+       u32 outer_first_mpls_over_gre_ttl:8;    /* last MPLS TTL (outer) */
+
+       u32 outer_first_mpls_over_udp_label:20; /* last MPLS LABEL (outer) */
+       u32 outer_first_mpls_over_udp_exp:3;    /* last MPLS EXP (outer) */
+       u32 outer_first_mpls_over_udp_s_bos:1;  /* last MPLS S_BOS (outer) */
+       u32 outer_first_mpls_over_udp_ttl:8;    /* last MPLS TTL (outer) */
+
+       u32 metadata_reg_c_7;                   /* metadata_reg_c_7 */
+       u32 metadata_reg_c_6;                   /* metadata_reg_c_6 */
+       u32 metadata_reg_c_5;                   /* metadata_reg_c_5 */
+       u32 metadata_reg_c_4;                   /* metadata_reg_c_4 */
+       u32 metadata_reg_c_3;                   /* metadata_reg_c_3 */
+       u32 metadata_reg_c_2;                   /* metadata_reg_c_2 */
+       u32 metadata_reg_c_1;                   /* metadata_reg_c_1 */
+       u32 metadata_reg_c_0;                   /* metadata_reg_c_0 */
+       u32 metadata_reg_a;                     /* metadata_reg_a */
+       u32 reserved_auto1[3];
+};
+
+/* Misc3 match parameters: TCP seq/ack numbers, VXLAN-GPE, ICMP,
+ * GENEVE TLV option data and GTP-U fields.
+ */
+struct mlx5dr_match_misc3 {
+       u32 inner_tcp_seq_num;
+       u32 outer_tcp_seq_num;
+       u32 inner_tcp_ack_num;
+       u32 outer_tcp_ack_num;
+
+       u32 reserved_auto1:8;
+       u32 outer_vxlan_gpe_vni:24;
+
+       u32 outer_vxlan_gpe_next_protocol:8;
+       u32 outer_vxlan_gpe_flags:8;
+       u32 reserved_auto2:16;
+
+       u32 icmpv4_header_data;
+       u32 icmpv6_header_data;
+
+       u8 icmpv4_type;
+       u8 icmpv4_code;
+       u8 icmpv6_type;
+       u8 icmpv6_code;
+
+       u32 geneve_tlv_option_0_data;
+
+       u32 gtpu_teid;
+
+       u8 gtpu_msg_type;
+       u8 gtpu_msg_flags;
+       u32 reserved_auto3:16;
+
+       u32 gtpu_dw_2;
+       u32 gtpu_first_ext_dw_0;
+       u32 gtpu_dw_0;
+       u32 reserved_auto4;
+};
+
+/* Misc4 match parameters: programmable (flex parser) sample field id/value pairs */
+struct mlx5dr_match_misc4 {
+       u32 prog_sample_field_value_0;
+       u32 prog_sample_field_id_0;
+       u32 prog_sample_field_value_1;
+       u32 prog_sample_field_id_1;
+       u32 prog_sample_field_value_2;
+       u32 prog_sample_field_id_2;
+       u32 prog_sample_field_value_3;
+       u32 prog_sample_field_id_3;
+       u32 reserved_auto1[8];
+};
+
+/* Misc5 match parameters: MACsec tags and raw tunnel header words */
+struct mlx5dr_match_misc5 {
+       u32 macsec_tag_0;
+       u32 macsec_tag_1;
+       u32 macsec_tag_2;
+       u32 macsec_tag_3;
+       u32 tunnel_header_0;
+       u32 tunnel_header_1;
+       u32 tunnel_header_2;
+       u32 tunnel_header_3;
+};
+
+/* Complete set of match parameters for a rule: outer and inner header
+ * specs plus all the misc parameter groups.
+ */
+struct mlx5dr_match_param {
+       struct mlx5dr_match_spec outer;
+       struct mlx5dr_match_misc misc;
+       struct mlx5dr_match_spec inner;
+       struct mlx5dr_match_misc2 misc2;
+       struct mlx5dr_match_misc3 misc3;
+       struct mlx5dr_match_misc4 misc4;
+       struct mlx5dr_match_misc5 misc5;
+};
+
+/* Helpers to test whether a match mask sets any ICMPv4 / source-IP /
+ * destination-IP bits.
+ */
+#define DR_MASK_IS_ICMPV4_SET(_misc3) ((_misc3)->icmpv4_type || \
+                                      (_misc3)->icmpv4_code || \
+                                      (_misc3)->icmpv4_header_data)
+
+#define DR_MASK_IS_SRC_IP_SET(_spec) ((_spec)->src_ip_127_96 || \
+                                     (_spec)->src_ip_95_64  || \
+                                     (_spec)->src_ip_63_32  || \
+                                     (_spec)->src_ip_31_0)
+
+#define DR_MASK_IS_DST_IP_SET(_spec) ((_spec)->dst_ip_127_96 || \
+                                     (_spec)->dst_ip_95_64  || \
+                                     (_spec)->dst_ip_63_32  || \
+                                     (_spec)->dst_ip_31_0)
+
+/* E-switch steering capabilities: default drop/uplink ICM addresses and
+ * SW-ownership flags.
+ */
+struct mlx5dr_esw_caps {
+       u64 drop_icm_address_rx;
+       u64 drop_icm_address_tx;
+       u64 uplink_icm_address_rx;
+       u64 uplink_icm_address_tx;
+       u8 sw_owner:1;
+       u8 sw_owner_v2:1;
+};
+
+/* Per-vport capabilities: GVMIs, vport number and RX/TX ICM addresses */
+struct mlx5dr_cmd_vport_cap {
+       u16 vport_gvmi;
+       u16 vhca_gvmi;
+       u16 num;
+       u64 icm_address_rx;
+       u64 icm_address_tx;
+};
+
+/* RoCE capability flags (force-loopback RC QP support) */
+struct mlx5dr_roce_cap {
+       u8 roce_en:1;
+       u8 fl_rc_qp_when_roce_disabled:1;
+       u8 fl_rc_qp_when_roce_enabled:1;
+};
+
+/* Capabilities of all vports: esw-manager, uplink and an xarray of the rest */
+struct mlx5dr_vports {
+       struct mlx5dr_cmd_vport_cap esw_manager_caps;
+       struct mlx5dr_cmd_vport_cap uplink_caps;
+       struct xarray vports_caps_xa;
+};
+
+/* Device capabilities used by SW steering: ICM addresses/sizes, flex
+ * parser ids, SW-ownership flags and per-vport/RoCE/e-switch caps.
+ * NOTE(review): presumably filled from device/FW queries (dr_cmd.c) -
+ * confirm against the query code.
+ */
+struct mlx5dr_cmd_caps {
+       u16 gvmi;
+       u64 nic_rx_drop_address;
+       u64 nic_tx_drop_address;
+       u64 nic_tx_allow_address;
+       u64 esw_rx_drop_address;
+       u64 esw_tx_drop_address;
+       u32 log_icm_size;
+       u64 hdr_modify_icm_addr;
+       u32 log_modify_pattern_icm_size;
+       u64 hdr_modify_pattern_icm_addr;
+       u32 flex_protocols;
+       u8 flex_parser_id_icmp_dw0;
+       u8 flex_parser_id_icmp_dw1;
+       u8 flex_parser_id_icmpv6_dw0;
+       u8 flex_parser_id_icmpv6_dw1;
+       u8 flex_parser_id_geneve_tlv_option_0;
+       u8 flex_parser_id_mpls_over_gre;
+       u8 flex_parser_id_mpls_over_udp;
+       u8 flex_parser_id_gtpu_dw_0;
+       u8 flex_parser_id_gtpu_teid;
+       u8 flex_parser_id_gtpu_dw_2;
+       u8 flex_parser_id_gtpu_first_ext_dw_0;
+       u8 flex_parser_ok_bits_supp;
+       u8 max_ft_level;
+       u16 roce_min_src_udp;
+       u8 sw_format_ver;
+       bool eswitch_manager;
+       bool rx_sw_owner;
+       bool tx_sw_owner;
+       bool fdb_sw_owner;
+       u8 rx_sw_owner_v2:1;
+       u8 tx_sw_owner_v2:1;
+       u8 fdb_sw_owner_v2:1;
+       struct mlx5dr_esw_caps esw_caps;
+       struct mlx5dr_vports vports;
+       bool prio_tag_required;
+       struct mlx5dr_roce_cap roce_caps;
+       u16 log_header_modify_argument_granularity;
+       u16 log_header_modify_argument_max_alloc;
+       bool support_modify_argument;
+       u8 is_ecpf:1;
+       u8 isolate_vl_tc:1;
+};
+
+/* Direction of a per-NIC domain (see struct mlx5dr_domain_rx_tx) */
+enum mlx5dr_domain_nic_type {
+       DR_DOMAIN_NIC_TYPE_RX,
+       DR_DOMAIN_NIC_TYPE_TX,
+};
+
+/* Per-direction (RX or TX) domain state: default/drop ICM addresses */
+struct mlx5dr_domain_rx_tx {
+       u64 drop_icm_addr;
+       u64 default_icm_addr;
+       enum mlx5dr_domain_nic_type type;
+       struct mutex mutex; /* protect rx/tx domain */
+};
+
+/* Domain limits and capabilities, plus the RX and TX sub-domains */
+struct mlx5dr_domain_info {
+       bool supp_sw_steering;
+       u32 max_inline_size;
+       u32 max_send_wr;
+       u32 max_log_sw_icm_sz;
+       u32 max_log_action_icm_sz;
+       u32 max_log_modify_hdr_pattern_icm_sz;
+       struct mlx5dr_domain_rx_tx rx;
+       struct mlx5dr_domain_rx_tx tx;
+       struct mlx5dr_cmd_caps caps;
+};
+
+/* Top-level SW steering domain: owns the ICM pools, send rings,
+ * kmem caches, pattern/argument managers and debug state.
+ */
+struct mlx5dr_domain {
+       struct mlx5_core_dev *mdev;
+       u32 pdn;
+       struct mlx5_uars_page *uar;
+       enum mlx5dr_domain_type type;
+       refcount_t refcount;
+       struct mlx5dr_icm_pool *ste_icm_pool;
+       struct mlx5dr_icm_pool *action_icm_pool;
+       struct mlx5dr_send_info_pool *send_info_pool_rx;
+       struct mlx5dr_send_info_pool *send_info_pool_tx;
+       struct kmem_cache *chunks_kmem_cache;
+       struct kmem_cache *htbls_kmem_cache;
+       struct mlx5dr_ptrn_mgr *ptrn_mgr;
+       struct mlx5dr_arg_mgr *arg_mgr;
+       struct mlx5dr_send_ring *send_ring;
+       struct mlx5dr_domain_info info;
+       struct xarray csum_fts_xa;
+       struct mlx5dr_ste_ctx *ste_ctx;
+       struct list_head dbg_tbl_list;
+       struct mlx5dr_dbg_dump_info dump_info;
+       struct xarray definers_xa;
+       struct xarray peer_dmn_xa;
+       /* memory management statistics */
+       u32 num_buddies[DR_ICM_TYPE_MAX];
+};
+
+/* Per-direction table state: anchor hash table and NIC matcher list */
+struct mlx5dr_table_rx_tx {
+       struct mlx5dr_ste_htbl *s_anchor;
+       struct mlx5dr_domain_rx_tx *nic_dmn;
+       u64 default_icm_addr;
+       struct list_head nic_matcher_list;
+};
+
+/* Steering table: RX/TX per-direction state plus the matcher list */
+struct mlx5dr_table {
+       struct mlx5dr_domain *dmn;
+       struct mlx5dr_table_rx_tx rx;
+       struct mlx5dr_table_rx_tx tx;
+       u32 level;
+       u32 table_type;
+       u32 table_id;
+       u32 flags;
+       struct list_head matcher_list;
+       struct mlx5dr_action *miss_action;
+       refcount_t refcount;
+       struct list_head dbg_node;
+};
+
+/* Per-direction matcher state: start/end anchor hash tables and the
+ * STE builder arrays selected per outer/inner IP version.
+ */
+struct mlx5dr_matcher_rx_tx {
+       struct mlx5dr_ste_htbl *s_htbl;
+       struct mlx5dr_ste_htbl *e_anchor;
+       struct mlx5dr_ste_build *ste_builder;
+       struct mlx5dr_ste_build ste_builder_arr[DR_RULE_IPV_MAX]
+                                              [DR_RULE_IPV_MAX]
+                                              [DR_RULE_MAX_STES];
+       u8 num_of_builders;
+       u8 num_of_builders_arr[DR_RULE_IPV_MAX][DR_RULE_IPV_MAX];
+       u64 default_icm_addr;
+       struct mlx5dr_table_rx_tx *nic_tbl;
+       u32 prio;
+       struct list_head list_node;
+       u32 rules;
+};
+
+/* Matcher: the match mask/criteria of a group of rules within a table */
+struct mlx5dr_matcher {
+       struct mlx5dr_table *tbl;
+       struct mlx5dr_matcher_rx_tx rx;
+       struct mlx5dr_matcher_rx_tx tx;
+       struct list_head list_node; /* Used for both matchers and dbg managing */
+       u32 prio;
+       struct mlx5dr_match_param mask;
+       u8 match_criteria;
+       refcount_t refcount;
+       struct list_head dbg_rule_list;
+};
+
+/* Mapping of a SW field to its HW field/bit range, used when converting
+ * modify-header actions (see mlx5dr_ste_conv_modify_hdr_sw_field()).
+ */
+struct mlx5dr_ste_action_modify_field {
+       u16 hw_field;
+       u8 start;
+       u8 end;
+       u8 l3_type;
+       u8 l4_type;
+};
+
+/* Cached modify-header pattern object (refcounted, kept on a list) */
+struct mlx5dr_ptrn_obj {
+       struct mlx5dr_icm_chunk *chunk;
+       u8 *data;
+       u16 num_of_actions;
+       u32 index;
+       refcount_t refcount;
+       struct list_head list;
+};
+
+/* Modify-header argument object: device object id and offset within it */
+struct mlx5dr_arg_obj {
+       u32 obj_id;
+       u32 obj_offset;
+       struct list_head list_node;
+       u32 log_chunk_size;
+};
+
+/* Modify-header (rewrite) action state; may use a shared pattern and
+ * argument object (ptrn/arg) or a plain ICM chunk.
+ */
+struct mlx5dr_action_rewrite {
+       struct mlx5dr_domain *dmn;
+       struct mlx5dr_icm_chunk *chunk;
+       u8 *data;
+       u16 num_of_actions;
+       u32 index;
+       u8 single_action_opt:1;
+       u8 allow_rx:1;
+       u8 allow_tx:1;
+       u8 modify_ttl:1;
+       struct mlx5dr_ptrn_obj *ptrn;
+       struct mlx5dr_arg_obj *arg;
+};
+
+/* Packet reformat (encap/decap) action state */
+struct mlx5dr_action_reformat {
+       struct mlx5dr_domain *dmn;
+       u32 id;
+       u32 size;
+       u8 param_0;
+       u8 param_1;
+};
+
+/* Flow sampler action state */
+struct mlx5dr_action_sampler {
+       struct mlx5dr_domain *dmn;
+       u64 rx_icm_addr;
+       u64 tx_icm_addr;
+       u32 sampler_id;
+};
+
+/* Destination-table action: either a SW-steering table (tbl) or a
+ * FW-owned table (fw_tbl), selected by is_fw_tbl.
+ */
+struct mlx5dr_action_dest_tbl {
+       u8 is_fw_tbl:1;
+       u8 is_wire_ft:1;
+       union {
+               struct mlx5dr_table *tbl;
+               struct {
+                       struct mlx5dr_domain *dmn;
+                       u32 id;
+                       u32 group_id;
+                       enum fs_flow_table_type type;
+                       u64 rx_icm_addr;
+                       u64 tx_icm_addr;
+                       struct mlx5dr_action **ref_actions;
+                       u32 num_of_ref_actions;
+               } fw_tbl;
+       };
+};
+
+struct mlx5dr_action_range {
+       struct mlx5dr_domain *dmn;
+       struct mlx5dr_action *hit_tbl_action;
+       struct mlx5dr_action *miss_tbl_action;
+       u32 definer_id;
+       u32 min;
+       u32 max;
+};
+
+struct mlx5dr_action_ctr {
+       u32 ctr_id;
+       u32 offset;
+};
+
+struct mlx5dr_action_vport {
+       struct mlx5dr_domain *dmn;
+       struct mlx5dr_cmd_vport_cap *caps;
+};
+
+struct mlx5dr_action_push_vlan {
+       u32 vlan_hdr; /* tpid_pcp_dei_vid */
+};
+
+struct mlx5dr_action_flow_tag {
+       u32 flow_tag;
+};
+
+struct mlx5dr_rule_action_member {
+       struct mlx5dr_action *action;
+       struct list_head list;
+};
+
+struct mlx5dr_action_aso_flow_meter {
+       struct mlx5dr_domain *dmn;
+       u32 obj_id;
+       u32 offset;
+       u8 dest_reg_id;
+       u8 init_color;
+};
+
+struct mlx5dr_action {
+       enum mlx5dr_action_type action_type;
+       refcount_t refcount;
+
+       union {
+               void *data;
+               struct mlx5dr_action_rewrite *rewrite;
+               struct mlx5dr_action_reformat *reformat;
+               struct mlx5dr_action_sampler *sampler;
+               struct mlx5dr_action_dest_tbl *dest_tbl;
+               struct mlx5dr_action_ctr *ctr;
+               struct mlx5dr_action_vport *vport;
+               struct mlx5dr_action_push_vlan *push_vlan;
+               struct mlx5dr_action_flow_tag *flow_tag;
+               struct mlx5dr_action_aso_flow_meter *aso;
+               struct mlx5dr_action_range *range;
+       };
+};
+
+enum mlx5dr_connect_type {
+       CONNECT_HIT     = 1,
+       CONNECT_MISS    = 2,
+};
+
+struct mlx5dr_htbl_connect_info {
+       enum mlx5dr_connect_type type;
+       union {
+               struct mlx5dr_ste_htbl *hit_next_htbl;
+               u64 miss_icm_addr;
+       };
+};
+
+struct mlx5dr_rule_rx_tx {
+       struct mlx5dr_matcher_rx_tx *nic_matcher;
+       struct mlx5dr_ste *last_rule_ste;
+};
+
+struct mlx5dr_rule {
+       struct mlx5dr_matcher *matcher;
+       struct mlx5dr_rule_rx_tx rx;
+       struct mlx5dr_rule_rx_tx tx;
+       struct list_head rule_actions_list;
+       struct list_head dbg_node;
+       u32 flow_source;
+};
+
+void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule,
+                                struct mlx5dr_ste *ste,
+                                bool force);
+int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr,
+                                        struct mlx5dr_ste *curr_ste,
+                                        int *num_of_stes);
+
+struct mlx5dr_icm_chunk {
+       struct mlx5dr_icm_buddy_mem *buddy_mem;
+
+       /* indicates the index of this chunk in the whole memory,
+        * used for deleting the chunk from the buddy
+        */
+       unsigned int seg;
+       enum mlx5dr_icm_chunk_size size;
+
+       /* Memory optimisation */
+       struct mlx5dr_ste *ste_arr;
+       u8 *hw_ste_arr;
+       struct list_head *miss_list;
+};
+
+static inline void mlx5dr_domain_nic_lock(struct mlx5dr_domain_rx_tx *nic_dmn)
+{
+       mutex_lock(&nic_dmn->mutex);
+}
+
+static inline void mlx5dr_domain_nic_unlock(struct mlx5dr_domain_rx_tx *nic_dmn)
+{
+       mutex_unlock(&nic_dmn->mutex);
+}
+
+static inline void mlx5dr_domain_lock(struct mlx5dr_domain *dmn)
+{
+       mlx5dr_domain_nic_lock(&dmn->info.rx);
+       mlx5dr_domain_nic_lock(&dmn->info.tx);
+}
+
+static inline void mlx5dr_domain_unlock(struct mlx5dr_domain *dmn)
+{
+       mlx5dr_domain_nic_unlock(&dmn->info.tx);
+       mlx5dr_domain_nic_unlock(&dmn->info.rx);
+}
+
+int mlx5dr_matcher_add_to_tbl_nic(struct mlx5dr_domain *dmn,
+                                 struct mlx5dr_matcher_rx_tx *nic_matcher);
+int mlx5dr_matcher_remove_from_tbl_nic(struct mlx5dr_domain *dmn,
+                                      struct mlx5dr_matcher_rx_tx *nic_matcher);
+
+int mlx5dr_matcher_select_builders(struct mlx5dr_matcher *matcher,
+                                  struct mlx5dr_matcher_rx_tx *nic_matcher,
+                                  enum mlx5dr_ipv outer_ipv,
+                                  enum mlx5dr_ipv inner_ipv);
+
+u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk);
+u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk);
+u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk);
+u8 *mlx5dr_ste_get_hw_ste(struct mlx5dr_ste *ste);
+
+struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool);
+void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl);
+
+static inline int
+mlx5dr_icm_pool_dm_type_to_entry_size(enum mlx5dr_icm_type icm_type)
+{
+       if (icm_type == DR_ICM_TYPE_STE)
+               return DR_STE_SIZE;
+
+       return DR_MODIFY_ACTION_SIZE;
+}
+
+static inline u32
+mlx5dr_icm_pool_chunk_size_to_entries(enum mlx5dr_icm_chunk_size chunk_size)
+{
+       return 1 << chunk_size;
+}
+
+static inline int
+mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size,
+                                  enum mlx5dr_icm_type icm_type)
+{
+       int num_of_entries;
+       int entry_size;
+
+       entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(icm_type);
+       num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
+
+       return entry_size * num_of_entries;
+}
+
+static inline int
+mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl)
+{
+       int num_of_entries =
+               mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk->size);
+
+       /* Threshold is 50%, one is added to table of size 1 */
+       return (num_of_entries + 1) / 2;
+}
+
+static inline bool
+mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
+{
+       if (htbl->chunk->size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
+               return false;
+
+       return true;
+}
+
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);
+
+struct mlx5dr_cmd_query_flow_table_details {
+       u8 status;
+       u8 level;
+       u64 sw_owner_icm_root_1;
+       u64 sw_owner_icm_root_0;
+};
+
+struct mlx5dr_cmd_create_flow_table_attr {
+       u32 table_type;
+       u16 uid;
+       u64 icm_addr_rx;
+       u64 icm_addr_tx;
+       u8 level;
+       bool sw_owner;
+       bool term_tbl;
+       bool decap_en;
+       bool reformat_en;
+};
+
+/* internal API functions */
+int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
+                           struct mlx5dr_cmd_caps *caps);
+int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
+                                      bool other_vport, u16 vport_number,
+                                      u64 *icm_address_rx,
+                                      u64 *icm_address_tx);
+int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev,
+                         bool other_vport, u16 vport_number, u16 *gvmi);
+int mlx5dr_cmd_query_esw_caps(struct mlx5_core_dev *mdev,
+                             struct mlx5dr_esw_caps *caps);
+int mlx5dr_cmd_query_flow_sampler(struct mlx5_core_dev *dev,
+                                 u32 sampler_id,
+                                 u64 *rx_icm_addr,
+                                 u64 *tx_icm_addr);
+int mlx5dr_cmd_sync_steering(struct mlx5_core_dev *mdev);
+int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
+                                       u32 table_type,
+                                       u32 table_id,
+                                       u32 group_id,
+                                       u32 modify_header_id,
+                                       u16 vport_id);
+int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
+                                   u32 table_type,
+                                   u32 table_id);
+int mlx5dr_cmd_alloc_modify_header(struct mlx5_core_dev *mdev,
+                                  u32 table_type,
+                                  u8 num_of_actions,
+                                  u64 *actions,
+                                  u32 *modify_header_id);
+int mlx5dr_cmd_dealloc_modify_header(struct mlx5_core_dev *mdev,
+                                    u32 modify_header_id);
+int mlx5dr_cmd_create_empty_flow_group(struct mlx5_core_dev *mdev,
+                                      u32 table_type,
+                                      u32 table_id,
+                                      u32 *group_id);
+int mlx5dr_cmd_destroy_flow_group(struct mlx5_core_dev *mdev,
+                                 u32 table_type,
+                                 u32 table_id,
+                                 u32 group_id);
+int mlx5dr_cmd_create_flow_table(struct mlx5_core_dev *mdev,
+                                struct mlx5dr_cmd_create_flow_table_attr *attr,
+                                u64 *fdb_rx_icm_addr,
+                                u32 *table_id);
+int mlx5dr_cmd_destroy_flow_table(struct mlx5_core_dev *mdev,
+                                 u32 table_id,
+                                 u32 table_type);
+int mlx5dr_cmd_query_flow_table(struct mlx5_core_dev *dev,
+                               enum fs_flow_table_type type,
+                               u32 table_id,
+                               struct mlx5dr_cmd_query_flow_table_details *output);
+int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev,
+                                  enum mlx5_reformat_ctx_type rt,
+                                  u8 reformat_param_0,
+                                  u8 reformat_param_1,
+                                  size_t reformat_size,
+                                  void *reformat_data,
+                                  u32 *reformat_id);
+void mlx5dr_cmd_destroy_reformat_ctx(struct mlx5_core_dev *mdev,
+                                    u32 reformat_id);
+int mlx5dr_cmd_create_definer(struct mlx5_core_dev *mdev,
+                             u16 format_id,
+                             u8 *dw_selectors,
+                             u8 *byte_selectors,
+                             u8 *match_mask,
+                             u32 *definer_id);
+void mlx5dr_cmd_destroy_definer(struct mlx5_core_dev *mdev,
+                               u32 definer_id);
+
+struct mlx5dr_cmd_gid_attr {
+       u8 gid[16];
+       u8 mac[6];
+       u32 roce_ver;
+};
+
+int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num,
+                        u16 index, struct mlx5dr_cmd_gid_attr *attr);
+
+int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev,
+                                       u16 log_obj_range, u32 pd,
+                                       u32 *obj_id);
+void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev,
+                                         u32 obj_id);
+
+int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id,
+                      u8 *dw_selectors, u8 *byte_selectors,
+                      u8 *match_mask, u32 *definer_id);
+void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id);
+
+struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
+                                              enum mlx5dr_icm_type icm_type);
+void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool);
+
+struct mlx5dr_icm_chunk *
+mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
+                      enum mlx5dr_icm_chunk_size chunk_size);
+void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
+
+void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx,
+                                    u8 *hw_ste_p, u32 ste_size);
+int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
+                                     struct mlx5dr_domain_rx_tx *nic_dmn,
+                                     struct mlx5dr_ste_htbl *htbl,
+                                     struct mlx5dr_htbl_connect_info *connect_info,
+                                     bool update_hw_ste);
+void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
+                                 u16 gvmi,
+                                 enum mlx5dr_domain_nic_type nic_type,
+                                 struct mlx5dr_ste_htbl *htbl,
+                                 u8 *formatted_ste,
+                                 struct mlx5dr_htbl_connect_info *connect_info);
+void mlx5dr_ste_copy_param(u8 match_criteria,
+                          struct mlx5dr_match_param *set_param,
+                          struct mlx5dr_match_parameters *mask,
+                          bool clear);
+
+struct mlx5dr_qp {
+       struct mlx5_core_dev *mdev;
+       struct mlx5_wq_qp wq;
+       struct mlx5_uars_page *uar;
+       struct mlx5_wq_ctrl wq_ctrl;
+       u32 qpn;
+       struct {
+               unsigned int head;
+               unsigned int pc;
+               unsigned int cc;
+               unsigned int size;
+               unsigned int *wqe_head;
+               unsigned int wqe_cnt;
+       } sq;
+       struct {
+               unsigned int pc;
+               unsigned int cc;
+               unsigned int size;
+               unsigned int wqe_cnt;
+       } rq;
+       int max_inline_data;
+};
+
+struct mlx5dr_cq {
+       struct mlx5_core_dev *mdev;
+       struct mlx5_cqwq wq;
+       struct mlx5_wq_ctrl wq_ctrl;
+       struct mlx5_core_cq mcq;
+       struct mlx5dr_qp *qp;
+};
+
+struct mlx5dr_mr {
+       struct mlx5_core_dev *mdev;
+       u32 mkey;
+       dma_addr_t dma_addr;
+       void *addr;
+       size_t size;
+};
+
+struct mlx5dr_send_ring {
+       struct mlx5dr_cq *cq;
+       struct mlx5dr_qp *qp;
+       struct mlx5dr_mr *mr;
+       /* How many WQEs are waiting for completion */
+       u32 pending_wqe;
+       /* Signal request per this threshold value */
+       u16 signal_th;
+       /* Each post_send_size is less than max_post_send_size */
+       u32 max_post_send_size;
+       /* manage the send queue */
+       u32 tx_head;
+       void *buf;
+       u32 buf_size;
+       u8 *sync_buff;
+       struct mlx5dr_mr *sync_mr;
+       spinlock_t lock; /* Protect the data path of the send ring */
+       bool err_state; /* send_ring is not usable in err state */
+};
+
+int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
+void mlx5dr_send_ring_free(struct mlx5dr_domain *dmn,
+                          struct mlx5dr_send_ring *send_ring);
+int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn);
+int mlx5dr_send_postsend_ste(struct mlx5dr_domain *dmn,
+                            struct mlx5dr_ste *ste,
+                            u8 *data,
+                            u16 size,
+                            u16 offset);
+int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
+                             struct mlx5dr_ste_htbl *htbl,
+                             u8 *formatted_ste, u8 *mask);
+int mlx5dr_send_postsend_formatted_htbl(struct mlx5dr_domain *dmn,
+                                       struct mlx5dr_ste_htbl *htbl,
+                                       u8 *ste_init_data,
+                                       bool update_hw_ste);
+int mlx5dr_send_postsend_action(struct mlx5dr_domain *dmn,
+                               struct mlx5dr_action *action);
+int mlx5dr_send_postsend_pattern(struct mlx5dr_domain *dmn,
+                                struct mlx5dr_icm_chunk *chunk,
+                                u16 num_of_actions,
+                                u8 *data);
+int mlx5dr_send_postsend_args(struct mlx5dr_domain *dmn, u64 arg_id,
+                             u16 num_of_actions, u8 *actions_data);
+
+int mlx5dr_send_info_pool_create(struct mlx5dr_domain *dmn);
+void mlx5dr_send_info_pool_destroy(struct mlx5dr_domain *dmn);
+struct mlx5dr_ste_send_info *mlx5dr_send_info_alloc(struct mlx5dr_domain *dmn,
+                                                   enum mlx5dr_domain_nic_type nic_type);
+void mlx5dr_send_info_free(struct mlx5dr_ste_send_info *ste_send_info);
+
+struct mlx5dr_cmd_ft_info {
+       u32 id;
+       u16 vport;
+       enum fs_flow_table_type type;
+};
+
+struct mlx5dr_cmd_flow_destination_hw_info {
+       enum mlx5_flow_destination_type type;
+       union {
+               u32 tir_num;
+               u32 ft_num;
+               u32 ft_id;
+               u32 counter_id;
+               u32 sampler_id;
+               struct {
+                       u16 num;
+                       u16 vhca_id;
+                       u32 reformat_id;
+                       u8 flags;
+               } vport;
+       };
+};
+
+struct mlx5dr_cmd_fte_info {
+       u32 dests_size;
+       u32 index;
+       struct mlx5_flow_context flow_context;
+       u32 *val;
+       struct mlx5_flow_act action;
+       struct mlx5dr_cmd_flow_destination_hw_info *dest_arr;
+       bool ignore_flow_level;
+};
+
+int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev,
+                      int opmod, int modify_mask,
+                      struct mlx5dr_cmd_ft_info *ft,
+                      u32 group_id,
+                      struct mlx5dr_cmd_fte_info *fte);
+
+bool mlx5dr_ste_supp_ttl_cs_recalc(struct mlx5dr_cmd_caps *caps);
+
+struct mlx5dr_fw_recalc_cs_ft {
+       u64 rx_icm_addr;
+       u32 table_id;
+       u32 group_id;
+       u32 modify_hdr_id;
+};
+
+struct mlx5dr_fw_recalc_cs_ft *
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num);
+void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
+                                   struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+                                       u16 vport_num,
+                                       u64 *rx_icm_addr);
+int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
+                           struct mlx5dr_cmd_flow_destination_hw_info *dest,
+                           int num_dest,
+                           bool reformat_req,
+                           u32 *tbl_id,
+                           u32 *group_id,
+                           bool ignore_flow_level,
+                           u32 flow_source);
+void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id,
+                             u32 group_id);
+
+static inline bool mlx5dr_is_fw_table(struct mlx5_flow_table *ft)
+{
+       return !ft->fs_dr_table.dr_table;
+}
+
+static inline bool mlx5dr_supp_match_ranges(struct mlx5_core_dev *dev)
+{
+       return (MLX5_CAP_GEN(dev, steering_format_version) >=
+               MLX5_STEERING_FORMAT_CONNECTX_6DX) &&
+              (MLX5_CAP_GEN_64(dev, match_definer_format_supported) &
+                       (1ULL << MLX5_IFC_DEFINER_FORMAT_ID_SELECT));
+}
+
+bool mlx5dr_domain_is_support_ptrn_arg(struct mlx5dr_domain *dmn);
+struct mlx5dr_ptrn_mgr *mlx5dr_ptrn_mgr_create(struct mlx5dr_domain *dmn);
+void mlx5dr_ptrn_mgr_destroy(struct mlx5dr_ptrn_mgr *mgr);
+struct mlx5dr_ptrn_obj *mlx5dr_ptrn_cache_get_pattern(struct mlx5dr_ptrn_mgr *mgr,
+                                                     u16 num_of_actions, u8 *data);
+void mlx5dr_ptrn_cache_put_pattern(struct mlx5dr_ptrn_mgr *mgr,
+                                  struct mlx5dr_ptrn_obj *pattern);
+struct mlx5dr_arg_mgr *mlx5dr_arg_mgr_create(struct mlx5dr_domain *dmn);
+void mlx5dr_arg_mgr_destroy(struct mlx5dr_arg_mgr *mgr);
+struct mlx5dr_arg_obj *mlx5dr_arg_get_obj(struct mlx5dr_arg_mgr *mgr,
+                                         u16 num_of_actions,
+                                         u8 *data);
+void mlx5dr_arg_put_obj(struct mlx5dr_arg_mgr *mgr,
+                       struct mlx5dr_arg_obj *arg_obj);
+u32 mlx5dr_arg_get_obj_id(struct mlx5dr_arg_obj *arg_obj);
+
+#endif  /* _DR_TYPES_H_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.c
new file mode 100644 (file)
index 0000000..4b349d4
--- /dev/null
@@ -0,0 +1,879 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2019 Mellanox Technologies */
+
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "fs_cmd.h"
+#include "mlx5dr.h"
+#include "fs_dr.h"
+#include "dr_types.h"
+
+static int mlx5_cmd_dr_update_root_ft(struct mlx5_flow_root_namespace *ns,
+                                     struct mlx5_flow_table *ft,
+                                     u32 underlay_qpn,
+                                     bool disconnect)
+{
+       return mlx5_fs_cmd_get_fw_cmds()->update_root_ft(ns, ft, underlay_qpn,
+                                                        disconnect);
+}
+
+static int set_miss_action(struct mlx5_flow_root_namespace *ns,
+                          struct mlx5_flow_table *ft,
+                          struct mlx5_flow_table *next_ft)
+{
+       struct mlx5dr_action *old_miss_action;
+       struct mlx5dr_action *action = NULL;
+       struct mlx5dr_table *next_tbl;
+       int err;
+
+       next_tbl = next_ft ? next_ft->fs_dr_table.dr_table : NULL;
+       if (next_tbl) {
+               action = mlx5dr_action_create_dest_table(next_tbl);
+               if (!action)
+                       return -EINVAL;
+       }
+       old_miss_action = ft->fs_dr_table.miss_action;
+       err = mlx5dr_table_set_miss_action(ft->fs_dr_table.dr_table, action);
+       if (err && action) {
+               err = mlx5dr_action_destroy(action);
+               if (err)
+                       mlx5_core_err(ns->dev,
+                                     "Failed to destroy action (%d)\n", err);
+               action = NULL;
+       }
+       ft->fs_dr_table.miss_action = action;
+       if (old_miss_action) {
+               err = mlx5dr_action_destroy(old_miss_action);
+               if (err)
+                       mlx5_core_err(ns->dev, "Failed to destroy action (%d)\n",
+                                     err);
+       }
+
+       return err;
+}
+
+static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
+                                        struct mlx5_flow_table *ft,
+                                        struct mlx5_flow_table_attr *ft_attr,
+                                        struct mlx5_flow_table *next_ft)
+{
+       struct mlx5dr_table *tbl;
+       u32 flags;
+       int err;
+
+       if (mlx5_fs_cmd_is_fw_term_table(ft))
+               return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
+                                                                   ft_attr,
+                                                                   next_ft);
+       flags = ft->flags;
+       /* turn off encap/decap if not supported by FW for SW steering */
+       if (!MLX5_CAP_FLOWTABLE(ns->dev, sw_owner_reformat_supported))
+               flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+                                     MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
+
+       tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags,
+                                 ft_attr->uid);
+       if (!tbl) {
+               mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
+               return -EINVAL;
+       }
+
+       ft->fs_dr_table.dr_table = tbl;
+       ft->id = mlx5dr_table_get_id(tbl);
+
+       if (next_ft) {
+               err = set_miss_action(ns, ft, next_ft);
+               if (err) {
+                       mlx5dr_table_destroy(tbl);
+                       ft->fs_dr_table.dr_table = NULL;
+                       return err;
+               }
+       }
+
+       ft->max_fte = INT_MAX;
+
+       return 0;
+}
+
+static int mlx5_cmd_dr_destroy_flow_table(struct mlx5_flow_root_namespace *ns,
+                                         struct mlx5_flow_table *ft)
+{
+       struct mlx5dr_action *action = ft->fs_dr_table.miss_action;
+       int err;
+
+       if (mlx5_fs_cmd_is_fw_term_table(ft))
+               return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_table(ns, ft);
+
+       err = mlx5dr_table_destroy(ft->fs_dr_table.dr_table);
+       if (err) {
+               mlx5_core_err(ns->dev, "Failed to destroy flow_table (%d)\n",
+                             err);
+               return err;
+       }
+       if (action) {
+               err = mlx5dr_action_destroy(action);
+               if (err) {
+                       mlx5_core_err(ns->dev, "Failed to destroy action(%d)\n",
+                                     err);
+                       return err;
+               }
+       }
+
+       return err;
+}
+
+static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns,
+                                        struct mlx5_flow_table *ft,
+                                        struct mlx5_flow_table *next_ft)
+{
+       if (mlx5_fs_cmd_is_fw_term_table(ft))
+               return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft);
+
+       return set_miss_action(ns, ft, next_ft);
+}
+
+static int mlx5_cmd_dr_create_flow_group(struct mlx5_flow_root_namespace *ns,
+                                        struct mlx5_flow_table *ft,
+                                        u32 *in,
+                                        struct mlx5_flow_group *fg)
+{
+       struct mlx5dr_matcher *matcher;
+       u32 priority = MLX5_GET(create_flow_group_in, in,
+                               start_flow_index);
+       u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
+                                           in,
+                                           match_criteria_enable);
+       struct mlx5dr_match_parameters mask;
+
+       if (mlx5_fs_cmd_is_fw_term_table(ft))
+               return mlx5_fs_cmd_get_fw_cmds()->create_flow_group(ns, ft, in,
+                                                                   fg);
+
+       mask.match_buf = MLX5_ADDR_OF(create_flow_group_in,
+                                     in, match_criteria);
+       mask.match_sz = sizeof(fg->mask.match_criteria);
+
+       matcher = mlx5dr_matcher_create(ft->fs_dr_table.dr_table,
+                                       priority,
+                                       match_criteria_enable,
+                                       &mask);
+       if (!matcher) {
+               mlx5_core_err(ns->dev, "Failed creating matcher\n");
+               return -EINVAL;
+       }
+
+       fg->fs_dr_matcher.dr_matcher = matcher;
+       return 0;
+}
+
+static int mlx5_cmd_dr_destroy_flow_group(struct mlx5_flow_root_namespace *ns,
+                                         struct mlx5_flow_table *ft,
+                                         struct mlx5_flow_group *fg)
+{
+       if (mlx5_fs_cmd_is_fw_term_table(ft))
+               return mlx5_fs_cmd_get_fw_cmds()->destroy_flow_group(ns, ft, fg);
+
+       return mlx5dr_matcher_destroy(fg->fs_dr_matcher.dr_matcher);
+}
+
+static struct mlx5dr_action *create_vport_action(struct mlx5dr_domain *domain,
+                                                struct mlx5_flow_rule *dst)
+{
+       struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+       return mlx5dr_action_create_dest_vport(domain, dest_attr->vport.num,
+                                              dest_attr->vport.flags &
+                                              MLX5_FLOW_DEST_VPORT_VHCA_ID,
+                                              dest_attr->vport.vhca_id);
+}
+
+static struct mlx5dr_action *create_uplink_action(struct mlx5dr_domain *domain,
+                                                 struct mlx5_flow_rule *dst)
+{
+       struct mlx5_flow_destination *dest_attr = &dst->dest_attr;
+
+       return mlx5dr_action_create_dest_vport(domain, MLX5_VPORT_UPLINK, 1,
+                                              dest_attr->vport.vhca_id);
+}
+
+static struct mlx5dr_action *create_ft_action(struct mlx5dr_domain *domain,
+                                             struct mlx5_flow_rule *dst)
+{
+       struct mlx5_flow_table *dest_ft = dst->dest_attr.ft;
+       struct mlx5dr_action *tbl_action;
+
+       if (mlx5dr_is_fw_table(dest_ft))
+               return mlx5dr_action_create_dest_flow_fw_table(domain, dest_ft);
+
+       tbl_action = mlx5dr_action_create_dest_table(dest_ft->fs_dr_table.dr_table);
+       if (tbl_action)
+               tbl_action->dest_tbl->is_wire_ft =
+                       dest_ft->flags & MLX5_FLOW_TABLE_UPLINK_VPORT ? 1 : 0;
+
+       return tbl_action;
+}
+
+static struct mlx5dr_action *create_range_action(struct mlx5dr_domain *domain,
+                                                struct mlx5_flow_rule *dst)
+{
+       return mlx5dr_action_create_dest_match_range(domain,
+                                                    dst->dest_attr.range.field,
+                                                    dst->dest_attr.range.hit_ft,
+                                                    dst->dest_attr.range.miss_ft,
+                                                    dst->dest_attr.range.min,
+                                                    dst->dest_attr.range.max);
+}
+
+static struct mlx5dr_action *create_action_push_vlan(struct mlx5dr_domain *domain,
+                                                    struct mlx5_fs_vlan *vlan)
+{
+       u16 n_ethtype = vlan->ethtype;
+       u8  prio = vlan->prio;
+       u16 vid = vlan->vid;
+       u32 vlan_hdr;
+
+       vlan_hdr = (u32)n_ethtype << 16 | (u32)(prio) << 12 |  (u32)vid;
+       return mlx5dr_action_create_push_vlan(domain, htonl(vlan_hdr));
+}
+
+static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
+{
+       return (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT ||
+               dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) &&
+               dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
+}
+
+/* We want to support a rule with 32 destinations, which means we need to
+ * account for 32 destinations plus usually a counter plus one more action
+ * for a multi-destination flow table.
+ */
+#define MLX5_FLOW_CONTEXT_ACTION_MAX  34
+/* Translate a flow-steering FTE into a SW-steering (mlx5dr) rule.
+ *
+ * Builds the mlx5dr action array from fte->act_dests in the order that
+ * SW steering requires, creates the dr rule, and records in
+ * fte->fs_dr_rule the actions that were created here (fs_dr_actions)
+ * so mlx5_cmd_dr_delete_fte() can destroy them together with the rule.
+ * Returns 0 on success or a negative errno.
+ */
+static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
+                                 struct mlx5_flow_table *ft,
+                                 struct mlx5_flow_group *group,
+                                 struct fs_fte *fte)
+{
+       struct mlx5dr_domain *domain = ns->fs_dr_domain.dr_domain;
+       struct mlx5dr_action_dest *term_actions;
+       struct mlx5_pkt_reformat *pkt_reformat;
+       struct mlx5dr_match_parameters params;
+       struct mlx5_core_dev *dev = ns->dev;
+       struct mlx5dr_action **fs_dr_actions;
+       struct mlx5dr_action *tmp_action;
+       struct mlx5dr_action **actions;
+       bool delay_encap_set = false;
+       struct mlx5dr_rule *rule;
+       struct mlx5_flow_rule *dst;
+       int fs_dr_num_actions = 0;
+       int num_term_actions = 0;
+       int num_actions = 0;
+       size_t match_sz;
+       int err = 0;
+       int i;
+
+       /* FW-owned termination tables go through the FW command set */
+       if (mlx5_fs_cmd_is_fw_term_table(ft))
+               return mlx5_fs_cmd_get_fw_cmds()->create_fte(ns, ft, group, fte);
+
+       /* actions: full ordered list passed to mlx5dr_rule_create() */
+       actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX, sizeof(*actions),
+                         GFP_KERNEL);
+       if (!actions) {
+               err = -ENOMEM;
+               goto out_err;
+       }
+
+       /* fs_dr_actions: only actions created by this function, kept for
+        * destruction on rule delete or on the error path below.
+        */
+       fs_dr_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+                               sizeof(*fs_dr_actions), GFP_KERNEL);
+       if (!fs_dr_actions) {
+               err = -ENOMEM;
+               goto free_actions_alloc;
+       }
+
+       /* term_actions: terminating destinations (+ optional per-dest
+        * reformat), folded into the actions list at the end.
+        */
+       term_actions = kcalloc(MLX5_FLOW_CONTEXT_ACTION_MAX,
+                              sizeof(*term_actions), GFP_KERNEL);
+       if (!term_actions) {
+               err = -ENOMEM;
+               goto free_fs_dr_actions_alloc;
+       }
+
+       match_sz = sizeof(fte->val);
+
+       /* Drop reformat action bit if destination vport set with reformat */
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+               list_for_each_entry(dst, &fte->node.children, node.list) {
+                       if (!contain_vport_reformat_action(dst))
+                               continue;
+
+                       fte->act_dests.action.action &= ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+                       break;
+               }
+       }
+
+       /* The order of the actions are must to be keep, only the following
+        * order is supported by SW steering:
+        * TX: modify header -> push vlan -> encap
+        * RX: decap -> pop vlan -> modify header
+        */
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
+               enum mlx5dr_action_reformat_type decap_type =
+                       DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2;
+
+               tmp_action = mlx5dr_action_create_packet_reformat(domain,
+                                                                 decap_type,
+                                                                 0, 0, 0,
+                                                                 NULL);
+               if (!tmp_action) {
+                       err = -ENOMEM;
+                       goto free_actions;
+               }
+               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+               actions[num_actions++] = tmp_action;
+       }
+
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
+               bool is_decap;
+
+               pkt_reformat = fte->act_dests.action.pkt_reformat;
+               if (pkt_reformat->owner == MLX5_FLOW_RESOURCE_OWNER_FW) {
+                       err = -EINVAL;
+                       mlx5dr_err(domain, "FW-owned reformat can't be used in SW rule\n");
+                       goto free_actions;
+               }
+
+               is_decap = pkt_reformat->reformat_type ==
+                          MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
+
+               /* Decap must come first; encap is deferred until after
+                * modify header / push vlan (see delay_encap_set use below).
+                */
+               if (is_decap)
+                       actions[num_actions++] =
+                               pkt_reformat->fs_dr_action.dr_action;
+               else
+                       delay_encap_set = true;
+       }
+
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+               tmp_action =
+                       mlx5dr_action_create_pop_vlan();
+               if (!tmp_action) {
+                       err = -ENOMEM;
+                       goto free_actions;
+               }
+               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+               actions[num_actions++] = tmp_action;
+       }
+
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2) {
+               tmp_action =
+                       mlx5dr_action_create_pop_vlan();
+               if (!tmp_action) {
+                       err = -ENOMEM;
+                       goto free_actions;
+               }
+               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+               actions[num_actions++] = tmp_action;
+       }
+
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
+               struct mlx5_modify_hdr *modify_hdr = fte->act_dests.action.modify_hdr;
+
+               actions[num_actions++] = modify_hdr->fs_dr_action.dr_action;
+       }
+
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
+               tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[0]);
+               if (!tmp_action) {
+                       err = -ENOMEM;
+                       goto free_actions;
+               }
+               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+               actions[num_actions++] = tmp_action;
+       }
+
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
+               tmp_action = create_action_push_vlan(domain, &fte->act_dests.action.vlan[1]);
+               if (!tmp_action) {
+                       err = -ENOMEM;
+                       goto free_actions;
+               }
+               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+               actions[num_actions++] = tmp_action;
+       }
+
+       /* Encap was deferred to preserve the mandatory TX action order */
+       if (delay_encap_set)
+               actions[num_actions++] = pkt_reformat->fs_dr_action.dr_action;
+
+       /* The order of the actions below is not important */
+
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+               tmp_action = mlx5dr_action_create_drop();
+               if (!tmp_action) {
+                       err = -ENOMEM;
+                       goto free_actions;
+               }
+               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+               term_actions[num_term_actions++].dest = tmp_action;
+       }
+
+       if (fte->act_dests.flow_context.flow_tag) {
+               tmp_action =
+                       mlx5dr_action_create_tag(fte->act_dests.flow_context.flow_tag);
+               if (!tmp_action) {
+                       err = -ENOMEM;
+                       goto free_actions;
+               }
+               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+               actions[num_actions++] = tmp_action;
+       }
+
+       /* Translate each forward destination into a terminating action;
+        * counter destinations are skipped here and handled below.
+        */
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+               list_for_each_entry(dst, &fte->node.children, node.list) {
+                       enum mlx5_flow_destination_type type = dst->dest_attr.type;
+                       u32 id;
+
+                       if (fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+                           num_term_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                               err = -EOPNOTSUPP;
+                               goto free_actions;
+                       }
+
+                       if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
+
+                       switch (type) {
+                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
+                               tmp_action = create_ft_action(domain, dst);
+                               if (!tmp_action) {
+                                       err = -ENOMEM;
+                                       goto free_actions;
+                               }
+                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                               term_actions[num_term_actions++].dest = tmp_action;
+                               break;
+                       case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+                       case MLX5_FLOW_DESTINATION_TYPE_VPORT:
+                               tmp_action = type == MLX5_FLOW_DESTINATION_TYPE_VPORT ?
+                                            create_vport_action(domain, dst) :
+                                            create_uplink_action(domain, dst);
+                               if (!tmp_action) {
+                                       err = -ENOMEM;
+                                       goto free_actions;
+                               }
+                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                               term_actions[num_term_actions].dest = tmp_action;
+
+                               /* Per-destination reformat rides along with
+                                * the vport destination entry.
+                                */
+                               if (dst->dest_attr.vport.flags &
+                                   MLX5_FLOW_DEST_VPORT_REFORMAT_ID) {
+                                       pkt_reformat = dst->dest_attr.vport.pkt_reformat;
+                                       term_actions[num_term_actions].reformat =
+                                               pkt_reformat->fs_dr_action.dr_action;
+                               }
+
+                               num_term_actions++;
+                               break;
+                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM:
+                               id = dst->dest_attr.ft_num;
+                               tmp_action = mlx5dr_action_create_dest_table_num(domain,
+                                                                                id);
+                               if (!tmp_action) {
+                                       err = -ENOMEM;
+                                       goto free_actions;
+                               }
+                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                               term_actions[num_term_actions++].dest = tmp_action;
+                               break;
+                       case MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER:
+                               id = dst->dest_attr.sampler_id;
+                               tmp_action = mlx5dr_action_create_flow_sampler(domain,
+                                                                              id);
+                               if (!tmp_action) {
+                                       err = -ENOMEM;
+                                       goto free_actions;
+                               }
+                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                               term_actions[num_term_actions++].dest = tmp_action;
+                               break;
+                       case MLX5_FLOW_DESTINATION_TYPE_RANGE:
+                               tmp_action = create_range_action(domain, dst);
+                               if (!tmp_action) {
+                                       err = -ENOMEM;
+                                       goto free_actions;
+                               }
+                               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                               term_actions[num_term_actions++].dest = tmp_action;
+                               break;
+                       default:
+                               err = -EOPNOTSUPP;
+                               goto free_actions;
+                       }
+               }
+       }
+
+       /* Counters are non-terminating: they go on the main actions list */
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+               list_for_each_entry(dst, &fte->node.children, node.list) {
+                       u32 id;
+
+                       if (dst->dest_attr.type !=
+                           MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+                               continue;
+
+                       if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+                           fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                               err = -EOPNOTSUPP;
+                               goto free_actions;
+                       }
+
+                       id = dst->dest_attr.counter_id;
+                       tmp_action =
+                               mlx5dr_action_create_flow_counter(id);
+                       if (!tmp_action) {
+                               err = -ENOMEM;
+                               goto free_actions;
+                       }
+
+                       fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+                       actions[num_actions++] = tmp_action;
+               }
+       }
+
+       if (fte->act_dests.action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
+               struct mlx5_flow_act *action = &fte->act_dests.action;
+
+               /* Only the flow-meter ASO type is supported by SW steering */
+               if (fte->act_dests.action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
+                       err = -EOPNOTSUPP;
+                       goto free_actions;
+               }
+
+               tmp_action =
+                       mlx5dr_action_create_aso(domain,
+                                                action->exe_aso.object_id,
+                                                action->exe_aso.return_reg_id,
+                                                action->exe_aso.type,
+                                                action->exe_aso.flow_meter.init_color,
+                                                action->exe_aso.flow_meter.meter_idx);
+               if (!tmp_action) {
+                       err = -ENOMEM;
+                       goto free_actions;
+               }
+               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+               actions[num_actions++] = tmp_action;
+       }
+
+       params.match_sz = match_sz;
+       params.match_buf = (u64 *)fte->val;
+       /* Single destination: append it (and its optional reformat)
+        * directly.  Multiple destinations need a multi-destination table.
+        */
+       if (num_term_actions == 1) {
+               if (term_actions->reformat) {
+                       if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                               err = -EOPNOTSUPP;
+                               goto free_actions;
+                       }
+                       actions[num_actions++] = term_actions->reformat;
+               }
+
+               if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                       err = -EOPNOTSUPP;
+                       goto free_actions;
+               }
+               actions[num_actions++] = term_actions->dest;
+       } else if (num_term_actions > 1) {
+               bool ignore_flow_level =
+                       !!(fte->act_dests.action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL);
+               u32 flow_source = fte->act_dests.flow_context.flow_source;
+
+               if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX ||
+                   fs_dr_num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) {
+                       err = -EOPNOTSUPP;
+                       goto free_actions;
+               }
+               tmp_action = mlx5dr_action_create_mult_dest_tbl(domain,
+                                                               term_actions,
+                                                               num_term_actions,
+                                                               ignore_flow_level,
+                                                               flow_source);
+               if (!tmp_action) {
+                       err = -EOPNOTSUPP;
+                       goto free_actions;
+               }
+               fs_dr_actions[fs_dr_num_actions++] = tmp_action;
+               actions[num_actions++] = tmp_action;
+       }
+
+       rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
+                                 &params,
+                                 num_actions,
+                                 actions,
+                                 fte->act_dests.flow_context.flow_source);
+       if (!rule) {
+               err = -EINVAL;
+               goto free_actions;
+       }
+
+       /* On success, ownership of fs_dr_actions moves to the fte */
+       kfree(term_actions);
+       kfree(actions);
+
+       fte->fs_dr_rule.dr_rule = rule;
+       fte->fs_dr_rule.num_actions = fs_dr_num_actions;
+       fte->fs_dr_rule.dr_actions = fs_dr_actions;
+
+       return 0;
+
+free_actions:
+       /* Free in reverse order to handle action dependencies */
+       for (i = fs_dr_num_actions - 1; i >= 0; i--)
+               if (!IS_ERR_OR_NULL(fs_dr_actions[i]))
+                       mlx5dr_action_destroy(fs_dr_actions[i]);
+
+       kfree(term_actions);
+free_fs_dr_actions_alloc:
+       kfree(fs_dr_actions);
+free_actions_alloc:
+       kfree(actions);
+out_err:
+       mlx5_core_err(dev, "Failed to create dr rule err(%d)\n", err);
+       return err;
+}
+
+/* Map a fs-core reformat type to its mlx5dr equivalent and create the
+ * SW-steering packet-reformat action.  On success the action is stored
+ * in @pkt_reformat and ownership is marked MLX5_FLOW_RESOURCE_OWNER_SW.
+ * Returns 0, -EOPNOTSUPP for an unsupported type, or -EINVAL on
+ * creation failure.
+ */
+static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns,
+                                            struct mlx5_pkt_reformat_params *params,
+                                            enum mlx5_flow_namespace_type namespace,
+                                            struct mlx5_pkt_reformat *pkt_reformat)
+{
+       struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
+       struct mlx5dr_action *action;
+       int dr_reformat;
+
+       switch (params->type) {
+       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+       case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+       case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+               dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2;
+               break;
+       case MLX5_REFORMAT_TYPE_L3_TUNNEL_TO_L2:
+               dr_reformat = DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2;
+               break;
+       case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+               dr_reformat = DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3;
+               break;
+       case MLX5_REFORMAT_TYPE_INSERT_HDR:
+               dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR;
+               break;
+       case MLX5_REFORMAT_TYPE_REMOVE_HDR:
+               dr_reformat = DR_ACTION_REFORMAT_TYP_REMOVE_HDR;
+               break;
+       default:
+               mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n",
+                             params->type);
+               return -EOPNOTSUPP;
+       }
+
+       action = mlx5dr_action_create_packet_reformat(dr_domain,
+                                                     dr_reformat,
+                                                     params->param_0,
+                                                     params->param_1,
+                                                     params->size,
+                                                     params->data);
+       if (!action) {
+               mlx5_core_err(ns->dev, "Failed allocating packet-reformat action\n");
+               return -EINVAL;
+       }
+
+       pkt_reformat->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+       pkt_reformat->fs_dr_action.dr_action = action;
+
+       return 0;
+}
+
+/* Destroy the SW-steering action backing a reformat object */
+static void mlx5_cmd_dr_packet_reformat_dealloc(struct mlx5_flow_root_namespace *ns,
+                                               struct mlx5_pkt_reformat *pkt_reformat)
+{
+       mlx5dr_action_destroy(pkt_reformat->fs_dr_action.dr_action);
+}
+
+/* Create a SW-steering modify-header action from @num_actions raw
+ * set/add/copy entries in @modify_actions.  On success the action is
+ * stored in @modify_hdr with SW ownership.  Returns 0 or -EINVAL.
+ */
+static int mlx5_cmd_dr_modify_header_alloc(struct mlx5_flow_root_namespace *ns,
+                                          u8 namespace, u8 num_actions,
+                                          void *modify_actions,
+                                          struct mlx5_modify_hdr *modify_hdr)
+{
+       struct mlx5dr_domain *dr_domain = ns->fs_dr_domain.dr_domain;
+       struct mlx5dr_action *action;
+       size_t actions_sz;
+
+       /* each entry is one set_add_copy_action_in record */
+       actions_sz = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) *
+               num_actions;
+       action = mlx5dr_action_create_modify_header(dr_domain, 0,
+                                                   actions_sz,
+                                                   modify_actions);
+       if (!action) {
+               mlx5_core_err(ns->dev, "Failed allocating modify-header action\n");
+               return -EINVAL;
+       }
+
+       modify_hdr->owner = MLX5_FLOW_RESOURCE_OWNER_SW;
+       modify_hdr->fs_dr_action.dr_action = action;
+
+       return 0;
+}
+
+/* Destroy the SW-steering action backing a modify-header object */
+static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
+                                             struct mlx5_modify_hdr *modify_hdr)
+{
+       mlx5dr_action_destroy(modify_hdr->fs_dr_action.dr_action);
+}
+
+/* Match definers are not supported by SW steering */
+static int
+mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+                                 int definer_id)
+{
+       return -EOPNOTSUPP;
+}
+
+/* Match definers are not supported by SW steering */
+static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns,
+                                           u16 format_id, u32 *match_mask)
+{
+       return -EOPNOTSUPP;
+}
+
+/* Destroy the dr rule of @fte and all the actions fs_dr created for it
+ * in mlx5_cmd_dr_create_fte().  Returns 0 or the rule-destroy error.
+ */
+static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
+                                 struct mlx5_flow_table *ft,
+                                 struct fs_fte *fte)
+{
+       struct mlx5_fs_dr_rule *rule = &fte->fs_dr_rule;
+       int err;
+       int i;
+
+       /* FW-owned termination tables go through the FW command set */
+       if (mlx5_fs_cmd_is_fw_term_table(ft))
+               return mlx5_fs_cmd_get_fw_cmds()->delete_fte(ns, ft, fte);
+
+       err = mlx5dr_rule_destroy(rule->dr_rule);
+       if (err)
+               return err;
+
+       /* Free in reverse order to handle action dependencies */
+       for (i = rule->num_actions - 1; i >= 0; i--)
+               if (!IS_ERR_OR_NULL(rule->dr_actions[i]))
+                       mlx5dr_action_destroy(rule->dr_actions[i]);
+
+       kfree(rule->dr_actions);
+       return 0;
+}
+
+/* Update an FTE by make-before-break: create a rule for the new state
+ * first, then delete the old one, so traffic keeps matching throughout.
+ * On create failure the old rule details are restored into @fte.
+ */
+static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns,
+                                 struct mlx5_flow_table *ft,
+                                 struct mlx5_flow_group *group,
+                                 int modify_mask,
+                                 struct fs_fte *fte)
+{
+       struct fs_fte fte_tmp = {};
+       int ret;
+
+       if (mlx5_fs_cmd_is_fw_term_table(ft))
+               return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte);
+
+       /* Backup current dr rule details */
+       fte_tmp.fs_dr_rule = fte->fs_dr_rule;
+       memset(&fte->fs_dr_rule, 0, sizeof(struct mlx5_fs_dr_rule));
+
+       /* First add the new updated rule, then delete the old rule */
+       ret = mlx5_cmd_dr_create_fte(ns, ft, group, fte);
+       if (ret)
+               goto restore_fte;
+
+       ret = mlx5_cmd_dr_delete_fte(ns, ft, &fte_tmp);
+       WARN_ONCE(ret, "dr update fte duplicate rule deletion failed\n");
+       return ret;
+
+restore_fte:
+       fte->fs_dr_rule = fte_tmp.fs_dr_rule;
+       return ret;
+}
+
+/* Pair (or unpair, when @peer_ns is NULL) this namespace's dr domain
+ * with a peer domain identified by @peer_vhca_id.
+ */
+static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
+                               struct mlx5_flow_root_namespace *peer_ns,
+                               u16 peer_vhca_id)
+{
+       struct mlx5dr_domain *peer_domain = NULL;
+
+       if (peer_ns)
+               peer_domain = peer_ns->fs_dr_domain.dr_domain;
+       mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
+                              peer_domain, peer_vhca_id);
+       return 0;
+}
+
+/* Create the SW-steering FDB domain backing this root namespace */
+static int mlx5_cmd_dr_create_ns(struct mlx5_flow_root_namespace *ns)
+{
+       ns->fs_dr_domain.dr_domain =
+               mlx5dr_domain_create(ns->dev,
+                                    MLX5DR_DOMAIN_TYPE_FDB);
+       if (!ns->fs_dr_domain.dr_domain) {
+               mlx5_core_err(ns->dev, "Failed to create dr flow namespace\n");
+               return -EOPNOTSUPP;
+       }
+       return 0;
+}
+
+/* Tear down the dr domain created by mlx5_cmd_dr_create_ns() */
+static int mlx5_cmd_dr_destroy_ns(struct mlx5_flow_root_namespace *ns)
+{
+       return mlx5dr_domain_destroy(ns->fs_dr_domain.dr_domain);
+}
+
+/* Report the steering capability flags SW steering supports for
+ * @ft_type on this device.  Extra capabilities (RX vlan push, TX vlan
+ * pop, match ranges) apply only to FDB on devices newer than ConnectX-5.
+ */
+static u32 mlx5_cmd_dr_get_capabilities(struct mlx5_flow_root_namespace *ns,
+                                       enum fs_flow_table_type ft_type)
+{
+       u32 steering_caps = MLX5_FLOW_STEERING_CAP_DUPLICATE_MATCH;
+
+       if (ft_type != FS_FT_FDB ||
+           MLX5_CAP_GEN(ns->dev, steering_format_version) == MLX5_STEERING_FORMAT_CONNECTX_5)
+               return steering_caps;
+
+       steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_PUSH_ON_RX;
+       steering_caps |= MLX5_FLOW_STEERING_CAP_VLAN_POP_ON_TX;
+
+       if (mlx5dr_supp_match_ranges(ns->dev))
+               steering_caps |= MLX5_FLOW_STEERING_CAP_MATCH_RANGES;
+
+       return steering_caps;
+}
+
+/* Return the HW reformat ID of a SW-owned reformat action, or
+ * -EOPNOTSUPP for reformat types that have no retrievable ID
+ * (e.g. decap / remove-header).
+ */
+int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+{
+       switch (pkt_reformat->reformat_type) {
+       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
+       case MLX5_REFORMAT_TYPE_L2_TO_NVGRE:
+       case MLX5_REFORMAT_TYPE_L2_TO_L2_TUNNEL:
+       case MLX5_REFORMAT_TYPE_L2_TO_L3_TUNNEL:
+       case MLX5_REFORMAT_TYPE_INSERT_HDR:
+               return mlx5dr_action_get_pkt_reformat_id(pkt_reformat->fs_dr_action.dr_action);
+       }
+       return -EOPNOTSUPP;
+}
+
+/* Whether SW steering can be used on @dev (thin wrapper over mlx5dr) */
+bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
+{
+       return mlx5dr_is_supported(dev);
+}
+
+/* fs-core command backend implemented on top of SW steering.  The
+ * flow-table/flow-group callbacks are defined earlier in this file.
+ */
+static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
+       .create_flow_table = mlx5_cmd_dr_create_flow_table,
+       .destroy_flow_table = mlx5_cmd_dr_destroy_flow_table,
+       .modify_flow_table = mlx5_cmd_dr_modify_flow_table,
+       .create_flow_group = mlx5_cmd_dr_create_flow_group,
+       .destroy_flow_group = mlx5_cmd_dr_destroy_flow_group,
+       .create_fte = mlx5_cmd_dr_create_fte,
+       .update_fte = mlx5_cmd_dr_update_fte,
+       .delete_fte = mlx5_cmd_dr_delete_fte,
+       .update_root_ft = mlx5_cmd_dr_update_root_ft,
+       .packet_reformat_alloc = mlx5_cmd_dr_packet_reformat_alloc,
+       .packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
+       .modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
+       .modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
+       .create_match_definer = mlx5_cmd_dr_create_match_definer,
+       .destroy_match_definer = mlx5_cmd_dr_destroy_match_definer,
+       .set_peer = mlx5_cmd_dr_set_peer,
+       .create_ns = mlx5_cmd_dr_create_ns,
+       .destroy_ns = mlx5_cmd_dr_destroy_ns,
+       .get_capabilities = mlx5_cmd_dr_get_capabilities,
+};
+
+/* Accessor for the SW-steering command backend */
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
+{
+               return &mlx5_flow_cmds_dr;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/fs_dr.h
new file mode 100644 (file)
index 0000000..99a3b2e
--- /dev/null
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+ * Copyright (c) 2019 Mellanox Technologies
+ */
+
+#ifndef _MLX5_FS_DR_
+#define _MLX5_FS_DR_
+
+#include "mlx5dr.h"
+
+struct mlx5_flow_root_namespace;
+struct fs_fte;
+
+/* SW-steering action handle embedded in fs-core objects
+ * (pkt_reformat / modify_hdr).
+ */
+struct mlx5_fs_dr_action {
+       struct mlx5dr_action *dr_action;
+};
+
+/* Per-FTE SW-steering state: the dr rule plus the actions fs_dr itself
+ * created for it (and therefore must destroy on delete).
+ */
+struct mlx5_fs_dr_rule {
+       struct mlx5dr_rule    *dr_rule;
+       /* Only actions created by fs_dr */
+       struct mlx5dr_action  **dr_actions;
+       int                      num_actions;
+};
+
+/* dr domain backing a flow root namespace */
+struct mlx5_fs_dr_domain {
+       struct mlx5dr_domain    *dr_domain;
+};
+
+/* dr matcher backing a flow group */
+struct mlx5_fs_dr_matcher {
+       struct mlx5dr_matcher *dr_matcher;
+};
+
+/* dr table backing a flow table, plus its default-miss action */
+struct mlx5_fs_dr_table {
+       struct mlx5dr_table  *dr_table;
+       struct mlx5dr_action *miss_action;
+};
+
+#ifdef CONFIG_MLX5_SW_STEERING
+
+bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev);
+
+int mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat);
+
+const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void);
+
+#else
+
+static inline const struct mlx5_flow_cmds *mlx5_fs_cmd_get_dr_cmds(void)
+{
+       return NULL;
+}
+
+/* NOTE(review): stub returns u32 while the real implementation returns
+ * int — confirm no caller relies on the sign of the return value when
+ * CONFIG_MLX5_SW_STEERING is disabled.
+ */
+static inline u32 mlx5_fs_dr_action_get_pkt_reformat_id(struct mlx5_pkt_reformat *pkt_reformat)
+{
+       return 0;
+}
+
+static inline bool mlx5_fs_dr_is_supported(struct mlx5_core_dev *dev)
+{
+       return false;
+}
+
+#endif /* CONFIG_MLX5_SW_STEERING */
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr.h
new file mode 100644 (file)
index 0000000..fb078fa
--- /dev/null
@@ -0,0 +1,603 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019, Mellanox Technologies */
+
+#ifndef MLX5_IFC_DR_H
+#define MLX5_IFC_DR_H
+
+enum {
+       MLX5DR_STE_LU_TYPE_DONT_CARE                    = 0x0f,
+};
+
+struct mlx5_ifc_ste_general_bits {
+       u8         entry_type[0x4];
+       u8         reserved_at_4[0x4];
+       u8         entry_sub_type[0x8];
+       u8         byte_mask[0x10];
+
+       u8         next_table_base_63_48[0x10];
+       u8         next_lu_type[0x8];
+       u8         next_table_base_39_32_size[0x8];
+
+       u8         next_table_base_31_5_size[0x1b];
+       u8         linear_hash_enable[0x1];
+       u8         reserved_at_5c[0x2];
+       u8         next_table_rank[0x2];
+
+       u8         reserved_at_60[0xa0];
+       u8         tag_value[0x60];
+       u8         bit_mask[0x60];
+};
+
+struct mlx5_ifc_ste_sx_transmit_bits {
+       u8         entry_type[0x4];
+       u8         reserved_at_4[0x4];
+       u8         entry_sub_type[0x8];
+       u8         byte_mask[0x10];
+
+       u8         next_table_base_63_48[0x10];
+       u8         next_lu_type[0x8];
+       u8         next_table_base_39_32_size[0x8];
+
+       u8         next_table_base_31_5_size[0x1b];
+       u8         linear_hash_enable[0x1];
+       u8         reserved_at_5c[0x2];
+       u8         next_table_rank[0x2];
+
+       u8         sx_wire[0x1];
+       u8         sx_func_lb[0x1];
+       u8         sx_sniffer[0x1];
+       u8         sx_wire_enable[0x1];
+       u8         sx_func_lb_enable[0x1];
+       u8         sx_sniffer_enable[0x1];
+       u8         action_type[0x3];
+       u8         reserved_at_69[0x1];
+       u8         action_description[0x6];
+       u8         gvmi[0x10];
+
+       u8         encap_pointer_vlan_data[0x20];
+
+       u8         loopback_syndome_en[0x8];
+       u8         loopback_syndome[0x8];
+       u8         counter_trigger[0x10];
+
+       u8         miss_address_63_48[0x10];
+       u8         counter_trigger_23_16[0x8];
+       u8         miss_address_39_32[0x8];
+
+       u8         miss_address_31_6[0x1a];
+       u8         learning_point[0x1];
+       u8         go_back[0x1];
+       u8         match_polarity[0x1];
+       u8         mask_mode[0x1];
+       u8         miss_rank[0x2];
+};
+
+struct mlx5_ifc_ste_rx_steering_mult_bits {
+       u8         entry_type[0x4];
+       u8         reserved_at_4[0x4];
+       u8         entry_sub_type[0x8];
+       u8         byte_mask[0x10];
+
+       u8         next_table_base_63_48[0x10];
+       u8         next_lu_type[0x8];
+       u8         next_table_base_39_32_size[0x8];
+
+       u8         next_table_base_31_5_size[0x1b];
+       u8         linear_hash_enable[0x1];
+       u8         reserved_at_[0x2];
+       u8         next_table_rank[0x2];
+
+       u8         member_count[0x10];
+       u8         gvmi[0x10];
+
+       u8         qp_list_pointer[0x20];
+
+       u8         reserved_at_a0[0x1];
+       u8         tunneling_action[0x3];
+       u8         action_description[0x4];
+       u8         reserved_at_a8[0x8];
+       u8         counter_trigger_15_0[0x10];
+
+       u8         miss_address_63_48[0x10];
+       u8         counter_trigger_23_16[0x08];
+       u8         miss_address_39_32[0x8];
+
+       u8         miss_address_31_6[0x1a];
+       u8         learning_point[0x1];
+       u8         fail_on_error[0x1];
+       u8         match_polarity[0x1];
+       u8         mask_mode[0x1];
+       u8         miss_rank[0x2];
+};
+
+struct mlx5_ifc_ste_modify_packet_bits {
+       u8         entry_type[0x4];
+       u8         reserved_at_4[0x4];
+       u8         entry_sub_type[0x8];
+       u8         byte_mask[0x10];
+
+       u8         next_table_base_63_48[0x10];
+       u8         next_lu_type[0x8];
+       u8         next_table_base_39_32_size[0x8];
+
+       u8         next_table_base_31_5_size[0x1b];
+       u8         linear_hash_enable[0x1];
+       u8         reserved_at_[0x2];
+       u8         next_table_rank[0x2];
+
+       u8         number_of_re_write_actions[0x10];
+       u8         gvmi[0x10];
+
+       u8         header_re_write_actions_pointer[0x20];
+
+       u8         reserved_at_a0[0x1];
+       u8         tunneling_action[0x3];
+       u8         action_description[0x4];
+       u8         reserved_at_a8[0x8];
+       u8         counter_trigger_15_0[0x10];
+
+       u8         miss_address_63_48[0x10];
+       u8         counter_trigger_23_16[0x08];
+       u8         miss_address_39_32[0x8];
+
+       u8         miss_address_31_6[0x1a];
+       u8         learning_point[0x1];
+       u8         fail_on_error[0x1];
+       u8         match_polarity[0x1];
+       u8         mask_mode[0x1];
+       u8         miss_rank[0x2];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_bits {
+       u8         smac_47_16[0x20];
+
+       u8         smac_15_0[0x10];
+       u8         l3_ethertype[0x10];
+
+       u8         qp_type[0x2];
+       u8         ethertype_filter[0x1];
+       u8         reserved_at_43[0x1];
+       u8         sx_sniffer[0x1];
+       u8         force_lb[0x1];
+       u8         functional_lb[0x1];
+       u8         port[0x1];
+       u8         reserved_at_48[0x4];
+       u8         first_priority[0x3];
+       u8         first_cfi[0x1];
+       u8         first_vlan_qualifier[0x2];
+       u8         reserved_at_52[0x2];
+       u8         first_vlan_id[0xc];
+
+       u8         ip_fragmented[0x1];
+       u8         tcp_syn[0x1];
+       u8         encp_type[0x2];
+       u8         l3_type[0x2];
+       u8         l4_type[0x2];
+       u8         reserved_at_68[0x4];
+       u8         second_priority[0x3];
+       u8         second_cfi[0x1];
+       u8         second_vlan_qualifier[0x2];
+       u8         reserved_at_72[0x2];
+       u8         second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_dst_bits {
+       u8         dmac_47_16[0x20];
+
+       u8         dmac_15_0[0x10];
+       u8         l3_ethertype[0x10];
+
+       u8         qp_type[0x2];
+       u8         ethertype_filter[0x1];
+       u8         reserved_at_43[0x1];
+       u8         sx_sniffer[0x1];
+       u8         force_lb[0x1];
+       u8         functional_lb[0x1];
+       u8         port[0x1];
+       u8         reserved_at_48[0x4];
+       u8         first_priority[0x3];
+       u8         first_cfi[0x1];
+       u8         first_vlan_qualifier[0x2];
+       u8         reserved_at_52[0x2];
+       u8         first_vlan_id[0xc];
+
+       u8         ip_fragmented[0x1];
+       u8         tcp_syn[0x1];
+       u8         encp_type[0x2];
+       u8         l3_type[0x2];
+       u8         l4_type[0x2];
+       u8         reserved_at_68[0x4];
+       u8         second_priority[0x3];
+       u8         second_cfi[0x1];
+       u8         second_vlan_qualifier[0x2];
+       u8         reserved_at_72[0x2];
+       u8         second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_dst_bits {
+       u8         dmac_47_16[0x20];
+
+       u8         dmac_15_0[0x10];
+       u8         smac_47_32[0x10];
+
+       u8         smac_31_0[0x20];
+
+       u8         sx_sniffer[0x1];
+       u8         force_lb[0x1];
+       u8         functional_lb[0x1];
+       u8         port[0x1];
+       u8         l3_type[0x2];
+       u8         reserved_at_66[0x6];
+       u8         first_priority[0x3];
+       u8         first_cfi[0x1];
+       u8         first_vlan_qualifier[0x2];
+       u8         reserved_at_72[0x2];
+       u8         first_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_5_tuple_bits {
+       u8         destination_address[0x20];
+
+       u8         source_address[0x20];
+
+       u8         source_port[0x10];
+       u8         destination_port[0x10];
+
+       u8         fragmented[0x1];
+       u8         first_fragment[0x1];
+       u8         reserved_at_62[0x2];
+       u8         reserved_at_64[0x1];
+       u8         ecn[0x2];
+       u8         tcp_ns[0x1];
+       u8         tcp_cwr[0x1];
+       u8         tcp_ece[0x1];
+       u8         tcp_urg[0x1];
+       u8         tcp_ack[0x1];
+       u8         tcp_psh[0x1];
+       u8         tcp_rst[0x1];
+       u8         tcp_syn[0x1];
+       u8         tcp_fin[0x1];
+       u8         dscp[0x6];
+       u8         reserved_at_76[0x2];
+       u8         protocol[0x8];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv6_dst_bits {
+       u8         dst_ip_127_96[0x20];
+
+       u8         dst_ip_95_64[0x20];
+
+       u8         dst_ip_63_32[0x20];
+
+       u8         dst_ip_31_0[0x20];
+};
+
+struct mlx5_ifc_ste_eth_l2_tnl_bits {
+       u8         dmac_47_16[0x20];
+
+       u8         dmac_15_0[0x10];
+       u8         l3_ethertype[0x10];
+
+       u8         l2_tunneling_network_id[0x20];
+
+       u8         ip_fragmented[0x1];
+       u8         tcp_syn[0x1];
+       u8         encp_type[0x2];
+       u8         l3_type[0x2];
+       u8         l4_type[0x2];
+       u8         first_priority[0x3];
+       u8         first_cfi[0x1];
+       u8         reserved_at_6c[0x3];
+       u8         gre_key_flag[0x1];
+       u8         first_vlan_qualifier[0x2];
+       u8         reserved_at_72[0x2];
+       u8         first_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv6_src_bits {
+       u8         src_ip_127_96[0x20];
+
+       u8         src_ip_95_64[0x20];
+
+       u8         src_ip_63_32[0x20];
+
+       u8         src_ip_31_0[0x20];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_misc_bits {
+       u8         version[0x4];
+       u8         ihl[0x4];
+       u8         reserved_at_8[0x8];
+       u8         total_length[0x10];
+
+       u8         identification[0x10];
+       u8         flags[0x3];
+       u8         fragment_offset[0xd];
+
+       u8         time_to_live[0x8];
+       u8         reserved_at_48[0x8];
+       u8         checksum[0x10];
+
+       u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_eth_l4_bits {
+       u8         fragmented[0x1];
+       u8         first_fragment[0x1];
+       u8         reserved_at_2[0x6];
+       u8         protocol[0x8];
+       u8         dst_port[0x10];
+
+       u8         ipv6_version[0x4];
+       u8         reserved_at_24[0x1];
+       u8         ecn[0x2];
+       u8         tcp_ns[0x1];
+       u8         tcp_cwr[0x1];
+       u8         tcp_ece[0x1];
+       u8         tcp_urg[0x1];
+       u8         tcp_ack[0x1];
+       u8         tcp_psh[0x1];
+       u8         tcp_rst[0x1];
+       u8         tcp_syn[0x1];
+       u8         tcp_fin[0x1];
+       u8         src_port[0x10];
+
+       u8         ipv6_payload_length[0x10];
+       u8         ipv6_hop_limit[0x8];
+       u8         dscp[0x6];
+       u8         reserved_at_5e[0x2];
+
+       u8         tcp_data_offset[0x4];
+       u8         reserved_at_64[0x8];
+       u8         flow_label[0x14];
+};
+
+struct mlx5_ifc_ste_eth_l4_misc_bits {
+       u8         checksum[0x10];
+       u8         length[0x10];
+
+       u8         seq_num[0x20];
+
+       u8         ack_num[0x20];
+
+       u8         urgent_pointer[0x10];
+       u8         window_size[0x10];
+};
+
+struct mlx5_ifc_ste_mpls_bits {
+       u8         mpls0_label[0x14];
+       u8         mpls0_exp[0x3];
+       u8         mpls0_s_bos[0x1];
+       u8         mpls0_ttl[0x8];
+
+       u8         mpls1_label[0x20];
+
+       u8         mpls2_label[0x20];
+
+       u8         reserved_at_60[0x16];
+       u8         mpls4_s_bit[0x1];
+       u8         mpls4_qualifier[0x1];
+       u8         mpls3_s_bit[0x1];
+       u8         mpls3_qualifier[0x1];
+       u8         mpls2_s_bit[0x1];
+       u8         mpls2_qualifier[0x1];
+       u8         mpls1_s_bit[0x1];
+       u8         mpls1_qualifier[0x1];
+       u8         mpls0_s_bit[0x1];
+       u8         mpls0_qualifier[0x1];
+};
+
+struct mlx5_ifc_ste_register_0_bits {
+       u8         register_0_h[0x20];
+
+       u8         register_0_l[0x20];
+
+       u8         register_1_h[0x20];
+
+       u8         register_1_l[0x20];
+};
+
+struct mlx5_ifc_ste_register_1_bits {
+       u8         register_2_h[0x20];
+
+       u8         register_2_l[0x20];
+
+       u8         register_3_h[0x20];
+
+       u8         register_3_l[0x20];
+};
+
+struct mlx5_ifc_ste_gre_bits {
+       u8         gre_c_present[0x1];
+       u8         reserved_at_30[0x1];
+       u8         gre_k_present[0x1];
+       u8         gre_s_present[0x1];
+       u8         strict_src_route[0x1];
+       u8         recur[0x3];
+       u8         flags[0x5];
+       u8         version[0x3];
+       u8         gre_protocol[0x10];
+
+       u8         checksum[0x10];
+       u8         offset[0x10];
+
+       u8         gre_key_h[0x18];
+       u8         gre_key_l[0x8];
+
+       u8         seq_num[0x20];
+};
+
+struct mlx5_ifc_ste_flex_parser_0_bits {
+       u8         flex_parser_3[0x20];
+
+       u8         flex_parser_2[0x20];
+
+       u8         flex_parser_1[0x20];
+
+       u8         flex_parser_0[0x20];
+};
+
+struct mlx5_ifc_ste_flex_parser_1_bits {
+       u8         flex_parser_7[0x20];
+
+       u8         flex_parser_6[0x20];
+
+       u8         flex_parser_5[0x20];
+
+       u8         flex_parser_4[0x20];
+};
+
+struct mlx5_ifc_ste_flex_parser_ok_bits {
+       u8         flex_parser_3[0x20];
+       u8         flex_parser_2[0x20];
+       u8         flex_parsers_ok[0x8];
+       u8         reserved_at_48[0x18];
+       u8         flex_parser_0[0x20];
+};
+
+struct mlx5_ifc_ste_flex_parser_tnl_bits {
+       u8         flex_parser_tunneling_header_63_32[0x20];
+
+       u8         flex_parser_tunneling_header_31_0[0x20];
+
+       u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_ste_flex_parser_tnl_vxlan_gpe_bits {
+       u8         outer_vxlan_gpe_flags[0x8];
+       u8         reserved_at_8[0x10];
+       u8         outer_vxlan_gpe_next_protocol[0x8];
+
+       u8         outer_vxlan_gpe_vni[0x18];
+       u8         reserved_at_38[0x8];
+
+       u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_ste_flex_parser_tnl_geneve_bits {
+       u8         reserved_at_0[0x2];
+       u8         geneve_opt_len[0x6];
+       u8         geneve_oam[0x1];
+       u8         reserved_at_9[0x7];
+       u8         geneve_protocol_type[0x10];
+
+       u8         geneve_vni[0x18];
+       u8         reserved_at_38[0x8];
+
+       u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_ste_flex_parser_tnl_gtpu_bits {
+       u8         reserved_at_0[0x5];
+       u8         gtpu_msg_flags[0x3];
+       u8         gtpu_msg_type[0x8];
+       u8         reserved_at_10[0x10];
+
+       u8         gtpu_teid[0x20];
+
+       u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_ste_tunnel_header_bits {
+       u8         tunnel_header_0[0x20];
+
+       u8         tunnel_header_1[0x20];
+
+       u8         reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_ste_general_purpose_bits {
+       u8         general_purpose_lookup_field[0x20];
+
+       u8         reserved_at_20[0x20];
+
+       u8         reserved_at_40[0x20];
+
+       u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_src_gvmi_qp_bits {
+       u8         loopback_syndrome[0x8];
+       u8         reserved_at_8[0x8];
+       u8         source_gvmi[0x10];
+
+       u8         reserved_at_20[0x5];
+       u8         force_lb[0x1];
+       u8         functional_lb[0x1];
+       u8         source_is_requestor[0x1];
+       u8         source_qp[0x18];
+
+       u8         reserved_at_40[0x20];
+
+       u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_l2_hdr_bits {
+       u8         dmac_47_16[0x20];
+
+       u8         dmac_15_0[0x10];
+       u8         smac_47_32[0x10];
+
+       u8         smac_31_0[0x20];
+
+       u8         ethertype[0x10];
+       u8         vlan_type[0x10];
+
+       u8         vlan[0x10];
+       u8         reserved_at_90[0x10];
+};
+
+/* Both HW set and HW add share the same HW format with different opcodes */
+struct mlx5_ifc_dr_action_hw_set_bits {
+       u8         opcode[0x8];
+       u8         destination_field_code[0x8];
+       u8         reserved_at_10[0x2];
+       u8         destination_left_shifter[0x6];
+       u8         reserved_at_18[0x3];
+       u8         destination_length[0x5];
+
+       u8         inline_data[0x20];
+};
+
+struct mlx5_ifc_dr_action_hw_copy_bits {
+       u8         opcode[0x8];
+       u8         destination_field_code[0x8];
+       u8         reserved_at_10[0x2];
+       u8         destination_left_shifter[0x6];
+       u8         reserved_at_18[0x2];
+       u8         destination_length[0x6];
+
+       u8         reserved_at_20[0x8];
+       u8         source_field_code[0x8];
+       u8         reserved_at_30[0x2];
+       u8         source_left_shifter[0x6];
+       u8         reserved_at_38[0x8];
+};
+
+enum {
+       MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ = 2,
+};
+
+struct mlx5_ifc_ste_aso_flow_meter_action_bits {
+       u8         reserved_at_0[0xc];
+       u8         action[0x1];
+       u8         initial_color[0x2];
+       u8         line_id[0x1];
+};
+
+struct mlx5_ifc_ste_double_action_aso_v1_bits {
+       u8         action_id[0x8];
+       u8         aso_context_number[0x18];
+
+       u8         dest_reg_id[0x2];
+       u8         change_ordering_tag[0x1];
+       u8         aso_check_ordering[0x1];
+       u8         aso_context_type[0x4];
+       u8         reserved_at_28[0x8];
+       union {
+               u8 aso_fields[0x10];
+               struct mlx5_ifc_ste_aso_flow_meter_action_bits flow_meter;
+       };
+};
+
+#endif /* MLX5_IFC_DR_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5_ifc_dr_ste_v1.h
new file mode 100644 (file)
index 0000000..ca3b0f1
--- /dev/null
@@ -0,0 +1,469 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2020 NVIDIA CORPORATION. All rights reserved. */
+
+#ifndef MLX5_IFC_DR_STE_V1_H
+#define MLX5_IFC_DR_STE_V1_H
+
+enum mlx5_ifc_ste_v1_modify_hdr_offset {
+       MLX5_MODIFY_HEADER_V1_QW_OFFSET = 0x20,
+};
+
+struct mlx5_ifc_ste_single_action_flow_tag_v1_bits {
+       u8         action_id[0x8];
+       u8         flow_tag[0x18];
+};
+
+struct mlx5_ifc_ste_single_action_modify_list_v1_bits {
+       u8         action_id[0x8];
+       u8         num_of_modify_actions[0x8];
+       u8         modify_actions_ptr[0x10];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_v1_bits {
+       u8         action_id[0x8];
+       u8         reserved_at_8[0x2];
+       u8         start_anchor[0x6];
+       u8         reserved_at_10[0x2];
+       u8         end_anchor[0x6];
+       u8         reserved_at_18[0x4];
+       u8         decap[0x1];
+       u8         vni_to_cqe[0x1];
+       u8         qos_profile[0x2];
+};
+
+struct mlx5_ifc_ste_single_action_remove_header_size_v1_bits {
+       u8         action_id[0x8];
+       u8         reserved_at_8[0x2];
+       u8         start_anchor[0x6];
+       u8         outer_l4_remove[0x1];
+       u8         reserved_at_11[0x1];
+       u8         start_offset[0x7];
+       u8         reserved_at_18[0x1];
+       u8         remove_size[0x6];
+};
+
+struct mlx5_ifc_ste_double_action_copy_v1_bits {
+       u8         action_id[0x8];
+       u8         destination_dw_offset[0x8];
+       u8         reserved_at_10[0x2];
+       u8         destination_left_shifter[0x6];
+       u8         reserved_at_17[0x2];
+       u8         destination_length[0x6];
+
+       u8         reserved_at_20[0x8];
+       u8         source_dw_offset[0x8];
+       u8         reserved_at_30[0x2];
+       u8         source_right_shifter[0x6];
+       u8         reserved_at_38[0x8];
+};
+
+struct mlx5_ifc_ste_double_action_set_v1_bits {
+       u8         action_id[0x8];
+       u8         destination_dw_offset[0x8];
+       u8         reserved_at_10[0x2];
+       u8         destination_left_shifter[0x6];
+       u8         reserved_at_18[0x2];
+       u8         destination_length[0x6];
+
+       u8         inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_add_v1_bits {
+       u8         action_id[0x8];
+       u8         destination_dw_offset[0x8];
+       u8         reserved_at_10[0x2];
+       u8         destination_left_shifter[0x6];
+       u8         reserved_at_18[0x2];
+       u8         destination_length[0x6];
+
+       u8         add_value[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_inline_v1_bits {
+       u8         action_id[0x8];
+       u8         reserved_at_8[0x2];
+       u8         start_anchor[0x6];
+       u8         start_offset[0x7];
+       u8         reserved_at_17[0x9];
+
+       u8         inline_data[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_insert_with_ptr_v1_bits {
+       u8         action_id[0x8];
+       u8         reserved_at_8[0x2];
+       u8         start_anchor[0x6];
+       u8         start_offset[0x7];
+       u8         size[0x6];
+       u8         attributes[0x3];
+
+       u8         pointer[0x20];
+};
+
+struct mlx5_ifc_ste_double_action_accelerated_modify_action_list_v1_bits {
+       u8         action_id[0x8];
+       u8         modify_actions_pattern_pointer[0x18];
+
+       u8         number_of_modify_actions[0x8];
+       u8         modify_actions_argument_pointer[0x18];
+};
+
+struct mlx5_ifc_ste_match_bwc_v1_bits {
+       u8         entry_format[0x8];
+       u8         counter_id[0x18];
+
+       u8         miss_address_63_48[0x10];
+       u8         match_definer_ctx_idx[0x8];
+       u8         miss_address_39_32[0x8];
+
+       u8         miss_address_31_6[0x1a];
+       u8         reserved_at_5a[0x1];
+       u8         match_polarity[0x1];
+       u8         reparse[0x1];
+       u8         reserved_at_5d[0x3];
+
+       u8         next_table_base_63_48[0x10];
+       u8         hash_definer_ctx_idx[0x8];
+       u8         next_table_base_39_32_size[0x8];
+
+       u8         next_table_base_31_5_size[0x1b];
+       u8         hash_type[0x2];
+       u8         hash_after_actions[0x1];
+       u8         reserved_at_9e[0x2];
+
+       u8         byte_mask[0x10];
+       u8         next_entry_format[0x1];
+       u8         mask_mode[0x1];
+       u8         gvmi[0xe];
+
+       u8         action[0x40];
+};
+
+struct mlx5_ifc_ste_mask_and_match_v1_bits {
+       u8         entry_format[0x8];
+       u8         counter_id[0x18];
+
+       u8         miss_address_63_48[0x10];
+       u8         match_definer_ctx_idx[0x8];
+       u8         miss_address_39_32[0x8];
+
+       u8         miss_address_31_6[0x1a];
+       u8         reserved_at_5a[0x1];
+       u8         match_polarity[0x1];
+       u8         reparse[0x1];
+       u8         reserved_at_5d[0x3];
+
+       u8         next_table_base_63_48[0x10];
+       u8         hash_definer_ctx_idx[0x8];
+       u8         next_table_base_39_32_size[0x8];
+
+       u8         next_table_base_31_5_size[0x1b];
+       u8         hash_type[0x2];
+       u8         hash_after_actions[0x1];
+       u8         reserved_at_9e[0x2];
+
+       u8         action[0x60];
+};
+
+struct mlx5_ifc_ste_match_ranges_v1_bits {
+       u8         entry_format[0x8];
+       u8         counter_id[0x18];
+
+       u8         miss_address_63_48[0x10];
+       u8         match_definer_ctx_idx[0x8];
+       u8         miss_address_39_32[0x8];
+
+       u8         miss_address_31_6[0x1a];
+       u8         reserved_at_5a[0x1];
+       u8         match_polarity[0x1];
+       u8         reparse[0x1];
+       u8         reserved_at_5d[0x3];
+
+       u8         next_table_base_63_48[0x10];
+       u8         hash_definer_ctx_idx[0x8];
+       u8         next_table_base_39_32_size[0x8];
+
+       u8         next_table_base_31_5_size[0x1b];
+       u8         hash_type[0x2];
+       u8         hash_after_actions[0x1];
+       u8         reserved_at_9e[0x2];
+
+       u8         action[0x60];
+
+       u8         max_value_0[0x20];
+       u8         min_value_0[0x20];
+       u8         max_value_1[0x20];
+       u8         min_value_1[0x20];
+       u8         max_value_2[0x20];
+       u8         min_value_2[0x20];
+       u8         max_value_3[0x20];
+       u8         min_value_3[0x20];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_v1_bits {
+       u8         reserved_at_0[0x1];
+       u8         sx_sniffer[0x1];
+       u8         functional_loopback[0x1];
+       u8         ip_fragmented[0x1];
+       u8         qp_type[0x2];
+       u8         encapsulation_type[0x2];
+       u8         port[0x2];
+       u8         l3_type[0x2];
+       u8         l4_type[0x2];
+       u8         first_vlan_qualifier[0x2];
+       u8         first_priority[0x3];
+       u8         first_cfi[0x1];
+       u8         first_vlan_id[0xc];
+
+       u8         smac_47_16[0x20];
+
+       u8         smac_15_0[0x10];
+       u8         l3_ethertype[0x10];
+
+       u8         reserved_at_60[0x6];
+       u8         tcp_syn[0x1];
+       u8         reserved_at_67[0x3];
+       u8         force_loopback[0x1];
+       u8         l2_ok[0x1];
+       u8         l3_ok[0x1];
+       u8         l4_ok[0x1];
+       u8         second_vlan_qualifier[0x2];
+
+       u8         second_priority[0x3];
+       u8         second_cfi[0x1];
+       u8         second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_dst_v1_bits {
+       u8         reserved_at_0[0x1];
+       u8         sx_sniffer[0x1];
+       u8         functional_lb[0x1];
+       u8         ip_fragmented[0x1];
+       u8         qp_type[0x2];
+       u8         encapsulation_type[0x2];
+       u8         port[0x2];
+       u8         l3_type[0x2];
+       u8         l4_type[0x2];
+       u8         first_vlan_qualifier[0x2];
+       u8         first_priority[0x3];
+       u8         first_cfi[0x1];
+       u8         first_vlan_id[0xc];
+
+       u8         dmac_47_16[0x20];
+
+       u8         dmac_15_0[0x10];
+       u8         l3_ethertype[0x10];
+
+       u8         reserved_at_60[0x6];
+       u8         tcp_syn[0x1];
+       u8         reserved_at_67[0x3];
+       u8         force_lb[0x1];
+       u8         l2_ok[0x1];
+       u8         l3_ok[0x1];
+       u8         l4_ok[0x1];
+       u8         second_vlan_qualifier[0x2];
+       u8         second_priority[0x3];
+       u8         second_cfi[0x1];
+       u8         second_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l2_src_dst_v1_bits {
+       u8         dmac_47_16[0x20];
+
+       u8         smac_47_16[0x20];
+
+       u8         dmac_15_0[0x10];
+       u8         reserved_at_50[0x2];
+       u8         functional_lb[0x1];
+       u8         reserved_at_53[0x5];
+       u8         port[0x2];
+       u8         l3_type[0x2];
+       u8         reserved_at_5c[0x2];
+       u8         first_vlan_qualifier[0x2];
+
+       u8         first_priority[0x3];
+       u8         first_cfi[0x1];
+       u8         first_vlan_id[0xc];
+       u8         smac_15_0[0x10];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_5_tuple_v1_bits {
+       u8         source_address[0x20];
+
+       u8         destination_address[0x20];
+
+       u8         source_port[0x10];
+       u8         destination_port[0x10];
+
+       u8         reserved_at_60[0x4];
+       u8         l4_ok[0x1];
+       u8         l3_ok[0x1];
+       u8         fragmented[0x1];
+       u8         tcp_ns[0x1];
+       u8         tcp_cwr[0x1];
+       u8         tcp_ece[0x1];
+       u8         tcp_urg[0x1];
+       u8         tcp_ack[0x1];
+       u8         tcp_psh[0x1];
+       u8         tcp_rst[0x1];
+       u8         tcp_syn[0x1];
+       u8         tcp_fin[0x1];
+       u8         dscp[0x6];
+       u8         ecn[0x2];
+       u8         protocol[0x8];
+};
+
+struct mlx5_ifc_ste_eth_l2_tnl_v1_bits {
+       u8         l2_tunneling_network_id[0x20];
+
+       u8         dmac_47_16[0x20];
+
+       u8         dmac_15_0[0x10];
+       u8         l3_ethertype[0x10];
+
+       u8         reserved_at_60[0x3];
+       u8         ip_fragmented[0x1];
+       u8         reserved_at_64[0x2];
+       u8         encp_type[0x2];
+       u8         reserved_at_68[0x2];
+       u8         l3_type[0x2];
+       u8         l4_type[0x2];
+       u8         first_vlan_qualifier[0x2];
+       u8         first_priority[0x3];
+       u8         first_cfi[0x1];
+       u8         first_vlan_id[0xc];
+};
+
+struct mlx5_ifc_ste_eth_l3_ipv4_misc_v1_bits {
+       u8         identification[0x10];
+       u8         flags[0x3];
+       u8         fragment_offset[0xd];
+
+       u8         total_length[0x10];
+       u8         checksum[0x10];
+
+       u8         version[0x4];
+       u8         ihl[0x4];
+       u8         time_to_live[0x8];
+       u8         reserved_at_50[0x10];
+
+       u8         reserved_at_60[0x1c];
+       u8         voq_internal_prio[0x4];
+};
+
+struct mlx5_ifc_ste_eth_l4_v1_bits {
+       u8         ipv6_version[0x4];
+       u8         reserved_at_4[0x4];
+       u8         dscp[0x6];
+       u8         ecn[0x2];
+       u8         ipv6_hop_limit[0x8];
+       u8         protocol[0x8];
+
+       u8         src_port[0x10];
+       u8         dst_port[0x10];
+
+       u8         first_fragment[0x1];
+       u8         reserved_at_41[0xb];
+       u8         flow_label[0x14];
+
+       u8         tcp_data_offset[0x4];
+       u8         l4_ok[0x1];
+       u8         l3_ok[0x1];
+       u8         fragmented[0x1];
+       u8         tcp_ns[0x1];
+       u8         tcp_cwr[0x1];
+       u8         tcp_ece[0x1];
+       u8         tcp_urg[0x1];
+       u8         tcp_ack[0x1];
+       u8         tcp_psh[0x1];
+       u8         tcp_rst[0x1];
+       u8         tcp_syn[0x1];
+       u8         tcp_fin[0x1];
+       u8         ipv6_paylen[0x10];
+};
+
+struct mlx5_ifc_ste_eth_l4_misc_v1_bits {
+       u8         window_size[0x10];
+       u8         urgent_pointer[0x10];
+
+       u8         ack_num[0x20];
+
+       u8         seq_num[0x20];
+
+       u8         length[0x10];
+       u8         checksum[0x10];
+};
+
+struct mlx5_ifc_ste_mpls_v1_bits {
+       u8         reserved_at_0[0x15];
+       u8         mpls_ok[0x1];
+       u8         mpls4_s_bit[0x1];
+       u8         mpls4_qualifier[0x1];
+       u8         mpls3_s_bit[0x1];
+       u8         mpls3_qualifier[0x1];
+       u8         mpls2_s_bit[0x1];
+       u8         mpls2_qualifier[0x1];
+       u8         mpls1_s_bit[0x1];
+       u8         mpls1_qualifier[0x1];
+       u8         mpls0_s_bit[0x1];
+       u8         mpls0_qualifier[0x1];
+
+       u8         mpls0_label[0x14];
+       u8         mpls0_exp[0x3];
+       u8         mpls0_s_bos[0x1];
+       u8         mpls0_ttl[0x8];
+
+       u8         mpls1_label[0x20];
+
+       u8         mpls2_label[0x20];
+};
+
+struct mlx5_ifc_ste_gre_v1_bits {
+       u8         gre_c_present[0x1];
+       u8         reserved_at_1[0x1];
+       u8         gre_k_present[0x1];
+       u8         gre_s_present[0x1];
+       u8         strict_src_route[0x1];
+       u8         recur[0x3];
+       u8         flags[0x5];
+       u8         version[0x3];
+       u8         gre_protocol[0x10];
+
+       u8         reserved_at_20[0x20];
+
+       u8         gre_key_h[0x18];
+       u8         gre_key_l[0x8];
+
+       u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_src_gvmi_qp_v1_bits {
+       u8         loopback_synd[0x8];
+       u8         reserved_at_8[0x7];
+       u8         functional_lb[0x1];
+       u8         source_gvmi[0x10];
+
+       u8         force_lb[0x1];
+       u8         reserved_at_21[0x1];
+       u8         source_is_requestor[0x1];
+       u8         reserved_at_23[0x5];
+       u8         source_qp[0x18];
+
+       u8         reserved_at_40[0x20];
+
+       u8         reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_ste_icmp_v1_bits {
+       u8         icmp_payload_data[0x20];
+
+       u8         icmp_header_data[0x20];
+
+       u8         icmp_type[0x8];
+       u8         icmp_code[0x8];
+       u8         reserved_at_50[0x10];
+
+       u8         reserved_at_60[0x20];
+};
+
+#endif /* MLX5_IFC_DR_STE_V1_H */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/sws/mlx5dr.h
new file mode 100644 (file)
index 0000000..3ac7dc6
--- /dev/null
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2019, Mellanox Technologies */
+
+#ifndef _MLX5DR_H_
+#define _MLX5DR_H_
+
+struct mlx5dr_domain;
+struct mlx5dr_table;
+struct mlx5dr_matcher;
+struct mlx5dr_rule;
+struct mlx5dr_action;
+
+enum mlx5dr_domain_type {
+       MLX5DR_DOMAIN_TYPE_NIC_RX,
+       MLX5DR_DOMAIN_TYPE_NIC_TX,
+       MLX5DR_DOMAIN_TYPE_FDB,
+};
+
+enum mlx5dr_domain_sync_flags {
+       MLX5DR_DOMAIN_SYNC_FLAGS_SW = 1 << 0,
+       MLX5DR_DOMAIN_SYNC_FLAGS_HW = 1 << 1,
+};
+
+enum mlx5dr_action_reformat_type {
+       DR_ACTION_REFORMAT_TYP_TNL_L2_TO_L2,
+       DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L2,
+       DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2,
+       DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3,
+       DR_ACTION_REFORMAT_TYP_INSERT_HDR,
+       DR_ACTION_REFORMAT_TYP_REMOVE_HDR,
+};
+
+struct mlx5dr_match_parameters {
+       size_t match_sz;
+       u64 *match_buf; /* Device spec format */
+};
+
+struct mlx5dr_action_dest {
+       struct mlx5dr_action *dest;
+       struct mlx5dr_action *reformat;
+};
+
+struct mlx5dr_domain *
+mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type);
+
+int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
+
+int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
+
+void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
+                           struct mlx5dr_domain *peer_dmn,
+                           u16 peer_vhca_id);
+
+struct mlx5dr_table *
+mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,
+                   u16 uid);
+
+struct mlx5dr_table *
+mlx5dr_table_get_from_fs_ft(struct mlx5_flow_table *ft);
+
+int mlx5dr_table_destroy(struct mlx5dr_table *table);
+
+u32 mlx5dr_table_get_id(struct mlx5dr_table *table);
+
+struct mlx5dr_matcher *
+mlx5dr_matcher_create(struct mlx5dr_table *table,
+                     u32 priority,
+                     u8 match_criteria_enable,
+                     struct mlx5dr_match_parameters *mask);
+
+int mlx5dr_matcher_destroy(struct mlx5dr_matcher *matcher);
+
+struct mlx5dr_rule *
+mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
+                  struct mlx5dr_match_parameters *value,
+                  size_t num_actions,
+                  struct mlx5dr_action *actions[],
+                  u32 flow_source);
+
+int mlx5dr_rule_destroy(struct mlx5dr_rule *rule);
+
+int mlx5dr_table_set_miss_action(struct mlx5dr_table *tbl,
+                                struct mlx5dr_action *action);
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_table_num(struct mlx5dr_domain *dmn, u32 table_num);
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_table(struct mlx5dr_table *table);
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
+                                       struct mlx5_flow_table *ft);
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
+                               u16 vport, u8 vhca_id_valid,
+                               u16 vhca_id);
+
+struct mlx5dr_action *
+mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
+                                  struct mlx5dr_action_dest *dests,
+                                  u32 num_of_dests,
+                                  bool ignore_flow_level,
+                                  u32 flow_source);
+
+struct mlx5dr_action *mlx5dr_action_create_drop(void);
+
+struct mlx5dr_action *mlx5dr_action_create_tag(u32 tag_value);
+
+struct mlx5dr_action *
+mlx5dr_action_create_flow_sampler(struct mlx5dr_domain *dmn, u32 sampler_id);
+
+struct mlx5dr_action *
+mlx5dr_action_create_flow_counter(u32 counter_id);
+
+struct mlx5dr_action *
+mlx5dr_action_create_packet_reformat(struct mlx5dr_domain *dmn,
+                                    enum mlx5dr_action_reformat_type reformat_type,
+                                    u8 reformat_param_0,
+                                    u8 reformat_param_1,
+                                    size_t data_sz,
+                                    void *data);
+
+struct mlx5dr_action *
+mlx5dr_action_create_modify_header(struct mlx5dr_domain *domain,
+                                  u32 flags,
+                                  size_t actions_sz,
+                                  __be64 actions[]);
+
+struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void);
+
+struct mlx5dr_action *
+mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr);
+
+struct mlx5dr_action *
+mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
+                        u32 obj_id,
+                        u8 return_reg_id,
+                        u8 aso_type,
+                        u8 init_color,
+                        u8 meter_id);
+
+struct mlx5dr_action *
+mlx5dr_action_create_dest_match_range(struct mlx5dr_domain *dmn,
+                                     u32 field,
+                                     struct mlx5_flow_table *hit_ft,
+                                     struct mlx5_flow_table *miss_ft,
+                                     u32 min,
+                                     u32 max);
+
+int mlx5dr_action_destroy(struct mlx5dr_action *action);
+
+u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action);
+
+static inline bool
+mlx5dr_is_supported(struct mlx5_core_dev *dev)
+{
+       return MLX5_CAP_GEN(dev, roce) &&
+              (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner) ||
+               (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, sw_owner_v2) &&
+                (MLX5_CAP_GEN(dev, steering_format_version) <=
+                 MLX5_STEERING_FORMAT_CONNECTX_7)));
+}
+
+/* buddy functions & structure */
+
+struct mlx5dr_icm_mr;
+
+struct mlx5dr_icm_buddy_mem {
+       unsigned long           **bitmap;
+       unsigned int            *num_free;
+       u32                     max_order;
+       struct list_head        list_node;
+       struct mlx5dr_icm_mr    *icm_mr;
+       struct mlx5dr_icm_pool  *pool;
+
+       /* Amount of memory in used chunks - HW may be accessing this memory */
+       u64                     used_memory;
+
+       /* Memory optimisation */
+       struct mlx5dr_ste       *ste_arr;
+       struct list_head        *miss_list;
+       u8                      *hw_ste_arr;
+};
+
+int mlx5dr_buddy_init(struct mlx5dr_icm_buddy_mem *buddy,
+                     unsigned int max_order);
+void mlx5dr_buddy_cleanup(struct mlx5dr_icm_buddy_mem *buddy);
+int mlx5dr_buddy_alloc_mem(struct mlx5dr_icm_buddy_mem *buddy,
+                          unsigned int order,
+                          unsigned int *segment);
+void mlx5dr_buddy_free_mem(struct mlx5dr_icm_buddy_mem *buddy,
+                          unsigned int seg, unsigned int order);
+
+#endif /* _MLX5DR_H_ */